xref: /libuv/src/unix/async.c (revision 18d48bc1)
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* This file contains both the uv__async internal infrastructure and the
 * user-facing uv_async_t functions.
 */
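
/* Illustrative sketch of the user-facing pattern these functions support;
 * the handle and callback names below are hypothetical, not part of libuv:
 *
 *     static void wakeup_cb(uv_async_t* handle) {
 *       // Runs on the loop thread; multiple uv_async_send() calls may be
 *       // coalesced into a single invocation.
 *     }
 *
 *     // On the loop thread:
 *     uv_async_t wakeup;
 *     uv_async_init(uv_default_loop(), &wakeup, wakeup_cb);
 *
 *     // From any other thread:
 *     uv_async_send(&wakeup);
 */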

#include "uv.h"
#include "internal.h"

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>  /* snprintf() */
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sched.h>  /* sched_yield() */

#ifdef __linux__
#include <sys/eventfd.h>
#endif

static void uv__async_send(uv_loop_t* loop);
static int uv__async_start(uv_loop_t* loop);
static void uv__cpu_relax(void);


int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
  int err;

  err = uv__async_start(loop);
  if (err)
    return err;

  uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
  handle->async_cb = async_cb;
  handle->pending = 0;
  handle->u.fd = 0; /* This will be used as a busy flag. */

  uv__queue_insert_tail(&loop->async_handles, &handle->queue);
  uv__handle_start(handle);

  return 0;
}


int uv_async_send(uv_async_t* handle) {
  _Atomic int* pending;
  _Atomic int* busy;

  pending = (_Atomic int*) &handle->pending;
  busy = (_Atomic int*) &handle->u.fd;

  /* Do a cheap read first. */
  if (atomic_load_explicit(pending, memory_order_relaxed) != 0)
    return 0;

  /* Mark the handle as busy; uv__async_spin() waits for this to clear. */
  atomic_fetch_add(busy, 1);

  /* Wake up the other thread's event loop. */
  if (atomic_exchange(pending, 1) == 0)
    uv__async_send(handle->loop);

  /* Mark the handle as no longer busy. */
  atomic_fetch_add(busy, -1);

  return 0;
}
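
/* Informal protocol sketch: `pending` coalesces wakeups, so at most one
 * write reaches the fd until the loop thread clears the flag again in
 * uv__async_io(); `busy` counts senders still inside uv_async_send(), and
 * uv__async_spin() below waits for that count to reach zero so that
 * uv__async_close() cannot tear the handle down under a concurrent sender. */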


/* Wait for the busy flag to clear before closing.
 * Only call this from the event loop thread. */
static void uv__async_spin(uv_async_t* handle) {
  _Atomic int* pending;
  _Atomic int* busy;
  int i;

  pending = (_Atomic int*) &handle->pending;
  busy = (_Atomic int*) &handle->u.fd;

  /* Set the pending flag first, so no new events will be added by other
   * threads after this function returns. */
  atomic_store(pending, 1);

  for (;;) {
    /* 997 is not completely chosen at random. It's a prime number, acyclic by
     * nature, and should therefore hopefully dampen sympathetic resonance.
     */
    for (i = 0; i < 997; i++) {
      if (atomic_load(busy) == 0)
        return;

      /* Other thread is busy with this handle, spin until it's done. */
      uv__cpu_relax();
    }

    /* Yield the CPU. We may have preempted the other thread while it's
     * inside the critical section and if it's running on the same CPU
     * as us, we'll just burn CPU cycles until the end of our time slice.
     */
    sched_yield();
  }
}


void uv__async_close(uv_async_t* handle) {
  uv__async_spin(handle);
  uv__queue_remove(&handle->queue);
  uv__handle_stop(handle);
}


static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  char buf[1024];
  ssize_t r;
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;
  _Atomic int *pending;

  assert(w == &loop->async_io_watcher);

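  /* Drain the fd: on the pipe path this discards however many wakeup bytes
   * have accumulated, on the eventfd path a single 8-byte read resets the
   * counter.  Which handles actually have work is tracked per handle in
   * `pending`, not by how much is read here. */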
  for (;;) {
    r = read(w->fd, buf, sizeof(buf));

    if (r == sizeof(buf))
      continue;

    if (r != -1)
      break;

    if (errno == EAGAIN || errno == EWOULDBLOCK)
      break;

    if (errno == EINTR)
      continue;

    abort();
  }

  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);

    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);

    /* Atomically fetch and clear pending flag */
    pending = (_Atomic int*) &h->pending;
    if (atomic_exchange(pending, 0) == 0)
      continue;

    if (h->async_cb == NULL)
      continue;

    h->async_cb(h);
  }
}


static void uv__async_send(uv_loop_t* loop) {
  const void* buf;
  ssize_t len;
  int fd;
  int r;

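  /* Default to writing a single byte to the pipe write end; the Linux
   * eventfd path below overrides this with an 8-byte counter value, since
   * eventfd writes must be exactly eight bytes. */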
  buf = "";
  len = 1;
  fd = loop->async_wfd;

#if defined(__linux__)
  if (fd == -1) {
    static const uint64_t val = 1;
    buf = &val;
    len = sizeof(val);
    fd = loop->async_io_watcher.fd;  /* eventfd */
  }
#endif

  do
    r = write(fd, buf, len);
  while (r == -1 && errno == EINTR);

  if (r == len)
    return;

  if (r == -1)
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return;

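  /* Any other outcome (unexpected errno, short write) means the wakeup may
   * have been lost and there is no way to recover it here. */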
  abort();
}


static int uv__async_start(uv_loop_t* loop) {
  int pipefd[2];
  int err;

  if (loop->async_io_watcher.fd != -1)
    return 0;

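  /* On Linux a single eventfd serves as both the read and write end, and
   * async_wfd stays -1; elsewhere fall back to a non-blocking pipe pair. */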
#ifdef __linux__
  err = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if (err < 0)
    return UV__ERR(errno);

  pipefd[0] = err;
  pipefd[1] = -1;
#else
  err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
  if (err < 0)
    return err;
#endif

  uv__io_init(&loop->async_io_watcher, uv__async_io, pipefd[0]);
  uv__io_start(loop, &loop->async_io_watcher, POLLIN);
  loop->async_wfd = pipefd[1];

  return 0;
}


void uv__async_stop(uv_loop_t* loop) {
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;

  if (loop->async_io_watcher.fd == -1)
    return;

  /* Make sure no other thread is accessing the async handle fd after the loop
   * cleanup.
   */
  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);

    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);

    uv__async_spin(h);
  }

  if (loop->async_wfd != -1) {
    if (loop->async_wfd != loop->async_io_watcher.fd)
      uv__close(loop->async_wfd);
    loop->async_wfd = -1;
  }

  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
  uv__close(loop->async_io_watcher.fd);
  loop->async_io_watcher.fd = -1;
}


int uv__async_fork(uv_loop_t* loop) {
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;

  if (loop->async_io_watcher.fd == -1) /* never started */
    return 0;

  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);

    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);

    /* The state of any thread that set pending is now likely corrupt in this
     * child because the user called fork, so just clear these flags and move
     * on. Calling most libc functions after `fork` is declared to be undefined
     * behavior anyways, unless async-signal-safe, for multithreaded programs
     * like libuv, and nothing interesting in pthreads is async-signal-safe.
     */
    h->pending = 0;
    /* This is the busy flag, and we just abruptly lost all other threads. */
    h->u.fd = 0;
  }

  /* Recreate these, since they still exist, but belong to the wrong pid now. */
  if (loop->async_wfd != -1) {
    if (loop->async_wfd != loop->async_io_watcher.fd)
      uv__close(loop->async_wfd);
    loop->async_wfd = -1;
  }

  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
  uv__close(loop->async_io_watcher.fd);
  loop->async_io_watcher.fd = -1;

  return uv__async_start(loop);
}


static void uv__cpu_relax(void) {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ __volatile__ ("rep; nop" ::: "memory");  /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
  __asm__ __volatile__ ("yield" ::: "memory");
#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
  __asm volatile ("" : : : "memory");
#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
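  /* POWER "priority nops": or 1,1,1 lowers the SMT thread priority,
   * or 2,2,2 restores it to medium. */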
  __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}