xref: /libuv/src/unix/async.c (revision 27134547)
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* This file contains both the uv__async internal infrastructure and the
 * user-facing uv_async_t functions.
 */
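
/* Example use of the public API (illustrative sketch, not part of this file).
 * uv_async_send() is the one call below that is safe to make from any thread;
 * the callback always runs on the loop thread, and multiple sends may coalesce
 * into a single callback invocation. The handle and function names are
 * hypothetical.
 *
 *   #include <uv.h>
 *
 *   static uv_async_t wakeup;
 *
 *   static void on_wakeup(uv_async_t* handle) {
 *     // Runs on the loop thread; close the handle so uv_run() can return.
 *     uv_close((uv_handle_t*) handle, NULL);
 *   }
 *
 *   static void worker(void* arg) {
 *     uv_async_send(&wakeup);  // Wake the loop from another thread.
 *   }
 *
 *   int main(void) {
 *     uv_thread_t tid;
 *     uv_loop_t* loop = uv_default_loop();
 *     uv_async_init(loop, &wakeup, on_wakeup);
 *     uv_thread_create(&tid, worker, NULL);
 *     uv_run(loop, UV_RUN_DEFAULT);
 *     uv_thread_join(&tid);
 *     return uv_loop_close(loop);
 *   }
 */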

#include "uv.h"
#include "internal.h"

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>  /* snprintf() */
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sched.h>  /* sched_yield() */

#ifdef __linux__
#include <sys/eventfd.h>
#endif

#if UV__KQUEUE_EVFILT_USER
static uv_once_t kqueue_runtime_detection_guard = UV_ONCE_INIT;
static int kqueue_evfilt_user_support = 1;


static void uv__kqueue_runtime_detection(void) {
  int kq;
  struct kevent ev[2];
  struct timespec timeout = {0, 0};

  /* Perform the runtime detection to ensure that kqueue with
   * EVFILT_USER actually works. */
  kq = kqueue();
  EV_SET(ev, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
         EV_ADD | EV_CLEAR, 0, 0, 0);
  EV_SET(ev + 1, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
         0, NOTE_TRIGGER, 0, 0);
  if (kevent(kq, ev, 2, ev, 1, &timeout) < 1 ||
      ev[0].filter != EVFILT_USER ||
      ev[0].ident != UV__KQUEUE_EVFILT_USER_IDENT ||
      ev[0].flags & EV_ERROR)
    /* If we wind up here, we can assume that EVFILT_USER is defined but
     * broken on the current system. */
    kqueue_evfilt_user_support = 0;
  uv__close(kq);
}
#endif

static void uv__async_send(uv_loop_t* loop);
static int uv__async_start(uv_loop_t* loop);
static void uv__cpu_relax(void);


int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
  int err;

  err = uv__async_start(loop);
  if (err)
    return err;

  uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
  handle->async_cb = async_cb;
  handle->pending = 0;
  handle->u.fd = 0; /* This will be used as a busy flag. */

  uv__queue_insert_tail(&loop->async_handles, &handle->queue);
  uv__handle_start(handle);

  return 0;
}


int uv_async_send(uv_async_t* handle) {
  _Atomic int* pending;
  _Atomic int* busy;

  pending = (_Atomic int*) &handle->pending;
  busy = (_Atomic int*) &handle->u.fd;

  /* Do a cheap read first. */
  if (atomic_load_explicit(pending, memory_order_relaxed) != 0)
    return 0;

  /* Set the loop to busy. */
  atomic_fetch_add(busy, 1);

  /* Wake up the other thread's event loop. */
  if (atomic_exchange(pending, 1) == 0)
    uv__async_send(handle->loop);

  /* Set the loop to not-busy. */
  atomic_fetch_add(busy, -1);

  return 0;
}
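
/* Added commentary (not in the upstream source): uv_async_send() may be called
 * from any thread. It bumps the busy counter while it touches the handle and
 * wakes the loop only when it is the caller that flips pending from 0 to 1, so
 * concurrent sends coalesce into a single wakeup. uv__async_spin() below runs
 * on the loop thread during close: it forces pending to 1 to suppress further
 * wakeups, then waits for the busy counter to drain so that no sender that has
 * entered the critical section is still using the handle. */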


/* Wait for the busy flag to clear before closing.
 * Only call this from the event loop thread. */
static void uv__async_spin(uv_async_t* handle) {
  _Atomic int* pending;
  _Atomic int* busy;
  int i;

  pending = (_Atomic int*) &handle->pending;
  busy = (_Atomic int*) &handle->u.fd;

  /* Set the pending flag first, so no new events will be added by other
   * threads after this function returns. */
  atomic_store(pending, 1);

  for (;;) {
    /* 997 is not completely chosen at random. It's a prime number, acyclic by
     * nature, and should therefore hopefully dampen sympathetic resonance.
     */
    for (i = 0; i < 997; i++) {
      if (atomic_load(busy) == 0)
        return;

      /* Another thread is busy with this handle; spin until it's done. */
      uv__cpu_relax();
    }

    /* Yield the CPU. We may have preempted the other thread while it's
     * inside the critical section and if it's running on the same CPU
     * as us, we'll just burn CPU cycles until the end of our time slice.
     */
    sched_yield();
  }
}


void uv__async_close(uv_async_t* handle) {
  uv__async_spin(handle);
  uv__queue_remove(&handle->queue);
  uv__handle_stop(handle);
}


static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
#ifndef __linux__
  char buf[1024];
  ssize_t r;
#endif
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;
  _Atomic int *pending;

  assert(w == &loop->async_io_watcher);

#ifndef __linux__
#if UV__KQUEUE_EVFILT_USER
  for (;!kqueue_evfilt_user_support;) {
#else
  for (;;) {
#endif
    r = read(w->fd, buf, sizeof(buf));

    if (r == sizeof(buf))
      continue;

    if (r != -1)
      break;

    if (errno == EAGAIN || errno == EWOULDBLOCK)
      break;

    if (errno == EINTR)
      continue;

    abort();
  }
#endif /* !__linux__ */

  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);

    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);

    /* Atomically fetch and clear pending flag */
    pending = (_Atomic int*) &h->pending;
    if (atomic_exchange(pending, 0) == 0)
      continue;

    if (h->async_cb == NULL)
      continue;

    h->async_cb(h);
  }
}


static void uv__async_send(uv_loop_t* loop) {
  int fd;
  ssize_t r;
#ifdef __linux__
  uint64_t val;

  fd = loop->async_io_watcher.fd;  /* eventfd */
  for (val = 1; /* empty */; val = 1) {
    r = write(fd, &val, sizeof(uint64_t));
    if (r < 0) {
      /* EAGAIN means the eventfd counter is saturated, i.e. it has reached
       * the maximum value a 64-bit eventfd can hold. Drain the eventfd
       * first, then write again.
       *
       * See https://man7.org/linux/man-pages/man2/eventfd.2.html for details.
       */
      if (errno == EAGAIN) {
        /* We drained it (or another thread already did); retry the write. */
        if (read(fd, &val, sizeof(uint64_t)) > 0 || errno == EAGAIN) {
          continue;
        }
      }
      /* An unknown error occurred. */
      break;
    }
    return;
  }
#else
#if UV__KQUEUE_EVFILT_USER
  struct kevent ev;

  if (kqueue_evfilt_user_support) {
    fd = loop->async_io_watcher.fd; /* magic number for EVFILT_USER */
    EV_SET(&ev, fd, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
    r = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
    if (r == 0)
      return;
    else
      abort();
  }
#endif

  fd = loop->async_wfd; /* write end of the pipe */
  do
    r = write(fd, "x", 1);
  while (r == -1 && errno == EINTR);

  if (r == 1)
    return;

  if (r == -1)
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return;
#endif

  abort();
}
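
/* Added commentary (not in the upstream source): the wakeup mechanism is
 * selected in uv__async_start(): an eventfd on Linux, a user-triggered
 * EVFILT_USER kevent on kqueue platforms where the runtime detection above
 * succeeds, and a non-blocking self-pipe otherwise. A single wakeup suffices
 * for any number of pending handles, because uv__async_io() walks every
 * handle in loop->async_handles and invokes the callback for each one whose
 * pending flag is set. */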


static int uv__async_start(uv_loop_t* loop) {
  int pipefd[2];
  int err;
#if UV__KQUEUE_EVFILT_USER
  struct kevent ev;
#endif

  if (loop->async_io_watcher.fd != -1)
    return 0;

#ifdef __linux__
  err = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if (err < 0)
    return UV__ERR(errno);

  pipefd[0] = err;
  pipefd[1] = -1;
#elif UV__KQUEUE_EVFILT_USER
  uv_once(&kqueue_runtime_detection_guard, uv__kqueue_runtime_detection);
  if (kqueue_evfilt_user_support) {
    /* To keep the generic I/O polling pattern intact, a valid file
     * descriptor is needed to occupy a slot in loop->watchers, so we open
     * one here. The fd is never actually read from; it is merely a
     * placeholder (and the EVFILT_USER ident) and gets closed during
     * cleanup like any other fd. */
    err = uv__open_cloexec("/dev/null", O_RDONLY);
    if (err < 0)
      return err;

    pipefd[0] = err;
    pipefd[1] = -1;

    /* When an EVFILT_USER event is used to wake up the kqueue, the event
     * must be registered beforehand; issuing an unregistered EVFILT_USER
     * event with kevent() fails with ENOENT. Since uv__async_send() may run
     * on another thread before uv__io_poll() does, this registration cannot
     * be deferred the way it is for other events and must happen right
     * away. */
    EV_SET(&ev, err, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);
    err = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
    if (err < 0)
      return UV__ERR(errno);
  } else {
    err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
    if (err < 0)
      return err;
  }
#else
  err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
  if (err < 0)
    return err;
#endif

  uv__io_init(&loop->async_io_watcher, uv__async_io, pipefd[0]);
  uv__io_start(loop, &loop->async_io_watcher, POLLIN);
  loop->async_wfd = pipefd[1];

#if UV__KQUEUE_EVFILT_USER
  /* Prevent the EVFILT_USER event from being added to the kqueue again,
   * redundantly and mistakenly, later in uv__io_poll(). */
  if (kqueue_evfilt_user_support)
    loop->async_io_watcher.events = loop->async_io_watcher.pevents;
#endif

  return 0;
}


void uv__async_stop(uv_loop_t* loop) {
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;

  if (loop->async_io_watcher.fd == -1)
    return;

  /* Make sure no other thread is accessing the async handle fd after the loop
   * cleanup.
   */
  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);

    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);

    uv__async_spin(h);
  }

  if (loop->async_wfd != -1) {
    if (loop->async_wfd != loop->async_io_watcher.fd)
      uv__close(loop->async_wfd);
    loop->async_wfd = -1;
  }

  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
  uv__close(loop->async_io_watcher.fd);
  loop->async_io_watcher.fd = -1;
}


int uv__async_fork(uv_loop_t* loop) {
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;

  if (loop->async_io_watcher.fd == -1) /* never started */
    return 0;

  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);

    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);

    /* The state of any thread that set pending is now likely corrupt in this
     * child because the user called fork, so just clear these flags and move
     * on. In a multithreaded program, calling most libc functions after
     * fork() is undefined behavior anyway unless they are async-signal-safe,
     * and nothing interesting in pthreads is async-signal-safe.
     */
    h->pending = 0;
    /* This is the busy flag, and we just abruptly lost all other threads. */
    h->u.fd = 0;
  }

  /* Recreate these, since they still exist, but belong to the wrong pid now. */
  if (loop->async_wfd != -1) {
    if (loop->async_wfd != loop->async_io_watcher.fd)
      uv__close(loop->async_wfd);
    loop->async_wfd = -1;
  }

  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
  uv__close(loop->async_io_watcher.fd);
  loop->async_io_watcher.fd = -1;

  return uv__async_start(loop);
}


static void uv__cpu_relax(void) {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ __volatile__ ("rep; nop" ::: "memory");  /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
  __asm__ __volatile__ ("yield" ::: "memory");
#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
  __asm volatile ("" : : : "memory");
#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
  __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}