/* Copyright libuv project contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

/* POSIX defines poll() as a portable way to wait on file descriptors.
 * Here we maintain a dynamically sized array of file descriptors and
 * events to pass as the first argument to poll().
 */

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <errno.h>
#include <unistd.h>

int uv__platform_loop_init(uv_loop_t* loop) {
  loop->poll_fds = NULL;
  loop->poll_fds_used = 0;
  loop->poll_fds_size = 0;
  loop->poll_fds_iterating = 0;
  return 0;
}

void uv__platform_loop_delete(uv_loop_t* loop) {
  uv__free(loop->poll_fds);
  loop->poll_fds = NULL;
}

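/* Called in the child after fork(): drop the pollfd array inherited from the
 * parent and start from scratch. The caller (uv_loop_fork()) re-queues any
 * active watchers so their fds are added back on the next uv__io_poll().
 */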
int uv__io_fork(uv_loop_t* loop) {
  uv__platform_loop_delete(loop);
  return uv__platform_loop_init(loop);
}

/* Allocate or dynamically resize our poll fds array.  */
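/* Growth strategy: start at 64 slots and double from there. New slots are
 * initialized with fd = -1, which poll() is specified to ignore. Running out
 * of memory here is treated as fatal (abort()), since the loop cannot
 * continue without the array.
 */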
static void uv__pollfds_maybe_resize(uv_loop_t* loop) {
  size_t i;
  size_t n;
  struct pollfd* p;

  if (loop->poll_fds_used < loop->poll_fds_size)
    return;

  n = loop->poll_fds_size ? loop->poll_fds_size * 2 : 64;
  p = uv__reallocf(loop->poll_fds, n * sizeof(*loop->poll_fds));
  if (p == NULL)
    abort();

  loop->poll_fds = p;
  for (i = loop->poll_fds_size; i < n; i++) {
    loop->poll_fds[i].fd = -1;
    loop->poll_fds[i].events = 0;
    loop->poll_fds[i].revents = 0;
  }
  loop->poll_fds_size = n;
}

/* Primitive swap operation on poll fds array elements.  */
static void uv__pollfds_swap(uv_loop_t* loop, size_t l, size_t r) {
  struct pollfd pfd;
  pfd = loop->poll_fds[l];
  loop->poll_fds[l] = loop->poll_fds[r];
  loop->poll_fds[r] = pfd;
}

/* Add a watcher's fd to our poll fds array with its pending events.  */
static void uv__pollfds_add(uv_loop_t* loop, uv__io_t* w) {
  size_t i;
  struct pollfd* pe;

  /* If the fd is already in the set just update its events.  */
  assert(!loop->poll_fds_iterating);
  for (i = 0; i < loop->poll_fds_used; ++i) {
    if (loop->poll_fds[i].fd == w->fd) {
      loop->poll_fds[i].events = w->pevents;
      return;
    }
  }

  /* Otherwise, allocate a new slot in the set for the fd.  */
  uv__pollfds_maybe_resize(loop);
  pe = &loop->poll_fds[loop->poll_fds_used++];
  pe->fd = w->fd;
  pe->events = w->pevents;
}

/* Remove a watcher's fd from our poll fds array.  */
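/* Removal is swap-with-last so the used part of the array stays contiguous.
 * Passing fd == -1 purges every entry that uv__platform_invalidate_fd()
 * marked invalid while uv__io_poll() was iterating, so there may be several
 * matching entries.
 */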
static void uv__pollfds_del(uv_loop_t* loop, int fd) {
  size_t i;
  assert(!loop->poll_fds_iterating);
  for (i = 0; i < loop->poll_fds_used;) {
    if (loop->poll_fds[i].fd == fd) {
      /* swap to last position and remove */
      --loop->poll_fds_used;
      uv__pollfds_swap(loop, i, loop->poll_fds_used);
      loop->poll_fds[loop->poll_fds_used].fd = -1;
      loop->poll_fds[loop->poll_fds_used].events = 0;
      loop->poll_fds[loop->poll_fds_used].revents = 0;
      /* This method is called with an fd of -1 to purge the invalidated fds,
       * so there may be multiple entries to remove.
       */
      if (-1 != fd)
        return;
    } else {
      /* We must only increment the loop counter when the fds do not match.
       * Otherwise, when we are purging an invalidated fd, the value just
       * swapped here from the previous end of the array will be skipped.
       */
      ++i;
    }
  }
}


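/* Run one iteration of the poll backend: call poll() on the watched fds and
 * dispatch callbacks for the ones that became ready. A timeout of -1 blocks
 * until there is activity, 0 returns immediately, and a positive value is the
 * maximum number of milliseconds to wait.
 */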
void uv__io_poll(uv_loop_t* loop, int timeout) {
  uv__loop_internal_fields_t* lfields;
  sigset_t* pset;
  sigset_t set;
  uint64_t time_base;
  uint64_t time_diff;
  struct uv__queue* q;
  uv__io_t* w;
  size_t i;
  unsigned int nevents;
  int nfds;
  int have_signals;
  struct pollfd* pe;
  int fd;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(uv__queue_empty(&loop->watcher_queue));
    return;
  }

  lfields = uv__get_internal_fields(loop);

  /* Take queued watchers and add their fds to our poll fds array.  */
  while (!uv__queue_empty(&loop->watcher_queue)) {
    q = uv__queue_head(&loop->watcher_queue);
    uv__queue_remove(q);
    uv__queue_init(q);

    w = uv__queue_data(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    uv__pollfds_add(loop, w);

    w->events = w->pevents;
  }

  /* Prepare a set of signals to block around poll(), if any.  */
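  /* Currently only SIGPROF is ever blocked here: the UV_LOOP_BLOCK_SIGPROF
   * flag is set by uv_loop_configure(loop, UV_LOOP_BLOCK_SIGNAL, SIGPROF) and
   * exists to suppress needless wakeups while a sampling profiler is running.
   */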
  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  time_base = loop->time;

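  /* When idle-time metrics (UV_METRICS_IDLE_TIME) are enabled, the first pass
   * through the loop below polls with a zero timeout: events that are already
   * pending get counted as "waiting" rather than charged as idle time. The
   * caller's timeout is restored (via reset_timeout) before blocking for real.
   */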
  if (lfields->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  /* Loop calls to poll() and processing of results.  If we get some
   * results from poll() but they turn out not to be interesting to
   * our caller then we need to loop around and poll() again.
   */
  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    /* Store the current timeout in a location that's globally accessible so
     * other locations like uv__work_done() can determine whether the events
     * in the callback queue were already waiting when poll was called.
     */
    lfields->current_timeout = timeout;

    if (pset != NULL)
      if (pthread_sigmask(SIG_BLOCK, pset, NULL))
        abort();
    nfds = poll(loop->poll_fds, (nfds_t)loop->poll_fds_used, timeout);
    if (pset != NULL)
      if (pthread_sigmask(SIG_UNBLOCK, pset, NULL))
        abort();

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
        if (timeout == -1)
          continue;
        if (timeout > 0)
          goto update_timeout;
      }

      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    /* Tell uv__platform_invalidate_fd not to manipulate our array
     * while we are iterating over it.
     */
    loop->poll_fds_iterating = 1;

    /* Initialize a count of events that we care about.  */
    nevents = 0;
    have_signals = 0;

    /* Loop over the entire poll fds array looking for returned events.  */
    for (i = 0; i < loop->poll_fds_used; i++) {
      pe = loop->poll_fds + i;
      fd = pe->fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd.  */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, ignore.  */
        uv__platform_invalidate_fd(loop, fd);
        continue;
      }

      /* Filter out events that the user has not requested us to watch
       * (e.g. POLLNVAL).
       */
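      /* POLLERR and POLLHUP are passed through even when not requested:
       * poll() may report them regardless of the events mask and watchers
       * need to see them to notice errors and hangups.
       */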
      pe->revents &= w->pevents | POLLERR | POLLHUP;

      if (pe->revents != 0) {
        /* Run signal watchers last.  */
        if (w == &loop->signal_io_watcher) {
          have_signals = 1;
        } else {
          uv__metrics_update_idle_time(loop);
          w->cb(loop, w, pe->revents);
        }

        nevents++;
      }
    }

    uv__metrics_inc_events(loop, nevents);
    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
      uv__metrics_inc_events_waiting(loop, nevents);
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->poll_fds_iterating = 0;

    /* Purge invalidated fds from our poll fds array.  */
    uv__pollfds_del(loop, -1);

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0)
      return;

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

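    /* Subtract the time already spent waiting from the remaining timeout so
     * that across retries (EINTR, uninteresting events, the metrics pre-pass)
     * we never wait longer in total than the caller asked for.
     */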
update_timeout:
    assert(timeout > 0);

    time_diff = loop->time - time_base;
    if (time_diff >= (uint64_t) timeout)
      return;

    timeout -= time_diff;
  }
}

/* Remove the given fd from our poll fds array because no one
 * is interested in its events anymore.
 */
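/* If uv__io_poll() is in the middle of iterating the array we only mark the
 * slot invalid (fd = -1); compacting it here would move entries out from
 * under the iterator. The invalidated slots are purged afterwards with
 * uv__pollfds_del(loop, -1).
 */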
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  size_t i;

  assert(fd >= 0);

  if (loop->poll_fds_iterating) {
    /* uv__io_poll is currently iterating.  Just invalidate fd.  */
    for (i = 0; i < loop->poll_fds_used; i++)
      if (loop->poll_fds[i].fd == fd) {
        loop->poll_fds[i].fd = -1;
        loop->poll_fds[i].events = 0;
        loop->poll_fds[i].revents = 0;
      }
  } else {
    /* uv__io_poll is not iterating.  Delete fd from the set.  */
    uv__pollfds_del(loop, fd);
  }
}

/* Check whether the given fd is supported by poll().  */
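/* Probe with a zero-timeout poll(), which returns immediately; POLLNVAL in
 * revents means the descriptor is invalid or cannot be polled, which we map
 * to UV_EINVAL.
 */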
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct pollfd p[1];
  int rv;

  p[0].fd = fd;
  p[0].events = POLLIN;

  do
    rv = poll(p, 1, 0);
  while (rv == -1 && (errno == EINTR || errno == EAGAIN));

  if (rv == -1)
    return UV__ERR(errno);

  if (p[0].revents & POLLNVAL)
    return UV_EINVAL;

  return 0;
}