/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"
#include "strtok.h"

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h> /* O_CLOEXEC */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#include <pwd.h>
#include <grp.h>
#include <sys/utsname.h>
#include <sys/time.h>
#include <time.h> /* clock_gettime */

#ifdef __sun
# include <sys/filio.h>
# include <sys/wait.h>
#endif

#if defined(__APPLE__)
# include <sys/filio.h>
# include <sys/sysctl.h>
#endif /* defined(__APPLE__) */


#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <crt_externs.h>
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# define environ (*_NSGetEnviron())
#else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
extern char** environ;
#endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */


#if defined(__DragonFly__) || \
    defined(__FreeBSD__)   || \
    defined(__NetBSD__)    || \
    defined(__OpenBSD__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/wait.h>
# include <sys/param.h>
# if defined(__FreeBSD__)
#  include <sys/cpuset.h>
#  define uv__accept4 accept4
# endif
# if defined(__NetBSD__)
#  define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
# endif
#endif

#if defined(__MVS__)
# include <sys/ioctl.h>
# include "zos-sys-info.h"
#endif

#if defined(__linux__)
# include <sched.h>
# include <sys/syscall.h>
# define gettid() syscall(SYS_gettid)
# define uv__accept4 accept4
#endif

#if defined(__FreeBSD__)
# include <sys/param.h>
# include <sys/cpuset.h>
#endif

#if defined(__NetBSD__)
# include <sched.h>
#endif

#if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
# include <sanitizer/linux_syscall_hooks.h>
#endif

static void uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
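
/* The layout guarantee verified above lets code hand a uv_buf_t array
 * straight to writev() with a cast. A minimal sketch (illustrative only):
 *
 *   uv_buf_t bufs[2];
 *   bufs[0] = uv_buf_init("hello, ", 7);
 *   bufs[1] = uv_buf_init("world\n", 6);
 *   writev(fd, (struct iovec*) bufs, 2);
 */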


/* https://github.com/libuv/libuv/issues/1674 */
int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
  struct timespec t;
  int r;

  if (ts == NULL)
    return UV_EFAULT;

  switch (clock_id) {
    default:
      return UV_EINVAL;
    case UV_CLOCK_MONOTONIC:
      r = clock_gettime(CLOCK_MONOTONIC, &t);
      break;
    case UV_CLOCK_REALTIME:
      r = clock_gettime(CLOCK_REALTIME, &t);
      break;
  }

  if (r)
    return UV__ERR(errno);

  ts->tv_sec = t.tv_sec;
  ts->tv_nsec = t.tv_nsec;

  return 0;
}
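
/* Usage sketch (illustrative only): fetch a monotonic timestamp.
 *
 *   uv_timespec64_t ts;
 *   if (uv_clock_gettime(UV_CLOCK_MONOTONIC, &ts) == 0)
 *     printf("%lld.%09d\n", (long long) ts.tv_sec, (int) ts.tv_nsec);
 */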


uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}


void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));

  handle->flags |= UV_HANDLE_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;

  case UV_TTY:
    uv__tty_close((uv_tty_t*)handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
#if defined(__sun) || defined(__MVS__)
    /*
     * On Solaris, illumos, and z/OS we will not be able to dissociate the
     * watcher for an event which is pending delivery, so we cannot always call
     * uv__make_close_pending() straight away. The backend will call the
     * function once the event has cleared.
     */
    return;
#endif
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    /* Poll handles use file system requests, and one of them may still be
     * running. The poll code will call uv__make_close_pending() for us. */
    return;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    break;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}

int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int fd;
  socklen_t len;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

  if (r < 0)
    return UV__ERR(errno);

  return 0;
}

void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}

int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static _Atomic int iovmax_cached = -1;
  int iovmax;

  iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
  if (iovmax != -1)
    return iovmax;

  /* On some embedded devices (arm-linux-uclibc based ip cameras),
   * sysconf(_SC_IOV_MAX) cannot return the correct value; it returns -1
   * with errno set to EINPROGRESS. Degrade the value to 1 in that case.
   */
  iovmax = sysconf(_SC_IOV_MAX);
  if (iovmax == -1)
    iovmax = 1;

  atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed);

  return iovmax;
#else
  return 1024;
#endif
}


static void uv__finish_close(uv_handle_t* handle) {
  uv_signal_t* sh;

  /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
   * possible for it to be active in the sense that uv__is_active() returns
   * true.
   *
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;

  switch (handle->type) {
  case UV_PREPARE:
  case UV_CHECK:
  case UV_IDLE:
  case UV_ASYNC:
  case UV_TIMER:
  case UV_PROCESS:
  case UV_FS_EVENT:
  case UV_FS_POLL:
  case UV_POLL:
    break;

  case UV_SIGNAL:
    /* If there are any caught signals "trapped" in the signal pipe,
     * we can't call the close callback yet. Reinserting the handle
     * into the closing queue makes the event loop spin but that's
     * okay because we only need to deliver the pending events.
     */
    sh = (uv_signal_t*) handle;
    if (sh->caught_signals > sh->dispatched_signals) {
      handle->flags ^= UV_HANDLE_CLOSED;
      uv__make_close_pending(handle); /* Back into the queue. */
      return;
    }
    break;

  case UV_NAMED_PIPE:
  case UV_TCP:
  case UV_TTY:
    uv__stream_destroy((uv_stream_t*)handle);
    break;

  case UV_UDP:
    uv__udp_finish_close((uv_udp_t*)handle);
    break;

  default:
    assert(0);
    break;
  }

  uv__handle_unref(handle);
  uv__queue_remove(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}


static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}


int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}


int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}


static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         !uv__queue_empty(&loop->pending_queue) ||
         loop->closing_handles != NULL;
}


static int uv__backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag == 0 &&
      /* uv__loop_alive(loop) && */
      (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
      uv__queue_empty(&loop->pending_queue) &&
      uv__queue_empty(&loop->idle_handles) &&
      (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
      loop->closing_handles == NULL)
    return uv__next_timeout(loop);
  return 0;
}


int uv_backend_timeout(const uv_loop_t* loop) {
  if (uv__queue_empty(&loop->watcher_queue))
    return uv__backend_timeout(loop);
  /* Need to call uv_run to update the backend fd state. */
  return 0;
}


int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}


int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int can_sleep;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  /* Maintain backwards compatibility by processing timers before entering the
   * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
   * once, which should be done after polling in order to maintain proper
   * execution order of the conceptual event loop. */
  if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);
  }

  while (r != 0 && loop->stop_flag == 0) {
    can_sleep =
        uv__queue_empty(&loop->pending_queue) &&
        uv__queue_empty(&loop->idle_handles);

    uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
      timeout = uv__backend_timeout(loop);

    uv__metrics_inc_loop_count(loop);

    uv__io_poll(loop, timeout);

    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
     * times to avoid loop starvation. */
    for (r = 0; r < 8 && !uv__queue_empty(&loop->pending_queue); r++)
      uv__run_pending(loop);

    /* Run one final update on the provider_idle_time in case uv__io_poll
     * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
     * the timeout == 0) or was already updated b/c an event was received.
     */
    uv__metrics_update_idle_time(loop);

    uv__run_check(loop);
    uv__run_closing_handles(loop);

    uv__update_time(loop);
    uv__run_timers(loop);

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}


void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}


int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}


/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return UV__ERR(errno);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}

/* Get a FILE pointer to a file in read-only and close-on-exec mode. */
FILE* uv__open_file(const char* path) {
  int fd;
  FILE* fp;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  fp = fdopen(fd, "r");
  if (fp == NULL)
    uv__close(fd);

  return fp;
}


int uv__accept(int sockfd) {
  int peerfd;
  int err;

  (void) &err;
  assert(sockfd >= 0);

  do
#ifdef uv__accept4
    peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
#else
    peerfd = accept(sockfd, NULL, NULL);
#endif
  while (peerfd == -1 && errno == EINTR);

  if (peerfd == -1)
    return UV__ERR(errno);

#ifndef uv__accept4
  err = uv__cloexec(peerfd, 1);
  if (err == 0)
    err = uv__nonblock(peerfd, 1);

  if (err != 0) {
    uv__close(peerfd);
    return err;
  }
#endif

  return peerfd;
}


/* close() on macOS has the "interesting" quirk that it fails with EINTR
 * without closing the file descriptor when a thread is in the cancel state.
 * That's why libuv calls close$NOCANCEL() instead.
 *
 * glibc on Linux has a similar issue: close() is a cancellation point and
 * will unwind the thread when it's in the cancel state. Work around that
 * by making the system call directly. Musl libc is unaffected.
 */
int uv__close_nocancel(int fd) {
#if defined(__APPLE__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
#if defined(__LP64__) || TARGET_OS_IPHONE
  extern int close$NOCANCEL(int);
  return close$NOCANCEL(fd);
#else
  extern int close$NOCANCEL$UNIX2003(int);
  return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
  long rc;
  __sanitizer_syscall_pre_close(fd);
  rc = syscall(SYS_close, fd);
  __sanitizer_syscall_post_close(rc, fd);
  return rc;
#elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
  return syscall(SYS_close, fd);
#else
  return close(fd);
#endif
}


int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1); /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = uv__close_nocancel(fd);
  if (rc == -1) {
    rc = UV__ERR(errno);
    if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
      rc = 0; /* The close is in progress, not an error. */
    errno = saved_errno;
  }

  return rc;
}


int uv__close(int fd) {
  assert(fd > STDERR_FILENO); /* Catch stdio close bugs. */
#if defined(__MVS__)
  SAVE_ERRNO(epoll_file_close(fd));
#endif
  return uv__close_nocheckstdio(fd);
}

#if UV__NONBLOCK_IS_IOCTL
int uv__nonblock_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}
#endif


int uv__nonblock_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return UV__ERR(errno);

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


int uv__cloexec(int fd, int set) {
  int flags;
  int r;

  flags = 0;
  if (set)
    flags = FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
#if defined(__ANDROID__)   || \
    defined(__DragonFly__) || \
    defined(__FreeBSD__)   || \
    defined(__NetBSD__)    || \
    defined(__OpenBSD__)   || \
    defined(__linux__)
  ssize_t rc;
  rc = recvmsg(fd, msg, flags | MSG_CMSG_CLOEXEC);
  if (rc == -1)
    return UV__ERR(errno);
  return rc;
#else
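  /* No MSG_CMSG_CLOEXEC on this platform: receive first, then walk any
   * SCM_RIGHTS control messages and mark each passed file descriptor
   * close-on-exec by hand. */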
  struct cmsghdr* cmsg;
  int* pfd;
  int* end;
  ssize_t rc;
  rc = recvmsg(fd, msg, flags);
  if (rc == -1)
    return UV__ERR(errno);
  if (msg->msg_controllen == 0)
    return rc;
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
#endif
}


int uv_cwd(char* buffer, size_t* size) {
  char scratch[1 + UV__PATH_MAX];

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  /* Try to read directly into the user's buffer first... */
  if (getcwd(buffer, *size) != NULL)
    goto fixup;

  if (errno != ERANGE)
    return UV__ERR(errno);

  /* ...or into scratch space if the user's buffer is too small
   * so we can report how much space to provide on the next try.
   */
  if (getcwd(scratch, sizeof(scratch)) == NULL)
    return UV__ERR(errno);

  buffer = scratch;

fixup:

  *size = strlen(buffer);

  if (*size > 1 && buffer[*size - 1] == '/') {
    *size -= 1;
    buffer[*size] = '\0';
  }

  if (buffer == scratch) {
    *size += 1;
    return UV_ENOBUFS;
  }

  return 0;
}
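
/* Callers retry with a larger buffer on UV_ENOBUFS; *size then holds the
 * required length, including the terminating nul. A sketch (illustrative
 * only):
 *
 *   char small[4];
 *   size_t len = sizeof(small);
 *   if (uv_cwd(small, &len) == UV_ENOBUFS) {
 *     char* big = malloc(len);
 *     if (big != NULL && uv_cwd(big, &len) == 0)
 *       printf("%s\n", big);
 *     free(big);
 *   }
 */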


int uv_chdir(const char* dir) {
  if (chdir(dir))
    return UV__ERR(errno);

  return 0;
}


void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}


int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int fd_out;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv__is_closing(handle) || fd_out == -1)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}


static void uv__run_pending(uv_loop_t* loop) {
  struct uv__queue* q;
  struct uv__queue pq;
  uv__io_t* w;

  uv__queue_move(&loop->pending_queue, &pq);

  while (!uv__queue_empty(&pq)) {
    q = uv__queue_head(&pq);
    uv__queue_remove(q);
    uv__queue_init(q);
    w = uv__queue_data(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
  }
}
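

/* Round val up to the next power of two by smearing its highest set bit into
 * every lower bit, then adding one. Example: 33 -> 64; 64 stays 64. */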
static unsigned int next_power_of_two(unsigned int val) {
  val -= 1;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  val += 1;
  return val;
}

static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__reallocf(loop->watchers,
                          (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}


void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  uv__queue_init(&w->pending_queue);
  uv__queue_init(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;
}


void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (uv__queue_empty(&w->watcher_queue))
    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}


void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    uv__queue_remove(&w->watcher_queue);
    uv__queue_init(&w->watcher_queue);
    w->events = 0;

    if (w == loop->watchers[w->fd]) {
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
    }
  }
  else if (uv__queue_empty(&w->watcher_queue))
    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}


void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  uv__queue_remove(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  if (w->fd != -1)
    uv__platform_invalidate_fd(loop, w->fd);
}


void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (uv__queue_empty(&w->pending_queue))
    uv__queue_insert_tail(&loop->pending_queue, &w->pending_queue);
}


int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}


int uv__fd_exists(uv_loop_t* loop, int fd) {
  return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
}


int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__) && !defined(__HAIKU__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  /* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
   * the outliers because of course they are.
   */
#if defined(__APPLE__)
  rusage->ru_maxrss /= 1024; /* macOS and iOS report bytes. */
#elif defined(__sun)
  rusage->ru_maxrss *= getpagesize() / 1024; /* Solaris reports pages. */
#endif

  return 0;
}


int uv__open_cloexec(const char* path, int flags) {
#if defined(O_CLOEXEC)
  int fd;

  fd = open(path, flags | O_CLOEXEC);
  if (fd == -1)
    return UV__ERR(errno);

  return fd;
#else /* O_CLOEXEC */
  int err;
  int fd;

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
#endif /* O_CLOEXEC */
}


int uv__slurp(const char* filename, char* buf, size_t len) {
  ssize_t n;
  int fd;

  assert(len > 0);

  fd = uv__open_cloexec(filename, O_RDONLY);
  if (fd < 0)
    return fd;

  do
    n = read(fd, buf, len - 1);
  while (n == -1 && errno == EINTR);

  if (uv__close_nocheckstdio(fd))
    abort();

  if (n < 0)
    return UV__ERR(errno);

  buf[n] = '\0';

  return 0;
}


int uv__dup2_cloexec(int oldfd, int newfd) {
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
  int r;

  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return UV__ERR(errno);

  return r;
#else
  int err;
  int r;

  r = dup2(oldfd, newfd); /* Never retry. */
  if (r == -1)
    return UV__ERR(errno);

  err = uv__cloexec(newfd, 1);
  if (err != 0) {
    uv__close(newfd);
    return err;
  }

  return r;
#endif
}


int uv_os_homedir(char* buffer, size_t* size) {
  uv_passwd_t pwd;
  size_t len;
  int r;

  /* Check if the HOME environment variable is set first. The task of
     performing input validation on buffer and size is taken care of by
     uv_os_getenv(). */
  r = uv_os_getenv("HOME", buffer, size);

  if (r != UV_ENOENT)
    return r;

  /* HOME is not set, so call uv_os_get_passwd() */
  r = uv_os_get_passwd(&pwd);

  if (r != 0) {
    return r;
  }

  len = strlen(pwd.homedir);

  if (len >= *size) {
    *size = len + 1;
    uv_os_free_passwd(&pwd);
    return UV_ENOBUFS;
  }

  memcpy(buffer, pwd.homedir, len + 1);
  *size = len;
  uv_os_free_passwd(&pwd);

  return 0;
}


int uv_os_tmpdir(char* buffer, size_t* size) {
  const char* buf;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

#define CHECK_ENV_VAR(name)                                                   \
  do {                                                                        \
    buf = getenv(name);                                                       \
    if (buf != NULL)                                                          \
      goto return_buffer;                                                     \
  }                                                                           \
  while (0)

  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");

#undef CHECK_ENV_VAR

  /* No temp environment variables defined */
#if defined(__ANDROID__)
  buf = "/data/local/tmp";
#else
  buf = "/tmp";
#endif

return_buffer:
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  /* The returned directory should not have a trailing slash. */
  if (len > 1 && buf[len - 1] == '/') {
    len--;
  }

  memcpy(buffer, buf, len + 1);
  buffer[len] = '\0';
  *size = len;

  return 0;
}


static int uv__getpwuid_r(uv_passwd_t* pwd, uid_t uid) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  int r;

  if (pwd == NULL)
    return UV_EINVAL;

  /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
   * is frequently 1024 or 4096, so we can just use that directly. The pwent
   * will not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    do
      r = getpwuid_r(uid, &pw, buf, bufsize, &result);
    while (r == EINTR);

    if (r != 0 || result == NULL)
      uv__free(buf);

    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}


int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
#if defined(__ANDROID__) && __ANDROID_API__ < 24
  /* getgrgid_r() was not added until Android N (API level 24). */
  return UV_ENOSYS;
#else
  struct group gp;
  struct group* result;
  char* buf;
  char* gr_mem;
  size_t bufsize;
  size_t name_size;
  long members;
  size_t mem_size;
  int r;

  if (grp == NULL)
    return UV_EINVAL;

  /* Calling sysconf(_SC_GETGR_R_SIZE_MAX) would get the suggested size, but it
   * is frequently 1024 or 4096, so we can just use that directly. The group
   * entry will not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    do
      r = getgrgid_r(gid, &gp, buf, bufsize, &result);
    while (r == EINTR);

    if (r != 0 || result == NULL)
      uv__free(buf);

    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the groupname and members. */
  name_size = strlen(gp.gr_name) + 1;
  members = 0;
  mem_size = sizeof(char*);
  for (r = 0; gp.gr_mem[r] != NULL; r++) {
    mem_size += strlen(gp.gr_mem[r]) + 1 + sizeof(char*);
    members++;
  }

  gr_mem = uv__malloc(name_size + mem_size);
  if (gr_mem == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the members */
  grp->members = (char**) gr_mem;
  grp->members[members] = NULL;
  gr_mem = (char*) &grp->members[members + 1];
  for (r = 0; r < members; r++) {
    grp->members[r] = gr_mem;
    strcpy(gr_mem, gp.gr_mem[r]);
    gr_mem += strlen(gr_mem) + 1;
  }
  assert(gr_mem == (char*)grp->members + mem_size);

  /* Copy the groupname */
  grp->groupname = gr_mem;
  memcpy(grp->groupname, gp.gr_name, name_size);
  gr_mem += name_size;

  /* Copy the gid */
  grp->gid = gp.gr_gid;

  uv__free(buf);

  return 0;
#endif
}


int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd, geteuid());
}


int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
  return uv__getpwuid_r(pwd, uid);
}


int uv_translate_sys_error(int sys_errno) {
  /* If < 0 then it's already a libuv error. */
  return sys_errno <= 0 ? sys_errno : -sys_errno;
}
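
/* Example: on Unix, uv_translate_sys_error(ENOENT) yields -ENOENT, which
 * equals UV_ENOENT; zero and already-negative values pass through unchanged. */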


int uv_os_environ(uv_env_item_t** envitems, int* count) {
  int i, j, cnt;
  uv_env_item_t* envitem;

  *envitems = NULL;
  *count = 0;

  for (i = 0; environ[i] != NULL; i++);

  *envitems = uv__calloc(i, sizeof(**envitems));

  if (*envitems == NULL)
    return UV_ENOMEM;

  for (j = 0, cnt = 0; j < i; j++) {
    char* buf;
    char* ptr;

    if (environ[j] == NULL)
      break;

    buf = uv__strdup(environ[j]);
    if (buf == NULL)
      goto fail;

    ptr = strchr(buf, '=');
    if (ptr == NULL) {
      uv__free(buf);
      continue;
    }

    *ptr = '\0';

    envitem = &(*envitems)[cnt];
    envitem->name = buf;
    envitem->value = ptr + 1;

    cnt++;
  }

  *count = cnt;
  return 0;

fail:
  for (i = 0; i < cnt; i++) {
    envitem = &(*envitems)[i]; /* Free each name duplicated so far. */
    uv__free(envitem->name);
  }
  uv__free(*envitems);

  *envitems = NULL;
  *count = 0;
  return UV_ENOMEM;
}


int uv_os_getenv(const char* name, char* buffer, size_t* size) {
  char* var;
  size_t len;

  if (name == NULL || buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  var = getenv(name);

  if (var == NULL)
    return UV_ENOENT;

  len = strlen(var);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, var, len + 1);
  *size = len;

  return 0;
}


int uv_os_setenv(const char* name, const char* value) {
  if (name == NULL || value == NULL)
    return UV_EINVAL;

  if (setenv(name, value, 1) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_unsetenv(const char* name) {
  if (name == NULL)
    return UV_EINVAL;

  if (unsetenv(name) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_gethostname(char* buffer, size_t* size) {
  /*
    On some platforms, if the input buffer is not large enough, gethostname()
    succeeds, but truncates the result. libuv can detect this and return ENOBUFS
    instead by creating a large enough buffer and comparing the hostname length
    to the size input.
  */
  char buf[UV_MAXHOSTNAMESIZE];
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  if (gethostname(buf, sizeof(buf)) != 0)
    return UV__ERR(errno);

  buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, buf, len + 1);
  *size = len;
  return 0;
}


uv_os_fd_t uv_get_osfhandle(int fd) {
  return fd;
}

int uv_open_osfhandle(uv_os_fd_t os_fd) {
  return os_fd;
}

uv_pid_t uv_os_getpid(void) {
  return getpid();
}


uv_pid_t uv_os_getppid(void) {
  return getppid();
}

int uv_cpumask_size(void) {
#if UV__CPU_AFFINITY_SUPPORTED
  return CPU_SETSIZE;
#else
  return UV_ENOTSUP;
#endif
}

int uv_os_getpriority(uv_pid_t pid, int* priority) {
  int r;

  if (priority == NULL)
    return UV_EINVAL;

  errno = 0;
  r = getpriority(PRIO_PROCESS, (int) pid);

  if (r == -1 && errno != 0)
    return UV__ERR(errno);

  *priority = r;
  return 0;
}


int uv_os_setpriority(uv_pid_t pid, int priority) {
  if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
    return UV_EINVAL;

  if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
    return UV__ERR(errno);

  return 0;
}

/**
 * If the function succeeds, the return value is 0.
 * If the function fails, the return value is non-zero.
 * On Linux, when the scheduling policy is SCHED_OTHER (the default), the
 * priority is always 0, so the output parameter is actually the nice value.
 */
int uv_thread_getpriority(uv_thread_t tid, int* priority) {
  int r;
  int policy;
  struct sched_param param;
#ifdef __linux__
  pid_t pid = gettid();
#endif

  if (priority == NULL)
    return UV_EINVAL;

  r = pthread_getschedparam(tid, &policy, &param);
  if (r != 0)
    return UV__ERR(errno);

#ifdef __linux__
  if (SCHED_OTHER == policy && pthread_equal(tid, pthread_self())) {
    errno = 0;
    r = getpriority(PRIO_PROCESS, pid);
    if (r == -1 && errno != 0)
      return UV__ERR(errno);
    *priority = r;
    return 0;
  }
#endif

  *priority = param.sched_priority;
  return 0;
}

#ifdef __linux__
static int set_nice_for_calling_thread(int priority) {
  int r;
  int nice;

  if (priority < UV_THREAD_PRIORITY_LOWEST || priority > UV_THREAD_PRIORITY_HIGHEST)
    return UV_EINVAL;

  pid_t pid = gettid();
  nice = 0 - priority * 2;
  r = setpriority(PRIO_PROCESS, pid, nice);
  if (r != 0)
    return UV__ERR(errno);
  return 0;
}
#endif

/**
 * If the function succeeds, the return value is 0.
 * If the function fails, the return value is non-zero.
 */
int uv_thread_setpriority(uv_thread_t tid, int priority) {
#if !defined(__GNU__)
  int r;
  int min;
  int max;
  int range;
  int prio;
  int policy;
  struct sched_param param;

  if (priority < UV_THREAD_PRIORITY_LOWEST || priority > UV_THREAD_PRIORITY_HIGHEST)
    return UV_EINVAL;

  r = pthread_getschedparam(tid, &policy, &param);
  if (r != 0)
    return UV__ERR(errno);

#ifdef __linux__
  /**
   * On Linux, when the scheduling policy is SCHED_OTHER (the default), the
   * priority must be 0, so we set the nice value instead.
   */
  if (SCHED_OTHER == policy && pthread_equal(tid, pthread_self()))
    return set_nice_for_calling_thread(priority);
#endif

#ifdef __PASE__
  min = 1;
  max = 127;
#else
  min = sched_get_priority_min(policy);
  max = sched_get_priority_max(policy);
#endif

  if (min == -1 || max == -1)
    return UV__ERR(errno);

  range = max - min;

  switch (priority) {
  case UV_THREAD_PRIORITY_HIGHEST:
    prio = max;
    break;
  case UV_THREAD_PRIORITY_ABOVE_NORMAL:
    prio = min + range * 3 / 4;
    break;
  case UV_THREAD_PRIORITY_NORMAL:
    prio = min + range / 2;
    break;
  case UV_THREAD_PRIORITY_BELOW_NORMAL:
    prio = min + range / 4;
    break;
  case UV_THREAD_PRIORITY_LOWEST:
    prio = min;
    break;
  default:
    return 0;
  }
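
  /* Worked example (assuming a policy where min = 1 and max = 99, e.g.
   * SCHED_FIFO on Linux): range = 98, so NORMAL maps to 1 + 98 / 2 = 50
   * and ABOVE_NORMAL to 1 + 98 * 3 / 4 = 74. */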

  if (param.sched_priority != prio) {
    param.sched_priority = prio;
    r = pthread_setschedparam(tid, policy, &param);
    if (r != 0)
      return UV__ERR(errno);
  }

  return 0;
#else /* !defined(__GNU__) */
  /* Simulate success on systems where thread priority is not implemented. */
  return 0;
#endif /* !defined(__GNU__) */
}

int uv_os_uname(uv_utsname_t* buffer) {
  struct utsname buf;
  int r;

  if (buffer == NULL)
    return UV_EINVAL;

  if (uname(&buf) == -1) {
    r = UV__ERR(errno);
    goto error;
  }

  r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
  if (r == UV_E2BIG)
    goto error;

#ifdef _AIX
  r = snprintf(buffer->release,
               sizeof(buffer->release),
               "%s.%s",
               buf.version,
               buf.release);
  if (r >= sizeof(buffer->release)) {
    r = UV_E2BIG;
    goto error;
  }
#else
  r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
  if (r == UV_E2BIG)
    goto error;
#endif

  r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
  if (r == UV_E2BIG)
    goto error;

#if defined(_AIX) || defined(__PASE__)
  r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
#else
  r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
#endif

  if (r == UV_E2BIG)
    goto error;

  return 0;

error:
  buffer->sysname[0] = '\0';
  buffer->release[0] = '\0';
  buffer->version[0] = '\0';
  buffer->machine[0] = '\0';
  return r;
}

int uv__getsockpeername(const uv_handle_t* handle,
                        uv__peersockfunc func,
                        struct sockaddr* name,
                        int* namelen) {
  socklen_t socklen;
  uv_os_fd_t fd;
  int r;

  r = uv_fileno(handle, &fd);
  if (r < 0)
    return r;

  /* sizeof(socklen_t) != sizeof(int) on some systems. */
  socklen = (socklen_t) *namelen;

  if (func(fd, name, &socklen))
    return UV__ERR(errno);

  *namelen = (int) socklen;
  return 0;
}

int uv_gettimeofday(uv_timeval64_t* tv) {
  struct timeval time;

  if (tv == NULL)
    return UV_EINVAL;

  if (gettimeofday(&time, NULL) != 0)
    return UV__ERR(errno);

  tv->tv_sec = (int64_t) time.tv_sec;
  tv->tv_usec = (int32_t) time.tv_usec;
  return 0;
}

void uv_sleep(unsigned int msec) {
  struct timespec timeout;
  int rc;

  timeout.tv_sec = msec / 1000;
  timeout.tv_nsec = (msec % 1000) * 1000 * 1000;

  do
    rc = nanosleep(&timeout, &timeout);
  while (rc == -1 && errno == EINTR);

  assert(rc == 0);
}

int uv__search_path(const char* prog, char* buf, size_t* buflen) {
  char abspath[UV__PATH_MAX];
  size_t abspath_size;
  char trypath[UV__PATH_MAX];
  char* cloned_path;
  char* path_env;
  char* token;
  char* itr;

  if (buf == NULL || buflen == NULL || *buflen == 0)
    return UV_EINVAL;

  /*
   * Possibilities for prog:
   * i) an absolute path such as: /home/user/myprojects/nodejs/node
   * ii) a relative path such as: ./node or ../myprojects/nodejs/node
   * iii) a bare filename such as "node", after exporting PATH variable
   *      to its location.
   */

  /* Case i) and ii) absolute or relative paths */
  if (strchr(prog, '/') != NULL) {
    if (realpath(prog, abspath) != abspath)
      return UV__ERR(errno);

    abspath_size = strlen(abspath);

    *buflen -= 1;
    if (*buflen > abspath_size)
      *buflen = abspath_size;

    memcpy(buf, abspath, *buflen);
    buf[*buflen] = '\0';

    return 0;
  }

  /* Case iii). Search PATH environment variable */
  cloned_path = NULL;
  token = NULL;
  path_env = getenv("PATH");

  if (path_env == NULL)
    return UV_EINVAL;

  cloned_path = uv__strdup(path_env);
  if (cloned_path == NULL)
    return UV_ENOMEM;

  token = uv__strtok(cloned_path, ":", &itr);
  while (token != NULL) {
    snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
    if (realpath(trypath, abspath) == abspath) {
      /* Check the match is executable */
      if (access(abspath, X_OK) == 0) {
        abspath_size = strlen(abspath);

        *buflen -= 1;
        if (*buflen > abspath_size)
          *buflen = abspath_size;

        memcpy(buf, abspath, *buflen);
        buf[*buflen] = '\0';

        uv__free(cloned_path);
        return 0;
      }
    }
    token = uv__strtok(NULL, ":", &itr);
  }
  uv__free(cloned_path);

  /* Out of tokens (path entries), and no match found */
  return UV_EINVAL;
}

#if defined(__linux__) || defined(__FreeBSD__)
# define uv__cpu_count(cpuset) CPU_COUNT(cpuset)
#elif defined(__NetBSD__)
static int uv__cpu_count(cpuset_t* set) {
  int rc;
  cpuid_t i;

  rc = 0;
  for (i = 0;; i++) {
    int r = cpuset_isset(i, set);
    if (r < 0)
      break;
    if (r)
      rc++;
  }

  return rc;
}
#endif /* __NetBSD__ */

unsigned int uv_available_parallelism(void) {
  long rc = -1;

#ifdef __linux__
  cpu_set_t set;

  memset(&set, 0, sizeof(set));

  /* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in
   * glibc it's... complicated... so for consistency try sched_getaffinity()
   * before falling back to sysconf(_SC_NPROCESSORS_ONLN).
   */
  if (0 == sched_getaffinity(0, sizeof(set), &set))
    rc = uv__cpu_count(&set);
#elif defined(__MVS__)
  rc = __get_num_online_cpus();
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#elif defined(__FreeBSD__)
  cpuset_t set;

  memset(&set, 0, sizeof(set));

  if (0 == cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(set), &set))
    rc = uv__cpu_count(&set);
#elif defined(__NetBSD__)
  cpuset_t* set = cpuset_create();
  if (set != NULL) {
    /* set is a pointer to a dynamically sized cpuset, so pass the cpuset's
     * own size and the pointer itself, not sizeof(set) and &set. */
    if (0 == sched_getaffinity_np(getpid(), cpuset_size(set), set))
      rc = uv__cpu_count(set);
    cpuset_destroy(set);
  }
#elif defined(__APPLE__)
  int nprocs;
  size_t i;
  size_t len = sizeof(nprocs);
  static const char *mib[] = {
    "hw.activecpu",
    "hw.logicalcpu",
    "hw.ncpu"
  };

  for (i = 0; i < ARRAY_SIZE(mib); i++) {
    if (0 == sysctlbyname(mib[i], &nprocs, &len, NULL, 0) &&
        len == sizeof(nprocs) &&
        nprocs > 0) {
      rc = nprocs;
      break;
    }
  }
#elif defined(__OpenBSD__)
  int nprocs;
  size_t i;
  size_t len = sizeof(nprocs);
  static int mib[][2] = {
# ifdef HW_NCPUONLINE
    { CTL_HW, HW_NCPUONLINE },
# endif
    { CTL_HW, HW_NCPU }
  };

  for (i = 0; i < ARRAY_SIZE(mib); i++) {
    if (0 == sysctl(mib[i], ARRAY_SIZE(mib[i]), &nprocs, &len, NULL, 0) &&
        len == sizeof(nprocs) &&
        nprocs > 0) {
      rc = nprocs;
      break;
    }
  }
#endif /* __linux__ */

  if (rc < 0)
    rc = sysconf(_SC_NPROCESSORS_ONLN);

#ifdef __linux__
  {
    double rc_with_cgroup;
    uv__cpu_constraint c = {0, 0, 0.0};
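
    /* Worked example: a quota of 200000 us per 100000 us period with
     * proportions 1.0 evaluates to 2.0, capping rc at two cpus. */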
    if (uv__get_constrained_cpu(&c) == 0 && c.period_length > 0) {
      rc_with_cgroup = (double) c.quota_per_period / c.period_length * c.proportions;
      if (rc_with_cgroup < rc)
        rc = (long) rc_with_cgroup; /* Casting is safe since rc_with_cgroup < rc < LONG_MAX */
    }
  }
#endif /* __linux__ */

  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
}

int uv__sock_reuseport(int fd) {
  int on = 1;
#if defined(__FreeBSD__) && __FreeBSD__ >= 12 && defined(SO_REUSEPORT_LB)
  /* FreeBSD 12 introduced a new socket option named SO_REUSEPORT_LB with
   * load-balancing capability; it is the equivalent of SO_REUSEPORT on
   * Linux and DragonFlyBSD. */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT_LB, &on, sizeof(on)))
    return UV__ERR(errno);
#elif (defined(__linux__) || \
       defined(_AIX73) || \
       (defined(__DragonFly__) && __DragonFly_version >= 300600) || \
       (defined(UV__SOLARIS_11_4) && UV__SOLARIS_11_4)) && \
      defined(SO_REUSEPORT)
  /* On Linux 3.9+, the SO_REUSEPORT implementation distributes connections
   * evenly across all of the threads (or processes) that are blocked in
   * accept() on the same port. As with TCP, SO_REUSEPORT distributes datagrams
   * evenly across all of the receiving threads (or processes).
   *
   * DragonFlyBSD 3.6.0 extended SO_REUSEPORT to distribute workload to
   * available sockets, which made it the equivalent of Linux's SO_REUSEPORT.
   *
   * AIX 7.2.5 added the capability to distribute incoming connections or
   * datagrams across all listening ports for SO_REUSEPORT.
   *
   * Solaris 11 supported SO_REUSEPORT, but it's implemented only for
   * binding to the same address and port, without load balancing.
   * Solaris 11.4 extended SO_REUSEPORT with the capability of load balancing.
   */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on)))
    return UV__ERR(errno);
#else
  (void) (fd);
  (void) (on);
  /* SO_REUSEPORT does not have load-balancing semantics on platforms other
   * than those mentioned above; the semantics are completely different.
   * Therefore we shouldn't enable it, but fail this operation to indicate
   * that UV_[TCP/UDP]_REUSEPORT is not supported on these platforms. */
  return UV_ENOTSUP;
#endif

  return 0;
}