/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"
#include "strtok.h"

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h> /* O_CLOEXEC */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#include <pwd.h>
#include <grp.h>
#include <sys/utsname.h>
#include <sys/time.h>
#include <time.h> /* clock_gettime */

#ifdef __sun
# include <sys/filio.h>
# include <sys/wait.h>
#endif

#if defined(__APPLE__)
# include <sys/filio.h>
#endif /* defined(__APPLE__) */


#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <crt_externs.h>
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# define environ (*_NSGetEnviron())
#else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
extern char** environ;
#endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */


#if defined(__DragonFly__)      || \
    defined(__FreeBSD__)        || \
    defined(__NetBSD__)         || \
    defined(__OpenBSD__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/wait.h>
# include <sys/param.h>
# if defined(__FreeBSD__)
#  include <sys/cpuset.h>
#  define uv__accept4 accept4
# endif
# if defined(__NetBSD__)
#  define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
# endif
#endif

#if defined(__MVS__)
# include <sys/ioctl.h>
# include "zos-sys-info.h"
#endif

#if defined(__linux__)
# include <sched.h>
# include <sys/syscall.h>
# define gettid() syscall(SYS_gettid)
# define uv__accept4 accept4
#endif

#if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
# include <sanitizer/linux_syscall_hooks.h>
#endif

static void uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
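
/* Illustrative sketch (not part of libuv's source): the ABI guarantees
 * asserted above are what let the I/O code hand a uv_buf_t array straight to
 * writev() with a cast instead of copying into a struct iovec array. The fd
 * here is a hypothetical connected socket.
 *
 *   uv_buf_t bufs[2];
 *   bufs[0] = uv_buf_init("hello, ", 7);
 *   bufs[1] = uv_buf_init("world\n", 6);
 *   writev(fd, (struct iovec*) bufs, 2);
 */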


/* https://github.com/libuv/libuv/issues/1674 */
int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
  struct timespec t;
  int r;

  if (ts == NULL)
    return UV_EFAULT;

  switch (clock_id) {
    default:
      return UV_EINVAL;
    case UV_CLOCK_MONOTONIC:
      r = clock_gettime(CLOCK_MONOTONIC, &t);
      break;
    case UV_CLOCK_REALTIME:
      r = clock_gettime(CLOCK_REALTIME, &t);
      break;
  }

  if (r)
    return UV__ERR(errno);

  ts->tv_sec = t.tv_sec;
  ts->tv_nsec = t.tv_nsec;

  return 0;
}
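
/* Usage sketch (illustrative, not part of libuv's source): time an interval
 * with the monotonic clock. do_work() is hypothetical and error checks are
 * elided for brevity.
 *
 *   uv_timespec64_t t0, t1;
 *   uv_clock_gettime(UV_CLOCK_MONOTONIC, &t0);
 *   do_work();
 *   uv_clock_gettime(UV_CLOCK_MONOTONIC, &t1);
 *   int64_t ns = (t1.tv_sec - t0.tv_sec) * 1000000000 +
 *                (t1.tv_nsec - t0.tv_nsec);
 */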


uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}


void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));

  handle->flags |= UV_HANDLE_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;

  case UV_TTY:
    uv__tty_close((uv_tty_t*)handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
#if defined(__sun) || defined(__MVS__)
    /*
     * On Solaris, illumos, and z/OS we will not be able to dissociate the
     * watcher for an event which is pending delivery, so we cannot always call
     * uv__make_close_pending() straight away. The backend will call the
     * function once the event has cleared.
     */
    return;
#endif
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    /* Poll handles use file system requests, and one of them may still be
     * running. The poll code will call uv__make_close_pending() for us. */
    return;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    break;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}
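
/* Usage sketch (illustrative, not part of libuv's source): closing is
 * asynchronous. The handle's memory must stay valid until close_cb runs, and
 * the loop has to keep spinning until then. The uv_tcp_t below is
 * hypothetical.
 *
 *   void on_close(uv_handle_t* handle) {
 *     free(handle);  // only now is it safe to release the handle
 *   }
 *
 *   uv_close((uv_handle_t*) tcp, on_close);
 *   uv_run(loop, UV_RUN_DEFAULT);  // drives the loop until on_close fires
 */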

int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int fd;
  socklen_t len;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

  if (r < 0)
    return UV__ERR(errno);

  return 0;
}

void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}

int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static _Atomic int iovmax_cached = -1;
  int iovmax;

  iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
  if (iovmax != -1)
    return iovmax;

  /* On some embedded devices (arm-linux-uclibc based IP cameras),
   * sysconf(_SC_IOV_MAX) does not return the correct value; it fails with -1
   * and sets errno to EINPROGRESS. Degrade gracefully to 1 in that case.
   */
  iovmax = sysconf(_SC_IOV_MAX);
  if (iovmax == -1)
    iovmax = 1;

  atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed);

  return iovmax;
#else
  return 1024;
#endif
}


static void uv__finish_close(uv_handle_t* handle) {
  uv_signal_t* sh;

  /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
   * possible for it to be active in the sense that uv__is_active() returns
   * true.
   *
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;

  switch (handle->type) {
  case UV_PREPARE:
  case UV_CHECK:
  case UV_IDLE:
  case UV_ASYNC:
  case UV_TIMER:
  case UV_PROCESS:
  case UV_FS_EVENT:
  case UV_FS_POLL:
  case UV_POLL:
    break;

  case UV_SIGNAL:
    /* If there are any caught signals "trapped" in the signal pipe,
     * we can't call the close callback yet. Reinserting the handle
     * into the closing queue makes the event loop spin but that's
     * okay because we only need to deliver the pending events.
     */
    sh = (uv_signal_t*) handle;
    if (sh->caught_signals > sh->dispatched_signals) {
      handle->flags ^= UV_HANDLE_CLOSED;
      uv__make_close_pending(handle); /* Back into the queue. */
      return;
    }
    break;

  case UV_NAMED_PIPE:
  case UV_TCP:
  case UV_TTY:
    uv__stream_destroy((uv_stream_t*)handle);
    break;

  case UV_UDP:
    uv__udp_finish_close((uv_udp_t*)handle);
    break;

  default:
    assert(0);
    break;
  }

  uv__handle_unref(handle);
  uv__queue_remove(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}


static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}


int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}


int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}


static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         !uv__queue_empty(&loop->pending_queue) ||
         loop->closing_handles != NULL;
}


static int uv__backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag == 0 &&
      /* uv__loop_alive(loop) && */
      (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
      uv__queue_empty(&loop->pending_queue) &&
      uv__queue_empty(&loop->idle_handles) &&
      (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
      loop->closing_handles == NULL)
    return uv__next_timeout(loop);
  return 0;
}


int uv_backend_timeout(const uv_loop_t* loop) {
  if (uv__queue_empty(&loop->watcher_queue))
    return uv__backend_timeout(loop);
  /* Need to call uv_run to update the backend fd state. */
  return 0;
}


int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}


int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int can_sleep;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  /* Maintain backwards compatibility by processing timers before entering the
   * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
   * once, which should be done after polling in order to maintain proper
   * execution order of the conceptual event loop. */
  if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);
  }

  while (r != 0 && loop->stop_flag == 0) {
    can_sleep =
        uv__queue_empty(&loop->pending_queue) &&
        uv__queue_empty(&loop->idle_handles);

    uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
      timeout = uv__backend_timeout(loop);

    uv__metrics_inc_loop_count(loop);

    uv__io_poll(loop, timeout);

    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
     * times to avoid loop starvation. */
    for (r = 0; r < 8 && !uv__queue_empty(&loop->pending_queue); r++)
      uv__run_pending(loop);

    /* Run one final update on the provider_idle_time in case uv__io_poll
     * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
     * the timeout == 0) or was already updated b/c an event was received.
     */
    uv__metrics_update_idle_time(loop);

    uv__run_check(loop);
    uv__run_closing_handles(loop);

    uv__update_time(loop);
    uv__run_timers(loop);

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}
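
/* Usage sketch (illustrative, not part of libuv's source): the typical way to
 * drive a loop to completion. uv_run() returns non-zero if the loop is still
 * alive when it returns (e.g. after uv_stop() or a UV_RUN_ONCE iteration).
 *
 *   uv_loop_t loop;
 *   uv_loop_init(&loop);
 *   // ... start handles and requests here ...
 *   uv_run(&loop, UV_RUN_DEFAULT);  // blocks until no active handles/requests
 *   uv_loop_close(&loop);
 */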


void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}


int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}


/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return UV__ERR(errno);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}

/* Get a file pointer to a file in read-only and close-on-exec mode. */
FILE* uv__open_file(const char* path) {
  int fd;
  FILE* fp;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  fp = fdopen(fd, "r");
  if (fp == NULL)
    uv__close(fd);

  return fp;
}


int uv__accept(int sockfd) {
  int peerfd;
  int err;

  (void) &err;
  assert(sockfd >= 0);

  do
#ifdef uv__accept4
    peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
#else
    peerfd = accept(sockfd, NULL, NULL);
#endif
  while (peerfd == -1 && errno == EINTR);

  if (peerfd == -1)
    return UV__ERR(errno);

#ifndef uv__accept4
  err = uv__cloexec(peerfd, 1);
  if (err == 0)
    err = uv__nonblock(peerfd, 1);

  if (err != 0) {
    uv__close(peerfd);
    return err;
  }
#endif

  return peerfd;
}


/* close() on macos has the "interesting" quirk that it fails with EINTR
 * without closing the file descriptor when a thread is in the cancel state.
 * That's why libuv calls close$NOCANCEL() instead.
 *
 * glibc on linux has a similar issue: close() is a cancellation point and
 * will unwind the thread when it's in the cancel state. Work around that
 * by making the system call directly. Musl libc is unaffected.
 */
int uv__close_nocancel(int fd) {
#if defined(__APPLE__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
#if defined(__LP64__) || TARGET_OS_IPHONE
  extern int close$NOCANCEL(int);
  return close$NOCANCEL(fd);
#else
  extern int close$NOCANCEL$UNIX2003(int);
  return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
  long rc;
  __sanitizer_syscall_pre_close(fd);
  rc = syscall(SYS_close, fd);
  __sanitizer_syscall_post_close(rc, fd);
  return rc;
#elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
  return syscall(SYS_close, fd);
#else
  return close(fd);
#endif
}


int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1); /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = uv__close_nocancel(fd);
  if (rc == -1) {
    rc = UV__ERR(errno);
    if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
      rc = 0; /* The close is in progress, not an error. */
    errno = saved_errno;
  }

  return rc;
}


int uv__close(int fd) {
  assert(fd > STDERR_FILENO); /* Catch stdio close bugs. */
#if defined(__MVS__)
  SAVE_ERRNO(epoll_file_close(fd));
#endif
  return uv__close_nocheckstdio(fd);
}

#if UV__NONBLOCK_IS_IOCTL
int uv__nonblock_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}
#endif


int uv__nonblock_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return UV__ERR(errno);

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


int uv__cloexec(int fd, int set) {
  int flags;
  int r;

  flags = 0;
  if (set)
    flags = FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
#if defined(__ANDROID__)   || \
    defined(__DragonFly__) || \
    defined(__FreeBSD__)   || \
    defined(__NetBSD__)    || \
    defined(__OpenBSD__)   || \
    defined(__linux__)
  ssize_t rc;
  rc = recvmsg(fd, msg, flags | MSG_CMSG_CLOEXEC);
  if (rc == -1)
    return UV__ERR(errno);
  return rc;
#else
  struct cmsghdr* cmsg;
  int* pfd;
  int* end;
  ssize_t rc;
  rc = recvmsg(fd, msg, flags);
  if (rc == -1)
    return UV__ERR(errno);
  if (msg->msg_controllen == 0)
    return rc;
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
#endif
}


int uv_cwd(char* buffer, size_t* size) {
  char scratch[1 + UV__PATH_MAX];

  if (buffer == NULL || size == NULL)
    return UV_EINVAL;

  /* Try to read directly into the user's buffer first... */
  if (getcwd(buffer, *size) != NULL)
    goto fixup;

  if (errno != ERANGE)
    return UV__ERR(errno);

  /* ...or into scratch space if the user's buffer is too small
   * so we can report how much space to provide on the next try.
   */
  if (getcwd(scratch, sizeof(scratch)) == NULL)
    return UV__ERR(errno);

  buffer = scratch;

fixup:

  *size = strlen(buffer);

  if (*size > 1 && buffer[*size - 1] == '/') {
    *size -= 1;
    buffer[*size] = '\0';
  }

  if (buffer == scratch) {
    *size += 1;
    return UV_ENOBUFS;
  }

  return 0;
}
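
/* Usage sketch (illustrative, not part of libuv's source): like most libuv
 * "fill my buffer" APIs, uv_cwd() reports the required size through *size and
 * returns UV_ENOBUFS when the buffer is too small, so a caller can retry:
 *
 *   char small[8];
 *   size_t size = sizeof(small);
 *   char* p = small;
 *   if (uv_cwd(p, &size) == UV_ENOBUFS) {
 *     p = malloc(size);  // size now holds the length incl. the terminator
 *     if (p != NULL)
 *       uv_cwd(p, &size);
 *   }
 */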


int uv_chdir(const char* dir) {
  if (chdir(dir))
    return UV__ERR(errno);

  return 0;
}


void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}


int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int fd_out;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv__is_closing(handle) || fd_out == -1)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}


static void uv__run_pending(uv_loop_t* loop) {
  struct uv__queue* q;
  struct uv__queue pq;
  uv__io_t* w;

  uv__queue_move(&loop->pending_queue, &pq);

  while (!uv__queue_empty(&pq)) {
    q = uv__queue_head(&pq);
    uv__queue_remove(q);
    uv__queue_init(q);
    w = uv__queue_data(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
  }
}


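/* Round val up to the next power of two: smear the highest set bit into all
 * lower bits, then add one. Examples: 33 -> 64; exact powers of two map to
 * themselves; 0 wraps around to 0.
 */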
static unsigned int next_power_of_two(unsigned int val) {
  val -= 1;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  val += 1;
  return val;
}

static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__reallocf(loop->watchers,
                          (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}


void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  uv__queue_init(&w->pending_queue);
  uv__queue_init(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;
}


void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (uv__queue_empty(&w->watcher_queue))
    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}


void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    uv__queue_remove(&w->watcher_queue);
    uv__queue_init(&w->watcher_queue);
    w->events = 0;

    if (w == loop->watchers[w->fd]) {
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
    }
  }
  else if (uv__queue_empty(&w->watcher_queue))
    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}


void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  uv__queue_remove(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  if (w->fd != -1)
    uv__platform_invalidate_fd(loop, w->fd);
}


void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (uv__queue_empty(&w->pending_queue))
    uv__queue_insert_tail(&loop->pending_queue, &w->pending_queue);
}


int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}


int uv__fd_exists(uv_loop_t* loop, int fd) {
  return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
}


int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__) && !defined(__HAIKU__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  /* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
   * the outliers because of course they are.
   */
#if defined(__APPLE__)
  rusage->ru_maxrss /= 1024; /* macOS and iOS report bytes. */
#elif defined(__sun)
  rusage->ru_maxrss /= getpagesize() / 1024; /* Solaris reports pages. */
#endif

  return 0;
}


int uv__open_cloexec(const char* path, int flags) {
#if defined(O_CLOEXEC)
  int fd;

  fd = open(path, flags | O_CLOEXEC);
  if (fd == -1)
    return UV__ERR(errno);

  return fd;
#else  /* O_CLOEXEC */
  int err;
  int fd;

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
#endif  /* O_CLOEXEC */
}


int uv__slurp(const char* filename, char* buf, size_t len) {
  ssize_t n;
  int fd;

  assert(len > 0);

  fd = uv__open_cloexec(filename, O_RDONLY);
  if (fd < 0)
    return fd;

  do
    n = read(fd, buf, len - 1);
  while (n == -1 && errno == EINTR);

  if (uv__close_nocheckstdio(fd))
    abort();

  if (n < 0)
    return UV__ERR(errno);

  buf[n] = '\0';

  return 0;
}
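
/* Usage sketch (illustrative, not part of libuv's source): uv__slurp() is the
 * internal helper for reading small pseudo-files in one shot; the result is
 * always NUL-terminated, so the buffer needs room for the terminator. The
 * path and parse() below are hypothetical.
 *
 *   char buf[256];
 *   if (uv__slurp("/proc/self/cgroup", buf, sizeof(buf)) == 0)
 *     parse(buf);
 */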


int uv__dup2_cloexec(int oldfd, int newfd) {
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
  int r;

  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return UV__ERR(errno);

  return r;
#else
  int err;
  int r;

  r = dup2(oldfd, newfd); /* Never retry. */
  if (r == -1)
    return UV__ERR(errno);

  err = uv__cloexec(newfd, 1);
  if (err != 0) {
    uv__close(newfd);
    return err;
  }

  return r;
#endif
}


int uv_os_homedir(char* buffer, size_t* size) {
  uv_passwd_t pwd;
  size_t len;
  int r;

  /* Check if the HOME environment variable is set first. The task of
     performing input validation on buffer and size is taken care of by
     uv_os_getenv(). */
  r = uv_os_getenv("HOME", buffer, size);

  if (r != UV_ENOENT)
    return r;

  /* HOME is not set, so call uv_os_get_passwd() */
  r = uv_os_get_passwd(&pwd);

  if (r != 0) {
    return r;
  }

  len = strlen(pwd.homedir);

  if (len >= *size) {
    *size = len + 1;
    uv_os_free_passwd(&pwd);
    return UV_ENOBUFS;
  }

  memcpy(buffer, pwd.homedir, len + 1);
  *size = len;
  uv_os_free_passwd(&pwd);

  return 0;
}


int uv_os_tmpdir(char* buffer, size_t* size) {
  const char* buf;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

#define CHECK_ENV_VAR(name) \
  do { \
    buf = getenv(name); \
    if (buf != NULL) \
      goto return_buffer; \
  } \
  while (0)

  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");

#undef CHECK_ENV_VAR

  /* No temp environment variables defined */
#if defined(__ANDROID__)
  buf = "/data/local/tmp";
#else
  buf = "/tmp";
#endif

return_buffer:
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  /* The returned directory should not have a trailing slash. */
  if (len > 1 && buf[len - 1] == '/') {
    len--;
  }

  memcpy(buffer, buf, len + 1);
  buffer[len] = '\0';
  *size = len;

  return 0;
}


static int uv__getpwuid_r(uv_passwd_t* pwd, uid_t uid) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  int r;

  if (pwd == NULL)
    return UV_EINVAL;

  /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would return a suggested buffer
   * size, but it is frequently just 1024 or 4096, so we skip it and instead
   * start with a fixed guess and grow the buffer on ERANGE. The pwent will
   * not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    do
      r = getpwuid_r(uid, &pw, buf, bufsize, &result);
    while (r == EINTR);

    if (r != 0 || result == NULL)
      uv__free(buf);

    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}


int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
#if defined(__ANDROID__) && __ANDROID_API__ < 24
  /* getgrgid_r() was added in Android N (level 24). */
  return UV_ENOSYS;
1278 #else
1279 struct group gp;
1280 struct group* result;
1281 char* buf;
1282 char* gr_mem;
1283 size_t bufsize;
1284 size_t name_size;
1285 long members;
1286 size_t mem_size;
1287 int r;
1288
1289 if (grp == NULL)
1290 return UV_EINVAL;
1291
  /* Calling sysconf(_SC_GETGR_R_SIZE_MAX) would return a suggested buffer
   * size, but it is frequently just 1024 or 4096, so we skip it and instead
   * start with a fixed guess and grow the buffer on ERANGE. The grent will
   * not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    do
      r = getgrgid_r(gid, &gp, buf, bufsize, &result);
    while (r == EINTR);

    if (r != 0 || result == NULL)
      uv__free(buf);

    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the groupname and members. */
  name_size = strlen(gp.gr_name) + 1;
  members = 0;
  mem_size = sizeof(char*);
  for (r = 0; gp.gr_mem[r] != NULL; r++) {
    mem_size += strlen(gp.gr_mem[r]) + 1 + sizeof(char*);
    members++;
  }

  gr_mem = uv__malloc(name_size + mem_size);
  if (gr_mem == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the members */
  grp->members = (char**) gr_mem;
  grp->members[members] = NULL;
  gr_mem = (char*) &grp->members[members + 1];
  for (r = 0; r < members; r++) {
    grp->members[r] = gr_mem;
    strcpy(gr_mem, gp.gr_mem[r]);
    gr_mem += strlen(gr_mem) + 1;
  }
  assert(gr_mem == (char*)grp->members + mem_size);

  /* Copy the groupname */
  grp->groupname = gr_mem;
  memcpy(grp->groupname, gp.gr_name, name_size);
  gr_mem += name_size;

  /* Copy the gid */
  grp->gid = gp.gr_gid;

  uv__free(buf);

  return 0;
#endif
}


int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd, geteuid());
}


int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
  return uv__getpwuid_r(pwd, uid);
}


int uv_translate_sys_error(int sys_errno) {
  /* If < 0 then it's already a libuv error. */
  return sys_errno <= 0 ? sys_errno : -sys_errno;
}
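
/* Illustrative note (not part of libuv's source): on Unix the mapping is a
 * simple negation, so uv_translate_sys_error(ENOENT) yields UV_ENOENT, and a
 * value that is already a libuv error (<= 0) passes through unchanged.
 */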


int uv_os_environ(uv_env_item_t** envitems, int* count) {
  int i, j, cnt;
  uv_env_item_t* envitem;

  *envitems = NULL;
  *count = 0;

  for (i = 0; environ[i] != NULL; i++);

  *envitems = uv__calloc(i, sizeof(**envitems));

  if (*envitems == NULL)
    return UV_ENOMEM;

  for (j = 0, cnt = 0; j < i; j++) {
    char* buf;
    char* ptr;

    if (environ[j] == NULL)
      break;

    buf = uv__strdup(environ[j]);
    if (buf == NULL)
      goto fail;

    ptr = strchr(buf, '=');
    if (ptr == NULL) {
      uv__free(buf);
      continue;
    }

    *ptr = '\0';

    envitem = &(*envitems)[cnt];
    envitem->name = buf;
    envitem->value = ptr + 1;

    cnt++;
  }

  *count = cnt;
  return 0;

fail:
  for (i = 0; i < cnt; i++) {
    envitem = &(*envitems)[i];
    uv__free(envitem->name);
  }
  uv__free(*envitems);

  *envitems = NULL;
  *count = 0;
  return UV_ENOMEM;
}


int uv_os_getenv(const char* name, char* buffer, size_t* size) {
  char* var;
  size_t len;

  if (name == NULL || buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  var = getenv(name);

  if (var == NULL)
    return UV_ENOENT;

  len = strlen(var);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, var, len + 1);
  *size = len;

  return 0;
}


int uv_os_setenv(const char* name, const char* value) {
  if (name == NULL || value == NULL)
    return UV_EINVAL;

  if (setenv(name, value, 1) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_unsetenv(const char* name) {
  if (name == NULL)
    return UV_EINVAL;

  if (unsetenv(name) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_gethostname(char* buffer, size_t* size) {
  /*
    On some platforms, if the input buffer is not large enough, gethostname()
    succeeds, but truncates the result. libuv can detect this and return ENOBUFS
    instead by creating a large enough buffer and comparing the hostname length
    to the size input.
  */
  char buf[UV_MAXHOSTNAMESIZE];
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  if (gethostname(buf, sizeof(buf)) != 0)
    return UV__ERR(errno);

  buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, buf, len + 1);
  *size = len;
  return 0;
}


uv_os_fd_t uv_get_osfhandle(int fd) {
  return fd;
}

int uv_open_osfhandle(uv_os_fd_t os_fd) {
  return os_fd;
}

uv_pid_t uv_os_getpid(void) {
  return getpid();
}


uv_pid_t uv_os_getppid(void) {
  return getppid();
}

int uv_cpumask_size(void) {
#if UV__CPU_AFFINITY_SUPPORTED
  return CPU_SETSIZE;
#else
  return UV_ENOTSUP;
#endif
}

int uv_os_getpriority(uv_pid_t pid, int* priority) {
  int r;

  if (priority == NULL)
    return UV_EINVAL;

  errno = 0;
  r = getpriority(PRIO_PROCESS, (int) pid);

  if (r == -1 && errno != 0)
    return UV__ERR(errno);

  *priority = r;
  return 0;
}


int uv_os_setpriority(uv_pid_t pid, int priority) {
  if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
    return UV_EINVAL;

  if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
    return UV__ERR(errno);

  return 0;
}

/**
 * If the function succeeds, the return value is 0.
 * If the function fails, the return value is non-zero.
 * On Linux, when the scheduling policy is SCHED_OTHER (the default), the
 * priority is always 0, so the output parameter is actually the nice value.
 */
int uv_thread_getpriority(uv_thread_t tid, int* priority) {
  int r;
  int policy;
  struct sched_param param;
#ifdef __linux__
  pid_t pid = gettid();
#endif

  if (priority == NULL)
    return UV_EINVAL;

  r = pthread_getschedparam(tid, &policy, &param);
  if (r != 0)
    return UV__ERR(r);

#ifdef __linux__
  if (SCHED_OTHER == policy && pthread_equal(tid, pthread_self())) {
    errno = 0;
    r = getpriority(PRIO_PROCESS, pid);
    if (r == -1 && errno != 0)
      return UV__ERR(errno);
    *priority = r;
    return 0;
  }
#endif

  *priority = param.sched_priority;
  return 0;
}

#ifdef __linux__
static int set_nice_for_calling_thread(int priority) {
  int r;
  int nice;

  if (priority < UV_THREAD_PRIORITY_LOWEST || priority > UV_THREAD_PRIORITY_HIGHEST)
    return UV_EINVAL;

  pid_t pid = gettid();
  nice = 0 - priority * 2;
  r = setpriority(PRIO_PROCESS, pid, nice);
  if (r != 0)
    return UV__ERR(errno);
  return 0;
}
#endif

/**
 * If the function succeeds, the return value is 0.
 * If the function fails, the return value is non-zero.
 */
int uv_thread_setpriority(uv_thread_t tid, int priority) {
#if !defined(__GNU__)
  int r;
  int min;
  int max;
  int range;
  int prio;
  int policy;
  struct sched_param param;

  if (priority < UV_THREAD_PRIORITY_LOWEST || priority > UV_THREAD_PRIORITY_HIGHEST)
    return UV_EINVAL;

  r = pthread_getschedparam(tid, &policy, &param);
  if (r != 0)
    return UV__ERR(r);

#ifdef __linux__
  /**
   * On Linux, when the scheduling policy is SCHED_OTHER (the default), the
   * priority must be 0, so we set the nice value instead.
   */
  if (SCHED_OTHER == policy && pthread_equal(tid, pthread_self()))
    return set_nice_for_calling_thread(priority);
#endif

#ifdef __PASE__
  min = 1;
  max = 127;
#else
  min = sched_get_priority_min(policy);
  max = sched_get_priority_max(policy);
#endif

  if (min == -1 || max == -1)
    return UV__ERR(errno);

  range = max - min;
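
  /* Map the five UV_THREAD_PRIORITY_* levels onto [min, max]. Illustrative
   * numbers: SCHED_FIFO on Linux has min = 1 and max = 99, so NORMAL maps to
   * 1 + 98 / 2 = 50 and ABOVE_NORMAL to 1 + 98 * 3 / 4 = 74 (integer math).
   */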

  switch (priority) {
    case UV_THREAD_PRIORITY_HIGHEST:
      prio = max;
      break;
    case UV_THREAD_PRIORITY_ABOVE_NORMAL:
      prio = min + range * 3 / 4;
      break;
    case UV_THREAD_PRIORITY_NORMAL:
      prio = min + range / 2;
      break;
    case UV_THREAD_PRIORITY_BELOW_NORMAL:
      prio = min + range / 4;
      break;
    case UV_THREAD_PRIORITY_LOWEST:
      prio = min;
      break;
    default:
      return 0;
  }

  if (param.sched_priority != prio) {
    param.sched_priority = prio;
    r = pthread_setschedparam(tid, policy, &param);
    if (r != 0)
      return UV__ERR(r);
  }

  return 0;
#else  /* !defined(__GNU__) */
  /* Simulate success on systems where thread priority is not implemented. */
  return 0;
#endif  /* !defined(__GNU__) */
}

int uv_os_uname(uv_utsname_t* buffer) {
  struct utsname buf;
  int r;

  if (buffer == NULL)
    return UV_EINVAL;

  if (uname(&buf) == -1) {
    r = UV__ERR(errno);
    goto error;
  }

  r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
  if (r == UV_E2BIG)
    goto error;

#ifdef _AIX
  r = snprintf(buffer->release,
               sizeof(buffer->release),
               "%s.%s",
               buf.version,
               buf.release);
  if (r >= sizeof(buffer->release)) {
    r = UV_E2BIG;
    goto error;
  }
#else
  r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
  if (r == UV_E2BIG)
    goto error;
#endif

  r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
  if (r == UV_E2BIG)
    goto error;

#if defined(_AIX) || defined(__PASE__)
  r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
#else
  r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
#endif

  if (r == UV_E2BIG)
    goto error;

  return 0;

error:
  buffer->sysname[0] = '\0';
  buffer->release[0] = '\0';
  buffer->version[0] = '\0';
  buffer->machine[0] = '\0';
  return r;
}

int uv__getsockpeername(const uv_handle_t* handle,
                        uv__peersockfunc func,
                        struct sockaddr* name,
                        int* namelen) {
  socklen_t socklen;
  uv_os_fd_t fd;
  int r;

  r = uv_fileno(handle, &fd);
  if (r < 0)
    return r;

  /* sizeof(socklen_t) != sizeof(int) on some systems. */
  socklen = (socklen_t) *namelen;

  if (func(fd, name, &socklen))
    return UV__ERR(errno);

  *namelen = (int) socklen;
  return 0;
}

int uv_gettimeofday(uv_timeval64_t* tv) {
  struct timeval time;

  if (tv == NULL)
    return UV_EINVAL;

  if (gettimeofday(&time, NULL) != 0)
    return UV__ERR(errno);

  tv->tv_sec = (int64_t) time.tv_sec;
  tv->tv_usec = (int32_t) time.tv_usec;
  return 0;
}

void uv_sleep(unsigned int msec) {
  struct timespec timeout;
  int rc;

  timeout.tv_sec = msec / 1000;
  timeout.tv_nsec = (msec % 1000) * 1000 * 1000;

  do
    rc = nanosleep(&timeout, &timeout);
  while (rc == -1 && errno == EINTR);

  assert(rc == 0);
}

int uv__search_path(const char* prog, char* buf, size_t* buflen) {
  char abspath[UV__PATH_MAX];
  size_t abspath_size;
  char trypath[UV__PATH_MAX];
  char* cloned_path;
  char* path_env;
  char* token;
  char* itr;

  if (buf == NULL || buflen == NULL || *buflen == 0)
    return UV_EINVAL;

  /*
   * Possibilities for prog:
   * i) an absolute path such as: /home/user/myprojects/nodejs/node
   * ii) a relative path such as: ./node or ../myprojects/nodejs/node
   * iii) a bare filename such as "node", after exporting PATH variable
   *      to its location.
   */

  /* Case i) and ii) absolute or relative paths */
  if (strchr(prog, '/') != NULL) {
    if (realpath(prog, abspath) != abspath)
      return UV__ERR(errno);

    abspath_size = strlen(abspath);

    *buflen -= 1;
    if (*buflen > abspath_size)
      *buflen = abspath_size;

    memcpy(buf, abspath, *buflen);
    buf[*buflen] = '\0';

    return 0;
  }

  /* Case iii). Search PATH environment variable */
  cloned_path = NULL;
  token = NULL;
  path_env = getenv("PATH");

  if (path_env == NULL)
    return UV_EINVAL;

  cloned_path = uv__strdup(path_env);
  if (cloned_path == NULL)
    return UV_ENOMEM;

  token = uv__strtok(cloned_path, ":", &itr);
  while (token != NULL) {
    snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
    if (realpath(trypath, abspath) == abspath) {
      /* Check the match is executable */
      if (access(abspath, X_OK) == 0) {
        abspath_size = strlen(abspath);

        *buflen -= 1;
        if (*buflen > abspath_size)
          *buflen = abspath_size;

        memcpy(buf, abspath, *buflen);
        buf[*buflen] = '\0';

        uv__free(cloned_path);
        return 0;
      }
    }
    token = uv__strtok(NULL, ":", &itr);
  }
  uv__free(cloned_path);

  /* Out of tokens (path entries), and no match found */
  return UV_EINVAL;
}


unsigned int uv_available_parallelism(void) {
#ifdef __linux__
  cpu_set_t set;
  long rc;
  double rc_with_cgroup;
  uv__cpu_constraint c = {0, 0, 0.0};

  memset(&set, 0, sizeof(set));

  /* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in
   * glibc it's... complicated... so for consistency try sched_getaffinity()
   * before falling back to sysconf(_SC_NPROCESSORS_ONLN).
   */
  if (0 == sched_getaffinity(0, sizeof(set), &set))
    rc = CPU_COUNT(&set);
  else
    rc = sysconf(_SC_NPROCESSORS_ONLN);

  if (uv__get_constrained_cpu(&c) == 0 && c.period_length > 0) {
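    /* Worked example (illustrative numbers): a cgroup quota of 200000us per
     * 100000us period with proportions 1.0 allows the equivalent of two full
     * CPUs, so rc is clamped to 2 even when more cores are online.
     */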
    rc_with_cgroup = (double)c.quota_per_period / c.period_length * c.proportions;
    if (rc_with_cgroup < rc)
      rc = (long)rc_with_cgroup; /* Casting is safe since rc_with_cgroup < rc < LONG_MAX */
  }
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#elif defined(__MVS__)
  int rc;

  rc = __get_num_online_cpus();
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#else  /* __linux__ */
  long rc;

  rc = sysconf(_SC_NPROCESSORS_ONLN);
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#endif  /* __linux__ */
}

int uv__sock_reuseport(int fd) {
  int on = 1;
#if defined(__FreeBSD__) && __FreeBSD__ >= 12 && defined(SO_REUSEPORT_LB)
  /* FreeBSD 12 introduced SO_REUSEPORT_LB, a new socket option with
   * load-balancing capability; it is the equivalent of SO_REUSEPORT on
   * Linux and DragonFlyBSD. */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT_LB, &on, sizeof(on)))
    return UV__ERR(errno);
#elif (defined(__linux__) || \
       defined(_AIX73) || \
       (defined(__DragonFly__) && __DragonFly_version >= 300600) || \
       (defined(UV__SOLARIS_11_4) && UV__SOLARIS_11_4)) && \
      defined(SO_REUSEPORT)
  /* On Linux 3.9+, the SO_REUSEPORT implementation distributes connections
   * evenly across all of the threads (or processes) that are blocked in
   * accept() on the same port. For UDP, SO_REUSEPORT likewise distributes
   * datagrams evenly across all of the receiving threads (or processes).
   *
   * DragonFlyBSD 3.6.0 extended SO_REUSEPORT to distribute the workload to
   * available sockets, which made it the equivalent of Linux's SO_REUSEPORT.
   *
   * AIX 7.2.5 extended SO_REUSEPORT with the capability to distribute
   * incoming connections or datagrams across all listening sockets.
   *
   * Solaris 11 supported SO_REUSEPORT, but it was implemented only for
   * binding to the same address and port, without load balancing.
   * Solaris 11.4 extended SO_REUSEPORT with load-balancing capability.
   */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on)))
    return UV__ERR(errno);
#else
  (void) (fd);
  (void) (on);
  /* On platforms other than those listed above, SO_REUSEPORT has completely
   * different semantics and no load-balancing capability, so we must not
   * enable it; instead, fail the operation to indicate that
   * UV_[TCP/UDP]_REUSEPORT is not supported there. */
  return UV_ENOTSUP;
#endif

  return 0;
}

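/* Usage sketch (illustrative, not part of libuv's source): this helper backs
 * the public UV_TCP_REUSEPORT / UV_UDP_REUSEPORT bind flags, which is where
 * the UV_ENOTSUP above surfaces. The tcp handle below is hypothetical.
 *
 *   struct sockaddr_in addr;
 *   uv_ip4_addr("0.0.0.0", 8080, &addr);
 *   int r = uv_tcp_bind(tcp, (const struct sockaddr*) &addr, UV_TCP_REUSEPORT);
 *   if (r == UV_ENOTSUP)
 *     use_single_listener();  // hypothetical fallback
 */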