1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 * Permission is hereby granted, free of charge, to any person obtaining a copy
3 * of this software and associated documentation files (the "Software"), to
4 * deal in the Software without restriction, including without limitation the
5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6 * sell copies of the Software, and to permit persons to whom the Software is
7 * furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
18 * IN THE SOFTWARE.
19 */
20
21 #include "uv.h"
22 #include "internal.h"
23
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <assert.h>
29 #include <errno.h>
30
31 #ifndef SUNOS_NO_IFADDRS
32 # include <ifaddrs.h>
33 #endif
34 #include <net/if.h>
35 #include <net/if_dl.h>
36 #include <net/if_arp.h>
37 #include <sys/sockio.h>
38
39 #include <sys/loadavg.h>
40 #include <sys/time.h>
41 #include <unistd.h>
42 #include <kstat.h>
43 #include <fcntl.h>
44
45 #include <sys/port.h>
46 #include <port.h>
47
48 #define PORT_FIRED 0x69
49 #define PORT_UNUSED 0x0
50 #define PORT_LOADED 0x99
51 #define PORT_DELETED -1
52
53 #if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64)
54 #define PROCFS_FILE_OFFSET_BITS_HACK 1
55 #undef _FILE_OFFSET_BITS
56 #else
57 #define PROCFS_FILE_OFFSET_BITS_HACK 0
58 #endif
59
60 #include <procfs.h>
61
62 #if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1)
63 #define _FILE_OFFSET_BITS 64
64 #endif
65
66
/* Create the event port that backs this loop's poll backend.
 * On success loop->backend_fd holds the port descriptor; loop->fs_fd is
 * reset to -1 and only created lazily by uv_fs_event_start().
 * Returns 0 or a negative UV_* error code.
 */
int uv__platform_loop_init(uv_loop_t* loop) {
  int port;
  int err;

  loop->fs_fd = -1;
  loop->backend_fd = -1;

  port = port_create();
  if (port == -1)
    return UV__ERR(errno);

  /* Keep the port from leaking into child processes. */
  err = uv__cloexec(port, 1);
  if (err != 0) {
    uv__close(port);
    return err;
  }

  loop->backend_fd = port;
  return 0;
}
87
88
/* Tear down the platform state created by uv__platform_loop_init() and,
 * possibly, uv_fs_event_start(): close both port descriptors if open and
 * mark them invalid.
 */
void uv__platform_loop_delete(uv_loop_t* loop) {
  /* Port used for file watching, created lazily. */
  if (loop->fs_fd >= 0) {
    uv__close(loop->fs_fd);
    loop->fs_fd = -1;
  }

  /* Port backing the poll backend. */
  if (loop->backend_fd >= 0) {
    uv__close(loop->backend_fd);
    loop->backend_fd = -1;
  }
}
100
101
/* Called in the child after fork(): discard the parent's event ports and
 * create a fresh backend port for this loop.
 */
int uv__io_fork(uv_loop_t* loop) {
#if defined(PORT_SOURCE_FILE)
  if (loop->fs_fd != -1) {
    /* stop the watcher before we blow away its fileno */
    uv__io_stop(loop, &loop->fs_event_watcher, POLLIN);
  }
#endif
  uv__platform_loop_delete(loop);
  return uv__platform_loop_init(loop);
}
112
113
/* Mark any not-yet-dispatched port events for `fd` as invalid so that
 * uv__io_poll() skips them. While uv__io_poll() is dispatching, it stashes
 * the current events array and its length in the two spare slots at the end
 * of loop->watchers; outside of dispatch those slots are NULL and there is
 * nothing to do.
 */
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct port_event* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct port_event*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  /* NULL means we are not inside uv__io_poll() right now. */
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].portev_object == fd)
      events[i].portev_object = -1;
}
132
133
/* Probe whether `fd` can be watched by doing a trial port_associate() and
 * immediately undoing it. A failing dissociate is treated as fatal, matching
 * the error handling style used in uv__io_poll().
 * Returns 0 on success or a negative UV_* error code.
 */
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0))
    return UV__ERR(errno);

  if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd)) {
    perror("(libuv) port_dissociate()");
    abort();
  }

  return 0;
}
145
146
/* Poll the loop's event port and dispatch I/O callbacks.
 *
 * Event ports deliver in oneshot mode: every fd returned by port_getn() is
 * automatically dissociated, so after its callback runs the watcher is put
 * back on loop->watcher_queue to be re-associated on the next pass.
 *
 * `timeout` is in milliseconds; -1 means block indefinitely, 0 means poll
 * without blocking.
 */
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct port_event events[1024];
  struct port_event* pe;
  struct timespec spec;
  struct uv__queue* q;
  uv__io_t* w;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  unsigned int nfds;
  unsigned int i;
  int saved_errno;
  int have_signals;
  int nevents;
  int count;
  int err;
  int fd;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(uv__queue_empty(&loop->watcher_queue));
    return;
  }

  /* (Re)associate every pending watcher's fd with the port. */
  while (!uv__queue_empty(&loop->watcher_queue)) {
    q = uv__queue_head(&loop->watcher_queue);
    uv__queue_remove(q);
    uv__queue_init(q);

    w = uv__queue_data(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);

    if (port_associate(loop->backend_fd,
                       PORT_SOURCE_FD,
                       w->fd,
                       w->pevents,
                       0)) {
      perror("(libuv) port_associate()");
      abort();
    }

    w->events = w->pevents;
  }

  /* Optionally keep SIGPROF from interrupting the port_getn() syscall. */
  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  /* With idle-time metrics enabled, first do a non-blocking pass so the
   * provider entry time can be recorded; the real timeout is restored after
   * the first wakeup.
   */
  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    /* Work around a kernel bug where nfds is not updated. */
    events[0].portev_source = 0;

    nfds = 1;
    saved_errno = 0;

    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);

    err = port_getn(loop->backend_fd,
                    events,
                    ARRAY_SIZE(events),
                    &nfds,
                    timeout == -1 ? NULL : &spec);

    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);

    if (err) {
      /* Work around another kernel bug: port_getn() may return events even
       * on error.
       */
      if (errno == EINTR || errno == ETIME) {
        saved_errno = errno;
      } else {
        perror("(libuv) port_getn()");
        abort();
      }
    }

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    /* portev_source still 0 means no event was actually delivered (see the
     * kernel-bug workaround above): decide whether to return or re-poll.
     */
    if (events[0].portev_source == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      goto update_timeout;
    }

    if (nfds == 0) {
      assert(timeout != -1);
      return;
    }

    have_signals = 0;
    nevents = 0;

    /* Stash the events array so uv__platform_invalidate_fd() can reach it
     * while callbacks run.
     */
    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->portev_object;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      /* File descriptor that we've stopped watching, ignore. */
      if (w == NULL)
        continue;

      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, pe->portev_events);
      }

      nevents++;

      if (w != loop->watchers[fd])
        continue; /* Disabled by callback. */

      /* Events Ports operates in oneshot mode, rearm timer on next run. */
      if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue))
        uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
    }

    uv__metrics_inc_events(loop, nevents);
    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
      uv__metrics_inc_events_waiting(loop, nevents);
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    /* Clear the stash; invalidation is only meaningful during dispatch. */
    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (saved_errno == ETIME) {
      assert(timeout != -1);
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    /* Shrink the timeout by however long we already waited. */
    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}
371
372
/* High-resolution monotonic clock via gethrtime(3C), in nanoseconds.
 * `type` is ignored: the same clock serves every clock type here.
 */
uint64_t uv__hrtime(uv_clocktype_t type) {
  return gethrtime();
}
376
377
378 /*
379 * We could use a static buffer for the path manipulations that we need outside
380 * of the function, but this function could be called by multiple consumers and
381 * we don't want to potentially create a race condition in the use of snprintf.
382 */
/* Resolve the path of the running executable via /proc/<pid>/path/a.out.
 * On success `buffer` holds a NUL-terminated path and *size is its length
 * (excluding the NUL). Returns 0 or a negative UV_* error code.
 */
int uv_exepath(char* buffer, size_t* size) {
  ssize_t res;
  char buf[128];

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid());

  /* Reserve one byte for the terminator; readlink() does not add one. */
  res = *size - 1;
  if (res > 0)
    res = readlink(buf, buffer, res);

  if (res == -1)
    return UV__ERR(errno);

  buffer[res] = '\0';
  *size = res;
  return 0;
}
403
404
/* Free physical memory in bytes: page size times available physical pages. */
uint64_t uv_get_free_memory(void) {
  long page_size;
  long avail_pages;

  page_size = sysconf(_SC_PAGESIZE);
  avail_pages = sysconf(_SC_AVPHYS_PAGES);

  return (uint64_t) page_size * avail_pages;
}
408
409
/* Total physical memory in bytes: page size times physical page count. */
uint64_t uv_get_total_memory(void) {
  long page_size;
  long total_pages;

  page_size = sysconf(_SC_PAGESIZE);
  total_pages = sysconf(_SC_PHYS_PAGES);

  return (uint64_t) page_size * total_pages;
}
413
414
/* No mechanism to query a memory constraint here; 0 signals "unknown"
 * to callers.
 */
uint64_t uv_get_constrained_memory(void) {
  return 0;  /* Memory constraints are unknown. */
}
418
419
/* Without a constraint figure, available memory is simply free memory. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}
423
424
/* 1-, 5- and 15-minute load averages. Best effort: the return value of
 * getloadavg() is deliberately ignored.
 */
void uv_loadavg(double avg[3]) {
  (void) getloadavg(avg, 3);
}
428
429
430 #if defined(PORT_SOURCE_FILE)
431
/* (Re)associate the handle's file_obj with the loop's fs port. Needed after
 * every delivered event because PORT_SOURCE_FILE associations are oneshot.
 * Returns 0, UV_EBADF if the handle was already stopped, or a negative
 * errno-derived code from port_associate().
 */
static int uv__fs_event_rearm(uv_fs_event_t *handle) {
  if (handle->fd == PORT_DELETED)
    return UV_EBADF;

  if (port_associate(handle->loop->fs_fd,
                     PORT_SOURCE_FILE,
                     (uintptr_t) &handle->fo,
                     FILE_ATTRIB | FILE_MODIFIED,
                     handle) == -1) {
    return UV__ERR(errno);
  }
  handle->fd = PORT_LOADED;

  return 0;
}
447
448
/* Drain file events from loop->fs_fd and run the handle callbacks.
 * Invoked via loop->fs_event_watcher when the fs port polls readable.
 */
static void uv__fs_event_read(uv_loop_t* loop,
                              uv__io_t* w,
                              unsigned int revents) {
  uv_fs_event_t *handle = NULL;
  timespec_t timeout;
  port_event_t pe;
  int events;
  int r;

  (void) w;
  (void) revents;

  do {
    uint_t n = 1;

    /*
     * Note that our use of port_getn() here (and not port_get()) is deliberate:
     * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
     * causes port_get() to return success instead of ETIME when there aren't
     * actually any events (!); by using port_getn() in lieu of port_get(),
     * we can at least workaround the bug by checking for zero returned events
     * and treating it as we would ETIME.
     */
    do {
      memset(&timeout, 0, sizeof timeout);
      r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
    }
    while (r == -1 && errno == EINTR);

    if ((r == -1 && errno == ETIME) || n == 0)
      break;

    handle = (uv_fs_event_t*) pe.portev_user;
    assert((r == 0) && "unexpected port_get() error");

    /* A close was deferred until this queued event could be consumed; finish
     * it now instead of invoking the user callback (see uv__fs_event_close()).
     */
    if (uv__is_closing(handle)) {
      uv__handle_stop(handle);
      uv__make_close_pending((uv_handle_t*) handle);
      break;
    }

    /* Map port event bits onto UV_CHANGE / UV_RENAME. */
    events = 0;
    if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
      events |= UV_CHANGE;
    if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
      events |= UV_RENAME;
    assert(events != 0);
    handle->fd = PORT_FIRED;
    handle->cb(handle, NULL, events, 0);

    /* Associations are oneshot: rearm unless the callback stopped us. */
    if (handle->fd != PORT_DELETED) {
      r = uv__fs_event_rearm(handle);
      if (r != 0)
        handle->cb(handle, NULL, 0, r);
    }
  }
  while (handle->fd != PORT_DELETED);
}
507
508
/* Initialize an fs_event handle; watching begins in uv_fs_event_start(). */
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}
513
514
/* Start watching `path` for attribute changes and modifications.
 * The loop-wide fs port is created lazily on the first call; the handle's
 * file_obj is then associated with that port and the watcher that drains it
 * is started on the first run.
 * Returns 0, UV_EINVAL if already active, or a negative UV_* error code.
 * `flags` is currently unused on this platform.
 */
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int portfd;
  int first_run;
  int err;

  if (uv__is_active(handle))
    return UV_EINVAL;

  /* Lazily create the port shared by every fs_event handle on this loop. */
  first_run = 0;
  if (handle->loop->fs_fd == -1) {
    portfd = port_create();
    if (portfd == -1)
      return UV__ERR(errno);
    handle->loop->fs_fd = portfd;
    first_run = 1;
  }

  uv__handle_start(handle);
  handle->fd = PORT_UNUSED;
  handle->cb = cb;

  /* Fix: the uv__strdup() result was previously used unchecked; on
   * allocation failure a NULL fo_name reached port_associate().
   */
  handle->path = uv__strdup(path);
  if (handle->path == NULL) {
    uv_fs_event_stop(handle);
    return UV_ENOMEM;
  }

  memset(&handle->fo, 0, sizeof handle->fo);
  handle->fo.fo_name = handle->path;
  err = uv__fs_event_rearm(handle);
  if (err != 0) {
    uv_fs_event_stop(handle);
    return err;
  }

  if (first_run) {
    uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd);
    uv__io_start(handle->loop, &handle->loop->fs_event_watcher, POLLIN);
  }

  return 0;
}
555
556
/* Internal stop: dissociate the file watch and release the path.
 * A non-zero return from port_dissociate() most likely means an event for
 * this handle is still queued on the port; in that case uv__handle_stop()
 * is deliberately skipped so the queued event can still be consumed
 * (see uv__fs_event_close()).
 */
static int uv__fs_event_stop(uv_fs_event_t* handle) {
  int ret = 0;

  if (!uv__is_active(handle))
    return 0;

  /* Only PORT_LOADED handles have a live association to undo. */
  if (handle->fd == PORT_LOADED) {
    ret = port_dissociate(handle->loop->fs_fd,
                          PORT_SOURCE_FILE,
                          (uintptr_t) &handle->fo);
  }

  handle->fd = PORT_DELETED;
  uv__free(handle->path);
  handle->path = NULL;
  handle->fo.fo_name = NULL;
  if (ret == 0)
    uv__handle_stop(handle);

  return ret;
}
578
/* Public stop: always reports success; a failed dissociate is handled
 * internally (see uv__fs_event_stop() and uv__fs_event_close()).
 */
int uv_fs_event_stop(uv_fs_event_t* handle) {
  (void) uv__fs_event_stop(handle);
  return 0;
}
583
/* Close hook for fs_event handles. */
void uv__fs_event_close(uv_fs_event_t* handle) {
  /*
   * If we were unable to dissociate the port here, then it is most likely
   * that there is a pending queued event. When this happens, we don't want
   * to complete the close as it will free the underlying memory for the
   * handle, causing a use-after-free problem when the event is processed.
   * We defer the final cleanup until after the event is consumed in
   * uv__fs_event_read().
   */
  if (uv__fs_event_stop(handle) == 0)
    uv__make_close_pending((uv_handle_t*) handle);
}
596
597 #else /* !defined(PORT_SOURCE_FILE) */
598
/* PORT_SOURCE_FILE is unavailable: fs events are not supported here. */
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  return UV_ENOSYS;
}
602
603
/* PORT_SOURCE_FILE is unavailable: fs events are not supported here. */
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* filename,
                      unsigned int flags) {
  return UV_ENOSYS;
}
610
611
/* PORT_SOURCE_FILE is unavailable: fs events are not supported here. */
int uv_fs_event_stop(uv_fs_event_t* handle) {
  return UV_ENOSYS;
}
615
616
/* fs_event handles can never be created here (init returns UV_ENOSYS),
 * so closing one is unreachable.
 */
void uv__fs_event_close(uv_fs_event_t* handle) {
  UNREACHABLE();
}
620
621 #endif /* defined(PORT_SOURCE_FILE) */
622
623
uv_resident_set_memory(size_t * rss)624 int uv_resident_set_memory(size_t* rss) {
625 psinfo_t psinfo;
626 int err;
627 int fd;
628
629 fd = open("/proc/self/psinfo", O_RDONLY);
630 if (fd == -1)
631 return UV__ERR(errno);
632
633 /* FIXME(bnoordhuis) Handle EINTR. */
634 err = UV_EINVAL;
635 if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
636 *rss = (size_t)psinfo.pr_rssize * 1024;
637 err = 0;
638 }
639 uv__close(fd);
640
641 return err;
642 }
643
644
uv_uptime(double * uptime)645 int uv_uptime(double* uptime) {
646 kstat_ctl_t *kc;
647 kstat_t *ksp;
648 kstat_named_t *knp;
649
650 long hz = sysconf(_SC_CLK_TCK);
651
652 kc = kstat_open();
653 if (kc == NULL)
654 return UV_EPERM;
655
656 ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc");
657 if (kstat_read(kc, ksp, NULL) == -1) {
658 *uptime = -1;
659 } else {
660 knp = (kstat_named_t*) kstat_data_lookup(ksp, (char*) "clk_intr");
661 *uptime = knp->value.ul / hz;
662 }
663 kstat_close(kc);
664
665 return 0;
666 }
667
668
/* Enumerate CPUs through the kstat framework in three passes:
 *   1. count "cpu_info" instances,
 *   2. read model name and clock speed per instance,
 *   3. read cumulative tick counters from the "cpu:sys" kstats.
 * NOTE(review): assumes the instance count stays stable between passes —
 * a CPU coming online in between could overrun the array; verify whether
 * that is acceptable here.
 * Returns 0, UV_EPERM if the kstat chain can't be opened, or UV_ENOMEM.
 */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  int lookup_instance;
  kstat_ctl_t *kc;
  kstat_t *ksp;
  kstat_named_t *knp;
  uv_cpu_info_t* cpu_info;

  kc = kstat_open();
  if (kc == NULL)
    return UV_EPERM;

  /* Get count of cpus */
  lookup_instance = 0;
  while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
    lookup_instance++;
  }

  *cpu_infos = uv__malloc(lookup_instance * sizeof(**cpu_infos));
  if (!(*cpu_infos)) {
    kstat_close(kc);
    return UV_ENOMEM;
  }

  *count = lookup_instance;

  /* Second pass: model and speed. A failed kstat_read() leaves speed 0 and
   * model NULL for that entry.
   */
  cpu_info = *cpu_infos;
  lookup_instance = 0;
  while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
    if (kstat_read(kc, ksp, NULL) == -1) {
      cpu_info->speed = 0;
      cpu_info->model = NULL;
    } else {
      knp = kstat_data_lookup(ksp, (char*) "clock_MHz");
      assert(knp->data_type == KSTAT_DATA_INT32 ||
             knp->data_type == KSTAT_DATA_INT64);
      cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32
                                                             : knp->value.i64;

      knp = kstat_data_lookup(ksp, (char*) "brand");
      assert(knp->data_type == KSTAT_DATA_STRING);
      cpu_info->model = uv__strdup(KSTAT_NAMED_STR_PTR(knp));
    }

    lookup_instance++;
    cpu_info++;
  }

  /* Third pass: per-cpu tick counters; there is no "nice" accounting. */
  cpu_info = *cpu_infos;
  lookup_instance = 0;
  for (;;) {
    ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys");

    if (ksp == NULL)
      break;

    if (kstat_read(kc, ksp, NULL) == -1) {
      cpu_info->cpu_times.user = 0;
      cpu_info->cpu_times.nice = 0;
      cpu_info->cpu_times.sys = 0;
      cpu_info->cpu_times.idle = 0;
      cpu_info->cpu_times.irq = 0;
    } else {
      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.user = knp->value.ui64;

      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.sys = knp->value.ui64;

      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.idle = knp->value.ui64;

      knp = kstat_data_lookup(ksp, (char*) "intr");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.irq = knp->value.ui64;
      cpu_info->cpu_times.nice = 0;
    }

    lookup_instance++;
    cpu_info++;
  }

  kstat_close(kc);

  return 0;
}
757
758
759 #ifdef SUNOS_NO_IFADDRS
/* Built without ifaddrs support: report an empty list and UV_ENOSYS. */
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  *count = 0;
  *addresses = NULL;
  return UV_ENOSYS;
}
765 #else /* SUNOS_NO_IFADDRS */
766 /*
767 * Inspired By:
768 * https://blogs.oracle.com/paulie/entry/retrieving_mac_address_in_solaris
769 * http://www.pauliesworld.org/project/getmac.c
770 */
/* Fill address->phys_addr (MAC address) for an interface entry.
 * First tries the link-layer address embedded in ifa_addr; if that yields
 * all zeros, falls back to a SIOCGARP ioctl keyed on the interface's
 * IPv4/IPv6 address. Returns 0 on success (or when no better answer is
 * available) and a negative errno-derived code on socket/ioctl failure.
 */
static int uv__set_phys_addr(uv_interface_address_t* address,
                             struct ifaddrs* ent) {

  struct sockaddr_dl* sa_addr;
  int sockfd;
  size_t i;
  struct arpreq arpreq;

  /* This appears to only work as root */
  sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
  memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
  for (i = 0; i < sizeof(address->phys_addr); i++) {
    /* Check that all bytes of phys_addr are zero. */
    if (address->phys_addr[i] != 0)
      return 0;
  }
  /* All-zero MAC: fall back to an ARP query on the interface address. */
  memset(&arpreq, 0, sizeof(arpreq));
  if (address->address.address4.sin_family == AF_INET) {
    struct sockaddr_in* sin = ((struct sockaddr_in*)&arpreq.arp_pa);
    sin->sin_addr.s_addr = address->address.address4.sin_addr.s_addr;
  } else if (address->address.address4.sin_family == AF_INET6) {
    struct sockaddr_in6* sin = ((struct sockaddr_in6*)&arpreq.arp_pa);
    memcpy(sin->sin6_addr.s6_addr,
           address->address.address6.sin6_addr.s6_addr,
           sizeof(address->address.address6.sin6_addr.s6_addr));
  } else {
    /* Not an IP address; leave the zeroed MAC in place. */
    return 0;
  }

  sockfd = socket(AF_INET, SOCK_DGRAM, 0);
  if (sockfd < 0)
    return UV__ERR(errno);

  if (ioctl(sockfd, SIOCGARP, (char*)&arpreq) == -1) {
    uv__close(sockfd);
    return UV__ERR(errno);
  }
  memcpy(address->phys_addr, arpreq.arp_ha.sa_data, sizeof(address->phys_addr));
  uv__close(sockfd);
  return 0;
}
812
813
uv__ifaddr_exclude(struct ifaddrs * ent)814 static int uv__ifaddr_exclude(struct ifaddrs *ent) {
815 if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
816 return 1;
817 if (ent->ifa_addr == NULL)
818 return 1;
819 if (ent->ifa_addr->sa_family != AF_INET &&
820 ent->ifa_addr->sa_family != AF_INET6)
821 return 1;
822 return 0;
823 }
824
/* Enumerate up/running IPv4 and IPv6 interfaces via getifaddrs().
 * On success *addresses points to a uv__malloc'd array of *count entries
 * that the caller releases with uv_free_interface_addresses().
 * Returns 0, or a negative UV_* error code.
 */
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  struct ifaddrs* addrs;
  struct ifaddrs* ent;

  *count = 0;
  *addresses = NULL;

  if (getifaddrs(&addrs))
    return UV__ERR(errno);

  /* Count the number of interfaces */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent))
      continue;
    (*count)++;
  }

  if (*count == 0) {
    freeifaddrs(addrs);
    return 0;
  }

  *addresses = uv__malloc(*count * sizeof(**addresses));
  if (!(*addresses)) {
    freeifaddrs(addrs);
    return UV_ENOMEM;
  }

  address = *addresses;

  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent))
      continue;

    address->name = uv__strdup(ent->ifa_name);

    if (ent->ifa_addr->sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
    }

    /* Fix: ifa_netmask may legitimately be NULL for some interfaces and was
     * previously dereferenced unconditionally; report a zeroed netmask then.
     */
    if (ent->ifa_netmask == NULL) {
      memset(&address->netmask, 0, sizeof(address->netmask));
    } else if (ent->ifa_netmask->sa_family == AF_INET6) {
      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
    } else {
      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
    }

    address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) ||
                              (ent->ifa_flags & IFF_LOOPBACK));

    uv__set_phys_addr(address, ent);
    address++;
  }

  freeifaddrs(addrs);

  return 0;
}
885 #endif /* SUNOS_NO_IFADDRS */
886
/* Release an array previously returned by uv_interface_addresses():
 * free each entry's name string, then the array itself.
 */
void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  int i = 0;

  while (i < count) {
    uv__free(addresses[i].name);
    i++;
  }

  uv__free(addresses);
}
897
898
899 #if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
/* Fallback strnlen() for pre-POSIX.1-2008 systems: length of `s`, but at
 * most `maxlen` bytes are examined.
 */
size_t strnlen(const char* s, size_t maxlen) {
  const char* nul = memchr(s, '\0', maxlen);

  return (nul == NULL) ? maxlen : (size_t) (nul - s);
}
907 #endif
908