xref: /libuv/src/unix/aix.c (revision 1b01b786)
1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 #include "uv.h"
23 #include "internal.h"
24 
25 #include <stdio.h>
26 #include <stdint.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <assert.h>
30 #include <errno.h>
31 
32 #include <sys/types.h>
33 #include <sys/socket.h>
34 #include <sys/ioctl.h>
35 #include <net/if.h>
36 #include <netinet/in.h>
37 #include <arpa/inet.h>
38 
39 #include <sys/time.h>
40 #include <unistd.h>
41 #include <fcntl.h>
42 #include <utmp.h>
43 #include <libgen.h>
44 
45 #include <sys/protosw.h>
46 #include <libperfstat.h>
47 #include <procinfo.h>
48 #include <sys/proc.h>
49 #include <sys/procfs.h>
50 
51 #include <sys/poll.h>
52 
53 #include <sys/pollset.h>
54 #include <ctype.h>
55 #ifdef HAVE_SYS_AHAFS_EVPRODS_H
56 #include <sys/ahafs_evProds.h>
57 #endif
58 
59 #include <sys/mntctl.h>
60 #include <sys/vmount.h>
61 #include <limits.h>
62 #include <strings.h>
63 #include <sys/vnode.h>
64 
/* Size of the scratch buffer used when reading AHAFS event data. */
#define RDWR_BUF_SIZE   4096
/* String-equality shorthand: true when a and b compare equal. */
#define EQ(a,b)         (strcmp(a,b) == 0)

/* Absolute path of the executable, captured once by uv_setup_args(). */
char* original_exepath = NULL;
/* Guards process_argv/process_title_ptr/original_exepath accesses. */
uv_mutex_t process_title_mutex;
uv_once_t process_title_mutex_once = UV_ONCE_INIT;
static void* args_mem = NULL;           /* Heap copy of argv made by uv_setup_args(). */
static char** process_argv = NULL;      /* Original argv; AIX reads the title from it. */
static int process_argc = 0;
static char* process_title_ptr = NULL;  /* Last title set via uv_set_process_title(). */
75 
/* uv_once() callback: lazily initialize the process-title mutex. */
void init_process_title_mutex_once(void) {
  uv_mutex_init(&process_title_mutex);
}
79 
80 
/* Per-loop platform setup: create the AIX pollset backing the event loop.
 * Returns 0 on success, -1 on failure (callers only test for non-zero).
 */
int uv__platform_loop_init(uv_loop_t* loop) {
  loop->fs_fd = -1;  /* AHAFS fd; opened lazily by the fs-event code. */

  /* Passing maxfd of -1 should mean the limit is determined
   * by the user's ulimit or the global limit as per the doc */
  loop->backend_fd = pollset_create(-1);

  if (loop->backend_fd == -1)
    return -1;

  return 0;
}
93 
94 
/* Per-loop platform teardown: release the AHAFS fd and the pollset. */
void uv__platform_loop_delete(uv_loop_t* loop) {
  int fd;

  fd = loop->fs_fd;
  if (fd != -1) {
    loop->fs_fd = -1;
    uv__close(fd);
  }

  fd = loop->backend_fd;
  if (fd != -1) {
    loop->backend_fd = -1;
    pollset_destroy(fd);
  }
}
106 
107 
/* Re-create the pollset in the child after fork(): pollset fds are not
 * usable across fork, so tear down and rebuild the platform state.
 */
int uv__io_fork(uv_loop_t* loop) {
  uv__platform_loop_delete(loop);

  return uv__platform_loop_init(loop);
}
113 
114 
/* Probe whether `fd` can be watched by the pollset: transiently add it,
 * then remove it. Returns 0 if pollable, a negative errno value if not.
 */
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct poll_ctl ctl;

  ctl.cmd = PS_MOD;  /* Equivalent to PS_ADD if the fd is not in the pollset. */
  ctl.events = POLLIN;
  ctl.fd = fd;

  if (pollset_ctl(loop->backend_fd, &ctl, 1) != 0)
    return UV__ERR(errno);

  ctl.cmd = PS_DELETE;
  if (pollset_ctl(loop->backend_fd, &ctl, 1) != 0)
    abort();  /* Removing an fd we just added must not fail. */

  return 0;
}
131 
132 
/* Poll all watched file descriptors via the AIX pollset API.
 * First flushes pending watcher modifications into the kernel pollset,
 * then blocks for at most `timeout` ms (-1 = infinite, 0 = non-blocking)
 * and dispatches callbacks for every ready descriptor. Signal watchers
 * run last; metrics bookkeeping follows the common uv__io_poll pattern.
 */
void uv__io_poll(uv_loop_t* loop, int timeout) {
  uv__loop_internal_fields_t* lfields;
  struct pollfd events[1024];
  struct pollfd pqry;
  struct pollfd* pe;
  struct poll_ctl pc;
  struct uv__queue* q;
  uv__io_t* w;
  uint64_t base;
  uint64_t diff;
  int have_signals;
  int nevents;
  int count;
  int nfds;
  int i;
  int rc;
  int add_failed;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(uv__queue_empty(&loop->watcher_queue));
    return;
  }

  lfields = uv__get_internal_fields(loop);

  /* Sync queued watcher changes into the kernel pollset. */
  while (!uv__queue_empty(&loop->watcher_queue)) {
    q = uv__queue_head(&loop->watcher_queue);
    uv__queue_remove(q);
    uv__queue_init(q);

    w = uv__queue_data(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    pc.events = w->pevents;
    pc.fd = w->fd;

    add_failed = 0;
    if (w->events == 0) {
      /* Watcher was inactive: try a plain PS_ADD first. */
      pc.cmd = PS_ADD;
      if (pollset_ctl(loop->backend_fd, &pc, 1)) {
        if (errno != EINVAL) {
          assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
          abort();
        }
        /* Check if the fd is already in the pollset */
        pqry.fd = pc.fd;
        rc = pollset_query(loop->backend_fd, &pqry);
        switch (rc) {
        case -1:
          assert(0 && "Failed to query pollset for file descriptor");
          abort();
        case 0:
          assert(0 && "Pollset does not contain file descriptor");
          abort();
        }
        /* If we got here then the pollset already contained the file descriptor even though
         * we didn't think it should. This probably shouldn't happen, but we can continue. */
        add_failed = 1;
      }
    }
    if (w->events != 0 || add_failed) {
      /* Modify, potentially removing events -- need to delete then add.
       * Could maybe mod if we knew for sure no events are removed, but
       * content of w->events is handled above as not reliable (falls back)
       * so may require a pollset_query() which would have to be pretty cheap
       * compared to a PS_DELETE to be worth optimizing. Alternatively, could
       * lazily remove events, squelching them in the mean time. */
      pc.cmd = PS_DELETE;
      if (pollset_ctl(loop->backend_fd, &pc, 1)) {
        assert(0 && "Failed to delete file descriptor (pc.fd) from pollset");
        abort();
      }
      pc.cmd = PS_ADD;
      if (pollset_ctl(loop->backend_fd, &pc, 1)) {
        assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
        abort();
      }
    }

    w->events = w->pevents;
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  /* With idle-time metrics enabled, poll non-blocking first so the time
   * actually spent waiting can be attributed correctly. */
  if (lfields->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    /* Store the current timeout in a location that's globally accessible so
     * other locations like uv__work_done() can determine whether the queue
     * of events in the callback were waiting when poll was called.
     */
    lfields->current_timeout = timeout;

    nfds = pollset_poll(loop->backend_fd,
                        events,
                        ARRAY_SIZE(events),
                        timeout);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      /* Timed out with nothing ready; restore the user timeout if we had
       * substituted a non-blocking poll for metrics purposes. */
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
        if (timeout == -1)
          continue;
        if (timeout > 0)
          goto update_timeout;
      }

      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR) {
        abort();
      }

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    /* Stash the event array so uv__platform_invalidate_fd() can find and
     * invalidate entries if a callback closes an fd mid-iteration. */
    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;

    for (i = 0; i < nfds; i++) {
      pe = events + i;
      pc.cmd = PS_DELETE;
      pc.fd = pe->fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (pc.fd == -1)
        continue;

      assert(pc.fd >= 0);
      assert((unsigned) pc.fd < loop->nwatchers);

      w = loop->watchers[pc.fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        pollset_ctl(loop->backend_fd, &pc, 1);
        continue;
      }

      /* Run signal watchers last.  This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, pe->revents);
      }

      nevents++;
    }

    uv__metrics_inc_events(loop, nevents);
    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
      uv__metrics_inc_events_waiting(loop, nevents);
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    /* Subtract the time already spent waiting from the remaining timeout. */
    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}
376 
377 
uv_get_free_memory(void)378 uint64_t uv_get_free_memory(void) {
379   perfstat_memory_total_t mem_total;
380   int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
381   if (result == -1) {
382     return 0;
383   }
384   return mem_total.real_free * 4096;
385 }
386 
387 
uv_get_total_memory(void)388 uint64_t uv_get_total_memory(void) {
389   perfstat_memory_total_t mem_total;
390   int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
391   if (result == -1) {
392     return 0;
393   }
394   return mem_total.real_total * 4096;
395 }
396 
397 
/* AIX exposes no cgroup-like memory limit; report "unknown" as 0. */
uint64_t uv_get_constrained_memory(void) {
  return 0;  /* Memory constraints are unknown. */
}
401 
402 
/* Best available estimate on AIX is simply the free-memory figure. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}
406 
407 
uv_loadavg(double avg[3])408 void uv_loadavg(double avg[3]) {
409   perfstat_cpu_total_t ps_total;
410   int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
411   if (result == -1) {
412     avg[0] = 0.; avg[1] = 0.; avg[2] = 0.;
413     return;
414   }
415   avg[0] = ps_total.loadavg[0] / (double)(1 << SBITS);
416   avg[1] = ps_total.loadavg[1] / (double)(1 << SBITS);
417   avg[2] = ps_total.loadavg[2] / (double)(1 << SBITS);
418 }
419 
420 
421 #ifdef HAVE_SYS_AHAFS_EVPRODS_H
/* Convert a device path like "/dev/hd0" into its raw-device counterpart
 * "/dev/rhd0", writing the result into *dst.
 * Returns *dst on success, or NULL when cp contains no '/'.
 */
static char* uv__rawname(const char* cp, char (*dst)[FILENAME_MAX+1]) {
  char* dp;

  /* strrchr() is the standard replacement for legacy rindex(),
   * which was removed in POSIX.1-2008. */
  dp = strrchr(cp, '/');
  if (dp == NULL)
    return NULL;

  snprintf(*dst, sizeof(*dst), "%.*s/r%s", (int) (dp - cp), cp, dp + 1);
  return *dst;
}
432 
433 
434 /*
435  * Determine whether given pathname is a directory
436  * Returns 0 if the path is a directory, -1 if not
437  *
438  * Note: Opportunity here for more detailed error information but
439  *       that requires changing callers of this function as well
440  */
static int uv__path_is_a_directory(char* filename) {
  struct stat statbuf;

  if (uv__stat(filename, &statbuf) < 0)
    return -1;  /* failed: not a directory, assume it is a file */

  /* AIX's struct stat carries the vnode type; VDIR marks a directory. */
  if (statbuf.st_type == VDIR)
    return 0;

  return -1;
}
452 
453 
454 /*
455  * Check whether AHAFS is mounted.
456  * Returns 0 if AHAFS is mounted, or an error code < 0 on failure
457  */
uv__is_ahafs_mounted(void)458 static int uv__is_ahafs_mounted(void){
459   char rawbuf[FILENAME_MAX+1];
460   int rv, i = 2;
461   struct vmount *p;
462   int size_multiplier = 10;
463   size_t siz = sizeof(struct vmount)*size_multiplier;
464   struct vmount *vmt;
465   const char *dev = "/aha";
466   char *obj, *stub;
467 
468   p = uv__malloc(siz);
469   if (p == NULL)
470     return UV__ERR(errno);
471 
472   /* Retrieve all mounted filesystems */
473   rv = mntctl(MCTL_QUERY, siz, (char*)p);
474   if (rv < 0)
475     return UV__ERR(errno);
476   if (rv == 0) {
477     /* buffer was not large enough, reallocate to correct size */
478     siz = *(int*)p;
479     uv__free(p);
480     p = uv__malloc(siz);
481     if (p == NULL)
482       return UV__ERR(errno);
483     rv = mntctl(MCTL_QUERY, siz, (char*)p);
484     if (rv < 0)
485       return UV__ERR(errno);
486   }
487 
488   /* Look for dev in filesystems mount info */
489   for(vmt = p, i = 0; i < rv; i++) {
490     obj = vmt2dataptr(vmt, VMT_OBJECT);     /* device */
491     stub = vmt2dataptr(vmt, VMT_STUB);      /* mount point */
492 
493     if (EQ(obj, dev) || EQ(uv__rawname(obj, &rawbuf), dev) || EQ(stub, dev)) {
494       uv__free(p);  /* Found a match */
495       return 0;
496     }
497     vmt = (struct vmount *) ((char *) vmt + vmt->vmt_length);
498   }
499 
500   /* /aha is required for monitoring filesystem changes */
501   return -1;
502 }
503 
504 /*
505  * Recursive call to mkdir() to create intermediate folders, if any
506  * Returns code from mkdir call
507  */
/*
 * Recursive call to mkdir() to create intermediate folders, if any
 * Returns code from mkdir call (0 on success, -1 on failure with errno set)
 */
static int uv__makedir_p(const char *dir) {
  char tmp[256];
  char *p;
  size_t len;
  int err;

  /* BUGFIX: check uv__strscpy() for truncation (resolves the old TODO);
   * silently creating a truncated path would be wrong. */
  if (uv__strscpy(tmp, dir, sizeof(tmp)) < 0) {
    errno = ENAMETOOLONG;
    return -1;
  }
  len = strlen(tmp);
  if (len == 0) {
    /* BUGFIX: an empty path made the original read tmp[-1] (UB). */
    errno = ENOENT;
    return -1;
  }
  if (tmp[len - 1] == '/')
    tmp[len - 1] = 0;
  /* Walk the path, creating each intermediate component in turn. */
  for (p = tmp + 1; *p; p++) {
    if (*p == '/') {
      *p = 0;
      err = mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
      if (err != 0 && errno != EEXIST)
        return err;
      *p = '/';
    }
  }
  return mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
}
530 
531 /*
532  * Creates necessary subdirectories in the AIX Event Infrastructure
533  * file system for monitoring the object specified.
534  * Returns code from mkdir call
535  */
/*
 * Creates necessary subdirectories in the AIX Event Infrastructure
 * file system for monitoring the object specified.
 * Returns code from mkdir call (0 on success, negative on failure)
 */
static int uv__make_subdirs_p(const char *filename) {
  char cmd[2048];
  const char *factory;
  char *p;
  int rc;

  /* Strip off the monitor file name */
  p = strrchr(filename, '/');

  if (p == NULL)
    return 0;

  /* Pick the monitor factory matching the object type. */
  if (uv__path_is_a_directory((char*)filename) == 0)
    factory = "/aha/fs/modDir.monFactory";
  else
    factory = "/aha/fs/modFile.monFactory";

  /* BUGFIX: the original used unbounded sprintf()+strncat() into cmd,
   * which could overflow for long paths; snprintf() bounds the write
   * and reports truncation. */
  rc = snprintf(cmd, sizeof(cmd), "%s%.*s", factory,
                (int) (p - filename), filename);
  if (rc < 0 || (size_t) rc >= sizeof(cmd)) {
    errno = ENAMETOOLONG;
    return -1;
  }

  rc = uv__makedir_p(cmd);

  if (rc == -1 && errno != EEXIST){
    return UV__ERR(errno);
  }

  return rc;
}
562 
563 
564 /*
565  * Checks if /aha is mounted, then proceeds to set up the monitoring
566  * objects for the specified file.
567  * Returns 0 on success, or an error code < 0 on failure
568  */
uv__setup_ahafs(const char * filename,int * fd)569 static int uv__setup_ahafs(const char* filename, int *fd) {
570   int rc = 0;
571   char mon_file_write_string[RDWR_BUF_SIZE];
572   char mon_file[PATH_MAX];
573   int file_is_directory = 0; /* -1 == NO, 0 == YES  */
574 
575   /* Create monitor file name for object */
576   file_is_directory = uv__path_is_a_directory((char*)filename);
577 
578   if (file_is_directory == 0)
579     sprintf(mon_file, "/aha/fs/modDir.monFactory");
580   else
581     sprintf(mon_file, "/aha/fs/modFile.monFactory");
582 
583   if ((strlen(mon_file) + strlen(filename) + 5) > PATH_MAX)
584     return UV_ENAMETOOLONG;
585 
586   /* Make the necessary subdirectories for the monitor file */
587   rc = uv__make_subdirs_p(filename);
588   if (rc == -1 && errno != EEXIST)
589     return rc;
590 
591   strcat(mon_file, filename);
592   strcat(mon_file, ".mon");
593 
594   *fd = 0; errno = 0;
595 
596   /* Open the monitor file, creating it if necessary */
597   *fd = open(mon_file, O_CREAT|O_RDWR);
598   if (*fd < 0)
599     return UV__ERR(errno);
600 
601   /* Write out the monitoring specifications.
602    * In this case, we are monitoring for a state change event type
603    *    CHANGED=YES
604    * We will be waiting in select call, rather than a read:
605    *    WAIT_TYPE=WAIT_IN_SELECT
606    * We only want minimal information for files:
607    *      INFO_LVL=1
608    * For directories, we want more information to track what file
609    * caused the change
610    *      INFO_LVL=2
611    */
612 
613   if (file_is_directory == 0)
614     sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=2");
615   else
616     sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=1");
617 
618   rc = write(*fd, mon_file_write_string, strlen(mon_file_write_string)+1);
619   if (rc < 0 && errno != EBUSY)
620     return UV__ERR(errno);
621 
622   return 0;
623 }
624 
625 /*
626  * Skips a specified number of lines in the buffer passed in.
627  * Walks the buffer pointed to by p and attempts to skip n lines.
628  * Returns the total number of lines skipped
629  */
/*
 * Skips a specified number of lines in the buffer passed in.
 * Walks the buffer pointed to by p and attempts to skip n lines.
 * Returns the total number of lines skipped
 */
static int uv__skip_lines(char **p, int n) {
  int lines = 0;

  while(n > 0) {
    *p = strchr(*p, '\n');
    /* BUGFIX: the original tested `!p` (the out-parameter itself, which is
     * never NULL) instead of `*p` (strchr's result), so a buffer with fewer
     * than n newlines fell through and dereferenced NULL via (*p)++. */
    if (*p == NULL)
      return lines;

    (*p)++;
    n--;
    lines++;
  }
  return lines;
}
644 
645 
646 /*
647  * Parse the event occurrence data to figure out what event just occurred
648  * and take proper action.
649  *
650  * The buf is a pointer to the buffer containing the event occurrence data
651  * Returns 0 on success, -1 if unrecoverable error in parsing
652  *
653  */
/*
 * Parse the event occurrence data to figure out what event just occurred
 * and take proper action.
 *
 * The buf is a pointer to the buffer containing the event occurrence data
 * Returns 0 on success, -1 if unrecoverable error in parsing
 *
 * On success, *events is set to UV_RENAME or UV_CHANGE; for directory
 * watches handle->dir_filename is updated to the triggering file's name.
 */
static int uv__parse_data(char *buf, int *events, uv_fs_event_t* handle) {
  int    evp_rc, i;
  char   *p;
  char   filename[PATH_MAX]; /* To be used when handling directories */

  p = buf;
  *events = 0;

  /* Clean the filename buffer*/
  for(i = 0; i < PATH_MAX; i++) {
    filename[i] = 0;
  }
  i = 0;

  /* Check for BUF_WRAP */
  if (strncmp(buf, "BUF_WRAP", strlen("BUF_WRAP")) == 0) {
    assert(0 && "Buffer wrap detected, Some event occurrences lost!");
    return 0;
  }

  /* Since we are using the default buffer size (4K), and have specified
   * INFO_LVL=1, we won't see any EVENT_OVERFLOW conditions.  Applications
   * should check for this keyword if they are using an INFO_LVL of 2 or
   * higher, and have a buffer size of <= 4K
   */

  /* Skip to RC_FROM_EVPROD */
  if (uv__skip_lines(&p, 9) != 9)
    return -1;

  if (sscanf(p, "RC_FROM_EVPROD=%d\nEND_EVENT_DATA", &evp_rc) == 1) {
    if (uv__path_is_a_directory(handle->path) == 0) { /* Directory */
      if (evp_rc == AHAFS_MODDIR_UNMOUNT || evp_rc == AHAFS_MODDIR_REMOVE_SELF) {
        /* The directory is no longer available for monitoring */
        *events = UV_RENAME;
        /* NOTE(review): the previous dir_filename is not freed before being
         * overwritten here or below -- looks like a potential leak; confirm
         * against uv_fs_event_stop() which frees it once. */
        handle->dir_filename = NULL;
      } else {
        /* A file was added/removed inside the directory */
        *events = UV_CHANGE;

        /* Get the EVPROD_INFO */
        if (uv__skip_lines(&p, 1) != 1)
          return -1;

        /* Scan out the name of the file that triggered the event*/
        if (sscanf(p, "BEGIN_EVPROD_INFO\n%sEND_EVPROD_INFO", filename) == 1) {
          handle->dir_filename = uv__strdup((const char*)&filename);
        } else
          return -1;
        }
    } else { /* Regular File */
      if (evp_rc == AHAFS_MODFILE_RENAME)
        *events = UV_RENAME;
      else
        *events = UV_CHANGE;
    }
  }
  else
    return -1;

  return 0;
}
716 
717 
718 /* This is the internal callback */
/* Internal uv__io_t callback: fires when the AHAFS monitor fd is readable.
 * Reads the pending event record, parses it, and invokes the user's
 * uv_fs_event_cb with the basename of the changed file.
 */
static void uv__ahafs_event(uv_loop_t* loop, uv__io_t* event_watch, unsigned int fflags) {
  char   result_data[RDWR_BUF_SIZE];
  int bytes, rc = 0;
  uv_fs_event_t* handle;
  int events = 0;
  char fname[PATH_MAX];
  char *p;

  handle = container_of(event_watch, uv_fs_event_t, event_watcher);

  /* At this point, we assume that polling has been done on the
   * file descriptor, so we can just read the AHAFS event occurrence
   * data and parse its results without having to block anything
   */
  bytes = pread(event_watch->fd, result_data, RDWR_BUF_SIZE, 0);

  assert((bytes >= 0) && "uv__ahafs_event - Error reading monitor file");

  /* In file / directory move cases, AIX Event infrastructure
   * produces a second event with no data.
   * Ignore it and return gracefully.
   */
  if(bytes == 0)
    return;

  /* Parse the data */
  if(bytes > 0)
    rc = uv__parse_data(result_data, &events, handle);

  /* Unrecoverable error */
  if (rc == -1)
    return;

  /* For directory changes, the name of the files that triggered the change
   * are never absolute pathnames
   */
  if (uv__path_is_a_directory(handle->path) == 0) {
    p = handle->dir_filename;  /* Set by uv__parse_data(). */
  } else {
    p = strrchr(handle->path, '/');
    if (p == NULL)
      p = handle->path;  /* Bare filename; report it as-is. */
    else
      p++;  /* Skip the '/' so only the basename is reported. */
  }

  /* TODO(bnoordhuis) Check uv__strscpy() return value. */
  uv__strscpy(fname, p, sizeof(fname));

  handle->cb(handle, fname, events, 0);
}
770 #endif
771 
772 
/* Initialize a filesystem-event handle. Returns UV_ENOSYS when the build
 * lacks AHAFS support (no sys/ahafs_evProds.h at compile time).
 */
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
#else
  return UV_ENOSYS;
#endif
}
781 
782 
/* Start watching `filename` for changes via AHAFS.
 * Resolves relative paths against the process cwd, verifies /aha is
 * mounted, creates the monitor file, and registers the watcher fd with
 * the loop. Returns 0 on success or a negative libuv error code.
 */
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* filename,
                      unsigned int flags) {
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
  int  fd, rc, str_offset = 0;
  char cwd[PATH_MAX];
  char absolute_path[PATH_MAX];
  char readlink_cwd[PATH_MAX];
  struct timeval zt;
  fd_set pollfd;


  /* Figure out whether filename is absolute or not */
  if (filename[0] == '\0') {
    /* Missing a pathname */
    return UV_ENOENT;
  }
  else if (filename[0] == '/') {
    /* We have absolute pathname */
    /* TODO(bnoordhuis) Check uv__strscpy() return value. */
    uv__strscpy(absolute_path, filename, sizeof(absolute_path));
  } else {
    /* We have a relative pathname, compose the absolute pathname */
    snprintf(cwd, sizeof(cwd), "/proc/%lu/cwd", (unsigned long) getpid());
    rc = readlink(cwd, readlink_cwd, sizeof(readlink_cwd) - 1);
    if (rc < 0)
      return UV__ERR(errno);  /* BUGFIX: was `return rc`, a raw -1 rather
                               * than a proper libuv (negative errno) code. */
    /* readlink does not null terminate our string */
    readlink_cwd[rc] = '\0';

    /* Drop a leading "./" so we don't produce "/cwd/./file". */
    if (filename[0] == '.' && filename[1] == '/')
      str_offset = 2;

    snprintf(absolute_path, sizeof(absolute_path), "%s%s", readlink_cwd,
             filename + str_offset);
  }

  if (uv__is_ahafs_mounted() < 0)  /* /aha checks failed */
    return UV_ENOSYS;

  /* Setup ahafs */
  rc = uv__setup_ahafs((const char *)absolute_path, &fd);
  if (rc != 0)
    return rc;

  /* Setup/Initialize all the libuv routines */
  uv__handle_start(handle);
  uv__io_init(&handle->event_watcher, uv__ahafs_event, fd);
  handle->path = uv__strdup(filename);
  handle->cb = cb;
  handle->dir_filename = NULL;

  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);

  /* AHAFS wants someone to poll for it to start mointoring.
   *  so kick-start it so that we don't miss an event in the
   *  eventuality of an event that occurs in the current loop. */
  do {
    memset(&zt, 0, sizeof(zt));
    FD_ZERO(&pollfd);
    FD_SET(fd, &pollfd);
    rc = select(fd + 1, &pollfd, NULL, NULL, &zt);
  } while (rc == -1 && errno == EINTR);
  return 0;
#else
  return UV_ENOSYS;
#endif
}
852 
853 
/* Stop watching: unregister the watcher, free path state, and close the
 * AHAFS monitor fd. Safe to call on an inactive handle (returns 0).
 */
int uv_fs_event_stop(uv_fs_event_t* handle) {
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
  if (!uv__is_active(handle))
    return 0;

  uv__io_close(handle->loop, &handle->event_watcher);
  uv__handle_stop(handle);

  /* Only directory watches allocate dir_filename (see uv__parse_data). */
  if (uv__path_is_a_directory(handle->path) == 0) {
    uv__free(handle->dir_filename);
    handle->dir_filename = NULL;
  }

  uv__free(handle->path);
  handle->path = NULL;
  uv__close(handle->event_watcher.fd);
  handle->event_watcher.fd = -1;

  return 0;
#else
  return UV_ENOSYS;
#endif
}
877 
878 
/* Handle-close hook: stopping the watcher releases all of its resources.
 * Without AHAFS support a handle can never be initialized, so reaching
 * this path is a bug (UNREACHABLE aborts).
 */
void uv__fs_event_close(uv_fs_event_t* handle) {
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
  uv_fs_event_stop(handle);
#else
  UNREACHABLE();
#endif
}
886 
887 
/* Capture argv for later use by the process-title functions and return a
 * heap-allocated deep copy of argv for the caller to use. On allocation
 * failure the original argv is returned unchanged.
 */
char** uv_setup_args(int argc, char** argv) {
  char exepath[UV__PATH_MAX];
  char** new_argv;
  size_t size;
  char* s;
  int i;

  if (argc <= 0)
    return argv;

  /* Save the original pointer to argv.
   * AIX uses argv to read the process name.
   * (Not the memory pointed to by argv[0..n] as on Linux.)
   */
  process_argv = argv;
  process_argc = argc;

  /* Use argv[0] to determine value for uv_exepath(). */
  size = sizeof(exepath);
  if (uv__search_path(argv[0], exepath, &size) == 0) {
    /* Take the mutex: original_exepath is also read by other API calls. */
    uv_once(&process_title_mutex_once, init_process_title_mutex_once);
    uv_mutex_lock(&process_title_mutex);
    original_exepath = uv__strdup(exepath);
    uv_mutex_unlock(&process_title_mutex);
  }

  /* Calculate how much memory we need for the argv strings. */
  size = 0;
  for (i = 0; i < argc; i++)
    size += strlen(argv[i]) + 1;

  /* Add space for the argv pointers. */
  size += (argc + 1) * sizeof(char*);

  new_argv = uv__malloc(size);
  if (new_argv == NULL)
    return argv;
  args_mem = new_argv;  /* Freed later by uv__process_title_cleanup(). */

  /* Copy over the strings and set up the pointer table. */
  s = (char*) &new_argv[argc + 1];  /* Strings live right after the pointers. */
  for (i = 0; i < argc; i++) {
    size = strlen(argv[i]) + 1;
    memcpy(s, argv[i], size);
    new_argv[i] = s;
    s += size;
  }
  new_argv[i] = NULL;  /* argv arrays are NULL-terminated by convention. */

  return new_argv;
}
939 
940 
/* Replace the process title shown by tools like ps(1). On AIX the title is
 * read from the original argv vector, so process_argv[0] is pointed at a
 * heap copy of the new title. Returns 0, UV_ENOBUFS, or UV_ENOMEM.
 */
int uv_set_process_title(const char* title) {
  char* new_title;

  /* If uv_setup_args wasn't called or failed, we can't continue. */
  if (process_argv == NULL || args_mem == NULL)
    return UV_ENOBUFS;

  /* We cannot free this pointer when libuv shuts down,
   * the process may still be using it.
   */
  new_title = uv__strdup(title);
  if (new_title == NULL)
    return UV_ENOMEM;

  uv_once(&process_title_mutex_once, init_process_title_mutex_once);
  uv_mutex_lock(&process_title_mutex);

  /* If this is the first time this is set,
   * don't free and set argv[1] to NULL.
   */
  if (process_title_ptr != NULL)
    uv__free(process_title_ptr);

  process_title_ptr = new_title;

  process_argv[0] = process_title_ptr;
  /* Terminate the vector early so ps(1) shows only the new title. */
  if (process_argc > 1)
     process_argv[1] = NULL;

  uv_mutex_unlock(&process_title_mutex);

  return 0;
}
974 
975 
uv_get_process_title(char * buffer,size_t size)976 int uv_get_process_title(char* buffer, size_t size) {
977   size_t len;
978   if (buffer == NULL || size == 0)
979     return UV_EINVAL;
980 
981   /* If uv_setup_args wasn't called, we can't continue. */
982   if (process_argv == NULL)
983     return UV_ENOBUFS;
984 
985   uv_once(&process_title_mutex_once, init_process_title_mutex_once);
986   uv_mutex_lock(&process_title_mutex);
987 
988   len = strlen(process_argv[0]);
989   if (size <= len) {
990     uv_mutex_unlock(&process_title_mutex);
991     return UV_ENOBUFS;
992   }
993 
994   memcpy(buffer, process_argv[0], len);
995   buffer[len] = '\0';
996 
997   uv_mutex_unlock(&process_title_mutex);
998 
999   return 0;
1000 }
1001 
1002 
/* Library-teardown hook: release the argv copy made by uv_setup_args(). */
void uv__process_title_cleanup(void) {
  uv__free(args_mem);  /* Keep valgrind happy. */
  args_mem = NULL;
}
1007 
1008 
uv_resident_set_memory(size_t * rss)1009 int uv_resident_set_memory(size_t* rss) {
1010   char pp[64];
1011   psinfo_t psinfo;
1012   int err;
1013   int fd;
1014 
1015   snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid());
1016 
1017   fd = open(pp, O_RDONLY);
1018   if (fd == -1)
1019     return UV__ERR(errno);
1020 
1021   /* FIXME(bnoordhuis) Handle EINTR. */
1022   err = UV_EINVAL;
1023   if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
1024     *rss = (size_t)psinfo.pr_rssize * 1024;
1025     err = 0;
1026   }
1027   uv__close(fd);
1028 
1029   return err;
1030 }
1031 
1032 
uv_uptime(double * uptime)1033 int uv_uptime(double* uptime) {
1034   struct utmp *utmp_buf;
1035   size_t entries = 0;
1036   time_t boot_time;
1037 
1038   boot_time = 0;
1039   utmpname(UTMP_FILE);
1040 
1041   setutent();
1042 
1043   while ((utmp_buf = getutent()) != NULL) {
1044     if (utmp_buf->ut_user[0] && utmp_buf->ut_type == USER_PROCESS)
1045       ++entries;
1046     if (utmp_buf->ut_type == BOOT_TIME)
1047       boot_time = utmp_buf->ut_time;
1048   }
1049 
1050   endutent();
1051 
1052   if (boot_time == 0)
1053     return UV_ENOSYS;
1054 
1055   *uptime = time(NULL) - boot_time;
1056   return 0;
1057 }
1058 
1059 
/* Collects per-CPU timing and model information through the AIX perfstat
 * API. On success, *cpu_infos points to a uv__malloc'd array of *count
 * entries (freed by the caller with uv_free_cpu_info()) and 0 is returned;
 * otherwise a libuv error code is returned and the outputs are untouched.
 */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  uv_cpu_info_t* cpu_info;
  perfstat_cpu_total_t ps_total;
  perfstat_cpu_t* ps_cpus;
  perfstat_id_t cpu_id;
  int result, ncpus, idx = 0;

  /* Machine-wide totals supply the clock speed and model description. */
  result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
  if (result == -1) {
    return UV_ENOSYS;
  }

  /* Calling perfstat_cpu() with a NULL buffer yields the CPU count. */
  ncpus = result = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
  if (result == -1) {
    return UV_ENOSYS;
  }

  ps_cpus = (perfstat_cpu_t*) uv__malloc(ncpus * sizeof(perfstat_cpu_t));
  if (!ps_cpus) {
    return UV_ENOMEM;
  }

  /* Propagate a truncated CPU name instead of silently ignoring it
   * (resolves the old TODO about checking the uv__strscpy() result). */
  result = uv__strscpy(cpu_id.name, FIRST_CPU, sizeof(cpu_id.name));
  if (result < 0) {
    uv__free(ps_cpus);
    return result;  /* UV_E2BIG */
  }

  result = perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus);
  if (result == -1) {
    uv__free(ps_cpus);
    return UV_ENOSYS;
  }

  *cpu_infos = (uv_cpu_info_t*) uv__malloc(ncpus * sizeof(uv_cpu_info_t));
  if (!*cpu_infos) {
    uv__free(ps_cpus);
    return UV_ENOMEM;
  }

  *count = ncpus;

  cpu_info = *cpu_infos;
  while (idx < ncpus) {
    /* processorHZ is in Hz; report MHz as the other unix backends do. */
    cpu_info->speed = (int)(ps_total.processorHZ / 1000000);
    cpu_info->model = uv__strdup(ps_total.description);
    cpu_info->cpu_times.user = ps_cpus[idx].user;
    cpu_info->cpu_times.sys = ps_cpus[idx].sys;
    cpu_info->cpu_times.idle = ps_cpus[idx].idle;
    /* AIX has no irq counter; wait time is the closest available metric. */
    cpu_info->cpu_times.irq = ps_cpus[idx].wait;
    cpu_info->cpu_times.nice = 0;
    cpu_info++;
    idx++;
  }

  uv__free(ps_cpus);
  return 0;
}
1114 
1115 
/* Enumerates the system's up-and-running IPv4/IPv6 interfaces via the
 * SIOCGIFCONF ioctl and returns them in a freshly allocated array.
 * On success *addresses/*count describe the result and 0 is returned; on
 * failure a libuv error code is returned with *addresses == NULL and
 * *count == 0. Callers free the array with uv_free_interface_addresses().
 *
 * The ifreq buffer is walked three times with identical stride logic:
 * once to count matching interfaces, once to fill in addresses/netmasks,
 * and once to copy link-layer (AF_LINK) physical addresses by name match.
 */
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  int sockfd, sock6fd, inet6, i, r, size = 1;
  struct ifconf ifc;
  struct ifreq *ifr, *p, flg;
  struct in6_ifreq if6;
  struct sockaddr_dl* sa_addr;

  /* Initialize everything the cleanup/error paths inspect. */
  ifc.ifc_req = NULL;
  sock6fd = -1;
  r = 0;
  *count = 0;
  *addresses = NULL;

  if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP))) {
    r = UV__ERR(errno);
    goto cleanup;
  }

  /* A separate AF_INET6 socket is needed for the IPv6 netmask ioctl. */
  if (0 > (sock6fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_IP))) {
    r = UV__ERR(errno);
    goto cleanup;
  }

  /* AIX-specific: ask the kernel how large the SIOCGIFCONF buffer must be. */
  if (ioctl(sockfd, SIOCGSIZIFCONF, &size) == -1) {
    r = UV__ERR(errno);
    goto cleanup;
  }

  ifc.ifc_req = (struct ifreq*)uv__malloc(size);
  if (ifc.ifc_req == NULL) {
    r = UV_ENOMEM;
    goto cleanup;
  }
  ifc.ifc_len = size;
  if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
    r = UV__ERR(errno);
    goto cleanup;
  }

/* Entries are variable length: the embedded sockaddr may be longer than
 * sizeof(struct sockaddr), as reported by its sa_len field. */
#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))

  /* Count all up and running ipv4/ipv6 addresses */
  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    /* Step to the next entry before any `continue` below. */
    ifr = (struct ifreq*)
      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (!(p->ifr_addr.sa_family == AF_INET6 ||
          p->ifr_addr.sa_family == AF_INET))
      continue;

    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
      r = UV__ERR(errno);
      goto cleanup;
    }

    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
      continue;

    (*count)++;
  }

  if (*count == 0)
    goto cleanup;

  /* Alloc the return interface structs */
  *addresses = uv__calloc(*count, sizeof(**addresses));
  if (!(*addresses)) {
    r = UV_ENOMEM;
    goto cleanup;
  }
  address = *addresses;

  /* Second pass: fill in one entry per interface counted above. */
  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (!(p->ifr_addr.sa_family == AF_INET6 ||
          p->ifr_addr.sa_family == AF_INET))
      continue;

    inet6 = (p->ifr_addr.sa_family == AF_INET6);

    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1)
      goto syserror;

    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
      continue;

    /* All conditions above must match count loop */

    address->name = uv__strdup(p->ifr_name);

    if (inet6)
      address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr);
    else
      address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);

    if (inet6) {
      /* The IPv6 netmask requires a separate ioctl on the INET6 socket. */
      memset(&if6, 0, sizeof(if6));
      r = uv__strscpy(if6.ifr_name, p->ifr_name, sizeof(if6.ifr_name));
      if (r == UV_E2BIG)
        goto cleanup;
      r = 0;
      memcpy(&if6.ifr_Addr, &p->ifr_addr, sizeof(if6.ifr_Addr));
      if (ioctl(sock6fd, SIOCGIFNETMASK6, &if6) == -1)
        goto syserror;
      address->netmask.netmask6 = *((struct sockaddr_in6*) &if6.ifr_Addr);
      /* Explicitly set family as the ioctl call appears to return it as 0. */
      address->netmask.netmask6.sin6_family = AF_INET6;
    } else {
      /* SIOCGIFNETMASK overwrites p->ifr_addr in place with the mask. */
      if (ioctl(sockfd, SIOCGIFNETMASK, p) == -1)
        goto syserror;
      address->netmask.netmask4 = *((struct sockaddr_in*) &p->ifr_addr);
      /* Explicitly set family as the ioctl call appears to return it as 0. */
      address->netmask.netmask4.sin_family = AF_INET;
    }

    address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;

    address++;
  }

  /* Fill in physical addresses. */
  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (p->ifr_addr.sa_family != AF_LINK)
      continue;

    /* Match AF_LINK records to the already-filled entries by name. */
    address = *addresses;
    for (i = 0; i < *count; i++) {
      if (strcmp(address->name, p->ifr_name) == 0) {
        sa_addr = (struct sockaddr_dl*) &p->ifr_addr;
        memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
      }
      address++;
    }
  }

#undef ADDR_SIZE
  goto cleanup;

syserror:
  /* Undo the partially-filled result before reporting failure. */
  uv_free_interface_addresses(*addresses, *count);
  *addresses = NULL;
  *count = 0;
  r = UV_ENOSYS;

cleanup:
  if (sockfd != -1)
    uv__close(sockfd);
  if (sock6fd != -1)
    uv__close(sock6fd);
  uv__free(ifc.ifc_req);
  return r;
}
1282 
1283 
void uv_free_interface_addresses(uv_interface_address_t* addresses,
  int count) {
  int i;

  /* Each entry owns its strdup'd name; release those first, then the
   * array allocated by uv_interface_addresses(). */
  for (i = 0; i < count; i++)
    uv__free(addresses[i].name);

  uv__free(addresses);
}
1294 
1295 
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct poll_ctl pc;
  struct pollfd* cached;
  uintptr_t nfds;
  uintptr_t i;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  /* The loop stashes the most recent poll results (and their count) in the
   * two slots past the end of the watcher array. */
  cached = (struct pollfd*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];

  /* Mark any pending event for this descriptor as stale. */
  if (cached != NULL) {
    for (i = 0; i < nfds; i++) {
      if ((int) cached[i].fd == fd)
        cached[i].fd = -1;
    }
  }

  /* Drop the descriptor from the kernel pollset, if one is open. */
  pc.events = 0;
  pc.cmd = PS_DELETE;
  pc.fd = fd;
  if (loop->backend_fd >= 0)
    pollset_ctl(loop->backend_fd, &pc, 1);
}
1321