xref: /libuv/src/unix/fs.c (revision 32603fd5)
1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 /* Caveat emptor: this file deviates from the libuv convention of returning
23  * negated errno codes. Most uv_fs_*() functions map directly to the system
24  * call of the same name. For more complex wrappers, it's easier to just
25  * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26  * getting the errno to the right place (req->result or the return value).
27  */
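/* For example, a wrapper that fails returns -1 with errno set; uv__fs_work()
 * then stores UV__ERR(errno) in req->result, which POST either returns
 * directly for synchronous (cb == NULL) calls or leaves for the callback to
 * inspect after uv__fs_done() runs.
 */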
28 
29 #include "uv.h"
30 #include "internal.h"
31 
32 #include <errno.h>
33 #include <dlfcn.h>
34 #include <stdatomic.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <limits.h> /* PATH_MAX */
39 
40 #include <sys/types.h>
41 #include <sys/socket.h>
42 #include <sys/stat.h>
43 #include <sys/time.h>
44 #include <sys/uio.h>
45 #include <unistd.h>
46 #include <fcntl.h>
47 #include <poll.h>
48 
49 #if defined(__linux__)
50 # include <sys/sendfile.h>
51 #endif
52 
53 #if defined(__sun)
54 # include <sys/sendfile.h>
55 # include <sys/sysmacros.h>
56 #endif
57 
58 #if defined(__APPLE__)
59 # include <sys/sysctl.h>
60 #elif defined(__linux__) && !defined(FICLONE)
61 # include <sys/ioctl.h>
62 # define FICLONE _IOW(0x94, 9, int)
63 #endif
64 
65 #if defined(_AIX) && !defined(_AIX71)
66 # include <utime.h>
67 #endif
68 
69 #if defined(__APPLE__)            ||                                      \
70     defined(__DragonFly__)        ||                                      \
71     defined(__FreeBSD__)          ||                                      \
72     defined(__OpenBSD__)          ||                                      \
73     defined(__NetBSD__)
74 # include <sys/param.h>
75 # include <sys/mount.h>
76 #elif defined(__sun)      || \
77       defined(__MVS__)    || \
78       defined(__NetBSD__) || \
79       defined(__HAIKU__)  || \
80       defined(__QNX__)
81 # include <sys/statvfs.h>
82 #else
83 # include <sys/statfs.h>
84 #endif
85 
86 #if defined(_AIX) && _XOPEN_SOURCE <= 600
87 extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
88 #endif
89 
90 #define INIT(subtype)                                                         \
91   do {                                                                        \
92     if (req == NULL)                                                          \
93       return UV_EINVAL;                                                       \
94     UV_REQ_INIT(req, UV_FS);                                                  \
95     req->fs_type = UV_FS_ ## subtype;                                         \
96     req->result = 0;                                                          \
97     req->ptr = NULL;                                                          \
98     req->loop = loop;                                                         \
99     req->path = NULL;                                                         \
100     req->new_path = NULL;                                                     \
101     req->bufs = NULL;                                                         \
102     req->cb = cb;                                                             \
103   }                                                                           \
104   while (0)
105 
106 #define PATH                                                                  \
107   do {                                                                        \
108     assert(path != NULL);                                                     \
109     if (cb == NULL) {                                                         \
110       req->path = path;                                                       \
111     } else {                                                                  \
112       req->path = uv__strdup(path);                                           \
113       if (req->path == NULL)                                                  \
114         return UV_ENOMEM;                                                     \
115     }                                                                         \
116   }                                                                           \
117   while (0)
118 
119 #define PATH2                                                                 \
120   do {                                                                        \
121     if (cb == NULL) {                                                         \
122       req->path = path;                                                       \
123       req->new_path = new_path;                                               \
124     } else {                                                                  \
125       size_t path_len;                                                        \
126       size_t new_path_len;                                                    \
127       path_len = strlen(path) + 1;                                            \
128       new_path_len = strlen(new_path) + 1;                                    \
129       req->path = uv__malloc(path_len + new_path_len);                        \
130       if (req->path == NULL)                                                  \
131         return UV_ENOMEM;                                                     \
132       req->new_path = req->path + path_len;                                   \
133       memcpy((void*) req->path, path, path_len);                              \
134       memcpy((void*) req->new_path, new_path, new_path_len);                  \
135     }                                                                         \
136   }                                                                           \
137   while (0)
138 
139 #define POST                                                                  \
140   do {                                                                        \
141     if (cb != NULL) {                                                         \
142       uv__req_register(loop);                                                 \
143       uv__work_submit(loop,                                                   \
144                       &req->work_req,                                         \
145                       UV__WORK_FAST_IO,                                       \
146                       uv__fs_work,                                            \
147                       uv__fs_done);                                           \
148       return 0;                                                               \
149     }                                                                         \
150     else {                                                                    \
151       uv__fs_work(&req->work_req);                                            \
152       return req->result;                                                     \
153     }                                                                         \
154   }                                                                           \
155   while (0)
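/* Taken together these macros give every uv_fs_*() entry point the same
 * shape. uv_fs_access() further down, for instance, boils down to roughly:
 *
 *   INIT(ACCESS);        (validate req, fill in the common fields)
 *   PATH;                (copy the path when a callback is supplied)
 *   req->flags = flags;  (operation-specific arguments)
 *   POST;                (submit to the threadpool, or run inline if cb == NULL)
 */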
156 
157 
158 static int uv__fs_close(int fd) {
159   int rc;
160 
161   rc = uv__close_nocancel(fd);
162   if (rc == -1)
163     if (errno == EINTR || errno == EINPROGRESS)
164       rc = 0;  /* The close is in progress, not an error. */
165 
166   return rc;
167 }
168 
169 
170 static ssize_t uv__fs_fsync(uv_fs_t* req) {
171 #if defined(__APPLE__)
172   /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
173    * to the drive platters. This is in contrast to Linux's fdatasync and fsync
174    * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
175    * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
176    * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
177    * This is the same approach taken by sqlite, except sqlite does not issue
178    * an F_BARRIERFSYNC call.
179    */
180   int r;
181 
182   r = fcntl(req->file, F_FULLFSYNC);
183   if (r != 0)
184     r = fcntl(req->file, 85 /* F_BARRIERFSYNC */);  /* fsync + barrier */
185   if (r != 0)
186     r = fsync(req->file);
187   return r;
188 #else
189   return fsync(req->file);
190 #endif
191 }
192 
193 
194 static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
195 #if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
196   return fdatasync(req->file);
197 #elif defined(__APPLE__)
198   /* See the comment in uv__fs_fsync. */
199   return uv__fs_fsync(req);
200 #else
201   return fsync(req->file);
202 #endif
203 }
204 
205 
206 UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
207   struct timespec ts;
208   ts.tv_sec  = time;
209   ts.tv_nsec = (time - ts.tv_sec) * 1e9;
210 
211  /* TODO(bnoordhuis) Remove this. utimensat() has nanosecond resolution but we
212   * stick to microsecond resolution for the sake of consistency with other
213   * platforms. I'm the original author of this compatibility hack but I'm
214   * less convinced it's useful nowadays.
215   */
216   ts.tv_nsec -= ts.tv_nsec % 1000;
217 
218   if (ts.tv_nsec < 0) {
219     ts.tv_nsec += 1e9;
220     ts.tv_sec -= 1;
221   }
222   return ts;
223 }
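/* Worked example for uv__fs_to_timespec(): time = -1.5 first yields
 * tv_sec = -1 and tv_nsec = -500000000; the tv_nsec < 0 branch then
 * normalizes this to { tv_sec = -2, tv_nsec = 500000000 }, i.e. the same
 * -1.5 seconds expressed with the non-negative nanosecond field that a
 * timespec requires.
 */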
224 
225 UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
226   struct timeval tv;
227   tv.tv_sec  = time;
228   tv.tv_usec = (time - tv.tv_sec) * 1e6;
229   if (tv.tv_usec < 0) {
230     tv.tv_usec += 1e6;
231     tv.tv_sec -= 1;
232   }
233   return tv;
234 }
235 
236 static ssize_t uv__fs_futime(uv_fs_t* req) {
237 #if defined(__linux__)                                                        \
238     || defined(_AIX71)                                                        \
239     || defined(__HAIKU__)                                                     \
240     || defined(__GNU__)
241   struct timespec ts[2];
242   ts[0] = uv__fs_to_timespec(req->atime);
243   ts[1] = uv__fs_to_timespec(req->mtime);
244   return futimens(req->file, ts);
245 #elif defined(__APPLE__)                                                      \
246     || defined(__DragonFly__)                                                 \
247     || defined(__FreeBSD__)                                                   \
248     || defined(__NetBSD__)                                                    \
249     || defined(__OpenBSD__)                                                   \
250     || defined(__sun)
251   struct timeval tv[2];
252   tv[0] = uv__fs_to_timeval(req->atime);
253   tv[1] = uv__fs_to_timeval(req->mtime);
254 # if defined(__sun)
255   return futimesat(req->file, NULL, tv);
256 # else
257   return futimes(req->file, tv);
258 # endif
259 #elif defined(__MVS__)
260   attrib_t atr;
261   memset(&atr, 0, sizeof(atr));
262   atr.att_mtimechg = 1;
263   atr.att_atimechg = 1;
264   atr.att_mtime = req->mtime;
265   atr.att_atime = req->atime;
266   return __fchattr(req->file, &atr, sizeof(atr));
267 #else
268   errno = ENOSYS;
269   return -1;
270 #endif
271 }
272 
273 
274 static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
275   return mkdtemp((char*) req->path) ? 0 : -1;
276 }
277 
278 
279 static int (*uv__mkostemp)(char*, int);
280 
281 
282 static void uv__mkostemp_initonce(void) {
283   /* z/OS doesn't have RTLD_DEFAULT but that's okay
284    * because it doesn't have mkostemp(O_CLOEXEC) either.
285    */
286 #ifdef RTLD_DEFAULT
287   uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");
288 
289   /* We don't care about errors, but we do want to clean them up.
290    * If there has been no error, then dlerror() will just return
291    * NULL.
292    */
293   dlerror();
294 #endif  /* RTLD_DEFAULT */
295 }
296 
297 
298 static int uv__fs_mkstemp(uv_fs_t* req) {
299   static uv_once_t once = UV_ONCE_INIT;
300   int r;
301 #ifdef O_CLOEXEC
302   static _Atomic int no_cloexec_support;
303 #endif
304   static const char pattern[] = "XXXXXX";
305   static const size_t pattern_size = sizeof(pattern) - 1;
306   char* path;
307   size_t path_length;
308 
309   path = (char*) req->path;
310   path_length = strlen(path);
311 
312   /* EINVAL can be returned for 2 reasons:
313       1. The template's last 6 characters were not XXXXXX
314       2. open() didn't support O_CLOEXEC
315      We want to avoid going to the fallback path in case
316      of 1, so it is checked manually first. */
317   if (path_length < pattern_size ||
318       strcmp(path + path_length - pattern_size, pattern)) {
319     errno = EINVAL;
320     r = -1;
321     goto clobber;
322   }
323 
324   uv_once(&once, uv__mkostemp_initonce);
325 
326 #ifdef O_CLOEXEC
327   if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 &&
328       uv__mkostemp != NULL) {
329     r = uv__mkostemp(path, O_CLOEXEC);
330 
331     if (r >= 0)
332       return r;
333 
334     /* If mkostemp() returns EINVAL, it means the kernel doesn't
335        support O_CLOEXEC, so we just fall back to mkstemp() below. */
336     if (errno != EINVAL)
337       goto clobber;
338 
339     /* We set the static variable so that next calls don't even
340        try to use mkostemp. */
341     atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed);
342   }
343 #endif  /* O_CLOEXEC */
344 
345   if (req->cb != NULL)
346     uv_rwlock_rdlock(&req->loop->cloexec_lock);
347 
348   r = mkstemp(path);
349 
350   /* In case of failure `uv__cloexec` will leave error in `errno`,
351    * so it is enough to just set `r` to `-1`.
352    */
353   if (r >= 0 && uv__cloexec(r, 1) != 0) {
354     r = uv__close(r);
355     if (r != 0)
356       abort();
357     r = -1;
358   }
359 
360   if (req->cb != NULL)
361     uv_rwlock_rdunlock(&req->loop->cloexec_lock);
362 
363 clobber:
364   if (r < 0)
365     path[0] = '\0';
366   return r;
367 }
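/* Strategy recap: mkostemp() is looked up with dlsym() so binaries still run
 * against older libcs, an EINVAL from it flags no_cloexec_support so later
 * calls skip straight to mkstemp(), and the mkstemp() path holds the loop's
 * cloexec_lock (read side) across mkstemp() + uv__cloexec() so a concurrent
 * fork() elsewhere in libuv cannot inherit the descriptor before FD_CLOEXEC
 * is set.
 */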
368 
369 
370 static ssize_t uv__fs_open(uv_fs_t* req) {
371 #ifdef O_CLOEXEC
372   return open(req->path, req->flags | O_CLOEXEC, req->mode);
373 #else  /* O_CLOEXEC */
374   int r;
375 
376   if (req->cb != NULL)
377     uv_rwlock_rdlock(&req->loop->cloexec_lock);
378 
379   r = open(req->path, req->flags, req->mode);
380 
381   /* In case of failure `uv__cloexec` will leave error in `errno`,
382    * so it is enough to just set `r` to `-1`.
383    */
384   if (r >= 0 && uv__cloexec(r, 1) != 0) {
385     r = uv__close(r);
386     if (r != 0)
387       abort();
388     r = -1;
389   }
390 
391   if (req->cb != NULL)
392     uv_rwlock_rdunlock(&req->loop->cloexec_lock);
393 
394   return r;
395 #endif  /* O_CLOEXEC */
396 }
397 
398 
399 static ssize_t uv__preadv_or_pwritev_emul(int fd,
400                                           const struct iovec* bufs,
401                                           size_t nbufs,
402                                           off_t off,
403                                           int is_pread) {
404   ssize_t total;
405   ssize_t r;
406   size_t i;
407   size_t n;
408   void* p;
409 
410   total = 0;
411   for (i = 0; i < (size_t) nbufs; i++) {
412     p = bufs[i].iov_base;
413     n = bufs[i].iov_len;
414 
415     do
416       if (is_pread)
417         r = pread(fd, p, n, off);
418       else
419         r = pwrite(fd, p, n, off);
420     while (r == -1 && errno == EINTR);
421 
422     if (r == -1) {
423       if (total > 0)
424         return total;
425       return -1;
426     }
427 
428     off += r;
429     total += r;
430 
431     if ((size_t) r < n)
432       return total;
433   }
434 
435   return total;
436 }
437 
438 
439 #ifdef __linux__
440 typedef int uv__iovcnt;
441 #else
442 typedef size_t uv__iovcnt;
443 #endif
444 
445 
446 static ssize_t uv__preadv_emul(int fd,
447                                const struct iovec* bufs,
448                                uv__iovcnt nbufs,
449                                off_t off) {
450   return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/1);
451 }
452 
453 
454 static ssize_t uv__pwritev_emul(int fd,
455                                 const struct iovec* bufs,
456                                 uv__iovcnt nbufs,
457                                 off_t off) {
458   return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/0);
459 }
460 
461 
462 /* The function pointer cache is an uintptr_t because _Atomic void*
463  * doesn't work on macOS/iOS/etc.
464  */
465 static ssize_t uv__preadv_or_pwritev(int fd,
466                                      const struct iovec* bufs,
467                                      size_t nbufs,
468                                      off_t off,
469                                      _Atomic uintptr_t* cache,
470                                      int is_pread) {
471   ssize_t (*f)(int, const struct iovec*, uv__iovcnt, off_t);
472   void* p;
473 
474   p = (void*) atomic_load_explicit(cache, memory_order_relaxed);
475   if (p == NULL) {
476 #ifdef RTLD_DEFAULT
477     p = dlsym(RTLD_DEFAULT, is_pread ? "preadv" : "pwritev");
478     dlerror();  /* Clear errors. */
479 #endif  /* RTLD_DEFAULT */
480     if (p == NULL)
481       p = is_pread ? uv__preadv_emul : uv__pwritev_emul;
482     atomic_store_explicit(cache, (uintptr_t) p, memory_order_relaxed);
483   }
484 
485   /* Use memcpy instead of `f = p` to work around a compiler bug,
486    * see https://github.com/libuv/libuv/issues/4532
487    */
488   memcpy(&f, &p, sizeof(p));
489   return f(fd, bufs, nbufs, off);
490 }
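/* Relaxed atomics are sufficient for the cache: every thread that races on
 * the lookup computes the same function pointer, so the worst case is a
 * redundant dlsym() call, never an inconsistent value.
 */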
491 
492 
493 static ssize_t uv__preadv(int fd,
494                           const struct iovec* bufs,
495                           size_t nbufs,
496                           off_t off) {
497   static _Atomic uintptr_t cache;
498   return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/1);
499 }
500 
501 
502 static ssize_t uv__pwritev(int fd,
503                            const struct iovec* bufs,
504                            size_t nbufs,
505                            off_t off) {
506   static _Atomic uintptr_t cache;
507   return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/0);
508 }
509 
510 
511 static ssize_t uv__fs_read(uv_fs_t* req) {
512   const struct iovec* bufs;
513   unsigned int iovmax;
514   size_t nbufs;
515   ssize_t r;
516   off_t off;
517   int fd;
518 
519   fd = req->file;
520   off = req->off;
521   bufs = (const struct iovec*) req->bufs;
522   nbufs = req->nbufs;
523 
524   iovmax = uv__getiovmax();
525   if (nbufs > iovmax)
526     nbufs = iovmax;
527 
528   r = 0;
529   if (off < 0) {
530     if (nbufs == 1)
531       r = read(fd, bufs->iov_base, bufs->iov_len);
532     else if (nbufs > 1)
533       r = readv(fd, bufs, nbufs);
534   } else {
535     if (nbufs == 1)
536       r = pread(fd, bufs->iov_base, bufs->iov_len, off);
537     else if (nbufs > 1)
538       r = uv__preadv(fd, bufs, nbufs, off);
539   }
540 
541 #ifdef __PASE__
542   /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
543   if (r == -1 && errno == EOPNOTSUPP) {
544     struct stat buf;
545     ssize_t rc;
546     rc = uv__fstat(fd, &buf);
547     if (rc == 0 && S_ISDIR(buf.st_mode)) {
548       errno = EISDIR;
549     }
550   }
551 #endif
552 
553   /* We don't own the buffer list in the synchronous case. */
554   if (req->cb != NULL)
555     if (req->bufs != req->bufsml)
556       uv__free(req->bufs);
557 
558   req->bufs = NULL;
559   req->nbufs = 0;
560 
561   return r;
562 }
563 
564 
565 static int uv__fs_scandir_filter(const uv__dirent_t* dent) {
566   return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
567 }
568 
569 
570 static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
571   return strcmp((*a)->d_name, (*b)->d_name);
572 }
573 
574 
575 static ssize_t uv__fs_scandir(uv_fs_t* req) {
576   uv__dirent_t** dents;
577   int n;
578 
579   dents = NULL;
580   n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);
581 
582   /* NOTE: We will use nbufs as an index field */
583   req->nbufs = 0;
584 
585   if (n == 0) {
586     /* OS X still needs to deallocate some memory.
587      * Memory was allocated using the system allocator, so use free() here.
588      */
589     free(dents);
590     dents = NULL;
591   } else if (n == -1) {
592     return n;
593   }
594 
595   req->ptr = dents;
596 
597   return n;
598 }
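/* The NOTE above refers to uv_fs_scandir_next(), which uses req->nbufs as
 * the cursor while walking the dents array stored in req->ptr.
 */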
599 
600 static int uv__fs_opendir(uv_fs_t* req) {
601   uv_dir_t* dir;
602 
603   dir = uv__malloc(sizeof(*dir));
604   if (dir == NULL)
605     goto error;
606 
607   dir->dir = opendir(req->path);
608   if (dir->dir == NULL)
609     goto error;
610 
611   req->ptr = dir;
612   return 0;
613 
614 error:
615   uv__free(dir);
616   req->ptr = NULL;
617   return -1;
618 }
619 
620 static int uv__fs_readdir(uv_fs_t* req) {
621   uv_dir_t* dir;
622   uv_dirent_t* dirent;
623   struct dirent* res;
624   unsigned int dirent_idx;
625   unsigned int i;
626 
627   dir = req->ptr;
628   dirent_idx = 0;
629 
630   while (dirent_idx < dir->nentries) {
631     /* readdir() returns NULL on end of directory, as well as on error. errno
632        is used to differentiate between the two conditions. */
633     errno = 0;
634     res = readdir(dir->dir);
635 
636     if (res == NULL) {
637       if (errno != 0)
638         goto error;
639       break;
640     }
641 
642     if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
643       continue;
644 
645     dirent = &dir->dirents[dirent_idx];
646     dirent->name = uv__strdup(res->d_name);
647 
648     if (dirent->name == NULL)
649       goto error;
650 
651     dirent->type = uv__fs_get_dirent_type(res);
652     ++dirent_idx;
653   }
654 
655   return dirent_idx;
656 
657 error:
658   for (i = 0; i < dirent_idx; ++i) {
659     uv__free((char*) dir->dirents[i].name);
660     dir->dirents[i].name = NULL;
661   }
662 
663   return -1;
664 }
665 
666 static int uv__fs_closedir(uv_fs_t* req) {
667   uv_dir_t* dir;
668 
669   dir = req->ptr;
670 
671   if (dir->dir != NULL) {
672     closedir(dir->dir);
673     dir->dir = NULL;
674   }
675 
676   uv__free(req->ptr);
677   req->ptr = NULL;
678   return 0;
679 }
680 
681 static int uv__fs_statfs(uv_fs_t* req) {
682   uv_statfs_t* stat_fs;
683 #if defined(__sun)      || \
684     defined(__MVS__)    || \
685     defined(__NetBSD__) || \
686     defined(__HAIKU__)  || \
687     defined(__QNX__)
688   struct statvfs buf;
689 
690   if (0 != statvfs(req->path, &buf))
691 #else
692   struct statfs buf;
693 
694   if (0 != statfs(req->path, &buf))
695 #endif /* defined(__sun) */
696     return -1;
697 
698   stat_fs = uv__malloc(sizeof(*stat_fs));
699   if (stat_fs == NULL) {
700     errno = ENOMEM;
701     return -1;
702   }
703 
704 #if defined(__sun)        || \
705     defined(__MVS__)      || \
706     defined(__OpenBSD__)  || \
707     defined(__NetBSD__)   || \
708     defined(__HAIKU__)    || \
709     defined(__QNX__)
710   stat_fs->f_type = 0;  /* f_type is not supported. */
711 #else
712   stat_fs->f_type = buf.f_type;
713 #endif
714   stat_fs->f_bsize = buf.f_bsize;
715   stat_fs->f_blocks = buf.f_blocks;
716   stat_fs->f_bfree = buf.f_bfree;
717   stat_fs->f_bavail = buf.f_bavail;
718   stat_fs->f_files = buf.f_files;
719   stat_fs->f_ffree = buf.f_ffree;
720   req->ptr = stat_fs;
721   return 0;
722 }
723 
724 static ssize_t uv__fs_pathmax_size(const char* path) {
725   ssize_t pathmax;
726 
727   pathmax = pathconf(path, _PC_PATH_MAX);
728 
729   if (pathmax == -1)
730     pathmax = UV__PATH_MAX;
731 
732   return pathmax;
733 }
734 
735 static ssize_t uv__fs_readlink(uv_fs_t* req) {
736   ssize_t maxlen;
737   ssize_t len;
738   char* buf;
739 
740 #if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
741   maxlen = uv__fs_pathmax_size(req->path);
742 #else
743   /* We may not have a real PATH_MAX.  Read size of link.  */
744   struct stat st;
745   int ret;
746   ret = uv__lstat(req->path, &st);
747   if (ret != 0)
748     return -1;
749   if (!S_ISLNK(st.st_mode)) {
750     errno = EINVAL;
751     return -1;
752   }
753 
754   maxlen = st.st_size;
755 
756   /* According to readlink(2) lstat can report st_size == 0
757      for some symlinks, such as those in /proc or /sys.  */
758   if (maxlen == 0)
759     maxlen = uv__fs_pathmax_size(req->path);
760 #endif
761 
762   buf = uv__malloc(maxlen);
763 
764   if (buf == NULL) {
765     errno = ENOMEM;
766     return -1;
767   }
768 
769 #if defined(__MVS__)
770   len = os390_readlink(req->path, buf, maxlen);
771 #else
772   len = readlink(req->path, buf, maxlen);
773 #endif
774 
775   if (len == -1) {
776     uv__free(buf);
777     return -1;
778   }
779 
780   /* Uncommon case: resize to make room for the trailing nul byte. */
781   if (len == maxlen) {
782     buf = uv__reallocf(buf, len + 1);
783 
784     if (buf == NULL)
785       return -1;
786   }
787 
788   buf[len] = '\0';
789   req->ptr = buf;
790 
791   return 0;
792 }
793 
794 static ssize_t uv__fs_realpath(uv_fs_t* req) {
795   char* buf;
796   char* tmp;
797 
798 #if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
799   tmp = realpath(req->path, NULL);
800   if (tmp == NULL)
801     return -1;
802   buf = uv__strdup(tmp);
803   free(tmp); /* _Not_ uv__free. */
804   if (buf == NULL) {
805     errno = ENOMEM;
806     return -1;
807   }
808 #else
809   ssize_t len;
810 
811   (void)tmp;
812 
813   len = uv__fs_pathmax_size(req->path);
814   buf = uv__malloc(len + 1);
815 
816   if (buf == NULL) {
817     errno = ENOMEM;
818     return -1;
819   }
820 
821   if (realpath(req->path, buf) == NULL) {
822     uv__free(buf);
823     return -1;
824   }
825 #endif
826 
827   req->ptr = buf;
828 
829   return 0;
830 }
831 
832 static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
833   struct pollfd pfd;
834   int use_pread;
835   off_t offset;
836   ssize_t nsent;
837   ssize_t nread;
838   ssize_t nwritten;
839   size_t buflen;
840   size_t len;
841   ssize_t n;
842   int in_fd;
843   int out_fd;
844   char buf[8192];
845 
846   len = req->bufsml[0].len;
847   in_fd = req->flags;
848   out_fd = req->file;
849   offset = req->off;
850   use_pread = 1;
851 
852   /* Here are the rules regarding errors:
853    *
854    * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
855    *    The user needs to know that some data has already been sent, to stop
856    *    them from sending it twice.
857    *
858    * 2. Write errors are always reported. Write errors are bad because they
859    *    mean data loss: we've read data but now we can't write it out.
860    *
861    * We try to use pread() and fall back to regular read() if the source fd
862    * doesn't support positional reads, for example when it's a pipe fd.
863    *
864    * If we get EAGAIN when writing to the target fd, we poll() on it until
865    * it becomes writable again.
866    *
867    * FIXME: If we get a write error when use_pread==1, it should be safe to
868    *        return the number of sent bytes instead of an error because pread()
869    *        is, in theory, idempotent. However, special files in /dev or /proc
870    *        may support pread() but not necessarily return the same data on
871    *        successive reads.
872    *
873    * FIXME: There is no way now to signal that we managed to send *some* data
874    *        before a write error.
875    */
876   for (nsent = 0; (size_t) nsent < len; ) {
877     buflen = len - nsent;
878 
879     if (buflen > sizeof(buf))
880       buflen = sizeof(buf);
881 
882     do
883       if (use_pread)
884         nread = pread(in_fd, buf, buflen, offset);
885       else
886         nread = read(in_fd, buf, buflen);
887     while (nread == -1 && errno == EINTR);
888 
889     if (nread == 0)
890       goto out;
891 
892     if (nread == -1) {
893       if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
894         use_pread = 0;
895         continue;
896       }
897 
898       if (nsent == 0)
899         nsent = -1;
900 
901       goto out;
902     }
903 
904     for (nwritten = 0; nwritten < nread; ) {
905       do
906         n = write(out_fd, buf + nwritten, nread - nwritten);
907       while (n == -1 && errno == EINTR);
908 
909       if (n != -1) {
910         nwritten += n;
911         continue;
912       }
913 
914       if (errno != EAGAIN && errno != EWOULDBLOCK) {
915         nsent = -1;
916         goto out;
917       }
918 
919       pfd.fd = out_fd;
920       pfd.events = POLLOUT;
921       pfd.revents = 0;
922 
923       do
924         n = poll(&pfd, 1, -1);
925       while (n == -1 && errno == EINTR);
926 
927       if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
928         errno = EIO;
929         nsent = -1;
930         goto out;
931       }
932     }
933 
934     offset += nread;
935     nsent += nread;
936   }
937 
938 out:
939   if (nsent != -1)
940     req->off = offset;
941 
942   return nsent;
943 }
944 
945 
946 #ifdef __linux__
947 /* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
948  * in copy_file_range() when it shouldn't. There is no workaround except to
949  * fall back to a regular copy.
950  */
951 static int uv__is_buggy_cephfs(int fd) {
952   struct statfs s;
953 
954   if (-1 == fstatfs(fd, &s))
955     return 0;
956 
957   if (s.f_type != /* CephFS */ 0xC36400)
958     return 0;
959 
960   return uv__kernel_version() < /* 4.20.0 */ 0x041400;
961 }
962 
963 
964 static int uv__is_cifs_or_smb(int fd) {
965   struct statfs s;
966 
967   if (-1 == fstatfs(fd, &s))
968     return 0;
969 
970   switch ((unsigned) s.f_type) {
971   case 0x0000517Bu:  /* SMB */
972   case 0xFE534D42u:  /* SMB2 */
973   case 0xFF534D42u:  /* CIFS */
974     return 1;
975   }
976 
977   return 0;
978 }
979 
980 
981 static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
982                                           int out_fd, size_t len) {
983   static _Atomic int no_copy_file_range_support;
984   ssize_t r;
985 
986   if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) {
987     errno = ENOSYS;
988     return -1;
989   }
990 
991   r = uv__fs_copy_file_range(in_fd, off, out_fd, NULL, len, 0);
992 
993   if (r != -1)
994     return r;
995 
996   switch (errno) {
997   case EACCES:
998     /* Pre-4.20 kernels have a bug where CephFS uses the RADOS
999      * copy-from command when it shouldn't.
1000      */
1001     if (uv__is_buggy_cephfs(in_fd))
1002       errno = ENOSYS;  /* Use fallback. */
1003     break;
1004   case ENOSYS:
1005     atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed);
1006     break;
1007   case EPERM:
1008     /* It's been reported that CIFS spuriously fails.
1009      * Consider it a transient error.
1010      */
1011     if (uv__is_cifs_or_smb(out_fd))
1012       errno = ENOSYS;  /* Use fallback. */
1013     break;
1014   case ENOTSUP:
1015   case EXDEV:
1016     /* ENOTSUP - it could work on another file system type.
1017      * EXDEV - it will not work when in_fd and out_fd are not on the same
1018      *         mounted filesystem (pre Linux 5.3)
1019      */
1020     errno = ENOSYS;  /* Use fallback. */
1021     break;
1022   }
1023 
1024   return -1;
1025 }
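/* In short: every condition where a plain copy would still succeed (buggy
 * CephFS, flaky CIFS/SMB, missing kernel support, cross-device copies on
 * pre-5.3 kernels) is collapsed into ENOSYS so that uv__fs_sendfile() below
 * falls through to sendfile() or the read/write emulation.
 */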
1026 
1027 #endif  /* __linux__ */
1028 
1029 
1030 static ssize_t uv__fs_sendfile(uv_fs_t* req) {
1031   int in_fd;
1032   int out_fd;
1033 
1034   in_fd = req->flags;
1035   out_fd = req->file;
1036 
1037 #if defined(__linux__) || defined(__sun)
1038   {
1039     off_t off;
1040     ssize_t r;
1041     size_t len;
1042     int try_sendfile;
1043 
1044     off = req->off;
1045     len = req->bufsml[0].len;
1046     try_sendfile = 1;
1047 
1048 #ifdef __linux__
1049     r = uv__fs_try_copy_file_range(in_fd, &off, out_fd, len);
1050     try_sendfile = (r == -1 && errno == ENOSYS);
1051 #endif
1052 
1053     if (try_sendfile)
1054       r = sendfile(out_fd, in_fd, &off, len);
1055 
1056     /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
1057      * it still writes out data. Fortunately, we can detect it by checking if
1058      * the offset has been updated.
1059      */
1060     if (r != -1 || off > req->off) {
1061       r = off - req->off;
1062       req->off = off;
1063       return r;
1064     }
1065 
1066     if (errno == EINVAL ||
1067         errno == EIO ||
1068         errno == ENOTSOCK ||
1069         errno == EXDEV) {
1070       errno = 0;
1071       return uv__fs_sendfile_emul(req);
1072     }
1073 
1074     return -1;
1075   }
1076 /* sendfile() on iOS (arm64) raises SIGSYS, which crashes the process. */
1077 #elif (defined(__APPLE__) && !TARGET_OS_IPHONE)                               \
1078     || defined(__DragonFly__)                                                 \
1079     || defined(__FreeBSD__)
1080   {
1081     off_t len;
1082     ssize_t r;
1083 
1084     /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
1085      * non-blocking mode and not all data could be written. If a non-zero
1086      * number of bytes have been sent, we don't consider it an error.
1087      */
1088 
1089 #if defined(__FreeBSD__) || defined(__DragonFly__)
1090 #if defined(__FreeBSD__)
1091     off_t off;
1092 
1093     off = req->off;
1094     r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
1095     if (r >= 0) {
1096         r = off - req->off;
1097         req->off = off;
1098         return r;
1099     }
1100 #endif
1101     len = 0;
1102     r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
1103 #else
1104     /* The Darwin sendfile() takes len as an input for the length to send,
1105      * so make sure to initialize it with the caller's value. */
1106     len = req->bufsml[0].len;
1107     r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
1108 #endif
1109 
1110      /*
1111      * The man page for sendfile(2) on DragonFly states that `len` contains
1112      * a meaningful value ONLY in case of EAGAIN and EINTR.
1113      * Nothing is said about its value in case of other errors, so it is
1114      * better not to rely on the potentially wrong assumption that it was
1115      * not modified by the syscall.
1116      */
1117     if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
1118       req->off += len;
1119       return (ssize_t) len;
1120     }
1121 
1122     if (errno == EINVAL ||
1123         errno == EIO ||
1124         errno == ENOTSOCK ||
1125         errno == EXDEV) {
1126       errno = 0;
1127       return uv__fs_sendfile_emul(req);
1128     }
1129 
1130     return -1;
1131   }
1132 #else
1133   /* Squelch compiler warnings. */
1134   (void) &in_fd;
1135   (void) &out_fd;
1136 
1137   return uv__fs_sendfile_emul(req);
1138 #endif
1139 }
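/* Summary of the fallback chain above: Linux tries copy_file_range(), then
 * sendfile(), then uv__fs_sendfile_emul(); FreeBSD likewise tries
 * copy_file_range() before sendfile(); Darwin and DragonFly go straight to
 * sendfile(); and every branch ends up in the read/write emulation for
 * errors such as EINVAL, EIO, ENOTSOCK or EXDEV.
 */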
1140 
1141 
1142 static ssize_t uv__fs_utime(uv_fs_t* req) {
1143 #if defined(__linux__)                                                         \
1144     || defined(_AIX71)                                                         \
1145     || defined(__sun)                                                          \
1146     || defined(__HAIKU__)
1147   struct timespec ts[2];
1148   ts[0] = uv__fs_to_timespec(req->atime);
1149   ts[1] = uv__fs_to_timespec(req->mtime);
1150   return utimensat(AT_FDCWD, req->path, ts, 0);
1151 #elif defined(__APPLE__)                                                      \
1152     || defined(__DragonFly__)                                                 \
1153     || defined(__FreeBSD__)                                                   \
1154     || defined(__NetBSD__)                                                    \
1155     || defined(__OpenBSD__)
1156   struct timeval tv[2];
1157   tv[0] = uv__fs_to_timeval(req->atime);
1158   tv[1] = uv__fs_to_timeval(req->mtime);
1159   return utimes(req->path, tv);
1160 #elif defined(_AIX)                                                           \
1161     && !defined(_AIX71)
1162   struct utimbuf buf;
1163   buf.actime = req->atime;
1164   buf.modtime = req->mtime;
1165   return utime(req->path, &buf);
1166 #elif defined(__MVS__)
1167   attrib_t atr;
1168   memset(&atr, 0, sizeof(atr));
1169   atr.att_mtimechg = 1;
1170   atr.att_atimechg = 1;
1171   atr.att_mtime = req->mtime;
1172   atr.att_atime = req->atime;
1173   return __lchattr((char*) req->path, &atr, sizeof(atr));
1174 #else
1175   errno = ENOSYS;
1176   return -1;
1177 #endif
1178 }
1179 
1180 
1181 static ssize_t uv__fs_lutime(uv_fs_t* req) {
1182 #if defined(__linux__)            ||                                           \
1183     defined(_AIX71)               ||                                           \
1184     defined(__sun)                ||                                           \
1185     defined(__HAIKU__)            ||                                           \
1186     defined(__GNU__)              ||                                           \
1187     defined(__OpenBSD__)
1188   struct timespec ts[2];
1189   ts[0] = uv__fs_to_timespec(req->atime);
1190   ts[1] = uv__fs_to_timespec(req->mtime);
1191   return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
1192 #elif defined(__APPLE__)          ||                                          \
1193       defined(__DragonFly__)      ||                                          \
1194       defined(__FreeBSD__)        ||                                          \
1195       defined(__NetBSD__)
1196   struct timeval tv[2];
1197   tv[0] = uv__fs_to_timeval(req->atime);
1198   tv[1] = uv__fs_to_timeval(req->mtime);
1199   return lutimes(req->path, tv);
1200 #else
1201   errno = ENOSYS;
1202   return -1;
1203 #endif
1204 }
1205 
1206 
1207 static ssize_t uv__fs_write(uv_fs_t* req) {
1208   const struct iovec* bufs;
1209   size_t nbufs;
1210   ssize_t r;
1211   off_t off;
1212   int fd;
1213 
1214   fd = req->file;
1215   off = req->off;
1216   bufs = (const struct iovec*) req->bufs;
1217   nbufs = req->nbufs;
1218 
1219   r = 0;
1220   if (off < 0) {
1221     if (nbufs == 1)
1222       r = write(fd, bufs->iov_base, bufs->iov_len);
1223     else if (nbufs > 1)
1224       r = writev(fd, bufs, nbufs);
1225   } else {
1226     if (nbufs == 1)
1227       r = pwrite(fd, bufs->iov_base, bufs->iov_len, off);
1228     else if (nbufs > 1)
1229       r = uv__pwritev(fd, bufs, nbufs, off);
1230   }
1231 
1232   return r;
1233 }
1234 
1235 
1236 static ssize_t uv__fs_copyfile(uv_fs_t* req) {
1237   uv_fs_t fs_req;
1238   uv_file srcfd;
1239   uv_file dstfd;
1240   struct stat src_statsbuf;
1241   struct stat dst_statsbuf;
1242   struct timespec times[2];
1243   int dst_flags;
1244   int result;
1245   int err;
1246   off_t bytes_to_send;
1247   off_t in_offset;
1248   off_t bytes_written;
1249   size_t bytes_chunk;
1250 
1251   dstfd = -1;
1252   err = 0;
1253 
1254   /* Open the source file. */
1255   srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
1256   uv_fs_req_cleanup(&fs_req);
1257 
1258   if (srcfd < 0)
1259     return srcfd;
1260 
1261   /* Get the source file's mode. */
1262   if (uv__fstat(srcfd, &src_statsbuf)) {
1263     err = UV__ERR(errno);
1264     goto out;
1265   }
1266 
1267   dst_flags = O_WRONLY | O_CREAT;
1268 
1269   if (req->flags & UV_FS_COPYFILE_EXCL)
1270     dst_flags |= O_EXCL;
1271 
1272   /* Open the destination file. */
1273   dstfd = uv_fs_open(NULL,
1274                      &fs_req,
1275                      req->new_path,
1276                      dst_flags,
1277                      src_statsbuf.st_mode,
1278                      NULL);
1279   uv_fs_req_cleanup(&fs_req);
1280 
1281   if (dstfd < 0) {
1282     err = dstfd;
1283     goto out;
1284   }
1285 
1286   /* If the file is not being opened exclusively, verify that the source and
1287      destination are not the same file. If they are the same, bail out early. */
1288   if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
1289     /* Get the destination file's mode. */
1290     if (uv__fstat(dstfd, &dst_statsbuf)) {
1291       err = UV__ERR(errno);
1292       goto out;
1293     }
1294 
1295     /* Check if srcfd and dstfd refer to the same file */
1296     if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
1297         src_statsbuf.st_ino == dst_statsbuf.st_ino) {
1298       goto out;
1299     }
1300 
1301     /* Truncate the file in case the destination already existed. */
1302     if (ftruncate(dstfd, 0) != 0) {
1303       err = UV__ERR(errno);
1304 
1305       /* ftruncate() on ceph-fuse fails with EACCES when the file is created
1306        * with read only permissions. Since ftruncate() on a newly created
1307        * file is a meaningless operation anyway, detect that condition
1308        * and squelch the error.
1309        */
1310       if (err != UV_EACCES)
1311         goto out;
1312 
1313       if (dst_statsbuf.st_size > 0)
1314         goto out;
1315 
1316       err = 0;
1317     }
1318   }
1319 
1320   /**
1321    * Change the timestamps of the destination file to match the source file.
1322    */
1323 #if defined(__APPLE__)
1324   times[0] = src_statsbuf.st_atimespec;
1325   times[1] = src_statsbuf.st_mtimespec;
1326 #elif defined(_AIX)
1327   times[0].tv_sec = src_statsbuf.st_atime;
1328   times[0].tv_nsec = src_statsbuf.st_atime_n;
1329   times[1].tv_sec = src_statsbuf.st_mtime;
1330   times[1].tv_nsec = src_statsbuf.st_mtime_n;
1331 #else
1332   times[0] = src_statsbuf.st_atim;
1333   times[1] = src_statsbuf.st_mtim;
1334 #endif
1335 
1336   if (futimens(dstfd, times) == -1) {
1337     err = UV__ERR(errno);
1338     goto out;
1339   }
1340 
1341   /*
1342    * Change the ownership and permissions of the destination file to match the
1343    * source file.
1344    * `cp -p` does not care about errors here, so we don't either. Reuse the
1345    * `result` variable to silence a -Wunused-result warning.
1346    */
1347   result = fchown(dstfd, src_statsbuf.st_uid, src_statsbuf.st_gid);
1348 
1349   if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
1350     err = UV__ERR(errno);
1351 #ifdef __linux__
1352     /* fchmod() on CIFS shares always fails with EPERM unless the share is
1353      * mounted with "noperm". As fchmod() is a meaningless operation on such
1354      * shares anyway, detect that condition and squelch the error.
1355      */
1356     if (err != UV_EPERM)
1357       goto out;
1358 
1359     if (!uv__is_cifs_or_smb(dstfd))
1360       goto out;
1361 
1362     err = 0;
1363 #else  /* !__linux__ */
1364     goto out;
1365 #endif  /* !__linux__ */
1366   }
1367 
1368 #ifdef FICLONE
1369   if (req->flags & UV_FS_COPYFILE_FICLONE ||
1370       req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1371     if (ioctl(dstfd, FICLONE, srcfd) == 0) {
1372       /* ioctl() with FICLONE succeeded. */
1373       goto out;
1374     }
1375     /* If an error occurred and force was set, return the error to the caller;
1376      * fall back to sendfile() when force was not set. */
1377     if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1378       err = UV__ERR(errno);
1379       goto out;
1380     }
1381   }
1382 #else
1383   if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1384     err = UV_ENOSYS;
1385     goto out;
1386   }
1387 #endif
1388 
1389   bytes_to_send = src_statsbuf.st_size;
1390   in_offset = 0;
1391   while (bytes_to_send != 0) {
1392     bytes_chunk = SSIZE_MAX;
1393     if (bytes_to_send < (off_t) bytes_chunk)
1394       bytes_chunk = bytes_to_send;
1395     uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
1396     bytes_written = fs_req.result;
1397     uv_fs_req_cleanup(&fs_req);
1398 
1399     if (bytes_written < 0) {
1400       err = bytes_written;
1401       break;
1402     }
1403 
1404     bytes_to_send -= bytes_written;
1405     in_offset += bytes_written;
1406   }
1407 
1408 out:
1409   if (err < 0)
1410     result = err;
1411   else
1412     result = 0;
1413 
1414   /* Close the source file. */
1415   err = uv__close_nocheckstdio(srcfd);
1416 
1417   /* Don't overwrite any existing errors. */
1418   if (err != 0 && result == 0)
1419     result = err;
1420 
1421   /* Close the destination file if it is open. */
1422   if (dstfd >= 0) {
1423     err = uv__close_nocheckstdio(dstfd);
1424 
1425     /* Don't overwrite any existing errors. */
1426     if (err != 0 && result == 0)
1427       result = err;
1428 
1429     /* Remove the destination file if something went wrong. */
1430     if (result != 0) {
1431       uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
1432       /* Ignore the unlink return value, as an error already happened. */
1433       uv_fs_req_cleanup(&fs_req);
1434     }
1435   }
1436 
1437   if (result == 0)
1438     return 0;
1439 
1440   errno = UV__ERR(result);
1441   return -1;
1442 }
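/* Data path of uv__fs_copyfile(): timestamps, ownership and permissions are
 * copied first, then the contents are duplicated either with a single
 * FICLONE ioctl (reflink) or by driving uv_fs_sendfile() in chunks of at
 * most SSIZE_MAX bytes; if anything fails, the partially written destination
 * is unlinked before the error is reported.
 */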
1443 
1444 static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
1445   dst->st_dev = src->st_dev;
1446   dst->st_mode = src->st_mode;
1447   dst->st_nlink = src->st_nlink;
1448   dst->st_uid = src->st_uid;
1449   dst->st_gid = src->st_gid;
1450   dst->st_rdev = src->st_rdev;
1451   dst->st_ino = src->st_ino;
1452   dst->st_size = src->st_size;
1453   dst->st_blksize = src->st_blksize;
1454   dst->st_blocks = src->st_blocks;
1455 
1456 #if defined(__APPLE__)
1457   dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
1458   dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
1459   dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
1460   dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
1461   dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
1462   dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
1463   dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
1464   dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
1465   dst->st_flags = src->st_flags;
1466   dst->st_gen = src->st_gen;
1467 #elif defined(__ANDROID__)
1468   dst->st_atim.tv_sec = src->st_atime;
1469   dst->st_atim.tv_nsec = src->st_atimensec;
1470   dst->st_mtim.tv_sec = src->st_mtime;
1471   dst->st_mtim.tv_nsec = src->st_mtimensec;
1472   dst->st_ctim.tv_sec = src->st_ctime;
1473   dst->st_ctim.tv_nsec = src->st_ctimensec;
1474   dst->st_birthtim.tv_sec = src->st_ctime;
1475   dst->st_birthtim.tv_nsec = src->st_ctimensec;
1476   dst->st_flags = 0;
1477   dst->st_gen = 0;
1478 #elif !defined(_AIX) &&         \
1479     !defined(__MVS__) && (      \
1480     defined(__DragonFly__)   || \
1481     defined(__FreeBSD__)     || \
1482     defined(__OpenBSD__)     || \
1483     defined(__NetBSD__)      || \
1484     defined(_GNU_SOURCE)     || \
1485     defined(_BSD_SOURCE)     || \
1486     defined(_SVID_SOURCE)    || \
1487     defined(_XOPEN_SOURCE)   || \
1488     defined(_DEFAULT_SOURCE))
1489   dst->st_atim.tv_sec = src->st_atim.tv_sec;
1490   dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
1491   dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
1492   dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
1493   dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
1494   dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
1495 # if defined(__FreeBSD__)    || \
1496      defined(__NetBSD__)
1497   dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
1498   dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
1499   dst->st_flags = src->st_flags;
1500   dst->st_gen = src->st_gen;
1501 # else
1502   dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
1503   dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
1504   dst->st_flags = 0;
1505   dst->st_gen = 0;
1506 # endif
1507 #else
1508   dst->st_atim.tv_sec = src->st_atime;
1509   dst->st_atim.tv_nsec = 0;
1510   dst->st_mtim.tv_sec = src->st_mtime;
1511   dst->st_mtim.tv_nsec = 0;
1512   dst->st_ctim.tv_sec = src->st_ctime;
1513   dst->st_ctim.tv_nsec = 0;
1514   dst->st_birthtim.tv_sec = src->st_ctime;
1515   dst->st_birthtim.tv_nsec = 0;
1516   dst->st_flags = 0;
1517   dst->st_gen = 0;
1518 #endif
1519 }
1520 
1521 
1522 static int uv__fs_statx(int fd,
1523                         const char* path,
1524                         int is_fstat,
1525                         int is_lstat,
1526                         uv_stat_t* buf) {
1527   STATIC_ASSERT(UV_ENOSYS != -1);
1528 #ifdef __linux__
1529   static _Atomic int no_statx;
1530   struct uv__statx statxbuf;
1531   int dirfd;
1532   int flags;
1533   int mode;
1534   int rc;
1535 
1536   if (atomic_load_explicit(&no_statx, memory_order_relaxed))
1537     return UV_ENOSYS;
1538 
1539   dirfd = AT_FDCWD;
1540   flags = 0; /* AT_STATX_SYNC_AS_STAT */
1541   mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */
1542 
1543   if (is_fstat) {
1544     dirfd = fd;
1545     flags |= 0x1000; /* AT_EMPTY_PATH */
1546   }
1547 
1548   if (is_lstat)
1549     flags |= AT_SYMLINK_NOFOLLOW;
1550 
1551   rc = uv__statx(dirfd, path, flags, mode, &statxbuf);
1552 
1553   switch (rc) {
1554   case 0:
1555     break;
1556   case -1:
1557     /* EPERM happens when a seccomp filter rejects the system call.
1558      * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
1559      * EOPNOTSUPP is used on DVS exported filesystems
1560      */
1561     if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
1562       return -1;
1563     /* Fall through. */
1564   default:
1565     /* Normally statx() returns zero on success and -1 on error. However,
1566      * on S390 RHEL running in a docker container with statx not
1567      * implemented, rc has been observed to be 1 with the error code set
1568      * to 0, in which case we also return ENOSYS.
1569      */
1570     atomic_store_explicit(&no_statx, 1, memory_order_relaxed);
1571     return UV_ENOSYS;
1572   }
1573 
1574   uv__statx_to_stat(&statxbuf, buf);
1575 
1576   return 0;
1577 #else
1578   return UV_ENOSYS;
1579 #endif /* __linux__ */
1580 }
1581 
1582 
1583 static int uv__fs_stat(const char *path, uv_stat_t *buf) {
1584   struct stat pbuf;
1585   int ret;
1586 
1587   ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
1588   if (ret != UV_ENOSYS)
1589     return ret;
1590 
1591   ret = uv__stat(path, &pbuf);
1592   if (ret == 0)
1593     uv__to_stat(&pbuf, buf);
1594 
1595   return ret;
1596 }
1597 
1598 
1599 static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
1600   struct stat pbuf;
1601   int ret;
1602 
1603   ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
1604   if (ret != UV_ENOSYS)
1605     return ret;
1606 
1607   ret = uv__lstat(path, &pbuf);
1608   if (ret == 0)
1609     uv__to_stat(&pbuf, buf);
1610 
1611   return ret;
1612 }
1613 
1614 
1615 static int uv__fs_fstat(int fd, uv_stat_t *buf) {
1616   struct stat pbuf;
1617   int ret;
1618 
1619   ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
1620   if (ret != UV_ENOSYS)
1621     return ret;
1622 
1623   ret = uv__fstat(fd, &pbuf);
1624   if (ret == 0)
1625     uv__to_stat(&pbuf, buf);
1626 
1627   return ret;
1628 }
1629 
1630 static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
1631   size_t offset;
1632   /* Figure out which bufs are done */
1633   for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
1634     size -= bufs[offset].len;
1635 
1636   /* Fix a partial read/write */
1637   if (size > 0) {
1638     bufs[offset].base += size;
1639     bufs[offset].len -= size;
1640   }
1641   return offset;
1642 }
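/* Worked example: with bufs of length { 10, 20 } and size = 15 (a short
 * write), the first buffer is consumed whole, the second is trimmed to
 * base + 5 / len = 15, and offset = 1 is returned so uv__fs_write_all()
 * resumes from the partially written buffer.
 */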
1643 
1644 static ssize_t uv__fs_write_all(uv_fs_t* req) {
1645   unsigned int iovmax;
1646   unsigned int nbufs;
1647   uv_buf_t* bufs;
1648   ssize_t total;
1649   ssize_t result;
1650 
1651   iovmax = uv__getiovmax();
1652   nbufs = req->nbufs;
1653   bufs = req->bufs;
1654   total = 0;
1655 
1656   while (nbufs > 0) {
1657     req->nbufs = nbufs;
1658     if (req->nbufs > iovmax)
1659       req->nbufs = iovmax;
1660 
1661     do
1662       result = uv__fs_write(req);
1663     while (result < 0 && errno == EINTR);
1664 
1665     if (result <= 0) {
1666       if (total == 0)
1667         total = result;
1668       break;
1669     }
1670 
1671     if (req->off >= 0)
1672       req->off += result;
1673 
1674     req->nbufs = uv__fs_buf_offset(req->bufs, result);
1675     req->bufs += req->nbufs;
1676     nbufs -= req->nbufs;
1677     total += result;
1678   }
1679 
1680   if (bufs != req->bufsml)
1681     uv__free(bufs);
1682 
1683   req->bufs = NULL;
1684   req->nbufs = 0;
1685 
1686   return total;
1687 }
1688 
1689 
1690 static void uv__fs_work(struct uv__work* w) {
1691   int retry_on_eintr;
1692   uv_fs_t* req;
1693   ssize_t r;
1694 
1695   req = container_of(w, uv_fs_t, work_req);
1696   retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
1697                      req->fs_type == UV_FS_READ);
1698 
1699   do {
1700     errno = 0;
1701 
1702 #define X(type, action)                                                       \
1703   case UV_FS_ ## type:                                                        \
1704     r = action;                                                               \
1705     break;
1706 
1707     switch (req->fs_type) {
1708     X(ACCESS, access(req->path, req->flags));
1709     X(CHMOD, chmod(req->path, req->mode));
1710     X(CHOWN, chown(req->path, req->uid, req->gid));
1711     X(CLOSE, uv__fs_close(req->file));
1712     X(COPYFILE, uv__fs_copyfile(req));
1713     X(FCHMOD, fchmod(req->file, req->mode));
1714     X(FCHOWN, fchown(req->file, req->uid, req->gid));
1715     X(LCHOWN, lchown(req->path, req->uid, req->gid));
1716     X(FDATASYNC, uv__fs_fdatasync(req));
1717     X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
1718     X(FSYNC, uv__fs_fsync(req));
1719     X(FTRUNCATE, ftruncate(req->file, req->off));
1720     X(FUTIME, uv__fs_futime(req));
1721     X(LUTIME, uv__fs_lutime(req));
1722     X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
1723     X(LINK, link(req->path, req->new_path));
1724     X(MKDIR, mkdir(req->path, req->mode));
1725     X(MKDTEMP, uv__fs_mkdtemp(req));
1726     X(MKSTEMP, uv__fs_mkstemp(req));
1727     X(OPEN, uv__fs_open(req));
1728     X(READ, uv__fs_read(req));
1729     X(SCANDIR, uv__fs_scandir(req));
1730     X(OPENDIR, uv__fs_opendir(req));
1731     X(READDIR, uv__fs_readdir(req));
1732     X(CLOSEDIR, uv__fs_closedir(req));
1733     X(READLINK, uv__fs_readlink(req));
1734     X(REALPATH, uv__fs_realpath(req));
1735     X(RENAME, rename(req->path, req->new_path));
1736     X(RMDIR, rmdir(req->path));
1737     X(SENDFILE, uv__fs_sendfile(req));
1738     X(STAT, uv__fs_stat(req->path, &req->statbuf));
1739     X(STATFS, uv__fs_statfs(req));
1740     X(SYMLINK, symlink(req->path, req->new_path));
1741     X(UNLINK, unlink(req->path));
1742     X(UTIME, uv__fs_utime(req));
1743     X(WRITE, uv__fs_write_all(req));
1744     default: abort();
1745     }
1746 #undef X
1747   } while (r == -1 && errno == EINTR && retry_on_eintr);
1748 
1749   if (r == -1)
1750     req->result = UV__ERR(errno);
1751   else
1752     req->result = r;
1753 
1754   if (r == 0 && (req->fs_type == UV_FS_STAT ||
1755                  req->fs_type == UV_FS_FSTAT ||
1756                  req->fs_type == UV_FS_LSTAT)) {
1757     req->ptr = &req->statbuf;
1758   }
1759 }
1760 
1761 
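/* Thread pool done callback, runs on the event loop thread.  A cancelled
 * request never ran uv__fs_work(), so its result is still 0 here and is
 * rewritten to UV_ECANCELED before the user callback fires.
 */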
static void uv__fs_done(struct uv__work* w, int status) {
  uv_fs_t* req;

  req = container_of(w, uv_fs_t, work_req);
  uv__req_unregister(req->loop);

  if (status == UV_ECANCELED) {
    assert(req->result == 0);
    req->result = UV_ECANCELED;
  }

  req->cb(req);
}


void uv__fs_post(uv_loop_t* loop, uv_fs_t* req) {
  uv__req_register(loop);
  uv__work_submit(loop,
                  &req->work_req,
                  UV__WORK_FAST_IO,
                  uv__fs_work,
                  uv__fs_done);
}


int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
  INIT(ACCESS);
  PATH;
  req->flags = flags;
  POST;
}


int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(CHMOD);
  PATH;
  req->mode = mode;
  POST;
}


int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
  INIT(CHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}


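/* The wrappers below share one shape: INIT()/PATH fill in the request and,
 * when a callback is supplied, the uv__iou_fs_*() helpers get the first shot.
 * On Linux, when io_uring support is usable, they queue the operation and
 * return nonzero, so the request completes without touching the thread pool;
 * otherwise they return 0 and POST takes over (thread pool for asynchronous
 * requests, inline execution for synchronous ones).
 */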
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(CLOSE);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_close(loop, req))
      return 0;
  POST;
}


int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
  INIT(FCHMOD);
  req->file = file;
  req->mode = mode;
  POST;
}


int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(FCHOWN);
  req->file = file;
  req->uid = uid;
  req->gid = gid;
  POST;
}


int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(LCHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}


int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FDATASYNC);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1))
      return 0;
  POST;
}


int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSTAT);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0))
      return 0;
  POST;
}


int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSYNC);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0))
      return 0;
  POST;
}


int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
  INIT(FTRUNCATE);
  req->file = file;
  req->off = off;
  POST;
}


int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(FUTIME);
  req->file = file;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}

int uv_fs_lutime(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(LUTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}


int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(LSTAT);
  PATH;
  if (cb != NULL)
    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1))
      return 0;
  POST;
}


int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
  INIT(LINK);
  PATH2;
  if (cb != NULL)
    if (uv__iou_fs_link(loop, req))
      return 0;
  POST;
}


int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(MKDIR);
  PATH;
  req->mode = mode;
  if (cb != NULL)
    if (uv__iou_fs_mkdir(loop, req))
      return 0;
  POST;
}


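/* mkdtemp()/mkstemp() rewrite their template argument in place, so the path
 * is copied here even for synchronous requests; uv_fs_req_cleanup() frees it.
 */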
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKDTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}


int uv_fs_mkstemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKSTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}


int uv_fs_open(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               int flags,
               int mode,
               uv_fs_cb cb) {
  INIT(OPEN);
  PATH;
  req->flags = flags;
  req->mode = mode;
  if (cb != NULL)
    if (uv__iou_fs_open(loop, req))
      return 0;
  POST;
}


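/* For asynchronous reads the caller's buffer array is copied into req->bufsml
 * (or a heap allocation when it has more than ARRAY_SIZE(req->bufsml)
 * entries) so the request does not depend on the caller's array outliving the
 * call.  Synchronous reads use the caller's array directly.
 *
 * Sketch of a synchronous, positional read (loop, fd and the buffer are
 * assumed to exist; names are illustrative only):
 *
 *   uv_fs_t req;
 *   char data[512];
 *   uv_buf_t buf = uv_buf_init(data, sizeof(data));
 *   int n = uv_fs_read(loop, &req, fd, &buf, 1, 0, NULL);
 *   uv_fs_req_cleanup(&req);
 *
 * n is the number of bytes read, or a negated errno code on failure.
 */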
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
               uv_file file,
               const uv_buf_t bufs[],
               unsigned int nbufs,
               int64_t off,
               uv_fs_cb cb) {
  INIT(READ);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->off = off;
  req->file = file;
  req->bufs = (uv_buf_t*) bufs;  /* Safe, doesn't mutate |bufs| */
  req->nbufs = nbufs;

  if (cb == NULL)
    goto post;

  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1))
    return 0;

post:
  POST;
}


int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SCANDIR);
  PATH;
  req->flags = flags;
  POST;
}

int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
  INIT(OPENDIR);
  PATH;
  POST;
}

int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
  INIT(READDIR);

  if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}

int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
  INIT(CLOSEDIR);

  if (dir == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}

int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(READLINK);
  PATH;
  POST;
}


int uv_fs_realpath(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(REALPATH);
  PATH;
  POST;
}


int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
  INIT(RENAME);
  PATH2;
  if (cb != NULL)
    if (uv__iou_fs_rename(loop, req))
      return 0;
  POST;
}


int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(RMDIR);
  PATH;
  POST;
}


int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
  INIT(SENDFILE);
  /* Hack: stash in_fd in req->flags; uv__fs_sendfile() reads it back. */
  req->flags = in_fd;
  req->file = out_fd;
  req->off = off;
  req->bufsml[0].len = len;
  POST;
}


int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(STAT);
  PATH;
  if (cb != NULL)
    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0))
      return 0;
  POST;
}


int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SYMLINK);
  PATH2;
  req->flags = flags;
  if (cb != NULL)
    if (uv__iou_fs_symlink(loop, req))
      return 0;
  POST;
}


int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(UNLINK);
  PATH;
  if (cb != NULL)
    if (uv__iou_fs_unlink(loop, req))
      return 0;
  POST;
}


int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
  INIT(UTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}


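/* Unlike uv_fs_read(), the buffer array is copied even for synchronous
 * writes: uv__fs_write_all() advances and rewrites the entries as it works
 * through them, so it must operate on a private copy rather than the
 * caller's array.
 */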
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
  INIT(WRITE);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;

  if (cb != NULL)
    if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 0))
      return 0;

  POST;
}


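/* Releases everything the request owns: the copied path(s), the copied buffer
 * array, scandir/readdir state, and any heap-allocated result hanging off
 * req->ptr.  Calling it again (or with req == NULL) is a harmless no-op.
 */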
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asynchronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory.  UV_FS_MKDTEMP and
   * UV_FS_MKSTEMP are the exception to the rule: they always allocate memory.
   */
  if (req->path != NULL &&
      (req->cb != NULL ||
        req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
    uv__free((void*) req->path);  /* Memory is shared with req->new_path. */

  req->path = NULL;
  req->new_path = NULL;

  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}


int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
  INIT(COPYFILE);

  if (flags & ~(UV_FS_COPYFILE_EXCL |
                UV_FS_COPYFILE_FICLONE |
                UV_FS_COPYFILE_FICLONE_FORCE)) {
    return UV_EINVAL;
  }

  PATH2;
  req->flags = flags;
  POST;
}


int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
  INIT(STATFS);
  PATH;
  POST;
}

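/* Failed requests store a negated errno code in req->result, so negating it
 * again recovers the positive system error number.
 */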
int uv_fs_get_system_error(const uv_fs_t* req) {
  return -req->result;
}