xref: /libuv/src/unix/fs.c (revision 8d957c56)
1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 /* Caveat emptor: this file deviates from the libuv convention of returning
23  * negated errno codes. Most uv_fs_*() functions map directly to the system
24  * call of the same name. For more complex wrappers, it's easier to just
25  * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26  * getting the errno to the right place (req->result or as the return value.)
27  */
28 
29 #include "uv.h"
30 #include "internal.h"
31 
32 #include <errno.h>
33 #include <dlfcn.h>
34 #include <stdatomic.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <limits.h> /* PATH_MAX */
39 
40 #include <sys/types.h>
41 #include <sys/socket.h>
42 #include <sys/stat.h>
43 #include <sys/time.h>
44 #include <sys/uio.h>
45 #include <unistd.h>
46 #include <fcntl.h>
47 #include <poll.h>
48 
49 #if defined(__linux__)
50 # include <sys/sendfile.h>
51 #endif
52 
53 #if defined(__sun)
54 # include <sys/sendfile.h>
55 # include <sys/sysmacros.h>
56 #endif
57 
58 #if defined(__APPLE__)
59 # include <sys/sysctl.h>
60 #elif defined(__linux__) && !defined(FICLONE)
61 # include <sys/ioctl.h>
62 # define FICLONE _IOW(0x94, 9, int)
63 #endif
64 
65 #if defined(_AIX) && !defined(_AIX71)
66 # include <utime.h>
67 #endif
68 
69 #if defined(__APPLE__)            ||                                      \
70     defined(__DragonFly__)        ||                                      \
71     defined(__FreeBSD__)          ||                                      \
72     defined(__OpenBSD__)          ||                                      \
73     defined(__NetBSD__)
74 # include <sys/param.h>
75 # include <sys/mount.h>
76 #elif defined(__sun)      || \
77       defined(__MVS__)    || \
78       defined(__NetBSD__) || \
79       defined(__HAIKU__)  || \
80       defined(__QNX__)
81 # include <sys/statvfs.h>
82 #else
83 # include <sys/statfs.h>
84 #endif
85 
86 #if defined(_AIX) && _XOPEN_SOURCE <= 600
87 extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
88 #endif
89 
/* Common request setup shared by every uv_fs_*() entry point: validates
 * `req`, stamps it as a UV_FS request of the given subtype and resets every
 * field the worker/cleanup code reads. Expects `req`, `loop` and `cb` to be
 * in scope at the expansion site; returns UV_EINVAL from the caller when
 * `req` is NULL. */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)
105 
/* Store `path` on the request. Synchronous calls (cb == NULL) borrow the
 * caller's pointer directly; asynchronous calls duplicate the string because
 * the caller's buffer may be gone by the time the work callback runs on the
 * thread pool. Returns UV_ENOMEM from the caller when the copy fails. */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
118 
/* Store `path` and `new_path` on the request. Synchronous calls borrow the
 * caller's pointers; asynchronous calls pack both NUL-terminated strings
 * into a single allocation (req->new_path points into it, so only req->path
 * needs freeing later). Returns UV_ENOMEM from the caller on OOM. */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)
138 
/* Dispatch the prepared request. Asynchronous calls (cb != NULL) are
 * registered with the loop, queued on the threadpool's fast-I/O queue and
 * return 0 immediately; synchronous calls run uv__fs_work() inline and
 * return the operation's result directly. */
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop);                                                 \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
156 
157 
/* Close `fd`, treating EINTR/EINPROGRESS as success: on those errnos the
 * kernel has already committed to closing the descriptor. */
static int uv__fs_close(int fd) {
  int rc = uv__close_nocancel(fd);

  if (rc == -1 && (errno == EINTR || errno == EINPROGRESS))
    rc = 0;  /* The close is in progress, not an error. */

  return rc;
}
168 
169 
/* Flush req->file to stable storage. */
static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* On Apple platforms fsync()/fdatasync() explicitly do NOT flush the
   * drive's write cache to the platters; F_FULLFSYNC is the Apple way to
   * push buffered data all the way to permanent storage. When the file
   * system rejects F_FULLFSYNC, fall back to F_BARRIERFSYNC (fsync plus a
   * write barrier) and finally to plain fsync(). sqlite takes the same
   * approach, minus the F_BARRIERFSYNC step.
   */
  int rc;

  rc = fcntl(req->file, F_FULLFSYNC);
  if (rc != 0)
    rc = fcntl(req->file, 85 /* F_BARRIERFSYNC */);
  if (rc != 0)
    rc = fsync(req->file);

  return rc;
#else
  return fsync(req->file);
#endif
}
192 
193 
/* Flush req->file's data to disk with the cheapest primitive available:
 * fdatasync() where it exists, the Apple full-flush path on macOS, and
 * plain fsync() everywhere else. */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync. */
  return uv__fs_fsync(req);
#else
  return fsync(req->file);
#endif
}
204 
205 
/* Convert a libuv timestamp (seconds as a double) to a struct timespec.
 * tv_sec truncates toward zero, so for negative inputs tv_nsec comes out
 * negative and is re-normalized into [0, 1e9) at the end. */
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
  struct timespec ts;
  ts.tv_sec  = time;
  ts.tv_nsec = (time - ts.tv_sec) * 1e9;

 /* TODO(bnoordhuis) Remove this. utimesat() has nanosecond resolution but we
  * stick to microsecond resolution for the sake of consistency with other
  * platforms. I'm the original author of this compatibility hack but I'm
  * less convinced it's useful nowadays.
  */
  ts.tv_nsec -= ts.tv_nsec % 1000;

  /* Normalize after the truncation above so tv_nsec is non-negative. */
  if (ts.tv_nsec < 0) {
    ts.tv_nsec += 1e9;
    ts.tv_sec -= 1;
  }
  return ts;
}
224 
/* Convert a libuv timestamp (seconds as a double) to a struct timeval.
 * Same normalization story as uv__fs_to_timespec, at microsecond
 * resolution: negative inputs leave tv_usec negative, fixed up at the end
 * so it lands in [0, 1e6). */
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
  struct timeval tv;
  tv.tv_sec  = time;
  tv.tv_usec = (time - tv.tv_sec) * 1e6;
  if (tv.tv_usec < 0) {
    tv.tv_usec += 1e6;
    tv.tv_sec -= 1;
  }
  return tv;
}
235 
/* Set the access/modification times of the already-open req->file from
 * req->atime/req->mtime (seconds as doubles). Picks the best primitive the
 * platform offers; fails with ENOSYS where none exists. */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__HAIKU__)                                                     \
    || defined(__GNU__)
  /* futimens() takes timespecs (nanosecond-resolution API). */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return futimens(req->file, ts);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__sun)
  /* futimes()/futimesat() take timevals (microsecond-resolution API). */
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
# if defined(__sun)
  return futimesat(req->file, NULL, tv);
# else
  return futimes(req->file, tv);
# endif
#elif defined(__MVS__)
  /* z/OS: request both timestamp changes through __fchattr(). */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
272 
273 
uv__fs_mkdtemp(uv_fs_t * req)274 static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
275   return mkdtemp((char*) req->path) ? 0 : -1;
276 }
277 
278 
/* Lazily-resolved pointer to the libc's mkostemp(); stays NULL when the
 * symbol is unavailable. */
static int (*uv__mkostemp)(char*, int);


/* uv_once() initializer: look up mkostemp() at runtime so the binary still
 * runs against C libraries that lack it. */
static void uv__mkostemp_initonce(void) {
  /* z/os doesn't have RTLD_DEFAULT but that's okay
   * because it doesn't have mkostemp(O_CLOEXEC) either.
   */
#ifdef RTLD_DEFAULT
  uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");

  /* We don't care about errors, but we do want to clean them up.
   * If there has been no error, then dlerror() will just return
   * NULL.
   */
  dlerror();
#endif  /* RTLD_DEFAULT */
}
296 
297 
/* Create and open a unique temporary file from the template in req->path.
 * Prefers mkostemp(O_CLOEXEC) when it was resolved at runtime; otherwise
 * falls back to mkstemp() plus a separate FD_CLOEXEC fixup. Returns the
 * open fd, or -1 with errno set — in which case the template is clobbered
 * to an empty string. */
static int uv__fs_mkstemp(uv_fs_t* req) {
  static uv_once_t once = UV_ONCE_INIT;
  int r;
#ifdef O_CLOEXEC
  /* Latched to 1 once mkostemp(O_CLOEXEC) fails with EINVAL so later calls
   * go straight to the mkstemp() fallback. */
  static _Atomic int no_cloexec_support;
#endif
  static const char pattern[] = "XXXXXX";
  static const size_t pattern_size = sizeof(pattern) - 1;
  char* path;
  size_t path_length;

  path = (char*) req->path;
  path_length = strlen(path);

  /* EINVAL can be returned for 2 reasons:
      1. The template's last 6 characters were not XXXXXX
      2. open() didn't support O_CLOEXEC
     We want to avoid going to the fallback path in case
     of 1, so it's manually checked before. */
  if (path_length < pattern_size ||
      strcmp(path + path_length - pattern_size, pattern)) {
    errno = EINVAL;
    r = -1;
    goto clobber;
  }

  uv_once(&once, uv__mkostemp_initonce);

#ifdef O_CLOEXEC
  if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 &&
      uv__mkostemp != NULL) {
    r = uv__mkostemp(path, O_CLOEXEC);

    if (r >= 0)
      return r;

    /* If mkostemp() returns EINVAL, it means the kernel doesn't
       support O_CLOEXEC, so we just fallback to mkstemp() below. */
    if (errno != EINVAL)
      goto clobber;

    /* We set the static variable so that next calls don't even
       try to use mkostemp. */
    atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed);
  }
#endif  /* O_CLOEXEC */

  /* Async requests hold the loop's cloexec lock for the create+fixup
   * window — presumably so a concurrent fork can't inherit the fd before
   * FD_CLOEXEC is set; same pattern as uv__fs_open's fallback path.
   * TODO(review): confirm against the loop's fork handling. */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = mkstemp(path);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

clobber:
  if (r < 0)
    path[0] = '\0';
  return r;
}
368 
369 
/* Open req->path with req->flags/req->mode, always ending up with
 * close-on-exec set: atomically via O_CLOEXEC where available, otherwise
 * via a separate uv__cloexec() fixup. Returns the fd or -1 with errno
 * set. */
static ssize_t uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC
  return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else  /* O_CLOEXEC */
  int r;

  /* Async requests hold the loop's cloexec lock for the open+fixup
   * window — presumably so a concurrent fork can't inherit the fd before
   * FD_CLOEXEC is set. TODO(review): confirm. */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
#endif  /* O_CLOEXEC */
}
397 
398 
/* Emulate preadv()/pwritev() with a pread()/pwrite() call per iovec.
 * Returns the total number of bytes transferred; stops early on a short
 * transfer. An error is reported as -1 (errno set) only when nothing was
 * transferred yet, otherwise the partial byte count is returned. */
static ssize_t uv__preadv_or_pwritev_emul(int fd,
                                          const struct iovec* bufs,
                                          size_t nbufs,
                                          off_t off,
                                          int is_pread) {
  ssize_t total;
  ssize_t rc;
  size_t idx;

  total = 0;
  for (idx = 0; idx < nbufs; idx++) {
    void* base = bufs[idx].iov_base;
    size_t len = bufs[idx].iov_len;

    /* Retry transfers interrupted by signals. */
    do {
      rc = is_pread ? pread(fd, base, len, off)
                    : pwrite(fd, base, len, off);
    } while (rc == -1 && errno == EINTR);

    if (rc == -1)
      return total > 0 ? total : -1;

    off += rc;
    total += rc;

    if ((size_t) rc < len)
      break;  /* Short transfer: report progress so far. */
  }

  return total;
}
437 
438 
/* preadv()/pwritev() declare the iovec count as int on Linux but size_t is
 * used elsewhere; this typedef lets the real functions and the emulation
 * below share one function-pointer signature in uv__preadv_or_pwritev(). */
#ifdef __linux__
typedef int uv__iovcnt;
#else
typedef size_t uv__iovcnt;
#endif


/* Adapter with the shared signature: emulated positional readv. */
static ssize_t uv__preadv_emul(int fd,
                               const struct iovec* bufs,
                               uv__iovcnt nbufs,
                               off_t off) {
  return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/1);
}


/* Adapter with the shared signature: emulated positional writev. */
static ssize_t uv__pwritev_emul(int fd,
                                const struct iovec* bufs,
                                uv__iovcnt nbufs,
                                off_t off) {
  return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/0);
}
460 
461 
/* The function pointer cache is an uintptr_t because _Atomic void*
 * doesn't work on macos/ios/etc...
 * Disable optimization on armv7 to work around the bug described in
 * https://github.com/libuv/libuv/issues/4532
 */
#if defined(__arm__) && (__ARM_ARCH == 7)
__attribute__((optimize("O0")))
#endif
/* Resolve the libc's preadv()/pwritev() once via dlsym(), caching the
 * result in *cache and falling back to the userland emulation when the
 * symbol is missing; then forward the call. Relaxed atomics suffice here:
 * racing threads may duplicate the lookup but all resolve the same
 * pointer. */
static ssize_t uv__preadv_or_pwritev(int fd,
                                     const struct iovec* bufs,
                                     size_t nbufs,
                                     off_t off,
                                     _Atomic uintptr_t* cache,
                                     int is_pread) {
  ssize_t (*f)(int, const struct iovec*, uv__iovcnt, off_t);
  void* p;

  p = (void*) atomic_load_explicit(cache, memory_order_relaxed);
  if (p == NULL) {
#ifdef RTLD_DEFAULT
    p = dlsym(RTLD_DEFAULT, is_pread ? "preadv" : "pwritev");
    dlerror();  /* Clear errors. */
#endif  /* RTLD_DEFAULT */
    if (p == NULL)
      p = is_pread ? uv__preadv_emul : uv__pwritev_emul;
    atomic_store_explicit(cache, (uintptr_t) p, memory_order_relaxed);
  }

  /* Use memcpy instead of `f = p` to work around a compiler bug,
   * see https://github.com/libuv/libuv/issues/4532
   */
  memcpy(&f, &p, sizeof(p));
  return f(fd, bufs, nbufs, off);
}
496 
497 
/* Positional scatter read; see uv__preadv_or_pwritev for the dispatch. */
static ssize_t uv__preadv(int fd,
                          const struct iovec* bufs,
                          size_t nbufs,
                          off_t off) {
  /* Caches the resolved preadv (or emulation) pointer across calls. */
  static _Atomic uintptr_t fn_cache;

  return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &fn_cache, /*is_pread*/1);
}
505 
506 
/* Positional gather write; see uv__preadv_or_pwritev for the dispatch. */
static ssize_t uv__pwritev(int fd,
                           const struct iovec* bufs,
                           size_t nbufs,
                           off_t off) {
  /* Caches the resolved pwritev (or emulation) pointer across calls. */
  static _Atomic uintptr_t fn_cache;

  return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &fn_cache, /*is_pread*/0);
}
514 
515 
/* Read from req->file into req->bufs: positionally (pread/preadv) when
 * req->off >= 0, from the current file offset otherwise. The buffer count
 * is clamped to the platform's iovec limit, so one call may consume fewer
 * buffers than requested. Returns the byte count or -1 with errno set. */
static ssize_t uv__fs_read(uv_fs_t* req) {
  const struct iovec* bufs;
  unsigned int iovmax;
  size_t nbufs;
  ssize_t r;
  off_t off;
  int fd;

  fd = req->file;
  off = req->off;
  bufs = (const struct iovec*) req->bufs;
  nbufs = req->nbufs;

  /* Clamp to the per-call iovec limit. */
  iovmax = uv__getiovmax();
  if (nbufs > iovmax)
    nbufs = iovmax;

  r = 0;
  if (off < 0) {
    if (nbufs == 1)
      r = read(fd, bufs->iov_base, bufs->iov_len);
    else if (nbufs > 1)
      r = readv(fd, bufs, nbufs);
  } else {
    if (nbufs == 1)
      r = pread(fd, bufs->iov_base, bufs->iov_len, off);
    else if (nbufs > 1)
      r = uv__preadv(fd, bufs, nbufs, off);
  }

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
  if (r == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = uv__fstat(fd, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  /* We don't own the buffer list in the synchronous case. */
  if (req->cb != NULL)
    if (req->bufs != req->bufsml)
      uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return r;
}
568 
569 
uv__fs_scandir_filter(const uv__dirent_t * dent)570 static int uv__fs_scandir_filter(const uv__dirent_t* dent) {
571   return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
572 }
573 
574 
uv__fs_scandir_sort(const uv__dirent_t ** a,const uv__dirent_t ** b)575 static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
576   return strcmp((*a)->d_name, (*b)->d_name);
577 }
578 
579 
uv__fs_scandir(uv_fs_t * req)580 static ssize_t uv__fs_scandir(uv_fs_t* req) {
581   uv__dirent_t** dents;
582   int n;
583 
584   dents = NULL;
585   n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);
586 
587   /* NOTE: We will use nbufs as an index field */
588   req->nbufs = 0;
589 
590   if (n == 0) {
591     /* OS X still needs to deallocate some memory.
592      * Memory was allocated using the system allocator, so use free() here.
593      */
594     free(dents);
595     dents = NULL;
596   } else if (n == -1) {
597     return n;
598   }
599 
600   req->ptr = dents;
601 
602   return n;
603 }
604 
/* Allocate a uv_dir_t and open a directory stream on req->path, storing the
 * result in req->ptr. Returns 0 on success, -1 (errno set) on failure, with
 * req->ptr left NULL. */
static int uv__fs_opendir(uv_fs_t* req) {
  uv_dir_t* dir;

  req->ptr = NULL;

  dir = uv__malloc(sizeof(*dir));
  if (dir == NULL)
    return -1;

  dir->dir = opendir(req->path);
  if (dir->dir == NULL) {
    uv__free(dir);
    return -1;
  }

  req->ptr = dir;
  return 0;
}
624 
/* Fill dir->dirents with up to dir->nentries entries from the open stream,
 * skipping "." and ".."; each name is copied with uv__strdup. Returns the
 * number of entries stored (0 at end of directory) or -1 with errno set,
 * in which case every name copied so far is freed again. */
static int uv__fs_readdir(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirent;
  struct dirent* res;
  unsigned int dirent_idx;
  unsigned int i;

  dir = req->ptr;
  dirent_idx = 0;

  while (dirent_idx < dir->nentries) {
    /* readdir() returns NULL on end of directory, as well as on error. errno
       is used to differentiate between the two conditions. */
    errno = 0;
    res = readdir(dir->dir);

    if (res == NULL) {
      if (errno != 0)
        goto error;
      break;
    }

    /* Skip the "." and ".." pseudo-entries. */
    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
      continue;

    dirent = &dir->dirents[dirent_idx];
    dirent->name = uv__strdup(res->d_name);

    if (dirent->name == NULL)
      goto error;

    dirent->type = uv__fs_get_dirent_type(res);
    ++dirent_idx;
  }

  return dirent_idx;

error:
  /* Roll back: release every name duplicated before the failure. */
  for (i = 0; i < dirent_idx; ++i) {
    uv__free((char*) dir->dirents[i].name);
    dir->dirents[i].name = NULL;
  }

  return -1;
}
670 
/* Close the directory stream held in req->ptr (if still open) and release
 * the uv_dir_t. Always succeeds. */
static int uv__fs_closedir(uv_fs_t* req) {
  uv_dir_t* dir = req->ptr;

  if (dir->dir != NULL) {
    closedir(dir->dir);
    dir->dir = NULL;
  }

  uv__free(dir);
  req->ptr = NULL;

  return 0;
}
685 
/* Stat the file system containing req->path into a uv__malloc'd
 * uv_statfs_t stored in req->ptr, using statvfs() on platforms without
 * statfs(). Returns 0 on success or -1 with errno set. */
static int uv__fs_statfs(uv_fs_t* req) {
  uv_statfs_t* stat_fs;
#if defined(__sun)      || \
    defined(__MVS__)    || \
    defined(__NetBSD__) || \
    defined(__HAIKU__)  || \
    defined(__QNX__)
  struct statvfs buf;

  if (0 != statvfs(req->path, &buf))
#else
  struct statfs buf;

  if (0 != statfs(req->path, &buf))
#endif /* defined(__sun) */
    return -1;

  stat_fs = uv__malloc(sizeof(*stat_fs));
  if (stat_fs == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__sun)        || \
    defined(__MVS__)      || \
    defined(__OpenBSD__)  || \
    defined(__NetBSD__)   || \
    defined(__HAIKU__)    || \
    defined(__QNX__)
  stat_fs->f_type = 0;  /* f_type is not supported. */
#else
  stat_fs->f_type = buf.f_type;
#endif
  stat_fs->f_bsize = buf.f_bsize;
  stat_fs->f_blocks = buf.f_blocks;
  stat_fs->f_bfree = buf.f_bfree;
  stat_fs->f_bavail = buf.f_bavail;
  stat_fs->f_files = buf.f_files;
  stat_fs->f_ffree = buf.f_ffree;
  req->ptr = stat_fs;
  return 0;
}
728 
uv__fs_pathmax_size(const char * path)729 static ssize_t uv__fs_pathmax_size(const char* path) {
730   ssize_t pathmax;
731 
732   pathmax = pathconf(path, _PC_PATH_MAX);
733 
734   if (pathmax == -1)
735     pathmax = UV__PATH_MAX;
736 
737   return pathmax;
738 }
739 
/* Read the target of the symlink req->path into a freshly allocated,
 * NUL-terminated buffer stored in req->ptr. The buffer is sized from
 * pathconf()/PATH_MAX — or from lstat()'s st_size on platforms without a
 * usable PATH_MAX — and grown by one byte when the target fills it
 * completely. Returns 0 on success or -1 with errno set. */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* We may not have a real PATH_MAX.  Read size of link.  */
  struct stat st;
  int ret;
  ret = uv__lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* According to readlink(2) lstat can report st_size == 0
     for some symlinks, such as those in /proc or /sys.  */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: resize to make room for the trailing nul byte. */
  if (len == maxlen) {
    buf = uv__reallocf(buf, len + 1);

    if (buf == NULL)
      return -1;
  }

  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}
798 
/* Canonicalize req->path into a libuv-owned buffer stored in req->ptr.
 * On POSIX.1-2008 systems realpath(path, NULL) allocates with the system
 * allocator, so the result is copied into uv__malloc'd memory and the
 * original released with free(). Returns 0 or -1 with errno set. */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* buf;
  char* tmp;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  tmp = realpath(req->path, NULL);
  if (tmp == NULL)
    return -1;
  buf = uv__strdup(tmp);
  free(tmp); /* _Not_ uv__free. */
  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }
#else
  /* Older systems: allocate a PATH_MAX-ish buffer up front. */
  ssize_t len;

  (void)tmp;

  len = uv__fs_pathmax_size(req->path);
  buf = uv__malloc(len + 1);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, buf) == NULL) {
    uv__free(buf);
    return -1;
  }
#endif

  req->ptr = buf;

  return 0;
}
836 
/* Userland fallback for sendfile(): copy req->bufsml[0].len bytes from the
 * source fd (stashed in req->flags — presumably set up by uv__fs_sendfile;
 * confirm at the call site) to req->file through an 8 KB stack buffer,
 * starting at req->off. Returns the number of bytes sent or -1 with errno
 * set; req->off is advanced by the amount sent. */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];

  len = req->bufsml[0].len;
  in_fd = req->flags;
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  /* Here are the rules regarding errors:
   *
   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
   *    The user needs to know that some data has already been sent, to stop
   *    them from sending it twice.
   *
   * 2. Write errors are always reported. Write errors are bad because they
   *    mean data loss: we've read data but now we can't write it out.
   *
   * We try to use pread() and fall back to regular read() if the source fd
   * doesn't support positional reads, for example when it's a pipe fd.
   *
   * If we get EAGAIN when writing to the target fd, we poll() on it until
   * it becomes writable again.
   *
   * FIXME: If we get a write error when use_pread==1, it should be safe to
   *        return the number of sent bytes instead of an error because pread()
   *        is, in theory, idempotent. However, special files in /dev or /proc
   *        may support pread() but not necessarily return the same data on
   *        successive reads.
   *
   * FIXME: There is no way now to signal that we managed to send *some* data
   *        before a write error.
   */
  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    /* Retry reads interrupted by signals. */
    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;  /* EOF on the source. */

    if (nread == -1) {
      /* Source rejects positional reads: restart with plain read(). */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    /* Drain the chunk just read into the destination fd. */
    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      /* Destination not ready: block until it is writable again. */
      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}
949 
950 
951 #ifdef __linux__
952 /* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
953  * in copy_file_range() when it shouldn't. There is no workaround except to
954  * fall back to a regular copy.
955  */
/* True when `fd` lives on a CephFS mount under a pre-4.20 kernel, where
 * copy_file_range() misuses the RADOS copy-from command. Any stat failure
 * is treated as "not buggy". */
static int uv__is_buggy_cephfs(int fd) {
  struct statfs s;

  if (fstatfs(fd, &s) == -1)
    return 0;

  if (s.f_type != /* CephFS */ 0xC36400)
    return 0;

  return uv__kernel_version() < /* 4.20.0 */ 0x041400;
}
967 
968 
/* Detects whether `fd` refers to a file on a CIFS/SMB mount by comparing the
 * filesystem magic number reported by fstatfs(). Returns 0 on any error.
 */
static int uv__is_cifs_or_smb(int fd) {
  struct statfs s;
  unsigned magic;

  if (fstatfs(fd, &s) != 0)
    return 0;

  magic = (unsigned) s.f_type;
  return magic == 0x0000517Bu ||  /* SMB */
         magic == 0xFE534D42u ||  /* SMB2 */
         magic == 0xFF534D42u;    /* CIFS */
}
984 
985 
uv__fs_try_copy_file_range(int in_fd,off_t * off,int out_fd,size_t len)986 static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
987                                           int out_fd, size_t len) {
988   static _Atomic int no_copy_file_range_support;
989   ssize_t r;
990 
991   if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) {
992     errno = ENOSYS;
993     return -1;
994   }
995 
996   r = uv__fs_copy_file_range(in_fd, off, out_fd, NULL, len, 0);
997 
998   if (r != -1)
999     return r;
1000 
1001   switch (errno) {
1002   case EACCES:
1003     /* Pre-4.20 kernels have a bug where CephFS uses the RADOS
1004      * copy-from command when it shouldn't.
1005      */
1006     if (uv__is_buggy_cephfs(in_fd))
1007       errno = ENOSYS;  /* Use fallback. */
1008     break;
1009   case ENOSYS:
1010     atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed);
1011     break;
1012   case EPERM:
1013     /* It's been reported that CIFS spuriously fails.
1014      * Consider it a transient error.
1015      */
1016     if (uv__is_cifs_or_smb(out_fd))
1017       errno = ENOSYS;  /* Use fallback. */
1018     break;
1019   case ENOTSUP:
1020   case EXDEV:
1021     /* ENOTSUP - it could work on another file system type.
1022      * EXDEV - it will not work when in_fd and out_fd are not on the same
1023      *         mounted filesystem (pre Linux 5.3)
1024      */
1025     errno = ENOSYS;  /* Use fallback. */
1026     break;
1027   }
1028 
1029   return -1;
1030 }
1031 
1032 #endif  /* __linux__ */
1033 
1034 
/* Implements UV_FS_SENDFILE: copies req->bufsml[0].len bytes starting at
 * req->off from the input fd (stored in req->flags) to the output fd
 * (req->file), using the platform's zero-copy primitive when possible and
 * falling back to uv__fs_sendfile_emul() when the kernel rejects the fd
 * combination. On success req->off is advanced and the byte count returned;
 * on failure -1 is returned with errno set.
 */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  /* Note: the input fd travels in req->flags, not req->file. */
  in_fd = req->flags;
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;
    size_t len;
    int try_sendfile;

    off = req->off;
    len = req->bufsml[0].len;
    try_sendfile = 1;

#ifdef __linux__
    /* Prefer copy_file_range(); only fall back to sendfile() when it is
     * unsupported (the helper maps all "use fallback" cases to ENOSYS). */
    r = uv__fs_try_copy_file_range(in_fd, &off, out_fd, len);
    try_sendfile = (r == -1 && errno == ENOSYS);
#endif

    if (try_sendfile)
      r = sendfile(out_fd, in_fd, &off, len);

    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
     * it still writes out data. Fortunately, we can detect it by checking if
     * the offset has been updated.
     */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
/* sendfile() on iOS (arm64) raises SIGSYS, which crashes the process. */
#elif (defined(__APPLE__) && !TARGET_OS_IPHONE)                               \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
     * non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error.
     */

#if defined(__FreeBSD__) || defined(__DragonFly__)
#if defined(__FreeBSD__)
    off_t off;

    off = req->off;
    r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
    if (r >= 0) {
        r = off - req->off;
        req->off = off;
        return r;
    }
#endif
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#else
    /* The darwin sendfile takes len as an input for the length to send,
     * so make sure to initialize it with the caller's value. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

    /* The man page for sendfile(2) on DragonFly states that `len` contains
     * a meaningful value ONLY in case of EAGAIN and EINTR.
     * Nothing is said about its value in case of other errors, so better
     * not depend on the potentially wrong assumption that it was not modified
     * by the syscall.
     */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}
1145 
1146 
/* Implements UV_FS_UTIME: sets the access and modification times of
 * req->path to req->atime/req->mtime (seconds as doubles), picking the
 * highest-resolution interface each platform provides. Returns the
 * underlying syscall's result, or -1/ENOSYS on unsupported platforms.
 */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__linux__)                                                         \
    || defined(_AIX71)                                                         \
    || defined(__sun)                                                          \
    || defined(__HAIKU__)
  /* utimensat() accepts nanosecond-resolution timespecs. */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)
  /* utimes() works at microsecond resolution. */
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return utimes(req->path, tv);
#elif defined(_AIX)                                                           \
    && !defined(_AIX71)
  /* Pre-7.1 AIX: utime() offers whole-second resolution only. */
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  /* z/OS: set both timestamps with a single __lchattr() call. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
1184 
1185 
/* Implements UV_FS_LUTIME: like uv__fs_utime() but does not follow a
 * trailing symlink — the times of the link itself are changed. Returns
 * -1/ENOSYS on platforms without a no-follow time-setting interface.
 */
static ssize_t uv__fs_lutime(uv_fs_t* req) {
#if defined(__linux__)            ||                                           \
    defined(_AIX71)               ||                                           \
    defined(__sun)                ||                                           \
    defined(__HAIKU__)            ||                                           \
    defined(__GNU__)              ||                                           \
    defined(__OpenBSD__)
  /* utimensat() with AT_SYMLINK_NOFOLLOW operates on the link itself. */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
#elif defined(__APPLE__)          ||                                          \
      defined(__DragonFly__)      ||                                          \
      defined(__FreeBSD__)        ||                                          \
      defined(__NetBSD__)
  /* BSD-style lutimes() never follows symlinks. */
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return lutimes(req->path, tv);
#else
  errno = ENOSYS;
  return -1;
#endif
}
1210 
1211 
/* Performs a single write for `req`: positional (pwrite/pwritev) when
 * req->off is non-negative, otherwise at the current file offset. With zero
 * buffers nothing is written and 0 is returned; otherwise the syscall's
 * result (byte count, or -1 with errno set) is returned.
 */
static ssize_t uv__fs_write(uv_fs_t* req) {
  const struct iovec* iov;
  size_t iovcnt;
  off_t offset;
  int fd;

  fd = req->file;
  offset = req->off;
  iov = (const struct iovec*) req->bufs;
  iovcnt = req->nbufs;

  if (iovcnt == 0)
    return 0;

  if (offset < 0)
    return iovcnt == 1 ? write(fd, iov->iov_base, iov->iov_len)
                       : writev(fd, iov, iovcnt);

  return iovcnt == 1 ? pwrite(fd, iov->iov_base, iov->iov_len, offset)
                     : uv__pwritev(fd, iov, iovcnt, offset);
}
1239 
1240 
/* Implements UV_FS_COPYFILE: copies req->path to req->new_path. The
 * destination is created (exclusively when UV_FS_COPYFILE_EXCL is set) with
 * the source's mode; timestamps, ownership and permissions are copied over;
 * the data is cloned with FICLONE when requested and available, otherwise
 * streamed via uv_fs_sendfile(). On any failure the destination file is
 * unlinked. Returns 0 on success or -1 with errno set (see the caveat at
 * the top of this file).
 */
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
  uv_fs_t fs_req;
  uv_file srcfd;
  uv_file dstfd;
  struct stat src_statsbuf;
  struct stat dst_statsbuf;
  struct timespec times[2];
  int dst_flags;
  int result;
  int err;
  off_t bytes_to_send;
  off_t in_offset;
  off_t bytes_written;
  size_t bytes_chunk;

  dstfd = -1;
  err = 0;

  /* Open the source file. */
  srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
  uv_fs_req_cleanup(&fs_req);

  if (srcfd < 0)
    return srcfd;

  /* Get the source file's mode. */
  if (uv__fstat(srcfd, &src_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  dst_flags = O_WRONLY | O_CREAT;

  if (req->flags & UV_FS_COPYFILE_EXCL)
    dst_flags |= O_EXCL;

  /* Open the destination file. */
  dstfd = uv_fs_open(NULL,
                     &fs_req,
                     req->new_path,
                     dst_flags,
                     src_statsbuf.st_mode,
                     NULL);
  uv_fs_req_cleanup(&fs_req);

  if (dstfd < 0) {
    err = dstfd;
    goto out;
  }

  /* If the file is not being opened exclusively, verify that the source and
     destination are not the same file. If they are the same, bail out early. */
  if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
    /* Get the destination file's mode. */
    if (uv__fstat(dstfd, &dst_statsbuf)) {
      err = UV__ERR(errno);
      goto out;
    }

    /* Check if srcfd and dstfd refer to the same file */
    if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
        src_statsbuf.st_ino == dst_statsbuf.st_ino) {
      goto out;
    }

    /* Truncate the file in case the destination already existed. */
    if (ftruncate(dstfd, 0) != 0) {
      err = UV__ERR(errno);

      /* ftruncate() on ceph-fuse fails with EACCES when the file is created
       * with read only permissions. Since ftruncate() on a newly created
       * file is a meaningless operation anyway, detect that condition
       * and squelch the error.
       */
      if (err != UV_EACCES)
        goto out;

      if (dst_statsbuf.st_size > 0)
        goto out;

      err = 0;
    }
  }

  /**
   * Change the timestamps of the destination file to match the source file.
   * NOTE(review): this runs before the data copy below; whether the
   * subsequent writes bump the destination's mtime again is
   * filesystem-dependent — confirm if exact mtime preservation matters.
   */
#if defined(__APPLE__)
  times[0] = src_statsbuf.st_atimespec;
  times[1] = src_statsbuf.st_mtimespec;
#elif defined(_AIX)
  times[0].tv_sec = src_statsbuf.st_atime;
  times[0].tv_nsec = src_statsbuf.st_atime_n;
  times[1].tv_sec = src_statsbuf.st_mtime;
  times[1].tv_nsec = src_statsbuf.st_mtime_n;
#else
  times[0] = src_statsbuf.st_atim;
  times[1] = src_statsbuf.st_mtim;
#endif

  if (futimens(dstfd, times) == -1) {
    err = UV__ERR(errno);
    goto out;
  }

  /*
   * Change the ownership and permissions of the destination file to match the
   * source file.
   * `cp -p` does not care about errors here, so we don't either. Reuse the
   * `result` variable to silence a -Wunused-result warning.
   */
  result = fchown(dstfd, src_statsbuf.st_uid, src_statsbuf.st_gid);

  if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
    err = UV__ERR(errno);
#ifdef __linux__
    /* fchmod() on CIFS shares always fails with EPERM unless the share is
     * mounted with "noperm". As fchmod() is a meaningless operation on such
     * shares anyway, detect that condition and squelch the error.
     */
    if (err != UV_EPERM)
      goto out;

    if (!uv__is_cifs_or_smb(dstfd))
      goto out;

    err = 0;
#else  /* !__linux__ */
    goto out;
#endif  /* !__linux__ */
  }

#ifdef FICLONE
  if (req->flags & UV_FS_COPYFILE_FICLONE ||
      req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    if (ioctl(dstfd, FICLONE, srcfd) == 0) {
      /* ioctl() with FICLONE succeeded. */
      goto out;
    }
    /* If an error occurred and force was set, return the error to the caller;
     * fall back to sendfile() when force was not set. */
    if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
      err = UV__ERR(errno);
      goto out;
    }
  }
#else
  /* No FICLONE support at build time: forced cloning cannot be honored. */
  if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    err = UV_ENOSYS;
    goto out;
  }
#endif

  /* Stream the file contents in chunks of at most SSIZE_MAX bytes. */
  bytes_to_send = src_statsbuf.st_size;
  in_offset = 0;
  while (bytes_to_send != 0) {
    bytes_chunk = SSIZE_MAX;
    if (bytes_to_send < (off_t) bytes_chunk)
      bytes_chunk = bytes_to_send;
    uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
    bytes_written = fs_req.result;
    uv_fs_req_cleanup(&fs_req);

    if (bytes_written < 0) {
      err = bytes_written;
      break;
    }

    bytes_to_send -= bytes_written;
    in_offset += bytes_written;
  }

out:
  if (err < 0)
    result = err;
  else
    result = 0;

  /* Close the source file. */
  err = uv__close_nocheckstdio(srcfd);

  /* Don't overwrite any existing errors. */
  if (err != 0 && result == 0)
    result = err;

  /* Close the destination file if it is open. */
  if (dstfd >= 0) {
    err = uv__close_nocheckstdio(dstfd);

    /* Don't overwrite any existing errors. */
    if (err != 0 && result == 0)
      result = err;

    /* Remove the destination file if something went wrong. */
    if (result != 0) {
      uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
      /* Ignore the unlink return value, as an error already happened. */
      uv_fs_req_cleanup(&fs_req);
    }
  }

  if (result == 0)
    return 0;

  /* Hand the (negative) libuv error code back through errno; the dispatcher
   * in uv__fs_work() negates it again. */
  errno = UV__ERR(result);
  return -1;
}
1448 
/* Converts a platform struct stat into libuv's portable uv_stat_t.
 * Nanosecond timestamp fields are copied where the platform exposes them
 * and zeroed otherwise; st_flags/st_gen are zeroed where struct stat lacks
 * them, and st_birthtim falls back to st_ctim when no birth time exists.
 */
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
  dst->st_dev = src->st_dev;
  dst->st_mode = src->st_mode;
  dst->st_nlink = src->st_nlink;
  dst->st_uid = src->st_uid;
  dst->st_gid = src->st_gid;
  dst->st_rdev = src->st_rdev;
  dst->st_ino = src->st_ino;
  dst->st_size = src->st_size;
  dst->st_blksize = src->st_blksize;
  dst->st_blocks = src->st_blocks;

#if defined(__APPLE__)
  /* Darwin uses *timespec member names and has a real birth time. */
  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
#elif defined(__ANDROID__)
  /* Bionic exposes separate *nsec fields instead of timespec members. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = src->st_atimensec;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = src->st_mtimensec;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = src->st_ctimensec;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = src->st_ctimensec;
  dst->st_flags = 0;
  dst->st_gen = 0;
#elif !defined(_AIX) &&         \
    !defined(__MVS__) && (      \
    defined(__DragonFly__)   || \
    defined(__FreeBSD__)     || \
    defined(__OpenBSD__)     || \
    defined(__NetBSD__)      || \
    defined(_GNU_SOURCE)     || \
    defined(_BSD_SOURCE)     || \
    defined(_SVID_SOURCE)    || \
    defined(_XOPEN_SOURCE)   || \
    defined(_DEFAULT_SOURCE))
  /* Platforms/feature-test macros that expose the st_*tim timespec members. */
  dst->st_atim.tv_sec = src->st_atim.tv_sec;
  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
# if defined(__FreeBSD__)    || \
     defined(__NetBSD__)
  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
# else
  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
  dst->st_flags = 0;
  dst->st_gen = 0;
# endif
#else
  /* Fallback: whole-second timestamp resolution only. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = 0;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = 0;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = 0;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = 0;
  dst->st_flags = 0;
  dst->st_gen = 0;
#endif
}
1525 
1526 
/* stat()/lstat()/fstat() backed by the Linux statx(2) system call.
 * Returns 0 on success, -1 with errno set on failure, or UV_ENOSYS when
 * statx() is unusable; the latter is cached in `no_statx` so the syscall
 * is probed only once per process. Non-Linux builds always report
 * UV_ENOSYS so callers fall back to the classic stat family.
 */
static int uv__fs_statx(int fd,
                        const char* path,
                        int is_fstat,
                        int is_lstat,
                        uv_stat_t* buf) {
  /* Callers distinguish UV_ENOSYS from a plain -1 failure. */
  STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__
  static _Atomic int no_statx;
  struct uv__statx statxbuf;
  int dirfd;
  int flags;
  int mode;
  int rc;

  if (atomic_load_explicit(&no_statx, memory_order_relaxed))
    return UV_ENOSYS;

  dirfd = AT_FDCWD;
  flags = 0; /* AT_STATX_SYNC_AS_STAT */
  mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */

  /* fstat(): target the fd itself via AT_EMPTY_PATH and an empty path. */
  if (is_fstat) {
    dirfd = fd;
    flags |= 0x1000; /* AT_EMPTY_PATH */
  }

  if (is_lstat)
    flags |= AT_SYMLINK_NOFOLLOW;

  rc = uv__statx(dirfd, path, flags, mode, &statxbuf);

  switch (rc) {
  case 0:
    break;
  case -1:
    /* EPERM happens when a seccomp filter rejects the system call.
     * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
     * EOPNOTSUPP is used on DVS exported filesystems
     */
    if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
      return -1;
    /* Fall through. */
  default:
    /* Normally on success, zero is returned and On error, -1 is returned.
     * Observed on S390 RHEL running in a docker container with statx not
     * implemented, rc might return 1 with 0 set as the error code in which
     * case we return ENOSYS.
     */
    atomic_store_explicit(&no_statx, 1, memory_order_relaxed);
    return UV_ENOSYS;
  }

  uv__statx_to_stat(&statxbuf, buf);

  return 0;
#else
  return UV_ENOSYS;
#endif /* __linux__ */
}
1586 
1587 
/* stat() wrapper: prefers statx() where available and falls back to the
 * classic stat() + uv__to_stat() conversion when it reports UV_ENOSYS.
 */
static int uv__fs_stat(const char *path, uv_stat_t *buf) {
  struct stat st;
  int rc;

  rc = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
  if (rc != UV_ENOSYS)
    return rc;

  rc = uv__stat(path, &st);
  if (rc != 0)
    return rc;

  uv__to_stat(&st, buf);
  return 0;
}
1602 
1603 
/* lstat() wrapper: prefers statx() with AT_SYMLINK_NOFOLLOW and falls back
 * to the classic lstat() + uv__to_stat() conversion on UV_ENOSYS.
 */
static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
  struct stat st;
  int rc;

  rc = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
  if (rc != UV_ENOSYS)
    return rc;

  rc = uv__lstat(path, &st);
  if (rc != 0)
    return rc;

  uv__to_stat(&st, buf);
  return 0;
}
1618 
1619 
/* fstat() wrapper: prefers statx() on the fd (empty path) and falls back to
 * the classic fstat() + uv__to_stat() conversion on UV_ENOSYS.
 */
static int uv__fs_fstat(int fd, uv_stat_t *buf) {
  struct stat st;
  int rc;

  rc = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
  if (rc != UV_ENOSYS)
    return rc;

  rc = uv__fstat(fd, &st);
  if (rc != 0)
    return rc;

  uv__to_stat(&st, buf);
  return 0;
}
1634 
/* Advances past the buffers fully consumed by a read/write of `size` bytes,
 * shrinking the first partially consumed buffer in place to its unconsumed
 * tail. Returns the number of completely finished buffers.
 */
static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
  size_t idx;

  idx = 0;
  while (size > 0 && bufs[idx].len <= size) {
    size -= bufs[idx].len;
    idx++;
  }

  if (size > 0) {
    /* This buffer was only partially consumed. */
    bufs[idx].base += size;
    bufs[idx].len -= size;
  }

  return idx;
}
1648 
/* Implements UV_FS_WRITE: drains every buffer in req->bufs, retrying on
 * EINTR and resuming after partial writes, in batches of at most
 * uv__getiovmax() buffers per uv__fs_write() call. Returns the total byte
 * count, or the first error result when nothing was written. Frees the
 * buffer array (when heap-allocated) and clears req->bufs/req->nbufs.
 */
static ssize_t uv__fs_write_all(uv_fs_t* req) {
  unsigned int iovmax;
  unsigned int nbufs;
  uv_buf_t* bufs;
  ssize_t total;
  ssize_t result;

  iovmax = uv__getiovmax();
  nbufs = req->nbufs;
  bufs = req->bufs;
  total = 0;

  while (nbufs > 0) {
    /* Clamp this batch to the platform's iovec limit. */
    req->nbufs = nbufs;
    if (req->nbufs > iovmax)
      req->nbufs = iovmax;

    do
      result = uv__fs_write(req);
    while (result < 0 && errno == EINTR);

    if (result <= 0) {
      /* Only surface the error when no data was written at all; otherwise
       * return the short byte count accumulated so far. */
      if (total == 0)
        total = result;
      break;
    }

    /* Positional writes: advance the request offset ourselves. */
    if (req->off >= 0)
      req->off += result;

    /* Skip fully written buffers and trim a partially written one. */
    req->nbufs = uv__fs_buf_offset(req->bufs, result);
    req->bufs += req->nbufs;
    nbufs -= req->nbufs;
    total += result;
  }

  /* bufsml is the inline small-buffer array; anything else was malloc'd. */
  if (bufs != req->bufsml)
    uv__free(bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return total;
}
1693 
1694 
/* Thread-pool work callback: dispatches req->fs_type to the matching
 * syscall or uv__fs_*() helper and stores the outcome in req->result —
 * the value on success, the negated errno on failure (see the caveat at
 * the top of this file). EINTR is retried for every operation except
 * UV_FS_CLOSE and UV_FS_READ.
 */
static void uv__fs_work(struct uv__work* w) {
  int retry_on_eintr;
  uv_fs_t* req;
  ssize_t r;

  req = container_of(w, uv_fs_t, work_req);
  /* close() and read() requests are deliberately not retried on EINTR. */
  retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
                     req->fs_type == UV_FS_READ);

  do {
    errno = 0;  /* Reset so a stale errno can't leak into req->result. */

/* Expands to one switch case per operation. */
#define X(type, action)                                                       \
  case UV_FS_ ## type:                                                        \
    r = action;                                                               \
    break;

    switch (req->fs_type) {
    X(ACCESS, access(req->path, req->flags));
    X(CHMOD, chmod(req->path, req->mode));
    X(CHOWN, chown(req->path, req->uid, req->gid));
    X(CLOSE, uv__fs_close(req->file));
    X(COPYFILE, uv__fs_copyfile(req));
    X(FCHMOD, fchmod(req->file, req->mode));
    X(FCHOWN, fchown(req->file, req->uid, req->gid));
    X(LCHOWN, lchown(req->path, req->uid, req->gid));
    X(FDATASYNC, uv__fs_fdatasync(req));
    X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
    X(FSYNC, uv__fs_fsync(req));
    X(FTRUNCATE, ftruncate(req->file, req->off));
    X(FUTIME, uv__fs_futime(req));
    X(LUTIME, uv__fs_lutime(req));
    X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
    X(LINK, link(req->path, req->new_path));
    X(MKDIR, mkdir(req->path, req->mode));
    X(MKDTEMP, uv__fs_mkdtemp(req));
    X(MKSTEMP, uv__fs_mkstemp(req));
    X(OPEN, uv__fs_open(req));
    X(READ, uv__fs_read(req));
    X(SCANDIR, uv__fs_scandir(req));
    X(OPENDIR, uv__fs_opendir(req));
    X(READDIR, uv__fs_readdir(req));
    X(CLOSEDIR, uv__fs_closedir(req));
    X(READLINK, uv__fs_readlink(req));
    X(REALPATH, uv__fs_realpath(req));
    X(RENAME, rename(req->path, req->new_path));
    X(RMDIR, rmdir(req->path));
    X(SENDFILE, uv__fs_sendfile(req));
    X(STAT, uv__fs_stat(req->path, &req->statbuf));
    X(STATFS, uv__fs_statfs(req));
    X(SYMLINK, symlink(req->path, req->new_path));
    X(UNLINK, unlink(req->path));
    X(UTIME, uv__fs_utime(req));
    X(WRITE, uv__fs_write_all(req));
    default: abort();
    }
#undef X
  } while (r == -1 && errno == EINTR && retry_on_eintr);

  if (r == -1)
    req->result = UV__ERR(errno);
  else
    req->result = r;

  /* Stat-family results are additionally exposed through req->ptr. */
  if (r == 0 && (req->fs_type == UV_FS_STAT ||
                 req->fs_type == UV_FS_FSTAT ||
                 req->fs_type == UV_FS_LSTAT)) {
    req->ptr = &req->statbuf;
  }
}
1765 
1766 
/* Thread-pool completion callback: unregisters the request from the loop
 * and invokes the user's callback. Cancelled requests report UV_ECANCELED.
 */
static void uv__fs_done(struct uv__work* w, int status) {
  uv_fs_t* req;

  req = container_of(w, uv_fs_t, work_req);
  uv__req_unregister(req->loop);

  if (status == UV_ECANCELED) {
    /* The request was cancelled before uv__fs_work() ran, so it cannot
     * have produced a result yet. */
    assert(req->result == 0);
    req->result = UV_ECANCELED;
  }

  req->cb(req);
}
1780 
1781 
/* Registers the request with the loop and submits uv__fs_work() to the
 * thread pool on the fast-I/O queue; uv__fs_done() runs on completion.
 */
void uv__fs_post(uv_loop_t* loop, uv_fs_t* req) {
  uv__req_register(loop);
  uv__work_submit(loop,
                  &req->work_req,
                  UV__WORK_FAST_IO,
                  uv__fs_work,
                  uv__fs_done);
}
1790 
1791 
/* access(2) request: `flags` is the accessibility mask passed straight to
 * access() (see the UV_FS_ACCESS dispatch in uv__fs_work()). INIT/PATH/POST
 * are the request setup and submission macros defined earlier in this file.
 */
int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
  INIT(ACCESS);
  PATH;
  req->flags = flags;
  POST;
}
1802 
1803 
/* chmod(2) request: executed as chmod(req->path, req->mode) by
 * uv__fs_work().
 */
int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(CHMOD);
  PATH;
  req->mode = mode;
  POST;
}
1814 
1815 
/* chown(2) request: executed as chown(req->path, req->uid, req->gid) by
 * uv__fs_work().
 */
int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
  INIT(CHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1828 
1829 
/* close(2) request. Asynchronous requests (cb != NULL) are submitted
 * through io_uring when uv__iou_fs_close() accepts them; everything else
 * goes through the regular POST path.
 */
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(CLOSE);
  req->file = file;
  if (cb != NULL && uv__iou_fs_close(loop, req))
    return 0;
  POST;
}
1838 
1839 
/* fchmod(2) request: executed as fchmod(req->file, req->mode) by
 * uv__fs_work().
 */
int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
  INIT(FCHMOD);
  req->file = file;
  req->mode = mode;
  POST;
}
1850 
1851 
/* fchown(2) request: executed as fchown(req->file, req->uid, req->gid) by
 * uv__fs_work().
 */
int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(FCHOWN);
  req->file = file;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1864 
1865 
/* lchown(2) request (does not follow symlinks): executed as
 * lchown(req->path, req->uid, req->gid) by uv__fs_work().
 */
int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(LCHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1878 
1879 
/* fdatasync(2) request. Asynchronous requests try the io_uring fsync path
 * (with the datasync flag) first; everything else goes through POST.
 */
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FDATASYNC);
  req->file = file;
  if (cb != NULL &&
      uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1))
    return 0;
  POST;
}
1888 
1889 
/* fstat(2) request. Asynchronous requests try the io_uring statx path
 * first; everything else goes through POST.
 */
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSTAT);
  req->file = file;
  if (cb != NULL &&
      uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0))
    return 0;
  POST;
}
1898 
1899 
/* fsync(2) request. Asynchronous requests try the io_uring fsync path
 * (no flags) first; everything else goes through POST.
 */
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSYNC);
  req->file = file;
  if (cb != NULL && uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0))
    return 0;
  POST;
}
1908 
1909 
/* ftruncate(2) request: truncates `file` to `off` bytes. Asynchronous
 * requests try the io_uring path first; everything else goes through POST.
 */
int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
  INIT(FTRUNCATE);
  req->file = file;
  req->off = off;
  if (cb != NULL && uv__iou_fs_ftruncate(loop, req))
    return 0;
  POST;
}
1923 
1924 
/* Sets the access/modification times (seconds as doubles) of an open file
 * descriptor; dispatched to uv__fs_futime() by uv__fs_work().
 */
int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(FUTIME);
  req->file = file;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1937 
/* Sets the access/modification times of `path` without following a trailing
 * symlink; dispatched to uv__fs_lutime() by uv__fs_work().
 */
int uv_fs_lutime(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(LUTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1950 
1951 
/* lstat(2) request. Asynchronous requests try the io_uring statx path
 * first; everything else goes through POST.
 */
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(LSTAT);
  PATH;
  if (cb != NULL &&
      uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1))
    return 0;
  POST;
}
1960 
1961 
/* Create a hard link at |new_path| pointing to |path|.  io_uring is used
 * for async requests when available.
 */
int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
  INIT(LINK);
  PATH2;
  if (cb != NULL && uv__iou_fs_link(loop, req))
    return 0;
  POST;
}
1974 
1975 
/* Create the directory |path| with permission bits |mode|.  Async requests
 * are submitted to io_uring when supported, otherwise dispatched via POST.
 */
int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(MKDIR);
  PATH;
  req->mode = mode;
  if (cb != NULL && uv__iou_fs_mkdir(loop, req))
    return 0;
  POST;
}
1989 
1990 
/* Create a unique temporary directory from template |tpl| (mkdtemp(3)).
 * Unlike most uv_fs_*() calls, the path is always copied — even for
 * synchronous requests — because the worker mutates it in place; see the
 * matching exception in uv_fs_req_cleanup().
 */
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKDTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
2001 
2002 
/* Create and open a unique temporary file from template |tpl| (mkstemp(3)).
 * The template is always duplicated — even for synchronous requests —
 * because the worker rewrites it; uv_fs_req_cleanup() frees it.
 */
int uv_fs_mkstemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKSTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
2013 
2014 
/* Open |path| with open(2)-style |flags| and creation |mode|.  Async
 * requests try the io_uring openat fast path before falling back to POST.
 */
int uv_fs_open(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               int flags,
               int mode,
               uv_fs_cb cb) {
  INIT(OPEN);
  PATH;
  req->flags = flags;
  req->mode = mode;
  if (cb != NULL && uv__iou_fs_open(loop, req))
    return 0;
  POST;
}
2030 
2031 
/* Scatter-read from |file| at offset |off| (or the current position when
 * |off| is negative) into |bufs|.  Synchronous calls borrow the caller's
 * buffer array directly; asynchronous calls copy it so the caller's array
 * need not outlive this call, then try the io_uring fast path.
 */
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
               uv_file file,
               const uv_buf_t bufs[],
               unsigned int nbufs,
               int64_t off,
               uv_fs_cb cb) {
  INIT(READ);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->off = off;
  req->file = file;
  req->nbufs = nbufs;
  req->bufs = (uv_buf_t*) bufs;  /* Safe, doesn't mutate |bufs| */

  if (cb != NULL) {
    /* Async path: snapshot the buffer list.  Small counts reuse the
     * inline bufsml storage; larger ones are heap-allocated and freed
     * by uv_fs_req_cleanup(). */
    req->bufs = req->bufsml;
    if (nbufs > ARRAY_SIZE(req->bufsml))
      req->bufs = uv__malloc(nbufs * sizeof(*bufs));

    if (req->bufs == NULL)
      return UV_ENOMEM;

    memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

    if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1))
      return 0;
  }

  POST;
}
2066 
2067 
/* Read the entries of directory |path| in one shot; results are consumed
 * with uv_fs_scandir_next().  |flags| is currently unused by the worker
 * but stored for API symmetry.
 */
int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SCANDIR);
  PATH;
  req->flags = flags;
  POST;
}
2078 
/* Open directory |path| for incremental iteration with uv_fs_readdir().
 * On success req->ptr holds a uv_dir_t owned by the caller (note the
 * OPENDIR exception in uv_fs_req_cleanup()).
 */
int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
  INIT(OPENDIR);
  PATH;
  POST;
}
2087 
/* Read the next batch of entries from |dir| (previously opened with
 * uv_fs_opendir()) into dir->dirents.  The caller must have set up the
 * dirents array; otherwise the request is rejected with UV_EINVAL.
 */
int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
  INIT(READDIR);

  /* Short-circuit order matters: dir must be non-NULL before its fields
   * are inspected. */
  if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
2100 
/* Close a directory handle obtained from uv_fs_opendir() and release its
 * resources.
 */
int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
  INIT(CLOSEDIR);

  if (dir == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
2113 
/* Read the target of the symbolic link at |path|; on success req->ptr
 * points to the NUL-terminated target string.
 */
int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(READLINK);
  PATH;
  POST;
}
2122 
2123 
/* Resolve |path| to a canonical absolute path (realpath(3)); on success
 * req->ptr points to the resolved string.
 */
int uv_fs_realpath(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char * path,
                  uv_fs_cb cb) {
  INIT(REALPATH);
  PATH;
  POST;
}
2132 
2133 
/* Rename |path| to |new_path| (rename(2)).  Async requests are submitted
 * to io_uring when available.
 */
int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
  INIT(RENAME);
  PATH2;
  if (cb != NULL && uv__iou_fs_rename(loop, req))
    return 0;
  POST;
}
2146 
2147 
/* Remove the empty directory at |path| (rmdir(2)). */
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(RMDIR);
  PATH;
  POST;
}
2153 
2154 
/* Copy |len| bytes from |in_fd| starting at |off| to |out_fd|, using a
 * sendfile(2)-style kernel copy where the platform supports it.
 */
int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
  INIT(SENDFILE);
  req->flags = in_fd; /* hack: smuggle the source fd in the flags field */
  req->file = out_fd;
  req->off = off;
  req->bufsml[0].len = len;  /* byte count rides in the first inline buf */
  POST;
}
2169 
2170 
/* stat(2): stat |path|, following symlinks.  Async requests try the
 * io_uring statx fast path before falling back to POST.
 */
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(STAT);
  PATH;
  if (cb != NULL && uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0))
    return 0;
  POST;
}
2179 
2180 
/* Create a symlink at |new_path| pointing to |path|.  |flags| carries the
 * UV_FS_SYMLINK_* options (used on platforms that distinguish link types).
 * Async requests are submitted to io_uring when available.
 */
int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SYMLINK);
  PATH2;
  req->flags = flags;
  if (cb != NULL && uv__iou_fs_symlink(loop, req))
    return 0;
  POST;
}
2195 
2196 
/* Remove the file at |path| (unlink(2)).  Async requests are submitted to
 * io_uring when available.
 */
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(UNLINK);
  PATH;
  if (cb != NULL && uv__iou_fs_unlink(loop, req))
    return 0;
  POST;
}
2205 
2206 
/* Set the access and modification times of |path|, following symlinks.
 * Times are epoch seconds with fractional precision.
 */
int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
  INIT(UTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
2219 
2220 
/* Gather-write |bufs| to |file| at offset |off| (or the current position
 * when |off| is negative).  Unlike uv_fs_read(), the buffer list is ALWAYS
 * copied — even for synchronous calls — so the caller's array may go out
 * of scope as soon as this returns.
 */
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
  INIT(WRITE);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;
  req->off = off;
  req->nbufs = nbufs;

  /* Small counts reuse the inline bufsml storage; larger ones are
   * heap-allocated and freed by uv_fs_req_cleanup(). */
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  if (cb != NULL && uv__iou_fs_read_or_write(loop, req, /* is_read */ 0))
    return 0;

  POST;
}
2253 
2254 
/* Release all memory owned by |req| after its operation has completed.
 * NULL is a no-op; the function is idempotent (freed fields are nulled).
 */
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asynchronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory.  UV_FS_MKDTEMP and
   * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
   */
  if (req->path != NULL &&
      (req->cb != NULL ||
        req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
    uv__free((void*) req->path);  /* Memory is shared with req->new_path. */

  req->path = NULL;
  req->new_path = NULL;

  /* Type-specific dirent state hangs off req->ptr; release it before the
   * generic req->ptr free at the bottom. */
  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  /* req->bufs points at the inline bufsml array unless a large iovec count
   * forced a heap allocation in uv_fs_read()/uv_fs_write(). */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  /* OPENDIR transfers ownership of req->ptr (the uv_dir_t) to the caller,
   * and &req->statbuf is embedded storage — neither may be freed here. */
  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}
2286 
2287 
/* Copy the file at |path| to |new_path|.  Only the documented
 * UV_FS_COPYFILE_* flags are accepted; any other bit is rejected with
 * UV_EINVAL before paths are duplicated.
 */
int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
  INIT(COPYFILE);

  if (flags & ~(UV_FS_COPYFILE_EXCL |
                UV_FS_COPYFILE_FICLONE |
                UV_FS_COPYFILE_FICLONE_FORCE)) {
    return UV_EINVAL;
  }

  PATH2;
  req->flags = flags;
  POST;
}
2306 
2307 
/* Query filesystem statistics for the filesystem containing |path|; on
 * success req->ptr points to a uv_statfs_t.
 */
int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
  INIT(STATFS);
  PATH;
  POST;
}
2316 
/* Return the platform error number for a completed request.  On failure
 * req->result holds the negated errno (see the dispatcher note at the top
 * of this file), so negating it recovers the positive errno value.
 */
int uv_fs_get_system_error(const uv_fs_t* req) {
  return -req->result;
}
2320