1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 /* Caveat emptor: this file deviates from the libuv convention of returning
23 * negated errno codes. Most uv_fs_*() functions map directly to the system
24 * call of the same name. For more complex wrappers, it's easier to just
25 * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26 * getting the errno to the right place (req->result or as the return value.)
27 */
28
29 #include "uv.h"
30 #include "internal.h"
31
32 #include <errno.h>
33 #include <dlfcn.h>
34 #include <stdatomic.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <limits.h> /* PATH_MAX */
39
40 #include <sys/types.h>
41 #include <sys/socket.h>
42 #include <sys/stat.h>
43 #include <sys/time.h>
44 #include <sys/uio.h>
45 #include <unistd.h>
46 #include <fcntl.h>
47 #include <poll.h>
48
49 #if defined(__linux__)
50 # include <sys/sendfile.h>
51 #endif
52
53 #if defined(__sun)
54 # include <sys/sendfile.h>
55 # include <sys/sysmacros.h>
56 #endif
57
58 #if defined(__APPLE__)
59 # include <sys/sysctl.h>
60 #elif defined(__linux__) && !defined(FICLONE)
61 # include <sys/ioctl.h>
62 # define FICLONE _IOW(0x94, 9, int)
63 #endif
64
65 #if defined(_AIX) && !defined(_AIX71)
66 # include <utime.h>
67 #endif
68
69 #if defined(__APPLE__) || \
70 defined(__DragonFly__) || \
71 defined(__FreeBSD__) || \
72 defined(__OpenBSD__) || \
73 defined(__NetBSD__)
74 # include <sys/param.h>
75 # include <sys/mount.h>
76 #elif defined(__sun) || \
77 defined(__MVS__) || \
78 defined(__NetBSD__) || \
79 defined(__HAIKU__) || \
80 defined(__QNX__)
81 # include <sys/statvfs.h>
82 #else
83 # include <sys/statfs.h>
84 #endif
85
86 #if defined(_AIX) && _XOPEN_SOURCE <= 600
87 extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
88 #endif
89
/* Shared prologue for every uv_fs_*() public entry point: validates `req`
 * and resets all request fields for operation UV_FS_<subtype>. Expects
 * `loop`, `req` and `cb` to be in scope at the expansion site.
 */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)
105
/* Record `path` on the request. Synchronous requests (cb == NULL) borrow the
 * caller's string; asynchronous requests take a private copy because the
 * caller's buffer may be gone by the time the worker thread runs.
 */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
118
/* Record `path` and `new_path` on the request. As with PATH, synchronous
 * requests borrow the caller's strings. Asynchronous requests copy both
 * strings into a single allocation; req->new_path points into that buffer,
 * directly after path's NUL terminator, so freeing req->path releases both.
 */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)
138
/* Trailing dispatch for every uv_fs_*() entry point. Asynchronous requests
 * (cb != NULL) are registered and queued on the fast-I/O worker pool and 0
 * is returned immediately; synchronous requests run uv__fs_work() inline
 * and return the operation's result directly.
 */
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop, req);                                            \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
156
157
/* Close `fd`, treating an interrupted (EINTR) or still-in-flight
 * (EINPROGRESS) close as success: the descriptor state is unspecified
 * after such a failure and retrying could close a reused descriptor.
 */
static int uv__fs_close(int fd) {
  int rc = uv__close_nocancel(fd);

  if (rc == -1 && (errno == EINTR || errno == EINPROGRESS))
    return 0;  /* The close is in progress, not an error. */

  return rc;
}
168
169
/* Flush req->file to stable storage. On Apple platforms this tries
 * progressively weaker primitives: F_FULLFSYNC, then F_BARRIERFSYNC,
 * then plain fsync(). Elsewhere it is a plain fsync().
 */
static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
   * to the drive platters. This is in contrast to Linux's fdatasync and fsync
   * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
   * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
   * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
   * This is the same approach taken by sqlite, except sqlite does not issue
   * an F_BARRIERFSYNC call.
   */
  int r;

  r = fcntl(req->file, F_FULLFSYNC);
  if (r != 0)
    /* 85 is F_BARRIERFSYNC's value; the symbolic name may be missing from
     * older SDK headers. */
    r = fcntl(req->file, 85 /* F_BARRIERFSYNC */);  /* fsync + barrier */
  if (r != 0)
    r = fsync(req->file);
  return r;
#else
  return fsync(req->file);
#endif
}
192
193
/* fdatasync() where the platform provides it; otherwise fall back to the
 * platform's closest full-fsync equivalent.
 */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync. */
  return uv__fs_fsync(req);
#else
  return fsync(req->file);
#endif
}
204
205
/* Convert a fractional-seconds timestamp to a struct timespec, truncated to
 * microsecond resolution. The final branch normalizes a negative fractional
 * part so tv_nsec ends up in [0, 1e9) as POSIX requires.
 */
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
  struct timespec ts;
  ts.tv_sec = time;
  ts.tv_nsec = (time - ts.tv_sec) * 1e9;

  /* TODO(bnoordhuis) Remove this. utimesat() has nanosecond resolution but we
   * stick to microsecond resolution for the sake of consistency with other
   * platforms. I'm the original author of this compatibility hack but I'm
   * less convinced it's useful nowadays.
   */
  ts.tv_nsec -= ts.tv_nsec % 1000;

  if (ts.tv_nsec < 0) {
    ts.tv_nsec += 1e9;
    ts.tv_sec -= 1;
  }
  return ts;
}
224
/* Convert a fractional-seconds timestamp to a struct timeval, normalizing a
 * negative fractional part so tv_usec ends up in [0, 1e6).
 */
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
  struct timeval tv;
  tv.tv_sec = time;
  tv.tv_usec = (time - tv.tv_sec) * 1e6;
  if (tv.tv_usec < 0) {
    tv.tv_usec += 1e6;
    tv.tv_sec -= 1;
  }
  return tv;
}
235
/* Set access/modification times on the open descriptor req->file from the
 * fractional-second req->atime / req->mtime fields, using whichever
 * primitive the platform offers (futimens, futimesat, futimes, __fchattr).
 * Returns -1 with errno == ENOSYS on platforms with no such primitive.
 */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__HAIKU__)                                                     \
    || defined(__GNU__)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return futimens(req->file, ts);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__sun)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
# if defined(__sun)
  /* SunOS has no futimes(); futimesat() with a NULL path acts on the fd. */
  return futimesat(req->file, NULL, tv);
# else
  return futimes(req->file, tv);
# endif
#elif defined(__MVS__)
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
272
273
uv__fs_mkdtemp(uv_fs_t * req)274 static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
275 return mkdtemp((char*) req->path) ? 0 : -1;
276 }
277
278
/* Lazily resolved pointer to the C library's mkostemp(3); stays NULL when
 * the symbol is unavailable, in which case callers fall back to mkstemp().
 */
static int (*uv__mkostemp)(char*, int);


/* uv_once() initializer: probe for mkostemp() at run time via dlsym() so
 * the same binary works on systems with and without it.
 */
static void uv__mkostemp_initonce(void) {
  /* z/os doesn't have RTLD_DEFAULT but that's okay
   * because it doesn't have mkostemp(O_CLOEXEC) either.
   */
#ifdef RTLD_DEFAULT
  uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");

  /* We don't care about errors, but we do want to clean them up.
   * If there has been no error, then dlerror() will just return
   * NULL.
   */
  dlerror();
#endif  /* RTLD_DEFAULT */
}
296
297
/* Create a unique temporary file from the "...XXXXXX" template in req->path
 * and return its descriptor, opened close-on-exec. Prefers mkostemp(O_CLOEXEC)
 * when available, falling back to mkstemp() + uv__cloexec() under the loop's
 * cloexec read-lock. On failure the template is clobbered to "" so callers
 * cannot mistake it for a created file.
 */
static int uv__fs_mkstemp(uv_fs_t* req) {
  static uv_once_t once = UV_ONCE_INIT;
  int r;
#ifdef O_CLOEXEC
  /* Set once mkostemp() is seen to reject O_CLOEXEC; later calls then skip
   * straight to the mkstemp() fallback. Relaxed ordering is enough: this is
   * a monotonic best-effort flag, not a synchronization point. */
  static _Atomic int no_cloexec_support;
#endif
  static const char pattern[] = "XXXXXX";
  static const size_t pattern_size = sizeof(pattern) - 1;
  char* path;
  size_t path_length;

  path = (char*) req->path;
  path_length = strlen(path);

  /* EINVAL can be returned for 2 reasons:
      1. The template's last 6 characters were not XXXXXX
      2. open() didn't support O_CLOEXEC
     We want to avoid going to the fallback path in case
     of 1, so it's manually checked before. */
  if (path_length < pattern_size ||
      strcmp(path + path_length - pattern_size, pattern)) {
    errno = EINVAL;
    r = -1;
    goto clobber;
  }

  uv_once(&once, uv__mkostemp_initonce);

#ifdef O_CLOEXEC
  if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 &&
      uv__mkostemp != NULL) {
    r = uv__mkostemp(path, O_CLOEXEC);

    if (r >= 0)
      return r;

    /* If mkostemp() returns EINVAL, it means the kernel doesn't
       support O_CLOEXEC, so we just fallback to mkstemp() below. */
    if (errno != EINVAL)
      goto clobber;

    /* We set the static variable so that next calls don't even
       try to use mkostemp. */
    atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed);
  }
#endif  /* O_CLOEXEC */

  /* The cloexec read-lock closes the fork() race between mkstemp() and
   * uv__cloexec(); only needed for async requests since synchronous ones
   * run on the caller's thread. */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = mkstemp(path);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

clobber:
  if (r < 0)
    path[0] = '\0';
  return r;
}
368
369
/* Open req->path with req->flags/req->mode, guaranteeing the returned
 * descriptor is close-on-exec. Without kernel O_CLOEXEC support, the
 * open() + uv__cloexec() pair is guarded by the loop's cloexec read-lock
 * (presumably write-locked around fork() elsewhere — see the loop code)
 * so a forked child cannot inherit the fd in the window between the calls.
 */
static ssize_t uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC
  return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else  /* O_CLOEXEC */
  int r;

  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
#endif  /* O_CLOEXEC */
}
397
398
/* Emulate preadv()/pwritev() with a sequence of pread()/pwrite() calls, one
 * per iovec, retrying each on EINTR. Returns the total number of bytes
 * transferred; stops early on a short transfer. Returns -1 (with errno set)
 * only when the very first transfer fails — otherwise the byte count so far
 * is reported so the caller knows data moved.
 */
static ssize_t uv__preadv_or_pwritev_emul(int fd,
                                          const struct iovec* bufs,
                                          size_t nbufs,
                                          off_t off,
                                          int is_pread) {
  ssize_t nbytes;
  size_t idx;

  nbytes = 0;
  for (idx = 0; idx < nbufs; idx++) {
    void* base = bufs[idx].iov_base;
    size_t want = bufs[idx].iov_len;
    ssize_t got;

    do {
      got = is_pread ? pread(fd, base, want, off)
                     : pwrite(fd, base, want, off);
    } while (got == -1 && errno == EINTR);

    if (got == -1)
      return nbytes > 0 ? nbytes : -1;

    off += got;
    nbytes += got;

    if ((size_t) got < want)
      break;  /* Short transfer; report what we have. */
  }

  return nbytes;
}
437
438
/* glibc declares preadv/pwritev with an `int` iovec count; most other
 * platforms use size_t. This typedef lets the emulation wrappers below match
 * whichever signature dlsym() may resolve to.
 */
#ifdef __linux__
typedef int uv__iovcnt;
#else
typedef size_t uv__iovcnt;
#endif


/* preadv()-compatible wrapper around the pread()-loop emulation; used when
 * the real preadv symbol cannot be resolved at run time.
 */
static ssize_t uv__preadv_emul(int fd,
                               const struct iovec* bufs,
                               uv__iovcnt nbufs,
                               off_t off) {
  return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/1);
}


/* pwritev()-compatible wrapper around the pwrite()-loop emulation. */
static ssize_t uv__pwritev_emul(int fd,
                                const struct iovec* bufs,
                                uv__iovcnt nbufs,
                                off_t off) {
  return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/0);
}
460
461
/* The function pointer cache is an uintptr_t because _Atomic void*
 * doesn't work on macos/ios/etc...
 */
/* Dispatch to the real preadv()/pwritev() when the C library provides one
 * (resolved once via dlsym and cached in `*cache`), otherwise to the
 * pread/pwrite-loop emulation. Relaxed atomics suffice for the cache: a
 * duplicate resolution by racing threads is harmless since every thread
 * stores an equivalent pointer.
 */
static ssize_t uv__preadv_or_pwritev(int fd,
                                     const struct iovec* bufs,
                                     size_t nbufs,
                                     off_t off,
                                     _Atomic uintptr_t* cache,
                                     int is_pread) {
  ssize_t (*f)(int, const struct iovec*, uv__iovcnt, off_t);
  void* p;

  p = (void*) atomic_load_explicit(cache, memory_order_relaxed);
  if (p == NULL) {
#ifdef RTLD_DEFAULT
    p = dlsym(RTLD_DEFAULT, is_pread ? "preadv" : "pwritev");
    dlerror();  /* Clear errors. */
#endif  /* RTLD_DEFAULT */
    if (p == NULL)
      p = is_pread ? uv__preadv_emul : uv__pwritev_emul;
    atomic_store_explicit(cache, (uintptr_t) p, memory_order_relaxed);
  }

  f = p;
  return f(fd, bufs, nbufs, off);
}
488
489
/* preadv() with run-time fallback; each entry point keeps its own resolved
 * function-pointer cache.
 */
static ssize_t uv__preadv(int fd,
                          const struct iovec* bufs,
                          size_t nbufs,
                          off_t off) {
  static _Atomic uintptr_t cache;
  return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/1);
}
497
498
/* pwritev() with run-time fallback; see uv__preadv(). */
static ssize_t uv__pwritev(int fd,
                           const struct iovec* bufs,
                           size_t nbufs,
                           off_t off) {
  static _Atomic uintptr_t cache;
  return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/0);
}
506
507
/* Read into req->bufs from req->file, choosing read/readv (req->off < 0,
 * i.e. current position) or pread/preadv (positional) as appropriate. The
 * buffer count is clamped to the platform's IOV_MAX. Consumes req->bufs:
 * an async request's heap-allocated list is freed here, and bufs/nbufs are
 * reset in all cases.
 */
static ssize_t uv__fs_read(uv_fs_t* req) {
  const struct iovec* bufs;
  unsigned int iovmax;
  size_t nbufs;
  ssize_t r;
  off_t off;
  int fd;

  fd = req->file;
  off = req->off;
  bufs = (const struct iovec*) req->bufs;
  nbufs = req->nbufs;

  iovmax = uv__getiovmax();
  if (nbufs > iovmax)
    nbufs = iovmax;

  r = 0;  /* nbufs == 0 reads nothing and reports 0 bytes. */
  if (off < 0) {
    if (nbufs == 1)
      r = read(fd, bufs->iov_base, bufs->iov_len);
    else if (nbufs > 1)
      r = readv(fd, bufs, nbufs);
  } else {
    if (nbufs == 1)
      r = pread(fd, bufs->iov_base, bufs->iov_len, off);
    else if (nbufs > 1)
      r = uv__preadv(fd, bufs, nbufs, off);
  }

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
  if (r == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = uv__fstat(fd, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  /* We don't own the buffer list in the synchronous case. */
  if (req->cb != NULL)
    if (req->bufs != req->bufsml)
      uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return r;
}
560
561
uv__fs_scandir_filter(const uv__dirent_t * dent)562 static int uv__fs_scandir_filter(const uv__dirent_t* dent) {
563 return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
564 }
565
566
uv__fs_scandir_sort(const uv__dirent_t ** a,const uv__dirent_t ** b)567 static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
568 return strcmp((*a)->d_name, (*b)->d_name);
569 }
570
571
/* List req->path with scandir(3), filtered of "."/".." and sorted by name.
 * The entry array (system-allocated) is stashed in req->ptr for the result
 * iterator; req->nbufs is repurposed as that iterator's cursor. Returns the
 * entry count, or -1 with errno set on failure.
 */
static ssize_t uv__fs_scandir(uv_fs_t* req) {
  uv__dirent_t** dents;
  int n;

  dents = NULL;
  n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);

  /* NOTE: We will use nbufs as an index field */
  req->nbufs = 0;

  if (n == 0) {
    /* OS X still needs to deallocate some memory.
     * Memory was allocated using the system allocator, so use free() here.
     */
    free(dents);
    dents = NULL;
  } else if (n == -1) {
    return n;
  }

  req->ptr = dents;

  return n;
}
596
/* Allocate a uv_dir_t and open req->path as a directory stream, storing the
 * handle in req->ptr. On any failure the partial allocation is released,
 * req->ptr is cleared and -1 is returned (errno set by malloc/opendir).
 */
static int uv__fs_opendir(uv_fs_t* req) {
  uv_dir_t* dir = uv__malloc(sizeof(*dir));

  if (dir != NULL) {
    dir->dir = opendir(req->path);

    if (dir->dir != NULL) {
      req->ptr = dir;
      return 0;
    }
  }

  uv__free(dir);  /* No-op when dir is NULL. */
  req->ptr = NULL;
  return -1;
}
616
/* Fill the caller-provided dir->dirents array (capacity dir->nentries) with
 * up to that many entries from the open directory stream in req->ptr,
 * skipping "." and "..". Entry names are copied with uv__strdup and owned by
 * the caller. Returns the number of entries stored, or -1 on error after
 * freeing any names copied so far.
 */
static int uv__fs_readdir(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirent;
  struct dirent* res;
  unsigned int dirent_idx;
  unsigned int i;

  dir = req->ptr;
  dirent_idx = 0;

  while (dirent_idx < dir->nentries) {
    /* readdir() returns NULL on end of directory, as well as on error. errno
       is used to differentiate between the two conditions. */
    errno = 0;
    res = readdir(dir->dir);

    if (res == NULL) {
      if (errno != 0)
        goto error;
      break;  /* End of directory. */
    }

    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
      continue;  /* Skip pseudo-entries; do not consume an array slot. */

    dirent = &dir->dirents[dirent_idx];
    dirent->name = uv__strdup(res->d_name);

    if (dirent->name == NULL)
      goto error;

    dirent->type = uv__fs_get_dirent_type(res);
    ++dirent_idx;
  }

  return dirent_idx;

error:
  /* Unwind: release every name duplicated before the failure. */
  for (i = 0; i < dirent_idx; ++i) {
    uv__free((char*) dir->dirents[i].name);
    dir->dirents[i].name = NULL;
  }

  return -1;
}
662
/* Close the directory stream held in req->ptr (if still open) and free the
 * uv_dir_t wrapper. Always succeeds.
 */
static int uv__fs_closedir(uv_fs_t* req) {
  uv_dir_t* dir = req->ptr;

  if (dir->dir != NULL) {
    closedir(dir->dir);
    dir->dir = NULL;
  }

  uv__free(dir);
  req->ptr = NULL;
  return 0;
}
677
/* Stat the filesystem containing req->path into a heap-allocated
 * uv_statfs_t stored in req->ptr. Uses statvfs() on platforms without
 * statfs(); note the #ifdef splits a single `if` condition across both
 * variants. Returns 0 on success, -1 with errno set on failure.
 */
static int uv__fs_statfs(uv_fs_t* req) {
  uv_statfs_t* stat_fs;
#if defined(__sun)      || \
    defined(__MVS__)    || \
    defined(__NetBSD__) || \
    defined(__HAIKU__)  || \
    defined(__QNX__)
  struct statvfs buf;

  if (0 != statvfs(req->path, &buf))
#else
  struct statfs buf;

  if (0 != statfs(req->path, &buf))
#endif  /* defined(__sun) */
    return -1;

  stat_fs = uv__malloc(sizeof(*stat_fs));
  if (stat_fs == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__sun)        || \
    defined(__MVS__)      || \
    defined(__OpenBSD__)  || \
    defined(__NetBSD__)   || \
    defined(__HAIKU__)    || \
    defined(__QNX__)
  stat_fs->f_type = 0;  /* f_type is not supported. */
#else
  stat_fs->f_type = buf.f_type;
#endif
  stat_fs->f_bsize = buf.f_bsize;
  stat_fs->f_blocks = buf.f_blocks;
  stat_fs->f_bfree = buf.f_bfree;
  stat_fs->f_bavail = buf.f_bavail;
  stat_fs->f_files = buf.f_files;
  stat_fs->f_ffree = buf.f_ffree;
  req->ptr = stat_fs;
  return 0;
}
720
uv__fs_pathmax_size(const char * path)721 static ssize_t uv__fs_pathmax_size(const char* path) {
722 ssize_t pathmax;
723
724 pathmax = pathconf(path, _PC_PATH_MAX);
725
726 if (pathmax == -1)
727 pathmax = UV__PATH_MAX;
728
729 return pathmax;
730 }
731
/* Read the target of the symlink at req->path into a freshly allocated,
 * NUL-terminated string stored in req->ptr. The buffer is sized from the
 * path-max heuristic (or, lacking PATH_MAX, from lstat's st_size) and
 * shrunk/grown only in the rare case the result exactly filled it.
 * Returns 0 on success, -1 with errno set on failure.
 */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* We may not have a real PATH_MAX.  Read size of link.  */
  struct stat st;
  int ret;
  ret = uv__lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* According to readlink(2) lstat can report st_size == 0
     for some symlinks, such as those in /proc or /sys.  */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: resize to make room for the trailing nul byte. */
  if (len == maxlen) {
    buf = uv__reallocf(buf, len + 1);

    if (buf == NULL)
      return -1;
  }

  /* readlink() does not NUL-terminate; do it ourselves. */
  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}
790
/* Canonicalize req->path into a uv__malloc'd string stored in req->ptr.
 * On POSIX.1-2008 systems realpath(path, NULL) allocates for us; the result
 * is copied into libuv's allocator so uv_fs_req_cleanup() can free it
 * uniformly. Older systems get a preallocated buffer of the heuristic
 * maximum path size. Returns 0 on success, -1 with errno set on failure.
 */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* buf;
  char* tmp;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  tmp = realpath(req->path, NULL);
  if (tmp == NULL)
    return -1;
  buf = uv__strdup(tmp);
  free(tmp); /* _Not_ uv__free. */
  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }
#else
  ssize_t len;

  (void)tmp;

  len = uv__fs_pathmax_size(req->path);
  buf = uv__malloc(len + 1);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, buf) == NULL) {
    uv__free(buf);
    return -1;
  }
#endif

  req->ptr = buf;

  return 0;
}
828
/* Userspace sendfile() fallback: copy up to req->bufsml[0].len bytes from
 * the source fd (stored in req->flags) to the destination fd (req->file)
 * through an 8 KiB stack buffer, starting at source offset req->off. On
 * anything but total failure req->off is advanced by the bytes consumed.
 * Returns bytes sent, or -1 with errno set.
 */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];

  len = req->bufsml[0].len;
  in_fd = req->flags;   /* NOTE: flags doubles as the source fd here. */
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  /* Here are the rules regarding errors:
   *
   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
   *    The user needs to know that some data has already been sent, to stop
   *    them from sending it twice.
   *
   * 2. Write errors are always reported. Write errors are bad because they
   *    mean data loss: we've read data but now we can't write it out.
   *
   * We try to use pread() and fall back to regular read() if the source fd
   * doesn't support positional reads, for example when it's a pipe fd.
   *
   * If we get EAGAIN when writing to the target fd, we poll() on it until
   * it becomes writable again.
   *
   * FIXME: If we get a write error when use_pread==1, it should be safe to
   *        return the number of sent bytes instead of an error because pread()
   *        is, in theory, idempotent. However, special files in /dev or /proc
   *        may support pread() but not necessarily return the same data on
   *        successive reads.
   *
   * FIXME: There is no way now to signal that we managed to send *some* data
   *        before a write error.
   */
  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;  /* EOF on the source. */

    if (nread == -1) {
      /* EIO/ESPIPE before anything was sent: source likely rejects
       * positional reads; retry sequentially. */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    /* Drain the chunk we just read; a short write loops, EAGAIN polls. */
    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      /* Poll failure or an error/hangup event on the destination. */
      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}
941
942
943 #ifdef __linux__
/* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
 * in copy_file_range() when it shouldn't. There is no workaround except to
 * fall back to a regular copy.
 *
 * Returns non-zero when `fd` lives on CephFS and the running kernel is
 * older than 4.20.0; zero otherwise (including when fstatfs() fails).
 */
static int uv__is_buggy_cephfs(int fd) {
  struct statfs s;

  if (fstatfs(fd, &s) != 0)
    return 0;

  if (s.f_type != 0xC36400)  /* CephFS magic. */
    return 0;

  return uv__kernel_version() < 0x041400;  /* i.e. < 4.20.0 */
}
959
960
/* Report whether `fd` resides on an SMB/SMB2/CIFS mount, identified by the
 * filesystem magic from fstatfs(). Returns 0 when fstatfs() fails.
 */
static int uv__is_cifs_or_smb(int fd) {
  struct statfs s;
  unsigned magic;

  if (fstatfs(fd, &s) != 0)
    return 0;

  magic = (unsigned) s.f_type;

  return magic == 0x0000517Bu ||  /* SMB  */
         magic == 0xFE534D42u ||  /* SMB2 */
         magic == 0xFF534D42u;    /* CIFS */
}
976
977
/* Attempt a kernel-side copy_file_range() of `len` bytes from in_fd@*off to
 * out_fd. On the errors enumerated below, errno is rewritten to ENOSYS so
 * the caller knows to use the sendfile/read-write fallback; a genuine ENOSYS
 * additionally latches a process-wide flag that short-circuits future calls.
 */
static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
                                          int out_fd, size_t len) {
  static _Atomic int no_copy_file_range_support;
  ssize_t r;

  if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) {
    errno = ENOSYS;
    return -1;
  }

  r = uv__fs_copy_file_range(in_fd, off, out_fd, NULL, len, 0);

  if (r != -1)
    return r;

  switch (errno) {
  case EACCES:
    /* Pre-4.20 kernels have a bug where CephFS uses the RADOS
     * copy-from command when it shouldn't.
     */
    if (uv__is_buggy_cephfs(in_fd))
      errno = ENOSYS;  /* Use fallback. */
    break;
  case ENOSYS:
    atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed);
    break;
  case EPERM:
    /* It's been reported that CIFS spuriously fails.
     * Consider it a transient error.
     */
    if (uv__is_cifs_or_smb(out_fd))
      errno = ENOSYS;  /* Use fallback. */
    break;
  case ENOTSUP:
  case EXDEV:
    /* ENOTSUP - it could work on another file system type.
     * EXDEV - it will not work when in_fd and out_fd are not on the same
     *         mounted filesystem (pre Linux 5.3)
     */
    errno = ENOSYS;  /* Use fallback. */
    break;
  }

  return -1;
}
1023
1024 #endif /* __linux__ */
1025
1026
/* Kernel-assisted file-to-fd copy dispatcher. Note the unusual field use:
 * req->flags holds the *source* fd and req->file the *destination* fd.
 * Tries the fastest available primitive (copy_file_range on Linux/FreeBSD,
 * then sendfile) and falls back to uv__fs_sendfile_emul() for errno values
 * that indicate the primitive cannot work for this fd pair. On success,
 * req->off is advanced and the byte count returned.
 */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  in_fd = req->flags;
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;
    size_t len;
    int try_sendfile;

    off = req->off;
    len = req->bufsml[0].len;
    try_sendfile = 1;

#ifdef __linux__
    r = uv__fs_try_copy_file_range(in_fd, &off, out_fd, len);
    /* Only fall through to sendfile() when copy_file_range() reported
     * (or was rewritten to) ENOSYS. */
    try_sendfile = (r == -1 && errno == ENOSYS);
#endif

    if (try_sendfile)
      r = sendfile(out_fd, in_fd, &off, len);

    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
     * it still writes out data. Fortunately, we can detect it by checking if
     * the offset has been updated.
     */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#elif defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
     * non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error.
     */

#if defined(__FreeBSD__) || defined(__DragonFly__)
#if defined(__FreeBSD__)
    off_t off;

    off = req->off;
    r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
    if (r >= 0) {
      r = off - req->off;
      req->off = off;
      return r;
    }
#endif
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#else
    /* The darwin sendfile takes len as an input for the length to send,
     * so make sure to initialize it with the caller's value. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

    /*
     * The man page for sendfile(2) on DragonFly states that `len` contains
     * a meaningful value ONLY in case of EAGAIN and EINTR.
     * Nothing is said about it's value in case of other errors, so better
     * not depend on the potential wrong assumption that is was not modified
     * by the syscall.
     */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}
1134
1135
/* Set access/modification times on req->path (following symlinks) from the
 * fractional-second req->atime / req->mtime fields, using whichever
 * primitive the platform offers. Returns -1 with errno == ENOSYS on
 * platforms with none.
 */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__linux__)                                                         \
    || defined(_AIX71)                                                         \
    || defined(__sun)                                                          \
    || defined(__HAIKU__)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(__APPLE__)                                                       \
    || defined(__DragonFly__)                                                  \
    || defined(__FreeBSD__)                                                    \
    || defined(__NetBSD__)                                                     \
    || defined(__OpenBSD__)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return utimes(req->path, tv);
#elif defined(_AIX)                                                            \
    && !defined(_AIX71)
  /* AIX before 7.1 only offers whole-second utime(). */
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
1173
1174
/* Like uv__fs_utime() but does NOT follow a trailing symlink: times are set
 * on the link itself (utimensat with AT_SYMLINK_NOFOLLOW, or lutimes).
 * Returns -1 with errno == ENOSYS where no such primitive exists.
 */
static ssize_t uv__fs_lutime(uv_fs_t* req) {
#if defined(__linux__)    ||                                                   \
    defined(_AIX71)       ||                                                   \
    defined(__sun)        ||                                                   \
    defined(__HAIKU__)    ||                                                   \
    defined(__GNU__)      ||                                                   \
    defined(__OpenBSD__)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
#elif defined(__APPLE__)      ||                                               \
      defined(__DragonFly__)  ||                                               \
      defined(__FreeBSD__)    ||                                               \
      defined(__NetBSD__)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return lutimes(req->path, tv);
#else
  errno = ENOSYS;
  return -1;
#endif
}
1199
1200
/* Write req->bufs to req->file. A negative req->off means "current file
 * position" (write/writev); otherwise a positional write is used
 * (pwrite/uv__pwritev). Zero buffers writes nothing and reports 0.
 */
static ssize_t uv__fs_write(uv_fs_t* req) {
  const struct iovec* iov = (const struct iovec*) req->bufs;
  size_t cnt = req->nbufs;
  off_t pos = req->off;
  int fd = req->file;

  if (cnt == 0)
    return 0;

  if (pos < 0) {
    return cnt == 1 ? write(fd, iov->iov_base, iov->iov_len)
                    : writev(fd, iov, cnt);
  }

  return cnt == 1 ? pwrite(fd, iov->iov_base, iov->iov_len, pos)
                  : uv__pwritev(fd, iov, cnt, pos);
}
1228
1229
/* Userland copy fallback for uv_fs_copyfile(): open source and destination,
 * optionally attempt an FICLONE reflink (Linux), then stream the contents in
 * SSIZE_MAX-sized chunks via uv_fs_sendfile(). Returns 0 on success or -1
 * with errno set. On any failure the (possibly partially written)
 * destination file is unlinked.
 */
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
  uv_fs_t fs_req;
  uv_file srcfd;
  uv_file dstfd;
  struct stat src_statsbuf;
  struct stat dst_statsbuf;
  int dst_flags;
  int result;
  int err;
  off_t bytes_to_send;
  off_t in_offset;
  off_t bytes_written;
  size_t bytes_chunk;

  dstfd = -1;
  err = 0;

  /* Open the source file. */
  srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
  uv_fs_req_cleanup(&fs_req);

  if (srcfd < 0)
    return srcfd;  /* Already a negated libuv error code. */

  /* Get the source file's mode. */
  if (uv__fstat(srcfd, &src_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  dst_flags = O_WRONLY | O_CREAT;

  if (req->flags & UV_FS_COPYFILE_EXCL)
    dst_flags |= O_EXCL;

  /* Open the destination file, created with the source's mode. */
  dstfd = uv_fs_open(NULL,
                     &fs_req,
                     req->new_path,
                     dst_flags,
                     src_statsbuf.st_mode,
                     NULL);
  uv_fs_req_cleanup(&fs_req);

  if (dstfd < 0) {
    err = dstfd;
    goto out;
  }

  /* If the file is not being opened exclusively, verify that the source and
     destination are not the same file. If they are the same, bail out early. */
  if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
    /* Get the destination file's mode. */
    if (uv__fstat(dstfd, &dst_statsbuf)) {
      err = UV__ERR(errno);
      goto out;
    }

    /* Check if srcfd and dstfd refer to the same file */
    if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
        src_statsbuf.st_ino == dst_statsbuf.st_ino) {
      goto out;  /* Same file: nothing to copy, report success. */
    }

    /* Truncate the file in case the destination already existed. */
    if (ftruncate(dstfd, 0) != 0) {
      err = UV__ERR(errno);

      /* ftruncate() on ceph-fuse fails with EACCES when the file is created
       * with read only permissions. Since ftruncate() on a newly created
       * file is a meaningless operation anyway, detect that condition
       * and squelch the error.
       */
      if (err != UV_EACCES)
        goto out;

      if (dst_statsbuf.st_size > 0)
        goto out;

      err = 0;
    }
  }

  /* Propagate the source file's permission bits to the destination. */
  if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
    err = UV__ERR(errno);
#ifdef __linux__
    /* fchmod() on CIFS shares always fails with EPERM unless the share is
     * mounted with "noperm". As fchmod() is a meaningless operation on such
     * shares anyway, detect that condition and squelch the error.
     */
    if (err != UV_EPERM)
      goto out;

    if (!uv__is_cifs_or_smb(dstfd))
      goto out;

    err = 0;
#else  /* !__linux__ */
    goto out;
#endif  /* !__linux__ */
  }

#ifdef FICLONE
  if (req->flags & UV_FS_COPYFILE_FICLONE ||
      req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    if (ioctl(dstfd, FICLONE, srcfd) == 0) {
      /* ioctl() with FICLONE succeeded. */
      goto out;
    }
    /* If an error occurred and force was set, return the error to the caller;
     * fall back to sendfile() when force was not set. */
    if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
      err = UV__ERR(errno);
      goto out;
    }
  }
#else
  if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    err = UV_ENOSYS;  /* Reflink was demanded but is unavailable here. */
    goto out;
  }
#endif

  /* Stream the file contents in bounded chunks. */
  bytes_to_send = src_statsbuf.st_size;
  in_offset = 0;
  while (bytes_to_send != 0) {
    bytes_chunk = SSIZE_MAX;
    if (bytes_to_send < (off_t) bytes_chunk)
      bytes_chunk = bytes_to_send;
    uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
    bytes_written = fs_req.result;
    uv_fs_req_cleanup(&fs_req);

    if (bytes_written < 0) {
      err = bytes_written;
      break;
    }

    bytes_to_send -= bytes_written;
    in_offset += bytes_written;
  }

out:
  if (err < 0)
    result = err;
  else
    result = 0;

  /* Close the source file. */
  err = uv__close_nocheckstdio(srcfd);

  /* Don't overwrite any existing errors. */
  if (err != 0 && result == 0)
    result = err;

  /* Close the destination file if it is open. */
  if (dstfd >= 0) {
    err = uv__close_nocheckstdio(dstfd);

    /* Don't overwrite any existing errors. */
    if (err != 0 && result == 0)
      result = err;

    /* Remove the destination file if something went wrong. */
    if (result != 0) {
      uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
      /* Ignore the unlink return value, as an error already happened. */
      uv_fs_req_cleanup(&fs_req);
    }
  }

  if (result == 0)
    return 0;

  /* Per the file-header convention: report via errno, return -1. */
  errno = UV__ERR(result);
  return -1;
}
1407
/* Copy a platform `struct stat` into libuv's portable uv_stat_t. The common
 * integer fields map directly; the timestamp/flags block differs per
 * platform, hence the #ifdef ladder below. Platforms without a birth time
 * report st_ctime in st_birthtim; platforms without st_flags/st_gen report 0.
 */
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
  dst->st_dev = src->st_dev;
  dst->st_mode = src->st_mode;
  dst->st_nlink = src->st_nlink;
  dst->st_uid = src->st_uid;
  dst->st_gid = src->st_gid;
  dst->st_rdev = src->st_rdev;
  dst->st_ino = src->st_ino;
  dst->st_size = src->st_size;
  dst->st_blksize = src->st_blksize;
  dst->st_blocks = src->st_blocks;

#if defined(__APPLE__)
  /* macOS spells the nanosecond fields st_*timespec. */
  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
#elif defined(__ANDROID__)
  /* Bionic exposes st_*timensec rather than full timespec structs. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = src->st_atimensec;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = src->st_mtimensec;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = src->st_ctimensec;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = src->st_ctimensec;
  dst->st_flags = 0;
  dst->st_gen = 0;
#elif !defined(_AIX) && \
      !defined(__MVS__) && ( \
      defined(__DragonFly__) || \
      defined(__FreeBSD__) || \
      defined(__OpenBSD__) || \
      defined(__NetBSD__) || \
      defined(_GNU_SOURCE) || \
      defined(_BSD_SOURCE) || \
      defined(_SVID_SOURCE) || \
      defined(_XOPEN_SOURCE) || \
      defined(_DEFAULT_SOURCE))
  /* Platforms/feature-test macros that guarantee struct timespec fields. */
  dst->st_atim.tv_sec = src->st_atim.tv_sec;
  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
# if defined(__FreeBSD__) || \
    defined(__NetBSD__)
  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
# else
  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
  dst->st_flags = 0;
  dst->st_gen = 0;
# endif
#else
  /* Lowest common denominator: second resolution only. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = 0;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = 0;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = 0;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = 0;
  dst->st_flags = 0;
  dst->st_gen = 0;
#endif
}
1484
1485
/* stat via the Linux statx(2) syscall, which also reports birth time.
 * Returns 0 on success, -1 with errno set on a real error, or UV_ENOSYS
 * when statx is unavailable (non-Linux, old kernel, or blocked); callers
 * fall back to plain stat()/lstat()/fstat() on UV_ENOSYS. Unavailability
 * is cached in `no_statx` so the syscall is only probed once per process.
 */
static int uv__fs_statx(int fd,
                        const char* path,
                        int is_fstat,
                        int is_lstat,
                        uv_stat_t* buf) {
  STATIC_ASSERT(UV_ENOSYS != -1);  /* UV_ENOSYS must be distinguishable from -1. */
#ifdef __linux__
  static _Atomic int no_statx;  /* Sticky "statx doesn't work here" flag. */
  struct uv__statx statxbuf;
  int dirfd;
  int flags;
  int mode;
  int rc;

  if (atomic_load_explicit(&no_statx, memory_order_relaxed))
    return UV_ENOSYS;

  dirfd = AT_FDCWD;
  flags = 0; /* AT_STATX_SYNC_AS_STAT */
  mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */

  if (is_fstat) {
    /* fstat semantics: stat the fd itself via an empty path. */
    dirfd = fd;
    flags |= 0x1000; /* AT_EMPTY_PATH */
  }

  if (is_lstat)
    flags |= AT_SYMLINK_NOFOLLOW;

  rc = uv__statx(dirfd, path, flags, mode, &statxbuf);

  switch (rc) {
  case 0:
    break;
  case -1:
    /* EPERM happens when a seccomp filter rejects the system call.
     * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
     * EOPNOTSUPP is used on DVS exported filesystems
     */
    if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
      return -1;
    /* Fall through. */
  default:
    /* Normally on success, zero is returned and On error, -1 is returned.
     * Observed on S390 RHEL running in a docker container with statx not
     * implemented, rc might return 1 with 0 set as the error code in which
     * case we return ENOSYS.
     */
    atomic_store_explicit(&no_statx, 1, memory_order_relaxed);
    return UV_ENOSYS;
  }

  uv__statx_to_stat(&statxbuf, buf);

  return 0;
#else
  return UV_ENOSYS;
#endif /* __linux__ */
}
1545
1546
/* stat() `path` into `buf`, preferring statx() (birth-time support) and
 * falling back to plain stat() when statx is unavailable.
 */
static int uv__fs_stat(const char *path, uv_stat_t *buf) {
  struct stat sb;
  int rc;

  rc = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
  if (rc == UV_ENOSYS) {
    /* statx unavailable: use the classic syscall and convert. */
    rc = uv__stat(path, &sb);
    if (rc == 0)
      uv__to_stat(&sb, buf);
  }

  return rc;
}
1561
1562
/* lstat() `path` into `buf` (does not follow symlinks), preferring statx()
 * and falling back to plain lstat() when statx is unavailable.
 */
static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
  struct stat sb;
  int rc;

  rc = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
  if (rc == UV_ENOSYS) {
    /* statx unavailable: use the classic syscall and convert. */
    rc = uv__lstat(path, &sb);
    if (rc == 0)
      uv__to_stat(&sb, buf);
  }

  return rc;
}
1577
1578
/* fstat() the open descriptor `fd` into `buf`, preferring statx() (via an
 * empty path + AT_EMPTY_PATH) and falling back to plain fstat().
 */
static int uv__fs_fstat(int fd, uv_stat_t *buf) {
  struct stat sb;
  int rc;

  rc = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
  if (rc == UV_ENOSYS) {
    /* statx unavailable: use the classic syscall and convert. */
    rc = uv__fstat(fd, &sb);
    if (rc == 0)
      uv__to_stat(&sb, buf);
  }

  return rc;
}
1593
/* After a partial read/write of `size` bytes across `bufs`, return the index
 * of the first buffer that is not fully consumed, and shrink that buffer in
 * place so a retry resumes exactly where the previous operation stopped.
 */
static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
  size_t idx = 0;

  /* Skip buffers that were consumed in their entirety. */
  while (size > 0 && bufs[idx].len <= size) {
    size -= bufs[idx].len;
    idx++;
  }

  /* Advance into a partially consumed buffer, if any. */
  if (size > 0) {
    bufs[idx].base += size;
    bufs[idx].len -= size;
  }

  return idx;
}
1607
/* Write all of req->bufs, looping over uv__fs_write() in IOV_MAX-sized
 * batches and resuming after partial writes. Returns the total number of
 * bytes written, or the first error if nothing was written. Frees any
 * heap-allocated buffer array and clears req->bufs/req->nbufs on exit.
 */
static ssize_t uv__fs_write_all(uv_fs_t* req) {
  unsigned int iovmax;
  unsigned int nbufs;
  uv_buf_t* bufs;
  ssize_t total;
  ssize_t result;

  iovmax = uv__getiovmax();
  nbufs = req->nbufs;
  bufs = req->bufs;  /* Remember the original array so we can free it. */
  total = 0;

  while (nbufs > 0) {
    /* Clamp this round to what writev() can accept. */
    req->nbufs = nbufs;
    if (req->nbufs > iovmax)
      req->nbufs = iovmax;

    do
      result = uv__fs_write(req);
    while (result < 0 && errno == EINTR);

    if (result <= 0) {
      /* Report the error only if no data was written at all; otherwise
       * return the short count accumulated so far.
       */
      if (total == 0)
        total = result;
      break;
    }

    /* Keep the file offset in step for positional writes. */
    if (req->off >= 0)
      req->off += result;

    /* Advance past fully written buffers; uv__fs_buf_offset() also trims
     * a partially written buffer in place.
     */
    req->nbufs = uv__fs_buf_offset(req->bufs, result);
    req->bufs += req->nbufs;
    nbufs -= req->nbufs;
    total += result;
  }

  if (bufs != req->bufsml)
    uv__free(bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return total;
}
1652
1653
/* Thread-pool work callback: dispatch the request to the matching syscall
 * or uv__fs_* helper, retrying on EINTR (except for close(), whose state is
 * unspecified after EINTR, and read(), which callers may want to abort).
 * Converts -1/errno into a negated error code in req->result; for stat
 * variants it also points req->ptr at the populated statbuf.
 */
static void uv__fs_work(struct uv__work* w) {
  int retry_on_eintr;
  uv_fs_t* req;
  ssize_t r;

  req = container_of(w, uv_fs_t, work_req);
  retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
                     req->fs_type == UV_FS_READ);

  do {
    errno = 0;

/* One case label per request type: run `action`, capture its result. */
#define X(type, action) \
  case UV_FS_ ## type: \
    r = action; \
    break;

    switch (req->fs_type) {
    X(ACCESS, access(req->path, req->flags));
    X(CHMOD, chmod(req->path, req->mode));
    X(CHOWN, chown(req->path, req->uid, req->gid));
    X(CLOSE, uv__fs_close(req->file));
    X(COPYFILE, uv__fs_copyfile(req));
    X(FCHMOD, fchmod(req->file, req->mode));
    X(FCHOWN, fchown(req->file, req->uid, req->gid));
    X(LCHOWN, lchown(req->path, req->uid, req->gid));
    X(FDATASYNC, uv__fs_fdatasync(req));
    X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
    X(FSYNC, uv__fs_fsync(req));
    X(FTRUNCATE, ftruncate(req->file, req->off));
    X(FUTIME, uv__fs_futime(req));
    X(LUTIME, uv__fs_lutime(req));
    X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
    X(LINK, link(req->path, req->new_path));
    X(MKDIR, mkdir(req->path, req->mode));
    X(MKDTEMP, uv__fs_mkdtemp(req));
    X(MKSTEMP, uv__fs_mkstemp(req));
    X(OPEN, uv__fs_open(req));
    X(READ, uv__fs_read(req));
    X(SCANDIR, uv__fs_scandir(req));
    X(OPENDIR, uv__fs_opendir(req));
    X(READDIR, uv__fs_readdir(req));
    X(CLOSEDIR, uv__fs_closedir(req));
    X(READLINK, uv__fs_readlink(req));
    X(REALPATH, uv__fs_realpath(req));
    X(RENAME, rename(req->path, req->new_path));
    X(RMDIR, rmdir(req->path));
    X(SENDFILE, uv__fs_sendfile(req));
    X(STAT, uv__fs_stat(req->path, &req->statbuf));
    X(STATFS, uv__fs_statfs(req));
    X(SYMLINK, symlink(req->path, req->new_path));
    X(UNLINK, unlink(req->path));
    X(UTIME, uv__fs_utime(req));
    X(WRITE, uv__fs_write_all(req));
    default: abort();  /* Unknown fs_type is a programming error. */
    }
#undef X
  } while (r == -1 && errno == EINTR && retry_on_eintr);

  /* See the file-header caveat: -1 means "error code is in errno". */
  if (r == -1)
    req->result = UV__ERR(errno);
  else
    req->result = r;

  if (r == 0 && (req->fs_type == UV_FS_STAT ||
                 req->fs_type == UV_FS_FSTAT ||
                 req->fs_type == UV_FS_LSTAT)) {
    req->ptr = &req->statbuf;
  }
}
1724
1725
uv__fs_done(struct uv__work * w,int status)1726 static void uv__fs_done(struct uv__work* w, int status) {
1727 uv_fs_t* req;
1728
1729 req = container_of(w, uv_fs_t, work_req);
1730 uv__req_unregister(req->loop, req);
1731
1732 if (status == UV_ECANCELED) {
1733 assert(req->result == 0);
1734 req->result = UV_ECANCELED;
1735 }
1736
1737 req->cb(req);
1738 }
1739
1740
/* Register `req` with `loop` and queue it on the thread pool's fast-I/O
 * lane; uv__fs_work runs off-loop, uv__fs_done completes on the loop.
 */
void uv__fs_post(uv_loop_t* loop, uv_fs_t* req) {
  uv__req_register(loop, req);
  uv__work_submit(
      loop, &req->work_req, UV__WORK_FAST_IO, uv__fs_work, uv__fs_done);
}
1749
1750
/* Check accessibility of `path` with access(2) semantics; `flags` is a mask
 * of R_OK/W_OK/X_OK or F_OK. Async when `cb` is non-NULL.
 */
int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
  INIT(ACCESS);
  PATH;
  req->flags = flags;
  POST;
}
1761
1762
/* Change the permission bits of `path` (chmod(2)). Async when `cb` set. */
int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(CHMOD);
  PATH;
  req->mode = mode;
  POST;
}
1773
1774
/* Change the owner/group of `path` (chown(2)). Async when `cb` set. */
int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
  INIT(CHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1787
1788
/* Close `file`. Async requests try the io_uring fast path first and only
 * fall back to the thread pool when that is unavailable.
 */
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(CLOSE);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_close(loop, req))
      return 0;
  POST;
}
1797
1798
/* Change the permission bits of an open descriptor (fchmod(2)). */
int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
  INIT(FCHMOD);
  req->file = file;
  req->mode = mode;
  POST;
}
1809
1810
/* Change the owner/group of an open descriptor (fchown(2)). */
int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(FCHOWN);
  req->file = file;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1823
1824
/* Change the owner/group of a symlink itself, without following it
 * (lchown(2)).
 */
int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(LCHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1837
1838
/* Flush `file`'s data (not necessarily metadata) to storage. Async requests
 * try the io_uring fsync-with-datasync-flag fast path first.
 */
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FDATASYNC);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1))
      return 0;
  POST;
}
1847
1848
/* stat an open descriptor. Async requests try the io_uring statx path. */
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSTAT);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0))
      return 0;
  POST;
}
1857
1858
/* Flush `file`'s data and metadata to storage (fsync(2)). Async requests
 * try the io_uring fast path first.
 */
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSYNC);
  req->file = file;
  if (cb != NULL)
    if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0))
      return 0;
  POST;
}
1867
1868
/* Truncate (or extend) an open descriptor to `off` bytes (ftruncate(2)). */
int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
  INIT(FTRUNCATE);
  req->file = file;
  req->off = off;
  POST;
}
1879
1880
/* Set access/modification times on an open descriptor. Times are seconds
 * as doubles; the fractional part carries sub-second precision.
 */
int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(FUTIME);
  req->file = file;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1893
/* Set access/modification times on a symlink itself (no follow). Times are
 * seconds as doubles; dispatched to uv__fs_lutime() on the worker.
 */
int uv_fs_lutime(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(LUTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1906
1907
/* lstat `path` (no symlink follow). Async requests try io_uring statx. */
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(LSTAT);
  PATH;
  if (cb != NULL)
    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1))
      return 0;
  POST;
}
1916
1917
/* Create a hard link `new_path` pointing at `path` (link(2)). Async
 * requests try the io_uring fast path first.
 */
int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
  INIT(LINK);
  PATH2;
  if (cb != NULL)
    if (uv__iou_fs_link(loop, req))
      return 0;
  POST;
}
1930
1931
/* Create directory `path` with permission bits `mode` (mkdir(2)). Async
 * requests try the io_uring fast path first.
 */
int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(MKDIR);
  PATH;
  req->mode = mode;
  if (cb != NULL)
    if (uv__iou_fs_mkdir(loop, req))
      return 0;
  POST;
}
1945
1946
/* Create a unique temporary directory from template `tpl` (trailing XXXXXX).
 * The template is always copied — even for sync calls — because mkdtemp()
 * rewrites it in place; uv_fs_req_cleanup() frees the copy.
 */
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKDTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1957
1958
/* Create and open a unique temporary file from template `tpl`. Like
 * uv_fs_mkdtemp(), the template is always copied because mkstemp()
 * mutates it; uv_fs_req_cleanup() frees the copy.
 */
int uv_fs_mkstemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKSTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1969
1970
/* Open `path` with open(2)-style `flags` and creation `mode`. Async
 * requests try the io_uring fast path first.
 */
int uv_fs_open(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               int flags,
               int mode,
               uv_fs_cb cb) {
  INIT(OPEN);
  PATH;
  req->flags = flags;
  req->mode = mode;
  if (cb != NULL)
    if (uv__iou_fs_open(loop, req))
      return 0;
  POST;
}
1986
1987
/* Read into `bufs` from `file`, positionally when `off` >= 0. Synchronous
 * calls use the caller's buffer array directly; asynchronous calls copy it
 * (into req->bufsml or a heap array) because the caller's array may not
 * outlive the request, then try the io_uring fast path.
 */
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
               uv_file file,
               const uv_buf_t bufs[],
               unsigned int nbufs,
               int64_t off,
               uv_fs_cb cb) {
  INIT(READ);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->off = off;
  req->file = file;
  req->bufs = (uv_buf_t*) bufs; /* Safe, doesn't mutate |bufs| */
  req->nbufs = nbufs;

  if (cb == NULL)
    goto post;  /* Synchronous: no copy needed. */

  /* Asynchronous: snapshot the buffer descriptors. Small counts fit in the
   * request's inline array; larger ones are heap-allocated.
   */
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1))
    return 0;

post:
  POST;
}
2022
2023
/* Read all entries of directory `path` in one shot (scandir-style). */
int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SCANDIR);
  PATH;
  req->flags = flags;
  POST;
}
2034
/* Open directory `path` for incremental iteration with uv_fs_readdir(). */
int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
  INIT(OPENDIR);
  PATH;
  POST;
}
2043
/* Read the next batch of entries from a directory opened with
 * uv_fs_opendir(); the caller must have set dir->dirents beforehand.
 */
int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
  INIT(READDIR);

  if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
2056
/* Close a directory handle previously opened with uv_fs_opendir(). */
int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
  INIT(CLOSEDIR);

  if (dir == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}
2069
/* Read the target of symlink `path` (readlink(2)); result lands in req->ptr. */
int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(READLINK);
  PATH;
  POST;
}
2078
2079
/* Resolve `path` to a canonical absolute path; result lands in req->ptr. */
int uv_fs_realpath(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char * path,
                   uv_fs_cb cb) {
  INIT(REALPATH);
  PATH;
  POST;
}
2088
2089
/* Rename `path` to `new_path` (rename(2)). Async requests try the
 * io_uring fast path first.
 */
int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
  INIT(RENAME);
  PATH2;
  if (cb != NULL)
    if (uv__iou_fs_rename(loop, req))
      return 0;
  POST;
}
2102
2103
/* Remove the (empty) directory `path` (rmdir(2)). */
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(RMDIR);
  PATH;
  POST;
}
2109
2110
/* Transfer up to `len` bytes from `in_fd` (starting at `off`) to `out_fd`.
 * The request struct has no dedicated fields for a second fd or a length,
 * so in_fd is smuggled through req->flags and len through bufsml[0].len.
 */
int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
  INIT(SENDFILE);
  req->flags = in_fd; /* hack */
  req->file = out_fd;
  req->off = off;
  req->bufsml[0].len = len;
  POST;
}
2125
2126
/* stat `path` (follows symlinks). Async requests try io_uring statx. */
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(STAT);
  PATH;
  if (cb != NULL)
    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0))
      return 0;
  POST;
}
2135
2136
/* Create symlink `new_path` pointing at `path`. `flags` exist for Windows
 * parity; async requests try the io_uring fast path first.
 */
int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SYMLINK);
  PATH2;
  req->flags = flags;
  if (cb != NULL)
    if (uv__iou_fs_symlink(loop, req))
      return 0;
  POST;
}
2151
2152
/* Remove file `path` (unlink(2)). Async requests try io_uring first. */
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(UNLINK);
  PATH;
  if (cb != NULL)
    if (uv__iou_fs_unlink(loop, req))
      return 0;
  POST;
}
2161
2162
/* Set access/modification times of `path` (follows symlinks). Times are
 * seconds as doubles with sub-second fraction.
 */
int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
  INIT(UTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
2175
2176
/* Write `bufs` to `file`, positionally when `off` >= 0. Unlike uv_fs_read(),
 * the buffer array is copied for both sync and async calls because
 * uv__fs_write_all() advances/mutates it while handling partial writes.
 * Async requests try the io_uring fast path first.
 */
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
  INIT(WRITE);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  /* Small counts fit in the request's inline array; larger ones go on
   * the heap. uv__fs_write_all()/uv_fs_req_cleanup() free the heap copy.
   */
  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;

  if (cb != NULL)
    if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 0))
      return 0;

  POST;
}
2209
2210
/* Release all memory owned by a completed (or never-started) request.
 * Safe to call with NULL and safe to call more than once: every freed
 * pointer is reset to NULL afterwards.
 */
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asynchronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
   * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
   */
  if (req->path != NULL &&
      (req->cb != NULL ||
       req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
    uv__free((void*) req->path);  /* Memory is shared with req->new_path. */

  req->path = NULL;
  req->new_path = NULL;

  /* READDIR/SCANDIR keep per-type state behind req->ptr; run their
   * specialized cleanup before the generic ptr handling below.
   */
  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  /* req->ptr may alias the embedded statbuf (never heap) or, for OPENDIR,
   * a uv_dir_t the caller still owns — free it in neither case.
   */
  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}
2242
2243
/* Copy `path` to `new_path`. `flags` may combine UV_FS_COPYFILE_EXCL
 * (fail if destination exists) and the FICLONE reflink hints; any other
 * bit is rejected with UV_EINVAL. Work happens in uv__fs_copyfile().
 */
int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
  INIT(COPYFILE);

  if (flags & ~(UV_FS_COPYFILE_EXCL |
                UV_FS_COPYFILE_FICLONE |
                UV_FS_COPYFILE_FICLONE_FORCE)) {
    return UV_EINVAL;
  }

  PATH2;
  req->flags = flags;
  POST;
}
2262
2263
/* Query filesystem statistics for `path`; result lands in req->ptr. */
int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
  INIT(STATFS);
  PATH;
  POST;
}
2272
/* Return the platform error for a failed request. On Unix, libuv error
 * codes are negated errno values, so negating req->result recovers errno.
 */
int uv_fs_get_system_error(const uv_fs_t* req) {
  return -req->result;
}
2276