xref: /libuv/src/unix/udp.c (revision 77e4cd5b)
1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 #include "uv.h"
23 #include "internal.h"
24 
25 #include <assert.h>
26 #include <string.h>
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <unistd.h>
30 #if defined(__MVS__)
31 #include <xti.h>
32 #endif
33 #include <sys/un.h>
34 
35 #if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
36 # define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
37 #endif
38 
39 #if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP)
40 # define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
41 #endif
42 
43 static void uv__udp_run_completed(uv_udp_t* handle);
44 static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
45 static void uv__udp_recvmsg(uv_udp_t* handle);
46 static void uv__udp_sendmsg(uv_udp_t* handle);
47 static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
48                                        int domain,
49                                        unsigned int flags);
50 
51 
52 void uv__udp_close(uv_udp_t* handle) {
53   uv__io_close(handle->loop, &handle->io_watcher);
54   uv__handle_stop(handle);
55 
56   if (handle->io_watcher.fd != -1) {
57     uv__close(handle->io_watcher.fd);
58     handle->io_watcher.fd = -1;
59   }
60 }
61 
62 
63 void uv__udp_finish_close(uv_udp_t* handle) {
64   uv_udp_send_t* req;
65   struct uv__queue* q;
66 
67   assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
68   assert(handle->io_watcher.fd == -1);
69 
70   while (!uv__queue_empty(&handle->write_queue)) {
71     q = uv__queue_head(&handle->write_queue);
72     uv__queue_remove(q);
73 
74     req = uv__queue_data(q, uv_udp_send_t, queue);
75     req->status = UV_ECANCELED;
76     uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
77   }
78 
79   uv__udp_run_completed(handle);
80 
81   assert(handle->send_queue_size == 0);
82   assert(handle->send_queue_count == 0);
83 
84   /* Now tear down the handle. */
85   handle->recv_cb = NULL;
86   handle->alloc_cb = NULL;
87   /* but _do not_ touch close_cb */
88 }
89 
90 
91 static void uv__udp_run_completed(uv_udp_t* handle) {
92   uv_udp_send_t* req;
93   struct uv__queue* q;
94 
95   assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
96   handle->flags |= UV_HANDLE_UDP_PROCESSING;
97 
98   while (!uv__queue_empty(&handle->write_completed_queue)) {
99     q = uv__queue_head(&handle->write_completed_queue);
100     uv__queue_remove(q);
101 
102     req = uv__queue_data(q, uv_udp_send_t, queue);
103     uv__req_unregister(handle->loop, req);
104 
105     handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
106     handle->send_queue_count--;
107 
108     if (req->bufs != req->bufsml)
109       uv__free(req->bufs);
110     req->bufs = NULL;
111 
112     if (req->send_cb == NULL)
113       continue;
114 
115     /* req->status >= 0 == bytes written
116      * req->status <  0 == errno
117      */
118     if (req->status >= 0)
119       req->send_cb(req, 0);
120     else
121       req->send_cb(req, req->status);
122   }
123 
124   if (uv__queue_empty(&handle->write_queue)) {
125     /* Pending queue and completion queue empty, stop watcher. */
126     uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
127     if (!uv__io_active(&handle->io_watcher, POLLIN))
128       uv__handle_stop(handle);
129   }
130 
131   handle->flags &= ~UV_HANDLE_UDP_PROCESSING;
132 }
133 
134 
135 static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
136   uv_udp_t* handle;
137 
138   handle = container_of(w, uv_udp_t, io_watcher);
139   assert(handle->type == UV_UDP);
140 
141   if (revents & POLLIN)
142     uv__udp_recvmsg(handle);
143 
144   if (revents & POLLOUT && !uv__is_closing(handle)) {
145     uv__udp_sendmsg(handle);
146     uv__udp_run_completed(handle);
147   }
148 }
149 
150 static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
151 #if defined(__linux__) || defined(__FreeBSD__)
152   struct sockaddr_in6 peers[20];
153   struct iovec iov[ARRAY_SIZE(peers)];
154   struct mmsghdr msgs[ARRAY_SIZE(peers)];
155   ssize_t nread;
156   uv_buf_t chunk_buf;
157   size_t chunks;
158   int flags;
159   size_t k;
160 
161   /* prepare structures for recvmmsg */
162   chunks = buf->len / UV__UDP_DGRAM_MAXSIZE;
163   if (chunks > ARRAY_SIZE(iov))
164     chunks = ARRAY_SIZE(iov);
165   for (k = 0; k < chunks; ++k) {
166     iov[k].iov_base = buf->base + k * UV__UDP_DGRAM_MAXSIZE;
167     iov[k].iov_len = UV__UDP_DGRAM_MAXSIZE;
168     memset(&msgs[k].msg_hdr, 0, sizeof(msgs[k].msg_hdr));
169     msgs[k].msg_hdr.msg_iov = iov + k;
170     msgs[k].msg_hdr.msg_iovlen = 1;
171     msgs[k].msg_hdr.msg_name = peers + k;
172     msgs[k].msg_hdr.msg_namelen = sizeof(peers[0]);
173     msgs[k].msg_hdr.msg_control = NULL;
174     msgs[k].msg_hdr.msg_controllen = 0;
175     msgs[k].msg_hdr.msg_flags = 0;
176   }
177 
178   do
179     nread = recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL);
180   while (nread == -1 && errno == EINTR);
181 
182   if (nread < 1) {
183     if (nread == 0 || errno == EAGAIN || errno == EWOULDBLOCK)
184       handle->recv_cb(handle, 0, buf, NULL, 0);
185     else
186       handle->recv_cb(handle, UV__ERR(errno), buf, NULL, 0);
187   } else {
188     /* pass each chunk to the application */
189     for (k = 0; k < (size_t) nread && handle->recv_cb != NULL; k++) {
190       flags = UV_UDP_MMSG_CHUNK;
191       if (msgs[k].msg_hdr.msg_flags & MSG_TRUNC)
192         flags |= UV_UDP_PARTIAL;
193 
194       chunk_buf = uv_buf_init(iov[k].iov_base, iov[k].iov_len);
195       handle->recv_cb(handle,
196                       msgs[k].msg_len,
197                       &chunk_buf,
198                       msgs[k].msg_hdr.msg_name,
199                       flags);
200     }
201 
202     /* one last callback so the original buffer is freed */
203     if (handle->recv_cb != NULL)
204       handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
205   }
206   return nread;
207 #else  /* __linux__ || __FreeBSD__ */
208   return UV_ENOSYS;
209 #endif  /* __linux__ || __FreeBSD__ */
210 }
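/* Editorial sketch (not part of udp.c): a user-side alloc_cb/recv_cb pair for
 * the recvmmsg path above, assuming the handle was created with
 * uv_udp_init_ex(loop, &handle, AF_INET | UV_UDP_RECVMMSG).  Each datagram is
 * delivered with UV_UDP_MMSG_CHUNK set, and the buffer may only be freed on a
 * callback without that flag (the final UV_UDP_MMSG_FREE call, an error, or a
 * plain non-chunked read).  The example_* names are hypothetical. */
static void example_mmsg_alloc(uv_handle_t* handle,
                               size_t suggested_size,
                               uv_buf_t* buf) {
  /* Hand out room for several 64 KiB chunks so recvmmsg() can batch. */
  buf->base = malloc(4 * 64 * 1024);
  buf->len = buf->base == NULL ? 0 : 4 * 64 * 1024;
}

static void example_mmsg_recv(uv_udp_t* handle,
                              ssize_t nread,
                              const uv_buf_t* buf,
                              const struct sockaddr* addr,
                              unsigned flags) {
  if (nread > 0 && addr != NULL) {
    /* One datagram of nread bytes is in buf->base[0..nread). */
  }

  if (flags & UV_UDP_MMSG_CHUNK)
    return;  /* more chunks from the same allocation may follow */

  free(buf->base);  /* final callback for this allocation */
}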
211 
212 static void uv__udp_recvmsg(uv_udp_t* handle) {
213   struct sockaddr_storage peer;
214   struct msghdr h;
215   ssize_t nread;
216   uv_buf_t buf;
217   int flags;
218   int count;
219 
220   assert(handle->recv_cb != NULL);
221   assert(handle->alloc_cb != NULL);
222 
223   /* Prevent loop starvation when the data comes in as fast as (or faster than)
224    * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
225    */
226   count = 32;
227 
228   do {
229     buf = uv_buf_init(NULL, 0);
230     handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
231     if (buf.base == NULL || buf.len == 0) {
232       handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
233       return;
234     }
235     assert(buf.base != NULL);
236 
237     if (uv_udp_using_recvmmsg(handle)) {
238       nread = uv__udp_recvmmsg(handle, &buf);
239       if (nread > 0)
240         count -= nread;
241       continue;
242     }
243 
244     memset(&h, 0, sizeof(h));
245     memset(&peer, 0, sizeof(peer));
246     h.msg_name = &peer;
247     h.msg_namelen = sizeof(peer);
248     h.msg_iov = (void*) &buf;
249     h.msg_iovlen = 1;
250 
251     do {
252       nread = recvmsg(handle->io_watcher.fd, &h, 0);
253     }
254     while (nread == -1 && errno == EINTR);
255 
256     if (nread == -1) {
257       if (errno == EAGAIN || errno == EWOULDBLOCK)
258         handle->recv_cb(handle, 0, &buf, NULL, 0);
259       else
260         handle->recv_cb(handle, UV__ERR(errno), &buf, NULL, 0);
261     }
262     else {
263       flags = 0;
264       if (h.msg_flags & MSG_TRUNC)
265         flags |= UV_UDP_PARTIAL;
266 
267       handle->recv_cb(handle, nread, &buf, (const struct sockaddr*) &peer, flags);
268     }
269     count--;
270   }
271   /* recv_cb callback may decide to pause or close the handle */
272   while (nread != -1
273       && count > 0
274       && handle->io_watcher.fd != -1
275       && handle->recv_cb != NULL);
276 }
277 
278 static void uv__udp_sendmsg_one(uv_udp_t* handle, uv_udp_send_t* req) {
279   struct uv__queue* q;
280   struct msghdr h;
281   ssize_t size;
282 
283   for (;;) {
284     memset(&h, 0, sizeof h);
285     if (req->addr.ss_family == AF_UNSPEC) {
286       h.msg_name = NULL;
287       h.msg_namelen = 0;
288     } else {
289       h.msg_name = &req->addr;
290       if (req->addr.ss_family == AF_INET6)
291         h.msg_namelen = sizeof(struct sockaddr_in6);
292       else if (req->addr.ss_family == AF_INET)
293         h.msg_namelen = sizeof(struct sockaddr_in);
294       else if (req->addr.ss_family == AF_UNIX)
295         h.msg_namelen = sizeof(struct sockaddr_un);
296       else {
297         assert(0 && "unsupported address family");
298         abort();
299       }
300     }
301     h.msg_iov = (struct iovec*) req->bufs;
302     h.msg_iovlen = req->nbufs;
303 
304     do
305       size = sendmsg(handle->io_watcher.fd, &h, 0);
306     while (size == -1 && errno == EINTR);
307 
308     if (size == -1)
309       if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
310         return;
311 
312     req->status = (size == -1 ? UV__ERR(errno) : size);
313 
314     /* Sending a datagram is an atomic operation: either all data
315      * is written or nothing is (and EMSGSIZE is raised). That is
316      * why we don't handle partial writes. Just pop the request
317      * off the write queue and onto the completed queue, done.
318      */
319     uv__queue_remove(&req->queue);
320     uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
321     uv__io_feed(handle->loop, &handle->io_watcher);
322 
323     if (uv__queue_empty(&handle->write_queue))
324       return;
325 
326     q = uv__queue_head(&handle->write_queue);
327     req = uv__queue_data(q, uv_udp_send_t, queue);
328   }
329 }
330 
331 #if defined(__linux__) || defined(__FreeBSD__)
332 static void uv__udp_sendmsg_many(uv_udp_t* handle) {
333   uv_udp_send_t* req;
334   struct mmsghdr h[20];
335   struct mmsghdr* p;
336   struct uv__queue* q;
337   ssize_t npkts;
338   size_t pkts;
339   size_t i;
340 
341 write_queue_drain:
342   for (pkts = 0, q = uv__queue_head(&handle->write_queue);
343        pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
344        ++pkts, q = uv__queue_head(q)) {
345     req = uv__queue_data(q, uv_udp_send_t, queue);
346 
347     p = &h[pkts];
348     memset(p, 0, sizeof(*p));
349     if (req->addr.ss_family == AF_UNSPEC) {
350       p->msg_hdr.msg_name = NULL;
351       p->msg_hdr.msg_namelen = 0;
352     } else {
353       p->msg_hdr.msg_name = &req->addr;
354       if (req->addr.ss_family == AF_INET6)
355         p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in6);
356       else if (req->addr.ss_family == AF_INET)
357         p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in);
358       else if (req->addr.ss_family == AF_UNIX)
359         p->msg_hdr.msg_namelen = sizeof(struct sockaddr_un);
360       else {
361         assert(0 && "unsupported address family");
362         abort();
363       }
364     }
365     h[pkts].msg_hdr.msg_iov = (struct iovec*) req->bufs;
366     h[pkts].msg_hdr.msg_iovlen = req->nbufs;
367   }
368 
369   do
370     npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0);
371   while (npkts == -1 && errno == EINTR);
372 
373   if (npkts < 1) {
374     if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
375       return;
376     for (i = 0, q = uv__queue_head(&handle->write_queue);
377          i < pkts && q != &handle->write_queue;
378          ++i, q = uv__queue_head(&handle->write_queue)) {
379       req = uv__queue_data(q, uv_udp_send_t, queue);
380       req->status = UV__ERR(errno);
381       uv__queue_remove(&req->queue);
382       uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
383     }
384     uv__io_feed(handle->loop, &handle->io_watcher);
385     return;
386   }
387 
388   /* Safety: npkts is known to be > 0 here, so the cast from ssize_t
389    * to size_t is safe.
390    */
391   for (i = 0, q = uv__queue_head(&handle->write_queue);
392        i < (size_t)npkts && q != &handle->write_queue;
393        ++i, q = uv__queue_head(&handle->write_queue)) {
394     req = uv__queue_data(q, uv_udp_send_t, queue);
395     req->status = req->bufs[0].len;
396 
397     /* Sending a datagram is an atomic operation: either all data
398      * is written or nothing is (and EMSGSIZE is raised). That is
399      * why we don't handle partial writes. Just pop the request
400      * off the write queue and onto the completed queue, done.
401      */
402     uv__queue_remove(&req->queue);
403     uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
404   }
405 
406   /* couldn't batch everything, continue sending (jump to avoid stack growth) */
407   if (!uv__queue_empty(&handle->write_queue))
408     goto write_queue_drain;
409 
410   uv__io_feed(handle->loop, &handle->io_watcher);
411 }
412 #endif  /* __linux__ || __FreeBSD__ */
413 
414 static void uv__udp_sendmsg(uv_udp_t* handle) {
415   struct uv__queue* q;
416   uv_udp_send_t* req;
417 
418   if (uv__queue_empty(&handle->write_queue))
419     return;
420 
421   q = uv__queue_head(&handle->write_queue);
422   req = uv__queue_data(q, uv_udp_send_t, queue);
423 
424 #if defined(__linux__) || defined(__FreeBSD__)
425   /* Use sendmmsg() if this send request contains more than one datagram OR
426    * there is more than one send request (because that automatically implies
427    * there is more than one datagram.)
428    */
429   if (req->nbufs != 1 || &handle->write_queue != uv__queue_next(&req->queue))
430     return uv__udp_sendmsg_many(handle);
431 #endif
432 
433   return uv__udp_sendmsg_one(handle, req);
434 }
435 
436 /* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
437  * refinements for programs that use multicast.
438  *
439  * Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that
440  * are different from the BSDs: it _shares_ the port rather than stealing it
441  * from the current listener.  While useful, it's not something we can emulate
442  * on other platforms so we don't enable it.
443  *
444  * z/OS does not support getsockname() with the SO_REUSEPORT option when
445  * using AF_UNIX.
446  */
447 static int uv__set_reuse(int fd) {
448   int yes;
449   yes = 1;
450 
451 #if defined(SO_REUSEPORT) && defined(__MVS__)
452   struct sockaddr_in sockfd;
453   unsigned int sockfd_len = sizeof(sockfd);
454   if (getsockname(fd, (struct sockaddr*) &sockfd, &sockfd_len) == -1)
455       return UV__ERR(errno);
456   if (sockfd.sin_family == AF_UNIX) {
457     if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
458       return UV__ERR(errno);
459   } else {
460     if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
461        return UV__ERR(errno);
462   }
463 #elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) && \
464 	!defined(__sun__)
465   if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
466     return UV__ERR(errno);
467 #else
468   if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
469     return UV__ERR(errno);
470 #endif
471 
472   return 0;
473 }
474 
475 /*
476  * The Linux kernel suppresses some ICMP error messages by default for UDP
477  * sockets. Setting IP_RECVERR/IPV6_RECVERR on the socket enables full ICMP
478  * error reporting, hopefully resulting in faster failover to working name
479  * servers.
480  */
481 static int uv__set_recverr(int fd, sa_family_t ss_family) {
482 #if defined(__linux__)
483   int yes;
484 
485   yes = 1;
486   if (ss_family == AF_INET) {
487     if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &yes, sizeof(yes)))
488       return UV__ERR(errno);
489   } else if (ss_family == AF_INET6) {
490     if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &yes, sizeof(yes)))
491        return UV__ERR(errno);
492   }
493 #endif
494   return 0;
495 }
496 
497 
498 int uv__udp_bind(uv_udp_t* handle,
499                  const struct sockaddr* addr,
500                  unsigned int addrlen,
501                  unsigned int flags) {
502   int err;
503   int yes;
504   int fd;
505 
506   /* Check for bad flags. */
507   if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR | UV_UDP_LINUX_RECVERR))
508     return UV_EINVAL;
509 
510   /* Cannot set IPv6-only mode on non-IPv6 socket. */
511   if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6)
512     return UV_EINVAL;
513 
514   fd = handle->io_watcher.fd;
515   if (fd == -1) {
516     err = uv__socket(addr->sa_family, SOCK_DGRAM, 0);
517     if (err < 0)
518       return err;
519     fd = err;
520     handle->io_watcher.fd = fd;
521   }
522 
523   if (flags & UV_UDP_LINUX_RECVERR) {
524     err = uv__set_recverr(fd, addr->sa_family);
525     if (err)
526       return err;
527   }
528 
529   if (flags & UV_UDP_REUSEADDR) {
530     err = uv__set_reuse(fd);
531     if (err)
532       return err;
533   }
534 
535   if (flags & UV_UDP_IPV6ONLY) {
536 #ifdef IPV6_V6ONLY
537     yes = 1;
538     if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
539       err = UV__ERR(errno);
540       return err;
541     }
542 #else
543     err = UV_ENOTSUP;
544     return err;
545 #endif
546   }
547 
548   if (bind(fd, addr, addrlen)) {
549     err = UV__ERR(errno);
550     if (errno == EAFNOSUPPORT)
551       /* macOS, other BSDs, and SunOS fail with EAFNOSUPPORT when binding a
552        * socket created with AF_INET to an AF_INET6 address or vice versa. */
553       err = UV_EINVAL;
554     return err;
555   }
556 
557   if (addr->sa_family == AF_INET6)
558     handle->flags |= UV_HANDLE_IPV6;
559 
560   handle->flags |= UV_HANDLE_BOUND;
561   return 0;
562 }
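/* Editorial sketch (not part of udp.c): binding through the public API with
 * the flags validated above.  UV_UDP_REUSEADDR maps onto uv__set_reuse() and
 * UV_UDP_LINUX_RECVERR onto uv__set_recverr() (a no-op off Linux); the loop
 * parameter, port and example_bind_v6only name are hypothetical. */
static int example_bind_v6only(uv_loop_t* loop, uv_udp_t* handle) {
  struct sockaddr_in6 addr;
  int err;

  err = uv_udp_init(loop, handle);
  if (err)
    return err;

  err = uv_ip6_addr("::", 9999, &addr);
  if (err)
    return err;

  /* IPv6-only listener that allows other sockets to share the address and,
   * on Linux, asks the kernel for full ICMP error reporting. */
  return uv_udp_bind(handle,
                     (const struct sockaddr*) &addr,
                     UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR | UV_UDP_LINUX_RECVERR);
}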
563 
564 
565 static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
566                                        int domain,
567                                        unsigned int flags) {
568   union uv__sockaddr taddr;
569   socklen_t addrlen;
570 
571   if (handle->io_watcher.fd != -1)
572     return 0;
573 
574   switch (domain) {
575   case AF_INET:
576   {
577     struct sockaddr_in* addr = &taddr.in;
578     memset(addr, 0, sizeof *addr);
579     addr->sin_family = AF_INET;
580     addr->sin_addr.s_addr = INADDR_ANY;
581     addrlen = sizeof *addr;
582     break;
583   }
584   case AF_INET6:
585   {
586     struct sockaddr_in6* addr = &taddr.in6;
587     memset(addr, 0, sizeof *addr);
588     addr->sin6_family = AF_INET6;
589     addr->sin6_addr = in6addr_any;
590     addrlen = sizeof *addr;
591     break;
592   }
593   default:
594     assert(0 && "unsupported address family");
595     abort();
596   }
597 
598   return uv__udp_bind(handle, &taddr.addr, addrlen, flags);
599 }
600 
601 
602 int uv__udp_connect(uv_udp_t* handle,
603                     const struct sockaddr* addr,
604                     unsigned int addrlen) {
605   int err;
606 
607   err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
608   if (err)
609     return err;
610 
611   do {
612     errno = 0;
613     err = connect(handle->io_watcher.fd, addr, addrlen);
614   } while (err == -1 && errno == EINTR);
615 
616   if (err)
617     return UV__ERR(errno);
618 
619   handle->flags |= UV_HANDLE_UDP_CONNECTED;
620 
621   return 0;
622 }
623 
624 /* From https://pubs.opengroup.org/onlinepubs/9699919799/functions/connect.html
625  * The connect() behavior used for disconnecting should be standardized across
626  * the UNIX kernels libuv supports, but the implementations differ, so the
627  * pseudocode below explains the UDP disconnect behaviors:
628  *
629  * Predefined stubs for pseudocode:
630  *   1. sodisconnect: The function to perform the real udp disconnect
631  *   2. pru_connect: The function to perform the real udp connect
632  *   3. so: The kernel object match with socket fd
633  *   4. addr: The sockaddr parameter from user space
634  *
635  * BSDs:
636  *   if(sodisconnect(so) == 0) { // UDP disconnect succeeded
637  *     if (addr->sa_len != so->addr->sa_len) return EINVAL;
638  *     if (addr->sa_family != so->addr->sa_family) return EAFNOSUPPORT;
639  *     pru_connect(so);
640  *   }
641  *   else return EISCONN;
642  *
643  * z/OS (same as Windows):
644  *   if(addr->sa_len < so->addr->sa_len) return EINVAL;
645  *   if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
646  *
647  * AIX:
648  *   if(addr->sa_len != sizeof(struct sockaddr)) return EINVAL; // ignore ip proto version
649  *   if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
650  *
651  * Linux, others:
652  *   if(addr->sa_len < sizeof(struct sockaddr)) return EINVAL;
653  *   if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
654  */
655 int uv__udp_disconnect(uv_udp_t* handle) {
656     int r;
657 #if defined(__MVS__)
658     struct sockaddr_storage addr;
659 #else
660     struct sockaddr addr;
661 #endif
662 
663     memset(&addr, 0, sizeof(addr));
664 
665 #if defined(__MVS__)
666     addr.ss_family = AF_UNSPEC;
667 #else
668     addr.sa_family = AF_UNSPEC;
669 #endif
670 
671     do {
672       errno = 0;
673 #ifdef __PASE__
674       /* On IBMi a connectionless transport socket can be disconnected by
675        * either setting the addr parameter to NULL or setting the
676        * addr_length parameter to zero, and issuing another connect().
677        * https://www.ibm.com/docs/en/i/7.4?topic=ssw_ibm_i_74/apis/connec.htm
678        */
679       r = connect(handle->io_watcher.fd, (struct sockaddr*) NULL, 0);
680 #else
681       r = connect(handle->io_watcher.fd, (struct sockaddr*) &addr, sizeof(addr));
682 #endif
683     } while (r == -1 && errno == EINTR);
684 
685     if (r == -1) {
686 #if defined(BSD)  /* The macro BSD is from sys/param.h */
687       if (errno != EAFNOSUPPORT && errno != EINVAL)
688         return UV__ERR(errno);
689 #else
690       return UV__ERR(errno);
691 #endif
692     }
693 
694     handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
695     return 0;
696 }
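/* Editorial sketch (not part of udp.c): connecting and disconnecting a handle
 * through the public API.  Passing NULL to uv_udp_connect() is what routes
 * into uv__udp_disconnect() above; the peer address and example_connect_cycle
 * name are hypothetical. */
static int example_connect_cycle(uv_udp_t* handle) {
  struct sockaddr_in peer;
  int err;

  err = uv_ip4_addr("127.0.0.1", 5353, &peer);
  if (err)
    return err;

  /* Associate the handle with a fixed peer... */
  err = uv_udp_connect(handle, (const struct sockaddr*) &peer);
  if (err)
    return err;

  /* ...and later dissolve the association (an AF_UNSPEC connect underneath). */
  return uv_udp_connect(handle, NULL);
}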
697 
698 int uv__udp_send(uv_udp_send_t* req,
699                  uv_udp_t* handle,
700                  const uv_buf_t bufs[],
701                  unsigned int nbufs,
702                  const struct sockaddr* addr,
703                  unsigned int addrlen,
704                  uv_udp_send_cb send_cb) {
705   int err;
706   int empty_queue;
707 
708   assert(nbufs > 0);
709 
710   if (addr) {
711     err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
712     if (err)
713       return err;
714   }
715 
716   /* It's legal for send_queue_count > 0 even when the write_queue is empty;
717    * it means there are error-state requests in the write_completed_queue that
718    * will touch up send_queue_size/count later.
719    */
720   empty_queue = (handle->send_queue_count == 0);
721 
722   uv__req_init(handle->loop, req, UV_UDP_SEND);
723   assert(addrlen <= sizeof(req->addr));
724   if (addr == NULL)
725     req->addr.ss_family = AF_UNSPEC;
726   else
727     memcpy(&req->addr, addr, addrlen);
728   req->send_cb = send_cb;
729   req->handle = handle;
730   req->nbufs = nbufs;
731 
732   req->bufs = req->bufsml;
733   if (nbufs > ARRAY_SIZE(req->bufsml))
734     req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));
735 
736   if (req->bufs == NULL) {
737     uv__req_unregister(handle->loop, req);
738     return UV_ENOMEM;
739   }
740 
741   memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
742   handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
743   handle->send_queue_count++;
744   uv__queue_insert_tail(&handle->write_queue, &req->queue);
745   uv__handle_start(handle);
746 
747   if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
748     uv__udp_sendmsg(handle);
749 
750     /* `uv__udp_sendmsg` may not be able to do non-blocking write straight
751      * away. In such cases the `io_watcher` has to be queued for asynchronous
752      * write.
753      */
754     if (!uv__queue_empty(&handle->write_queue))
755       uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
756   } else {
757     uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
758   }
759 
760   return 0;
761 }
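/* Editorial sketch (not part of udp.c): an asynchronous send through the
 * public API.  The request and the data it points at must stay valid until
 * the callback runs; the example_* names are hypothetical. */
static void example_on_send(uv_udp_send_t* req, int status) {
  /* status is 0 on success or a UV_* error, e.g. UV_ECANCELED when the
   * handle was closed while the request was still queued. */
  (void) status;
  free(req);  /* allocated in example_send() below */
}

static int example_send(uv_udp_t* handle, const struct sockaddr* dest) {
  uv_udp_send_t* req;
  uv_buf_t buf;
  int err;

  req = malloc(sizeof(*req));
  if (req == NULL)
    return UV_ENOMEM;

  buf = uv_buf_init((char*) "ping", 4);
  err = uv_udp_send(req, handle, &buf, 1, dest, example_on_send);
  if (err)
    free(req);  /* the callback does not run on synchronous failure */

  return err;
}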
762 
763 
764 int uv__udp_try_send(uv_udp_t* handle,
765                      const uv_buf_t bufs[],
766                      unsigned int nbufs,
767                      const struct sockaddr* addr,
768                      unsigned int addrlen) {
769   int err;
770   struct msghdr h;
771   ssize_t size;
772 
773   assert(nbufs > 0);
774 
775   /* already sending a message */
776   if (handle->send_queue_count != 0)
777     return UV_EAGAIN;
778 
779   if (addr) {
780     err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
781     if (err)
782       return err;
783   } else {
784     assert(handle->flags & UV_HANDLE_UDP_CONNECTED);
785   }
786 
787   memset(&h, 0, sizeof h);
788   h.msg_name = (struct sockaddr*) addr;
789   h.msg_namelen = addrlen;
790   h.msg_iov = (struct iovec*) bufs;
791   h.msg_iovlen = nbufs;
792 
793   do {
794     size = sendmsg(handle->io_watcher.fd, &h, 0);
795   } while (size == -1 && errno == EINTR);
796 
797   if (size == -1) {
798     if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
799       return UV_EAGAIN;
800     else
801       return UV__ERR(errno);
802   }
803 
804   return size;
805 }
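/* Editorial sketch (not part of udp.c): trying a synchronous send first and
 * falling back to the queued path on UV_EAGAIN, which is returned above when
 * the socket is not writable yet or sends are already queued.  The parameter
 * names are hypothetical. */
static int example_try_then_queue(uv_udp_t* handle,
                                  uv_udp_send_t* req,
                                  const uv_buf_t* buf,
                                  const struct sockaddr* dest,
                                  uv_udp_send_cb on_send) {
  int r;

  r = uv_udp_try_send(handle, buf, 1, dest);
  if (r >= 0)
    return 0;  /* the datagram went out immediately */

  if (r != UV_EAGAIN)
    return r;  /* hard error */

  /* Hand off to the asynchronous path, which arms the POLLOUT watcher. */
  return uv_udp_send(req, handle, buf, 1, dest, on_send);
}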
806 
807 
808 static int uv__udp_set_membership4(uv_udp_t* handle,
809                                    const struct sockaddr_in* multicast_addr,
810                                    const char* interface_addr,
811                                    uv_membership membership) {
812   struct ip_mreq mreq;
813   int optname;
814   int err;
815 
816   memset(&mreq, 0, sizeof mreq);
817 
818   if (interface_addr) {
819     err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
820     if (err)
821       return err;
822   } else {
823     mreq.imr_interface.s_addr = htonl(INADDR_ANY);
824   }
825 
826   mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
827 
828   switch (membership) {
829   case UV_JOIN_GROUP:
830     optname = IP_ADD_MEMBERSHIP;
831     break;
832   case UV_LEAVE_GROUP:
833     optname = IP_DROP_MEMBERSHIP;
834     break;
835   default:
836     return UV_EINVAL;
837   }
838 
839   if (setsockopt(handle->io_watcher.fd,
840                  IPPROTO_IP,
841                  optname,
842                  &mreq,
843                  sizeof(mreq))) {
844 #if defined(__MVS__)
845   if (errno == ENXIO)
846     return UV_ENODEV;
847 #endif
848     return UV__ERR(errno);
849   }
850 
851   return 0;
852 }
853 
854 
855 static int uv__udp_set_membership6(uv_udp_t* handle,
856                                    const struct sockaddr_in6* multicast_addr,
857                                    const char* interface_addr,
858                                    uv_membership membership) {
859   int optname;
860   struct ipv6_mreq mreq;
861   struct sockaddr_in6 addr6;
862 
863   memset(&mreq, 0, sizeof mreq);
864 
865   if (interface_addr) {
866     if (uv_ip6_addr(interface_addr, 0, &addr6))
867       return UV_EINVAL;
868     mreq.ipv6mr_interface = addr6.sin6_scope_id;
869   } else {
870     mreq.ipv6mr_interface = 0;
871   }
872 
873   mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;
874 
875   switch (membership) {
876   case UV_JOIN_GROUP:
877     optname = IPV6_ADD_MEMBERSHIP;
878     break;
879   case UV_LEAVE_GROUP:
880     optname = IPV6_DROP_MEMBERSHIP;
881     break;
882   default:
883     return UV_EINVAL;
884   }
885 
886   if (setsockopt(handle->io_watcher.fd,
887                  IPPROTO_IPV6,
888                  optname,
889                  &mreq,
890                  sizeof(mreq))) {
891 #if defined(__MVS__)
892   if (errno == ENXIO)
893     return UV_ENODEV;
894 #endif
895     return UV__ERR(errno);
896   }
897 
898   return 0;
899 }
900 
901 
902 #if !defined(__OpenBSD__) &&                                        \
903     !defined(__NetBSD__) &&                                         \
904     !defined(__ANDROID__) &&                                        \
905     !defined(__DragonFly__) &&                                      \
906     !defined(__QNX__) &&                                            \
907     !defined(__GNU__)
908 static int uv__udp_set_source_membership4(uv_udp_t* handle,
909                                           const struct sockaddr_in* multicast_addr,
910                                           const char* interface_addr,
911                                           const struct sockaddr_in* source_addr,
912                                           uv_membership membership) {
913   struct ip_mreq_source mreq;
914   int optname;
915   int err;
916 
917   err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
918   if (err)
919     return err;
920 
921   memset(&mreq, 0, sizeof(mreq));
922 
923   if (interface_addr != NULL) {
924     err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
925     if (err)
926       return err;
927   } else {
928     mreq.imr_interface.s_addr = htonl(INADDR_ANY);
929   }
930 
931   mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
932   mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;
933 
934   if (membership == UV_JOIN_GROUP)
935     optname = IP_ADD_SOURCE_MEMBERSHIP;
936   else if (membership == UV_LEAVE_GROUP)
937     optname = IP_DROP_SOURCE_MEMBERSHIP;
938   else
939     return UV_EINVAL;
940 
941   if (setsockopt(handle->io_watcher.fd,
942                  IPPROTO_IP,
943                  optname,
944                  &mreq,
945                  sizeof(mreq))) {
946     return UV__ERR(errno);
947   }
948 
949   return 0;
950 }
951 
952 
953 static int uv__udp_set_source_membership6(uv_udp_t* handle,
954                                           const struct sockaddr_in6* multicast_addr,
955                                           const char* interface_addr,
956                                           const struct sockaddr_in6* source_addr,
957                                           uv_membership membership) {
958   struct group_source_req mreq;
959   struct sockaddr_in6 addr6;
960   int optname;
961   int err;
962 
963   err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
964   if (err)
965     return err;
966 
967   memset(&mreq, 0, sizeof(mreq));
968 
969   if (interface_addr != NULL) {
970     err = uv_ip6_addr(interface_addr, 0, &addr6);
971     if (err)
972       return err;
973     mreq.gsr_interface = addr6.sin6_scope_id;
974   } else {
975     mreq.gsr_interface = 0;
976   }
977 
978   STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
979   STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));
980   memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
981   memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));
982 
983   if (membership == UV_JOIN_GROUP)
984     optname = MCAST_JOIN_SOURCE_GROUP;
985   else if (membership == UV_LEAVE_GROUP)
986     optname = MCAST_LEAVE_SOURCE_GROUP;
987   else
988     return UV_EINVAL;
989 
990   if (setsockopt(handle->io_watcher.fd,
991                  IPPROTO_IPV6,
992                  optname,
993                  &mreq,
994                  sizeof(mreq))) {
995     return UV__ERR(errno);
996   }
997 
998   return 0;
999 }
1000 #endif
1001 
1002 
1003 int uv__udp_init_ex(uv_loop_t* loop,
1004                     uv_udp_t* handle,
1005                     unsigned flags,
1006                     int domain) {
1007   int fd;
1008 
1009   fd = -1;
1010   if (domain != AF_UNSPEC) {
1011     fd = uv__socket(domain, SOCK_DGRAM, 0);
1012     if (fd < 0)
1013       return fd;
1014   }
1015 
1016   uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP);
1017   handle->alloc_cb = NULL;
1018   handle->recv_cb = NULL;
1019   handle->send_queue_size = 0;
1020   handle->send_queue_count = 0;
1021   uv__io_init(&handle->io_watcher, uv__udp_io, fd);
1022   uv__queue_init(&handle->write_queue);
1023   uv__queue_init(&handle->write_completed_queue);
1024 
1025   return 0;
1026 }
1027 
1028 
1029 int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
1030 #if defined(__linux__) || defined(__FreeBSD__)
1031   if (handle->flags & UV_HANDLE_UDP_RECVMMSG)
1032     return 1;
1033 #endif
1034   return 0;
1035 }
1036 
1037 
1038 int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
1039   int err;
1040 
1041   /* Check for already active socket. */
1042   if (handle->io_watcher.fd != -1)
1043     return UV_EBUSY;
1044 
1045   if (uv__fd_exists(handle->loop, sock))
1046     return UV_EEXIST;
1047 
1048   err = uv__nonblock(sock, 1);
1049   if (err)
1050     return err;
1051 
1052   err = uv__set_reuse(sock);
1053   if (err)
1054     return err;
1055 
1056   handle->io_watcher.fd = sock;
1057   if (uv__udp_is_connected(handle))
1058     handle->flags |= UV_HANDLE_UDP_CONNECTED;
1059 
1060   return 0;
1061 }
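/* Editorial sketch (not part of udp.c): adopting a datagram socket created
 * outside libuv.  uv_udp_open() switches it to non-blocking mode and applies
 * the reuse settings above; example_adopt_fd is a hypothetical name. */
static int example_adopt_fd(uv_loop_t* loop, uv_udp_t* handle) {
  int fd;
  int err;

  fd = socket(AF_INET, SOCK_DGRAM, 0);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv_udp_init(loop, handle);
  if (err == 0)
    err = uv_udp_open(handle, fd);

  if (err)
    close(fd);  /* on failure the caller still owns the descriptor */

  return err;
}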
1062 
1063 
1064 int uv_udp_set_membership(uv_udp_t* handle,
1065                           const char* multicast_addr,
1066                           const char* interface_addr,
1067                           uv_membership membership) {
1068   int err;
1069   struct sockaddr_in addr4;
1070   struct sockaddr_in6 addr6;
1071 
1072   if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) {
1073     err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
1074     if (err)
1075       return err;
1076     return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
1077   } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) {
1078     err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
1079     if (err)
1080       return err;
1081     return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
1082   } else {
1083     return UV_EINVAL;
1084   }
1085 }
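/* Editorial sketch (not part of udp.c): joining and leaving an IPv4 multicast
 * group on the default interface (a NULL interface_addr means INADDR_ANY).
 * The group 239.255.0.1 and the example_toggle_membership name are arbitrary
 * illustrations. */
static int example_toggle_membership(uv_udp_t* handle) {
  int err;

  err = uv_udp_set_membership(handle, "239.255.0.1", NULL, UV_JOIN_GROUP);
  if (err)
    return err;

  return uv_udp_set_membership(handle, "239.255.0.1", NULL, UV_LEAVE_GROUP);
}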
1086 
1087 
1088 int uv_udp_set_source_membership(uv_udp_t* handle,
1089                                  const char* multicast_addr,
1090                                  const char* interface_addr,
1091                                  const char* source_addr,
1092                                  uv_membership membership) {
1093 #if !defined(__OpenBSD__) &&                                        \
1094     !defined(__NetBSD__) &&                                         \
1095     !defined(__ANDROID__) &&                                        \
1096     !defined(__DragonFly__) &&                                      \
1097     !defined(__QNX__) &&                                            \
1098     !defined(__GNU__)
1099   int err;
1100   union uv__sockaddr mcast_addr;
1101   union uv__sockaddr src_addr;
1102 
1103   err = uv_ip4_addr(multicast_addr, 0, &mcast_addr.in);
1104   if (err) {
1105     err = uv_ip6_addr(multicast_addr, 0, &mcast_addr.in6);
1106     if (err)
1107       return err;
1108     err = uv_ip6_addr(source_addr, 0, &src_addr.in6);
1109     if (err)
1110       return err;
1111     return uv__udp_set_source_membership6(handle,
1112                                           &mcast_addr.in6,
1113                                           interface_addr,
1114                                           &src_addr.in6,
1115                                           membership);
1116   }
1117 
1118   err = uv_ip4_addr(source_addr, 0, &src_addr.in);
1119   if (err)
1120     return err;
1121   return uv__udp_set_source_membership4(handle,
1122                                         &mcast_addr.in,
1123                                         interface_addr,
1124                                         &src_addr.in,
1125                                         membership);
1126 #else
1127   return UV_ENOSYS;
1128 #endif
1129 }
1130 
1131 
1132 static int uv__setsockopt(uv_udp_t* handle,
1133                          int option4,
1134                          int option6,
1135                          const void* val,
1136                          socklen_t size) {
1137   int r;
1138 
1139   if (handle->flags & UV_HANDLE_IPV6)
1140     r = setsockopt(handle->io_watcher.fd,
1141                    IPPROTO_IPV6,
1142                    option6,
1143                    val,
1144                    size);
1145   else
1146     r = setsockopt(handle->io_watcher.fd,
1147                    IPPROTO_IP,
1148                    option4,
1149                    val,
1150                    size);
1151   if (r)
1152     return UV__ERR(errno);
1153 
1154   return 0;
1155 }
1156 
1157 static int uv__setsockopt_maybe_char(uv_udp_t* handle,
1158                                      int option4,
1159                                      int option6,
1160                                      int val) {
1161 #if defined(__sun) || defined(_AIX) || defined(__MVS__)
1162   char arg = val;
1163 #elif defined(__OpenBSD__)
1164   unsigned char arg = val;
1165 #else
1166   int arg = val;
1167 #endif
1168 
1169   if (val < 0 || val > 255)
1170     return UV_EINVAL;
1171 
1172   return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
1173 }
1174 
1175 
1176 int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
1177   if (setsockopt(handle->io_watcher.fd,
1178                  SOL_SOCKET,
1179                  SO_BROADCAST,
1180                  &on,
1181                  sizeof(on))) {
1182     return UV__ERR(errno);
1183   }
1184 
1185   return 0;
1186 }
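/* Editorial sketch (not part of udp.c): SO_BROADCAST must be enabled before
 * sending to a broadcast address, otherwise the send fails with EACCES.
 * The example_broadcast_ping name, port and payload are hypothetical. */
static int example_broadcast_ping(uv_udp_t* handle) {
  struct sockaddr_in dest;
  uv_buf_t buf;
  int err;

  err = uv_udp_set_broadcast(handle, 1);
  if (err)
    return err;

  err = uv_ip4_addr("255.255.255.255", 9999, &dest);
  if (err)
    return err;

  buf = uv_buf_init((char*) "ping", 4);
  err = uv_udp_try_send(handle, &buf, 1, (const struct sockaddr*) &dest);
  return err < 0 ? err : 0;
}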
1187 
1188 
1189 int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
1190   if (ttl < 1 || ttl > 255)
1191     return UV_EINVAL;
1192 
1193 #if defined(__MVS__)
1194   if (!(handle->flags & UV_HANDLE_IPV6))
1195     return UV_ENOTSUP;  /* z/OS does not support setting the TTL for IPv4 */
1196 #endif
1197 
1198 /*
1199  * On Solaris and derivatives such as SmartOS, the length of socket options
1200  * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS,
1201  * so hardcode the size of these options on this platform,
1202  * and use the general uv__setsockopt_maybe_char call on other platforms.
1203  */
1204 #if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1205     defined(__MVS__) || defined(__QNX__)
1206 
1207   return uv__setsockopt(handle,
1208                         IP_TTL,
1209                         IPV6_UNICAST_HOPS,
1210                         &ttl,
1211                         sizeof(ttl));
1212 
1213 #else /* !(defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
1214            defined(__MVS__) || defined(__QNX__)) */
1215 
1216   return uv__setsockopt_maybe_char(handle,
1217                                    IP_TTL,
1218                                    IPV6_UNICAST_HOPS,
1219                                    ttl);
1220 
1221 #endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
1222           defined(__MVS__) || defined(__QNX__) */
1223 }
1224 
1225 
1226 int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
1227 /*
1228  * On Solaris and derivatives such as SmartOS, the length of socket options
1229  * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for
1230  * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case,
1231  * and use the general uv__setsockopt_maybe_char call otherwise.
1232  */
1233 #if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1234     defined(__MVS__) || defined(__QNX__)
1235   if (handle->flags & UV_HANDLE_IPV6)
1236     return uv__setsockopt(handle,
1237                           IP_MULTICAST_TTL,
1238                           IPV6_MULTICAST_HOPS,
1239                           &ttl,
1240                           sizeof(ttl));
1241 #endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1242     defined(__MVS__) || defined(__QNX__) */
1243 
1244   return uv__setsockopt_maybe_char(handle,
1245                                    IP_MULTICAST_TTL,
1246                                    IPV6_MULTICAST_HOPS,
1247                                    ttl);
1248 }
1249 
1250 
1251 int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
1252 /*
1253  * On Solaris and derivatives such as SmartOS, the length of socket options
1254  * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for
1255  * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case,
1256  * and use the general uv__setsockopt_maybe_char call otherwise.
1257  */
1258 #if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1259     defined(__MVS__) || defined(__QNX__)
1260   if (handle->flags & UV_HANDLE_IPV6)
1261     return uv__setsockopt(handle,
1262                           IP_MULTICAST_LOOP,
1263                           IPV6_MULTICAST_LOOP,
1264                           &on,
1265                           sizeof(on));
1266 #endif /* defined(__sun) || defined(_AIX) ||defined(__OpenBSD__) ||
1267     defined(__MVS__) || defined(__QNX__) */
1268 
1269   return uv__setsockopt_maybe_char(handle,
1270                                    IP_MULTICAST_LOOP,
1271                                    IPV6_MULTICAST_LOOP,
1272                                    on);
1273 }
1274 
1275 int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
1276   struct sockaddr_storage addr_st;
1277   struct sockaddr_in* addr4;
1278   struct sockaddr_in6* addr6;
1279 
1280   addr4 = (struct sockaddr_in*) &addr_st;
1281   addr6 = (struct sockaddr_in6*) &addr_st;
1282 
1283   if (!interface_addr) {
1284     memset(&addr_st, 0, sizeof addr_st);
1285     if (handle->flags & UV_HANDLE_IPV6) {
1286       addr_st.ss_family = AF_INET6;
1287       addr6->sin6_scope_id = 0;
1288     } else {
1289       addr_st.ss_family = AF_INET;
1290       addr4->sin_addr.s_addr = htonl(INADDR_ANY);
1291     }
1292   } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
1293     /* nothing, address was parsed */
1294   } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
1295     /* nothing, address was parsed */
1296   } else {
1297     return UV_EINVAL;
1298   }
1299 
1300   if (addr_st.ss_family == AF_INET) {
1301     if (setsockopt(handle->io_watcher.fd,
1302                    IPPROTO_IP,
1303                    IP_MULTICAST_IF,
1304                    (void*) &addr4->sin_addr,
1305                    sizeof(addr4->sin_addr)) == -1) {
1306       return UV__ERR(errno);
1307     }
1308   } else if (addr_st.ss_family == AF_INET6) {
1309     if (setsockopt(handle->io_watcher.fd,
1310                    IPPROTO_IPV6,
1311                    IPV6_MULTICAST_IF,
1312                    &addr6->sin6_scope_id,
1313                    sizeof(addr6->sin6_scope_id)) == -1) {
1314       return UV__ERR(errno);
1315     }
1316   } else {
1317     assert(0 && "unexpected address family");
1318     abort();
1319   }
1320 
1321   return 0;
1322 }
1323 
1324 int uv_udp_getpeername(const uv_udp_t* handle,
1325                        struct sockaddr* name,
1326                        int* namelen) {
1327 
1328   return uv__getsockpeername((const uv_handle_t*) handle,
1329                              getpeername,
1330                              name,
1331                              namelen);
1332 }
1333 
1334 int uv_udp_getsockname(const uv_udp_t* handle,
1335                        struct sockaddr* name,
1336                        int* namelen) {
1337 
1338   return uv__getsockpeername((const uv_handle_t*) handle,
1339                              getsockname,
1340                              name,
1341                              namelen);
1342 }
1343 
1344 
1345 int uv__udp_recv_start(uv_udp_t* handle,
1346                        uv_alloc_cb alloc_cb,
1347                        uv_udp_recv_cb recv_cb) {
1348   int err;
1349 
1350   if (alloc_cb == NULL || recv_cb == NULL)
1351     return UV_EINVAL;
1352 
1353   if (uv__io_active(&handle->io_watcher, POLLIN))
1354     return UV_EALREADY;  /* FIXME(bnoordhuis) Should be UV_EBUSY. */
1355 
1356   err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
1357   if (err)
1358     return err;
1359 
1360   handle->alloc_cb = alloc_cb;
1361   handle->recv_cb = recv_cb;
1362 
1363   uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
1364   uv__handle_start(handle);
1365 
1366   return 0;
1367 }
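/* Editorial sketch (not part of udp.c): the plain (non-recvmmsg) receive
 * contract.  Every callback gets a buffer from alloc_cb; nread == 0 with
 * addr == NULL simply means "nothing left to read" and the buffer must still
 * be released.  The example_* names are hypothetical. */
static void example_plain_alloc(uv_handle_t* handle,
                                size_t suggested_size,
                                uv_buf_t* buf) {
  buf->base = malloc(suggested_size);
  buf->len = buf->base == NULL ? 0 : suggested_size;
}

static void example_plain_recv(uv_udp_t* handle,
                               ssize_t nread,
                               const uv_buf_t* buf,
                               const struct sockaddr* addr,
                               unsigned flags) {
  if (nread > 0 && !(flags & UV_UDP_PARTIAL)) {
    /* A complete datagram of nread bytes from addr is in buf->base. */
  } else if (nread < 0) {
    uv_udp_recv_stop(handle);  /* e.g. UV_ENOBUFS or a socket error */
  }

  free(buf->base);  /* free(NULL) is a no-op for the UV_ENOBUFS case */
}

/* Typical usage: uv_udp_recv_start(&handle, example_plain_alloc,
 * example_plain_recv); later uv_udp_recv_stop(&handle). */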
1368 
1369 
1370 int uv__udp_recv_stop(uv_udp_t* handle) {
1371   uv__io_stop(handle->loop, &handle->io_watcher, POLLIN);
1372 
1373   if (!uv__io_active(&handle->io_watcher, POLLOUT))
1374     uv__handle_stop(handle);
1375 
1376   handle->alloc_cb = NULL;
1377   handle->recv_cb = NULL;
1378 
1379   return 0;
1380 }
1381