1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 #include "uv.h"
23 #include "internal.h"
24
25 #include <assert.h>
26 #include <string.h>
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <unistd.h>
30 #if defined(__MVS__)
31 #include <xti.h>
32 #endif
33 #include <sys/un.h>
34
35 #if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
36 # define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
37 #endif
38
39 #if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP)
40 # define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
41 #endif
42
43 static void uv__udp_run_completed(uv_udp_t* handle);
44 static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
45 static void uv__udp_recvmsg(uv_udp_t* handle);
46 static void uv__udp_sendmsg(uv_udp_t* handle);
47 static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
48 int domain,
49 unsigned int flags);
50
51
/* Close the socket backing `handle` and detach it from the event loop.
 * Safe to call when the handle never acquired a descriptor (fd == -1).
 */
void uv__udp_close(uv_udp_t* handle) {
  int fd;

  uv__io_close(handle->loop, &handle->io_watcher);
  uv__handle_stop(handle);

  fd = handle->io_watcher.fd;
  if (fd != -1) {
    uv__close(fd);
    handle->io_watcher.fd = -1;
  }
}
61
62
/* Final teardown step once the watcher is fully closed: fail every send
 * request still pending in the write queue with UV_ECANCELED, run all
 * completion callbacks, then clear the receive callbacks. The close_cb is
 * deliberately left alone — the generic close path still needs it.
 */
void uv__udp_finish_close(uv_udp_t* handle) {
  struct uv__queue* node;
  uv_udp_send_t* send_req;

  assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
  assert(handle->io_watcher.fd == -1);

  /* Move every still-pending send onto the completed queue as canceled. */
  while (!uv__queue_empty(&handle->write_queue)) {
    node = uv__queue_head(&handle->write_queue);
    uv__queue_remove(node);

    send_req = uv__queue_data(node, uv_udp_send_t, queue);
    send_req->status = UV_ECANCELED;
    uv__queue_insert_tail(&handle->write_completed_queue, &send_req->queue);
  }

  uv__udp_run_completed(handle);

  /* All requests drained; the byte/count accounting must be back at zero. */
  assert(handle->send_queue_size == 0);
  assert(handle->send_queue_count == 0);

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
89
90
/* Drain write_completed_queue: for each finished (or canceled) send request,
 * update the queued-bytes accounting, release any heap-allocated buffer
 * array and invoke the user's send callback. Stops the POLLOUT watcher once
 * both queues are empty. The UV_HANDLE_UDP_PROCESSING flag prevents
 * reentrant draining while user callbacks are running (uv__udp_send checks
 * it before kicking off a synchronous send).
 */
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  struct uv__queue* q;

  assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
  handle->flags |= UV_HANDLE_UDP_PROCESSING;

  while (!uv__queue_empty(&handle->write_completed_queue)) {
    q = uv__queue_head(&handle->write_completed_queue);
    uv__queue_remove(q);

    req = uv__queue_data(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop);

    /* Undo the accounting done when the request was queued in uv__udp_send. */
    handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
    handle->send_queue_count--;

    /* bufs is heap-allocated only when nbufs exceeded the inline bufsml. */
    if (req->bufs != req->bufsml)
      uv__free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status < 0 == errno
     */
    if (req->status >= 0)
      req->send_cb(req, 0);
    else
      req->send_cb(req, req->status);
  }

  if (uv__queue_empty(&handle->write_queue)) {
    /* Pending queue and completion queue empty, stop watcher. */
    uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
    if (!uv__io_active(&handle->io_watcher, POLLIN))
      uv__handle_stop(handle);
  }

  handle->flags &= ~UV_HANDLE_UDP_PROCESSING;
}
133
134
/* I/O watcher callback: route readiness events to the UDP receive and send
 * paths. Writes are skipped once the handle has started closing.
 */
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
  uv_udp_t* udp;

  udp = container_of(w, uv_udp_t, io_watcher);
  assert(udp->type == UV_UDP);

  if (revents & POLLIN)
    uv__udp_recvmsg(udp);

  if ((revents & POLLOUT) && !uv__is_closing(udp)) {
    uv__udp_sendmsg(udp);
    uv__udp_run_completed(udp);
  }
}
149
/* Receive a batch of datagrams in one syscall — recvmmsg(2) on Linux and
 * FreeBSD, recvmsg_x() on macOS — by carving the single user-supplied buffer
 * into UV__UDP_DGRAM_MAXSIZE-sized chunks. Each received chunk is handed to
 * recv_cb with the UV_UDP_MMSG_CHUNK flag; a final zero-length callback with
 * UV_UDP_MMSG_FREE lets the application release the original buffer.
 * Returns the number of datagrams read, -1 on error (already reported via
 * recv_cb), or UV_ENOSYS where no batching syscall exists.
 */
static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
  struct sockaddr_in6 peers[20];  /* sockaddr_in6 is large enough for v4 peers too */
  struct iovec iov[ARRAY_SIZE(peers)];
  struct mmsghdr msgs[ARRAY_SIZE(peers)];
  ssize_t nread;
  uv_buf_t chunk_buf;
  size_t chunks;
  int flags;
  size_t k;

  /* prepare structures for recvmmsg */
  chunks = buf->len / UV__UDP_DGRAM_MAXSIZE;
  if (chunks > ARRAY_SIZE(iov))
    chunks = ARRAY_SIZE(iov);
  for (k = 0; k < chunks; ++k) {
    iov[k].iov_base = buf->base + k * UV__UDP_DGRAM_MAXSIZE;
    iov[k].iov_len = UV__UDP_DGRAM_MAXSIZE;
    memset(&msgs[k].msg_hdr, 0, sizeof(msgs[k].msg_hdr));
    msgs[k].msg_hdr.msg_iov = iov + k;
    msgs[k].msg_hdr.msg_iovlen = 1;
    msgs[k].msg_hdr.msg_name = peers + k;
    msgs[k].msg_hdr.msg_namelen = sizeof(peers[0]);
    msgs[k].msg_hdr.msg_control = NULL;
    msgs[k].msg_hdr.msg_controllen = 0;
    msgs[k].msg_hdr.msg_flags = 0;
    msgs[k].msg_len = 0;
  }

#if defined(__APPLE__)
  do
    nread = recvmsg_x(handle->io_watcher.fd, msgs, chunks, MSG_DONTWAIT);
  while (nread == -1 && errno == EINTR);
#else
  do
    nread = recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL);
  while (nread == -1 && errno == EINTR);
#endif

  if (nread < 1) {
    /* nread == 0 should not happen for SOCK_DGRAM; treat it like EAGAIN. */
    if (nread == 0 || errno == EAGAIN || errno == EWOULDBLOCK)
      handle->recv_cb(handle, 0, buf, NULL, 0);
    else
      handle->recv_cb(handle, UV__ERR(errno), buf, NULL, 0);
  } else {
    /* pass each chunk to the application */
    for (k = 0; k < (size_t) nread && handle->recv_cb != NULL; k++) {
      flags = UV_UDP_MMSG_CHUNK;
      if (msgs[k].msg_hdr.msg_flags & MSG_TRUNC)
        flags |= UV_UDP_PARTIAL;

      chunk_buf = uv_buf_init(iov[k].iov_base, iov[k].iov_len);
      handle->recv_cb(handle,
                      msgs[k].msg_len,
                      &chunk_buf,
                      msgs[k].msg_hdr.msg_name,
                      flags);
    }

    /* one last callback so the original buffer is freed */
    if (handle->recv_cb != NULL)
      handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
  }
  return nread;
#else /* __linux__ || ____FreeBSD__ || __APPLE__ */
  return UV_ENOSYS;
#endif /* __linux__ || ____FreeBSD__ || __APPLE__ */
}
218
/* POLLIN handler: repeatedly ask the application for a buffer via alloc_cb
 * and read one datagram (or, with recvmmsg support, a batch) into it,
 * delivering results through recv_cb. The loop exits when the socket would
 * block, the read budget is exhausted, or a callback closed/paused the
 * handle.
 */
static void uv__udp_recvmsg(uv_udp_t* handle) {
  struct sockaddr_storage peer;
  struct msghdr h;
  ssize_t nread;
  uv_buf_t buf;
  int flags;
  int count;

  assert(handle->recv_cb != NULL);
  assert(handle->alloc_cb != NULL);

  /* Prevent loop starvation when the data comes in as fast as (or faster than)
   * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
   */
  count = 32;

  do {
    buf = uv_buf_init(NULL, 0);
    handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
    if (buf.base == NULL || buf.len == 0) {
      /* Application declined to supply memory; report and bail out. */
      handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
      return;
    }
    assert(buf.base != NULL);

    if (uv_udp_using_recvmmsg(handle)) {
      /* Batch path: one call may consume several datagrams of the budget. */
      nread = uv__udp_recvmmsg(handle, &buf);
      if (nread > 0)
        count -= nread;
      continue;
    }

    memset(&h, 0, sizeof(h));
    memset(&peer, 0, sizeof(peer));
    h.msg_name = &peer;
    h.msg_namelen = sizeof(peer);
    h.msg_iov = (void*) &buf;
    h.msg_iovlen = 1;

    do {
      nread = recvmsg(handle->io_watcher.fd, &h, 0);
    }
    while (nread == -1 && errno == EINTR);

    if (nread == -1) {
      /* EAGAIN is reported as a zero-byte "nothing to read" callback. */
      if (errno == EAGAIN || errno == EWOULDBLOCK)
        handle->recv_cb(handle, 0, &buf, NULL, 0);
      else
        handle->recv_cb(handle, UV__ERR(errno), &buf, NULL, 0);
    }
    else {
      flags = 0;
      /* Datagram was larger than the buffer; tail was discarded by the OS. */
      if (h.msg_flags & MSG_TRUNC)
        flags |= UV_UDP_PARTIAL;

      handle->recv_cb(handle, nread, &buf, (const struct sockaddr*) &peer, flags);
    }
    count--;
  }
  /* recv_cb callback may decide to pause or close the handle */
  while (nread != -1
      && count > 0
      && handle->io_watcher.fd != -1
      && handle->recv_cb != NULL);
}
284
/* Send queued requests one datagram at a time with sendmsg(2), starting at
 * `req`, until the write queue is drained or the socket would block. Each
 * finished request is moved to write_completed_queue; callbacks are deferred
 * to uv__udp_run_completed via uv__io_feed.
 */
static void uv__udp_sendmsg_one(uv_udp_t* handle, uv_udp_send_t* req) {
  struct uv__queue* q;
  struct msghdr h;
  ssize_t size;

  for (;;) {
    memset(&h, 0, sizeof h);
    if (req->addr.ss_family == AF_UNSPEC) {
      /* Connected socket: no destination address in the header. */
      h.msg_name = NULL;
      h.msg_namelen = 0;
    } else {
      h.msg_name = &req->addr;
      if (req->addr.ss_family == AF_INET6)
        h.msg_namelen = sizeof(struct sockaddr_in6);
      else if (req->addr.ss_family == AF_INET)
        h.msg_namelen = sizeof(struct sockaddr_in);
      else if (req->addr.ss_family == AF_UNIX)
        h.msg_namelen = sizeof(struct sockaddr_un);
      else {
        assert(0 && "unsupported address family");
        abort();
      }
    }
    h.msg_iov = (struct iovec*) req->bufs;
    h.msg_iovlen = req->nbufs;

    do
      size = sendmsg(handle->io_watcher.fd, &h, 0);
    while (size == -1 && errno == EINTR);

    /* Transient backpressure: leave the request queued; the POLLOUT watcher
     * will retry when the socket becomes writable again. */
    if (size == -1)
      if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
        return;

    req->status = (size == -1 ? UV__ERR(errno) : size);

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    uv__queue_remove(&req->queue);
    uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
    uv__io_feed(handle->loop, &handle->io_watcher);

    if (uv__queue_empty(&handle->write_queue))
      return;

    q = uv__queue_head(&handle->write_queue);
    req = uv__queue_data(q, uv_udp_send_t, queue);
  }
}
337
338 #if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
/* Send queued requests in batches of up to 20 with sendmmsg(2)
 * (sendmsg_x() on macOS). On partial progress it loops back to
 * write_queue_drain; on hard errors the whole assembled batch is failed
 * with the same errno. Completed requests are parked on
 * write_completed_queue and their callbacks run later via uv__io_feed.
 */
static void uv__udp_sendmsg_many(uv_udp_t* handle) {
  uv_udp_send_t* req;
  struct mmsghdr h[20];
  struct mmsghdr* p;
  struct uv__queue* q;
  ssize_t npkts;
  size_t pkts;
  size_t i;

write_queue_drain:
  /* Assemble one mmsghdr per queued request, up to the batch capacity. */
  for (pkts = 0, q = uv__queue_head(&handle->write_queue);
       pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
       ++pkts, q = uv__queue_head(q)) {
    req = uv__queue_data(q, uv_udp_send_t, queue);

    p = &h[pkts];
    memset(p, 0, sizeof(*p));
    if (req->addr.ss_family == AF_UNSPEC) {
      /* Connected socket: no per-packet destination address. */
      p->msg_hdr.msg_name = NULL;
      p->msg_hdr.msg_namelen = 0;
    } else {
      p->msg_hdr.msg_name = &req->addr;
      if (req->addr.ss_family == AF_INET6)
        p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in6);
      else if (req->addr.ss_family == AF_INET)
        p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in);
      else if (req->addr.ss_family == AF_UNIX)
        p->msg_hdr.msg_namelen = sizeof(struct sockaddr_un);
      else {
        assert(0 && "unsupported address family");
        abort();
      }
    }
    h[pkts].msg_hdr.msg_iov = (struct iovec*) req->bufs;
    h[pkts].msg_hdr.msg_iovlen = req->nbufs;
  }

#if defined(__APPLE__)
  do
    npkts = sendmsg_x(handle->io_watcher.fd, h, pkts, MSG_DONTWAIT);
  while (npkts == -1 && errno == EINTR);
#else
  do
    npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0);
  while (npkts == -1 && errno == EINTR);
#endif

  if (npkts < 1) {
    /* Transient backpressure: keep requests queued for the next POLLOUT. */
    if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
      return;
    /* Hard error: fail every request that was in this batch. */
    for (i = 0, q = uv__queue_head(&handle->write_queue);
         i < pkts && q != &handle->write_queue;
         ++i, q = uv__queue_head(&handle->write_queue)) {
      req = uv__queue_data(q, uv_udp_send_t, queue);
      req->status = UV__ERR(errno);
      uv__queue_remove(&req->queue);
      uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
    }
    uv__io_feed(handle->loop, &handle->io_watcher);
    return;
  }

  /* Safety: npkts known to be >0 below. Hence cast from ssize_t
   * to size_t safe.
   */
  for (i = 0, q = uv__queue_head(&handle->write_queue);
       i < (size_t)npkts && q != &handle->write_queue;
       ++i, q = uv__queue_head(&handle->write_queue)) {
    req = uv__queue_data(q, uv_udp_send_t, queue);
    req->status = req->bufs[0].len;

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    uv__queue_remove(&req->queue);
    uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
  }

  /* couldn't batch everything, continue sending (jump to avoid stack growth) */
  if (!uv__queue_empty(&handle->write_queue))
    goto write_queue_drain;

  uv__io_feed(handle->loop, &handle->io_watcher);
}
425 #endif /* __linux__ || ____FreeBSD__ || __APPLE__ */
426
/* Flush the write queue, choosing the batched or single-datagram path
 * depending on how much work is pending.
 */
static void uv__udp_sendmsg(uv_udp_t* handle) {
  struct uv__queue* head;
  uv_udp_send_t* first;

  if (uv__queue_empty(&handle->write_queue))
    return;

  head = uv__queue_head(&handle->write_queue);
  first = uv__queue_data(head, uv_udp_send_t, queue);

#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
  /* Use sendmmsg() if this send request contains more than one datagram OR
   * there is more than one send request (because that automatically implies
   * there is more than one datagram.)
   */
  if (first->nbufs != 1 ||
      uv__queue_next(&first->queue) != &handle->write_queue)
    return uv__udp_sendmsg_many(handle);
#endif

  return uv__udp_sendmsg_one(handle, first);
}
448
449 /* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
450 * refinements for programs that use multicast. Therefore we preferentially
451 * set SO_REUSEPORT over SO_REUSEADDR here, but we set SO_REUSEPORT only
452 * when that socket option doesn't have the capability of load balancing.
453 * Otherwise, we fall back to SO_REUSEADDR.
454 *
455 * Linux as of 3.9, DragonflyBSD 3.6, AIX 7.2.5 have the SO_REUSEPORT socket
456 * option but with semantics that are different from the BSDs: it _shares_
457 * the port rather than steals it from the current listener. While useful,
458 * it's not something we can emulate on other platforms so we don't enable it.
459 *
460 * zOS does not support getsockname with SO_REUSEPORT option when using
461 * AF_UNIX.
462 */
/* Enable address reuse on `fd`, preferring SO_REUSEPORT on platforms where
 * it has BSD "steal the port" semantics (see the block comment above) and
 * falling back to SO_REUSEADDR elsewhere. Returns 0 or a UV_* error.
 */
static int uv__sock_reuseaddr(int fd) {
  int yes;
  yes = 1;

#if defined(SO_REUSEPORT) && defined(__MVS__)
  /* z/OS: SO_REUSEPORT cannot be combined with AF_UNIX sockets, so check
   * the socket's family first and use SO_REUSEADDR for AF_UNIX. */
  struct sockaddr_in sockfd;
  unsigned int sockfd_len = sizeof(sockfd);
  if (getsockname(fd, (struct sockaddr*) &sockfd, &sockfd_len) == -1)
    return UV__ERR(errno);
  if (sockfd.sin_family == AF_UNIX) {
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
      return UV__ERR(errno);
  } else {
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
      return UV__ERR(errno);
  }
#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) && \
      !defined(__sun__) && !defined(__DragonFly__) && !defined(_AIX73)
  /* BSD-style SO_REUSEPORT (no load-balancing semantics). */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
    return UV__ERR(errno);
#else
  /* Platforms where SO_REUSEPORT is absent or load-balances (Linux 3.9+,
   * DragonFly 3.6+, AIX 7.2.5+): use plain SO_REUSEADDR. */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
    return UV__ERR(errno);
#endif

  return 0;
}
490
491 /*
492 * The Linux kernel suppresses some ICMP error messages by default for UDP
493 * sockets. Setting IP_RECVERR/IPV6_RECVERR on the socket enables full ICMP
494 * error reporting, hopefully resulting in faster failover to working name
495 * servers.
496 */
/* Enable extended ICMP error reporting (IP_RECVERR / IPV6_RECVERR) on Linux;
 * a no-op everywhere else and for address families other than INET/INET6.
 * Returns 0 on success or a UV_* error code.
 */
static int uv__set_recverr(int fd, sa_family_t ss_family) {
#if defined(__linux__)
  int on = 1;

  switch (ss_family) {
  case AF_INET:
    if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on)))
      return UV__ERR(errno);
    break;
  case AF_INET6:
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on)))
      return UV__ERR(errno);
    break;
  default:
    break;
  }
#endif
  return 0;
}
512
513
/* Bind `handle` to `addr`, creating the socket on demand if the handle does
 * not have one yet. `flags` may combine UV_UDP_IPV6ONLY, UV_UDP_REUSEADDR,
 * UV_UDP_REUSEPORT and UV_UDP_LINUX_RECVERR; anything else is rejected.
 * Returns 0 on success or a UV_* error code.
 */
int uv__udp_bind(uv_udp_t* handle,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 unsigned int flags) {
  int err;
  int yes;
  int fd;

  /* Check for bad flags. */
  if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR |
                UV_UDP_REUSEPORT | UV_UDP_LINUX_RECVERR))
    return UV_EINVAL;

  /* Cannot set IPv6-only mode on non-IPv6 socket. */
  if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6)
    return UV_EINVAL;

  /* Lazily create the socket the first time the handle is bound. */
  fd = handle->io_watcher.fd;
  if (fd == -1) {
    err = uv__socket(addr->sa_family, SOCK_DGRAM, 0);
    if (err < 0)
      return err;
    fd = err;
    handle->io_watcher.fd = fd;
  }

  if (flags & UV_UDP_LINUX_RECVERR) {
    err = uv__set_recverr(fd, addr->sa_family);
    if (err)
      return err;
  }

  if (flags & UV_UDP_REUSEADDR) {
    err = uv__sock_reuseaddr(fd);
    if (err)
      return err;
  }

  if (flags & UV_UDP_REUSEPORT) {
    err = uv__sock_reuseport(fd);
    if (err)
      return err;
  }

  if (flags & UV_UDP_IPV6ONLY) {
#ifdef IPV6_V6ONLY
    yes = 1;
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
      err = UV__ERR(errno);
      return err;
    }
#else
    err = UV_ENOTSUP;
    return err;
#endif
  }

  if (bind(fd, addr, addrlen)) {
    err = UV__ERR(errno);
    if (errno == EAFNOSUPPORT)
      /* OSX, other BSDs and SunoS fail with EAFNOSUPPORT when binding a
       * socket created with AF_INET to an AF_INET6 address or vice versa. */
      err = UV_EINVAL;
    return err;
  }

  if (addr->sa_family == AF_INET6)
    handle->flags |= UV_HANDLE_IPV6;

  handle->flags |= UV_HANDLE_BOUND;
  return 0;
}
586
587
/* Bind the handle to the wildcard address of `domain` (with `flags`) if it
 * has not been bound yet; a no-op when a socket already exists. Only
 * AF_INET and AF_INET6 are supported — anything else aborts.
 */
static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
                                       int domain,
                                       unsigned int flags) {
  union uv__sockaddr taddr;
  socklen_t addrlen;

  if (handle->io_watcher.fd != -1)
    return 0;

  memset(&taddr, 0, sizeof(taddr));

  if (domain == AF_INET) {
    taddr.in.sin_family = AF_INET;
    taddr.in.sin_addr.s_addr = INADDR_ANY;
    addrlen = sizeof(taddr.in);
  } else if (domain == AF_INET6) {
    taddr.in6.sin6_family = AF_INET6;
    taddr.in6.sin6_addr = in6addr_any;
    addrlen = sizeof(taddr.in6);
  } else {
    assert(0 && "unsupported address family");
    abort();
  }

  return uv__udp_bind(handle, &taddr.addr, addrlen, flags);
}
623
624
/* Associate the socket with a fixed remote peer via connect(2), binding the
 * handle to the wildcard address first when needed. On success the handle is
 * marked UV_HANDLE_UDP_CONNECTED.
 */
int uv__udp_connect(uv_udp_t* handle,
                    const struct sockaddr* addr,
                    unsigned int addrlen) {
  int r;

  r = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
  if (r)
    return r;

  /* Retry when interrupted by a signal. */
  do {
    errno = 0;
    r = connect(handle->io_watcher.fd, addr, addrlen);
  } while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  handle->flags |= UV_HANDLE_UDP_CONNECTED;

  return 0;
}
646
647 /* From https://pubs.opengroup.org/onlinepubs/9699919799/functions/connect.html
648 * Any of uv supported UNIXs kernel should be standardized, but the kernel
649 * implementation logic not same, let's use pseudocode to explain the udp
650 * disconnect behaviors:
651 *
652 * Predefined stubs for pseudocode:
653 * 1. sodisconnect: The function to perform the real udp disconnect
654 * 2. pru_connect: The function to perform the real udp connect
655 * 3. so: The kernel object match with socket fd
656 * 4. addr: The sockaddr parameter from user space
657 *
658 * BSDs:
659 * if(sodisconnect(so) == 0) { // udp disconnect succeed
660 * if (addr->sa_len != so->addr->sa_len) return EINVAL;
661 * if (addr->sa_family != so->addr->sa_family) return EAFNOSUPPORT;
662 * pru_connect(so);
663 * }
664 * else return EISCONN;
665 *
666 * z/OS (same with Windows):
667 * if(addr->sa_len < so->addr->sa_len) return EINVAL;
668 * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
669 *
670 * AIX:
671 * if(addr->sa_len != sizeof(struct sockaddr)) return EINVAL; // ignore ip proto version
672 * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
673 *
674 * Linux,Others:
675 * if(addr->sa_len < sizeof(struct sockaddr)) return EINVAL;
676 * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
677 */
/* Dissolve the socket's peer association by connecting to an AF_UNSPEC
 * address (or NULL on IBM i). Kernel behavior differs per platform — see the
 * pseudocode block above — so some "errors" are tolerated on BSDs.
 */
int uv__udp_disconnect(uv_udp_t* handle) {
  int r;
#if defined(__MVS__)
  /* z/OS validates the full sockaddr length, so pass a sockaddr_storage. */
  struct sockaddr_storage addr;
#else
  struct sockaddr addr;
#endif

  memset(&addr, 0, sizeof(addr));

#if defined(__MVS__)
  addr.ss_family = AF_UNSPEC;
#else
  addr.sa_family = AF_UNSPEC;
#endif

  do {
    errno = 0;
#ifdef __PASE__
    /* On IBMi a connectionless transport socket can be disconnected by
     * either setting the addr parameter to NULL or setting the
     * addr_length parameter to zero, and issuing another connect().
     * https://www.ibm.com/docs/en/i/7.4?topic=ssw_ibm_i_74/apis/connec.htm
     */
    r = connect(handle->io_watcher.fd, (struct sockaddr*) NULL, 0);
#else
    r = connect(handle->io_watcher.fd, (struct sockaddr*) &addr, sizeof(addr));
#endif
  } while (r == -1 && errno == EINTR);

  if (r == -1) {
#if defined(BSD)  /* The macro BSD is from sys/param.h */
    /* BSD kernels may report EAFNOSUPPORT/EINVAL for an AF_UNSPEC connect
     * even though the disconnect itself succeeded; ignore those. */
    if (errno != EAFNOSUPPORT && errno != EINVAL)
      return UV__ERR(errno);
#else
    return UV__ERR(errno);
#endif
  }

  handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
  return 0;
}
720
/* Queue an asynchronous send of `bufs` to `addr` (or to the connected peer
 * when addr == NULL). Takes a snapshot of the buffer descriptors — but not
 * the data they point to — and attempts an immediate non-blocking write when
 * nothing else is queued; otherwise arms the POLLOUT watcher. Returns 0 or
 * UV_ENOMEM / a bind error.
 */
int uv__udp_send(uv_udp_send_t* req,
                 uv_udp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 uv_udp_send_cb send_cb) {
  int err;
  int empty_queue;

  assert(nbufs > 0);

  if (addr) {
    err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
    if (err)
      return err;
  }

  /* It's legal for send_queue_count > 0 even when the write_queue is empty;
   * it means there are error-state requests in the write_completed_queue that
   * will touch up send_queue_size/count later.
   */
  empty_queue = (handle->send_queue_count == 0);

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  assert(addrlen <= sizeof(req->addr));
  if (addr == NULL)
    req->addr.ss_family = AF_UNSPEC;  /* marker: use the connected peer */
  else
    memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->nbufs = nbufs;

  /* Use the inline bufsml array when it is big enough; heap otherwise. */
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));

  if (req->bufs == NULL) {
    uv__req_unregister(handle->loop);
    return UV_ENOMEM;
  }

  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
  handle->send_queue_count++;
  uv__queue_insert_tail(&handle->write_queue, &req->queue);
  uv__handle_start(handle);

  /* Only send synchronously when nothing is in flight and we are not already
   * inside uv__udp_run_completed (which would reenter user callbacks). */
  if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
    uv__udp_sendmsg(handle);

    /* `uv__udp_sendmsg` may not be able to do non-blocking write straight
     * away. In such cases the `io_watcher` has to be queued for asynchronous
     * write.
     */
    if (!uv__queue_empty(&handle->write_queue))
      uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
  } else {
    uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
  }

  return 0;
}
785
786
/* Attempt a single synchronous, non-blocking send. Returns the number of
 * bytes written, UV_EAGAIN when the socket is busy or would block, or
 * another UV_* error code.
 */
int uv__udp_try_send(uv_udp_t* handle,
                     const uv_buf_t bufs[],
                     unsigned int nbufs,
                     const struct sockaddr* addr,
                     unsigned int addrlen) {
  struct msghdr h;
  ssize_t nwritten;
  int rc;

  assert(nbufs > 0);

  /* already sending a message */
  if (handle->send_queue_count != 0)
    return UV_EAGAIN;

  if (addr == NULL) {
    /* No destination: the socket must already be connected. */
    assert(handle->flags & UV_HANDLE_UDP_CONNECTED);
  } else {
    rc = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
    if (rc)
      return rc;
  }

  memset(&h, 0, sizeof h);
  h.msg_name = (struct sockaddr*) addr;
  h.msg_namelen = addrlen;
  h.msg_iov = (struct iovec*) bufs;
  h.msg_iovlen = nbufs;

  do
    nwritten = sendmsg(handle->io_watcher.fd, &h, 0);
  while (nwritten == -1 && errno == EINTR);

  if (nwritten == -1) {
    if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
      return UV_EAGAIN;
    return UV__ERR(errno);
  }

  return nwritten;
}
829
830
/* Join or leave an IPv4 multicast group on the (optionally specified)
 * interface. Returns 0, UV_EINVAL for a bad membership value, or an errno
 * translated to a UV_* code.
 */
static int uv__udp_set_membership4(uv_udp_t* handle,
                                   const struct sockaddr_in* multicast_addr,
                                   const char* interface_addr,
                                   uv_membership membership) {
  struct ip_mreq mreq;
  int optname;
  int err;

  memset(&mreq, 0, sizeof mreq);

  if (interface_addr == NULL) {
    /* Let the kernel pick the interface. */
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  } else {
    err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
    if (err)
      return err;
  }

  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;

  if (membership == UV_JOIN_GROUP)
    optname = IP_ADD_MEMBERSHIP;
  else if (membership == UV_LEAVE_GROUP)
    optname = IP_DROP_MEMBERSHIP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IP,
                 optname,
                 &mreq,
                 sizeof(mreq))) {
#if defined(__MVS__)
    if (errno == ENXIO)
      return UV_ENODEV;
#endif
    return UV__ERR(errno);
  }

  return 0;
}
876
877
/* Join or leave an IPv6 multicast group. The interface, when given, is
 * identified by the scope id parsed from `interface_addr`. Returns 0,
 * UV_EINVAL for a bad address or membership value, or a UV_* errno code.
 */
static int uv__udp_set_membership6(uv_udp_t* handle,
                                   const struct sockaddr_in6* multicast_addr,
                                   const char* interface_addr,
                                   uv_membership membership) {
  struct ipv6_mreq mreq;
  struct sockaddr_in6 addr6;
  int optname;

  memset(&mreq, 0, sizeof mreq);

  if (interface_addr == NULL) {
    mreq.ipv6mr_interface = 0;  /* any interface */
  } else {
    if (uv_ip6_addr(interface_addr, 0, &addr6))
      return UV_EINVAL;
    mreq.ipv6mr_interface = addr6.sin6_scope_id;
  }

  mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;

  if (membership == UV_JOIN_GROUP)
    optname = IPV6_ADD_MEMBERSHIP;
  else if (membership == UV_LEAVE_GROUP)
    optname = IPV6_DROP_MEMBERSHIP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IPV6,
                 optname,
                 &mreq,
                 sizeof(mreq))) {
#if defined(__MVS__)
    if (errno == ENXIO)
      return UV_ENODEV;
#endif
    return UV__ERR(errno);
  }

  return 0;
}
923
924
925 #if !defined(__OpenBSD__) && \
926 !defined(__NetBSD__) && \
927 !defined(__ANDROID__) && \
928 !defined(__DragonFly__) && \
929 !defined(__QNX__) && \
930 !defined(__GNU__)
/* Join or leave an IPv4 source-specific multicast group, binding the handle
 * to the wildcard address first when needed. Returns 0, UV_EINVAL for a bad
 * membership value, or a UV_* errno code.
 */
static int uv__udp_set_source_membership4(uv_udp_t* handle,
                                          const struct sockaddr_in* multicast_addr,
                                          const char* interface_addr,
                                          const struct sockaddr_in* source_addr,
                                          uv_membership membership) {
  struct ip_mreq_source mreq;
  int optname;
  int err;

  err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
  if (err)
    return err;

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr == NULL) {
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  } else {
    err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
    if (err)
      return err;
  }

  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
  mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;

  switch (membership) {
  case UV_JOIN_GROUP:
    optname = IP_ADD_SOURCE_MEMBERSHIP;
    break;
  case UV_LEAVE_GROUP:
    optname = IP_DROP_SOURCE_MEMBERSHIP;
    break;
  default:
    return UV_EINVAL;
  }

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IP,
                 optname,
                 &mreq,
                 sizeof(mreq)))
    return UV__ERR(errno);

  return 0;
}
974
975
/* Join or leave an IPv6 source-specific multicast group via the
 * protocol-independent MCAST_* options, binding the handle first when
 * needed. Returns 0, UV_EINVAL, or a UV_* errno code.
 */
static int uv__udp_set_source_membership6(uv_udp_t* handle,
                                          const struct sockaddr_in6* multicast_addr,
                                          const char* interface_addr,
                                          const struct sockaddr_in6* source_addr,
                                          uv_membership membership) {
  struct group_source_req mreq;
  struct sockaddr_in6 addr6;
  int optname;
  int err;

  err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
  if (err)
    return err;

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr == NULL) {
    mreq.gsr_interface = 0;  /* any interface */
  } else {
    err = uv_ip6_addr(interface_addr, 0, &addr6);
    if (err)
      return err;
    mreq.gsr_interface = addr6.sin6_scope_id;
  }

  /* group_source_req holds sockaddr_storage members, so the sockaddr_in6
   * values are guaranteed to fit. */
  STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
  STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));
  memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
  memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));

  switch (membership) {
  case UV_JOIN_GROUP:
    optname = MCAST_JOIN_SOURCE_GROUP;
    break;
  case UV_LEAVE_GROUP:
    optname = MCAST_LEAVE_SOURCE_GROUP;
    break;
  default:
    return UV_EINVAL;
  }

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IPV6,
                 optname,
                 &mreq,
                 sizeof(mreq)))
    return UV__ERR(errno);

  return 0;
}
1023 #endif
1024
1025
/* Initialize a UDP handle. When `domain` names a concrete address family the
 * socket is created eagerly; with AF_UNSPEC creation is deferred until the
 * first bind/connect/send.
 */
int uv__udp_init_ex(uv_loop_t* loop,
                    uv_udp_t* handle,
                    unsigned flags,
                    int domain) {
  int fd = -1;

  if (domain != AF_UNSPEC) {
    fd = uv__socket(domain, SOCK_DGRAM, 0);
    if (fd < 0)
      return fd;
  }

  uv__handle_init(loop, (uv_handle_t*) handle, UV_UDP);
  handle->alloc_cb = NULL;
  handle->recv_cb = NULL;
  handle->send_queue_size = 0;
  handle->send_queue_count = 0;
  uv__io_init(&handle->io_watcher, uv__udp_io, fd);
  uv__queue_init(&handle->write_queue);
  uv__queue_init(&handle->write_completed_queue);

  return 0;
}
1050
1051
uv_udp_using_recvmmsg(const uv_udp_t * handle)1052 int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
1053 #if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
1054 if (handle->flags & UV_HANDLE_UDP_RECVMMSG)
1055 return 1;
1056 #endif
1057 return 0;
1058 }
1059
1060
int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
  int err;

  /* Refuse to adopt a socket into a handle that already owns one. */
  if (handle->io_watcher.fd != -1)
    return UV_EBUSY;

  /* Refuse descriptors that are already registered with this loop. */
  if (uv__fd_exists(handle->loop, sock))
    return UV_EEXIST;

  if ((err = uv__nonblock(sock, 1)) != 0)
    return err;

  if ((err = uv__sock_reuseaddr(sock)) != 0)
    return err;

  handle->io_watcher.fd = sock;

  /* A socket that already has a peer behaves as a connected UDP handle. */
  if (uv__udp_is_connected(handle))
    handle->flags |= UV_HANDLE_UDP_CONNECTED;

  return 0;
}
1085
1086
int uv_udp_set_membership(uv_udp_t* handle,
                          const char* multicast_addr,
                          const char* interface_addr,
                          uv_membership membership) {
  struct sockaddr_in v4;
  struct sockaddr_in6 v6;
  int err;

  /* Try the multicast address as IPv4 first, then as IPv6.  Bind the handle
   * (if still unbound) to the matching family before joining/leaving.
   */
  if (uv_ip4_addr(multicast_addr, 0, &v4) == 0) {
    err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
    if (err)
      return err;
    return uv__udp_set_membership4(handle, &v4, interface_addr, membership);
  }

  if (uv_ip6_addr(multicast_addr, 0, &v6) == 0) {
    err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
    if (err)
      return err;
    return uv__udp_set_membership6(handle, &v6, interface_addr, membership);
  }

  return UV_EINVAL;  /* Not a valid IPv4 or IPv6 literal. */
}
1109
1110
int uv_udp_set_source_membership(uv_udp_t* handle,
                                 const char* multicast_addr,
                                 const char* interface_addr,
                                 const char* source_addr,
                                 uv_membership membership) {
  /* Source-specific multicast is unsupported on these platforms. */
#if !defined(__OpenBSD__) && \
    !defined(__NetBSD__) && \
    !defined(__ANDROID__) && \
    !defined(__DragonFly__) && \
    !defined(__QNX__) && \
    !defined(__GNU__)
  union uv__sockaddr mcast_addr;
  union uv__sockaddr src_addr;
  int err;

  /* If the multicast address parses as IPv4, both addresses must be IPv4;
   * otherwise both must parse as IPv6.
   */
  if (uv_ip4_addr(multicast_addr, 0, &mcast_addr.in) == 0) {
    err = uv_ip4_addr(source_addr, 0, &src_addr.in);
    if (err)
      return err;
    return uv__udp_set_source_membership4(handle,
                                          &mcast_addr.in,
                                          interface_addr,
                                          &src_addr.in,
                                          membership);
  }

  err = uv_ip6_addr(multicast_addr, 0, &mcast_addr.in6);
  if (err)
    return err;
  err = uv_ip6_addr(source_addr, 0, &src_addr.in6);
  if (err)
    return err;
  return uv__udp_set_source_membership6(handle,
                                        &mcast_addr.in6,
                                        interface_addr,
                                        &src_addr.in6,
                                        membership);
#else
  return UV_ENOSYS;
#endif
}
1153
1154
/* Apply a socket option at the protocol level matching the handle's address
 * family: option6/IPPROTO_IPV6 for IPv6 handles, option4/IPPROTO_IP for IPv4.
 * Returns 0 on success or a negative libuv error code.
 */
static int uv__setsockopt(uv_udp_t* handle,
                          int option4,
                          int option6,
                          const void* val,
                          socklen_t size) {
  int level;
  int optname;

  if (handle->flags & UV_HANDLE_IPV6) {
    level = IPPROTO_IPV6;
    optname = option6;
  } else {
    level = IPPROTO_IP;
    optname = option4;
  }

  if (setsockopt(handle->io_watcher.fd, level, optname, val, size) != 0)
    return UV__ERR(errno);

  return 0;
}
1179
/* Like uv__setsockopt(), but passes the value with the operand width the
 * platform expects: char on Solaris/AIX/z/OS, unsigned char on OpenBSD,
 * int everywhere else.  val must fit in a byte (0..255).
 */
static int uv__setsockopt_maybe_char(uv_udp_t* handle,
                                     int option4,
                                     int option6,
                                     int val) {
#if defined(__sun) || defined(_AIX) || defined(__MVS__)
  char arg;
#elif defined(__OpenBSD__)
  unsigned char arg;
#else
  int arg;
#endif

  if (val < 0 || val > 255)
    return UV_EINVAL;

  arg = val;
  return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
}
1197
1198
int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
  int rc;

  /* SO_BROADCAST lives at the socket level, so no v4/v6 dispatch needed. */
  rc = setsockopt(handle->io_watcher.fd,
                  SOL_SOCKET,
                  SO_BROADCAST,
                  &on,
                  sizeof(on));
  if (rc != 0)
    return UV__ERR(errno);

  return 0;
}
1210
1211
/* Set the unicast TTL (IPv4) / hop limit (IPv6) for packets sent on the
 * handle's socket.  Valid range is 1..255.  Returns 0 on success or a
 * negative libuv error code.
 */
int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
  if (ttl < 1 || ttl > 255)
    return UV_EINVAL;

#if defined(__MVS__)
  if (!(handle->flags & UV_HANDLE_IPV6))
    return UV_ENOTSUP; /* zOS does not support setting ttl for IPv4 */
#endif

  /*
   * On Solaris and derivatives such as SmartOS, the length of socket options
   * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS,
   * so hardcode the size of these options on this platform,
   * and use the general uv__setsockopt_maybe_char call on other platforms.
   */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__) || defined(__QNX__)

  /* int-sized operand on these platforms. */
  return uv__setsockopt(handle,
                        IP_TTL,
                        IPV6_UNICAST_HOPS,
                        &ttl,
                        sizeof(ttl));

#else /* !(defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
       defined(__MVS__) || defined(__QNX__)) */

  /* char-or-int operand selected per platform by the helper. */
  return uv__setsockopt_maybe_char(handle,
                                   IP_TTL,
                                   IPV6_UNICAST_HOPS,
                                   ttl);

#endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
        defined(__MVS__) || defined(__QNX__) */
}
1247
1248
/* Set the multicast TTL (IPv4) / multicast hop limit (IPv6) on the handle's
 * socket.  Returns 0 on success or a negative libuv error code.  Range
 * validation (0..255) happens inside uv__setsockopt_maybe_char on the
 * platforms that route through it.
 */
int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
/*
 * On Solaris and derivatives such as SmartOS, the length of socket options
 * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for
 * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case,
 * and use the general uv__setsockopt_maybe_char call otherwise.
 */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__) || defined(__QNX__)
  if (handle->flags & UV_HANDLE_IPV6)
    /* IPv6 handle on these platforms: operand must be int-sized. */
    return uv__setsockopt(handle,
                          IP_MULTICAST_TTL,
                          IPV6_MULTICAST_HOPS,
                          &ttl,
                          sizeof(ttl));
#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__) || defined(__QNX__) */

  return uv__setsockopt_maybe_char(handle,
                                   IP_MULTICAST_TTL,
                                   IPV6_MULTICAST_HOPS,
                                   ttl);
}
1272
1273
/* Enable/disable local loopback of outgoing multicast datagrams on the
 * handle's socket.  Returns 0 on success or a negative libuv error code.
 */
int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
/*
 * On Solaris and derivatives such as SmartOS, the length of socket options
 * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for
 * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case,
 * and use the general uv__setsockopt_maybe_char call otherwise.
 */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__) || defined(__QNX__)
  if (handle->flags & UV_HANDLE_IPV6)
    /* IPv6 handle on these platforms: operand must be int-sized. */
    return uv__setsockopt(handle,
                          IP_MULTICAST_LOOP,
                          IPV6_MULTICAST_LOOP,
                          &on,
                          sizeof(on));
#endif /* defined(__sun) || defined(_AIX) ||defined(__OpenBSD__) ||
    defined(__MVS__) || defined(__QNX__) */

  return uv__setsockopt_maybe_char(handle,
                                   IP_MULTICAST_LOOP,
                                   IPV6_MULTICAST_LOOP,
                                   on);
}
1297
int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
  struct sockaddr_storage storage;
  struct sockaddr_in* sin;
  struct sockaddr_in6* sin6;

  /* Both views alias the same storage; which one is valid is decided by
   * ss_family below.
   */
  sin = (struct sockaddr_in*) &storage;
  sin6 = (struct sockaddr_in6*) &storage;

  if (interface_addr == NULL) {
    /* No interface given: fall back to the default interface for the
     * handle's address family.
     */
    memset(&storage, 0, sizeof(storage));
    if (handle->flags & UV_HANDLE_IPV6) {
      storage.ss_family = AF_INET6;
      sin6->sin6_scope_id = 0;
    } else {
      storage.ss_family = AF_INET;
      sin->sin_addr.s_addr = htonl(INADDR_ANY);
    }
  } else if (uv_ip4_addr(interface_addr, 0, sin) != 0 &&
             uv_ip6_addr(interface_addr, 0, sin6) != 0) {
    return UV_EINVAL;  /* Neither a valid IPv4 nor IPv6 literal. */
  }

  switch (storage.ss_family) {
  case AF_INET:
    if (setsockopt(handle->io_watcher.fd,
                   IPPROTO_IP,
                   IP_MULTICAST_IF,
                   (void*) &sin->sin_addr,
                   sizeof(sin->sin_addr)) == -1)
      return UV__ERR(errno);
    break;

  case AF_INET6:
    /* IPv6 selects the interface by scope id, not by address. */
    if (setsockopt(handle->io_watcher.fd,
                   IPPROTO_IPV6,
                   IPV6_MULTICAST_IF,
                   &sin6->sin6_scope_id,
                   sizeof(sin6->sin6_scope_id)) == -1)
      return UV__ERR(errno);
    break;

  default:
    assert(0 && "unexpected address family");
    abort();
  }

  return 0;
}
1346
uv_udp_getpeername(const uv_udp_t * handle,struct sockaddr * name,int * namelen)1347 int uv_udp_getpeername(const uv_udp_t* handle,
1348 struct sockaddr* name,
1349 int* namelen) {
1350
1351 return uv__getsockpeername((const uv_handle_t*) handle,
1352 getpeername,
1353 name,
1354 namelen);
1355 }
1356
uv_udp_getsockname(const uv_udp_t * handle,struct sockaddr * name,int * namelen)1357 int uv_udp_getsockname(const uv_udp_t* handle,
1358 struct sockaddr* name,
1359 int* namelen) {
1360
1361 return uv__getsockpeername((const uv_handle_t*) handle,
1362 getsockname,
1363 name,
1364 namelen);
1365 }
1366
1367
int uv__udp_recv_start(uv_udp_t* handle,
                       uv_alloc_cb alloc_cb,
                       uv_udp_recv_cb recv_cb) {
  int err;

  /* Both callbacks are mandatory. */
  if (alloc_cb == NULL)
    return UV_EINVAL;
  if (recv_cb == NULL)
    return UV_EINVAL;

  if (uv__io_active(&handle->io_watcher, POLLIN))
    return UV_EALREADY;  /* FIXME(bnoordhuis) Should be UV_EBUSY. */

  /* An unbound handle is implicitly bound before it can receive. */
  if ((err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0)) != 0)
    return err;

  handle->alloc_cb = alloc_cb;
  handle->recv_cb = recv_cb;

  uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
  uv__handle_start(handle);

  return 0;
}
1391
1392
int uv__udp_recv_stop(uv_udp_t* handle) {
  uv__io_stop(handle->loop, &handle->io_watcher, POLLIN);

  /* Keep the handle active while writes are still pending. */
  if (!uv__io_active(&handle->io_watcher, POLLOUT))
    uv__handle_stop(handle);

  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;

  return 0;
}
1404