1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 #include "uv.h"
23 #include "internal.h"
24
25 #include <assert.h>
26 #include <string.h>
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <unistd.h>
30 #if defined(__MVS__)
31 #include <xti.h>
32 #endif
33 #include <sys/un.h>
34
35 #if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
36 # define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
37 #endif
38
39 #if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP)
40 # define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
41 #endif
42
43 static void uv__udp_run_completed(uv_udp_t* handle);
44 static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
45 static void uv__udp_recvmsg(uv_udp_t* handle);
46 static void uv__udp_sendmsg(uv_udp_t* handle);
47 static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
48 int domain,
49 unsigned int flags);
50
51
/* Tear down the I/O side of a closing UDP handle: detach the watcher from
 * the loop, stop the handle and release the file descriptor, if any.
 */
void uv__udp_close(uv_udp_t* handle) {
  int fd;

  uv__io_close(handle->loop, &handle->io_watcher);
  uv__handle_stop(handle);

  fd = handle->io_watcher.fd;
  if (fd == -1)
    return;

  uv__close(fd);
  handle->io_watcher.fd = -1;
}
61
62
/* Final close phase for a UDP handle.  Runs after uv__udp_close(): the fd
 * is already gone and the watcher inactive (asserted below).  Cancels all
 * still-pending send requests and drains the completion queue so their
 * callbacks fire before the handle's close_cb does.
 */
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  struct uv__queue* q;

  assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
  assert(handle->io_watcher.fd == -1);

  /* Move every unsent request to the completed queue with UV_ECANCELED;
   * uv__udp_run_completed() below invokes the send callbacks.
   */
  while (!uv__queue_empty(&handle->write_queue)) {
    q = uv__queue_head(&handle->write_queue);
    uv__queue_remove(q);

    req = uv__queue_data(q, uv_udp_send_t, queue);
    req->status = UV_ECANCELED;
    uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
  }

  uv__udp_run_completed(handle);

  /* Completion processing must have drained all queued-bytes accounting. */
  assert(handle->send_queue_size == 0);
  assert(handle->send_queue_count == 0);

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
89
90
/* Invoke the send callback for every request on the completed queue and
 * release per-request resources.  Not reentrant: the UDP_PROCESSING flag
 * guards against a send callback queueing work that would re-enter here
 * (see the matching check in uv__udp_send()).
 */
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  struct uv__queue* q;

  assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
  handle->flags |= UV_HANDLE_UDP_PROCESSING;

  while (!uv__queue_empty(&handle->write_completed_queue)) {
    q = uv__queue_head(&handle->write_completed_queue);
    uv__queue_remove(q);

    req = uv__queue_data(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    /* Undo the accounting done when the request was queued. */
    handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
    handle->send_queue_count--;

    /* Free heap-allocated buffer arrays; small sends use req->bufsml. */
    if (req->bufs != req->bufsml)
      uv__free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status < 0 == errno
     */
    if (req->status >= 0)
      req->send_cb(req, 0);
    else
      req->send_cb(req, req->status);
  }

  if (uv__queue_empty(&handle->write_queue)) {
    /* Pending queue and completion queue empty, stop watcher. */
    uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
    if (!uv__io_active(&handle->io_watcher, POLLIN))
      uv__handle_stop(handle);
  }

  handle->flags &= ~UV_HANDLE_UDP_PROCESSING;
}
133
134
/* Event-loop callback for UDP sockets: dispatch readable events to the
 * receive path and writable events to the send/completion path.
 */
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
  uv_udp_t* handle = container_of(w, uv_udp_t, io_watcher);

  assert(handle->type == UV_UDP);

  if (revents & POLLIN)
    uv__udp_recvmsg(handle);

  /* recv_cb may have started closing the handle; skip writes in that case. */
  if ((revents & POLLOUT) && !uv__is_closing(handle)) {
    uv__udp_sendmsg(handle);
    uv__udp_run_completed(handle);
  }
}
149
/* Receive multiple datagrams with one recvmmsg(2) call.  `buf` is the
 * application-supplied buffer; it is sliced into UV__UDP_DGRAM_MAXSIZE
 * chunks, at most ARRAY_SIZE(peers) of them.  Each datagram is delivered
 * to recv_cb with UV_UDP_MMSG_CHUNK set, followed by one final callback
 * with UV_UDP_MMSG_FREE so the application can release `buf`.  Returns
 * the number of datagrams read, -1 on error (errno already reported via
 * recv_cb), or UV_ENOSYS where recvmmsg is unavailable.
 */
static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
#if defined(__linux__) || defined(__FreeBSD__)
  struct sockaddr_in6 peers[20];  /* sockaddr_in6 is large enough for IPv4 too */
  struct iovec iov[ARRAY_SIZE(peers)];
  struct mmsghdr msgs[ARRAY_SIZE(peers)];
  ssize_t nread;
  uv_buf_t chunk_buf;
  size_t chunks;
  int flags;
  size_t k;

  /* prepare structures for recvmmsg */
  chunks = buf->len / UV__UDP_DGRAM_MAXSIZE;
  if (chunks > ARRAY_SIZE(iov))
    chunks = ARRAY_SIZE(iov);
  for (k = 0; k < chunks; ++k) {
    iov[k].iov_base = buf->base + k * UV__UDP_DGRAM_MAXSIZE;
    iov[k].iov_len = UV__UDP_DGRAM_MAXSIZE;
    memset(&msgs[k].msg_hdr, 0, sizeof(msgs[k].msg_hdr));
    msgs[k].msg_hdr.msg_iov = iov + k;
    msgs[k].msg_hdr.msg_iovlen = 1;
    msgs[k].msg_hdr.msg_name = peers + k;
    msgs[k].msg_hdr.msg_namelen = sizeof(peers[0]);
    msgs[k].msg_hdr.msg_control = NULL;
    msgs[k].msg_hdr.msg_controllen = 0;
    msgs[k].msg_hdr.msg_flags = 0;
  }

  do
    nread = recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL);
  while (nread == -1 && errno == EINTR);

  if (nread < 1) {
    /* Nothing read (or transient error): report EOF-like zero read. */
    if (nread == 0 || errno == EAGAIN || errno == EWOULDBLOCK)
      handle->recv_cb(handle, 0, buf, NULL, 0);
    else
      handle->recv_cb(handle, UV__ERR(errno), buf, NULL, 0);
  } else {
    /* pass each chunk to the application; recv_cb may stop the handle,
     * which NULLs recv_cb, hence the re-check every iteration */
    for (k = 0; k < (size_t) nread && handle->recv_cb != NULL; k++) {
      flags = UV_UDP_MMSG_CHUNK;
      if (msgs[k].msg_hdr.msg_flags & MSG_TRUNC)
        flags |= UV_UDP_PARTIAL;

      chunk_buf = uv_buf_init(iov[k].iov_base, iov[k].iov_len);
      handle->recv_cb(handle,
                      msgs[k].msg_len,
                      &chunk_buf,
                      msgs[k].msg_hdr.msg_name,
                      flags);
    }

    /* one last callback so the original buffer is freed */
    if (handle->recv_cb != NULL)
      handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
  }
  return nread;
#else  /* __linux__ || __FreeBSD__ */
  return UV_ENOSYS;
#endif  /* __linux__ || __FreeBSD__ */
}
211
/* Drain readable datagrams from the socket, delivering each to recv_cb.
 * Uses recvmmsg() batching when enabled on the handle, plain recvmsg()
 * otherwise.  Reads at most `count` datagrams per invocation so a busy
 * socket cannot starve the rest of the event loop.
 */
static void uv__udp_recvmsg(uv_udp_t* handle) {
  struct sockaddr_storage peer;
  struct msghdr h;
  ssize_t nread;
  uv_buf_t buf;
  int flags;
  int count;

  assert(handle->recv_cb != NULL);
  assert(handle->alloc_cb != NULL);

  /* Prevent loop starvation when the data comes in as fast as (or faster than)
   * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
   */
  count = 32;

  do {
    /* Ask the application for a fresh buffer for every read. */
    buf = uv_buf_init(NULL, 0);
    handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
    if (buf.base == NULL || buf.len == 0) {
      handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
      return;
    }
    assert(buf.base != NULL);

    if (uv_udp_using_recvmmsg(handle)) {
      /* Batched path: one syscall may consume several of our budget. */
      nread = uv__udp_recvmmsg(handle, &buf);
      if (nread > 0)
        count -= nread;
      continue;
    }

    memset(&h, 0, sizeof(h));
    memset(&peer, 0, sizeof(peer));
    h.msg_name = &peer;
    h.msg_namelen = sizeof(peer);
    h.msg_iov = (void*) &buf;  /* uv_buf_t is layout-compatible with iovec */
    h.msg_iovlen = 1;

    do {
      nread = recvmsg(handle->io_watcher.fd, &h, 0);
    }
    while (nread == -1 && errno == EINTR);

    if (nread == -1) {
      /* EAGAIN is reported as a zero-byte read so the buffer is returned. */
      if (errno == EAGAIN || errno == EWOULDBLOCK)
        handle->recv_cb(handle, 0, &buf, NULL, 0);
      else
        handle->recv_cb(handle, UV__ERR(errno), &buf, NULL, 0);
    }
    else {
      flags = 0;
      if (h.msg_flags & MSG_TRUNC)
        flags |= UV_UDP_PARTIAL;

      handle->recv_cb(handle, nread, &buf, (const struct sockaddr*) &peer, flags);
    }
    count--;
  }
  /* recv_cb callback may decide to pause or close the handle */
  while (nread != -1
      && count > 0
      && handle->io_watcher.fd != -1
      && handle->recv_cb != NULL);
}
277
/* Send queued requests one datagram at a time with sendmsg(2), starting
 * at `req` and walking the write queue until it is empty or the socket
 * stops accepting data.  Finished requests are moved to the completion
 * queue; their callbacks run later from uv__udp_run_completed().
 */
static void uv__udp_sendmsg_one(uv_udp_t* handle, uv_udp_send_t* req) {
  struct uv__queue* q;
  struct msghdr h;
  ssize_t size;

  for (;;) {
    memset(&h, 0, sizeof h);
    if (req->addr.ss_family == AF_UNSPEC) {
      /* Connected socket: no destination address in the header. */
      h.msg_name = NULL;
      h.msg_namelen = 0;
    } else {
      h.msg_name = &req->addr;
      if (req->addr.ss_family == AF_INET6)
        h.msg_namelen = sizeof(struct sockaddr_in6);
      else if (req->addr.ss_family == AF_INET)
        h.msg_namelen = sizeof(struct sockaddr_in);
      else if (req->addr.ss_family == AF_UNIX)
        h.msg_namelen = sizeof(struct sockaddr_un);
      else {
        assert(0 && "unsupported address family");
        abort();
      }
    }
    h.msg_iov = (struct iovec*) req->bufs;
    h.msg_iovlen = req->nbufs;

    do
      size = sendmsg(handle->io_watcher.fd, &h, 0);
    while (size == -1 && errno == EINTR);

    /* Transient error: leave the request on the write queue so the
     * POLLOUT watcher retries it later.  Hard errors fall through and
     * complete the request with the error code.
     */
    if (size == -1)
      if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
        return;

    req->status = (size == -1 ? UV__ERR(errno) : size);

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    uv__queue_remove(&req->queue);
    uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
    uv__io_feed(handle->loop, &handle->io_watcher);

    if (uv__queue_empty(&handle->write_queue))
      return;

    q = uv__queue_head(&handle->write_queue);
    req = uv__queue_data(q, uv_udp_send_t, queue);
  }
}
330
#if defined(__linux__) || defined(__FreeBSD__)
/* Batched send path: drain the write queue with sendmmsg(2), up to
 * ARRAY_SIZE(h) requests per syscall.  Only used for single-buffer
 * requests (see uv__udp_sendmsg); multi-buffer requests go through
 * uv__udp_sendmsg_one().  Completed requests are moved onto the
 * completion queue; callbacks fire later via uv__io_feed().
 */
static void uv__udp_sendmsg_many(uv_udp_t* handle) {
  uv_udp_send_t* req;
  struct mmsghdr h[20];
  struct mmsghdr* p;
  struct uv__queue* q;
  ssize_t npkts;
  size_t pkts;
  size_t i;

write_queue_drain:
  /* Fill one mmsghdr per pending request, up to the batch size. */
  for (pkts = 0, q = uv__queue_head(&handle->write_queue);
       pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
       ++pkts, q = uv__queue_head(q)) {
    req = uv__queue_data(q, uv_udp_send_t, queue);

    p = &h[pkts];
    memset(p, 0, sizeof(*p));
    if (req->addr.ss_family == AF_UNSPEC) {
      /* Connected socket: no per-packet destination. */
      p->msg_hdr.msg_name = NULL;
      p->msg_hdr.msg_namelen = 0;
    } else {
      p->msg_hdr.msg_name = &req->addr;
      if (req->addr.ss_family == AF_INET6)
        p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in6);
      else if (req->addr.ss_family == AF_INET)
        p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in);
      else if (req->addr.ss_family == AF_UNIX)
        p->msg_hdr.msg_namelen = sizeof(struct sockaddr_un);
      else {
        assert(0 && "unsupported address family");
        abort();
      }
    }
    h[pkts].msg_hdr.msg_iov = (struct iovec*) req->bufs;
    h[pkts].msg_hdr.msg_iovlen = req->nbufs;
  }

  do
    npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0);
  while (npkts == -1 && errno == EINTR);

  if (npkts < 1) {
    /* Transient error: keep everything queued for the next POLLOUT. */
    if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
      return;
    /* Hard error: fail every request in this batch with the same errno. */
    for (i = 0, q = uv__queue_head(&handle->write_queue);
         i < pkts && q != &handle->write_queue;
         ++i, q = uv__queue_head(&handle->write_queue)) {
      req = uv__queue_data(q, uv_udp_send_t, queue);
      req->status = UV__ERR(errno);
      uv__queue_remove(&req->queue);
      uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
    }
    uv__io_feed(handle->loop, &handle->io_watcher);
    return;
  }

  /* Safety: npkts known to be >0 below. Hence cast from ssize_t
   * to size_t safe.
   */
  for (i = 0, q = uv__queue_head(&handle->write_queue);
       i < (size_t) npkts && q != &handle->write_queue;
       ++i, q = uv__queue_head(&handle->write_queue)) {
    req = uv__queue_data(q, uv_udp_send_t, queue);
    req->status = req->bufs[0].len;

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    uv__queue_remove(&req->queue);
    uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
  }

  /* couldn't batch everything, continue sending (jump to avoid stack growth) */
  if (!uv__queue_empty(&handle->write_queue))
    goto write_queue_drain;

  uv__io_feed(handle->loop, &handle->io_watcher);
}
#endif  /* __linux__ || __FreeBSD__ */
413
/* Dispatch pending sends to the single-datagram or batched send path,
 * depending on platform support and queue contents.
 */
static void uv__udp_sendmsg(uv_udp_t* handle) {
  uv_udp_send_t* req;
  struct uv__queue* q;

  if (uv__queue_empty(&handle->write_queue))
    return;

  q = uv__queue_head(&handle->write_queue);
  req = uv__queue_data(q, uv_udp_send_t, queue);

#if defined(__linux__) || defined(__FreeBSD__)
  /* Use sendmmsg() if this send request contains more than one datagram OR
   * there is more than one send request (because that automatically implies
   * there is more than one datagram.)
   */
  if (req->nbufs != 1 || uv__queue_next(&req->queue) != &handle->write_queue) {
    uv__udp_sendmsg_many(handle);
    return;
  }
#endif

  uv__udp_sendmsg_one(handle, req);
}
435
436 /* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
437 * refinements for programs that use multicast. Therefore we preferentially
438 * set SO_REUSEPORT over SO_REUSEADDR here, but we set SO_REUSEPORT only
439 * when that socket option doesn't have the capability of load balancing.
440 * Otherwise, we fall back to SO_REUSEADDR.
441 *
442 * Linux as of 3.9, DragonflyBSD 3.6, AIX 7.2.5 have the SO_REUSEPORT socket
443 * option but with semantics that are different from the BSDs: it _shares_
444 * the port rather than steals it from the current listener. While useful,
445 * it's not something we can emulate on other platforms so we don't enable it.
446 *
447 * zOS does not support getsockname with SO_REUSEPORT option when using
448 * AF_UNIX.
449 */
static int uv__sock_reuseaddr(int fd) {
  int yes;
  yes = 1;

#if defined(SO_REUSEPORT) && defined(__MVS__)
  /* z/OS: SO_REUSEPORT breaks getsockname() on AF_UNIX sockets, so pick
   * the option based on the socket's actual address family.
   */
  struct sockaddr_in sockfd;
  unsigned int sockfd_len = sizeof(sockfd);
  if (getsockname(fd, (struct sockaddr*) &sockfd, &sockfd_len) == -1)
    return UV__ERR(errno);
  if (sockfd.sin_family == AF_UNIX) {
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
      return UV__ERR(errno);
  } else {
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
      return UV__ERR(errno);
  }
#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) && \
      !defined(__sun__) && !defined(__DragonFly__) && !defined(_AIX73)
  /* BSD-style SO_REUSEPORT: behaves like SO_REUSEADDR plus multicast
   * refinements; excluded on platforms where it load-balances instead
   * (see block comment above this function).
   */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
    return UV__ERR(errno);
#else
  /* Everywhere else: plain SO_REUSEADDR. */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
    return UV__ERR(errno);
#endif

  return 0;
}
477
478 /*
479 * The Linux kernel suppresses some ICMP error messages by default for UDP
480 * sockets. Setting IP_RECVERR/IPV6_RECVERR on the socket enables full ICMP
481 * error reporting, hopefully resulting in faster failover to working name
482 * servers.
483 */
static int uv__set_recverr(int fd, sa_family_t ss_family) {
#if defined(__linux__)
  int yes;

  yes = 1;
  /* Pick the option level/name matching the socket's address family;
   * other families (e.g. AF_UNIX) are silently accepted as a no-op.
   */
  if (ss_family == AF_INET) {
    if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &yes, sizeof(yes)))
      return UV__ERR(errno);
  } else if (ss_family == AF_INET6) {
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &yes, sizeof(yes)))
      return UV__ERR(errno);
  }
#endif
  /* Not supported off-Linux; succeed without doing anything. */
  return 0;
}
499
500
/* Bind the handle's socket to `addr`, creating the socket first when the
 * handle does not have one yet.  `flags` is a combination of the
 * UV_UDP_* bind flags validated below.  Returns 0 or a negative libuv
 * error code.
 */
int uv__udp_bind(uv_udp_t* handle,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 unsigned int flags) {
  int err;
  int yes;
  int fd;

  /* Check for bad flags. */
  if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR |
                UV_UDP_REUSEPORT | UV_UDP_LINUX_RECVERR))
    return UV_EINVAL;

  /* Cannot set IPv6-only mode on non-IPv6 socket. */
  if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6)
    return UV_EINVAL;

  /* Lazily create the socket the first time bind is attempted. */
  fd = handle->io_watcher.fd;
  if (fd == -1) {
    err = uv__socket(addr->sa_family, SOCK_DGRAM, 0);
    if (err < 0)
      return err;
    fd = err;
    handle->io_watcher.fd = fd;
  }

  if (flags & UV_UDP_LINUX_RECVERR) {
    err = uv__set_recverr(fd, addr->sa_family);
    if (err)
      return err;
  }

  if (flags & UV_UDP_REUSEADDR) {
    err = uv__sock_reuseaddr(fd);
    if (err)
      return err;
  }

  if (flags & UV_UDP_REUSEPORT) {
    err = uv__sock_reuseport(fd);
    if (err)
      return err;
  }

  if (flags & UV_UDP_IPV6ONLY) {
#ifdef IPV6_V6ONLY
    yes = 1;
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
      err = UV__ERR(errno);
      return err;
    }
#else
    err = UV_ENOTSUP;
    return err;
#endif
  }

  if (bind(fd, addr, addrlen)) {
    err = UV__ERR(errno);
    if (errno == EAFNOSUPPORT)
      /* OSX, other BSDs and SunoS fail with EAFNOSUPPORT when binding a
       * socket created with AF_INET to an AF_INET6 address or vice versa. */
      err = UV_EINVAL;
    return err;
  }

  if (addr->sa_family == AF_INET6)
    handle->flags |= UV_HANDLE_IPV6;

  handle->flags |= UV_HANDLE_BOUND;
  return 0;
}
573
574
/* Bind the handle to the wildcard address for `domain` if it has not been
 * bound yet; no-op when a socket already exists.  Used so send/connect
 * calls work on handles the application never explicitly bound.
 */
static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
                                       int domain,
                                       unsigned int flags) {
  union uv__sockaddr taddr;
  socklen_t addrlen;

  if (handle->io_watcher.fd != -1)
    return 0;  /* Already has a socket; nothing to do. */

  memset(&taddr, 0, sizeof(taddr));

  if (domain == AF_INET) {
    taddr.in.sin_family = AF_INET;
    taddr.in.sin_addr.s_addr = INADDR_ANY;
    addrlen = sizeof(taddr.in);
  } else if (domain == AF_INET6) {
    taddr.in6.sin6_family = AF_INET6;
    taddr.in6.sin6_addr = in6addr_any;
    addrlen = sizeof(taddr.in6);
  } else {
    assert(0 && "unsupported address family");
    abort();
  }

  return uv__udp_bind(handle, &taddr.addr, addrlen, flags);
}
610
611
/* Associate the socket with a fixed peer address via connect(2),
 * binding the handle to the wildcard address first if needed.
 */
int uv__udp_connect(uv_udp_t* handle,
                    const struct sockaddr* addr,
                    unsigned int addrlen) {
  int r;

  r = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
  if (r)
    return r;

  do {
    errno = 0;
    r = connect(handle->io_watcher.fd, addr, addrlen);
  } while (r == -1 && errno == EINTR);

  if (r != 0)
    return UV__ERR(errno);

  handle->flags |= UV_HANDLE_UDP_CONNECTED;

  return 0;
}
633
634 /* From https://pubs.opengroup.org/onlinepubs/9699919799/functions/connect.html
635 * Any of uv supported UNIXs kernel should be standardized, but the kernel
636 * implementation logic not same, let's use pseudocode to explain the udp
637 * disconnect behaviors:
638 *
639 * Predefined stubs for pseudocode:
640 * 1. sodisconnect: The function to perform the real udp disconnect
641 * 2. pru_connect: The function to perform the real udp connect
642 * 3. so: The kernel object match with socket fd
643 * 4. addr: The sockaddr parameter from user space
644 *
645 * BSDs:
646 * if(sodisconnect(so) == 0) { // udp disconnect succeed
647 * if (addr->sa_len != so->addr->sa_len) return EINVAL;
648 * if (addr->sa_family != so->addr->sa_family) return EAFNOSUPPORT;
649 * pru_connect(so);
650 * }
651 * else return EISCONN;
652 *
653 * z/OS (same with Windows):
654 * if(addr->sa_len < so->addr->sa_len) return EINVAL;
655 * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
656 *
657 * AIX:
658 * if(addr->sa_len != sizeof(struct sockaddr)) return EINVAL; // ignore ip proto version
659 * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
660 *
661 * Linux,Others:
662 * if(addr->sa_len < sizeof(struct sockaddr)) return EINVAL;
663 * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
664 */
/* Dissolve a previous uv__udp_connect() association by connecting to an
 * AF_UNSPEC address.  See the long pseudocode comment above for how the
 * various kernels interpret this; the error filtering below matches it.
 */
int uv__udp_disconnect(uv_udp_t* handle) {
  int r;
#if defined(__MVS__)
  /* z/OS validates the full sockaddr length, so pass a sockaddr_storage. */
  struct sockaddr_storage addr;
#else
  struct sockaddr addr;
#endif

  memset(&addr, 0, sizeof(addr));

#if defined(__MVS__)
  addr.ss_family = AF_UNSPEC;
#else
  addr.sa_family = AF_UNSPEC;
#endif

  do {
    errno = 0;
#ifdef __PASE__
    /* On IBMi a connectionless transport socket can be disconnected by
     * either setting the addr parameter to NULL or setting the
     * addr_length parameter to zero, and issuing another connect().
     * https://www.ibm.com/docs/en/i/7.4?topic=ssw_ibm_i_74/apis/connec.htm
     */
    r = connect(handle->io_watcher.fd, (struct sockaddr*) NULL, 0);
#else
    r = connect(handle->io_watcher.fd, (struct sockaddr*) &addr, sizeof(addr));
#endif
  } while (r == -1 && errno == EINTR);

  if (r == -1) {
#if defined(BSD)  /* The macro BSD is from sys/param.h */
    /* BSDs reject AF_UNSPEC with EAFNOSUPPORT/EINVAL even though the
     * disconnect itself succeeded; treat those as success.
     */
    if (errno != EAFNOSUPPORT && errno != EINVAL)
      return UV__ERR(errno);
#else
    return UV__ERR(errno);
#endif
  }

  handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
  return 0;
}
707
/* Queue an asynchronous send of `bufs` to `addr` (or to the connected
 * peer when addr is NULL).  Attempts an immediate non-blocking write
 * when nothing else is queued; otherwise arms the POLLOUT watcher.
 * Returns 0 or a negative libuv error code; on success, send_cb fires
 * later with the request's final status.
 */
int uv__udp_send(uv_udp_send_t* req,
                 uv_udp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 uv_udp_send_cb send_cb) {
  int err;
  int empty_queue;

  assert(nbufs > 0);

  if (addr) {
    err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
    if (err)
      return err;
  }

  /* It's legal for send_queue_count > 0 even when the write_queue is empty;
   * it means there are error-state requests in the write_completed_queue that
   * will touch up send_queue_size/count later.
   */
  empty_queue = (handle->send_queue_count == 0);

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  assert(addrlen <= sizeof(req->addr));
  /* AF_UNSPEC in req->addr means "connected send" for the send paths. */
  if (addr == NULL)
    req->addr.ss_family = AF_UNSPEC;
  else
    memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->nbufs = nbufs;

  /* Small buffer counts use the inline array; larger ones heap-allocate. */
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));

  if (req->bufs == NULL) {
    uv__req_unregister(handle->loop, req);
    return UV_ENOMEM;
  }

  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
  handle->send_queue_count++;
  uv__queue_insert_tail(&handle->write_queue, &req->queue);
  uv__handle_start(handle);

  /* Only try the fast path outside of completion processing; re-entering
   * uv__udp_sendmsg() from a send callback would trip the PROCESSING guard.
   */
  if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
    uv__udp_sendmsg(handle);

    /* `uv__udp_sendmsg` may not be able to do non-blocking write straight
     * away. In such cases the `io_watcher` has to be queued for asynchronous
     * write.
     */
    if (!uv__queue_empty(&handle->write_queue))
      uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
  } else {
    uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
  }

  return 0;
}
772
773
/* Synchronous best-effort send: one sendmsg(2) call, no queueing.
 * Returns the number of bytes sent, UV_EAGAIN when the socket is busy
 * (or other sends are already queued), or a negative error code.
 */
int uv__udp_try_send(uv_udp_t* handle,
                     const uv_buf_t bufs[],
                     unsigned int nbufs,
                     const struct sockaddr* addr,
                     unsigned int addrlen) {
  int err;
  struct msghdr h;
  ssize_t size;

  assert(nbufs > 0);

  /* already sending a message */
  if (handle->send_queue_count != 0)
    return UV_EAGAIN;

  if (addr) {
    err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
    if (err)
      return err;
  } else {
    /* Without an explicit destination the socket must be connected. */
    assert(handle->flags & UV_HANDLE_UDP_CONNECTED);
  }

  memset(&h, 0, sizeof h);
  h.msg_name = (struct sockaddr*) addr;
  h.msg_namelen = addrlen;
  h.msg_iov = (struct iovec*) bufs;  /* uv_buf_t matches iovec layout */
  h.msg_iovlen = nbufs;

  do {
    size = sendmsg(handle->io_watcher.fd, &h, 0);
  } while (size == -1 && errno == EINTR);

  if (size == -1) {
    if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
      return UV_EAGAIN;
    else
      return UV__ERR(errno);
  }

  return size;
}
816
817
/* Join or leave an IPv4 multicast group on the handle's socket.
 * `interface_addr`, when given, selects the local interface by its
 * dotted-quad address; otherwise the kernel picks (INADDR_ANY).
 */
static int uv__udp_set_membership4(uv_udp_t* handle,
                                   const struct sockaddr_in* multicast_addr,
                                   const char* interface_addr,
                                   uv_membership membership) {
  struct ip_mreq mreq;
  int optname;
  int rc;

  memset(&mreq, 0, sizeof(mreq));

  mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  if (interface_addr != NULL) {
    rc = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
    if (rc)
      return rc;
  }

  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;

  if (membership == UV_JOIN_GROUP)
    optname = IP_ADD_MEMBERSHIP;
  else if (membership == UV_LEAVE_GROUP)
    optname = IP_DROP_MEMBERSHIP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IP,
                 optname,
                 &mreq,
                 sizeof(mreq)) != 0) {
#if defined(__MVS__)
    /* z/OS reports a missing interface as ENXIO. */
    if (errno == ENXIO)
      return UV_ENODEV;
#endif
    return UV__ERR(errno);
  }

  return 0;
}
863
864
/* Join or leave an IPv6 multicast group.  The interface, when given, is
 * selected via the scope id parsed out of `interface_addr` (e.g. the
 * "%eth0" suffix); 0 lets the kernel choose.
 */
static int uv__udp_set_membership6(uv_udp_t* handle,
                                   const struct sockaddr_in6* multicast_addr,
                                   const char* interface_addr,
                                   uv_membership membership) {
  int optname;
  struct ipv6_mreq mreq;
  struct sockaddr_in6 addr6;

  memset(&mreq, 0, sizeof mreq);

  if (interface_addr) {
    if (uv_ip6_addr(interface_addr, 0, &addr6))
      return UV_EINVAL;
    mreq.ipv6mr_interface = addr6.sin6_scope_id;
  } else {
    mreq.ipv6mr_interface = 0;  /* any interface */
  }

  mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;

  switch (membership) {
  case UV_JOIN_GROUP:
    optname = IPV6_ADD_MEMBERSHIP;
    break;
  case UV_LEAVE_GROUP:
    optname = IPV6_DROP_MEMBERSHIP;
    break;
  default:
    return UV_EINVAL;
  }

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IPV6,
                 optname,
                 &mreq,
                 sizeof(mreq))) {
#if defined(__MVS__)
    /* z/OS reports a missing interface as ENXIO. */
    if (errno == ENXIO)
      return UV_ENODEV;
#endif
    return UV__ERR(errno);
  }

  return 0;
}
910
911
912 #if !defined(__OpenBSD__) && \
913 !defined(__NetBSD__) && \
914 !defined(__ANDROID__) && \
915 !defined(__DragonFly__) && \
916 !defined(__QNX__) && \
917 !defined(__GNU__)
/* Source-specific IPv4 multicast join/leave: only traffic from
 * `source_addr` to `multicast_addr` is accepted.  Performs a deferred
 * wildcard bind first so the socket exists.
 */
static int uv__udp_set_source_membership4(uv_udp_t* handle,
                                          const struct sockaddr_in* multicast_addr,
                                          const char* interface_addr,
                                          const struct sockaddr_in* source_addr,
                                          uv_membership membership) {
  struct ip_mreq_source mreq;
  int optname;
  int err;

  err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
  if (err)
    return err;

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr != NULL) {
    err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
    if (err)
      return err;
  } else {
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  }

  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
  mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;

  if (membership == UV_JOIN_GROUP)
    optname = IP_ADD_SOURCE_MEMBERSHIP;
  else if (membership == UV_LEAVE_GROUP)
    optname = IP_DROP_SOURCE_MEMBERSHIP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IP,
                 optname,
                 &mreq,
                 sizeof(mreq))) {
    return UV__ERR(errno);
  }

  return 0;
}
961
962
/* Source-specific IPv6 multicast join/leave via the protocol-independent
 * MCAST_{JOIN,LEAVE}_SOURCE_GROUP options.  Performs a deferred wildcard
 * bind first so the socket exists.
 */
static int uv__udp_set_source_membership6(uv_udp_t* handle,
                                          const struct sockaddr_in6* multicast_addr,
                                          const char* interface_addr,
                                          const struct sockaddr_in6* source_addr,
                                          uv_membership membership) {
  struct group_source_req mreq;
  struct sockaddr_in6 addr6;
  int optname;
  int err;

  err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
  if (err)
    return err;

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr != NULL) {
    /* Interface is selected by the scope id embedded in the address. */
    err = uv_ip6_addr(interface_addr, 0, &addr6);
    if (err)
      return err;
    mreq.gsr_interface = addr6.sin6_scope_id;
  } else {
    mreq.gsr_interface = 0;  /* any interface */
  }

  /* group_source_req stores full sockaddr_storage values; verify the
   * sockaddr_in6 arguments fit before copying. */
  STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
  STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));
  memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
  memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));

  if (membership == UV_JOIN_GROUP)
    optname = MCAST_JOIN_SOURCE_GROUP;
  else if (membership == UV_LEAVE_GROUP)
    optname = MCAST_LEAVE_SOURCE_GROUP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IPV6,
                 optname,
                 &mreq,
                 sizeof(mreq))) {
    return UV__ERR(errno);
  }

  return 0;
}
1010 #endif
1011
1012
/* Initialize a UDP handle on `loop`.  When `domain` is a concrete address
 * family the socket is created immediately; AF_UNSPEC defers creation
 * until the first bind/connect/send.  `flags` is currently unused here.
 */
int uv__udp_init_ex(uv_loop_t* loop,
                    uv_udp_t* handle,
                    unsigned flags,
                    int domain) {
  int fd = -1;

  if (domain != AF_UNSPEC) {
    fd = uv__socket(domain, SOCK_DGRAM, 0);
    if (fd < 0)
      return fd;  /* negative libuv error code */
  }

  uv__handle_init(loop, (uv_handle_t*) handle, UV_UDP);
  handle->alloc_cb = NULL;
  handle->recv_cb = NULL;
  handle->send_queue_size = 0;
  handle->send_queue_count = 0;
  uv__io_init(&handle->io_watcher, uv__udp_io, fd);
  uv__queue_init(&handle->write_queue);
  uv__queue_init(&handle->write_completed_queue);

  return 0;
}
1037
1038
uv_udp_using_recvmmsg(const uv_udp_t * handle)1039 int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
1040 #if defined(__linux__) || defined(__FreeBSD__)
1041 if (handle->flags & UV_HANDLE_UDP_RECVMMSG)
1042 return 1;
1043 #endif
1044 return 0;
1045 }
1046
1047
/* Adopt an existing datagram socket into the handle.  The fd is switched
 * to non-blocking mode and gets SO_REUSEADDR/SO_REUSEPORT semantics to
 * match sockets libuv creates itself.
 */
int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
  int err;

  /* Check for already active socket. */
  if (handle->io_watcher.fd != -1)
    return UV_EBUSY;

  /* Refuse fds that another handle on this loop is already watching. */
  if (uv__fd_exists(handle->loop, sock))
    return UV_EEXIST;

  err = uv__nonblock(sock, 1);
  if (err)
    return err;

  err = uv__sock_reuseaddr(sock);
  if (err)
    return err;

  handle->io_watcher.fd = sock;
  /* Preserve an existing connect() association on the adopted socket. */
  if (uv__udp_is_connected(handle))
    handle->flags |= UV_HANDLE_UDP_CONNECTED;

  return 0;
}
1072
1073
/* Join or leave a multicast group identified by a textual address.
 * Binds the handle to the wildcard address first when still unbound.
 * Returns 0, UV_EINVAL for an unparseable address, or a bind/setsockopt
 * error code. */
int uv_udp_set_membership(uv_udp_t* handle,
                          const char* multicast_addr,
                          const char* interface_addr,
                          uv_membership membership) {
  struct sockaddr_in a4;
  struct sockaddr_in6 a6;
  int rc;

  if (uv_ip4_addr(multicast_addr, 0, &a4) == 0) {
    rc = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
    if (rc != 0)
      return rc;
    return uv__udp_set_membership4(handle, &a4, interface_addr, membership);
  }

  if (uv_ip6_addr(multicast_addr, 0, &a6) == 0) {
    rc = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
    if (rc != 0)
      return rc;
    return uv__udp_set_membership6(handle, &a6, interface_addr, membership);
  }

  /* Neither an IPv4 nor an IPv6 literal. */
  return UV_EINVAL;
}
1096
1097
/* Join or leave a source-specific multicast group. The group and source
 * addresses must be of the same family; IPv4 is tried first, matching
 * plain uv_udp_set_membership(). Unsupported on platforms lacking the
 * MCAST_*_SOURCE_GROUP socket options. */
int uv_udp_set_source_membership(uv_udp_t* handle,
                                 const char* multicast_addr,
                                 const char* interface_addr,
                                 const char* source_addr,
                                 uv_membership membership) {
#if !defined(__OpenBSD__) && \
    !defined(__NetBSD__) && \
    !defined(__ANDROID__) && \
    !defined(__DragonFly__) && \
    !defined(__QNX__) && \
    !defined(__GNU__)
  union uv__sockaddr grp;
  union uv__sockaddr src;
  int rc;

  /* IPv4 fast path: both addresses must parse as IPv4. */
  if (uv_ip4_addr(multicast_addr, 0, &grp.in) == 0) {
    rc = uv_ip4_addr(source_addr, 0, &src.in);
    if (rc != 0)
      return rc;
    return uv__udp_set_source_membership4(handle,
                                          &grp.in,
                                          interface_addr,
                                          &src.in,
                                          membership);
  }

  /* Otherwise both must parse as IPv6. */
  rc = uv_ip6_addr(multicast_addr, 0, &grp.in6);
  if (rc != 0)
    return rc;
  rc = uv_ip6_addr(source_addr, 0, &src.in6);
  if (rc != 0)
    return rc;
  return uv__udp_set_source_membership6(handle,
                                        &grp.in6,
                                        interface_addr,
                                        &src.in6,
                                        membership);
#else
  return UV_ENOSYS;
#endif
}
1140
1141
/* Apply a socket option at the protocol level matching the handle's
 * address family: option6 at IPPROTO_IPV6 for IPv6 handles, option4 at
 * IPPROTO_IP otherwise. Returns 0 or a translated errno. */
static int uv__setsockopt(uv_udp_t* handle,
                          int option4,
                          int option6,
                          const void* val,
                          socklen_t size) {
  int level;
  int optname;

  if (handle->flags & UV_HANDLE_IPV6) {
    level = IPPROTO_IPV6;
    optname = option6;
  } else {
    level = IPPROTO_IP;
    optname = option4;
  }

  if (setsockopt(handle->io_watcher.fd, level, optname, val, size) != 0)
    return UV__ERR(errno);

  return 0;
}
1166
/* Like uv__setsockopt() but passes the value with the width each
 * platform's stack expects: char on Solaris/AIX/zOS, unsigned char on
 * OpenBSD, int elsewhere. The value must fit in a byte. */
static int uv__setsockopt_maybe_char(uv_udp_t* handle,
                                     int option4,
                                     int option6,
                                     int val) {
  if (val < 0 || val > 255)
    return UV_EINVAL;

#if defined(__sun) || defined(_AIX) || defined(__MVS__)
  char arg = val;
#elif defined(__OpenBSD__)
  unsigned char arg = val;
#else
  int arg = val;
#endif

  return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
}
1184
1185
/* Enable (on != 0) or disable sending to broadcast addresses via
 * SO_BROADCAST. Returns 0 or a translated errno. */
int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
  int rc;

  rc = setsockopt(handle->io_watcher.fd,
                  SOL_SOCKET,
                  SO_BROADCAST,
                  &on,
                  sizeof(on));
  if (rc != 0)
    return UV__ERR(errno);

  return 0;
}
1197
1198
/* Set the outgoing unicast TTL (IPv4 IP_TTL) or hop limit (IPv6
 * IPV6_UNICAST_HOPS). Valid range is 1..255; returns 0, UV_EINVAL,
 * UV_ENOTSUP (zOS IPv4), or a translated errno from setsockopt(). */
int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
  if (ttl < 1 || ttl > 255)
    return UV_EINVAL;

#if defined(__MVS__)
  if (!(handle->flags & UV_HANDLE_IPV6))
    return UV_ENOTSUP; /* zOS does not support setting ttl for IPv4 */
#endif

/*
 * On Solaris and derivatives such as SmartOS, the length of socket options
 * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS,
 * so hardcode the size of these options on this platform,
 * and use the general uv__setsockopt_maybe_char call on other platforms.
 */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__) || defined(__QNX__)
  /* Pass the value as a full int on these platforms. */
  return uv__setsockopt(handle,
                        IP_TTL,
                        IPV6_UNICAST_HOPS,
                        &ttl,
                        sizeof(ttl));

#else /* !(defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
       defined(__MVS__) || defined(__QNX__)) */

  /* Elsewhere, let the helper choose char vs. int width. */
  return uv__setsockopt_maybe_char(handle,
                                   IP_TTL,
                                   IPV6_UNICAST_HOPS,
                                   ttl);

#endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
        defined(__MVS__) || defined(__QNX__) */
}
1234
1235
/* Set the multicast TTL (IPv4 IP_MULTICAST_TTL) or hop limit (IPv6
 * IPV6_MULTICAST_HOPS) for outgoing multicast packets. Returns 0 or a
 * translated errno; uv__setsockopt_maybe_char() rejects values outside
 * 0..255 with UV_EINVAL. */
int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
/*
 * On Solaris and derivatives such as SmartOS, the length of socket options
 * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for
 * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case,
 * and use the general uv__setsockopt_maybe_char call otherwise.
 */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__) || defined(__QNX__)
  if (handle->flags & UV_HANDLE_IPV6)
    /* IPv6 on these platforms takes a full int. */
    return uv__setsockopt(handle,
                          IP_MULTICAST_TTL,
                          IPV6_MULTICAST_HOPS,
                          &ttl,
                          sizeof(ttl));
#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__) || defined(__QNX__) */

  return uv__setsockopt_maybe_char(handle,
                                   IP_MULTICAST_TTL,
                                   IPV6_MULTICAST_HOPS,
                                   ttl);
}
1259
1260
/* Enable or disable looping outgoing multicast packets back to local
 * sockets (IP_MULTICAST_LOOP / IPV6_MULTICAST_LOOP). Returns 0 or a
 * translated errno. */
int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
/*
 * On Solaris and derivatives such as SmartOS, the length of socket options
 * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for
 * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case,
 * and use the general uv__setsockopt_maybe_char call otherwise.
 */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__) || defined(__QNX__)
  if (handle->flags & UV_HANDLE_IPV6)
    /* IPv6 on these platforms takes a full int. */
    return uv__setsockopt(handle,
                          IP_MULTICAST_LOOP,
                          IPV6_MULTICAST_LOOP,
                          &on,
                          sizeof(on));
#endif /* defined(__sun) || defined(_AIX) ||defined(__OpenBSD__) ||
    defined(__MVS__) || defined(__QNX__) */

  return uv__setsockopt_maybe_char(handle,
                                   IP_MULTICAST_LOOP,
                                   IPV6_MULTICAST_LOOP,
                                   on);
}
1284
/* Select the local interface for outgoing multicast traffic. A NULL
 * interface_addr selects the default interface for the handle's family;
 * otherwise the string is parsed as an IPv4 address or an IPv6 address
 * (whose scope id names the interface). Returns 0, UV_EINVAL, or a
 * translated errno. */
int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
  struct sockaddr_storage storage;
  struct sockaddr_in* sin = (struct sockaddr_in*) &storage;
  struct sockaddr_in6* sin6 = (struct sockaddr_in6*) &storage;

  if (interface_addr == NULL) {
    /* No interface specified: use the wildcard for the handle's family. */
    memset(&storage, 0, sizeof(storage));
    if (handle->flags & UV_HANDLE_IPV6) {
      storage.ss_family = AF_INET6;
      sin6->sin6_scope_id = 0;
    } else {
      storage.ss_family = AF_INET;
      sin->sin_addr.s_addr = htonl(INADDR_ANY);
    }
  } else if (uv_ip4_addr(interface_addr, 0, sin) != 0 &&
             uv_ip6_addr(interface_addr, 0, sin6) != 0) {
    /* Neither family could parse the string. */
    return UV_EINVAL;
  }

  switch (storage.ss_family) {
  case AF_INET:
    if (setsockopt(handle->io_watcher.fd,
                   IPPROTO_IP,
                   IP_MULTICAST_IF,
                   (void*) &sin->sin_addr,
                   sizeof(sin->sin_addr)) == -1)
      return UV__ERR(errno);
    break;

  case AF_INET6:
    /* IPv6 selects the interface by scope id, not by address. */
    if (setsockopt(handle->io_watcher.fd,
                   IPPROTO_IPV6,
                   IPV6_MULTICAST_IF,
                   &sin6->sin6_scope_id,
                   sizeof(sin6->sin6_scope_id)) == -1)
      return UV__ERR(errno);
    break;

  default:
    assert(0 && "unexpected address family");
    abort();
  }

  return 0;
}
1333
uv_udp_getpeername(const uv_udp_t * handle,struct sockaddr * name,int * namelen)1334 int uv_udp_getpeername(const uv_udp_t* handle,
1335 struct sockaddr* name,
1336 int* namelen) {
1337
1338 return uv__getsockpeername((const uv_handle_t*) handle,
1339 getpeername,
1340 name,
1341 namelen);
1342 }
1343
uv_udp_getsockname(const uv_udp_t * handle,struct sockaddr * name,int * namelen)1344 int uv_udp_getsockname(const uv_udp_t* handle,
1345 struct sockaddr* name,
1346 int* namelen) {
1347
1348 return uv__getsockpeername((const uv_handle_t*) handle,
1349 getsockname,
1350 name,
1351 namelen);
1352 }
1353
1354
/* Start delivering incoming datagrams through alloc_cb/recv_cb, binding
 * the socket to the IPv4 wildcard first if it is still unbound.
 * Returns 0, UV_EINVAL, UV_EALREADY, or a bind error. */
int uv__udp_recv_start(uv_udp_t* handle,
                       uv_alloc_cb alloc_cb,
                       uv_udp_recv_cb recv_cb) {
  int rc;

  /* Both callbacks are mandatory. */
  if (alloc_cb == NULL || recv_cb == NULL)
    return UV_EINVAL;

  /* Refuse to start a handle that is already reading. */
  if (uv__io_active(&handle->io_watcher, POLLIN))
    return UV_EALREADY; /* FIXME(bnoordhuis) Should be UV_EBUSY. */

  rc = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
  if (rc != 0)
    return rc;

  handle->alloc_cb = alloc_cb;
  handle->recv_cb = recv_cb;

  uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
  uv__handle_start(handle);

  return 0;
}
1378
1379
/* Stop reading from the socket and clear the receive callbacks. The
 * handle stays "active" only while writes are still pending. */
int uv__udp_recv_stop(uv_udp_t* handle) {
  uv__io_stop(handle->loop, &handle->io_watcher, POLLIN);

  if (uv__io_active(&handle->io_watcher, POLLOUT) == 0)
    uv__handle_stop(handle);

  handle->alloc_cb = NULL;
  handle->recv_cb = NULL;

  return 0;
}
1391