1 /***************************************************************************
2 * _ _ ____ _
3 * Project ___| | | | _ \| |
4 * / __| | | | |_) | |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
7 *
8 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
9 *
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.se/docs/copyright.html.
13 *
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
17 *
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
20 *
21 * SPDX-License-Identifier: curl
22 *
23 ***************************************************************************/
24
25 #include "curl_setup.h"
26 #include "bufq.h"
27
28 /* The last 3 #include files should be in this order */
29 #include "curl_printf.h"
30 #include "curl_memory.h"
31 #include "memdebug.h"
32
chunk_is_empty(const struct buf_chunk * chunk)33 static bool chunk_is_empty(const struct buf_chunk *chunk)
34 {
35 return chunk->r_offset >= chunk->w_offset;
36 }
37
chunk_is_full(const struct buf_chunk * chunk)38 static bool chunk_is_full(const struct buf_chunk *chunk)
39 {
40 return chunk->w_offset >= chunk->dlen;
41 }
42
chunk_len(const struct buf_chunk * chunk)43 static size_t chunk_len(const struct buf_chunk *chunk)
44 {
45 return chunk->w_offset - chunk->r_offset;
46 }
47
chunk_space(const struct buf_chunk * chunk)48 static size_t chunk_space(const struct buf_chunk *chunk)
49 {
50 return chunk->dlen - chunk->w_offset;
51 }
52
chunk_reset(struct buf_chunk * chunk)53 static void chunk_reset(struct buf_chunk *chunk)
54 {
55 chunk->next = NULL;
56 chunk->r_offset = chunk->w_offset = 0;
57 }
58
chunk_append(struct buf_chunk * chunk,const unsigned char * buf,size_t len)59 static size_t chunk_append(struct buf_chunk *chunk,
60 const unsigned char *buf, size_t len)
61 {
62 unsigned char *p = &chunk->x.data[chunk->w_offset];
63 size_t n = chunk->dlen - chunk->w_offset;
64 DEBUGASSERT(chunk->dlen >= chunk->w_offset);
65 if(n) {
66 n = CURLMIN(n, len);
67 memcpy(p, buf, n);
68 chunk->w_offset += n;
69 }
70 return n;
71 }
72
chunk_read(struct buf_chunk * chunk,unsigned char * buf,size_t len)73 static size_t chunk_read(struct buf_chunk *chunk,
74 unsigned char *buf, size_t len)
75 {
76 unsigned char *p = &chunk->x.data[chunk->r_offset];
77 size_t n = chunk->w_offset - chunk->r_offset;
78 DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
79 if(!n) {
80 return 0;
81 }
82 else if(n <= len) {
83 memcpy(buf, p, n);
84 chunk->r_offset = chunk->w_offset = 0;
85 return n;
86 }
87 else {
88 memcpy(buf, p, len);
89 chunk->r_offset += len;
90 return len;
91 }
92 }
93
/* Remove up to `len` of the most recently written, still unread bytes.
 * Returns the number of bytes actually removed. */
static size_t chunk_unwrite(struct buf_chunk *chunk, size_t len)
{
  size_t unread = chunk->w_offset - chunk->r_offset;

  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(!unread)
    return 0;
  if(unread > len) {
    chunk->w_offset -= len;
    return len;
  }
  /* all unread data removed, rewind the chunk */
  chunk->r_offset = chunk->w_offset = 0;
  return unread;
}
110
/* Let `reader` fill the chunk's free space directly, reading at most
 * `max_len` bytes (0 means limited only by the chunk's capacity).
 * Returns the reader's result and advances the write offset on success;
 * returns -1 with *err = CURLE_AGAIN when the chunk has no free space. */
static ssize_t chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
                            Curl_bufq_reader *reader,
                            void *reader_ctx, CURLcode *err)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset; /* free amount */
  ssize_t nread;

  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(!n) {
    /* nowhere to read into */
    *err = CURLE_AGAIN;
    return -1;
  }
  if(max_len && n > max_len)
    n = max_len;
  nread = reader(reader_ctx, p, n, err);
  if(nread > 0) {
    DEBUGASSERT((size_t)nread <= n);
    chunk->w_offset += nread;
  }
  return nread;
}
133
/* Expose the chunk's unread bytes without consuming them */
static void chunk_peek(const struct buf_chunk *chunk,
                       const unsigned char **pbuf, size_t *plen)
{
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  *plen = chunk->w_offset - chunk->r_offset;
  *pbuf = &chunk->x.data[chunk->r_offset];
}
141
/* Like chunk_peek(), but starting `offset` bytes into the unread data */
static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset,
                          const unsigned char **pbuf, size_t *plen)
{
  size_t pos = chunk->r_offset + offset;
  DEBUGASSERT(chunk->w_offset >= pos);
  *pbuf = &chunk->x.data[pos];
  *plen = chunk->w_offset - pos;
}
150
/* Discard up to `amount` unread bytes; rewinds the chunk's offsets
 * when it becomes empty. Returns the count discarded. */
static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
{
  size_t unread = chunk->w_offset - chunk->r_offset;

  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(!unread)
    return 0;
  if(amount < unread) {
    chunk->r_offset += amount;
    return amount;
  }
  chunk->r_offset = chunk->w_offset = 0;
  return unread;
}
163
chunk_list_free(struct buf_chunk ** anchor)164 static void chunk_list_free(struct buf_chunk **anchor)
165 {
166 struct buf_chunk *chunk;
167 while(*anchor) {
168 chunk = *anchor;
169 *anchor = chunk->next;
170 free(chunk);
171 }
172 }
173
174
175
Curl_bufcp_init(struct bufc_pool * pool,size_t chunk_size,size_t spare_max)176 void Curl_bufcp_init(struct bufc_pool *pool,
177 size_t chunk_size, size_t spare_max)
178 {
179 DEBUGASSERT(chunk_size > 0);
180 DEBUGASSERT(spare_max > 0);
181 memset(pool, 0, sizeof(*pool));
182 pool->chunk_size = chunk_size;
183 pool->spare_max = spare_max;
184 }
185
bufcp_take(struct bufc_pool * pool,struct buf_chunk ** pchunk)186 static CURLcode bufcp_take(struct bufc_pool *pool,
187 struct buf_chunk **pchunk)
188 {
189 struct buf_chunk *chunk = NULL;
190
191 if(pool->spare) {
192 chunk = pool->spare;
193 pool->spare = chunk->next;
194 --pool->spare_count;
195 chunk_reset(chunk);
196 *pchunk = chunk;
197 return CURLE_OK;
198 }
199
200 chunk = calloc(1, sizeof(*chunk) + pool->chunk_size);
201 if(!chunk) {
202 *pchunk = NULL;
203 return CURLE_OUT_OF_MEMORY;
204 }
205 chunk->dlen = pool->chunk_size;
206 *pchunk = chunk;
207 return CURLE_OK;
208 }
209
bufcp_put(struct bufc_pool * pool,struct buf_chunk * chunk)210 static void bufcp_put(struct bufc_pool *pool,
211 struct buf_chunk *chunk)
212 {
213 if(pool->spare_count >= pool->spare_max) {
214 free(chunk);
215 }
216 else {
217 chunk_reset(chunk);
218 chunk->next = pool->spare;
219 pool->spare = chunk;
220 ++pool->spare_count;
221 }
222 }
223
Curl_bufcp_free(struct bufc_pool * pool)224 void Curl_bufcp_free(struct bufc_pool *pool)
225 {
226 chunk_list_free(&pool->spare);
227 pool->spare_count = 0;
228 }
229
bufq_init(struct bufq * q,struct bufc_pool * pool,size_t chunk_size,size_t max_chunks,int opts)230 static void bufq_init(struct bufq *q, struct bufc_pool *pool,
231 size_t chunk_size, size_t max_chunks, int opts)
232 {
233 DEBUGASSERT(chunk_size > 0);
234 DEBUGASSERT(max_chunks > 0);
235 memset(q, 0, sizeof(*q));
236 q->chunk_size = chunk_size;
237 q->max_chunks = max_chunks;
238 q->pool = pool;
239 q->opts = opts;
240 }
241
/* Initialize `q` without a chunk pool, with explicit options */
void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
                     int opts)
{
  struct bufc_pool *no_pool = NULL;
  bufq_init(q, no_pool, chunk_size, max_chunks, opts);
}
247
/* Initialize `q` without a chunk pool and with default options */
void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks)
{
  Curl_bufq_init2(q, chunk_size, max_chunks, BUFQ_OPT_NONE);
}
252
/* Initialize `q` to draw its chunks from `pool` */
void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
                     size_t max_chunks, int opts)
{
  size_t chunk_size = pool->chunk_size;
  bufq_init(q, pool, chunk_size, max_chunks, opts);
}
258
Curl_bufq_free(struct bufq * q)259 void Curl_bufq_free(struct bufq *q)
260 {
261 chunk_list_free(&q->head);
262 chunk_list_free(&q->spare);
263 q->tail = NULL;
264 q->chunk_count = 0;
265 }
266
Curl_bufq_reset(struct bufq * q)267 void Curl_bufq_reset(struct bufq *q)
268 {
269 struct buf_chunk *chunk;
270 while(q->head) {
271 chunk = q->head;
272 q->head = chunk->next;
273 chunk->next = q->spare;
274 q->spare = chunk;
275 }
276 q->tail = NULL;
277 }
278
Curl_bufq_len(const struct bufq * q)279 size_t Curl_bufq_len(const struct bufq *q)
280 {
281 const struct buf_chunk *chunk = q->head;
282 size_t len = 0;
283 while(chunk) {
284 len += chunk_len(chunk);
285 chunk = chunk->next;
286 }
287 return len;
288 }
289
Curl_bufq_space(const struct bufq * q)290 size_t Curl_bufq_space(const struct bufq *q)
291 {
292 size_t space = 0;
293 if(q->tail)
294 space += chunk_space(q->tail);
295 if(q->spare) {
296 struct buf_chunk *chunk = q->spare;
297 while(chunk) {
298 space += chunk->dlen;
299 chunk = chunk->next;
300 }
301 }
302 if(q->chunk_count < q->max_chunks) {
303 space += (q->max_chunks - q->chunk_count) * q->chunk_size;
304 }
305 return space;
306 }
307
Curl_bufq_is_empty(const struct bufq * q)308 bool Curl_bufq_is_empty(const struct bufq *q)
309 {
310 return !q->head || chunk_is_empty(q->head);
311 }
312
Curl_bufq_is_full(const struct bufq * q)313 bool Curl_bufq_is_full(const struct bufq *q)
314 {
315 if(!q->tail || q->spare)
316 return FALSE;
317 if(q->chunk_count < q->max_chunks)
318 return FALSE;
319 if(q->chunk_count > q->max_chunks)
320 return TRUE;
321 /* we have no spares and cannot make more, is the tail full? */
322 return chunk_is_full(q->tail);
323 }
324
/* Get an empty chunk for writing into `q`: reuse a spare when present,
 * otherwise allocate one (from the pool when `q` has one). Returns NULL
 * when the chunk limit is reached without BUFQ_OPT_SOFT_LIMIT, or when
 * allocation fails. */
static struct buf_chunk *get_spare(struct bufq *q)
{
  struct buf_chunk *chunk = NULL;

  if(q->spare) {
    chunk = q->spare;
    q->spare = chunk->next;
    chunk_reset(chunk);
    return chunk;
  }

  /* at the chunk limit and not allowed to exceed it */
  if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
    return NULL;

  if(q->pool) {
    if(bufcp_take(q->pool, &chunk))
      return NULL;
    ++q->chunk_count;
    return chunk;
  }
  else {
    chunk = calloc(1, sizeof(*chunk) + q->chunk_size);
    if(!chunk)
      return NULL;
    chunk->dlen = q->chunk_size;
    ++q->chunk_count;
    return chunk;
  }
}
354
/* Remove empty chunks from the front of `q`, returning each to the
 * pool, the spare list, or the allocator, depending on configuration. */
static void prune_head(struct bufq *q)
{
  struct buf_chunk *chunk;

  while(q->head && chunk_is_empty(q->head)) {
    chunk = q->head;
    q->head = chunk->next;
    if(q->tail == chunk)
      /* removed the last remaining chunk; tail follows head (NULL) */
      q->tail = q->head;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}
382
chunk_prev(struct buf_chunk * head,struct buf_chunk * chunk)383 static struct buf_chunk *chunk_prev(struct buf_chunk *head,
384 struct buf_chunk *chunk)
385 {
386 while(head) {
387 if(head == chunk)
388 return NULL;
389 if(head->next == chunk)
390 return head;
391 head = head->next;
392 }
393 return NULL;
394 }
395
/* Remove empty chunks from the back of `q` (after unwrite), recycling
 * them like prune_head(). Uses a linear scan (chunk_prev) to find the
 * new tail since the list is singly linked. */
static void prune_tail(struct bufq *q)
{
  struct buf_chunk *chunk;

  while(q->tail && chunk_is_empty(q->tail)) {
    chunk = q->tail;
    q->tail = chunk_prev(q->head, chunk);
    if(q->tail)
      q->tail->next = NULL;
    if(q->head == chunk)
      /* removed the only chunk; head follows tail (NULL) */
      q->head = q->tail;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}
425
get_non_full_tail(struct bufq * q)426 static struct buf_chunk *get_non_full_tail(struct bufq *q)
427 {
428 struct buf_chunk *chunk;
429
430 if(q->tail && !chunk_is_full(q->tail))
431 return q->tail;
432 chunk = get_spare(q);
433 if(chunk) {
434 /* new tail, and possibly new head */
435 if(q->tail) {
436 q->tail->next = chunk;
437 q->tail = chunk;
438 }
439 else {
440 DEBUGASSERT(!q->head);
441 q->head = q->tail = chunk;
442 }
443 }
444 return chunk;
445 }
446
/* Append `len` bytes from `buf` to `q`, adding chunks as needed.
 * Returns the number of bytes copied, which may be less than `len`
 * when `q` fills up. Returns -1 with *err = CURLE_AGAIN when nothing
 * could be buffered because `q` is full, or -1 with
 * *err = CURLE_OUT_OF_MEMORY when a needed chunk could not be made. */
ssize_t Curl_bufq_write(struct bufq *q,
                        const unsigned char *buf, size_t len,
                        CURLcode *err)
{
  struct buf_chunk *tail;
  ssize_t nwritten = 0;
  size_t n;

  DEBUGASSERT(q->max_chunks > 0);
  while(len) {
    tail = get_non_full_tail(q);
    if(!tail) {
      /* no tail although we still had room for more chunks means
       * the allocation itself failed */
      if((q->chunk_count < q->max_chunks) || (q->opts & BUFQ_OPT_SOFT_LIMIT)) {
        *err = CURLE_OUT_OF_MEMORY;
        return -1;
      }
      break;
    }
    n = chunk_append(tail, buf, len);
    if(!n)
      break;
    nwritten += n;
    buf += n;
    len -= n;
  }
  if(nwritten == 0 && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}
479
Curl_bufq_cwrite(struct bufq * q,const char * buf,size_t len,size_t * pnwritten)480 CURLcode Curl_bufq_cwrite(struct bufq *q,
481 const char *buf, size_t len,
482 size_t *pnwritten)
483 {
484 ssize_t n;
485 CURLcode result;
486 n = Curl_bufq_write(q, (const unsigned char *)buf, len, &result);
487 *pnwritten = (n < 0) ? 0 : (size_t)n;
488 return result;
489 }
490
Curl_bufq_unwrite(struct bufq * q,size_t len)491 CURLcode Curl_bufq_unwrite(struct bufq *q, size_t len)
492 {
493 while(len && q->tail) {
494 len -= chunk_unwrite(q->tail, len);
495 prune_tail(q);
496 }
497 return len ? CURLE_AGAIN : CURLE_OK;
498 }
499
Curl_bufq_read(struct bufq * q,unsigned char * buf,size_t len,CURLcode * err)500 ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
501 CURLcode *err)
502 {
503 ssize_t nread = 0;
504 size_t n;
505
506 *err = CURLE_OK;
507 while(len && q->head) {
508 n = chunk_read(q->head, buf, len);
509 if(n) {
510 nread += n;
511 buf += n;
512 len -= n;
513 }
514 prune_head(q);
515 }
516 if(nread == 0) {
517 *err = CURLE_AGAIN;
518 return -1;
519 }
520 return nread;
521 }
522
Curl_bufq_cread(struct bufq * q,char * buf,size_t len,size_t * pnread)523 CURLcode Curl_bufq_cread(struct bufq *q, char *buf, size_t len,
524 size_t *pnread)
525 {
526 ssize_t n;
527 CURLcode result;
528 n = Curl_bufq_read(q, (unsigned char *)buf, len, &result);
529 *pnread = (n < 0) ? 0 : (size_t)n;
530 return result;
531 }
532
Curl_bufq_peek(struct bufq * q,const unsigned char ** pbuf,size_t * plen)533 bool Curl_bufq_peek(struct bufq *q,
534 const unsigned char **pbuf, size_t *plen)
535 {
536 if(q->head && chunk_is_empty(q->head)) {
537 prune_head(q);
538 }
539 if(q->head && !chunk_is_empty(q->head)) {
540 chunk_peek(q->head, pbuf, plen);
541 return TRUE;
542 }
543 *pbuf = NULL;
544 *plen = 0;
545 return FALSE;
546 }
547
Curl_bufq_peek_at(struct bufq * q,size_t offset,const unsigned char ** pbuf,size_t * plen)548 bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
549 const unsigned char **pbuf, size_t *plen)
550 {
551 struct buf_chunk *c = q->head;
552 size_t clen;
553
554 while(c) {
555 clen = chunk_len(c);
556 if(!clen)
557 break;
558 if(offset >= clen) {
559 offset -= clen;
560 c = c->next;
561 continue;
562 }
563 chunk_peek_at(c, offset, pbuf, plen);
564 return TRUE;
565 }
566 *pbuf = NULL;
567 *plen = 0;
568 return FALSE;
569 }
570
/* Discard up to `amount` unread bytes from the front of `q` */
void Curl_bufq_skip(struct bufq *q, size_t amount)
{
  while(amount && q->head) {
    amount -= chunk_skip(q->head, amount);
    prune_head(q);
  }
}
581
/* Pass buffered data to `writer`, consuming whatever it accepts.
 * Returns the total bytes handed off, or -1 with *err set when the
 * very first write blocks or any write reports a real error. Data
 * already passed stays consumed even when -1 is returned later. */
ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
                       void *writer_ctx, CURLcode *err)
{
  const unsigned char *buf;
  size_t blen;
  ssize_t nwritten = 0;

  while(Curl_bufq_peek(q, &buf, &blen)) {
    ssize_t chunk_written;

    chunk_written = writer(writer_ctx, buf, blen, err);
    if(chunk_written < 0) {
      if(!nwritten || *err != CURLE_AGAIN) {
        /* blocked on first write or real error, fail */
        nwritten = -1;
      }
      break;
    }
    if(!chunk_written) {
      if(!nwritten) {
        /* treat as blocked */
        *err = CURLE_AGAIN;
        nwritten = -1;
      }
      break;
    }
    Curl_bufq_skip(q, (size_t)chunk_written);
    nwritten += chunk_written;
  }
  return nwritten;
}
613
/* Buffer `buf`/`len` into `q`, flushing `q` through `writer` whenever
 * it is full to make room. Returns the number of bytes added to `q`
 * (which may be less than `len`), or -1 with *err set: CURLE_AGAIN
 * when nothing could be buffered, otherwise the writer's real error. */
ssize_t Curl_bufq_write_pass(struct bufq *q,
                             const unsigned char *buf, size_t len,
                             Curl_bufq_writer *writer, void *writer_ctx,
                             CURLcode *err)
{
  ssize_t nwritten = 0, n;

  *err = CURLE_OK;
  while(len) {
    if(Curl_bufq_is_full(q)) {
      /* try to make room in case we are full */
      n = Curl_bufq_pass(q, writer, writer_ctx, err);
      if(n < 0) {
        if(*err != CURLE_AGAIN) {
          /* real error, fail */
          return -1;
        }
        /* would block, bufq is full, give up */
        break;
      }
    }

    /* Add whatever is remaining now to bufq */
    n = Curl_bufq_write(q, buf, len, err);
    if(n < 0) {
      if(*err != CURLE_AGAIN) {
        /* real error, fail */
        return -1;
      }
      /* no room in bufq */
      break;
    }
    /* edge case of writer returning 0 (and len is >0)
     * break or we might enter an infinite loop here */
    if(n == 0)
      break;

    /* Maybe only part of `data` has been added, continue to loop */
    buf += (size_t)n;
    len -= (size_t)n;
    nwritten += (size_t)n;
  }

  if(!nwritten && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}
664
/* Take one "sip": let `reader` fill free space in the tail chunk, at
 * most `max_len` bytes (0: only limited by the chunk's capacity).
 * Returns the bytes read, 0 on reader EOF (with *err = CURLE_OK), or
 * -1 with *err set: CURLE_AGAIN when `q` is full, CURLE_OUT_OF_MEMORY
 * when a new chunk could not be allocated. */
ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
                       Curl_bufq_reader *reader, void *reader_ctx,
                       CURLcode *err)
{
  struct buf_chunk *tail = NULL;
  ssize_t nread;

  *err = CURLE_AGAIN;
  tail = get_non_full_tail(q);
  if(!tail) {
    /* room for more chunks but none obtained: allocation failure */
    if(q->chunk_count < q->max_chunks) {
      *err = CURLE_OUT_OF_MEMORY;
      return -1;
    }
    /* full, blocked */
    *err = CURLE_AGAIN;
    return -1;
  }

  nread = chunk_slurpn(tail, max_len, reader, reader_ctx, err);
  if(nread < 0) {
    return -1;
  }
  else if(nread == 0) {
    /* eof */
    *err = CURLE_OK;
  }
  return nread;
}
694
695 /**
696 * Read up to `max_len` bytes and append it to the end of the buffer queue.
697 * if `max_len` is 0, no limit is imposed and the call behaves exactly
698 * the same as `Curl_bufq_slurp()`.
699 * Returns the total amount of buf read (may be 0) or -1 on other
700 * reader errors.
701 * Note that even in case of a -1 chunks may have been read and
702 * the buffer queue will have different length than before.
703 */
bufq_slurpn(struct bufq * q,size_t max_len,Curl_bufq_reader * reader,void * reader_ctx,CURLcode * err)704 static ssize_t bufq_slurpn(struct bufq *q, size_t max_len,
705 Curl_bufq_reader *reader, void *reader_ctx,
706 CURLcode *err)
707 {
708 ssize_t nread = 0, n;
709
710 *err = CURLE_AGAIN;
711 while(1) {
712
713 n = Curl_bufq_sipn(q, max_len, reader, reader_ctx, err);
714 if(n < 0) {
715 if(!nread || *err != CURLE_AGAIN) {
716 /* blocked on first read or real error, fail */
717 nread = -1;
718 }
719 else
720 *err = CURLE_OK;
721 break;
722 }
723 else if(n == 0) {
724 /* eof */
725 *err = CURLE_OK;
726 break;
727 }
728 nread += (size_t)n;
729 if(max_len) {
730 DEBUGASSERT((size_t)n <= max_len);
731 max_len -= (size_t)n;
732 if(!max_len)
733 break;
734 }
735 /* give up slurping when we get less bytes than we asked for */
736 if(q->tail && !chunk_is_full(q->tail))
737 break;
738 }
739 return nread;
740 }
741
Curl_bufq_slurp(struct bufq * q,Curl_bufq_reader * reader,void * reader_ctx,CURLcode * err)742 ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
743 void *reader_ctx, CURLcode *err)
744 {
745 return bufq_slurpn(q, 0, reader, reader_ctx, err);
746 }
747