source: UsbWattMeter/trunk/curl-7.47.1/lib/transfer.c

Last change on this file was 167, checked in by coas-nagasima, 8 years ago

MIMEにSJISを設定

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
  • Property svn:mime-type set to text/x-csrc; charset=SHIFT_JIS
File size: 64.9 KB
RevLine 
[164]1/***************************************************************************
2 * _ _ ____ _
3 * Project ___| | | | _ \| |
4 * / __| | | | |_) | |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
7 *
8 * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
9 *
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.haxx.se/docs/copyright.html.
13 *
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
17 *
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
20 *
21 ***************************************************************************/
22
23#include "curl_setup.h"
24
25#include "strtoofft.h"
26#include "strequal.h"
27#include "rawstr.h"
28
29#ifdef HAVE_NETINET_IN_H
30#include <netinet/in.h>
31#endif
32#ifdef HAVE_NETDB_H
33#include <netdb.h>
34#endif
35#ifdef HAVE_ARPA_INET_H
36#include <arpa/inet.h>
37#endif
38#ifdef HAVE_NET_IF_H
39#include <net/if.h>
40#endif
41#ifdef HAVE_SYS_IOCTL_H
42#include <sys/ioctl.h>
43#endif
44#ifdef HAVE_SIGNAL_H
45#include <signal.h>
46#endif
47
48#ifdef HAVE_SYS_PARAM_H
49#include <sys/param.h>
50#endif
51
52#ifdef HAVE_SYS_SELECT_H
53#include <sys/select.h>
54#endif
55
56#ifndef HAVE_SOCKET
57#error "We can't compile without socket() support!"
58#endif
59
60#include "urldata.h"
61#include <curl/curl.h>
62#include "netrc.h"
63
64#include "content_encoding.h"
65#include "hostip.h"
66#include "transfer.h"
67#include "sendf.h"
68#include "speedcheck.h"
69#include "progress.h"
70#include "http.h"
71#include "url.h"
72#include "getinfo.h"
73#include "vtls/vtls.h"
74#include "http_digest.h"
75#include "curl_ntlm.h"
76#include "http_negotiate.h"
77#include "share.h"
78#include "select.h"
79#include "multiif.h"
80#include "connect.h"
81#include "non-ascii.h"
82#include "curl_printf.h"
83
84/* The last #include files should be: */
85#include "curl_memory.h"
86#include "memdebug.h"
87
/*
 * This function will call the read callback to fill our buffer with data
 * to upload.
 *
 * 'bytes' is the maximum amount of payload to ask the read callback for.
 * On success, *nreadp receives the number of bytes now present at
 * data->req.upload_fromhere (including any chunk framing added below).
 *
 * Returns CURLE_OK, or an error if the callback aborted, returned a bogus
 * length, or asked for a pause on a protocol that cannot pause.
 */
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
{
  struct SessionHandle *data = conn->data;
  size_t buffersize = (size_t)bytes;
  int nread;
#ifdef CURL_DOES_CONVERSIONS
  bool sending_http_headers = FALSE;

  if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
    const struct HTTP *http = data->req.protop;

    if(http->sending == HTTPSEND_REQUEST)
      /* We're sending the HTTP request headers, not the data.
         Remember that so we don't re-translate them into garbage. */
      sending_http_headers = TRUE;
  }
#endif

  if(data->req.upload_chunky) {
    /* if chunked Transfer-Encoding: reserve room in front of the payload
       for the chunk-size line, and at the end for the trailing CRLF, by
       shrinking the read size and advancing the destination pointer.
       The prefix space is backed out again below (or on PAUSE). */
    buffersize -= (8 + 2 + 2);           /* 32bit hex + CRLF + CRLF */
    data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
  }

  /* this function returns a size_t, so we typecast to int to prevent warnings
     with picky compilers */
  nread = (int)data->state.fread_func(data->req.upload_fromhere, 1,
                                      buffersize, data->state.in);

  if(nread == CURL_READFUNC_ABORT) {
    failf(data, "operation aborted by callback");
    *nreadp = 0;
    return CURLE_ABORTED_BY_CALLBACK;
  }
  else if(nread == CURL_READFUNC_PAUSE) {

    if(conn->handler->flags & PROTOPT_NONETWORK) {
      /* protocols that work without network cannot be paused. This is
         actually only FILE:// just now, and it can't pause since the transfer
         isn't done using the "normal" procedure. */
      failf(data, "Read callback asked for PAUSE when not supported!");
      return CURLE_READ_ERROR;
    }
    else {
      struct SingleRequest *k = &data->req;
      /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
      k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
      if(data->req.upload_chunky) {
        /* Back out the preallocation done above */
        data->req.upload_fromhere -= (8 + 2);
      }
      *nreadp = 0;
    }
    return CURLE_OK; /* nothing was read */
  }
  else if((size_t)nread > buffersize) {
    /* the read function returned a too large value */
    *nreadp = 0;
    failf(data, "read function returned funny value");
    return CURLE_READ_ERROR;
  }

  if(!data->req.forbidchunk && data->req.upload_chunky) {
    /* if chunked Transfer-Encoding
     * build chunk:
     *
     *        <HEX SIZE> CRLF
     *        <DATA> CRLF
     */
    /* On non-ASCII platforms the <DATA> may or may not be
       translated based on set.prefer_ascii while the protocol
       portion must always be translated to the network encoding.
       To further complicate matters, line end conversion might be
       done later on, so we need to prevent CRLFs from becoming
       CRCRLFs if that's the case. To do this we use bare LFs
       here, knowing they'll become CRLFs later on.
     */

    char hexbuffer[11]; /* max 8 hex digits + 2 EOL chars + NUL */
    const char *endofline_native;
    const char *endofline_network;
    int hexlen;

    if(
#ifdef CURL_DO_LINEEND_CONV
       (data->set.prefer_ascii) ||
#endif
       (data->set.crlf)) {
      /* \n will become \r\n later on */
      endofline_native = "\n";
      endofline_network = "\x0a";
    }
    else {
      endofline_native = "\r\n";
      endofline_network = "\x0d\x0a";
    }
    hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
                      "%x%s", nread, endofline_native);

    /* move buffer pointer back over the reserved prefix space so the
       chunk-size line sits immediately before the payload */
    data->req.upload_fromhere -= hexlen;
    nread += hexlen;

    /* copy the prefix to the buffer, leaving out the NUL */
    memcpy(data->req.upload_fromhere, hexbuffer, hexlen);

    /* always append ASCII CRLF to the data */
    memcpy(data->req.upload_fromhere + nread,
           endofline_network,
           strlen(endofline_network));

#ifdef CURL_DOES_CONVERSIONS
    CURLcode result;
    int length;
    if(data->set.prefer_ascii) {
      /* translate the protocol and data */
      length = nread;
    }
    else {
      /* just translate the protocol portion */
      length = strlen(hexbuffer);
    }
    result = Curl_convert_to_network(data, data->req.upload_fromhere, length);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(result)
      return result;
#endif /* CURL_DOES_CONVERSIONS */

    if((nread - hexlen) == 0)
      /* a zero-length chunk is the terminating chunk:
         mark this as done once this chunk is transferred */
      data->req.upload_done = TRUE;

    nread+=(int)strlen(endofline_native); /* for the added end of line */
  }
#ifdef CURL_DOES_CONVERSIONS
  else if((data->set.prefer_ascii) && (!sending_http_headers)) {
    CURLcode result;
    result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(result)
      return result;
  }
#endif /* CURL_DOES_CONVERSIONS */

  *nreadp = nread;

  return CURLE_OK;
}
240
241
242/*
243 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
244 * POST/PUT with multi-pass authentication when a sending was denied and a
245 * resend is necessary.
246 */
247CURLcode Curl_readrewind(struct connectdata *conn)
248{
249 struct SessionHandle *data = conn->data;
250
251 conn->bits.rewindaftersend = FALSE; /* we rewind now */
252
253 /* explicitly switch off sending data on this connection now since we are
254 about to restart a new transfer and thus we want to avoid inadvertently
255 sending more data on the existing connection until the next transfer
256 starts */
257 data->req.keepon &= ~KEEP_SEND;
258
259 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
260 CURLOPT_HTTPPOST, call app to rewind
261 */
262 if(data->set.postfields ||
263 (data->set.httpreq == HTTPREQ_POST_FORM))
264 ; /* do nothing */
265 else {
266 if(data->set.seek_func) {
267 int err;
268
269 err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
270 if(err) {
271 failf(data, "seek callback returned error %d", (int)err);
272 return CURLE_SEND_FAIL_REWIND;
273 }
274 }
275 else if(data->set.ioctl_func) {
276 curlioerr err;
277
278 err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
279 data->set.ioctl_client);
280 infof(data, "the ioctl callback returned %d\n", (int)err);
281
282 if(err) {
283 /* FIXME: convert to a human readable error message */
284 failf(data, "ioctl callback returned error %d", (int)err);
285 return CURLE_SEND_FAIL_REWIND;
286 }
287 }
288 else {
289 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
290 given FILE * stream and we can actually attempt to rewind that
291 ourselves with fseek() */
292 if(data->state.fread_func == (curl_read_callback)fread) {
293 if(-1 != fseek(data->state.in, 0, SEEK_SET))
294 /* successful rewind */
295 return CURLE_OK;
296 }
297
298 /* no callback set or failure above, makes us fail at once */
299 failf(data, "necessary data rewind wasn't possible");
300 return CURLE_SEND_FAIL_REWIND;
301 }
302 }
303 return CURLE_OK;
304}
305
/*
 * data_pending() returns nonzero when more incoming data may be buffered
 * below the socket level, so the caller should keep reading instead of
 * waiting on select()/poll().
 */
static int data_pending(const struct connectdata *conn)
{
  /* in the case of libssh2, we can never be really sure that we have emptied
     its internal buffers so we MUST always try until we get EAGAIN back */
  return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
#if defined(USE_NGHTTP2)
    Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
    /* For HTTP/2, we may read up everything including response body
       with header fields in Curl_http_readwrite_headers. If no
       content-length is provided, curl waits for the connection
       close, which we emulate using conn->proto.httpc.closed =
       TRUE. The thing is if we read everything, then http2_recv won't
       be called and we cannot signal the HTTP/2 stream has closed. As
       a workaround, we return nonzero here to call http2_recv. */
    ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
#else
    Curl_ssl_data_pending(conn, FIRSTSOCKET);
#endif
}
325
326static void read_rewind(struct connectdata *conn,
327 size_t thismuch)
328{
329 DEBUGASSERT(conn->read_pos >= thismuch);
330
331 conn->read_pos -= thismuch;
332 conn->bits.stream_was_rewound = TRUE;
333
334#ifdef DEBUGBUILD
335 {
336 char buf[512 + 1];
337 size_t show;
338
339 show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
340 if(conn->master_buffer) {
341 memcpy(buf, conn->master_buffer + conn->read_pos, show);
342 buf[show] = '\0';
343 }
344 else {
345 buf[0] = '\0';
346 }
347
348 DEBUGF(infof(conn->data,
349 "Buffer after stream rewind (read_pos = %zu): [%s]\n",
350 conn->read_pos, buf));
351 }
352#endif
353}
354
355/*
356 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
357 * remote document with the time provided by CURLOPT_TIMEVAL
358 */
359bool Curl_meets_timecondition(struct SessionHandle *data, time_t timeofdoc)
360{
361 if((timeofdoc == 0) || (data->set.timevalue == 0))
362 return TRUE;
363
364 switch(data->set.timecondition) {
365 case CURL_TIMECOND_IFMODSINCE:
366 default:
367 if(timeofdoc <= data->set.timevalue) {
368 infof(data,
369 "The requested document is not new enough\n");
370 data->info.timecond = TRUE;
371 return FALSE;
372 }
373 break;
374 case CURL_TIMECOND_IFUNMODSINCE:
375 if(timeofdoc >= data->set.timevalue) {
376 infof(data,
377 "The requested document is not old enough\n");
378 data->info.timecond = TRUE;
379 return FALSE;
380 }
381 break;
382 }
383
384 return TRUE;
385}
386
/*
 * Go ahead and do a read if we have a readable socket or if
 * the stream was rewound (in which case we have data in a
 * buffer)
 *
 * Reads from conn->sockfd into k->buf, parses headers when still in
 * header mode, decodes chunked bodies, handles download excess/rewind for
 * pipelining, and delivers body data to the client write callbacks.
 * Sets *didwhat's KEEP_RECV bit when it read, and *done when the transfer
 * is finished. Loops while data_pending() says more is buffered, capped
 * at 100 iterations.
 */
static CURLcode readwrite_data(struct SessionHandle *data,
                               struct connectdata *conn,
                               struct SingleRequest *k,
                               int *didwhat, bool *done)
{
  CURLcode result = CURLE_OK;
  ssize_t nread; /* number of bytes read */
  size_t excess = 0; /* excess bytes read */
  bool is_empty_data = FALSE;
  bool readmore = FALSE; /* used by RTP to signal for more data */
  int maxloops = 100; /* hard cap so one busy socket cannot starve others */

  *done = FALSE;

  /* This is where we loop until we have read everything there is to
     read or we get a CURLE_AGAIN */
  do {
    size_t buffersize = data->set.buffer_size?
      data->set.buffer_size : BUFSIZE;
    size_t bytestoread = buffersize;

    if(
#if defined(USE_NGHTTP2)
       /* For HTTP/2, read data without caring about the content
          length. This is safe because body in HTTP/2 is always
          segmented thanks to its framing layer. Meanwhile, we have to
          call Curl_read to ensure that http2_handle_stream_close is
          called when we read all incoming bytes for a particular
          stream. */
       !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
         conn->httpversion == 20) &&
#endif
       k->size != -1 && !k->header) {
      /* make sure we don't read "too much" if we can help it since we
         might be pipelining and then someone else might want to read what
         follows! */
      curl_off_t totalleft = k->size - k->bytecount;
      if(totalleft < (curl_off_t)bytestoread)
        bytestoread = (size_t)totalleft;
    }

    if(bytestoread) {
      /* receive data from the network! */
      result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);

      /* read would've blocked */
      if(CURLE_AGAIN == result)
        break; /* get out of loop */

      /* any other non-zero CURLcode is a real error */
      if(result>0)
        return result;
    }
    else {
      /* read nothing but since we wanted nothing we consider this an OK
         situation to proceed from */
      DEBUGF(infof(data, "readwrite_data: we're done!\n"));
      nread = 0;
    }

    if((k->bytecount == 0) && (k->writebytecount == 0)) {
      /* first byte in either direction: start the transfer timer */
      Curl_pgrsTime(data, TIMER_STARTTRANSFER);
      if(k->exp100 > EXP100_SEND_DATA)
        /* set time stamp to compare with when waiting for the 100 */
        k->start100 = Curl_tvnow();
    }

    *didwhat |= KEEP_RECV;
    /* indicates data of zero size, i.e. empty file */
    is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;

    /* NUL terminate, allowing string ops to be used */
    if(0 < nread || is_empty_data) {
      k->buf[nread] = 0;
    }
    else if(0 >= nread) {
      /* if we receive 0 or less here, the server closed the connection
         and we bail out from this! */
      DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
      k->keepon &= ~KEEP_RECV;
      break;
    }

    /* Default buffer to use when we write the buffer, it may be changed
       in the flow below before the actual storing is done. */
    k->str = k->buf;

    /* protocol-specific pre-processing hook (e.g. RTSP interleaved data) */
    if(conn->handler->readwrite) {
      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;
      if(readmore)
        break;
    }

#ifndef CURL_DISABLE_HTTP
    /* Since this is a two-state thing, we check if we are parsing
       headers at the moment or not. */
    if(k->header) {
      /* we are in parse-the-header-mode */
      bool stop_reading = FALSE;
      result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
      if(result)
        return result;

      if(conn->handler->readwrite &&
         (k->maxdownload <= 0 && nread > 0)) {
        /* give leftover bytes after the headers to the protocol hook */
        result = conn->handler->readwrite(data, conn, &nread, &readmore);
        if(result)
          return result;
        if(readmore)
          break;
      }

      if(stop_reading) {
        /* We've stopped dealing with input, get out of the do-while loop */

        if(nread > 0) {
          if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
            infof(data,
                  "Rewinding stream by : %zd"
                  " bytes on url %s (zero-length body)\n",
                  nread, data->state.path);
            read_rewind(conn, (size_t)nread);
          }
          else {
            infof(data,
                  "Excess found in a non pipelined read:"
                  " excess = %zd"
                  " url = %s (zero-length body)\n",
                  nread, data->state.path);
          }
        }

        break;
      }
    }
#endif /* CURL_DISABLE_HTTP */


    /* This is not an 'else if' since it may be a rest from the header
       parsing, where the beginning of the buffer is headers and the end
       is non-headers. */
    if(k->str && !k->header && (nread > 0 || is_empty_data)) {

#ifndef CURL_DISABLE_HTTP
      if(0 == k->bodywrites && !is_empty_data) {
        /* These checks are only made the first time we are about to
           write a piece of the body */
        if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
          /* HTTP-only checks */

          if(data->req.newurl) {
            if(conn->bits.close) {
              /* Abort after the headers if "follow Location" is set
                 and we're set to close anyway. */
              k->keepon &= ~KEEP_RECV;
              *done = TRUE;
              return CURLE_OK;
            }
            /* We have a new url to load, but since we want to be able
               to re-use this connection properly, we read the full
               response in "ignore more" */
            k->ignorebody = TRUE;
            infof(data, "Ignoring the response-body\n");
          }
          if(data->state.resume_from && !k->content_range &&
             (data->set.httpreq==HTTPREQ_GET) &&
             !k->ignorebody) {

            if(k->size == data->state.resume_from) {
              /* The resume point is at the end of file, consider this fine
                 even if it doesn't allow resume from here. */
              infof(data, "The entire document is already downloaded");
              connclose(conn, "already downloaded");
              /* Abort download */
              k->keepon &= ~KEEP_RECV;
              *done = TRUE;
              return CURLE_OK;
            }

            /* we wanted to resume a download, although the server doesn't
             * seem to support this and we did this with a GET (if it
             * wasn't a GET we did a POST or PUT resume) */
            failf(data, "HTTP server doesn't seem to support "
                  "byte ranges. Cannot resume.");
            return CURLE_RANGE_ERROR;
          }

          if(data->set.timecondition && !data->state.range) {
            /* A time condition has been set AND no ranges have been
               requested. This seems to be what chapter 13.3.4 of
               RFC 2616 defines to be the correct action for a
               HTTP/1.1 client */

            if(!Curl_meets_timecondition(data, k->timeofdoc)) {
              *done = TRUE;
              /* We're simulating a http 304 from server so we return
                 what should have been returned from the server */
              data->info.httpcode = 304;
              infof(data, "Simulate a HTTP 304 response!\n");
              /* we abort the transfer before it is completed == we ruin the
                 re-use ability. Close the connection */
              connclose(conn, "Simulated 304 handling");
              return CURLE_OK;
            }
          } /* we have a time condition */

        } /* this is HTTP or RTSP */
      } /* this is the first time we write a body part */
#endif /* CURL_DISABLE_HTTP */

      k->bodywrites++;

      /* pass data to the debug function before it gets "dechunked" */
      if(data->set.verbose) {
        if(k->badheader) {
          Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
                     (size_t)k->hbuflen, conn);
          if(k->badheader == HEADER_PARTHEADER)
            Curl_debug(data, CURLINFO_DATA_IN,
                       k->str, (size_t)nread, conn);
        }
        else
          Curl_debug(data, CURLINFO_DATA_IN,
                     k->str, (size_t)nread, conn);
      }

#ifndef CURL_DISABLE_HTTP
      if(k->chunk) {
        /*
         * Here comes a chunked transfer flying and we need to decode this
         * properly. While the name says read, this function both reads
         * and writes away the data. The returned 'nread' holds the number
         * of actual data it wrote to the client.
         */

        CHUNKcode res =
          Curl_httpchunk_read(conn, k->str, nread, &nread);

        if(CHUNKE_OK < res) {
          if(CHUNKE_WRITE_ERROR == res) {
            failf(data, "Failed writing data");
            return CURLE_WRITE_ERROR;
          }
          failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
          return CURLE_RECV_ERROR;
        }
        else if(CHUNKE_STOP == res) {
          size_t dataleft;
          /* we're done reading chunks! */
          k->keepon &= ~KEEP_RECV; /* read no more */

          /* There are now possibly N number of bytes at the end of the
             str buffer that weren't written to the client.

             We DO care about this data if we are pipelining.
             Push it back to be read on the next pass. */

          dataleft = conn->chunk.dataleft;
          if(dataleft != 0) {
            infof(conn->data, "Leftovers after chunking: %zu bytes\n",
                  dataleft);
            if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
              /* only attempt the rewind if we truly are pipelining */
              infof(conn->data, "Rewinding %zu bytes\n",dataleft);
              read_rewind(conn, dataleft);
            }
          }
        }
        /* If it returned OK, we just keep going */
      }
#endif /* CURL_DISABLE_HTTP */

      /* Account for body content stored in the header buffer */
      if(k->badheader && !k->ignorebody) {
        DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
                     k->hbuflen));
        k->bytecount += k->hbuflen;
      }

      if((-1 != k->maxdownload) &&
         (k->bytecount + nread >= k->maxdownload)) {
        /* we got at least up to the expected end of the download;
           compute how much beyond it we read */

        excess = (size_t)(k->bytecount + nread - k->maxdownload);
        if(excess > 0 && !k->ignorebody) {
          if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
            /* The 'excess' amount below can't be more than BUFSIZE which
               always will fit in a size_t */
            infof(data,
                  "Rewinding stream by : %zu"
                  " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
                  ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
                  ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
                  excess, data->state.path,
                  k->size, k->maxdownload, k->bytecount, nread);
            read_rewind(conn, excess);
          }
          else {
            infof(data,
                  "Excess found in a non pipelined read:"
                  " excess = %zu"
                  ", size = %" CURL_FORMAT_CURL_OFF_T
                  ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
                  ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
                  excess, k->size, k->maxdownload, k->bytecount);
          }
        }

        /* clamp nread so the client never gets more than maxdownload */
        nread = (ssize_t) (k->maxdownload - k->bytecount);
        if(nread < 0 ) /* this should be unusual */
          nread = 0;

        k->keepon &= ~KEEP_RECV; /* we're done reading */
      }

      k->bytecount += nread;

      Curl_pgrsSetDownloadCounter(data, k->bytecount);

      if(!k->chunk && (nread || k->badheader || is_empty_data)) {
        /* If this is chunky transfer, it was already written */

        if(k->badheader && !k->ignorebody) {
          /* we parsed a piece of data wrongly assuming it was a header
             and now we output it as body instead */

          /* Don't let excess data pollute body writes */
          if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
            result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                       data->state.headerbuff,
                                       k->hbuflen);
          else
            result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                       data->state.headerbuff,
                                       (size_t)k->maxdownload);

          if(result)
            return result;
        }
        if(k->badheader < HEADER_ALLBAD) {
          /* This switch handles various content encodings. If there's an
             error here, be sure to check over the almost identical code
             in http_chunks.c.
             Make sure that ALL_CONTENT_ENCODINGS contains all the
             encodings handled here. */
#ifdef HAVE_LIBZ
          switch (conn->data->set.http_ce_skip ?
                  IDENTITY : k->auto_decoding) {
          case IDENTITY:
#endif
            /* This is the default when the server sends no
               Content-Encoding header. See Curl_readwrite_init; the
               memset() call initializes k->auto_decoding to zero. */
            if(!k->ignorebody) {

#ifndef CURL_DISABLE_POP3
              if(conn->handler->protocol&PROTO_FAMILY_POP3)
                result = Curl_pop3_write(conn, k->str, nread);
              else
#endif /* CURL_DISABLE_POP3 */

                result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
                                           nread);
            }
#ifdef HAVE_LIBZ
            break;

          case DEFLATE:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_deflate_write(conn, k, nread);
            break;

          case GZIP:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_gzip_write(conn, k, nread);
            break;

          default:
            failf (data, "Unrecognized content encoding type. "
                   "libcurl understands `identity', `deflate' and `gzip' "
                   "content encodings.");
            result = CURLE_BAD_CONTENT_ENCODING;
            break;
          }
#endif
        }
        k->badheader = HEADER_NORMAL; /* taken care of now */

        if(result)
          return result;
      }

    } /* if(! header and data to read ) */

    if(conn->handler->readwrite &&
       (excess > 0 && !conn->bits.stream_was_rewound)) {
      /* Parse the excess data */
      k->str += nread;
      nread = (ssize_t)excess;

      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;

      if(readmore)
        k->keepon |= KEEP_RECV; /* we're not done reading */
      break;
    }

    if(is_empty_data) {
      /* if we received nothing, the server closed the connection and we
         are done */
      k->keepon &= ~KEEP_RECV;
    }

  } while(data_pending(conn) && maxloops--);

  if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
     conn->bits.close ) {
    /* When we've read the entire thing and the close bit is set, the server
       may now close the connection. If there's now any kind of sending going
       on from our side, we need to stop that immediately. */
    infof(data, "we are done reading and this is set to close, stop send\n");
    k->keepon &= ~KEEP_SEND; /* no writing anymore either */
  }

  return CURLE_OK;
}
822
/*
 * Send data to upload to the server, when the socket is writable.
 *
 * Fills the upload buffer via Curl_fillreadbuffer() when empty (handling
 * the Expect: 100-continue pause, LF->CRLF conversion and SMTP dot
 * escaping), then writes it to conn->writesockfd with Curl_write(),
 * keeping any partially-sent remainder for the next call. Sets *didwhat's
 * KEEP_SEND bit when it attempted to send.
 *
 * Note: the loop body runs exactly once per call (WHILE_FALSE); 'break'
 * is used as an early exit.
 */
static CURLcode readwrite_upload(struct SessionHandle *data,
                                 struct connectdata *conn,
                                 struct SingleRequest *k,
                                 int *didwhat)
{
  ssize_t i, si;
  ssize_t bytes_written;
  CURLcode result;
  ssize_t nread; /* number of bytes read */
  bool sending_http_headers = FALSE;

  if((k->bytecount == 0) && (k->writebytecount == 0))
    Curl_pgrsTime(data, TIMER_STARTTRANSFER);

  *didwhat |= KEEP_SEND;

  do {

    /* only read more data if there's no upload data already
       present in the upload buffer */
    if(0 == data->req.upload_present) {
      /* init the "upload from here" pointer */
      data->req.upload_fromhere = k->uploadbuf;

      if(!k->upload_done) {
        /* HTTP pollution, this should be written nicer to become more
           protocol agnostic. */
        int fillcount;
        struct HTTP *http = data->req.protop;

        if((k->exp100 == EXP100_SENDING_REQUEST) &&
           (http->sending == HTTPSEND_BODY)) {
          /* If this call is to send body data, we must take some action:
             We have sent off the full HTTP 1.1 request, and we shall now
             go into the Expect: 100 state and await such a header */
          k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
          k->keepon &= ~KEEP_SEND;         /* disable writing */
          k->start100 = Curl_tvnow();       /* timeout count starts now */
          *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */

          /* set a timeout for the multi interface */
          Curl_expire(data, data->set.expect_100_timeout);
          break;
        }

        if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
          if(http->sending == HTTPSEND_REQUEST)
            /* We're sending the HTTP request headers, not the data.
               Remember that so we don't change the line endings. */
            sending_http_headers = TRUE;
          else
            sending_http_headers = FALSE;
        }

        result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
        if(result)
          return result;

        nread = (ssize_t)fillcount;
      }
      else
        nread = 0; /* we're done uploading/reading */

      if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
        /* this is a paused transfer */
        break;
      }
      else if(nread<=0) {
        /* done */
        k->keepon &= ~KEEP_SEND; /* we're done writing */

        if(conn->bits.rewindaftersend) {
          result = Curl_readrewind(conn);
          if(result)
            return result;
        }
        break;
      }

      /* store number of bytes available for upload */
      data->req.upload_present = nread;

      /* convert LF to CRLF if so asked */
      if((!sending_http_headers) && (
#ifdef CURL_DO_LINEEND_CONV
         /* always convert if we're FTPing in ASCII mode */
         (data->set.prefer_ascii) ||
#endif
         (data->set.crlf))) {
        /* Do we need to allocate a scratch buffer?
           2 * BUFSIZE covers the worst case where every byte is an LF
           that expands to CRLF */
        if(!data->state.scratch) {
          data->state.scratch = malloc(2 * BUFSIZE);
          if(!data->state.scratch) {
            failf(data, "Failed to alloc scratch buffer!");

            return CURLE_OUT_OF_MEMORY;
          }
        }

        /*
         * ASCII/EBCDIC Note: This is presumably a text (not binary)
         * transfer so the data should already be in ASCII.
         * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
         * must be used instead of the escape sequences \r & \n.
         */
        for(i = 0, si = 0; i < nread; i++, si++) {
          if(data->req.upload_fromhere[i] == 0x0a) {
            data->state.scratch[si++] = 0x0d;
            data->state.scratch[si] = 0x0a;
            if(!data->set.crlf) {
              /* we're here only because FTP is in ASCII mode...
                 bump infilesize for the LF we just added */
              if(data->state.infilesize != -1)
                data->state.infilesize++;
            }
          }
          else
            data->state.scratch[si] = data->req.upload_fromhere[i];
        }

        if(si != nread) {
          /* only perform the special operation if we really did replace
             anything */
          nread = si;

          /* upload from the new (replaced) buffer instead */
          data->req.upload_fromhere = data->state.scratch;

          /* set the new amount too */
          data->req.upload_present = nread;
        }
      }

#ifndef CURL_DISABLE_SMTP
      if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
        result = Curl_smtp_escape_eob(conn, nread);
        if(result)
          return result;
      }
#endif /* CURL_DISABLE_SMTP */
    } /* if 0 == data->req.upload_present */
    else {
      /* We have a partial buffer left from a previous "round". Use
         that instead of reading more data */
    }

    /* write to socket (send away data) */
    result = Curl_write(conn,
                        conn->writesockfd, /* socket to send to */
                        data->req.upload_fromhere, /* buffer pointer */
                        data->req.upload_present,  /* buffer size */
                        &bytes_written);           /* actually sent */

    if(result)
      return result;

    if(data->set.verbose)
      /* show the data before we change the pointer upload_fromhere */
      Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
                 (size_t)bytes_written, conn);

    k->writebytecount += bytes_written;

    if(k->writebytecount == data->state.infilesize) {
      /* we have sent all data we were supposed to */
      k->upload_done = TRUE;
      infof(data, "We are completely uploaded and fine\n");
    }

    if(data->req.upload_present != bytes_written) {
      /* we only wrote a part of the buffer (if anything), deal with it! */

      /* store the amount of bytes left in the buffer to write */
      data->req.upload_present -= bytes_written;

      /* advance the pointer where to find the buffer when the next send
         is to happen */
      data->req.upload_fromhere += bytes_written;
    }
    else {
      /* we've uploaded that buffer now */
      data->req.upload_fromhere = k->uploadbuf;
      data->req.upload_present = 0; /* no more bytes left */

      if(k->upload_done) {
        /* switch off writing, we're done! */
        k->keepon &= ~KEEP_SEND; /* we're done writing */
      }
    }

    Curl_pgrsSetUploadCounter(data, k->writebytecount);

  } WHILE_FALSE; /* just to break out from! */

  return CURLE_OK;
}
1022
1023/*
1024 * Curl_readwrite() is the low-level function to be called when data is to
1025 * be read and written to/from the connection.
1026 */
CURLcode Curl_readwrite(struct connectdata *conn,
                        struct SessionHandle *data,
                        bool *done)
{
  struct SingleRequest *k = &data->req;
  CURLcode result;
  int didwhat=0; /* bumped by the readwrite_* helpers when anything was
                    actually read or written this invocation */

  curl_socket_t fd_read;
  curl_socket_t fd_write;
  int select_res = conn->cselect_bits; /* caller may pre-supply readiness */

  conn->cselect_bits = 0; /* consume the one-shot hint */

  /* only use the proper socket if the *_HOLD bit is not set simultaneously as
     then we are in rate limiting state in that transfer direction */

  if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
    fd_read = conn->sockfd;
  else
    fd_read = CURL_SOCKET_BAD;

  if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
    fd_write = conn->writesockfd;
  else
    fd_write = CURL_SOCKET_BAD;

  if(conn->data->state.drain) {
    /* buffered data is pending, so treat the socket as readable even if
       select()/poll() would not report it */
    select_res |= CURL_CSELECT_IN;
    DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
  }

  if(!select_res) /* Call for select()/poll() only, if read/write/error
                     status is not known. */
    select_res = Curl_socket_ready(fd_read, fd_write, 0);

  if(select_res == CURL_CSELECT_ERR) {
    failf(data, "select/poll returned error");
    return CURLE_SEND_ERROR;
  }

  /* We go ahead and do a read if we have a readable socket or if
     the stream was rewound (in which case we have data in a
     buffer) */
  if((k->keepon & KEEP_RECV) &&
     ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {

    result = readwrite_data(data, conn, k, &didwhat, done);
    if(result || *done)
      return result;
  }

  /* If we still have writing to do, we check if we have a writable socket. */
  if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
    /* write */

    result = readwrite_upload(data, conn, k, &didwhat);
    if(result)
      return result;
  }

  k->now = Curl_tvnow();
  if(didwhat) {
    /* Update read/write counters */
    if(k->bytecountp)
      *k->bytecountp = k->bytecount; /* read count */
    if(k->writebytecountp)
      *k->writebytecountp = k->writebytecount; /* write count */
  }
  else {
    /* no read no write, this is a timeout? */
    if(k->exp100 == EXP100_AWAITING_CONTINUE) {
      /* This should allow some time for the header to arrive, but only a
         very short time as otherwise it'll be too much wasted time too
         often. */

      /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":

         Therefore, when a client sends this header field to an origin server
         (possibly via a proxy) from which it has never seen a 100 (Continue)
         status, the client SHOULD NOT wait for an indefinite period before
         sending the request body.

      */

      long ms = Curl_tvdiff(k->now, k->start100);
      if(ms >= data->set.expect_100_timeout) {
        /* we've waited long enough, continue anyway */
        k->exp100 = EXP100_SEND_DATA;
        k->keepon |= KEEP_SEND;
        infof(data, "Done waiting for 100-continue\n");
      }
    }
  }

  /* update progress; an aborting progress callback takes precedence over
     the speed check */
  if(Curl_pgrsUpdate(conn))
    result = CURLE_ABORTED_BY_CALLBACK;
  else
    result = Curl_speedcheck(data, k->now);
  if(result)
    return result;

  if(k->keepon) {
    /* transfer not finished yet: enforce the overall operation timeout */
    if(0 > Curl_timeleft(data, &k->now, FALSE)) {
      if(k->size != -1) {
        failf(data, "Operation timed out after %ld milliseconds with %"
              CURL_FORMAT_CURL_OFF_T " out of %"
              CURL_FORMAT_CURL_OFF_T " bytes received",
              Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount,
              k->size);
      }
      else {
        failf(data, "Operation timed out after %ld milliseconds with %"
              CURL_FORMAT_CURL_OFF_T " bytes received",
              Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount);
      }
      return CURLE_OPERATION_TIMEDOUT;
    }
  }
  else {
    /*
     * The transfer has been performed. Just make some general checks before
     * returning.
     */

    if(!(data->set.opt_no_body) && (k->size != -1) &&
       (k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
       /* Most FTP servers don't adjust their file SIZE response for CRLFs,
          so we'll check to see if the discrepancy can be explained
          by the number of CRLFs we've changed to LFs.
       */
       (k->bytecount != (k->size + data->state.crlf_conversions)) &&
#endif /* CURL_DO_LINEEND_CONV */
       !data->req.newurl) {
      failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
            " bytes remaining to read",
            k->size - k->bytecount);
      return CURLE_PARTIAL_FILE;
    }
    else if(!(data->set.opt_no_body) &&
            k->chunk &&
            (conn->chunk.state != CHUNK_STOP)) {
      /*
       * In chunked mode, return an error if the connection is closed prior to
       * the empty (terminating) chunk is read.
       *
       * The condition above used to check for
       * conn->proto.http->chunk.datasize != 0 which is true after reading
       * *any* chunk, not just the empty chunk.
       *
       */
      failf(data, "transfer closed with outstanding read data remaining");
      return CURLE_PARTIAL_FILE;
    }
    if(Curl_pgrsUpdate(conn))
      return CURLE_ABORTED_BY_CALLBACK;
  }

  /* Now update the "done" boolean we return */
  *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
                            KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;

  return CURLE_OK;
}
1192
1193/*
1194 * Curl_single_getsock() gets called by the multi interface code when the app
1195 * has requested to get the sockets for the current connection. This function
1196 * will then be called once for every connection that the multi interface
1197 * keeps track of. This function will only be called for connections that are
1198 * in the proper state to have this information available.
1199 */
int Curl_single_getsock(const struct connectdata *conn,
                        curl_socket_t *sock, /* points to numsocks number
                                                of sockets */
                        int numsocks)
{
  const struct SessionHandle *data = conn->data;
  int bitmap = GETSOCK_BLANK;
  unsigned sockindex = 0; /* next free slot in sock[] */

  if(conn->handler->perform_getsock)
    /* let the protocol handler enumerate its own sockets if it can */
    return conn->handler->perform_getsock(conn, sock, numsocks);

  if(numsocks < 2)
    /* simple check but we might need two slots */
    return GETSOCK_BLANK;

  /* don't include HOLD and PAUSE connections */
  if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {

    DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);

    bitmap |= GETSOCK_READSOCK(sockindex);
    sock[sockindex] = conn->sockfd;
  }

  /* don't include HOLD and PAUSE connections */
  if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {

    /* store the write socket unless it is the very same descriptor already
       stored for reading above */
    if((conn->sockfd != conn->writesockfd) ||
       bitmap == GETSOCK_BLANK) {
      /* only if they are not the same socket and we have a readable
         one, we increase index */
      if(bitmap != GETSOCK_BLANK)
        sockindex++; /* increase index if we need two entries */

      DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);

      sock[sockindex] = conn->writesockfd;
    }

    /* when read and write share a descriptor, the write bit lands on the
       same index as the read bit */
    bitmap |= GETSOCK_WRITESOCK(sockindex);
  }

  return bitmap;
}
1245
1246/*
1247 * Determine optimum sleep time based on configured rate, current rate,
1248 * and packet size.
1249 * Returns value in milliseconds.
1250 *
1251 * The basic idea is to adjust the desired rate up/down in this method
1252 * based on whether we are running too slow or too fast. Then, calculate
1253 * how many milliseconds to wait for the next packet to achieve this new
1254 * rate.
1255 */
1256long Curl_sleep_time(curl_off_t rate_bps, curl_off_t cur_rate_bps,
1257 int pkt_size)
1258{
1259 curl_off_t min_sleep = 0;
1260 curl_off_t rv = 0;
1261
1262 if(rate_bps == 0)
1263 return 0;
1264
1265 /* If running faster than about .1% of the desired speed, slow
1266 * us down a bit. Use shift instead of division as the 0.1%
1267 * cutoff is arbitrary anyway.
1268 */
1269 if(cur_rate_bps > (rate_bps + (rate_bps >> 10))) {
1270 /* running too fast, decrease target rate by 1/64th of rate */
1271 rate_bps -= rate_bps >> 6;
1272 min_sleep = 1;
1273 }
1274 else if(cur_rate_bps < (rate_bps - (rate_bps >> 10))) {
1275 /* running too slow, increase target rate by 1/64th of rate */
1276 rate_bps += rate_bps >> 6;
1277 }
1278
1279 /* Determine number of milliseconds to wait until we do
1280 * the next packet at the adjusted rate. We should wait
1281 * longer when using larger packets, for instance.
1282 */
1283 rv = ((curl_off_t)(pkt_size * 1000) / rate_bps);
1284
1285 /* Catch rounding errors and always slow down at least 1ms if
1286 * we are running too fast.
1287 */
1288 if(rv < min_sleep)
1289 rv = min_sleep;
1290
1291 /* Bound value to fit in 'long' on 32-bit platform. That's
1292 * plenty long enough anyway!
1293 */
1294 if(rv > 0x7fffffff)
1295 rv = 0x7fffffff;
1296
1297 return (long)rv;
1298}
1299
1300/* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1301 which means this gets called once for each subsequent redirect etc */
1302void Curl_init_CONNECT(struct SessionHandle *data)
1303{
1304 data->state.fread_func = data->set.fread_func_set;
1305 data->state.in = data->set.in_set;
1306}
1307
1308/*
1309 * Curl_pretransfer() is called immediately before a transfer starts, and only
1310 * once for one transfer no matter if it has redirects or do multi-pass
1311 * authentication etc.
1312 */
CURLcode Curl_pretransfer(struct SessionHandle *data)
{
  CURLcode result;
  if(!data->change.url) {
    /* we can't do anything without URL */
    failf(data, "No URL set!");
    return CURLE_URL_MALFORMAT;
  }

  /* Init the SSL session ID cache here. We do it here since we want to do it
     after the *_setopt() calls (that could specify the size of the cache) but
     before any transfer takes place. */
  result = Curl_ssl_initsessions(data, data->set.ssl.max_ssl_sessions);
  if(result)
    return result;

  data->set.followlocation=0; /* reset the location-follow counter */
  data->state.this_is_a_follow = FALSE; /* reset this */
  data->state.errorbuf = FALSE; /* no error has occurred */
  data->state.httpversion = 0; /* don't assume any particular server version */

  data->state.authproblem = FALSE;
  data->state.authhost.want = data->set.httpauth;
  data->state.authproxy.want = data->set.proxyauth;
  Curl_safefree(data->info.wouldredirect);
  data->info.wouldredirect = NULL;

  /* the expected upload size depends on the request kind: PUT uses the
     explicit file size, everything else the POST field size */
  if(data->set.httpreq == HTTPREQ_PUT)
    data->state.infilesize = data->set.filesize;
  else
    data->state.infilesize = data->set.postfieldsize;

  /* If there is a list of cookie files to read, do it now! */
  if(data->change.cookielist)
    Curl_cookie_loadfiles(data);

  /* If there is a list of host pairs to deal with */
  if(data->change.resolve)
    result = Curl_loadhostpairs(data);

  if(!result) {
    /* Allow data->set.use_port to set which port to use. This needs to be
     * disabled for example when we follow Location: headers to URLs using
     * different ports! */
    data->state.allow_port = TRUE;

#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
    /*************************************************************
     * Tell signal handler to ignore SIGPIPE
     *************************************************************/
    if(!data->set.no_signal)
      /* remember the previous handler so Curl_posttransfer() can restore */
      data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
#endif

    Curl_initinfo(data); /* reset session-specific information "variables" */
    Curl_pgrsResetTimesSizes(data);
    Curl_pgrsStartNow(data);

    if(data->set.timeout)
      Curl_expire(data, data->set.timeout);

    if(data->set.connecttimeout)
      Curl_expire(data, data->set.connecttimeout);

    /* In case the handle is re-used and an authentication method was picked
       in the session we need to make sure we only use the one(s) we now
       consider to be fine */
    data->state.authhost.picked &= data->state.authhost.want;
    data->state.authproxy.picked &= data->state.authproxy.want;
  }

  return result;
}
1386
1387/*
1388 * Curl_posttransfer() is called immediately after a transfer ends
1389 */
CURLcode Curl_posttransfer(struct SessionHandle *data)
{
#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
  /* restore the signal handler for SIGPIPE before we get back */
  /* undoes the SIG_IGN installed by Curl_pretransfer() */
  if(!data->set.no_signal)
    signal(SIGPIPE, data->state.prev_signal);
#else
  (void)data; /* unused parameter */
#endif

  return CURLE_OK;
}
1402
1403#ifndef CURL_DISABLE_HTTP
1404/*
1405 * strlen_url() returns the length of the given URL if the spaces within the
1406 * URL were properly URL encoded.
1407 */
/*
 * strlen_url() returns the length the given URL would occupy once its
 * spaces are URL-encoded ("%20" left of any '?', '+' right of it) and
 * every byte >= 0x80 is expanded to a three-character "%xx" sequence.
 */
static size_t strlen_url(const char *url)
{
  size_t encoded_len = 0;
  int in_query = 0; /* flips to 1 once a '?' has been seen */
  const unsigned char *p = (const unsigned char *)url;

  for(; *p; p++) {
    if(*p == ' ')
      /* "%20" before the query part, a single '+' inside it */
      encoded_len += in_query ? 1 : 3;
    else {
      if(*p == '?')
        in_query = 1;
      /* a high byte expands to "%xx" (three chars), anything else to one */
      encoded_len += (*p >= 0x80) ? 3 : 1;
    }
  }
  return encoded_len;
}
1434
1435/* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
1436 * the source URL accordingly.
1437 */
/* strcpy_url() copies a url to a output buffer and URL-encodes it on the
 * way: spaces become "%20" left of the '?' and '+' right of it, while
 * bytes >= 0x80 become lowercase "%xx" escapes. The output buffer must be
 * at least strlen_url(url) + 1 bytes.
 */
static void strcpy_url(char *output, const char *url)
{
  const unsigned char *in = (const unsigned char *)url;
  char *out = output;
  int in_query = 0; /* set once a '?' has been copied */

  for(; *in; in++) {
    if(*in == ' ') {
      if(in_query)
        *out++ = '+'; /* space within the query part */
      else {
        /* space before the query part: emit "%20" */
        *out++ = '%';
        *out++ = '2';
        *out++ = '0';
      }
    }
    else if(*in >= 0x80) {
      /* high byte: percent-encode with two lowercase hex digits */
      snprintf(out, 4, "%%%02x", *in);
      out += 3;
    }
    else {
      if(*in == '?')
        in_query = 1;
      *out++ = (char)*in;
    }
  }
  *out = 0; /* zero terminate output buffer */
}
1473
1474/*
1475 * Returns true if the given URL is absolute (as opposed to relative)
1476 */
/*
 * Returns true if the given URL is absolute (as opposed to relative),
 * i.e. looks like "<scheme>://<rest>" where the scheme contains none of
 * '?', '&', '/' or ':'.
 */
static bool is_absolute_url(const char *url)
{
  char scheme[16]; /* scratch storage for the scheme name */
  char follower;   /* one byte after the "://" separator */

  /* both conversions must succeed for the URL to count as absolute */
  return (bool)(2 == sscanf(url, "%15[^?&/:]://%c", scheme, &follower));
}
1484
1485/*
1486 * Concatenate a relative URL to a base URL making it absolute.
1487 * URL-encodes any spaces.
1488 * The returned pointer must be freed by the caller unless NULL
1489 * (returns NULL on out of memory).
1490 */
static char *concat_url(const char *base, const char *relurl)
{
  /***
   TRY to append this new path to the old URL
   to the right of the host part. Oh crap, this is doomed to cause
   problems in the future...
  */
  char *newest;
  char *protsep;
  char *pathsep;
  size_t newlen;

  const char *useurl = relurl; /* the part of relurl actually appended */
  size_t urllen;

  /* we must make our own copy of the URL to play with, as it may
     point to read-only data */
  char *url_clone=strdup(base);

  if(!url_clone)
    return NULL; /* skip out of this NOW */

  /* protsep points to the start of the host name */
  protsep=strstr(url_clone, "//");
  if(!protsep)
    protsep=url_clone;
  else
    protsep+=2; /* pass the slashes */

  if('/' != relurl[0]) {
    int level=0; /* number of "../" sequences to resolve */

    /* First we need to find out if there's a ?-letter in the URL,
       and cut it and the right-side of that off */
    pathsep = strchr(protsep, '?');
    if(pathsep)
      *pathsep=0;

    /* we have a relative path to append to the last slash if there's one
       available, or if the new URL is just a query string (starts with a
       '?') we append the new one at the end of the entire currently worked
       out URL */
    if(useurl[0] != '?') {
      pathsep = strrchr(protsep, '/');
      if(pathsep)
        *pathsep=0;
    }

    /* Check if there's any slash after the host name, and if so, remember
       that position instead */
    pathsep = strchr(protsep, '/');
    if(pathsep)
      protsep = pathsep+1;
    else
      protsep = NULL;

    /* now deal with one "./" or any amount of "../" in the newurl
       and act accordingly */

    if((useurl[0] == '.') && (useurl[1] == '/'))
      useurl+=2; /* just skip the "./" */

    while((useurl[0] == '.') &&
          (useurl[1] == '.') &&
          (useurl[2] == '/')) {
      level++;
      useurl+=3; /* pass the "../" */
    }

    if(protsep) {
      while(level--) {
        /* cut off one more level from the right of the original URL */
        pathsep = strrchr(protsep, '/');
        if(pathsep)
          *pathsep=0;
        else {
          /* no path left to strip: truncate at the host boundary */
          *protsep=0;
          break;
        }
      }
    }
  }
  else {
    /* We got a new absolute path for this server */

    if((relurl[0] == '/') && (relurl[1] == '/')) {
      /* the new URL starts with //, just keep the protocol part from the
         original one */
      *protsep=0;
      useurl = &relurl[2]; /* we keep the slashes from the original, so we
                              skip the new ones */
    }
    else {
      /* cut off the original URL from the first slash, or deal with URLs
         without slash */
      pathsep = strchr(protsep, '/');
      if(pathsep) {
        /* When people use badly formatted URLs, such as
           "http://www.url.com?dir=/home/daniel" we must not use the first
           slash, if there's a ?-letter before it! */
        char *sep = strchr(protsep, '?');
        if(sep && (sep < pathsep))
          pathsep = sep;
        *pathsep=0;
      }
      else {
        /* There was no slash. Now, since we might be operating on a badly
           formatted URL, such as "http://www.url.com?id=2380" which doesn't
           use a slash separator as it is supposed to, we need to check for a
           ?-letter as well! */
        pathsep = strchr(protsep, '?');
        if(pathsep)
          *pathsep=0;
      }
    }
  }

  /* If the new part contains a space, this is a mighty stupid redirect
     but we still make an effort to do "right". To the left of a '?'
     letter we replace each space with %20 while it is replaced with '+'
     on the right side of the '?' letter.
  */
  newlen = strlen_url(useurl);

  urllen = strlen(url_clone);

  /* allocate enough for base + optional slash + encoded piece + NUL */
  newest = malloc(urllen + 1 + /* possible slash */
                  newlen + 1 /* zero byte */);

  if(!newest) {
    free(url_clone); /* don't leak this */
    return NULL;
  }

  /* copy over the root url part */
  memcpy(newest, url_clone, urllen);

  /* check if we need to append a slash */
  if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
    ;
  else
    newest[urllen++]='/';

  /* then append the new piece on the right side */
  strcpy_url(&newest[urllen], useurl);

  free(url_clone);

  return newest;
}
1641#endif /* CURL_DISABLE_HTTP */
1642
1643/*
1644 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1645 * as given by the remote server and set up the new URL to request.
1646 */
CURLcode Curl_follow(struct SessionHandle *data,
                     char *newurl, /* this 'newurl' is the Location: string,
                                      and it must be malloc()ed before passed
                                      here */
                     followtype type) /* see transfer.h */
{
#ifdef CURL_DISABLE_HTTP
  (void)data;
  (void)newurl;
  (void)type;
  /* Location: following will not happen when HTTP is disabled */
  return CURLE_TOO_MANY_REDIRECTS;
#else

  /* Location: redirect */
  bool disallowport = FALSE;

  if(type == FOLLOW_REDIR) {
    /* enforce the configured redirect limit before doing anything else */
    if((data->set.maxredirs != -1) &&
       (data->set.followlocation >= data->set.maxredirs)) {
      failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
      return CURLE_TOO_MANY_REDIRECTS;
    }

    /* mark the next request as a followed location: */
    data->state.this_is_a_follow = TRUE;

    data->set.followlocation++; /* count location-followers */

    if(data->set.http_auto_referer) {
      /* We are asked to automatically set the previous URL as the referer
         when we get the next URL. We pick the ->url field, which may or may
         not be 100% correct */

      if(data->change.referer_alloc) {
        Curl_safefree(data->change.referer);
        data->change.referer_alloc = FALSE;
      }

      data->change.referer = strdup(data->change.url);
      if(!data->change.referer)
        return CURLE_OUT_OF_MEMORY;
      data->change.referer_alloc = TRUE; /* yes, free this later */
    }
  }

  if(!is_absolute_url(newurl)) {
    /***
     *DANG* this is an RFC 2068 violation. The URL is supposed
     to be absolute and this doesn't seem to be that!
     */
    char *absolute = concat_url(data->change.url, newurl);
    if(!absolute)
      return CURLE_OUT_OF_MEMORY;
    free(newurl); /* this function owns 'newurl' and replaces it here */
    newurl = absolute;
  }
  else {
    /* The new URL MAY contain space or high byte values, that means a mighty
       stupid redirect URL but we still make an effort to do "right". */
    char *newest;
    size_t newlen = strlen_url(newurl);

    /* This is an absolute URL, don't allow the custom port number */
    disallowport = TRUE;

    newest = malloc(newlen+1); /* get memory for this */
    if(!newest)
      return CURLE_OUT_OF_MEMORY;
    strcpy_url(newest, newurl); /* create a space-free URL */

    free(newurl); /* that was no good */
    newurl = newest; /* use this instead now */

  }

  if(type == FOLLOW_FAKE) {
    /* we're only figuring out the new url if we would've followed locations
       but now we're done so we can get out! */
    data->info.wouldredirect = newurl;
    return CURLE_OK;
  }

  if(disallowport)
    data->state.allow_port = FALSE;

  if(data->change.url_alloc) {
    Curl_safefree(data->change.url);
    data->change.url_alloc = FALSE;
  }

  /* hand ownership of the (possibly rewritten) URL over to the handle */
  data->change.url = newurl;
  data->change.url_alloc = TRUE;
  newurl = NULL; /* don't free! */

  infof(data, "Issue another request to this URL: '%s'\n", data->change.url);

  /*
   * We get here when the HTTP code is 300-399 (and 401). We need to perform
   * differently based on exactly what return code there was.
   *
   * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
   * a HTTP (proxy-) authentication scheme other than Basic.
   */
  switch(data->info.httpcode) {
    /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
       Authorization: XXXX header in the HTTP request code snippet */
    /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
       Proxy-Authorization: XXXX header in the HTTP request code snippet */
    /* 300 - Multiple Choices */
    /* 306 - Not used */
    /* 307 - Temporary Redirect */
  default: /* for all above (and the unknown ones) */
    /* Some codes are explicitly mentioned since I've checked RFC2616 and they
     * seem to be OK to POST to.
     */
    break;
  case 301: /* Moved Permanently */
    /* (quote from RFC7231, section 6.4.2)
     *
     * Note: For historical reasons, a user agent MAY change the request
     * method from POST to GET for the subsequent request. If this
     * behavior is undesired, the 307 (Temporary Redirect) status code
     * can be used instead.
     *
     * ----
     *
     * Many webservers expect this, so these servers often answers to a POST
     * request with an error page. To be sure that libcurl gets the page that
     * most user agents would get, libcurl has to force GET.
     *
     * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
     * can be overridden with CURLOPT_POSTREDIR.
     */
    if((data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM)
       && !(data->set.keep_post & CURL_REDIR_POST_301)) {
      infof(data, "Switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;
  case 302: /* Found */
    /* (quote from RFC7231, section 6.4.3)
     *
     * Note: For historical reasons, a user agent MAY change the request
     * method from POST to GET for the subsequent request. If this
     * behavior is undesired, the 307 (Temporary Redirect) status code
     * can be used instead.
     *
     * ----
     *
     * Many webservers expect this, so these servers often answers to a POST
     * request with an error page. To be sure that libcurl gets the page that
     * most user agents would get, libcurl has to force GET.
     *
     * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
     * can be overridden with CURLOPT_POSTREDIR.
     */
    if((data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM)
       && !(data->set.keep_post & CURL_REDIR_POST_302)) {
      infof(data, "Switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;

  case 303: /* See Other */
    /* Disable both types of POSTs, unless the user explicitely
       asks for POST after POST */
    if(data->set.httpreq != HTTPREQ_GET
       && !(data->set.keep_post & CURL_REDIR_POST_303)) {
      data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
      infof(data, "Disables POST, goes with %s\n",
            data->set.opt_no_body?"HEAD":"GET");
    }
    break;
  case 304: /* Not Modified */
    /* 304 means we did a conditional request and it was "Not modified".
     * We shouldn't get any Location: header in this response!
     */
    break;
  case 305: /* Use Proxy */
    /* (quote from RFC2616, section 10.3.6):
     * "The requested resource MUST be accessed through the proxy given
     * by the Location field. The Location field gives the URI of the
     * proxy. The recipient is expected to repeat this single request
     * via the proxy. 305 responses MUST only be generated by origin
     * servers."
     */
    break;
  }
  Curl_pgrsTime(data, TIMER_REDIRECT);
  Curl_pgrsResetTimesSizes(data);

  return CURLE_OK;
#endif /* CURL_DISABLE_HTTP */
}
1844
/*
 * Curl_reconnect_request() closes a connection that appears dead and tries
 * a fresh CONNECT on a replacement. On return *connp points at the new
 * connection, or is NULL when none could be established.
 */
CURLcode
Curl_reconnect_request(struct connectdata **connp)
{
  CURLcode result = CURLE_OK;
  struct connectdata *conn = *connp;
  struct SessionHandle *data = conn->data;

  /* This was a re-use of a connection and we got a write error in the
   * DO-phase. Then we DISCONNECT this connection and have another attempt to
   * CONNECT and then DO again! The retry cannot possibly find another
   * connection to re-use, since we only keep one possible connection for
   * each. */

  infof(data, "Re-used connection seems dead, get a new one\n");

  connclose(conn, "Reconnect dead connection"); /* enforce close */
  result = Curl_done(&conn, result, FALSE); /* we are so done with this */

  /* conn may no longer be a good pointer, clear it to avoid mistakes by
     parent functions */
  *connp = NULL;

  /*
   * According to bug report #1330310. We need to check for CURLE_SEND_ERROR
   * here as well. I figure this could happen when the request failed on a FTP
   * connection and thus Curl_done() itself tried to use the connection
   * (again). Slight Lack of feedback in the report, but I don't think this
   * extra check can do much harm.
   */
  if(!result || (CURLE_SEND_ERROR == result)) {
    bool async;
    bool protocol_done = TRUE;

    /* Now, redo the connect and get a new connection */
    result = Curl_connect(data, connp, &async, &protocol_done);
    if(!result) {
      /* We have connected or sent away a name resolve query fine */

      conn = *connp; /* setup conn to again point to something nice */
      if(async) {
        /* Now, if async is TRUE here, we need to wait for the name
           to resolve */
        result = Curl_resolver_wait_resolv(conn, NULL);
        if(result)
          return result;

        /* Resolved, continue with the connection */
        result = Curl_async_resolved(conn, &protocol_done);
        if(result)
          return result;
      }
    }
  }

  return result;
}
1901
1902/* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1903
1904 NOTE: that the *url is malloc()ed. */
CURLcode Curl_retry_request(struct connectdata *conn,
                            char **url)
{
  struct SessionHandle *data = conn->data;

  *url = NULL; /* no retry wanted unless set below */

  /* if we're talking upload, we can't do the checks below, unless the protocol
     is HTTP as when uploading over HTTP we will still get a response */
  if(data->set.upload &&
     !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
    return CURLE_OK;

  /* retry only when nothing at all (neither body nor header bytes) arrived
     on a re-used connection for a request that expects a body */
  if((data->req.bytecount + data->req.headerbytecount == 0) &&
      conn->bits.reuse &&
      !data->set.opt_no_body &&
      (data->set.rtspreq != RTSPREQ_RECEIVE)) {
    /* We got no data, we attempted to re-use a connection and yet we want a
       "body". This might happen if the connection was left alive when we were
       done using it before, but that was closed when we wanted to read from
       it again. Bad luck. Retry the same request on a fresh connect! */
    infof(conn->data, "Connection died, retrying a fresh connect\n");
    *url = strdup(conn->data->change.url); /* caller owns and frees this */
    if(!*url)
      return CURLE_OUT_OF_MEMORY;

    connclose(conn, "retry"); /* close this connection */
    conn->bits.retry = TRUE; /* mark this as a connection we're about
                                to retry. Marking it this way should
                                prevent i.e HTTP transfers to return
                                error just because nothing has been
                                transferred! */


    if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
      struct HTTP *http = data->req.protop;
      /* if part of the request body was already sent, rewind the read
         stream so the retry starts from the beginning */
      if(http->writebytecount)
        return Curl_readrewind(conn);
    }
  }
  return CURLE_OK;
}
1947
1948/*
1949 * Curl_setup_transfer() is called to setup some basic properties for the
1950 * upcoming transfer.
1951 */
void
Curl_setup_transfer(
  struct connectdata *conn, /* connection data */
  int sockindex,            /* socket index to read from or -1 */
  curl_off_t size,          /* -1 if unknown at this point */
  bool getheader,           /* TRUE if header parsing is wanted */
  curl_off_t *bytecountp,   /* return number of bytes read or NULL */
  int writesockindex,       /* socket index to write to, it may very well be
                               the same we read from. -1 disables */
  curl_off_t *writecountp   /* return number of bytes written or NULL */
  )
{
  struct SessionHandle *data;
  struct SingleRequest *k;

  DEBUGASSERT(conn != NULL);

  data = conn->data;
  k = &data->req;

  DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));

  /* now copy all input parameters */
  conn->sockfd = sockindex == -1 ?
      CURL_SOCKET_BAD : conn->sock[sockindex];
  conn->writesockfd = writesockindex == -1 ?
      CURL_SOCKET_BAD:conn->sock[writesockindex];
  k->getheader = getheader;

  k->size = size;
  k->bytecountp = bytecountp;
  k->writebytecountp = writecountp;

  /* The code sequence below is placed in this function just because all
     necessary input is not always known in do_complete() as this function may
     be called after that */

  if(!k->getheader) {
    k->header = FALSE; /* no header parsing wanted */
    if(size > 0)
      Curl_pgrsSetDownloadSize(data, size);
  }
  /* we want header and/or body, if neither then don't do this! */
  if(k->getheader || !data->set.opt_no_body) {

    if(conn->sockfd != CURL_SOCKET_BAD)
      k->keepon |= KEEP_RECV;

    if(conn->writesockfd != CURL_SOCKET_BAD) {
      struct HTTP *http = data->req.protop;
      /* HTTP 1.1 magic:

         Even if we require a 100-return code before uploading data, we might
         need to write data before that since the REQUEST may not have been
         finished sent off just yet.

         Thus, we must check if the request has been sent before we set the
         state info where we wait for the 100-return code
      */
      if((data->state.expect100header) &&
         (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
         (http->sending == HTTPSEND_BODY)) {
        /* wait with write until we either got 100-continue or a timeout */
        k->exp100 = EXP100_AWAITING_CONTINUE;
        k->start100 = Curl_tvnow();

        /* Set a timeout for the multi interface. Add the inaccuracy margin so
           that we don't fire slightly too early and get denied to run. */
        Curl_expire(data, data->set.expect_100_timeout);
      }
      else {
        if(data->state.expect100header)
          /* when we've sent off the rest of the headers, we must await a
             100-continue but first finish sending the request */
          k->exp100 = EXP100_SENDING_REQUEST;

        /* enable the write bit when we're not waiting for continue */
        k->keepon |= KEEP_SEND;
      }
    } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
  } /* if(k->getheader || !data->set.opt_no_body) */

}
Note: See TracBrowser for help on using the repository browser.