Compare commits


418 Commits

Author SHA1 Message Date
Daniel Stenberg
50a53d4eec 7.7.1 commit 2001-04-04 06:23:43 +00:00
Daniel Stenberg
6bd1ed034a bugfixed the Location: following that must've been bad since the persistant
connections were introduced
2001-04-03 13:37:53 +00:00
Daniel Stenberg
fa491ed910 - disabling port on absolute redirects is wrong
- removed #ifdefed code
2001-04-03 13:18:41 +00:00
Daniel Stenberg
66a1e3df69 two crashes removed 2001-04-03 12:37:48 +00:00
Daniel Stenberg
28497e7ee4 better error checks for failure conditions (based on Puneet Pawaia's reports) 2001-04-03 10:20:23 +00:00
Daniel Stenberg
87c7f403a9 Puneet Pawaia pointed out the lack of http_chunks in several places. 2001-04-03 08:57:06 +00:00
Daniel Stenberg
1a2c3acb3b elaborated more in CURLOPT_HTTPHEADER section 2001-03-30 08:43:52 +00:00
Daniel Stenberg
b54d752783 ftps:// added and the perl interfaces changed 2001-03-29 11:25:29 +00:00
Daniel Stenberg
b1328430c9 ftps:// support added 2001-03-29 08:16:55 +00:00
Daniel Stenberg
34efa74a59 Georg Horn's and my fixes to make it compile with 7.7 2001-03-29 06:45:04 +00:00
Daniel Stenberg
794d08a728 Georg Horn set -Wall 2001-03-29 06:44:34 +00:00
Daniel Stenberg
0abc999c4d Georg Horn's updates 2001-03-29 06:43:46 +00:00
Daniel Stenberg
3e65062be2 make sure the alarm is off when returning from curl_easy_perform() 2001-03-27 21:24:46 +00:00
Daniel Stenberg
45ffb16c2a Added a line about the new makefile example 2001-03-27 09:10:53 +00:00
Daniel Stenberg
0b8b0b7c86 Added Makefile.example as an example makefile that can build the example
source files (if edited slightly)
2001-03-27 09:09:09 +00:00
Daniel Stenberg
053bf49bd2 Added ftpget.c just to show that it is exactly as easy to get FTP files 2001-03-27 09:00:18 +00:00
Daniel Stenberg
8b08dfed38 no more ' as first letter of a row, and made the quotes match in the top
.BI line
2001-03-27 08:45:50 +00:00
Daniel Stenberg
ba3a3553dc Added some text to WRITEHEADER about the fact that libcurl will always write
complete header lines one-by-one to that file handle
2001-03-27 08:41:37 +00:00
Daniel Stenberg
6a26104724 7.7.1-beta1 2001-03-26 13:49:50 +00:00
Daniel Stenberg
8b35b89f4d persistant fix for http/1.0 2001-03-26 09:07:44 +00:00
Daniel Stenberg
31f9d4016d 'Connection: keep-alive' is now understood when sent by a HTTP/1.0 server
as an indication of a persistant connection
2001-03-26 06:19:11 +00:00
Daniel Stenberg
bb601731ea numerous corrections since the 7.7 release 2001-03-24 18:50:55 +00:00
Daniel Stenberg
9a85172896 Colin Watson's man patch as posted to debian bug tracker number #90281 2001-03-24 18:28:43 +00:00
Daniel Stenberg
a0eb52bee1 two Qs added:
1.5 Who makes cURL?
 1.6 What do you get for making cURL?
2001-03-23 15:28:13 +00:00
Daniel Stenberg
6235a8d969 make should be $(MAKE) 2001-03-23 14:29:10 +00:00
Daniel Stenberg
0d6a87ed7a match the new never-read-body when doing HEAD 2001-03-23 14:26:23 +00:00
Daniel Stenberg
b6241b3c89 curl_setopt() should be curl_easy_setopt() 2001-03-23 09:07:04 +00:00
Daniel Stenberg
1e14f8d4c7 DONT TOUCH the data->url as it may point to read-only memory!!! 2001-03-23 08:24:47 +00:00
Daniel Stenberg
bc5954fe2d updates by SM nttp at iname.com 2001-03-23 08:16:24 +00:00
Daniel Stenberg
02f6894af5 now always stops reading a HEAD reply after all the headers have been returned
RFC 2616, section 9.4 says: "The HEAD method is identical to GET except that
the server MUST NOT return a message-body in the response."
2001-03-23 07:52:45 +00:00
Daniel Stenberg
76576cd1e2 ConnectionExists() wrongly returned TRUE for too many connections if proxy
was not used...
2001-03-23 07:46:14 +00:00
Daniel Stenberg
997672ba9a updated with the new don't-encode-already-encoded-data concept 2001-03-22 20:06:31 +00:00
Daniel Stenberg
ec1f42a154 Treat 302-redirects the same way we treat 303-redirects 2001-03-22 20:02:52 +00:00
Daniel Stenberg
aa1c3bb46d reset the follow location counter in Curl_perform() so that we can follow
new locations on the same connection that was previously followed on
2001-03-22 19:14:35 +00:00
Daniel Stenberg
95f0714ff8 brand new Curl_ prefixes on global symbols 2001-03-22 19:07:38 +00:00
Daniel Stenberg
c050619b36 made it use Curl_ prefixes on global symbols 2001-03-22 18:44:43 +00:00
Daniel Stenberg
58085dbbf6 Jim Drash suggested and I made it not encode what looks like an already
encoded letter (in curl_escape)
2001-03-22 18:06:08 +00:00
Daniel Stenberg
546572da04 7.7 commit 2001-03-22 17:21:07 +00:00
Daniel Stenberg
005536cc28 removed the duplicates! 2001-03-22 15:42:10 +00:00
Daniel Stenberg
3d5b6aa3b0 added escape and unescape 2001-03-22 11:53:12 +00:00
Daniel Stenberg
f8d883355d the new escape/unescape function setup 2001-03-22 11:40:58 +00:00
Daniel Stenberg
c23df41d83 (un)escape, configure hack, betas run fine 2001-03-22 11:24:21 +00:00
Daniel Stenberg
8e7b261984 added how to pronounce curl! ;-) 2001-03-22 11:23:48 +00:00
Daniel Stenberg
08e3d034ef don't mention explicit version 7.8
removed escape/unescape as they're now documented
2001-03-22 11:22:47 +00:00
Daniel Stenberg
2f869f89ff added escape and unescape man pages 2001-03-22 11:22:09 +00:00
Daniel Stenberg
0f310a5001 not exactly new, but documented for 7.7 2001-03-22 10:32:56 +00:00
Daniel Stenberg
ad1abee441 Andrés García's problems on the mailing list made me realize that we can't
allow this script to simply detect a gethostbyname_r() if it can't figure
out how to use it. From now on, this script will fail when that happens.
2001-03-22 08:51:24 +00:00
Daniel Stenberg
669709f80e GNUTLS is another new SSL library we can add support for. Anyone? 2001-03-20 10:13:47 +00:00
Daniel Stenberg
ea409d0374 7.7-beta5 commit 2001-03-19 08:42:00 +00:00
Daniel Stenberg
eaaa1a1fd4 test case 39 added, HTTP location and continue 2001-03-19 08:36:08 +00:00
Daniel Stenberg
78b4851da1 Added support for HTTP code 100 continue, as 8.2.3 in RFC2616 defines 2001-03-19 07:47:57 +00:00
Daniel Stenberg
38c47803dd detect if chunked transfers are aborted 2001-03-16 15:45:12 +00:00
Daniel Stenberg
455663ba5e corrected the close to sclose() so that the memdebug stuff works 2001-03-16 15:44:38 +00:00
Daniel Stenberg
efb5d9a403 new directories 2001-03-16 15:22:51 +00:00
Daniel Stenberg
b1a5208e6b removed the CURL_SEPARATORS define 2001-03-16 15:21:26 +00:00
Daniel Stenberg
e6dacd92ec re-generated with the memdebug.h include 2001-03-16 15:20:36 +00:00
Daniel Stenberg
952b3a2c0f added memdebug.h include 2001-03-16 15:19:36 +00:00
Daniel Stenberg
721f9bca84 moved to ../../php/examples/ 2001-03-16 13:45:42 +00:00
Daniel Stenberg
ad4d5fabf8 the PHP examples are moved 2001-03-16 13:44:57 +00:00
Daniel Stenberg
aa860990ad fix the new makefiles in php/ and perl/ 2001-03-16 13:35:45 +00:00
Daniel Stenberg
0fa9135d9f use perl in two ways 2001-03-16 13:35:11 +00:00
Daniel Stenberg
8f0114a4dd Short about the perl interface 2001-03-16 13:34:08 +00:00
Daniel Stenberg
5980c2977b filled in 2001-03-16 13:30:56 +00:00
Daniel Stenberg
19f8d71508 for the php examples 2001-03-16 13:29:57 +00:00
Daniel Stenberg
6f3bccd911 PHP examples 2001-03-16 13:28:11 +00:00
Daniel Stenberg
96f81a5c4a new PHP section 2001-03-16 13:27:42 +00:00
Daniel Stenberg
ca05d1b59c a perl script that can be used to mirror all curl archives 2001-03-16 13:10:42 +00:00
Daniel Stenberg
895dc5e530 Added README for releases 2001-03-16 13:09:21 +00:00
Daniel Stenberg
bcc6ca6fd1 Added to build proper releases 2001-03-16 13:09:05 +00:00
Daniel Stenberg
d538241a58 Georg Horn's Curl::easy interface for perl 2001-03-16 13:05:39 +00:00
Daniel Stenberg
71b4b2ffa9 moved to contrib/ 2001-03-16 13:05:18 +00:00
Daniel Stenberg
65b4a63f56 moved here from ../ 2001-03-16 13:04:57 +00:00
Daniel Stenberg
ecbee01f4b moved the documentation item to 7.8, it is rather important to have things
documented
2001-03-15 14:45:03 +00:00
Daniel Stenberg
34fed76a35 updated to have the windows builds instructions use the root Makefile that
is delivered with each source archive
2001-03-15 14:44:01 +00:00
Daniel Stenberg
0adf0cfde7 connection timeouts added 2001-03-15 14:38:54 +00:00
Daniel Stenberg
d6c456db85 added connect timeout support 2001-03-15 14:38:30 +00:00
Daniel Stenberg
36c88343d3 Added --connect-timeout support 2001-03-15 14:38:03 +00:00
Daniel Stenberg
2360e5ce12 Added CURLOPT_CONNECTTIMEOUT 2001-03-15 14:37:41 +00:00
Daniel Stenberg
d445eac162 connection timeout is now supported 2001-03-15 14:37:17 +00:00
Daniel Stenberg
e0a6d20e20 Jörn's win32-fix to make it work better 2001-03-15 12:34:40 +00:00
Daniel Stenberg
3bb979b897 corrected it, did I mention IPv6 with HTTP proxy? 2001-03-15 09:14:43 +00:00
Daniel Stenberg
010daec776 Put more concentrated unix install help already at the top, with a note that
you might need to be root to use 'make install'.
2001-03-15 08:38:15 +00:00
Daniel Stenberg
e2b0ad8429 added some text for -d that says it "emulates filling in HTML forms" as that
is what most people will use -d for
2001-03-14 19:48:29 +00:00
Daniel Stenberg
6eed95103a ipv6 adjustments 2001-03-14 18:26:54 +00:00
Daniel Stenberg
4eb2a165e8 removed a bunch of warnings for IPv6-compiles 2001-03-14 18:24:07 +00:00
Daniel Stenberg
b7fc1e45b5 now works with IPv6 and HTTP proxy 2001-03-14 18:18:02 +00:00
Daniel Stenberg
3395a2fa9e netrc fix 2001-03-14 16:59:49 +00:00
Daniel Stenberg
a564a54e21 hm, don't free the home dir and append the .netrc part properly 2001-03-14 16:12:47 +00:00
Daniel Stenberg
92186dc3d3 checks for a few functions and include files more for the new getpwuid()
stuff in lib/netrc.c
2001-03-14 16:05:31 +00:00
Daniel Stenberg
7bd6507eec uses getpwuid() to find user's home dir 2001-03-14 16:05:00 +00:00
Daniel Stenberg
d4cc810de3 added a missing \ 2001-03-14 14:35:35 +00:00
Daniel Stenberg
bea7bbee1b always append the incoming request to the server.input file, it allows
the mainscript to verify a whole series of requests
2001-03-14 14:26:56 +00:00
Daniel Stenberg
fe64570d5d updated to work with the modified http server 2001-03-14 14:26:16 +00:00
Daniel Stenberg
df6ad8d8d6 Added test case 38 2001-03-14 14:25:57 +00:00
Daniel Stenberg
f8e1fc32de Edin Kadribaic's bug report #408488 forced a rearrange of two struct fields
from urldata to connectdata, quite correctly.
2001-03-14 14:11:11 +00:00
Daniel Stenberg
8c6d56f1f9 Added the --egd-file and --random-file options 2001-03-14 11:47:55 +00:00
Daniel Stenberg
1841c8ee6a curl 7.7 beta 3 2001-03-14 11:25:44 +00:00
Daniel Stenberg
70793595fe removed the two unnecessary include files 2001-03-14 10:27:13 +00:00
Daniel Stenberg
28a8e1602d ssluse fixed, various win32 fixes 2001-03-14 10:21:52 +00:00
Daniel Stenberg
cce05b9138 Björn Stenberg corrected the silly '(void)data' usage when SSL is not
used
2001-03-14 10:15:42 +00:00
Daniel Stenberg
72a7fd4dc7 Jörn's updated file 2001-03-14 10:06:23 +00:00
Daniel Stenberg
9a6a476cf5 the URL escape/unescape functions are also public but undocumented 2001-03-14 08:59:34 +00:00
Daniel Stenberg
5d0efedd2d First Jörn's updates were applied, then
my take at removing the private functions from the list, then I renamed
the *str(n)equal functions...
2001-03-14 08:58:36 +00:00
Daniel Stenberg
a426818a78 no longer includes the curl/types.h and curl/easy.h include files
explicitly, as they're taken care of indirectly by curl/curl.h these
days.
2001-03-14 08:55:17 +00:00
Daniel Stenberg
bfe413d8bd increased the 'current' number for the interface 2001-03-14 08:54:18 +00:00
Daniel Stenberg
dbbd20646f Curl_str(n)equal renamed to curl_str(n)equal 2001-03-14 08:53:31 +00:00
Daniel Stenberg
b8fe4deb13 documented the undocumented public functions in libcurl 2001-03-14 08:51:04 +00:00
Daniel Stenberg
332a016e3c chunked bugfix, Jörn's fixes, the interface number increase 2001-03-14 08:49:11 +00:00
Daniel Stenberg
3738e4bdc0 The Curl_* prefixes are now changed for curl_* ones, as these two functions
are used externally and thus are public symbols.
2001-03-14 08:47:56 +00:00
Daniel Stenberg
3201d2dafa Jörn added "#define socklen_t int" 2001-03-14 08:28:54 +00:00
Daniel Stenberg
0a1e002ca4 Jörn fixed it to compile on win32 again 2001-03-14 08:28:19 +00:00
Daniel Stenberg
9195bb64d4 Jörn Hartroth added a set of files 2001-03-14 08:23:51 +00:00
Daniel Stenberg
11ee547a0e Jörn Hartroth fixed a bad #endif placement 2001-03-14 08:20:41 +00:00
Daniel Stenberg
147de35d41 re-added the default switch for weird states 2001-03-13 23:29:53 +00:00
Daniel Stenberg
e16e9b91ae removed the random seeding and persistant stuff, as both are already in
this version!
2001-03-13 22:31:56 +00:00
Daniel Stenberg
f9cde0646f Added a failf() error message when the chunked read returns failure 2001-03-13 22:20:14 +00:00
Daniel Stenberg
195233ed5c updated the chunked state-machine to deal with the trailing CRLF that comes
after the data part
2001-03-13 22:16:42 +00:00
Daniel Stenberg
048e654514 made 'X to Y' sequences not include X twice 2001-03-13 22:14:53 +00:00
Daniel Stenberg
dfbd45142d corrected the chunked format 2001-03-13 22:13:06 +00:00
Daniel Stenberg
ff681f7bfd 7.7 beta 2 fixes 2001-03-13 15:44:31 +00:00
Daniel Stenberg
60bbb64a81 EXTRA_DIST got too long, I shortened it now but we have to do something
else as it will grow a lot more...
2001-03-13 13:31:14 +00:00
Daniel Stenberg
c622f2bb4e failf() now respects the mute flag 2001-03-13 13:22:58 +00:00
Daniel Stenberg
cd59f13da6 Guenole Bescon's bug found on march 8 is added 2001-03-13 13:14:21 +00:00
Daniel Stenberg
11d718bf52 exchanged I and me to we and us in a lot of places
updated for persistant connections and 7.7
2001-03-13 11:47:30 +00:00
Daniel Stenberg
8e8846d876 Added test case 37, HTTP GET with name+password in the URL 2001-03-13 09:44:09 +00:00
Daniel Stenberg
7d562bb685 a whole new section on persitant connections and how they're treated
internally
2001-03-13 08:16:54 +00:00
Daniel Stenberg
20ddd35669 we speak HTTP 1.1 now
more bragging about the portability
2001-03-13 08:16:25 +00:00
Daniel Stenberg
063f88cd14 close policies 2001-03-13 07:59:19 +00:00
Daniel Stenberg
87b0b7cab9 initial close policy support 2001-03-13 07:54:18 +00:00
Daniel Stenberg
70d0d9d4da Added 'created' to the connectdata struct to hold the creation date, to
be used for the close policy decision
2001-03-13 07:53:59 +00:00
Daniel Stenberg
4ae3bd71ea Curl_tvnow is now properly declared with (void) 2001-03-13 07:53:06 +00:00
Daniel Stenberg
a9390665b8 curl_getinfo is removed, not a public function 2001-03-13 07:46:19 +00:00
Daniel Stenberg
fb7a6e3423 added --random-file and --egd-file to the command line client 2001-03-12 16:02:29 +00:00
Daniel Stenberg
cc99e3f7de Added the two new seeding options 2001-03-12 15:52:18 +00:00
Daniel Stenberg
e6b40bb6ac two new random seed options for the ssl config struct 2001-03-12 15:47:41 +00:00
Daniel Stenberg
f2fd1b8856 two new random seed options: CURLOPT_RANDOM_FILE and CURLOPT_EGDSOCKET 2001-03-12 15:47:17 +00:00
Daniel Stenberg
cb4efcf275 better chunked error detection 2001-03-12 15:29:04 +00:00
Daniel Stenberg
56a27d608a Added test case 36:
[HTTP GET with badly formatted chunked Transfer-Encoding]
2001-03-12 15:27:01 +00:00
Daniel Stenberg
46c9075eab updated the comment for the chunked reading 2001-03-12 15:21:11 +00:00
Daniel Stenberg
d95fa648e9 made it return illegal hex in case no hexadecimal digit was read when at
least one was expected
2001-03-12 15:20:35 +00:00
Daniel Stenberg
563ad213dc added an error code for illegal hex values in the chunked stream 2001-03-12 15:20:02 +00:00
Daniel Stenberg
0121d7d731 Added new libcurl options in include/curl/curl.h, they're documented in
curl_easy_setopt.3 and they're partly implemented in lib/url.c

Slowly, we're getting there...
2001-03-12 15:11:38 +00:00
Daniel Stenberg
8495fac1c5 Added options for the persistant support, they're also documented in
curl_easy_setopt.3 now
2001-03-12 15:06:29 +00:00
Daniel Stenberg
38c349f751 support for a few new libcurl 7.7 CURLOPT_* options added 2001-03-12 15:05:54 +00:00
Daniel Stenberg
542df800ab Added four new options that come with the new persitant support:
CURLOPT_MAXCONNECTS, CURLOPT_CLOSEPOLICY, CURLOPT_FRESH_CONNECT and
CURLOPT_FORBID_REUSE
2001-03-12 14:54:00 +00:00
Daniel Stenberg
3e88b1cac5 the client is adjusted to work with persistant curl handles, and *gee* it
seems to be working!!!
2001-03-12 13:59:38 +00:00
Daniel Stenberg
d774b10afb Added infof() calls for persistant connection info, we are very likely to
need these at least for debugging 7.7 and probably later as well...
2001-03-12 13:58:03 +00:00
Daniel Stenberg
b449b94393 moved the libcurl init call 2001-03-12 13:57:02 +00:00
Daniel Stenberg
a6cb9b08b2 persistant updates 2001-03-12 13:55:06 +00:00
Daniel Stenberg
440a3101d0 added a note about persitant connections through HTTP proxies 2001-03-12 13:54:46 +00:00
Daniel Stenberg
9778a5356b Added some persistant notes 2001-03-12 13:54:10 +00:00
Daniel Stenberg
de7dcdbc54 modified to make the curl client with persistant connection support do
correct
2001-03-12 13:47:07 +00:00
Daniel Stenberg
070968abbc include the failed test case numbers in the end summary 2001-03-12 13:46:23 +00:00
Daniel Stenberg
e97fc2aab5 Added description of the new test case ranges support 2001-03-12 12:58:57 +00:00
Daniel Stenberg
a23ac24192 made it support test case ranges on the command line, specified as
"X to Y", where X is smaller than Y.
2001-03-12 12:58:30 +00:00
Daniel Stenberg
9ee14644a7 adjusted to work with the HTTP 1.1-speaking libcurl 2001-03-12 12:45:12 +00:00
Daniel Stenberg
c576e114b9 output the protocol data to stderr when verbose is on 2001-03-12 12:44:44 +00:00
Daniel Stenberg
639a7982ba server problems,
libcurl *works* persistant over HTTP proxy!!!!
2001-03-12 10:18:01 +00:00
Daniel Stenberg
5bbe189420 modified Curl_disconnect() so that it unlinks itself from the data struct,
it saves me from more mistakes when the connectindex is -1 ... also, there's
no point in having its parent do it as all parents would do it anyway.
2001-03-12 10:13:42 +00:00
Daniel Stenberg
93ff159e32 split up the big printf() into several ones to never use strings longer
than 509 letters (as newer gcc warns on with -Wall)
2001-03-12 09:47:23 +00:00
Daniel Stenberg
8eb8a0a8e4 bugfix: don't use the connectindex if it is -1 2001-03-12 09:44:57 +00:00
Daniel Stenberg
a4af638867 added persistant connection details 2001-03-12 09:44:08 +00:00
Daniel Stenberg
75a9a87ec2 replaced I and my with we and us 2001-03-12 09:43:43 +00:00
Daniel Stenberg
b5ba011110 updated 2001-03-12 09:42:22 +00:00
Daniel Stenberg
e9b763ff05 use the new name and hostname even though an old connection is reused, since
we can re-use a proxy connection that actually has different host names on
the same connection
2001-03-09 16:50:08 +00:00
Daniel Stenberg
ac0bad2433 remake Host: for each connection and it'll work with proxies too 2001-03-09 16:48:18 +00:00
Daniel Stenberg
67d5c0a970 for HTTP/1.0 we default to non keep-alive connections, but when we get a
1.0-reply from a proxy we use and the Proxy-Connection: keep-alive header
is used, we switch it on and live happily ever after
2001-03-09 16:02:59 +00:00
Daniel Stenberg
580896d615 Added httpversion to the progress struct, we do read it, we can just as well
store it.
2001-03-09 15:58:36 +00:00
Daniel Stenberg
11693c0faa the socklen_t check is more involved now, but works on linux at least 2001-03-09 15:38:59 +00:00
Daniel Stenberg
26cd8eda4a Added socklen_t 2001-03-09 15:24:33 +00:00
Daniel Stenberg
8cd3f44040 added a check for socklen_t
removed the tiny/Makefile that was added accidentaly before
2001-03-09 15:21:00 +00:00
Daniel Stenberg
2b30bfc349 all comments for the former public "low level" interface have been removed
since they were out-of-date and not correct anymore.

moved around some struct fields
2001-03-09 15:19:42 +00:00
Daniel Stenberg
8ec4dba599 removed handles and states from the main structs
renamed prefixes from curl_ to Curl_
made persistant connections work with http proxies (at least partly)
2001-03-09 15:18:25 +00:00
Daniel Stenberg
1efec6572e curl_transfer became Curl_perform() to better match the public name and
use the correct prefix
2001-03-09 15:17:09 +00:00
Daniel Stenberg
781dd7a9bf prefix changes curl_ to Curl_
made it work (partly) with persistant connections for HTTP/1.0 replies
moved the 'newurl' struct field for Location: to the connectdata struct
2001-03-09 15:16:28 +00:00
Daniel Stenberg
beb8761b22 #include <string.h> removed a warning 2001-03-09 15:14:51 +00:00
Daniel Stenberg
071c7de9fe removed curl_read() and curl_write() - they weren't used and the public
"low leve" interface is dumped
2001-03-09 15:14:22 +00:00
Daniel Stenberg
3e7ebcd051 uses socklen_t now 2001-03-09 15:13:34 +00:00
Daniel Stenberg
c67952fc5c curl_ prefix modified to Curl_ 2001-03-09 15:13:11 +00:00
Daniel Stenberg
7d7c24f915 accept() and getsockname() now use socklen_t types, as that was just added
to configure
2001-03-09 15:12:22 +00:00
Daniel Stenberg
0dc8c4d451 use unsigned int hex to receive the hex digit in, caused a warning with
-Wall and a new gcc
2001-03-09 15:11:39 +00:00
Daniel Stenberg
9cf4434ae2 Modified to use Curl_* functions instead of curl_* ones 2001-03-09 15:10:58 +00:00
Daniel Stenberg
8ccd8b6dbc only generate maximum 509 characters in each string 2001-03-09 13:11:28 +00:00
Daniel Stenberg
b4f70aa2c8 version 7.7-beta1 2001-03-08 12:35:51 +00:00
Daniel Stenberg
f54a282ccc persistant adjusts 2001-03-08 12:32:03 +00:00
Daniel Stenberg
2a11bdc216 HTTP HEAD tests 2001-03-08 10:39:36 +00:00
Daniel Stenberg
5cd4c3ed24 return from transfer when all headers have been received and nobody is set,
as is the case when doing HEAD requests
2001-03-08 10:32:27 +00:00
Daniel Stenberg
147a673063 updated for persistant connections 2001-03-08 09:25:09 +00:00
Daniel Stenberg
9ce5827fc1 made it split the version number on - too to make 7.7-blabla make a better
version number define in the header file
2001-03-08 09:23:11 +00:00
Daniel Stenberg
97f1c93674 added lots of numbers for the error codes as they're often printed
and used
2001-03-08 09:04:43 +00:00
Daniel Stenberg
e61ceaf1bd clarified the 0001-files use a bit, I couldn't understand it myself! :-) 2001-03-08 08:33:17 +00:00
Daniel Stenberg
1118612249 Added test #34 - HTTP GET with chunked Transfer-Encoding 2001-03-08 08:30:35 +00:00
Daniel Stenberg
a23db7b7c7 "Transfer-Encoding: chunked" support added 2001-03-07 23:51:41 +00:00
Daniel Stenberg
f6b6dff46a added the http_chunks files 2001-03-07 23:50:00 +00:00
Daniel Stenberg
55b8ceac18 chunked transfer encoding support 2001-03-07 23:28:22 +00:00
Daniel Stenberg
bcf448ee32 connection timeout is in for 7.7 2001-03-07 23:24:23 +00:00
Daniel Stenberg
91e4da7ddb initial chunked transfer-encoding support 2001-03-07 17:12:12 +00:00
Daniel Stenberg
2873c18132 removed compiler warning if HAVE_RAND_STATUS is false 2001-03-07 17:08:20 +00:00
Daniel Stenberg
5dd0a8a63e Added persistant connections blurb even if it doesn't really work yet... 2001-03-06 14:37:37 +00:00
Daniel Stenberg
2103dc41f5 cleaned up for the 7.7 fixes 2001-03-06 12:50:42 +00:00
Daniel Stenberg
2ef13230cb new seeding stuff as mentioned by Albert Chin 2001-03-06 00:04:58 +00:00
Daniel Stenberg
9479ac6dda Added a persistant connection example 2001-03-05 16:56:10 +00:00
Daniel Stenberg
4e878eae79 updated to libcurl 7.7 conditions 2001-03-05 15:51:34 +00:00
Daniel Stenberg
1e8e90a220 mucho updated with new 7.7 concepts 2001-03-05 15:38:06 +00:00
Daniel Stenberg
fe95c7dc34 removed an incorrect comment 2001-03-05 14:52:23 +00:00
Daniel Stenberg
6dae34d5da all test cases run OK now (again) 2001-03-05 14:13:15 +00:00
Daniel Stenberg
36c621c9df more details on debugging with the test suite 2001-03-05 14:08:22 +00:00
Daniel Stenberg
1717963e3d show the ftp server invoke line when -d is used 2001-03-05 14:03:48 +00:00
Daniel Stenberg
4646a1ffa9 talks more on verbose 2001-03-05 14:03:20 +00:00
Daniel Stenberg
0cb4eba002 free the struct on done 2001-03-05 14:01:13 +00:00
Daniel Stenberg
5eba359b5d telnet without any static variables 2001-03-05 13:59:43 +00:00
Daniel Stenberg
07ce7539a8 set download size properly for HTTP downloads 2001-03-05 13:40:31 +00:00
Daniel Stenberg
c21f848c1c enable persistant connections by default 2001-03-05 13:40:08 +00:00
Daniel Stenberg
84e94fda8b remade FILE:// support to look more as the other protocols 2001-03-05 13:39:01 +00:00
Daniel Stenberg
ebd6897b10 runtests -g explained 2001-03-04 18:11:25 +00:00
Daniel Stenberg
5ab8a9d32f persistant support protocol updates 2001-03-04 18:07:13 +00:00
Daniel Stenberg
cf8704ccdf 7.7 alpha 2 commit 2001-03-04 16:34:20 +00:00
Daniel Stenberg
5543c2f11f Added include of easy.h to enable libcurl-using programs to *only* have to
include <curl/curl.h>
2001-03-04 15:32:44 +00:00
Daniel Stenberg
90ac37a683 Curl_http() could crash on connection re-use 2001-03-04 15:25:54 +00:00
Daniel Stenberg
dd893fd8a4 ipv6 fix for the 'port' no longer in urldata 2001-03-03 17:50:01 +00:00
Daniel Stenberg
834f079918 fixed for persistant stuff 2001-03-03 16:28:59 +00:00
Daniel Stenberg
2665c763df latest 2001-03-02 15:38:06 +00:00
Daniel Stenberg
d1cfbd51b5 remade the port number stuff so that following locations work and doing
intermixed HTTP and FTP persistant connections also work!
2001-03-02 15:34:15 +00:00
Daniel Stenberg
a3ba6b7a6a Added the disconnect proto 2001-03-02 07:44:22 +00:00
Daniel Stenberg
415d2e7cb7 removed the slist -functions from here
added the Curl_ftp_disconnect function for FTP-specific disconnects
2001-03-02 07:44:05 +00:00
Daniel Stenberg
af4451ec26 improved connections 2001-03-02 07:43:20 +00:00
Daniel Stenberg
7c6562683a extending connectdata 2001-03-02 07:42:35 +00:00
Daniel Stenberg
b6fa2f882c moved the slist-functions here from FTP since they're more generic than simply
for FTP-stuff
2001-03-02 07:42:11 +00:00
Daniel Stenberg
b6c5da337a strdup() takes a const char * now 2001-03-02 07:41:40 +00:00
Daniel Stenberg
9bc24e4876 cleanup better when connects fail 2001-02-28 14:03:46 +00:00
Daniel Stenberg
4af55809e4 added some infof() calls for persistant info 2001-02-22 23:51:17 +00:00
Daniel Stenberg
9c63fcf210 we only allocate the HTTP struct if we need to 2001-02-22 23:41:15 +00:00
Daniel Stenberg
1f17fb5f89 Now persistant connection download works thanks to the Content-Length taken
into account
2001-02-22 23:32:41 +00:00
Daniel Stenberg
584dbffe60 moved the dynamically set pointers to the connectdata struct 2001-02-22 23:32:02 +00:00
Daniel Stenberg
1c6f6f6972 Douglas R. Horner's corrections applied 2001-02-22 22:33:49 +00:00
Daniel Stenberg
da06a6e7e3 IPv6-adjustments 2001-02-21 17:15:09 +00:00
Daniel Stenberg
46e0937263 corrected memory leaks when re-using connections 2001-02-20 17:46:35 +00:00
Daniel Stenberg
a1d6ad2610 multiple connection support initial commit 2001-02-20 17:35:51 +00:00
Daniel Stenberg
5f3d63ed5b bugfix 2001-02-20 13:58:56 +00:00
Daniel Stenberg
63b5748eb6 -g runs the specified test(s) with gdb! 2001-02-20 13:58:39 +00:00
Daniel Stenberg
e2590430c5 removed the #ifdef 2001-02-20 13:57:50 +00:00
Daniel Stenberg
ada9bc2b24 win32sockets.c is now added with winsock init/cleanup example functions 2001-02-20 13:56:38 +00:00
Daniel Stenberg
43da41e73e Added three tiny PHP examples 2001-02-19 13:39:21 +00:00
Daniel Stenberg
720fa45b56 blurb about different languages and environments added 2001-02-19 13:38:29 +00:00
Daniel Stenberg
7de874c438 just a few PHP/curl examples 2001-02-19 13:38:05 +00:00
Daniel Stenberg
2078c1a01a added two VC++ files for project stuff 2001-02-19 09:29:40 +00:00
Daniel Stenberg
f7a8909372 Made CURLOPT_POST no longer necessary when CURLOPT_POSTFIELDS is used 2001-02-19 09:29:19 +00:00
Daniel Stenberg
250df30e64 Moved a bunch of prototypes from curl.h here, they're no longer public and
I merely stuffed them here before I decide where they belong and if they
are to remain at all
2001-02-19 09:28:10 +00:00
Daniel Stenberg
b887cf7521 removed a bunch of "low level" functions that were never used and are about
to never become reality either
2001-02-19 09:27:12 +00:00
Daniel Stenberg
630e932091 MS VC++ stuff 2001-02-19 09:26:29 +00:00
Daniel Stenberg
cdabd67aa9 Bob Schader updated this 2001-02-19 09:26:01 +00:00
Daniel Stenberg
42e4f9d776 added stuff to the mailing list chapter 2001-02-19 09:25:18 +00:00
Daniel Stenberg
c111033595 removed --continue task (done)
added URL to the NTLM task
2001-02-16 13:41:34 +00:00
Daniel Stenberg
26d1aaccdf 2.2 - rephrased 2001-02-16 13:41:09 +00:00
Daniel Stenberg
ce95d2020f better english timeouted => timed out, as suggested by Larry Fahnoe 2001-02-13 21:57:04 +00:00
Daniel Stenberg
948c3b3aa9 7.6.1 commit 2001-02-13 13:37:14 +00:00
Daniel Stenberg
a140e5311d moved the protocol-specific free to allow easier multiple transfers 2001-02-13 13:34:16 +00:00
Daniel Stenberg
7686ac3f2c ftp response fix, netrc fix for non-http/ftp, https put research 2001-02-12 13:20:04 +00:00
Daniel Stenberg
54778134e4 corrected the prototype 2001-02-12 13:19:09 +00:00
Daniel Stenberg
c59baa06f0 Added 3.10 and a few minor updates 2001-02-12 10:05:09 +00:00
Daniel Stenberg
c107303ade very minor indentation fix 2001-02-12 08:22:19 +00:00
Daniel Stenberg
21b05afc99 removed getenv.h from the package as it was unused 2001-02-12 08:21:45 +00:00
Daniel Stenberg
eebcf7d4f5 Not used anymore 2001-02-09 07:33:58 +00:00
Daniel Stenberg
8d169dfadd Added a failf() call in the error-check just added 2001-02-09 07:14:28 +00:00
Daniel Stenberg
b12e334d83 if netrc is parsed and our host was found in there, set data->bits.user_passwd
unconditionally!
2001-02-08 13:53:13 +00:00
Daniel Stenberg
7e36c4437e today's FTP response check fix 2001-02-08 13:52:38 +00:00
Daniel Stenberg
3c7a80a275 postit.c was added as a HTML form file upload example 2001-02-08 08:26:54 +00:00
Daniel Stenberg
61e2a8108b 7.6.1-pre3 2001-02-07 09:49:06 +00:00
Daniel Stenberg
abb14de7e0 GetLine() didn't properly act on -1 lengths returned from Curl_read() 2001-02-07 09:31:03 +00:00
Daniel Stenberg
ccd57e58f6 Added #define ssize_t int since ssize_t doesn't seem to exist in normal
win32 systems
2001-02-07 09:23:54 +00:00
Daniel Stenberg
58d70db92e no longer #includes "getenv.h" 2001-02-07 08:36:23 +00:00
Daniel Stenberg
09f6fc22ed silly me, corrected the strlcat() to compile 2001-02-06 09:12:39 +00:00
Daniel Stenberg
833ce37cb9 new openbsd inspired implementation of strlcat() 2001-02-06 09:08:24 +00:00
Daniel Stenberg
07e7018564 nntp@iname.com's suggested fix to set the libpath 2001-02-06 07:14:44 +00:00
Daniel Stenberg
db70cd28b3 adjusted the IPv6 stuff to compile and build on Linux as well 2001-02-05 23:35:44 +00:00
Daniel Stenberg
f6e2bfd464 Jun-ichiro itojun Hagino's IPv6 adjustments 2001-02-05 23:04:44 +00:00
Daniel Stenberg
1ae5dab8fb Robert Weaver's VC experiences 2001-02-05 22:35:55 +00:00
Daniel Stenberg
c6355e6a43 Added a telnet section 2001-02-05 22:35:21 +00:00
Daniel Stenberg
7d26eb61fe Added a few more configure option explanations 2001-02-05 10:24:12 +00:00
Daniel Stenberg
8613ce377f the new getinfo() stuff and the cygwin patch 2001-02-04 20:10:52 +00:00
Daniel Stenberg
d6b94488a1 Added blurb about the win32 thing that prevents a DLL from using a pointer
passed to it from user-space!
2001-02-04 20:10:02 +00:00
Daniel Stenberg
5d7b32d09f extended 5.5 2001-02-04 20:08:42 +00:00
Daniel Stenberg
ed16d30ea8 CURLINFO_CONTENT_LENGTH_DOWNLOAD and CURLINFO_CONTENT_LENGTH_UPLOAD documented 2001-02-04 20:07:53 +00:00
Daniel Stenberg
6f7c70fbbc CURLINFO_CONTENT_LENGTH_DOWNLOAD and CURLINFO_CONTENT_LENGTH_UPLOAD were
added as suggested by Bob Schader
2001-02-04 20:03:30 +00:00
Daniel Stenberg
9ab5d30e3b Ingo Ralf Blum made it compile with the newest cygwin 2001-02-04 19:00:27 +00:00
Daniel Stenberg
3b44a3df76 7.6.1-pre2 2001-02-01 07:59:46 +00:00
Daniel Stenberg
572c29a4a3 Added support for --enable-debug 2001-02-01 07:58:49 +00:00
Daniel Stenberg
9464c5430d Curl_read() uses ssize_t now 2001-01-31 15:06:56 +00:00
Daniel Stenberg
a14aaaf23f fixed up the telnet to work (using support from Linus Nielsen) 2001-01-31 15:05:44 +00:00
Daniel Stenberg
c41c5a0ef2 curl_read() and Curl_read() now have ssize_t in the last argument 2001-01-31 13:54:12 +00:00
Daniel Stenberg
c0c0283356 Added a check for a working getaddrinfo() that is required for the IPv6
to be considered enabled
2001-01-31 13:53:31 +00:00
Daniel Stenberg
1bcd3e601a changed order of the range and follow-location checks so that a range-request
will work even when following a Location:
2001-01-30 11:52:59 +00:00
Daniel Stenberg
e721f85c83 new test case 2001-01-29 16:04:19 +00:00
Daniel Stenberg
7015c61b86 removed upload.NN files after each test 2001-01-29 15:07:28 +00:00
Daniel Stenberg
30ec0af109 test case 33, HTTP PUT resume 2001-01-29 14:36:49 +00:00
Daniel Stenberg
f585b66af7 7.6-pre1 2001-01-29 11:36:08 +00:00
Daniel Stenberg
1b77c18430 Added an extra text about % in POST data after comments from Daniel Marell 2001-01-29 10:24:51 +00:00
Daniel Stenberg
bd0bd35771 s/to I/do I/ 2001-01-29 10:16:47 +00:00
Daniel Stenberg
368e3526ea Added "3.9 How to I use curl in PHP?" 2001-01-29 10:16:21 +00:00
Daniel Stenberg
1bbe407a4d The PUT stuff is never set! It is set with the UPLOAD... 2001-01-29 07:26:50 +00:00
Daniel Stenberg
513bc44421 HTTP PUT resume now sends Content-Range: headers as I believe the RFC2616
describes, Bob Schader's research seems to agree.
2001-01-29 07:24:20 +00:00
Daniel Stenberg
4cc76d1576 upload sets HTTP request to PUT for "HTTP upload" 2001-01-29 07:23:11 +00:00
Daniel Stenberg
6dc5c6ffc7 the keep-alive issue 2001-01-27 20:51:31 +00:00
Daniel Stenberg
c69c79dd04 better support for HTTP return codes 300-399 2001-01-27 20:31:51 +00:00
Daniel Stenberg
7fca24b14b PUT resume things
progress meter modified for resume
POST/GET/Location adjustments
2001-01-27 20:25:52 +00:00
Daniel Stenberg
2fa0d3dd5f test case 31 and 32 were added 2001-01-27 20:02:11 +00:00
Daniel Stenberg
3a8210c975 Resume is now done with a Content-Range header instead of a Range header if
the request isn't GET. Presumably, this is how it should be made.
2001-01-27 18:57:07 +00:00
Daniel Stenberg
d69302202d minor output fix 2001-01-27 18:51:10 +00:00
Daniel Stenberg
227662d2ed Added -d that enables easier protocol/server debug overview (it invokes
the servers with their -v options)
2001-01-27 18:50:54 +00:00
Daniel Stenberg
3cb3d43913 added test 29 and 30, HTTP resume and partial download tests 2001-01-27 18:49:48 +00:00
Daniel Stenberg
c8a546c941 The progess meter title get an extra output when a resumed transfer is
taking place
2001-01-27 18:23:59 +00:00
Daniel Stenberg
62fec1d28d data->httpreq was not set properly 2001-01-27 17:58:15 +00:00
Daniel Stenberg
ac98c73b04 7.6 2001-01-27 16:16:54 +00:00
Daniel Stenberg
a145654394 http upload resume 2001-01-26 15:53:33 +00:00
Daniel Stenberg
e8382ba290 moved the symbols talk to the library part, updated slightly to match 2001-01-26 15:52:51 +00:00
Daniel Stenberg
fcb347d124 Added a httpreq field in the main struct so that there's one field to check
for what HTTP request that is being used. The old bit-style fields are still
in there as well.
2001-01-26 15:52:01 +00:00
Daniel Stenberg
c331ef02f9 The check for that content-range is received must only be made if we requested
GET resume. Other resumes are upload-wise and don't care about this header
in the download stream
2001-01-26 15:50:56 +00:00
Daniel Stenberg
3a3f632bf0 Made it possible to do "upload resume" over HTTP 2001-01-26 15:49:39 +00:00
Daniel Stenberg
68d7b6f871 7.6-pre4 2001-01-25 13:48:28 +00:00
Daniel Stenberg
c43a9d9068 timespent is now updated in every call to the progress meter update function 2001-01-25 12:32:40 +00:00
Daniel Stenberg
64e80091db Rick's and Jeff's stuff 2001-01-25 12:31:44 +00:00
Daniel Stenberg
4f255ffbeb make the configure script die if select() or socket() is missing 2001-01-25 12:28:46 +00:00
Daniel Stenberg
80d75b0eaf Added Ingo Ralf Blum 2001-01-25 12:28:10 +00:00
Daniel Stenberg
808c4020e6 use this function only once note added 2001-01-25 12:27:44 +00:00
Daniel Stenberg
149d6363b3 modified the Curl_ConnectHTTPProxyTunnel proto 2001-01-25 12:24:34 +00:00
Daniel Stenberg
30eab8ca51 moved curl_read() and curl_write() to sendf.c 2001-01-25 12:23:57 +00:00
Daniel Stenberg
e49a82b06c converted to use Curl_read() and Curl_write() 2001-01-25 12:23:12 +00:00
Daniel Stenberg
45fdb48189 uses Curl_read() and Curl_write()
unfolded telwrite() instead of being a separate single function
2001-01-25 12:22:17 +00:00
Daniel Stenberg
3fcc9677c4 use recv()/send() instead of read()/write() with sockets 2001-01-25 12:21:10 +00:00
Daniel Stenberg
1552bd9c8c sendf is now only Curl_sendf
Curl_write() and Curl_read() are here
2001-01-25 12:20:30 +00:00
Daniel Stenberg
939c0c5521 removed two compiler warnings 2001-01-25 12:19:36 +00:00
Daniel Stenberg
f0b9aefd2e Curl_read() and Curl_write() are now used for reading/writing sockets.
Some functions changed prototype due to this change as well.
2001-01-25 12:19:02 +00:00
Daniel Stenberg
11f3c51e8f Get get-ftp-response function is now using Curl_read() for reading from a
socket. Curl_ConnectHTTPProxyTunnel changed prototype.
2001-01-25 12:17:07 +00:00
Daniel Stenberg
1a329b98a3 replaced sendf() calls with Curl_sendf() 2001-01-25 12:13:35 +00:00
Daniel Stenberg
29bcba9a90 Ingo Ralf Blum's cygwin fixes 2001-01-24 14:44:05 +00:00
Daniel Stenberg
1716dbb68a Robert Weaver's win32 getenv fix, my added comments in some files 2001-01-24 14:04:47 +00:00
Daniel Stenberg
16ecfcf62c Added Robert Weaver as contributor 2001-01-24 14:04:14 +00:00
Daniel Stenberg
8bafc3692d fixed the comment for 'path' 2001-01-24 14:03:48 +00:00
Daniel Stenberg
8a75120568 added comments all over 2001-01-24 12:32:34 +00:00
Daniel Stenberg
3d96ee7423 extended the gname field one byte to avoid a possible overflow
added lots of explaining comments
2001-01-24 12:10:10 +00:00
Daniel Stenberg
b3dbdfa306 Robert Weaver's fix 2001-01-24 09:01:32 +00:00
Daniel Stenberg
25bad589ba generated by autoheader 2001-01-23 13:00:43 +00:00
Daniel Stenberg
0b6cd75004 ipv6 adjustments 2001-01-23 10:29:16 +00:00
Daniel Stenberg
7872cc131a Enabled support for IPv6-style IP-addresses if ENABLE_IPV6 is set. If it isn't,
curl will return an error when such an address is used.
2001-01-23 10:21:30 +00:00
Daniel Stenberg
210aa4371c big reorg to make it not exit when it fails, but instead just not do any
globbing, it makes IPv6 support easier and smoother to add.
2001-01-23 10:14:43 +00:00
Daniel Stenberg
6f438bc8fb Added 'ipv6 enabled' for ipv6 compiled versions 2001-01-23 08:16:59 +00:00
Daniel Stenberg
65840f1fd1 Added simple IPv6 recognition support 2001-01-22 23:54:54 +00:00
Daniel Stenberg
5fc492e5c6 Björn's progress meter fix, new test cases and ftpserver.pl patch 2001-01-22 16:25:55 +00:00
Daniel Stenberg
abcd1e7d5a Björn Stenberg's patch for making the progress meter better looking 2001-01-22 16:21:05 +00:00
Daniel Stenberg
6429c378a2 the custom reply engine was not inited properly 2001-01-22 16:16:18 +00:00
Daniel Stenberg
d830f10417 test case 121 2001-01-22 16:05:12 +00:00
Daniel Stenberg
3d6fcbf97b Added test case 120, ftp with '-Q -' 2001-01-22 16:00:28 +00:00
Daniel Stenberg
609be218c2 Removed the deprecated -c and -t from the --help output. 2001-01-22 10:09:04 +00:00
Daniel Stenberg
41084e57ca Added 5.5 the CURLOPT_FILE problem on win32, DeYoung provided it! 2001-01-22 08:42:00 +00:00
Daniel Stenberg
9afab85105 Added -g/--globoff description 2001-01-19 12:24:46 +00:00
Daniel Stenberg
7822233964 Made the complaint on free-twice errors a lot better 2001-01-19 12:20:30 +00:00
Daniel Stenberg
022315089b removed URL length restrictions, added a test case 2001-01-19 12:20:02 +00:00
Daniel Stenberg
faa5c14aee No more URL length restrictions 2001-01-19 12:15:23 +00:00
Daniel Stenberg
3dd886955b removed MAX_URL_LENGTH, there is no longer any length restrictions on URLs
anywhere within libcurl
2001-01-19 12:14:55 +00:00
Daniel Stenberg
c2dbf21459 corrected url memory handling with --globoff 2001-01-19 12:14:09 +00:00
Daniel Stenberg
133eb220b9 Added files for test case 28 2001-01-19 12:13:37 +00:00
Daniel Stenberg
c5796d9e39 --globoff test case 2001-01-19 12:12:54 +00:00
Daniel Stenberg
d80f87554c version 7.6-pre3 2001-01-19 09:38:48 +00:00
Daniel Stenberg
c1d37470f6 spelling error FPL should be GPL 2001-01-19 09:38:29 +00:00
Daniel Stenberg
9c695393b2 edited the portable code section 2001-01-19 09:37:39 +00:00
Daniel Stenberg
444024ea14 brought up-to-date and extended 2001-01-17 14:17:49 +00:00
Daniel Stenberg
afcd933b4c Transfer and file renaming 2001-01-17 14:17:26 +00:00
Daniel Stenberg
ae0a6835bd Transfer is now Curl_Tranfer() and transfer.h is used instead of highlevel.h
and download.h
2001-01-17 13:23:01 +00:00
Daniel Stenberg
f2f11be8ba download.[ch] is renamed to transfer.[ch], highlevel.[ch] is history 2001-01-17 13:22:27 +00:00
Daniel Stenberg
e09eda9c7c download and highlevel are replaced with transfer 2001-01-17 13:19:01 +00:00
Daniel Stenberg
c6877a414e clarified that vcvars32.bat is not part of the curl package 2001-01-17 08:24:29 +00:00
Daniel Stenberg
a3eb91ffb1 shortened the "what is libcurl" text 2001-01-15 14:59:07 +00:00
Daniel Stenberg
12708473a6 Added a few more similar tools 2001-01-15 12:12:36 +00:00
Daniel Stenberg
9012f8cdb3 removed an old reference to previous license conditions 2001-01-15 10:28:41 +00:00
Daniel Stenberg
e26ee09586 4.2 and 4.3 were updated 2001-01-15 10:26:37 +00:00
Daniel Stenberg
7d09e51162 TELNET was missing in the basic initial description! Updated the language
in the thread-safe question 5.1 to be more clear.
2001-01-11 12:52:07 +00:00
Daniel Stenberg
18ebde6960 I successfully compiled and built curl for StrongARM NetBSD
Added other known platforms
Added the faked autoconf and autoheader trick posted about recently
2001-01-11 12:33:26 +00:00
Daniel Stenberg
b0c0e8d815 7.6-pre2 2001-01-11 09:29:30 +00:00
Daniel Stenberg
16502d7d15 -g added, no more space requirements between short options and their parameters 2001-01-11 08:02:07 +00:00
Daniel Stenberg
ce05deece8 Added -g, fixed so that short options worked again. My last "merged"fix did
screw a few things up.
2001-01-11 08:01:24 +00:00
Daniel Stenberg
b77e2528e7 made short options and their parameters possible to specify without space
separation
2001-01-10 23:47:08 +00:00
Daniel Stenberg
27f8cf6dfc made "short options" possible to specify -m20 as well as -m 200. 2001-01-10 23:42:03 +00:00
Daniel Stenberg
f5aa7f64bd added missing newlines to two infof() functions about document dates 2001-01-10 22:46:26 +00:00
Daniel Stenberg
44254c4945 getpass_r() fix for SCO (hopefully) 2001-01-10 11:42:00 +00:00
Daniel Stenberg
a9ea507c6a version 7.6-pre1 2001-01-09 12:25:32 +00:00
Daniel Stenberg
b137d5ec23 bugfix for when more -o than URLs is used 2001-01-09 12:25:14 +00:00
Daniel Stenberg
4792eee5d0 multiple URL adjustments 2001-01-09 12:24:49 +00:00
Daniel Stenberg
a84625eca6 Added two tests for multiple URLs (26 + 27) 2001-01-09 12:24:08 +00:00
Daniel Stenberg
19d3fd1185 Loic's fix that removes the % from the instructions in the bottom 2001-01-09 10:09:39 +00:00
Daniel Stenberg
a9be9bc7f5 Additional "docs" about 'make rpms' added by Loic 2001-01-09 10:09:13 +00:00
Daniel Stenberg
e8b99d21e5 Added the curl source-header 2001-01-09 07:41:04 +00:00
Daniel Stenberg
f6c57990ee removed FILES from the RPM 2001-01-08 23:35:45 +00:00
Daniel Stenberg
370d7f7527 Added source header. Made the prototype not being set if HAVE_GETPASS_R is
set, as those systems are likely to have it already set in a system header
and this prototype has proven to cause problems on SCO systems.
2001-01-08 22:30:30 +00:00
Daniel Stenberg
7d38692c4f Added Loic Dachary as a contributor after his major makefile session! 2001-01-08 22:29:31 +00:00
Daniel Stenberg
a997d60304 Loic Dachary's updates to get 'make distcheck' work, including running the
test suite
2001-01-08 22:18:30 +00:00
Daniel Stenberg
ff8fb8cdb0 krb4.c header file, no source header (yet) 2001-01-08 22:02:23 +00:00
Daniel Stenberg
b915ca68f9 'make distcheck' works now 2001-01-08 17:38:23 +00:00
Daniel Stenberg
703fc264f0 Had to add this to get 'make distcheck' to run! 2001-01-08 17:28:53 +00:00
Daniel Stenberg
19d92834ed corrected 2001-01-08 16:32:36 +00:00
Daniel Stenberg
9ade752fa7 distcheck fixes 2001-01-08 16:31:29 +00:00
Daniel Stenberg
e8a5f3026f Added mprintf #include 2001-01-08 16:22:55 +00:00
Daniel Stenberg
2cac4a9c72 better cleanup when exiting due to bad usage 2001-01-08 15:02:58 +00:00
Daniel Stenberg
39e939a507 corrected the separator when using URL globbing 2001-01-08 14:48:34 +00:00
Daniel Stenberg
803005892c mostly a dummy 2001-01-08 14:36:34 +00:00
Daniel Stenberg
08cfdf909e use .spec.in files instead of plain .spec files 2001-01-08 13:42:18 +00:00
Daniel Stenberg
434ce48016 removed multiple URL, we do that now! 2001-01-08 13:40:26 +00:00
Daniel Stenberg
10051e6916 generated file 2001-01-08 13:39:49 +00:00
Daniel Stenberg
d54cdf294b adjusted to work with automake 'make dist' 2001-01-08 13:39:21 +00:00
Daniel Stenberg
2e342d5d9b we're now using automake to build archives, this file is obsolete 2001-01-08 12:58:27 +00:00
Daniel Stenberg
fe84071e80 adjusted to use 'make dist' when building the package 2001-01-08 12:57:38 +00:00
Daniel Stenberg
044ca343ad Loic Dachary's makefile/dist/rpm fixes 2001-01-08 10:00:14 +00:00
Daniel Stenberg
f59ea9adb3 krb4 fix, big symbol renaming action, multiple URL support in the client 2001-01-08 07:45:43 +00:00
Daniel Stenberg
0cec4ba6bf generated 2001-01-08 07:42:35 +00:00
Daniel Stenberg
14ca732a8f Multiple URL support added 2001-01-08 07:37:44 +00:00
Daniel Stenberg
53c27c7722 generated file, don't CVS it 2001-01-08 07:37:13 +00:00
Daniel Stenberg
c2f5b71dc9 multiple uses of -d were wrongly documented 2001-01-05 13:44:53 +00:00
Daniel Stenberg
6403257886 renamed Curl_ to curl_ for the printf() prefixes 2001-01-05 12:19:42 +00:00
Daniel Stenberg
4031104404 Internal symbols that aren't static are now prefixed with 'Curl_' 2001-01-05 10:11:41 +00:00
283 changed files with 10097 additions and 7552 deletions

CHANGES (580 changed lines)

@@ -6,6 +6,586 @@
History of Changes
Version 7.7.1
Daniel (3 April 2001)
- Puneet Pawaia pointed out two serious problems. Libcurl would attempt to
read bad memory when an (ftp) connection attempt failed.
Also, the lib/Makefile.vc6 was corrected.
- More investigation of the Location: following code made me realize that
it was not clean enough to work transparently with persistent and non-
persistent connections. I think I've fixed it now.
Daniel (29 March 2001)
- Georg Horn mailed me some corrections for the Curl::easy perl interface.
- Experimental ftps:// support added. It is basically FTP over SSL for the
control connection; all data transfers still go over unencrypted
connections. Rainer Weikusat's ftpd-ssl server hack supports this and I used
that to verify the functionality.
Daniel (27 March 2001)
- Guenole Bescon discovered that if you set a CURLOPT_TIMEOUT and then tried
to get a file from a site and it failed, the SIGALRM would still be sent
after the timeout time, quite unexpectedly!
- I added an ftp transfer example to docs/examples/ and I also wrote a tiny
example makefile that can be used as a start when building one of the
examples.
Version 7.7.1-beta1
Daniel (26 March 2001)
- Mohamed Lrhazi reported problems with 7.6.1 and persistent HTTP/1.0
connections (when the server replied with a Connection: Keep-Alive header) and this
problem was not properly dealt with in 7.7 either. A patch was posted to the
curl-and-php mailing list.
Daniel (24 March 2001)
- Colin Watson reported a problem and brought a patch that corrected it:
the man page had lines starting with a single quote ('), which gnroff
doesn't like.
Daniel (23 March 2001)
- Peter Bray reported correctly that the root makefile used make instead of
$(MAKE) for the test target.
- Corrected the Curl::easy perl interface to use curl_easy_setopt() and not
curl_setopt() which was removed in 7.7!
- SM provided updates on three documents (MANUAL, INSTALL and FAQ).
- When following a Location:, libcurl would sometimes write to the URL string
in a way it shouldn't. As the pointer is passed in to libcurl from an
application, we must not write to it. The particular bug report from 'nk'
that brought this up involved a read-only URL string, which then caused a
libcurl crash!
- HEAD responses are no longer read past the last header. Previously,
curl would read the full reply if the connection was a "close" one.
- libcurl did re-use connections way too much. Doing "curl
http://www.{microsoft,ibm}.com" would make it re-use the connection which
made the second request return very odd results.
Daniel (22 March 2001)
- Edin Kadribasic made me aware that curl should not re-send POST requests
when following 302-redirects. I made 302 work like 303 which means curl uses
GET in the following request(s).
- libcurl now resets the "followed-location" counter on each invocation of
curl_easy_perform(), as it would otherwise sum up all redirects on the same
connection and thus could hit the maxredirs limit wrongly.
- Jim Drash suggested curl_escape() should not re-encode what already looks
like an encoded sequence and I think that's a fair suggestion.
Version 7.7
Daniel (22 March 2001)
- The configure script now fails with an error message if gethostbyname_r() is
detected but the script couldn't figure out how to invoke it (how many
arguments it takes). Reports from Andrés García made me aware
of this need.
- Talking with Jim Drash made me finally put the curl_escape and curl_unescape
functions in the curl.h include file and write man pages for them. The
escape function was modified to use the same interface as the unescape one
had.
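As a rough illustration of the now-public pair, here is a minimal sketch (the input string is arbitrary and error handling is omitted; in this era of libcurl the returned strings were released with plain free(), later versions use curl_free()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <curl/curl.h>

    int main(void)
    {
      /* a zero length tells curl_escape() to use strlen() on the input */
      char *enc = curl_escape("name=daniel stenberg&tool=curl", 0);
      char *dec = curl_unescape(enc, 0);   /* decode it again */

      printf("escaped:   %s\n", enc);
      printf("unescaped: %s\n", dec);

      free(enc);
      free(dec);
      return 0;
    }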
- No bug reports at all on the latest betas. Release time coming up.
Version 7.7-beta5
Daniel (19 March 2001)
- Georg Ottinger reported problems with using -C together with -L in the sense
that the -C info got lost when it was redirected. I could not repeat this
problem on the 7.7 branch, which is why I leave this for the moment. Test case
39 was added to do exactly this, and it seems to work right.
- Christian Robottom Reis reported how his 7.7 beta didn't successfully do
form posts as elegantly as 7.6.1 did. Indeed, this was a flaw in the header
engine, as HTTP 1.1 has introduced a new 100 "transient" return code for PUT
and POST operations that I need to add support for. Section 8.2.3 in RFC2616
has all the details. Seems to work now!
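For reference, the exchange the header engine now copes with looks roughly like this (a schematic illustration, not captured traffic): the 100 block is provisional and the real status line follows later.

    PUT /upload HTTP/1.1        (request headers, then the body)
    ...
    HTTP/1.1 100 Continue       (provisional reply, ends with an empty line)

    HTTP/1.1 200 OK             (the real reply, with its own headers)
    ...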
Daniel (16 March 2001)
- After having experienced another machine break-down, we're back.
- Georg Horn's perl interface Curl::easy is now included in the curl release
archive. The perl/ directory is now present. Please help me with docs,
examples and updates you think fit.
- Made a new php/ directory in the release archive and moved the PHP examples
into a subdirectory in there. Not much PHP info yet, but I plan to add more. Please
help me here as well!
- Made libcurl return an error if a transfer is aborted in the middle of a
"chunk". This actually enables libcurl to discover premature transfer aborts
even if the Content-Length: size is unknown.
Daniel (15 March 2001)
- Added --connect-timeout to curl, which sets the new CURLOPT_CONNECTTIMEOUT
option in libcurl. It limits the time curl is allowed to spend in the
connection phase. This differs from -m/--max-time that limits the entire
file transfer operation. Requested by Larry Fahnoe and others.
I also updated the curl.1 and curl_easy_setopt.3 man pages and removed the
item from the TODO.
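In libcurl terms the new option is set like any other; a minimal sketch (the URL and the 10 second limit are only examples):

    #include <curl/curl.h>

    int main(void)
    {
      CURL *curl = curl_easy_init();
      if(curl) {
        curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
        /* give up if the connection is not made within 10 seconds;
           CURLOPT_TIMEOUT (-m/--max-time) instead limits the whole transfer */
        curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 10L);
        curl_easy_perform(curl);
        curl_easy_cleanup(curl);
      }
      return 0;
    }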
Version 7.7-beta4
Daniel (14 March 2001)
- Made curl grok IPv6 with HTTP proxies and got everything to compile nicely
again when ENABLE_IPV6 is set.
I need to remake things in the test suite. I can't test the FTP parts with
curl built for IPv6 as it uses a different set of FTP commands then!
- I stumbled on a bug report on php.net (posted by Lars Torben Wilson) that was
actually meant for our project. It said the .netrc parsing didn't work as
expected, and since I agreed with Lars, I made the netrc parser use
getpwuid() to figure out the home directory of the effective user and try
that .netrc. It still falls back to the environment variable HOME on systems
that don't have that function or when getpwuid() doesn't return valid pwd info.
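The lookup order described above amounts to roughly the following (a simplified sketch, not the actual lib/netrc.c code; find_home is a made-up helper name):

    #include <sys/types.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <pwd.h>

    /* figure out which home directory to check for a .netrc file */
    static const char *find_home(void)
    {
      struct passwd *pw = getpwuid(geteuid());
      if(pw && pw->pw_dir && pw->pw_dir[0])
        return pw->pw_dir;       /* valid pwd info for the effective user */
      return getenv("HOME");     /* fall back to the environment variable */
    }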
- Edin Kadribaic posted a bug report where he got a crash when a fetch with
user+password in the URL followed a Location: to a second URL (absolute,
without name+password). This bug has been around for a long while and
crashed due to a read at address zero. Fixed now. Wrote test case 38 to
test this.
- Modified the test suite's httpserver slightly to append all client request
data to its log file so that the test script can now better verify a range
of requests and not only the last one, as it did previously.
- Updated the curl man page with --random-file and --egd-file details.
Version 7.7-beta3
Daniel (14 March 2001)
- Björn Stenberg provided similar fixes as Jörn did and some additional patches
for non-SSL compiles.
- I increased the interface number for libcurl as I've removed the low level
functions from the interface. I also took this opportunity to rename the
Curl_strequal function to curl_strequal and Curl_strnequal to
curl_strnequal, as they're public libcurl functions (even if they're still
undocumented).
This means older programs will not be able to use the new libcurl as
just a drop-in replacement.
- Jörn Hartroth updated stuff for win32 compiles:
o config-win32.h was fixed for socklen_t
o lib/ssluse.c had a bad #endif placement
o lib/file.c was made to compile on win32 again
o lib/Makefile.m32 was updated with the new files
o lib/libcurl.def matches the current interface state
Daniel (13 March 2001)
- It only took an hour or so before Jörn Hartroth found a problem in the
chunked transfer-encoding. Given his fine example-site, I could easily spot
the problem and when I re-read the spec (the part I have pasted in the top
of the http_chunks.h file), I realized I had made my state-machine slightly
wrong and didn't expect/handle the trailing CRLF that comes after the data
in each chunk (and those extra two bytes sure feel wasted).
Had to modify test case 34 to match this as well.
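For reference, each chunk on the wire is a hexadecimal length, a CRLF, the data, and then the trailing CRLF the state-machine initially missed; a zero-length chunk ends the body (schematic only, 0x1a is 26 bytes):

    1a\r\n
    <26 bytes of chunk data>\r\n
    0\r\n
    \r\n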
Version 7.7-beta2
Daniel (13 March 2001)
- Added the policy stuff to the curl_easy_setopt man page for the two supported
policies.
- Implemented some support for the CURLOPT_CLOSEPOLICY option. The policies
CURLCLOSEPOLICY_LEAST_RECENTLY_USED and CURLCLOSEPOLICY_OLDEST are now
supported, and the "least recently used" is used as default if no policy
is chosen.
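Selecting a policy is a plain curl_easy_setopt() call; a minimal sketch (it only matters once the handle caches several connections):

    #include <curl/curl.h>

    void pick_close_policy(CURL *curl)
    {
      /* when a cached connection has to be closed, drop the least recently
         used one; CURLCLOSEPOLICY_OLDEST would drop the oldest instead */
      curl_easy_setopt(curl, CURLOPT_CLOSEPOLICY,
                       CURLCLOSEPOLICY_LEAST_RECENTLY_USED);
    }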
Daniel (12 March 2001)
- Added CURLOPT_RANDOM_FILE and CURLOPT_EGDSOCKET to libcurl for seeding the
SSL random engine. The random seeding support was also brought to the curl
client with the new options --random-file <file> and --egd-file <file>. I
need some people to really test this to know they work as intended. Remember
that libcurl now informs (if verbose is on) if the random seed is considered
weak (HTTPS connections).
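In libcurl terms, a sketch of the two new options that --random-file and --egd-file map onto (the paths are only examples of typical seed sources):

    #include <curl/curl.h>

    void seed_ssl_randomness(CURL *curl)
    {
      /* read seed data for the SSL random engine from a file... */
      curl_easy_setopt(curl, CURLOPT_RANDOM_FILE, "/dev/urandom");
      /* ...and/or from an Entropy Gathering Daemon socket */
      curl_easy_setopt(curl, CURLOPT_EGDSOCKET, "/var/run/egd-pool");
    }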
- Made the chunked transfer-encoding engine detect badly formatted data lengths
and return an error if so (we can't possibly extract sensible data if this is
the case). Added a test case, number 36, that detects this. Now there are 60
test cases.
- Added 5 new libcurl options to curl/curl.h that can be used to control the
persistent connection support in libcurl. They're also documented (fairly
thoroughly) in the curl_easy_setopt.3 man page. Three of them are now
implemented, although not really tested at this point... Anyway, the newly
implemented options are named CURLOPT_MAXCONNECTS, CURLOPT_FRESH_CONNECT and
CURLOPT_FORBID_REUSE. The ones still left to write code for are:
CURLOPT_CLOSEPOLICY and its related option CURLOPT_CLOSEFUNCTION.
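A sketch of how the three implemented options combine (the values are only illustrative):

    #include <curl/curl.h>

    void tune_connection_cache(CURL *curl)
    {
      /* keep up to 10 connections cached in this handle for later re-use */
      curl_easy_setopt(curl, CURLOPT_MAXCONNECTS, 10L);
      /* make the next transfer open a brand new connection... */
      curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1L);
      /* ...and forbid that connection from being re-used afterwards */
      curl_easy_setopt(curl, CURLOPT_FORBID_REUSE, 1L);
    }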
- Made curl (the actual command line tool) use the new libcurl 7.7 persistent
connection support by re-using the same curl handle for every specified file
transfer. After some more test case tweaking, we have 100% of the test cases OK.
I made some test cases return HTTP/1.0 now to make sure that works as well.
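The re-use boils down to something like this (a minimal sketch; a real client sets many more options and checks return codes):

    #include <curl/curl.h>

    int main(int argc, char **argv)
    {
      CURL *curl = curl_easy_init();
      int i;

      /* one handle, many transfers: connections to the same host stay
         open between the curl_easy_perform() calls and get re-used */
      for(i = 1; curl && i < argc; i++) {
        curl_easy_setopt(curl, CURLOPT_URL, argv[i]);
        curl_easy_perform(curl);
      }
      curl_easy_cleanup(curl);
      return 0;
    }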
- Had to add 'Connection: close' to the headers of a bunch of test cases so
that curl behaves "old-style" since the test http server doesn't do multiple
connections... Now I get 100% test case OK.
- The curl.haxx.se site, the main curl mailing list and my personal email are
all dead today due to power blackout in the area where the main servers are
located. Horrible.
- I've made persistence work over a squid HTTP proxy. I find it disturbing
that it uses headers that aren't present in any HTTP standard though
(Proxy-Connection:), and that makes me feel that I'm now on the edge of what
the standard actually defines. I need to get this code exercised on a lot
of different HTTP proxies before I feel safe.
Now I'm facing the problem that my test suite servers (both FTP and HTTP)
don't support persistent connections while libcurl now does them. I have
to fix the test servers to get all the test cases to pass OK.
Daniel (8 March 2001)
- Guenole Bescon reported that libcurl output errors to stderr even if
MUTE and NOPROGRESS were set. It turned out to be a bug that happens when
there's an error and no ERRORBUFFER is set. This is now corrected.
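For reference, a sketch of the intended behaviour: a program that wants errors
delivered in a buffer instead of on stderr sets an error buffer (URL made up):

  #include <stdio.h>
  #include <curl/curl.h>

  int main(void)
  {
    char errbuf[CURL_ERROR_SIZE];
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
      curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 1L);   /* no progress meter */
      curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, errbuf);
      if(curl_easy_perform(curl) != CURLE_OK)
        fprintf(stderr, "transfer failed: %s\n", errbuf);
      curl_easy_cleanup(curl);
    }
    return 0;
  }
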
Version 7.7-beta1
Daniel (8 March 2001)
- "Transfer-Encoding: chunked" is no longer any trouble for libcurl. I've
added two source files and I've run some test downloads that look fine.
- HTTP HEAD works too, even on 1.1 servers.
Daniel (5 March 2001)
- The current 57 test cases now pass OK. This suggests that libcurl works
in the old style, with one connection per handle. The test suite doesn't
handle multiple connections yet so there are no test cases for this.
- I patched the telnet.c heavily to not use any global variables anymore. It
should make it a lot nicer library-wise.
- The file:// support was modified slightly to use the internal connect-first-
then-do approach.
Daniel (4 March 2001)
- More bugs erased.
Version 7.7-alpha2
Daniel (4 March 2001)
- Now, there's even a basic check that a re-used connection is still alive
before it is assumed so. A few first tests have proven that libcurl will
then re-connect instead of re-using the dead connection!
Daniel (2 March 2001)
- Now they work intermixed as well. Major coolness!
- More fiddling around, my 'tiny' client I have for testing purposes now has
proved to download both FTP and HTTP with persistent connections. They do
not work intermixed yet though.
Daniel (1 March 2001)
- Wilfredo Sanchez pointed out a minor spelling mistake in a man page and that
curl_slist_append() should take a const char * as second argument. It does
now.
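A tiny sketch of curl_slist_append() in use; since the second argument is a
const char *, passing a string literal like the made-up header below is fine:

  #include <curl/curl.h>

  int main(void)
  {
    struct curl_slist *headers = NULL;
    CURL *curl = curl_easy_init();

    headers = curl_slist_append(headers, "X-Example: yes");
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
      curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    curl_slist_free_all(headers);
    return 0;
  }
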
Daniel (22 February 2001)
- The persistent connections start to look good for HTTP. On a subsequent
request, it seems that libcurl now can pick an already existing connection
if a suitable one exists, or it opens a new one.
- Douglas R. Horner mailed me corrections to the curl_formparse() man page
that I applied.
Daniel (20 February 2001)
- Added the docs/examples/win32sockets.c file for our windows friends.
- Linus Nielsen Feltzing provided brand new TELNET functionality and
improvements:
* Negotiation is now passive. Curl does not negotiate until the peer does.
* Possibility to set negotiation options on the command line, currently only
XDISPLOC, TTYPE and NEW_ENVIRON (called NEW_ENV).
* Now sends the USER environment variable if the -u switch is used.
* Use -t to set telnet options (Linus even updated the man page, awesome!)
- Haven't done changes this big to curl for a while. Moved around a lot of
struct fields and stuff to make multiple connections get connection-specific
data in separate structs so that they can co-exist in a nice way. See the
mailing lists for discussions around how this is gonna be implemented. Docs
and more will follow.
Studied the HTTP RFC to better understand how persistent connections should
work. Seems cool enough.
Daniel (19 February 2001)
- Bob Schader brought me two files that help set up a MS VC++ libcurl project
easier. He also provided me with an up-to-date libcurl.def file.
- I moved a bunch of prototypes from the public <curl/curl.h> file to the
library private urldata.h. This is because of the upcoming changes. The
low-level interface is no longer planned to become reality.
Daniel (15 February 2001)
- CURLOPT_POST is not required anymore. Just setting the POST string with
CURLOPT_POSTFIELDS will switch on the HTTP POST. Most other things in
libcurl already work this way, i.e. they require only the parameter to
switch on a feature, so I think this works well with the rest. Setting a NULL
string switches off the POST again.
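A minimal sketch of the new behaviour (URL and field data invented): setting
the POST string alone is enough, no CURLOPT_POST needed:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/form");
      /* setting the POST data by itself switches the request to POST */
      curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "name=daniel&tool=curl");
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    return 0;
  }
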
- Excellent suggestions from Rich Gray, Rick Jones, Johan Nilsson and Bjorn
Reese helped me define how to incorporate persistent connections into
libcurl in a very smooth way. If done right, no change may have to be made
to older programs and they will just start using persistent connections when
applicable!
Daniel (13 February 2001)
- Changed the word 'timeouted' to 'timed out' in two different error messages.
Suggested by Larry Fahnoe.
Version 7.6.1
Daniel (9 February 2001)
- Frank Reid and Cain Hopwood provided information and research around a HTTPS
PUT/upload problem we seem to have. No solution found yet.
Daniel (8 February 2001)
- An interesting discussion: how do you specify an empty password without
having curl ask for it interactively? The current implementation takes an
empty password as a request for a password prompt. However, I still want to
support a blank user field. Thus, today if you enter "-u :" (without user
and password) curl will prompt for the password. Tricky. How would you
specify you want the prompt otherwise?
- Made the netrc parse result possible to use for other protocols than FTP and
HTTP (such as the upcoming TELNET fixes).
- The previously mentioned "MSVC++ problems" turned out to be a non-issue.
- Added a HTTP file upload code example in the docs/examples/ section on
request.
- Adjusted the FTP response fix slightly.
Version 7.6.1-pre3
Daniel (7 February 2001)
- SM found a flaw in the response reading function for FTP that could make
libcurl not get out of the loop properly when it should, if libcurl got -1
returned when reading the socket.
- I found a similar mistake in http.c when using a proxy and reading the
results from the proxy connection.
Daniel (6 February 2001)
- A friendly person named "SM" (nntp at iname.com) pointed out that the VC
makefile in src/ needed the libpath set for the debug build to work.
- Daniel Gehriger stepped in to assist with the VC++ stuff Robert Weaver
brought up yesterday.
Daniel (5 February 2001)
- Jun-ichiro itojun Hagino brought a big patch that brings IPv6-awareness to
a bunch of different areas within libcurl.
- Robert Weaver told me about the problems the MS VC++ 6.0 compiler has with
the 'static' keyword on a number of libcurl functions. I might need to add a
patch that redefines static when libcurl is compiled with that compiler.
How do I know when VC++ compiles, anyone?
Daniel (4 February 2001)
- curl_getinfo() was extended with two new options:
CURLINFO_CONTENT_LENGTH_DOWNLOAD and CURLINFO_CONTENT_LENGTH_UPLOAD. They
return the full assumed content length of the transfer in the given
direction. The CURLINFO_CONTENT_LENGTH_DOWNLOAD will be the Content-Length:
size of a HTTP download. Added descriptions to the man page as well. This
was done after discussions with Bob Schader.
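Sketched with the easy interface and a made-up URL; the value comes back as a
double once the transfer has run:

  #include <stdio.h>
  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      double clen = -1.0;
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/file");
      curl_easy_perform(curl);
      if(curl_easy_getinfo(curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &clen) == CURLE_OK)
        printf("assumed download size: %.0f bytes\n", clen);
      curl_easy_cleanup(curl);
    }
    return 0;
  }
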
Daniel (3 February 2001)
- Ingo Ralf Blum provided another fix that makes curl build under the more
recent cygwin installations. It seems they've changed the preset defines to
not include WIN32 anymore.
Version 7.6.1-pre2
Daniel (31 January 2001)
- Curl_read() and curl_read() now return a ssize_t for the size, as they had
to be able to return -1. The telnet support crashed due to this and there
was a possibility of weird behaviour all over. Linus Nielsen Feltzing helped me
find this.
- Added a configure.in check for a working getaddrinfo() if IPv6 is requested.
I also made the configure script feature --enable-debug which sets a couple
of compiler options when used. It assumes gcc.
Daniel (30 January 2001)
- I finally took a stab at the long-term FIXME item I've had on myself, and
now libcurl will properly work when doing a HTTP range-request that follows
a Location:. Previously that would make libcurl fail saying that the server
doesn't seem to support range requests.
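In application terms that is roughly the combination below, sketched with a
made-up URL and range:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/redirected");
      curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
      /* ask for the first kilobyte only, also after the Location: is followed */
      curl_easy_setopt(curl, CURLOPT_RANGE, "0-1023");
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    return 0;
  }
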
Daniel (29 January 2001)
- I added a test case for the HTTP PUT resume thing (test case 33).
Version 7.6.1-pre1
Daniel (29 January 2001)
- Yet another Content-Range change. Ok now? Bob Schader checks from his end
and it works for him.
Daniel (27 January 2001)
- So the HTTP PUT resume fix wasn't good. There should apparently be a
Content-Range header when resuming a PUT.
- I noticed I broke the download-check that verifies that a resumed HTTP
download is actually resumed. It got broken because of my new 'httpreq' field
in the main curl struct. I should get slapped. I added a test case for
this now, so I won't be able to ruin this again without noticing.
- Added a test case for content-length verifying when downloading HTTP.
- Made the progress meter title say if the transfer is being transferred. It
makes the output slightly better for resumes.
- When dealing with Location: and HTTP return codes, libcurl will now attempt
to follow the spirit of RFC2616 better. It means that when POSTing to a
URL that is being followed to a second place, the standard will judge on
what to do. All HTTP codes except 303 and 305 will cause curl to make a
second POST operation. 303 will make a GET and 305 is not yet supported.
I also wrote two test cases for this POST/GET/Location stuff.
Version 7.6
Daniel (26 January 2001)
- Lots of mails back and forth with Bob Schader finally made me add a small
piece of code in the HTTP engine so that HTTP upload resume works. You can
now do an operation like 'curl -T file -C <offset> <URL>' and curl will PUT
the ending part of the file starting at the given offset to the specified URL.
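The library-side equivalent looks roughly like the sketch below, using
present-day option names (which may differ slightly from the 7.6-era ones)
and a made-up offset, file and URL:

  #include <stdio.h>
  #include <curl/curl.h>

  int main(void)
  {
    FILE *src = fopen("file", "rb");
    CURL *curl = curl_easy_init();
    if(curl && src) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/upload/file");
      curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);     /* PUT the data */
      curl_easy_setopt(curl, CURLOPT_READDATA, src);  /* read from this FILE */
      /* tell libcurl the upload resumes at this offset... */
      curl_easy_setopt(curl, CURLOPT_RESUME_FROM, 5000L);
      /* ...and position the source file there ourselves */
      fseek(src, 5000L, SEEK_SET);
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    if(src)
      fclose(src);
    return 0;
  }
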
Version 7.6-pre4
Daniel (25 January 2001)
- I took hold of Rick Jones' question why we don't use recv() and send() for
reading/writing to the sockets and I've now modified the sread() and
swrite() macros to use them instead. If nothing else, they could be tested
in the next beta-round coming right up.
- Jeff Morrow found a problem with libcurl's usage of SSL_read() and supplied
his research results in how to fix this. It turns out we have to invoke the
function several times in some cases. The same goes for the SSL_write().
I made some rather drastic changes all over libcurl to make all writes and
reads get done in one single place so that this repeated-attempts thing
would only have to be implemented at one point.
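The general shape of that repeated-attempts logic, sketched directly on top of
the OpenSSL API for illustration (this is not the actual libcurl code):

  #include <openssl/ssl.h>

  /* read up to 'len' bytes, repeating the call while OpenSSL asks for it */
  int ssl_read_retry(SSL *ssl, char *buf, int len)
  {
    for(;;) {
      int rc = SSL_read(ssl, buf, len);
      if(rc > 0)
        return rc; /* got data */
      switch(SSL_get_error(ssl, rc)) {
      case SSL_ERROR_WANT_READ:
      case SSL_ERROR_WANT_WRITE:
        continue;  /* the call must simply be invoked again */
      default:
        return -1; /* real error or closed connection */
      }
    }
  }
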
- Rick Jones spotted that the 'total time' counter really didn't measure the
total time very accurately at subsecond levels.
- Johan Nilsson pointed out the need to more clearly specify that the timeout
value you set for a download is for the *entire* download. There's currently
no option available that sets a timeout for the connection phase only.
Daniel (24 January 2001)
- Ingo Ralf Blum submitted a series of patches required to get curl to compile
properly with cygwin.
- Robert Weaver posted a fix for the win32 section of the curl_getenv() code
that corrected a potential memory leak.
- Added comments in a few files in a sudden attempt to make the sources
easier to read and understand!
Daniel (23 January 2001)
- Added simple IPv6 detection in the configure script and made the version
string add 'ipv6' to the enable section in that case. ENABLE_IPV6 will be
set if curl is compiled with IPv6 support enabled.
- Added a parser for IPv6-style specified IP-addresses in a URL. Thus, when
IPv6 gets enabled soon, we can use URLs like '[0::1]:80'...
- Made the URL globbing in the client fail silently if there's an error in
the glob syntax. This makes it almost intuitive: when you don't follow the
syntax rules, globbing is simply switched off and the raw string
is used instead.
I still think we'll get problems with IPv6-style IP-addresses when we *want*
globbing on parts of the URL as the initial part of the URL will for sure
seriously confuse the globber.
Daniel (22 January 2001)
- Björn Stenberg supplied a progress meter patch that makes it look better even
during slow starts. Previously it made some silly assumptions...
- Added two FTP tests for -Q and -Q - stuff since it was being discussed on
the mailing list. Had to correct the ftpserver.pl too, as it was slightly buggy.
Daniel (19 January 2001)
- Made the Location: parsers deal with any-length URLs. Thus I removed the last
code that restricts the length of URLs that curl supports.
- Added a --globoff test case (#28) and it quickly identified a memory problem
in src/main.c that I took care of.
Version 7.6-pre3
Daniel (17 January 2001)
- Made the two former files lib/download.c and lib/highlevel.c become the new
lib/transfer.c which makes more sense. I also did the rename from Transfer()
to Curl_Transfer() in the other source files that use the transfer function
in the spirit of using Curl_ prefix for library-scoped global symbols.
Daniel (11 January 2001)
- Added -g/--globoff that switches OFF the URL globbing and thus enables {}[]
letters to be part of the URL. Do note that RFC2396 section 2.4.3 explicitly
mentions that these letters should be escaped. This was posted as a feature
request by
Jorge Gutierrez and as a bug by Terry.
- Short options to curl that require parameters can now be specified without
having the option and its parameter separated by a space. -ofile works as
well as -o file. -m20 is equal to -m 20. Do note that this goes for single-letter
options only, verbose --long-style options still must be separated with
space from their parameters.
Daniel (8 January 2001)
- Francis Dagenais reported that the SCO compiler still fails when compiling
curl due to that getpass_r() prototype. I've now put it around #ifndef
HAVE_GETPASS_R in an attempt to please the SCO systems.
- Made some minor corrections to get the client to cleanup properly and I made
the separator work again when getting multiple globbed URLs to stdout.
- Worked with Loic Dachary to get the make dist and make distcheck work
correctly. The 'maketgz' script is now using the automake generated 'make
dist' when creating release archives. Loic successfully made 'make rpms'
automatically build RPMs!
Loic Dachary (6 January 2001)
- Automated generation of rpm packages, no need to be root.
- make distcheck generates a proper distribution (EXTRA_DIST
in all Makefile.am modified to match FILES).
Daniel (5 January 2001)
- Huge client-side hack: now multiple URLs are supported. Any number of URLs
can be specified on the command line, and they'll all be downloaded. There
must be a corresponding -o or -O for each URL or the data will be written to
stdout. This needs more testing, time to release a 7.6-pre package.
- The krb4 support was broken in the release. Fixed now.
- Huge internal symbol rename operation. All non-static but still lib-internal
symbols should now be prefixed with 'Curl_' to prevent collisions with other
libs. All public symbols should be prefixed with 'curl_' and the rest should
be static and thus invisible to the outside world. I updated the INTERNALS
document to say this as well.
Version 7.5.2
Daniel (4 January 2001)


@@ -10,11 +10,13 @@ memanalyze.pl is for analyzing the output generated by curl if -DMALLOCDEBUG
Makefile.dist is included as the root Makefile in distribution archives
perl/ is a subdirectory with various perl scripts
perl/contrib/ is a subdirectory with various perl scripts
To build after having extracted everything from CVS, do this:
% automake
% autoconf
% ./configure
% make
automake
aclocal
autoheader
autoconf
./configure
make

FILES

@@ -1,86 +0,0 @@
CHANGES
FILES
LEGAL
MPL-1.1.txt
MITX.txt
README
docs/BUGS
docs/CONTRIBUTE
docs/FAQ
docs/FEATURES
docs/INSTALL
docs/INTERNALS
docs/MANUAL
docs/README.win32
docs/LIBCURL
docs/RESOURCES
docs/TODO
docs/curl.1
docs/Makefile.in
docs/Makefile.am
docs/TheArtOfHttpScripting
docs/*.3
docs/examples/README
docs/examples/*.c
maketgz
Makefile.in
Makefile.am
acconfig.h
acinclude.m4
aclocal.m4
config.guess
config.h.in
config-win32.h
config.sub
configure
configure.in
install-sh
missing
mkinstalldirs
reconf
stamp-h.in
ltconfig
ltmain.sh
src/config-win32.h
src/hugehelp.c
src/main.c
src/setup.h
src/urlglob.c
src/urlglob.h
src/version.h
src/writeout.c
src/writeout.h
src/*.in
src/*.am
src/mkhelp.pl
src/Makefile.vc6
src/Makefile.b32
src/*m32
lib/getdate.y
lib/*.[ch]
lib/*in
lib/*am
lib/Makefile.vc6
lib/*m32
lib/Makefile.b32
lib/Makefile.b32.resp
lib/libcurl.def
include/README
include/Makefile.in
include/Makefile.am
include/curl/*.h
include/curl/Makefile.in
include/curl/Makefile.am
packages/Linux/RPM/curl-ssl.spec
packages/Linux/RPM/curl.spec
packages/Linux/RPM/make_curl_rpm
packages/Linux/RPM/README
packages/Win32/README
packages/README
tests/Makefile.am
tests/Makefile.in
tests/runtests.pl
tests/README
tests/httpserver.pl
tests/ftpserver.pl
tests/data/*.txt


@@ -4,9 +4,44 @@
AUTOMAKE_OPTIONS = foreign no-dependencies
EXTRA_DIST = curl.spec curl-ssl.spec
EXTRA_DIST = \
CHANGES LEGAL maketgz MITX.txt MPL-1.1.txt \
config-win32.h reconf packages/README Makefile.dist
SUBDIRS = docs lib src include tests
SUBDIRS = docs lib src include tests packages perl php
# create a root makefile in the distribution:
dist-hook:
cp $(srcdir)/Makefile.dist $(distdir)/Makefile
check: test
test:
@(cd tests; make quiet-test)
@(cd tests; $(MAKE) quiet-test)
#
# Build source and binary rpms. For rpm-3.0 and above, the ~/.rpmmacros
# must contain the following line:
# %_topdir /home/loic/local/rpm
# and that /home/loic/local/rpm contains the directory SOURCES, BUILD etc.
#
# cd /home/loic/local/rpm ; mkdir -p SOURCES BUILD RPMS/i386 SPECS SRPMS
#
# If additional configure flags are needed to build the package, add the
# following in ~/.rpmmacros
# %configure CFLAGS="%{optflags}" ./configure %{_target_platform} --prefix=%{_prefix} ${AM_CONFIGFLAGS}
# and run make rpm in the following way:
# AM_CONFIGFLAGS='--with-uri=/home/users/loic/local/RedHat-6.2' make rpm
#
rpms:
$(MAKE) RPMDIST=curl rpm
$(MAKE) RPMDIST=curl-ssl rpm
rpm:
RPM_TOPDIR=`rpm --showrc | $(PERL) -n -e 'print if(s/.*_topdir\s+(.*)/$$1/)'` ; \
cp $(srcdir)/packages/Linux/RPM/$(RPMDIST).spec $$RPM_TOPDIR/SPECS ; \
cp $(PACKAGE)-$(VERSION).tar.gz $$RPM_TOPDIR/SOURCES ; \
rpm -ba --clean --rmsource $$RPM_TOPDIR/SPECS/$(RPMDIST).spec ; \
mv $$RPM_TOPDIR/RPMS/i386/$(RPMDIST)-*.rpm . ; \
mv $$RPM_TOPDIR/SRPMS/$(RPMDIST)-*.src.rpm .


@@ -30,16 +30,16 @@ ssl:
make
borland:
cd lib; make -f Makefile.b32
cd src; make -f Makefile.b32
cd lib & make -f Makefile.b32
cd src & make -f Makefile.b32
mingw32:
cd lib; make -f Makefile.m32
cd src; make -f Makefile.m32
cd lib & make -f Makefile.m32
cd src & make -f Makefile.m32
mingw32-ssl:
cd lib; make -f Makefile.m32 SSL=1
cd src; make -f Makefile.m32 SSL=1
cd lib & make -f Makefile.m32 SSL=1
cd src & make -f Makefile.m32 SSL=1
vc:
cd lib


@@ -36,3 +36,18 @@
/* Define if you have the Kerberos4 libraries (including -ldes) */
#undef KRB4
/* Define if you want to enable IPv6 support */
#undef ENABLE_IPV6
/* Define this to 'int' if ssize_t is not an available typedefed type */
#undef ssize_t
/* Define this to 'int' if socklen_t is not an available typedefed type */
#undef socklen_t
/* Define this as a suitable file to read random data from */
#undef RANDOM_FILE
/* Define this to your Entropy Gathering Daemon socket pathname */
#undef EGD_SOCKET

aclocal.m4

@@ -1,616 +0,0 @@
dnl aclocal.m4 generated automatically by aclocal 1.4
dnl Copyright (C) 1994, 1995-8, 1999 Free Software Foundation, Inc.
dnl This file is free software; the Free Software Foundation
dnl gives unlimited permission to copy and/or distribute it,
dnl with or without modifications, as long as this notice is preserved.
dnl This program is distributed in the hope that it will be useful,
dnl but WITHOUT ANY WARRANTY, to the extent permitted by law; without
dnl even the implied warranty of MERCHANTABILITY or FITNESS FOR A
dnl PARTICULAR PURPOSE.
#serial 12
dnl By default, many hosts won't let programs access large files;
dnl one must use special compiler options to get large-file access to work.
dnl For more details about this brain damage please see:
dnl http://www.sas.com/standards/large.file/x_open.20Mar96.html
dnl Written by Paul Eggert <eggert@twinsun.com>.
dnl Internal subroutine of AC_SYS_LARGEFILE.
dnl AC_SYS_LARGEFILE_TEST_INCLUDES
AC_DEFUN(AC_SYS_LARGEFILE_TEST_INCLUDES,
[[#include <sys/types.h>
int a[(off_t) 9223372036854775807 == 9223372036854775807 ? 1 : -1];
]])
dnl Internal subroutine of AC_SYS_LARGEFILE.
dnl AC_SYS_LARGEFILE_MACRO_VALUE(C-MACRO, VALUE, CACHE-VAR, COMMENT, INCLUDES, FUNCTION-BODY)
AC_DEFUN(AC_SYS_LARGEFILE_MACRO_VALUE,
[AC_CACHE_CHECK([for $1 value needed for large files], $3,
[$3=no
AC_TRY_COMPILE(AC_SYS_LARGEFILE_TEST_INCLUDES
$5
,
[$6],
,
[AC_TRY_COMPILE([#define $1 $2]
AC_SYS_LARGEFILE_TEST_INCLUDES
$5
,
[$6],
[$3=$2])])])
if test "[$]$3" != no; then
AC_DEFINE_UNQUOTED([$1], [$]$3, [$4])
fi])
AC_DEFUN(AC_SYS_LARGEFILE,
[AC_ARG_ENABLE(largefile,
[ --disable-largefile omit support for large files])
if test "$enable_largefile" != no; then
AC_CACHE_CHECK([for special C compiler options needed for large files],
ac_cv_sys_largefile_CC,
[ac_cv_sys_largefile_CC=no
if test "$GCC" != yes; then
# IRIX 6.2 and later do not support large files by default,
# so use the C compiler's -n32 option if that helps.
AC_TRY_COMPILE(AC_SYS_LARGEFILE_TEST_INCLUDES, , ,
[ac_save_CC="$CC"
CC="$CC -n32"
AC_TRY_COMPILE(AC_SYS_LARGEFILE_TEST_INCLUDES, ,
ac_cv_sys_largefile_CC=' -n32')
CC="$ac_save_CC"])
fi])
if test "$ac_cv_sys_largefile_CC" != no; then
CC="$CC$ac_cv_sys_largefile_CC"
fi
AC_SYS_LARGEFILE_MACRO_VALUE(_FILE_OFFSET_BITS, 64,
ac_cv_sys_file_offset_bits,
[Number of bits in a file offset, on hosts where this is settable.])
AC_SYS_LARGEFILE_MACRO_VALUE(_LARGEFILE_SOURCE, 1,
ac_cv_sys_largefile_source,
[Define to make ftello visible on some hosts (e.g. HP-UX 10.20).],
[#include <stdio.h>], [return !ftello;])
AC_SYS_LARGEFILE_MACRO_VALUE(_LARGE_FILES, 1,
ac_cv_sys_large_files,
[Define for large files, on AIX-style hosts.])
dnl lftp does not need ftello, and _XOPEN_SOURCE=500 makes resolv.h fail.
dnl AC_SYS_LARGEFILE_MACRO_VALUE(_XOPEN_SOURCE, 500,
dnl ac_cv_sys_xopen_source,
dnl [Define to make ftello visible on some hosts (e.g. glibc 2.1.3).],
dnl [#include <stdio.h>], [return !ftello;])
fi
])
# Like AC_CONFIG_HEADER, but automatically create stamp file.
AC_DEFUN(AM_CONFIG_HEADER,
[AC_PREREQ([2.12])
AC_CONFIG_HEADER([$1])
dnl When config.status generates a header, we must update the stamp-h file.
dnl This file resides in the same directory as the config header
dnl that is generated. We must strip everything past the first ":",
dnl and everything past the last "/".
AC_OUTPUT_COMMANDS(changequote(<<,>>)dnl
ifelse(patsubst(<<$1>>, <<[^ ]>>, <<>>), <<>>,
<<test -z "<<$>>CONFIG_HEADERS" || echo timestamp > patsubst(<<$1>>, <<^\([^:]*/\)?.*>>, <<\1>>)stamp-h<<>>dnl>>,
<<am_indx=1
for am_file in <<$1>>; do
case " <<$>>CONFIG_HEADERS " in
*" <<$>>am_file "*<<)>>
echo timestamp > `echo <<$>>am_file | sed -e 's%:.*%%' -e 's%[^/]*$%%'`stamp-h$am_indx
;;
esac
am_indx=`expr "<<$>>am_indx" + 1`
done<<>>dnl>>)
changequote([,]))])
# Do all the work for Automake. This macro actually does too much --
# some checks are only needed if your package does certain things.
# But this isn't really a big deal.
# serial 1
dnl Usage:
dnl AM_INIT_AUTOMAKE(package,version, [no-define])
AC_DEFUN(AM_INIT_AUTOMAKE,
[AC_REQUIRE([AC_PROG_INSTALL])
PACKAGE=[$1]
AC_SUBST(PACKAGE)
VERSION=[$2]
AC_SUBST(VERSION)
dnl test to see if srcdir already configured
if test "`cd $srcdir && pwd`" != "`pwd`" && test -f $srcdir/config.status; then
AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
fi
ifelse([$3],,
AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package])
AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package]))
AC_REQUIRE([AM_SANITY_CHECK])
AC_REQUIRE([AC_ARG_PROGRAM])
dnl FIXME This is truly gross.
missing_dir=`cd $ac_aux_dir && pwd`
AM_MISSING_PROG(ACLOCAL, aclocal, $missing_dir)
AM_MISSING_PROG(AUTOCONF, autoconf, $missing_dir)
AM_MISSING_PROG(AUTOMAKE, automake, $missing_dir)
AM_MISSING_PROG(AUTOHEADER, autoheader, $missing_dir)
AM_MISSING_PROG(MAKEINFO, makeinfo, $missing_dir)
AC_REQUIRE([AC_PROG_MAKE_SET])])
#
# Check to make sure that the build environment is sane.
#
AC_DEFUN(AM_SANITY_CHECK,
[AC_MSG_CHECKING([whether build environment is sane])
# Just in case
sleep 1
echo timestamp > conftestfile
# Do `set' in a subshell so we don't clobber the current shell's
# arguments. Must try -L first in case configure is actually a
# symlink; some systems play weird games with the mod time of symlinks
# (eg FreeBSD returns the mod time of the symlink's containing
# directory).
if (
set X `ls -Lt $srcdir/configure conftestfile 2> /dev/null`
if test "[$]*" = "X"; then
# -L didn't work.
set X `ls -t $srcdir/configure conftestfile`
fi
if test "[$]*" != "X $srcdir/configure conftestfile" \
&& test "[$]*" != "X conftestfile $srcdir/configure"; then
# If neither matched, then we have a broken ls. This can happen
# if, for instance, CONFIG_SHELL is bash and it inherits a
# broken ls alias from the environment. This has actually
# happened. Such a system could not be considered "sane".
AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
alias in your environment])
fi
test "[$]2" = conftestfile
)
then
# Ok.
:
else
AC_MSG_ERROR([newly created file is older than distributed files!
Check your system clock])
fi
rm -f conftest*
AC_MSG_RESULT(yes)])
dnl AM_MISSING_PROG(NAME, PROGRAM, DIRECTORY)
dnl The program must properly implement --version.
AC_DEFUN(AM_MISSING_PROG,
[AC_MSG_CHECKING(for working $2)
# Run test in a subshell; some versions of sh will print an error if
# an executable is not found, even if stderr is redirected.
# Redirect stdin to placate older versions of autoconf. Sigh.
if ($2 --version) < /dev/null > /dev/null 2>&1; then
$1=$2
AC_MSG_RESULT(found)
else
$1="$3/missing $2"
AC_MSG_RESULT(missing)
fi
AC_SUBST($1)])
# serial 40 AC_PROG_LIBTOOL
AC_DEFUN(AC_PROG_LIBTOOL,
[AC_REQUIRE([AC_LIBTOOL_SETUP])dnl
# Save cache, so that ltconfig can load it
AC_CACHE_SAVE
# Actually configure libtool. ac_aux_dir is where install-sh is found.
CC="$CC" CFLAGS="$CFLAGS" CPPFLAGS="$CPPFLAGS" \
LD="$LD" LDFLAGS="$LDFLAGS" LIBS="$LIBS" \
LN_S="$LN_S" NM="$NM" RANLIB="$RANLIB" \
DLLTOOL="$DLLTOOL" AS="$AS" OBJDUMP="$OBJDUMP" \
${CONFIG_SHELL-/bin/sh} $ac_aux_dir/ltconfig --no-reexec \
$libtool_flags --no-verify $ac_aux_dir/ltmain.sh $lt_target \
|| AC_MSG_ERROR([libtool configure failed])
# Reload cache, that may have been modified by ltconfig
AC_CACHE_LOAD
# This can be used to rebuild libtool when needed
LIBTOOL_DEPS="$ac_aux_dir/ltconfig $ac_aux_dir/ltmain.sh"
# Always use our own libtool.
LIBTOOL='$(SHELL) $(top_builddir)/libtool'
AC_SUBST(LIBTOOL)dnl
# Redirect the config.log output again, so that the ltconfig log is not
# clobbered by the next message.
exec 5>>./config.log
])
AC_DEFUN(AC_LIBTOOL_SETUP,
[AC_PREREQ(2.13)dnl
AC_REQUIRE([AC_ENABLE_SHARED])dnl
AC_REQUIRE([AC_ENABLE_STATIC])dnl
AC_REQUIRE([AC_ENABLE_FAST_INSTALL])dnl
AC_REQUIRE([AC_CANONICAL_HOST])dnl
AC_REQUIRE([AC_CANONICAL_BUILD])dnl
AC_REQUIRE([AC_PROG_RANLIB])dnl
AC_REQUIRE([AC_PROG_CC])dnl
AC_REQUIRE([AC_PROG_LD])dnl
AC_REQUIRE([AC_PROG_NM])dnl
AC_REQUIRE([AC_PROG_LN_S])dnl
dnl
case "$target" in
NONE) lt_target="$host" ;;
*) lt_target="$target" ;;
esac
# Check for any special flags to pass to ltconfig.
libtool_flags="--cache-file=$cache_file"
test "$enable_shared" = no && libtool_flags="$libtool_flags --disable-shared"
test "$enable_static" = no && libtool_flags="$libtool_flags --disable-static"
test "$enable_fast_install" = no && libtool_flags="$libtool_flags --disable-fast-install"
test "$ac_cv_prog_gcc" = yes && libtool_flags="$libtool_flags --with-gcc"
test "$ac_cv_prog_gnu_ld" = yes && libtool_flags="$libtool_flags --with-gnu-ld"
ifdef([AC_PROVIDE_AC_LIBTOOL_DLOPEN],
[libtool_flags="$libtool_flags --enable-dlopen"])
ifdef([AC_PROVIDE_AC_LIBTOOL_WIN32_DLL],
[libtool_flags="$libtool_flags --enable-win32-dll"])
AC_ARG_ENABLE(libtool-lock,
[ --disable-libtool-lock avoid locking (might break parallel builds)])
test "x$enable_libtool_lock" = xno && libtool_flags="$libtool_flags --disable-lock"
test x"$silent" = xyes && libtool_flags="$libtool_flags --silent"
# Some flags need to be propagated to the compiler or linker for good
# libtool support.
case "$lt_target" in
*-*-irix6*)
# Find out which ABI we are using.
echo '[#]line __oline__ "configure"' > conftest.$ac_ext
if AC_TRY_EVAL(ac_compile); then
case "`/usr/bin/file conftest.o`" in
*32-bit*)
LD="${LD-ld} -32"
;;
*N32*)
LD="${LD-ld} -n32"
;;
*64-bit*)
LD="${LD-ld} -64"
;;
esac
fi
rm -rf conftest*
;;
*-*-sco3.2v5*)
# On SCO OpenServer 5, we need -belf to get full-featured binaries.
SAVE_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS -belf"
AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf,
[AC_TRY_LINK([],[],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no])])
if test x"$lt_cv_cc_needs_belf" != x"yes"; then
# this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
CFLAGS="$SAVE_CFLAGS"
fi
;;
ifdef([AC_PROVIDE_AC_LIBTOOL_WIN32_DLL],
[*-*-cygwin* | *-*-mingw*)
AC_CHECK_TOOL(DLLTOOL, dlltool, false)
AC_CHECK_TOOL(AS, as, false)
AC_CHECK_TOOL(OBJDUMP, objdump, false)
;;
])
esac
])
# AC_LIBTOOL_DLOPEN - enable checks for dlopen support
AC_DEFUN(AC_LIBTOOL_DLOPEN, [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])])
# AC_LIBTOOL_WIN32_DLL - declare package support for building win32 dll's
AC_DEFUN(AC_LIBTOOL_WIN32_DLL, [AC_BEFORE([$0], [AC_LIBTOOL_SETUP])])
# AC_ENABLE_SHARED - implement the --enable-shared flag
# Usage: AC_ENABLE_SHARED[(DEFAULT)]
# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
# `yes'.
AC_DEFUN(AC_ENABLE_SHARED, [dnl
define([AC_ENABLE_SHARED_DEFAULT], ifelse($1, no, no, yes))dnl
AC_ARG_ENABLE(shared,
changequote(<<, >>)dnl
<< --enable-shared[=PKGS] build shared libraries [default=>>AC_ENABLE_SHARED_DEFAULT],
changequote([, ])dnl
[p=${PACKAGE-default}
case "$enableval" in
yes) enable_shared=yes ;;
no) enable_shared=no ;;
*)
enable_shared=no
# Look at the argument we got. We use all the common list separators.
IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
for pkg in $enableval; do
if test "X$pkg" = "X$p"; then
enable_shared=yes
fi
done
IFS="$ac_save_ifs"
;;
esac],
enable_shared=AC_ENABLE_SHARED_DEFAULT)dnl
])
# AC_DISABLE_SHARED - set the default shared flag to --disable-shared
AC_DEFUN(AC_DISABLE_SHARED, [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
AC_ENABLE_SHARED(no)])
# AC_ENABLE_STATIC - implement the --enable-static flag
# Usage: AC_ENABLE_STATIC[(DEFAULT)]
# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
# `yes'.
AC_DEFUN(AC_ENABLE_STATIC, [dnl
define([AC_ENABLE_STATIC_DEFAULT], ifelse($1, no, no, yes))dnl
AC_ARG_ENABLE(static,
changequote(<<, >>)dnl
<< --enable-static[=PKGS] build static libraries [default=>>AC_ENABLE_STATIC_DEFAULT],
changequote([, ])dnl
[p=${PACKAGE-default}
case "$enableval" in
yes) enable_static=yes ;;
no) enable_static=no ;;
*)
enable_static=no
# Look at the argument we got. We use all the common list separators.
IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
for pkg in $enableval; do
if test "X$pkg" = "X$p"; then
enable_static=yes
fi
done
IFS="$ac_save_ifs"
;;
esac],
enable_static=AC_ENABLE_STATIC_DEFAULT)dnl
])
# AC_DISABLE_STATIC - set the default static flag to --disable-static
AC_DEFUN(AC_DISABLE_STATIC, [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
AC_ENABLE_STATIC(no)])
# AC_ENABLE_FAST_INSTALL - implement the --enable-fast-install flag
# Usage: AC_ENABLE_FAST_INSTALL[(DEFAULT)]
# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
# `yes'.
AC_DEFUN(AC_ENABLE_FAST_INSTALL, [dnl
define([AC_ENABLE_FAST_INSTALL_DEFAULT], ifelse($1, no, no, yes))dnl
AC_ARG_ENABLE(fast-install,
changequote(<<, >>)dnl
<< --enable-fast-install[=PKGS] optimize for fast installation [default=>>AC_ENABLE_FAST_INSTALL_DEFAULT],
changequote([, ])dnl
[p=${PACKAGE-default}
case "$enableval" in
yes) enable_fast_install=yes ;;
no) enable_fast_install=no ;;
*)
enable_fast_install=no
# Look at the argument we got. We use all the common list separators.
IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
for pkg in $enableval; do
if test "X$pkg" = "X$p"; then
enable_fast_install=yes
fi
done
IFS="$ac_save_ifs"
;;
esac],
enable_fast_install=AC_ENABLE_FAST_INSTALL_DEFAULT)dnl
])
# AC_ENABLE_FAST_INSTALL - set the default to --disable-fast-install
AC_DEFUN(AC_DISABLE_FAST_INSTALL, [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
AC_ENABLE_FAST_INSTALL(no)])
# AC_PROG_LD - find the path to the GNU or non-GNU linker
AC_DEFUN(AC_PROG_LD,
[AC_ARG_WITH(gnu-ld,
[ --with-gnu-ld assume the C compiler uses GNU ld [default=no]],
test "$withval" = no || with_gnu_ld=yes, with_gnu_ld=no)
AC_REQUIRE([AC_PROG_CC])dnl
AC_REQUIRE([AC_CANONICAL_HOST])dnl
AC_REQUIRE([AC_CANONICAL_BUILD])dnl
ac_prog=ld
if test "$ac_cv_prog_gcc" = yes; then
# Check if gcc -print-prog-name=ld gives a path.
AC_MSG_CHECKING([for ld used by GCC])
ac_prog=`($CC -print-prog-name=ld) 2>&5`
case "$ac_prog" in
# Accept absolute paths.
changequote(,)dnl
[\\/]* | [A-Za-z]:[\\/]*)
re_direlt='/[^/][^/]*/\.\./'
changequote([,])dnl
# Canonicalize the path of ld
ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
done
test -z "$LD" && LD="$ac_prog"
;;
"")
# If it fails, then pretend we aren't using GCC.
ac_prog=ld
;;
*)
# If it is relative, then search for the first ld in PATH.
with_gnu_ld=unknown
;;
esac
elif test "$with_gnu_ld" = yes; then
AC_MSG_CHECKING([for GNU ld])
else
AC_MSG_CHECKING([for non-GNU ld])
fi
AC_CACHE_VAL(ac_cv_path_LD,
[if test -z "$LD"; then
IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}"
for ac_dir in $PATH; do
test -z "$ac_dir" && ac_dir=.
if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
ac_cv_path_LD="$ac_dir/$ac_prog"
# Check to see if the program is GNU ld. I'd rather use --version,
# but apparently some GNU ld's only accept -v.
# Break only if it was the GNU/non-GNU ld that we prefer.
if "$ac_cv_path_LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then
test "$with_gnu_ld" != no && break
else
test "$with_gnu_ld" != yes && break
fi
fi
done
IFS="$ac_save_ifs"
else
ac_cv_path_LD="$LD" # Let the user override the test with a path.
fi])
LD="$ac_cv_path_LD"
if test -n "$LD"; then
AC_MSG_RESULT($LD)
else
AC_MSG_RESULT(no)
fi
test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
AC_PROG_LD_GNU
])
AC_DEFUN(AC_PROG_LD_GNU,
[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], ac_cv_prog_gnu_ld,
[# I'd rather use --version here, but apparently some GNU ld's only accept -v.
if $LD -v 2>&1 </dev/null | egrep '(GNU|with BFD)' 1>&5; then
ac_cv_prog_gnu_ld=yes
else
ac_cv_prog_gnu_ld=no
fi])
])
# AC_PROG_NM - find the path to a BSD-compatible name lister
AC_DEFUN(AC_PROG_NM,
[AC_MSG_CHECKING([for BSD-compatible nm])
AC_CACHE_VAL(ac_cv_path_NM,
[if test -n "$NM"; then
# Let the user override the test.
ac_cv_path_NM="$NM"
else
IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}"
for ac_dir in $PATH /usr/ccs/bin /usr/ucb /bin; do
test -z "$ac_dir" && ac_dir=.
if test -f $ac_dir/nm || test -f $ac_dir/nm$ac_exeext ; then
# Check to see if the nm accepts a BSD-compat flag.
# Adding the `sed 1q' prevents false positives on HP-UX, which says:
# nm: unknown option "B" ignored
if ($ac_dir/nm -B /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
ac_cv_path_NM="$ac_dir/nm -B"
break
elif ($ac_dir/nm -p /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
ac_cv_path_NM="$ac_dir/nm -p"
break
else
ac_cv_path_NM=${ac_cv_path_NM="$ac_dir/nm"} # keep the first match, but
continue # so that we can try to find one that supports BSD flags
fi
fi
done
IFS="$ac_save_ifs"
test -z "$ac_cv_path_NM" && ac_cv_path_NM=nm
fi])
NM="$ac_cv_path_NM"
AC_MSG_RESULT([$NM])
])
# AC_CHECK_LIBM - check for math library
AC_DEFUN(AC_CHECK_LIBM,
[AC_REQUIRE([AC_CANONICAL_HOST])dnl
LIBM=
case "$lt_target" in
*-*-beos* | *-*-cygwin*)
# These system don't have libm
;;
*-ncr-sysv4.3*)
AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw")
AC_CHECK_LIB(m, main, LIBM="$LIBM -lm")
;;
*)
AC_CHECK_LIB(m, main, LIBM="-lm")
;;
esac
])
# AC_LIBLTDL_CONVENIENCE[(dir)] - sets LIBLTDL to the link flags for
# the libltdl convenience library and INCLTDL to the include flags for
# the libltdl header and adds --enable-ltdl-convenience to the
# configure arguments. Note that LIBLTDL and INCLTDL are not
# AC_SUBSTed, nor is AC_CONFIG_SUBDIRS called. If DIR is not
# provided, it is assumed to be `libltdl'. LIBLTDL will be prefixed
# with '${top_builddir}/' and INCLTDL will be prefixed with
# '${top_srcdir}/' (note the single quotes!). If your package is not
# flat and you're not using automake, define top_builddir and
# top_srcdir appropriately in the Makefiles.
AC_DEFUN(AC_LIBLTDL_CONVENIENCE, [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
case "$enable_ltdl_convenience" in
no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;;
"") enable_ltdl_convenience=yes
ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;;
esac
LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdlc.la
INCLTDL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl'])
])
# AC_LIBLTDL_INSTALLABLE[(dir)] - sets LIBLTDL to the link flags for
# the libltdl installable library and INCLTDL to the include flags for
# the libltdl header and adds --enable-ltdl-install to the configure
# arguments. Note that LIBLTDL and INCLTDL are not AC_SUBSTed, nor is
# AC_CONFIG_SUBDIRS called. If DIR is not provided and an installed
# libltdl is not found, it is assumed to be `libltdl'. LIBLTDL will
# be prefixed with '${top_builddir}/' and INCLTDL will be prefixed
# with '${top_srcdir}/' (note the single quotes!). If your package is
# not flat and you're not using automake, define top_builddir and
# top_srcdir appropriately in the Makefiles.
# In the future, this macro may have to be called after AC_PROG_LIBTOOL.
AC_DEFUN(AC_LIBLTDL_INSTALLABLE, [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
AC_CHECK_LIB(ltdl, main,
[test x"$enable_ltdl_install" != xyes && enable_ltdl_install=no],
[if test x"$enable_ltdl_install" = xno; then
AC_MSG_WARN([libltdl not installed, but installation disabled])
else
enable_ltdl_install=yes
fi
])
if test x"$enable_ltdl_install" = x"yes"; then
ac_configure_args="$ac_configure_args --enable-ltdl-install"
LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdl.la
INCLTDL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl'])
else
ac_configure_args="$ac_configure_args --enable-ltdl-install=no"
LIBLTDL="-lltdl"
INCLTDL=
fi
])
dnl old names
AC_DEFUN(AM_PROG_LIBTOOL, [indir([AC_PROG_LIBTOOL])])dnl
AC_DEFUN(AM_ENABLE_SHARED, [indir([AC_ENABLE_SHARED], $@)])dnl
AC_DEFUN(AM_ENABLE_STATIC, [indir([AC_ENABLE_STATIC], $@)])dnl
AC_DEFUN(AM_DISABLE_SHARED, [indir([AC_DISABLE_SHARED], $@)])dnl
AC_DEFUN(AM_DISABLE_STATIC, [indir([AC_DISABLE_STATIC], $@)])dnl
AC_DEFUN(AM_PROG_LD, [indir([AC_PROG_LD])])dnl
AC_DEFUN(AM_PROG_NM, [indir([AC_PROG_NM])])dnl
dnl This is just to silence aclocal about the macro not being used
ifelse([AC_DISABLE_FAST_INSTALL])dnl


@@ -23,6 +23,12 @@
/* Define to `unsigned' if <sys/types.h> doesn't define. */
/* #undef size_t */
/* Define this to 'int' if ssize_t is not an available typedefed type */
#define ssize_t int
/* Define this to 'int' if socklen_t is not an available typedefed type */
#define socklen_t int
/* Define if you have the ANSI C header files. */
#define STDC_HEADERS 1


@@ -1,312 +0,0 @@
/* config.h.in. Generated automatically from configure.in by autoheader. */
/* Define if on AIX 3.
System headers sometimes define this.
We just want to avoid a redefinition error message. */
#ifndef _ALL_SOURCE
#undef _ALL_SOURCE
#endif
/* Define to empty if the keyword does not work. */
#undef const
/* Define as the return type of signal handlers (int or void). */
#undef RETSIGTYPE
/* Define to `unsigned' if <sys/types.h> doesn't define. */
#undef size_t
/* Define if you have the ANSI C header files. */
#undef STDC_HEADERS
/* Define if you can safely include both <sys/time.h> and <time.h>. */
#undef TIME_WITH_SYS_TIME
/* Define cpu-machine-OS */
#undef OS
/* Define if you have the gethostbyaddr_r() function with 5 arguments */
#undef HAVE_GETHOSTBYADDR_R_5
/* Define if you have the gethostbyaddr_r() function with 7 arguments */
#undef HAVE_GETHOSTBYADDR_R_7
/* Define if you have the gethostbyaddr_r() function with 8 arguments */
#undef HAVE_GETHOSTBYADDR_R_8
/* Define if you have the gethostbyname_r() function with 3 arguments */
#undef HAVE_GETHOSTBYNAME_R_3
/* Define if you have the gethostbyname_r() function with 5 arguments */
#undef HAVE_GETHOSTBYNAME_R_5
/* Define if you have the gethostbyname_r() function with 6 arguments */
#undef HAVE_GETHOSTBYNAME_R_6
/* Define if you have the inet_ntoa_r function declared. */
#undef HAVE_INET_NTOA_R_DECL
/* Define if you need the _REENTRANT define for some functions */
#undef NEED_REENTRANT
/* Define if you have the Kerberos4 libraries (including -ldes) */
#undef KRB4
/* The number of bytes in a long double. */
#undef SIZEOF_LONG_DOUBLE
/* The number of bytes in a long long. */
#undef SIZEOF_LONG_LONG
/* Define if you have the RAND_screen function. */
#undef HAVE_RAND_SCREEN
/* Define if you have the RAND_status function. */
#undef HAVE_RAND_STATUS
/* Define if you have the closesocket function. */
#undef HAVE_CLOSESOCKET
/* Define if you have the gethostbyaddr function. */
#undef HAVE_GETHOSTBYADDR
/* Define if you have the gethostbyaddr_r function. */
#undef HAVE_GETHOSTBYADDR_R
/* Define if you have the gethostbyname_r function. */
#undef HAVE_GETHOSTBYNAME_R
/* Define if you have the gethostname function. */
#undef HAVE_GETHOSTNAME
/* Define if you have the getpass_r function. */
#undef HAVE_GETPASS_R
/* Define if you have the getservbyname function. */
#undef HAVE_GETSERVBYNAME
/* Define if you have the gettimeofday function. */
#undef HAVE_GETTIMEOFDAY
/* Define if you have the inet_addr function. */
#undef HAVE_INET_ADDR
/* Define if you have the inet_ntoa function. */
#undef HAVE_INET_NTOA
/* Define if you have the inet_ntoa_r function. */
#undef HAVE_INET_NTOA_R
/* Define if you have the krb_get_our_ip_for_realm function. */
#undef HAVE_KRB_GET_OUR_IP_FOR_REALM
/* Define if you have the localtime_r function. */
#undef HAVE_LOCALTIME_R
/* Define if you have the perror function. */
#undef HAVE_PERROR
/* Define if you have the select function. */
#undef HAVE_SELECT
/* Define if you have the setvbuf function. */
#undef HAVE_SETVBUF
/* Define if you have the sigaction function. */
#undef HAVE_SIGACTION
/* Define if you have the signal function. */
#undef HAVE_SIGNAL
/* Define if you have the socket function. */
#undef HAVE_SOCKET
/* Define if you have the strcasecmp function. */
#undef HAVE_STRCASECMP
/* Define if you have the strcmpi function. */
#undef HAVE_STRCMPI
/* Define if you have the strdup function. */
#undef HAVE_STRDUP
/* Define if you have the strftime function. */
#undef HAVE_STRFTIME
/* Define if you have the stricmp function. */
#undef HAVE_STRICMP
/* Define if you have the strlcpy function. */
#undef HAVE_STRLCPY
/* Define if you have the strstr function. */
#undef HAVE_STRSTR
/* Define if you have the tcgetattr function. */
#undef HAVE_TCGETATTR
/* Define if you have the tcsetattr function. */
#undef HAVE_TCSETATTR
/* Define if you have the uname function. */
#undef HAVE_UNAME
/* Define if you have the <alloca.h> header file. */
#undef HAVE_ALLOCA_H
/* Define if you have the <arpa/inet.h> header file. */
#undef HAVE_ARPA_INET_H
/* Define if you have the <crypto.h> header file. */
#undef HAVE_CRYPTO_H
/* Define if you have the <des.h> header file. */
#undef HAVE_DES_H
/* Define if you have the <dlfcn.h> header file. */
#undef HAVE_DLFCN_H
/* Define if you have the <err.h> header file. */
#undef HAVE_ERR_H
/* Define if you have the <fcntl.h> header file. */
#undef HAVE_FCNTL_H
/* Define if you have the <getopt.h> header file. */
#undef HAVE_GETOPT_H
/* Define if you have the <io.h> header file. */
#undef HAVE_IO_H
/* Define if you have the <krb.h> header file. */
#undef HAVE_KRB_H
/* Define if you have the <malloc.h> header file. */
#undef HAVE_MALLOC_H
/* Define if you have the <net/if.h> header file. */
#undef HAVE_NET_IF_H
/* Define if you have the <netdb.h> header file. */
#undef HAVE_NETDB_H
/* Define if you have the <netinet/if_ether.h> header file. */
#undef HAVE_NETINET_IF_ETHER_H
/* Define if you have the <netinet/in.h> header file. */
#undef HAVE_NETINET_IN_H
/* Define if you have the <openssl/crypto.h> header file. */
#undef HAVE_OPENSSL_CRYPTO_H
/* Define if you have the <openssl/err.h> header file. */
#undef HAVE_OPENSSL_ERR_H
/* Define if you have the <openssl/pem.h> header file. */
#undef HAVE_OPENSSL_PEM_H
/* Define if you have the <openssl/rsa.h> header file. */
#undef HAVE_OPENSSL_RSA_H
/* Define if you have the <openssl/ssl.h> header file. */
#undef HAVE_OPENSSL_SSL_H
/* Define if you have the <openssl/x509.h> header file. */
#undef HAVE_OPENSSL_X509_H
/* Define if you have the <pem.h> header file. */
#undef HAVE_PEM_H
/* Define if you have the <rsa.h> header file. */
#undef HAVE_RSA_H
/* Define if you have the <sgtty.h> header file. */
#undef HAVE_SGTTY_H
/* Define if you have the <ssl.h> header file. */
#undef HAVE_SSL_H
/* Define if you have the <stdlib.h> header file. */
#undef HAVE_STDLIB_H
/* Define if you have the <sys/param.h> header file. */
#undef HAVE_SYS_PARAM_H
/* Define if you have the <sys/select.h> header file. */
#undef HAVE_SYS_SELECT_H
/* Define if you have the <sys/socket.h> header file. */
#undef HAVE_SYS_SOCKET_H
/* Define if you have the <sys/sockio.h> header file. */
#undef HAVE_SYS_SOCKIO_H
/* Define if you have the <sys/stat.h> header file. */
#undef HAVE_SYS_STAT_H
/* Define if you have the <sys/time.h> header file. */
#undef HAVE_SYS_TIME_H
/* Define if you have the <sys/types.h> header file. */
#undef HAVE_SYS_TYPES_H
/* Define if you have the <termio.h> header file. */
#undef HAVE_TERMIO_H
/* Define if you have the <termios.h> header file. */
#undef HAVE_TERMIOS_H
/* Define if you have the <time.h> header file. */
#undef HAVE_TIME_H
/* Define if you have the <unistd.h> header file. */
#undef HAVE_UNISTD_H
/* Define if you have the <winsock.h> header file. */
#undef HAVE_WINSOCK_H
/* Define if you have the <x509.h> header file. */
#undef HAVE_X509_H
/* Define if you have the crypto library (-lcrypto). */
#undef HAVE_LIBCRYPTO
/* Define if you have the dl library (-ldl). */
#undef HAVE_LIBDL
/* Define if you have the nsl library (-lnsl). */
#undef HAVE_LIBNSL
/* Define if you have the resolv library (-lresolv). */
#undef HAVE_LIBRESOLV
/* Define if you have the resolve library (-lresolve). */
#undef HAVE_LIBRESOLVE
/* Define if you have the socket library (-lsocket). */
#undef HAVE_LIBSOCKET
/* Define if you have the ssl library (-lssl). */
#undef HAVE_LIBSSL
/* Define if you have the ucb library (-lucb). */
#undef HAVE_LIBUCB
/* Name of package */
#undef PACKAGE
/* Version number of package */
#undef VERSION
/* Number of bits in a file offset, on hosts where this is settable. */
#undef _FILE_OFFSET_BITS
/* Define to make ftello visible on some hosts (e.g. HP-UX 10.20). */
#undef _LARGEFILE_SOURCE
/* Define for large files, on AIX-style hosts. */
#undef _LARGE_FILES
/* Set to explicitly specify we don't want to use thread-safe functions */
#undef DISABLED_THREADSAFE


@@ -26,6 +26,66 @@ dnl The install stuff has already been taken care of by the automake stuff
dnl AC_PROG_INSTALL
AC_PROG_MAKE_SET
dnl ************************************************************
dnl lame option to switch on debug options
dnl
AC_MSG_CHECKING([whether to enable debug options])
AC_ARG_ENABLE(debug,
[ --enable-debug Enable pedantic debug options
--disable-debug Disable debug options],
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
;;
*) AC_MSG_RESULT(yes)
CPPFLAGS="$CPPFLAGS -DMALLOCDEBUG"
CFLAGS="-Wall -pedantic -g"
;;
esac ],
AC_MSG_RESULT(no)
)
dnl
dnl check for working getaddrinfo()
dnl
AC_DEFUN(CURL_CHECK_WORKING_GETADDRINFO,[
AC_CACHE_CHECK(for working getaddrinfo, ac_cv_working_getaddrinfo,[
AC_TRY_RUN( [
#include <netdb.h>
#include <sys/types.h>
#include <sys/socket.h>
void main(void) {
struct addrinfo hints, *ai;
int error;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
error = getaddrinfo("127.0.0.1", "8080", &hints, &ai);
if (error) {
exit(1);
}
else {
exit(0);
}
}
],[
ac_cv_working_getaddrinfo="yes"
],[
ac_cv_working_getaddrinfo="no"
],[
ac_cv_working_getaddrinfo="yes"
])])
if test "$ac_cv_working_getaddrinfo" = "yes"; then
AC_DEFINE(HAVE_GETADDRINFO, 1, [Define if getaddrinfo exists and works])
AC_DEFINE(ENABLE_IPV6, 1, [Define if you want to enable IPv6 support])
fi
])
AC_DEFUN(CURL_CHECK_LOCALTIME_R,
[
dnl check for a few thread-safe functions
@@ -235,8 +295,57 @@ exit (rc != 0 ? 1 : 0); }],[
[ac_cv_gethostbyname_args=0])],
[ac_cv_gethostbyname_args=0])])
if test "$ac_cv_func_gethostbyname_r" = "yes"; then
if test "$ac_cv_gethostbyname_args" = "0"; then
dnl there's a gethostbyname_r() function, but we don't know how
dnl many arguments it wants!
AC_MSG_ERROR([couldn't figure out how to use gethostbyname_r()])
fi
fi
])
dnl **********************************************************************
dnl Checks for IPv6
dnl **********************************************************************
AC_MSG_CHECKING([whether to enable ipv6])
AC_ARG_ENABLE(ipv6,
[ --enable-ipv6 Enable ipv6 (with ipv4) support
--disable-ipv6 Disable ipv6 support],
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
ipv6=no
;;
*) AC_MSG_RESULT(yes)
ipv6=yes
;;
esac ],
AC_TRY_RUN([ /* is AF_INET6 available? */
#include <sys/types.h>
#include <sys/socket.h>
main()
{
if (socket(AF_INET6, SOCK_STREAM, 0) < 0)
exit(1);
else
exit(0);
}
],
AC_MSG_RESULT(yes)
ipv6=yes,
AC_MSG_RESULT(no)
ipv6=no,
AC_MSG_RESULT(no)
ipv6=no
))
if test "$ipv6" = "yes"; then
CURL_CHECK_WORKING_GETADDRINFO
fi
dnl **********************************************************************
dnl Checks for libraries.
@@ -290,6 +399,36 @@ AC_CHECK_FUNC(gethostname, , AC_CHECK_LIB(ucb, gethostname))
dnl dl lib?
AC_CHECK_FUNC(dlopen, , AC_CHECK_LIB(dl, dlopen))
dnl **********************************************************************
dnl Check for the random seed preferences
dnl **********************************************************************
AC_ARG_WITH(egd-socket,
[ --with-egd-socket=FILE Entropy Gathering Daemon socket pathname],
[ EGD_SOCKET="$withval" ]
)
if test -n "$EGD_SOCKET" ; then
AC_DEFINE_UNQUOTED(EGD_SOCKET, "$EGD_SOCKET")
fi
dnl Check for user-specified random device
AC_ARG_WITH(random,
[ --with-random=FILE read randomness from FILE (default=/dev/urandom)],
[ RANDOM_FILE="$withval" ],
[
dnl Check for random device
AC_CHECK_FILE("/dev/urandom",
[
RANDOM_FILE="/dev/urandom";
]
)
]
)
if test -n "$RANDOM_FILE" ; then
AC_SUBST(RANDOM_FILE)
AC_DEFINE_UNQUOTED(RANDOM_FILE, "$RANDOM_FILE")
fi
dnl **********************************************************************
dnl Check for the presence of Kerberos4 libraries and headers
dnl **********************************************************************
@@ -327,6 +466,10 @@ AC_MSG_CHECKING([if Kerberos4 support is requested])
if test "$want_krb4" = yes
then
if test "$ipv6" = "yes"; then
echo krb4 is not compatible with IPv6
exit 1
fi
AC_MSG_RESULT(yes)
dnl Check for & handle argument to --with-krb4
@@ -440,7 +583,8 @@ else
dnl these can only exist if openssl exists
AC_CHECK_FUNCS( RAND_status \
RAND_screen )
RAND_screen \
RAND_egd )
fi
@@ -537,6 +681,7 @@ AC_CHECK_HEADERS( \
winsock.h \
time.h \
io.h \
pwd.h
)
dnl Check for libz header
@@ -554,6 +699,31 @@ AC_CHECK_SIZEOF(long double, 8)
# check for 'long long'
AC_CHECK_SIZEOF(long long, 4)
# check for ssize_t
AC_CHECK_TYPE(ssize_t, int)
dnl
dnl We can't just AC_CHECK_TYPE() for socklen_t since it doesn't appear
dnl in the standard headers. We egrep for it in the socket headers and
dnl if it is used there we assume we have the type defined, otherwise
dnl we search for it with AC_CHECK_TYPE() the "normal" way
dnl
if test "$ac_cv_header_sys_socket_h" = "yes"; then
AC_MSG_CHECKING(for socklen_t in sys/socket.h)
AC_EGREP_HEADER(socklen_t,
sys/socket.h,
socklen_t=yes
AC_MSG_RESULT(yes),
AC_MSG_RESULT(no))
fi
if test "$socklen_t" != "yes"; then
# check for socklen_t the standard way if it wasn't found before
AC_CHECK_TYPE(socklen_t, int)
fi
dnl Get system canonical name
AC_CANONICAL_HOST
AC_DEFINE_UNQUOTED(OS, "${host}")
@@ -584,12 +754,18 @@ AC_CHECK_FUNCS( socket \
setvbuf \
sigaction \
signal \
getpass_r
getpass_r \
strlcat \
getpwuid \
geteuid
)
dnl removed 'getpass' check on October 26, 2000
if test "$ac_cv_func_select" != "yes"; then
AC_MSG_ERROR(Can't work without an existing select() function)
fi
if test "$ac_cv_func_socket" != "yes"; then
AC_MSG_ERROR(Can't work without an existing socket() function)
fi
@@ -609,12 +785,22 @@ dnl AC_SUBST(RANLIB)
AC_OUTPUT( Makefile \
docs/Makefile \
docs/examples/Makefile \
include/Makefile \
include/curl/Makefile \
src/Makefile \
lib/Makefile \
tests/Makefile)
dnl perl/checklinks.pl \
dnl perl/getlinks.pl \
dnl perl/formfind.pl \
dnl perl/recursiveftpget.pl )
tests/Makefile \
tests/data/Makefile \
packages/Makefile \
packages/Win32/Makefile \
packages/Linux/Makefile \
packages/Linux/RPM/Makefile \
packages/Linux/RPM/curl.spec \
packages/Linux/RPM/curl-ssl.spec \
perl/Makefile \
perl/Curl_easy/Makefile \
php/Makefile \
php/examples/Makefile
)


@@ -6,9 +6,9 @@
BUGS
Curl has grown substantially from that day, several years ago, when I
started fiddling with it. When I write this, there are 16500 lines of source
code, and by the time you read this it has probably grown even more.
Curl and libcurl have grown substantially since the beginning. At the time
of writing (mid March 2001), there are 23000 lines of source code, and by
the time you read this it has probably grown even more.
Of course there are lots of bugs left. And lots of misfeatures.
@@ -21,10 +21,11 @@ BUGS
http://sourceforge.net/bugs/?group_id=976
When reporting a bug, you should include information that will help us
understand what's wrong, what's expected and how to repeat it. You therefore
need to supply your operating system's name and version number (uname -a
under a unix is fine), what version of curl you're using (curl -v is fine),
what URL you were working with and anything else you think matters.
understand what's wrong, what you expected to happen and how to repeat the
bad behaviour. You therefore need to supply your operating system's name and
version number (uname -a under a unix is fine), what version of curl you're
using (curl -V is fine), what URL you were working with and anything else
you think matters.
If curl crashed, causing a core dump (in unix), there is hardly any use to
send that huge file to anyone of us. Unless we have an exact same system
@@ -32,7 +33,7 @@ BUGS
a stack trace and send that (much smaller) output to us instead!
The address and how to subscribe to the mailing list is detailed in the
README.curl file.
MANUAL file.
HOW TO GET A STACK TRACE with a common unix debugger
====================================================


@@ -13,12 +13,12 @@ To Think About When Contributing Source Code
The License Issue
When contributing with code, you agree to put your changes and new code under
the same license curl and libcurl is already using.
the same license curl and libcurl is already using unless stated otherwise.
If you add a larger piece of code, you can opt to make that file or set of
files to use a different license as long as they don't enfore any changes to
the rest of the package and they make sense. Such "separate parts" can not be
GPL (as we don't want the FPL virus to attack users of libcurl) but they must
GPL (as we don't want the GPL virus to attack users of libcurl) but they must
use "GPL compatible" licenses.
Naming
@@ -26,19 +26,19 @@ Naming
Try using a non-confusing naming scheme for your new functions and variable
names. It doesn't necessarily have to mean that you should use the same as in
other places of the code, just that the names should be logical,
understandable and be named according to what they're used for.
understandable and be named according to what they're used for. File-local
functions should be made static.
Indenting
Please try using the same indenting levels and bracing method as all the
other code already does. It makes the source code a lot easier to follow if
all of it is written using the same style. I don't ask you to like it, I just
ask you to follow the tradition! ;-)
all of it is written using the same style. We don't ask you to like it, we
just ask you to follow the tradition! ;-)
Commenting
Comment your source code extensively. I don't see myself as a very good
source commenter, but I try to become one. Commented code is quality code and
Comment your source code extensively. Commented code is quality code and
enables future modifications much more. Uncommented code much more risk being
completely replaced when someone wants to extend things, since other persons'
source code can get quite hard to read.
@@ -71,9 +71,9 @@ Separate Patches Doing Different Things
Patch Against Recent Sources
Please try to get the latest available sources to make your patches
against. It makes my life so much easier. The very best is if you get the
most up-to-date sources from the CVS repository, but the latest release
archive is quite OK as well!
against. It makes the life of the developers so much easier. The very best is
if you get the most up-to-date sources from the CVS repository, but the
latest release archive is quite OK as well!
Document
@@ -91,9 +91,9 @@ Write Access to CVS Repository
Test Cases
Since the introduction of the test suite, we will get the possibility to
quickly verify that the main features are working as supposed to. To maintain
this situation and improve it, all new features and functions that are added
need tro be tested. Every feature that is added should get at least one valid
Since the introduction of the test suite, we can quickly verify that the main
features are working as they're supposed to. To maintain this situation and
improve it, all new features and functions that are added need to be tested
in the test suite. Every feature that is added should get at least one valid
test case that verifies that it works as documented. If every submitter also
posts a few test cases, it won't end up as a heavy burden on a single person!

docs/FAQ
View File

@@ -1,4 +1,4 @@
Updated: January 4, 2001 (http://curl.haxx.se/docs/faq.shtml)
Updated: March 23, 2001 (http://curl.haxx.se/docs/faq.shtml)
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
@@ -12,6 +12,8 @@ FAQ
1.2 What is libcurl?
1.3 What is cURL not?
1.4 When will you make curl do XXXX ?
1.5 Who makes cURL?
1.6 What do you get for making cURL?
2. Install Related Problems
2.1 configure doesn't find OpenSSL even when it is installed
@@ -30,10 +32,12 @@ FAQ
3.6 Does curl support javascript, ASP, XML, XHTML or HTML version Y?
3.7 Can I use curl to delete/rename a file through FTP?
3.8 How do I tell curl to follow HTTP redirects?
3.9 How do I use curl in PHP?
3.10 What about SOAP, WebDAV, XML-RPC or similar protocols over HTTP?
4. Running Problems
4.1 Problems connecting to SSL servers.
4.2 Why do I get problems when I use & in the URL?
4.2 Why do I get problems when I use & or % in the URL?
4.3 How can I use {, }, [ or ] to specify multiple URLs?
4.4 Why do I get downloaded data even though the web page doesn't exist?
4.5 Why do I get return code XXX from a HTTP server?
@@ -48,10 +52,12 @@ FAQ
4.9 Curl can't authenticate to the server that requires NTLM?
5. libcurl Issues
5.1 Is libcurl thread safe?
5.1 Is libcurl thread-safe?
5.2 How can I receive all data into a large memory chunk?
5.3 How do I fetch multiple files with libcurl?
5.4 Does libcurl do Winsock initing on win32 systems?
5.5 Does CURLOPT_FILE and CURLOPT_INFILE work on win32 ?
5.6 What about Keep-Alive or persistent connections?
6. License Issues
6.1 I have a GPL program, can I use the libcurl library?
@@ -73,19 +79,18 @@ FAQ
fact it can also be pronounced 'see URL' also helped.
Curl supports a range of common internet protocols, currently including
HTTP, HTTPS, FTP, GOPHER, LDAP, DICT and FILE.
HTTP, HTTPS, FTP, GOPHER, LDAP, DICT, TELNET and FILE.
Please spell it cURL or just curl.
We spell it cURL or just curl. We pronounce it with an initial k sound:
[kurl].
1.2 What is libcurl?
libcurl is the engine inside curl that does all the work. curl is more or
less the command line interface that converts the given options into libcurl
function invocations. libcurl is a reliable, highly portable multiprotocol file
transfer library.
libcurl is a reliable and portable library which provides you with an easy
interface to a range of common internet protocols.
Any application is free to use libcurl, even commercial or closed-source
ones. Just make sure changes to the lib itself are made public.
You can use libcurl for free in your application even if it is commercial
or closed-source.
1.3 What is cURL not?
@@ -106,38 +111,64 @@ FAQ
or with PHP.
Curl is not a single-OS program. Curl exists, compiles, builds and runs
under a wide range of operating systems, including all modern Unixes,
Windows, Amiga, BeOS, OS/2, OS X, QNX etc.
under a wide range of operating systems, including all modern Unixes (and a
bunch of older ones too), Windows, Amiga, BeOS, OS/2, OS X, QNX etc.
1.4 When will you make curl do XXXX ?
I love suggestions of what to change in order to make curl and libcurl
better. I do however believe in a few rules when it comes to the future of
We love suggestions of what to change in order to make curl and libcurl
better. We do however believe in a few rules when it comes to the future of
curl:
* It is to remain a command line tool. If you want GUIs or fancy scripting
* Curl is to remain a command line tool. If you want GUIs or fancy scripting
capabilities, you're free to write another tool that uses libcurl and that
offers this. There's no point in having one single tool that does every
offers this. There's no point in having a single tool that does every
imaginable thing. That's also one of the great advantages of having the
core of curl as a library: libcurl.
core of curl as a library.
* I do not add things to curl that other small and available tools already
* We do not add things to curl that other small and available tools already
do just fine on the side. Curl's output is fine to pipe into another
program or redirect to another file for the next program to interpret.
* I focus on protocol related issues and improvements. If you wanna do more
* We focus on protocol related issues and improvements. If you wanna do more
magic with the supported protocols than curl currently does, chances are
good we will agree. If you wanna add more protocols, we may very well
agree.
* If you want me to make all the work while you wait for me to implement it
for you, that is not a very friendly attitude. I spend a considerable time
already on maintaining and developing curl. In order to get more out of
me, I trust you will offer some of your time and efforts in return.
* If you want someone else to do all the work while you wait for us to
implement it for you, that is not a very friendly attitude. We already
spend considerable time on maintaining and developing curl. In order to
get more out of us, you should consider trading in some of your time and
effort in return.
* If you write the code, chances are better that it will get into curl
faster.
1.5 Who makes cURL?
cURL and libcurl are not made by any single individual. Sure, Daniel
Stenberg writes the major parts, but various people's submissions are
important and crucial. Anyone can post their changes and improvements and
have them inserted in the main sources (of course on the condition that the
developers agree that the fixes are good).
The list of contributors at the bottom of the man page is only a small part
of all the people that every day provide us with bug reports, suggestions,
ideas and source code.
curl is developed by a community, with Daniel at the wheel.
1.6 What do you get for making cURL?
Project cURL is entirely free and open, without any commercial interests or
money involved. No person gets paid in any way for developing curl. We all
do this voluntarily in our spare time.
We get some help from companies. Contactor Data hosts the curl web site and
the main mailing list, Haxx owns the curl web site's domain and
sourceforge.net hosts several project tools we take advantage of, like the
bug tracker, mailing lists and more.
2. Install Related Problems
2.1. configure doesn't find OpenSSL even when it is installed
@@ -181,26 +212,24 @@ FAQ
2.2. Does curl work/build with other SSL libraries?
Curl has been written to use OpenSSL, although I doubt there would be much
problems using a different library. I just don't know any other free one and
that has limited my possibilities to develop against anything else.
If anyone does "port" curl to use a commercial SSL library, I am of course
very interested in getting the patch!
Curl has been written to use OpenSSL, although there should not be many
problems using a different library. If anyone does "port" curl to use a
different SSL library, we are of course very interested in getting the
patch!
2.3. Where can I find a copy of LIBEAY32.DLL?
That is an OpenSSL binary built for Windows.
Curl uses OpenSSL to do the SSL stuff. The LIBEAY32.DLL is what curl needs
on a windows machine to do https://. Check out the curl web page to find
on a windows machine to do https://. Check out the curl web site to find
accurate and up-to-date pointers to recent OpenSSL DLLs and other binary
packages.
2.4. Does cURL support Socks (RFC 1928) ?
No. Nobody has wanted it that badly yet. I would appriciate patches that
brings this functionality.
No. Nobody has wanted it that badly yet. We appreciate patches that bring
this functionality.
3. Usage problems
@@ -222,7 +251,7 @@ FAQ
3.2. How do I tell curl to resume a transfer?
Curl supports resume both ways on FTP, download ways on HTTP.
Curl supports resumed transfers both ways on both FTP and HTTP.
Try the -C option.
@@ -230,14 +259,14 @@ FAQ
You can't simply use -F or -d at your choice. The web server that will
receive your post assumes one of the formats. If the form you're trying to
"fake" sets the type to 'multipart/form-data', than and only then you must
"fake" sets the type to 'multipart/form-data', then and only then you must
use the -F type. In all the most common cases, you should use -d which then
causes a posting with the type 'application/x-www-form-urlencoded'.
I have described this in some detail in the README.curl file, and if you
don't understand it the first time, read it again before you post questions
about this to the mailing list. I would also suggest that you read through
the mailing list archives for old postings and questions regarding this.
This is described in some detail in the README.curl file, and if you don't
understand it the first time, read it again before you post questions about
this to the mailing list. Also, try reading through the mailing list
archives for old postings and questions regarding this.
3.4. How do I tell curl to run custom FTP commands?
@@ -281,13 +310,36 @@ FAQ
curl -L http://redirector.com
3.9 How do I use curl in PHP?
PHP4 has the ability to use libcurl as an internal module if built with that
option enabled. You then get a set of extra functions that can be used
within your PHP programs. You find all details about those functions in the
curl section in the PHP manual, see the online version at:
http://www.php.net/manual/ref.curl.php
PHP also offers ways to run external commands, so you can of course invoke
the curl tool that way. This is the way to use curl if you're using PHP3 or
a PHP4 built without curl module support.
3.10 What about SOAP, WebDAV, XML-RPC or similar protocols over HTTP?
Curl adheres to the HTTP spec, which basically means you can play with *any*
protocol that is built on top of HTTP. Protocols such as SOAP, WEBDAV and
XML-RPC are all examples. You can use -X to set custom requests and -H to
set custom headers (or replace internally generated ones).
Using libcurl or PHP's curl module works just as well; you'd just use the
proper library options to do the same.
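As an illustration only (the URL and the Depth header value here are made up
for this example), a WebDAV PROPFIND request done with the plain curl tool
could look like:

  curl -X PROPFIND -H "Depth: 1" http://www.server.com/dav/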
4. Running Problems
4.1. Problems connecting to SSL servers.
It took a very long time before I could sort out why curl had problems
to connect to certain SSL servers when using SSLeay or OpenSSL v0.9+.
The error sometimes showed up similar to:
It took a very long time before we could sort out why curl had problems to
connect to certain SSL servers when using SSLeay or OpenSSL v0.9+. The
error sometimes showed up similar to:
16570:error:1407D071:SSL routines:SSL2_READ:bad mac decode:s2_pkt.c:233:
@@ -295,12 +347,12 @@ FAQ
requests properly. To correct this problem, tell curl to select SSLv2 from
the command line (-2/--sslv2).
I have also seen examples where the remote server didn't like the SSLv2
There have also been cases where the remote server didn't like the SSLv2
request and instead you had to force curl to use SSLv3 with -3/--sslv3.
4.2. Why do I get problems when I use & in the URL?
4.2. Why do I get problems when I use & or % in the URL?
In general unix shells, the & letter is treated special and when used it
In most unix shells, the & character is treated specially and when used, it
runs the specified command in the background. To safely send the & as a part
of a URL, you should quote the entire URL by using single (') or double (")
quotes around it.
@@ -309,6 +361,12 @@ FAQ
curl 'http://www.altavista.com/cgi-bin/query?text=yes&q=curl'
In win32, the standard DOS shell treats the %-letter specially and you may
need to quote the string properly when % is used in it.
Also note that if you want the literal %-letter to be part of the data you
pass in a POST using -d/--data, you must encode it as '%25'.
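For example (the URL and field name are made up), to post the string "100%"
in a field called 'rate', the % must be sent URL-encoded as '%25':

  curl -d "rate=100%25" http://www.server.com/receiver.cgi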
4.3. How can I use {, }, [ or ] to specify multiple URLs?
Because those letters have a special meaning to the shell, and to be used in
@@ -318,6 +376,12 @@ FAQ
curl '{curl,www}.haxx.se'
To be able to use those letters as actual parts of the URL (without using
them for the curl URL "globbing" system), use the -g/--globoff option (curl
7.6 and later):
curl -g 'www.site.com/weirdname[].html'
4.4. Why do I get downloaded data even though the web page doesn't exist?
Curl asks remote servers for the page you specify. If the page doesn't exist
@@ -330,8 +394,8 @@ FAQ
4.5 Why do I get return code XXX from a HTTP server?
RFC2616 clearly explains the return codes. I'll make a short transcript
here. Go read the RFC for exact details:
RFC2616 clearly explains the return codes. This is a short transcript. Go
read the RFC for exact details:
4.5.1 "400 Bad Request"
@@ -367,7 +431,7 @@ FAQ
4.7. How do I keep usernames and passwords secret in Curl command lines?
I see this problem as two parts:
This problem has two sides:
The first part is to avoid having clear-text passwords in the command line
so that they don't appear in 'ps' outputs and similar. That is easily
@@ -392,8 +456,7 @@ FAQ
you have.
If there is a bug, post a bug report in the Curl Bug Track System over at
http://sourceforge.net/bugs/?group_id=976 or mail a detailed bug description
to curl-bug@haxx.se.
http://sourceforge.net/bugs/?group_id=976
Always include as many details as you can think of, including curl version,
operating system name and version and complete instructions on how to repeat
@@ -402,19 +465,21 @@ FAQ
4.9. Curl can't authenticate to the server that requires NTLM?
NTLM is a Microsoft proprietary protocol. Unfortunately, curl does not
currently support that.
currently support that. Proprietary formats are evil. You should not use
them.
5. libcurl Issues
5.1. Is libcurl thread safe?
5.1. Is libcurl thread-safe?
We have attempted to write the entire code adjusted for multi-threaded
programs. If your system has such, curl will attempt to use threadsafe
functions instead of non-safe ones.
Yes.
I am very interested in once and for all getting some kind of report or
README file from those who have used libcurl in a threaded environment,
since I haven't and I get this question more and more frequently!
We have written the libcurl code specifically adjusted for multi-threaded
programs. libcurl will use thread-safe functions instead of non-safe ones if
your system has such.
We would appreciate some kind of report or README file from those who have
used libcurl in a threaded environment.
5.2 How can I receive all data into a large memory chunk?
@@ -451,11 +516,18 @@ FAQ
5.3 How do I fetch multiple files with libcurl?
The easy interface of libcurl does not support multiple requests using the
same connection. The only available way to do multiple requests is to
init/perform/cleanup for each request.
Starting with version 7.7, curl and libcurl will have excellent support for
transferring multiple files. You should just repeatedly set new URLs with
curl_easy_setopt() and transfer each one with curl_easy_perform(). The handle
you get from curl_easy_init() is not only reusable starting with libcurl
7.7, but you're also encouraged to reuse it if you can, as that will enable
libcurl to use persistent connections.
5.4 Does libcurl do Winsock initing on win32 systems?
For libcurl prior to 7.7, there was no multiple file support. The only
available way to do multiple requests was to init/perform/cleanup for each
transfer.
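A minimal sketch in C of how such handle re-use could look with libcurl 7.7
or later (error checking left out for brevity, and the URLs are only
examples):

  #include <curl/curl.h>

  int main(void)
  {
    CURL *handle = curl_easy_init();

    if(handle) {
      /* first transfer */
      curl_easy_setopt(handle, CURLOPT_URL, "http://curl.haxx.se/");
      curl_easy_perform(handle);

      /* re-use the same handle (and possibly the same connection) for a
         second transfer */
      curl_easy_setopt(handle, CURLOPT_URL, "http://curl.haxx.se/docs/");
      curl_easy_perform(handle);

      curl_easy_cleanup(handle);
    }
    return 0;
  }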
5.4 Does libcurl do Winsock initialization on win32 systems?
No.
@@ -465,9 +537,27 @@ FAQ
use several different libraries and parts, and there's no reason for every
single library to do this.
6. License Issues
5.5 Does CURLOPT_FILE and CURLOPT_INFILE work on win32 ?
NOTE: This section is now updated to concern curl 7.5.2 or later!
Yes, but you cannot open a FILE * and pass the pointer to a DLL and have
that DLL use the FILE *. If you set CURLOPT_FILE you must also use
CURLOPT_WRITEFUNCTION to set a function that writes the file, even
if that simply writes the data to the specified FILE*. Similarly, if you use
CURLOPT_INFILE you must also specify CURLOPT_READFUNCTION.
(Provided by Joel DeYoung and Bob Schader)
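A rough sketch of what that could look like (the callback name, the output
file name and the URL are made up for this example):

  #include <stdio.h>
  #include <curl/curl.h>

  /* pass-through write callback; libcurl calls this with received data */
  static size_t write_data(void *ptr, size_t size, size_t nmemb, void *stream)
  {
    return fwrite(ptr, size, nmemb, (FILE *)stream);
  }

  int main(void)
  {
    CURL *handle = curl_easy_init();
    FILE *outfile = fopen("dump.html", "wb");

    if(handle && outfile) {
      curl_easy_setopt(handle, CURLOPT_URL, "http://www.server.com/");
      curl_easy_setopt(handle, CURLOPT_FILE, outfile);             /* the FILE * */
      curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, write_data); /* the writer */
      curl_easy_perform(handle);
      curl_easy_cleanup(handle);
    }
    if(outfile)
      fclose(outfile);
    return 0;
  }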
5.6 What about Keep-Alive or persistent connections?
Starting with version 7.7, curl and libcurl will have excellent support for
persistent connections when transferring several files from the same server.
Curl will attempt to reuse connections for all URLs specified on the same
command line/config file, and libcurl will reuse connections for all
transfers that are made using the same libcurl handle.
Previous versions had no persistent connection support.
6. License Issues
Curl and libcurl are released under a MIT/X derivative license *or* the MPL,
the Mozilla Public License. To get a really good answer to your license
@@ -485,27 +575,25 @@ FAQ
6.2. I have a closed-source program, can I use the libcurl library?
Yes.
Yes!
libcurl does not put any restrictions on the program that uses the
library.
libcurl does not put any restrictions on the program that uses the library.
6.3. I have a BSD licensed program, can I use the libcurl library?
Yes.
Yes!
libcurl does not put any restrictions on the program that uses the
library.
libcurl does not put any restrictions on the program that uses the library.
6.4. I have a program that uses LGPL libraries, can I use libcurl?
Yes.
Yes!
The LGPL license don't clash with other licenses.
The LGPL license doesn't clash with other licenses.
6.5. Can I modify curl/libcurl for my program and keep the changes secret?
Yes.
Yes!
The MIT/X derivative license practically allows you to do almost anything with
the sources, on the condition that the copyright texts in the sources are
@@ -513,9 +601,12 @@ FAQ
6.6. Can you please change the curl/libcurl license to XXXX?
No. We carefully picked this license years ago and a large amount of people
have contributed with source code knowing that this is the license we
use. This license puts the restrictions we want on curl/libcurl and it does
not spread to other programs or libraries that use it. The recent dual
license modification should make it possible for everyone to use libcurl or
curl in their projects, no matter what license they already have in use.
No.
We have carefully picked this license after years of development and
discussions, and a large number of people have contributed source code
knowing that this is the license we use. This license puts the restrictions
we want on curl/libcurl and it does not spread to other programs or
libraries that use it. The recent dual license modification should make it
possible for everyone to use libcurl or curl in their projects, no matter
what license they already have in use.

View File

@@ -17,18 +17,21 @@ Misc
- progress bar/time specs while downloading
- "standard" proxy environment variables support
- config file support
- compiles on win32
- compiles on win32 (reported built on 29 operating systems)
- redirectable stderr
- use selected network interface for outgoing traffic
- IPv6 support
- persistent connections
HTTP
- HTTP/1.1 compliant
- GET
- PUT
- HEAD
- POST
- multipart POST
- authentication
- resume
- resume (both GET and PUT)
- follow redirects
- maximum amount of redirects to follow
- custom HTTP request
@@ -71,6 +74,7 @@ FTP
TELNET
- connection negotiation
- custom telnet options
- stdin/stdout I/O
LDAP (*2)

View File

@@ -7,24 +7,35 @@
How To Compile
Curl has been compiled and built on numerous different operating systems. The
way to proceed is mainly divided in two different ways: the unix way or the
way to proceed is mainly divided in two different ways: the unix way or the
windows way.
If you're using Windows (95, 98, NT) or OS/2, you should continue reading from
the Win32 or OS/2 headers further down. All other systems should be capable of
being installed as described below.
If you're using Windows (95/98/NT/ME/2000 or whatever) or OS/2, you should
continue reading from the Win32 or OS/2 headers further down. All other
systems should be capable of being installed as described below.
UNIX
====
The configure script *always* tries to find a working SSL library unless
explicitly told not to. If you have OpenSSL installed in the default
search path for your compiler/linker, you don't need to do anything
special:
A normal unix installation is made in three or four steps (after you've
unpacked the source archive):
./configure
make
make test (optional)
make install
If you have OpenSSL installed in /usr/local/ssl, you can run configure
You probably need to be root when doing the last command.
If you want to install curl in a different file hierarchy than /usr/local,
you need to specify that already when running configure:
./configure --prefix=/path/to/curl/tree
The configure script always tries to find a working SSL library unless
explicitly told not to. If you have OpenSSL installed in the default search
path for your compiler/linker, you don't need to do anything special. If
you have OpenSSL installed in e.g. /usr/local/ssl, you can run configure
like:
./configure --with-ssl
@@ -54,46 +65,40 @@ UNIX
env CPPFLAGS="-I/path/to/ssl/include" LDFLAGS="-L/path/to/ssl/lib" \
./configure
If your SSL library was compiled with rsaref (usually for use in
the United States), you may also need to set:
If your SSL library was compiled with rsaref (usually for use in the United
States), you may also need to set:
LIBS=-lRSAglue -lrsaref
(from Doug Kaufman <dkaufman@rahul.net>)
Without SSL support, just run:
./configure
Then run:
make
Use the executable `curl` in src/ directory.
To install curl on your system, run
make install
This will copy curl to /usr/local/bin/ (or $prefix/bin if you used the
--prefix option to configure) and it copies the man pages, the lib and the
include files to suitable places.
To make sure everything runs as it is supposed to, run the test suite:
make test
(as suggested by Doug Kaufman)
KNOWN PROBLEMS
If you happen to have autoconf installed, but a version older than
2.12 you will get into trouble. Then you can still build curl by
issuing these commands: (from Ralph Beckmann <rabe@uni-paderborn.de>)
If you happen to have autoconf installed, but a version older than 2.12,
you will get into trouble. Then you can still build curl by issuing these
commands (note that this requires curl to be built statically): (from Ralph
Beckmann)
./configure [...]
cd lib; make; cd ..
cd src; make; cd ..
cp src/curl elsewhere/bin/
OPTIONS
As suggested by David West, you can make a faked version of autoconf and
autoheader:
----start of autoconf----
#!/bin/bash
#fake autoconf for building curl
if [ "$1" = "--version" ] then
echo "Autoconf version 2.13"
fi
----end of autoconf----
Then make autoheader a symbolic link to the same script and make sure
they're executable and set to appear in the path *BEFORE* the actual (but
obsolete) autoconf and autoheader scripts.
MORE OPTIONS
Remember, to force configure to use the standard cc compiler if both
cc and gcc are present, run configure like
@@ -124,6 +129,14 @@ UNIX
./configure --with-krb4=/usr/athena
If your system supports shared libraries, but you want to build a static
version only, you can disable building the shared version by using:
./configure --disable-shared
If you're a curl developer and use gcc, you might want to enable more
debug options with the --enable-debug option.
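That is:

  ./configure --enable-debug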
Win32
=====
@@ -132,27 +145,27 @@ Win32
MingW32 (GCC-2.95) style
------------------------
Run the 'mingw32.bat' file to get the proper environment variables
set, then run 'make -f Makefile.m32' in the lib/ dir and then
'make -f Makefile.m32' in the src/ dir.
set, then run 'make mingw32' in the root dir.
If you have any problems linking libraries or finding header files,
be sure to look at the provided "Makefile.m32" files for the proper
If you have any problems linking libraries or finding header files, be
sure to verify that the provided "Makefile.m32" files use the proper
paths, and adjust as necessary.
Cygwin style
------------
Almost identical to the unix installation. Run the configure script
in the curl root with 'sh configure'. Make sure you have the sh
executable in /bin/ or you'll see the configure fail towards the
end.
Almost identical to the unix installation. Run the configure script in
the curl root with 'sh configure'. Make sure you have the sh
executable in /bin/ or you'll see the configure fail towards the end.
Run 'make'
Microsoft command line style
----------------------------
Run the 'vcvars32.bat' file to get the proper environment variables
set, then run 'nmake -f Makefile.vc6' in the lib/ dir and then
'nmake -f Makefile.vc6' in the src/ dir.
set, then run 'nmake vc' in the root dir.
The vcvars32.bat file is part of the Microsoft development
environment.
IDE-style
-------------------------
@@ -170,9 +183,9 @@ Win32
For VC++ 6, there's an included Makefile.vc6 that should be possible
to use out-of-the-box.
Microsoft note: add /Zm200 to the compiler options, as the hugehelp.c
won't compile otherwise due to "too long puts string" or something
like that!
Microsoft note: add /Zm200 to the compiler options to increase the
compiler's memory allocation limit, as the hugehelp.c won't compile
due to "too long puts string".
With SSL:
@@ -180,24 +193,24 @@ Win32
MingW32 (GCC-2.95) style
------------------------
Run the 'mingw32.bat' file to get the proper environment variables
set, then run 'make -f Makefile.m32 SSL=1' in the lib/ dir and then
'make -f Makefile.m32 SSL=1' in the src/ dir.
set, then run 'make mingw32-ssl' in the root dir.
If you have any problems linking libraries or finding header files,
be sure to look at the provided "Makefile.m32" files for the proper
If you have any problems linking libraries or finding header files, be
sure to look at the provided "Makefile.m32" files for the proper
paths, and adjust as necessary.
Cygwin style
------------
This hasn't been done and we have no reports on how to do it, but it should
be identical to the unix setup for the same purpose. See above.
Microsoft command line style
----------------------------
Run the 'vcvars32.bat' file to get the proper environment variables
set, then run 'nmake -f Makefile.vc6 release-ssl' in the lib/ dir and
then 'nmake -f Makefile.vc6' in the src/ dir.
set, then run 'nmake vc-ssl' in the root dir.
The vcvars32.bat file is part of the Microsoft development
environment.
Microsoft / Borland style
-------------------------
@@ -238,7 +251,7 @@ IBM OS/2
PORTS
=====
Just to show off, this is a probably incomplete list of known hardware and
This is a probably incomplete list of known hardware and
operating systems that curl has been compiled for:
- Ultrix
@@ -256,18 +269,20 @@ PORTS
- PowerPC Mac OS X
- Sparc Linux
- Sparc Solaris 2.4, 2.5, 2.5.1, 2.6, 7, 8
- Sparc SunOS 4.1.*
- Sparc SunOS 4.1.X
- i386 BeOS
- i386 FreeBSD
- i386 Linux 1.3, 2.0, 2.2, 2.3, 2.4
- i386 NetBSD
- i386 OS/2
- i386 OpenBSD
- i386 SCO unix
- i386 Solaris 2.7
- i386 Windows 95, 98, NT, 2000
- i386 Windows 95, 98, ME, NT, 2000
- ia64 Linux 2.3.99
- m68k AmigaOS 3
- m68k OpenBSD
- StrongARM NetBSD 1.4.1
OpenSSL
=======

View File

@@ -1,3 +1,4 @@
Updated for curl 7.7 on March 13, 2001
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
@@ -6,15 +7,14 @@
INTERNALS
The project is kind of split in two. The library and the client. The client
part uses the library, but the library is meant to be designed to allow other
applications to use it.
The project is split in two. The library and the client. The client part uses
the library, but the library is designed to allow other applications to use
it.
Thus, the largest amount of code and complexity is in the library part.
The largest amount of code and complexity is in the library part.
CVS
===
All changes to the sources are committed to the CVS repository as soon as
they're somewhat verified to work. Changes shall be committed as independently
as possible so that individual changes can be spotted and tracked more easily
@@ -27,47 +27,51 @@ Windows vs Unix
===============
There are a few differences in how to program curl the unix way compared to
the Windows way. The four most notable details are:
the Windows way. The four perhaps most notable details are:
1. Different function names for socket operations.
In curl, this is solved with defines and macros, so that the source looks
the same at all places except for the header file that defines them. The
macros in use are sclose(), sread() and swrite().
2. Windows requires a couple of init calls for the socket stuff.
Those must be made by the application that uses libcurl, in curl that means
src/main.c has some code #ifdef'ed to do just that.
1. Different function names for close(), read(), write()
2. Windows requires a couple of init calls for the socket stuff
3. The file descriptors for network communication and file operations are
not easily interchangable as in unix
not easily interchangeable as in unix.
We avoid this by not trying any funny tricks on file descriptors.
4. When writing data to stdout, Windows makes end-of-lines the DOS way, thus
destroying binary data, although you do want that conversion if it is
text coming through... (sigh)
In curl, (1) is solved with defines and macros, so that the source looks the
same at all places except for the header file that defines them.
We set stdout to binary under windows
(2) must be made by the application that uses libcurl, in curl that means
src/main.c has some code #ifdef'ed to do just that.
(3) is simply avoided by not trying any funny tricks on file descriptors.
(4) we set stdout to binary under windows
Inside the source code, I do make an effort to avoid '#ifdef WIN32'. All
Inside the source code, we make an effort to avoid '#ifdef [Your OS]'. All
conditionals that deal with features *should* instead be in the format
'#ifdef HAVE_THAT_WEIRD_FUNCTION'. Since Windows can't run configure scripts,
I maintain two config-win32.h files (one in / and one in src/) that are
we maintain two config-win32.h files (one in / and one in src/) that are
supposed to look exactly like a config.h file would have looked on a
Windows machine!
Generally speaking: always remember that this will be compiled on dozens of
operating systems. Don't walk on the edge.
Library
=======
As described elsewhere, libcurl is meant to get two different "layers" of
interfaces. At the present point only the high-level, the "easy", interface
has been fully implemented and documented. We assume the easy-interface in
this description, the low-level interface will be documented when fully
implemented.
There are plenty of entry points to the library, namely each publicly defined
function that libcurl offers to applications. All of those functions are
rather small and easy-to-follow. All the ones prefixed with 'curl_easy' are
put in the lib/easy.c file.
All printf()-style functions use the supplied clones in lib/mprintf.c. This
makes sure we stay absolutely platform independent.
curl_easy_init() allocates an internal struct and makes some initializations.
The returned handle does not reveal internals.
@@ -77,38 +81,58 @@ Library
curl_easy_perform() does a whole lot of things:
The function analyzes the URL, get the different components and connects to
the remote host. This may involve using a proxy and/or using SSL. The
GetHost() function in lib/hostip.c is used for looking up host names.
It starts off in the lib/easy.c file by calling curl_transfer(), but the main
work is done in lib/url.c. The function first analyzes the URL, separates the
different components and connects to the remote host. This may involve using
a proxy and/or using SSL. The Curl_gethost() function in lib/hostip.c is used
for looking up host names.
When connected, the proper function is called. The functions are named after
the protocols they handle. ftp(), http(), dict(), etc. They all reside in
their respective files (ftp.c, http.c and dict.c).
When connected, the proper protocol-specific function is called. The
functions are named after the protocols they handle. Curl_ftp(), Curl_http(),
Curl_dict(), etc. They all reside in their respective files (ftp.c, http.c
and dict.c).
The protocol-specific functions deal with protocol-specific negotiations and
setup. They have access to the sendf() (from lib/sendf.c) function to send
printf-style formatted data to the remote host and when they're ready to make
the actual file transfer they call the Transfer() function (in
lib/download.c) to do the transfer. All printf()-style functions use the
supplied clones in lib/mprintf.c.
The protocol-specific functions of course deal with protocol-specific
negotiations and setup. They have access to the Curl_sendf() (from
lib/sendf.c) function to send printf-style formatted data to the remote host
and when they're ready to make the actual file transfer they call the
Curl_Transfer() function (in lib/transfer.c) to set up the transfer and then
return. Curl_perform() then calls Transfer() in lib/transfer.c, which performs
the entire file transfer. Curl_perform() is what does the main "connect - do
- transfer - done" loop. It loops if there's a Location: to follow.
While transfering, the progress functions in lib/progress.c are called at a
During transfer, the progress functions in lib/progress.c are called at a
frequent interval (or at the user's choice, a specified callback might get
called). The speedcheck functions in lib/speedcheck.c are also used to verify
that the transfer is as fast as required.
When completed curl_easy_cleanup() should be called to free up used
When completed, curl_easy_cleanup() should be called to free up used
resources.
A quick roundup on internal function sequences (many of these call
protocol-specific function-pointers):
curl_connect - connects to a remote site and does initial connect fluff
This also checks for an existing connection to the requested site and uses
that one if it is possible.
curl_do - starts a transfer
curl_transfer() - transfers data
curl_done - ends a transfer
curl_disconnect - disconnects from a remote site. This is called when the
disconnect is really requested, which doesn't necessarily have to be
exactly after curl_done in case we want to keep the connection open for
a while.
HTTP(S)
HTTP offers a lot and is the protocol in curl that uses the most lines of
code. There is a special file (lib/formdata.c) that offers all the multipart
post functions.
base64-functions for user+password stuff is in (lib/base64.c) and all
functions for parsing and sending cookies are found in
(lib/cookie.c).
base64-functions for user+password stuff (and more) are in (lib/base64.c) and
all functions for parsing and sending cookies are found in (lib/cookie.c).
HTTPS uses almost exactly the same procedure as HTTP, with only two
exceptions: the connect procedure is different and the function used to read
@@ -116,11 +140,27 @@ Library
the source by the use of curl_read() for reading and curl_write() for writing
data to the remote server.
http_chunks.c contains functions that understand HTTP 1.1 chunked transfer
encoding.
An interesting detail with the HTTP(S) request is the add_buffer() series of
functions we use. They append data to one single buffer, and when the
building is done the entire request is sent off in one single write. This is
done this way to overcome problems with flawed firewalls and lame servers.
FTP
The if2ip() function can be used for getting the IP number of a specified
network interface, and it resides in lib/if2ip.c. It is only used for the FTP
PORT command.
The Curl_if2ip() function can be used for getting the IP number of a
specified network interface, and it resides in lib/if2ip.c.
Curl_ftpsendf() is used for sending FTP commands to the remote server. It was
made a separate function to prevent us programmers from forgetting that they
must be CRLF terminated. They must also be sent in one single write() to make
firewalls and similar happy.
Kerberos
The kerberos support is mainly in lib/krb4.c and lib/security.c.
TELNET
@@ -145,26 +185,85 @@ Library
lib/getenv.c offers curl_getenv() which is for reading environment variables
in a neat platform independent way. That's used in the client, but also in
lib/url.c when checking the proxy environment variables.
lib/url.c when checking the proxy environment variables. Note that contrary
to the normal unix getenv(), this returns an allocated buffer that must be
free()ed after use.
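A short usage sketch (the variable name is only an example, note the free()):

  char *proxy = curl_getenv("http_proxy");
  if(proxy) {
    /* use the value ... */
    free(proxy);  /* curl_getenv() returns an allocated buffer */
  }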
lib/netrc.c holds the .netrc parser
lib/timeval.c features replacement functions for systems that don't have
gettimeofday().
gettimeofday() and a few support functions for timeval conversions.
A function named curl_version() that returns the full curl version string is
found in lib/version.c.
If authentication is requested but no password is given, a getpass_r() clone
exists in lib/getpass.c. libcurl offers a custom callback that can be used
instead of this, but it doesn't change much for us.
Persistent Connections
======================
With curl 7.7, we added persistent connection support to libcurl which has
introduced a somewhat different treatment of things inside libcurl.
o The 'UrlData' struct returned in the curl_easy_init() call must never
hold connection-oriented data. It is meant to hold the root data as well
as all the options etc that the library-user may choose.
o The 'UrlData' struct holds the cache array of pointers to 'connectdata'
structs. There's one connectdata struct for each connection that libcurl
knows about.
o This also enables the 'curl handle' to be reused on subsequent transfers,
something that was illegal in pre-7.7 versions.
o When we are about to perform a transfer with curl_easy_perform(), we first
check for an already existing connection in the cache that we can use,
otherwise we create a new one and add it to the cache. If the cache is full
already when we add a new connection, we close one of the present ones. We
select which one to close depending on the close policy that may have been
previously set.
o When the transfer operation is complete, we try to leave the connection
open. Particular options may tell us not to, and protocols may signal closure
on connections and then we don't keep it open of course.
o When curl_easy_cleanup() is called, we close all still opened connections.
Do note that the curl handle must be re-used in order for the persistent
connections to work.
Library Symbols
===============
All symbols used internally in libcurl must use a 'Curl_' prefix if they're
used in more than a single file. Single-file symbols must be made
static. Public (exported) symbols must use a 'curl_' prefix. (There are
exceptions, but they are destined to be changed to follow this pattern in the
future.)
Return Codes and Informationals
===============================
I've made things simple. Almost every function in libcurl returns a CURLcode,
which must be CURLE_OK if everything is OK or otherwise a suitable error code
as the curl/curl.h include file defines. The very spot that detects an error
must use the Curl_failf() function to set the human-readable error
description.
In aiding the user to understand what's happening and to debug curl usage, we
must supply a fair amount of informational messages by using the Curl_infof()
function. Those messages are only displayed when the user explicitly asks for
them. They are best used when revealing information that isn't otherwise
obvious.
Client
======
main() resides in src/main.c together with most of the client code.
src/hugehelp.c is automatically generated by the mkhelp.pl perl script to
display the complete "manual" and the src/urlglob.c file holds the functions
used for the multiple-URL support.
used for the URL-"globbing" support. Globbing in the sense that the {} and []
expansion stuff is there.
The client mostly mess around to setup its config struct properly, then it
calls the curl_easy_*() functions of the library and when it gets back
The client mostly messes around to set up its 'config' struct properly, then
it calls the curl_easy_*() functions of the library and when it gets back
control after the curl_easy_perform() it cleans up the library, checks status
and exits.
@@ -173,10 +272,30 @@ Client
curl_easy_getinfo() function to extract useful information from the curl
session.
Recent versions may loop and do all that several times if many URLs were
specified on the command line or config file.
Memory Debugging
================
The file named lib/memdebug.c contains debug-versions of a few
functions. Functions such as malloc, free, fopen, fclose, etc that somehow
deal with resources that might give us problems if we "leak" them. The
functions in the memdebug system do nothing fancy, they do their normal
function and then log information about what they just did. The logged data
can then be analyzed after a complete session.
memanalyze.pl is a perl script only present in CVS (not part of the
release archives) that analyzes a log file generated by the memdebug
system. It detects if resources are allocated but never freed and other kinds
of errors related to resource management.
Use -DMALLOCDEBUG when compiling to enable memory debugging.
Test Suite
==========
During November 2000, a test suite has evolved. It is placed in its own
Since November 2000, a test suite has evolved. It is placed in its own
subdirectory directly off the root in the curl archive tree, and it contains
a bunch of scripts and a lot of test case data.
@@ -184,5 +303,20 @@ Test Suite
httpserver.pl and ftpserver.pl before all the test cases are performed. The
test suite currently only runs on unix-like platforms.
You'll find a complete description of the test case data files in the README
file in the test directory.
You'll find a complete description of the test case data files in the
tests/README file.
The test suite automatically detects if curl was built with the memory
debugging enabled, and if it was it will detect memory leaks too.
Building Releases
=================
There's no magic to this. When you consider everything stable enough to be
released, run the 'maketgz' script (using 'make distcheck' will give you a
pretty good view on the status of the current sources). maketgz prompts for
the version number of the client and the library before it creates a release
archive. maketgz uses 'make dist' for the actual archive building, which is
why you need to fill in the Makefile.am files properly with the files that
should be included in the release archives.

View File

@@ -4,58 +4,91 @@
| | | |_) | (__| |_| | | | |
|_|_|_.__/ \___|\__,_|_| |_|
How To Use Libcurl In Your C/C++ Program
How To Use Libcurl In Your Program
[ libcurl can be used directly from within your PHP or Perl programs as well,
look elsewhere for documentation on this ]
Interfaces
libcurl currently offers two different interfaces to the URL transfer
engine. They can be seen as one low-level and one high-level, in the sense
that the low-level one will allow you to deal with a lot more details but on
the other hand not offer as many fancy features (such as Location:
following). The high-level interface is supposed to be a built-in
implementation of the low-level interface. You will not be able to mix
function calls from the different layers.
As we currently ONLY support the high-level interface, the so called easy
interface, I will not attempt to describe any low-level functions at this
point.
Function descriptions
The interface is meant to be very simple for very simple
implementations. Thus, we have minimized the number of entries.
The interface is meant to be very simple for applications/programmers, hence
the name "easy". We have therefore minimized the number of entries.
The Easy Interface
When using the easy interface, you init your easy-session and get a handle,
which you use as input to the following interface functions you use.
When using the easy interface, you init your session and get a handle, which
you use as input to the following interface functions you use. Use
curl_easy_init() to get the handle.
You continue by setting all the options you want in the upcoming transfer,
most important among them is the URL itself. You might want to set some
callbacks as well that will be called from the library when data is available
etc.
most important among them is the URL itself (you can't transfer anything
without a specified URL as you may have figured out yourself). You might want
to set some callbacks as well that will be called from the library when data
is available etc. curl_easy_setopt() is there for this.
When all is setup, you tell libcurl to perform the transfer. It will then do
the entire operation and won't return until it is done or failed.
When all is setup, you tell libcurl to perform the transfer using
curl_easy_perform(). It will then do the entire operation and won't return
until it is done or failed.
After the transfer has been made, you cleanup the easy-session's handle and
libcurl is entirely off the hook!
After the transfer has been made, you clean up the session with
curl_easy_cleanup() and libcurl is entirely off the hook! If you want
persistent connections, you don't clean up immediately, but instead run ahead
and perform other transfers. See the Persistent Connections chapter below.
curl_easy_init()
curl_easy_setopt()
curl_easy_perform()
curl_easy_cleanup()
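A minimal sketch of that sequence in C (no error checking, and the URL is
only an example):

  #include <curl/curl.h>

  int main(void)
  {
    CURL *handle = curl_easy_init();
    if(handle) {
      curl_easy_setopt(handle, CURLOPT_URL, "http://curl.haxx.se/");
      curl_easy_perform(handle);   /* received data goes to stdout by default */
      curl_easy_cleanup(handle);
    }
    return 0;
  }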
While the above mentioned four functions are the main functions to use in the
easy interface, there is a series of other helpful functions to use. They
are:
While the above four functions are the main functions to use in the easy
interface, there is a series of helpful functions to use. They are:
curl_version() - displays the libcurl version
curl_getdate() - converts a date string to time_t
curl_getenv() - portable environment variable reader
curl_easy_getinfo() - get information about a performed transfer
curl_formparse() - helps building a HTTP form POST
curl_formfree() - free a list built with curl_formparse()
curl_slist_append() - builds a linked list
curl_slist_free_all() - frees a whole curl_slist
curl_version() - displays the libcurl version
curl_getdate() - converts a date string to time_t
curl_getenv() - portable environment variable reader
curl_formparse() - helps building a HTTP form POST
curl_slist_append() - builds a linked list
curl_slist_free_all() - frees a whole curl_slist
For details on these, read the separate man pages.
Read the separate man pages for these functions for details!
Portability
libcurl works *exactly* the same, on any of the platforms it compiles and
builds on.
There's only one caution, and that is the win32 platform that may(*) require
you to init the winsock stuff before you use the libcurl functions. Details
on this are noted on the curl_easy_init() man page.
(*) = it appears as if users of the cygwin environment get this done
automatically.
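A hedged sketch of such an init, assuming the application does it itself (the
helper function name is made up, and the winsock version to request may
differ for your setup; on win32 you also need to link with the winsock
library):

  #ifdef WIN32
  #include <winsock2.h>
  #endif

  /* call once before any libcurl use; returns non-zero on failure */
  static int init_winsock(void)
  {
  #ifdef WIN32
    WSADATA wsaData;
    if(WSAStartup(MAKEWORD(1, 1), &wsaData) != 0)
      return 1;
  #endif
    return 0;
  }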
Threads
Never *ever* call curl-functions simultaneously using the same handle from
several threads. libcurl is thread-safe and can be used in any number of
threads, but you must use separate curl handles if you want to use libcurl in
more than one thread simultaneously.
Persistent Connections
With libcurl 7.7, persistent connections were added. Persistent connections
mean that libcurl can re-use the same connection for several transfers, if
the conditions are right.
libcurl will *always* attempt to use persistent connections. Whenever you use
curl_easy_perform(), libcurl will attempt to use an existing connection to do
the transfer, and if none exists it'll open a new one that will be subject
to re-use on a possible following call to curl_easy_perform().
To allow libcurl to take full advantage of persistent connections, you should
do as many of your file transfers as possible using the same curl
handle. When you call curl_easy_cleanup(), all the possibly open connections
held by libcurl will be closed and forgotten.
Note that the options set with curl_easy_setopt() will be used on every
repeated curl_easy_perform() call.
Compatibility with older libcurls
Repeated curl_easy_perform() calls on the same handle were not supported in
pre-7.7 versions, and caused confusion and undefined behaviour.

View File

@@ -15,22 +15,26 @@ SIMPLE USAGE
curl ftp://ftp.funet.fi/README
Get a gopher document from funet's gopher server:
curl gopher://gopher.funet.fi
Get a web page from a server using port 8000:
curl http://www.weirdserver.com:8000/
Get a list of the root directory of an FTP site:
curl ftp://ftp.fts.frontec.se/
curl ftp://cool.haxx.se/
Get a gopher document from funet's gopher server:
curl gopher://gopher.funet.fi
Get the definition of curl from a dictionary:
curl dict://dict.org/m:curl
Fetch two documents at once:
curl ftp://cool.haxx.se/ http://www.weirdserver.com:8000/
DOWNLOAD TO A FILE
Get a web page and store in a local file:
@@ -43,6 +47,10 @@ DOWNLOAD TO A FILE
curl -O http://www.netscape.com/index.html
Fetch two files and store them with their remote names:
curl -O www.haxx.se/index.html -O curl.haxx.se/download.html
USING PASSWORDS
FTP
@@ -178,7 +186,7 @@ DETAILED INFORMATION
-D/--dump-header option when getting files from both FTP and HTTP, and it
will then store the headers in the specified file.
Store the HTTP headers in a separate file:
Store the HTTP headers in a separate file (headers.txt in the example):
curl --dump-header headers.txt curl.haxx.se
@@ -237,32 +245,32 @@ POST (HTTP)
-F accepts parameters like -F "name=contents". If you want the contents to
be read from a file, use <@filename> as contents. When specifying a file,
you can also specify which content type the file is, by appending
';type=<mime type>' to the file name. You can also post contents of several
files in one field. So that the field name 'coolfiles' can be sent three
files with different content types in a manner similar to:
you can also specify the file content type by appending ';type=<mime type>'
to the file name. You can also post the contents of several files in one field.
For example, the field name 'coolfiles' is used to send three files, with
different content types using the following syntax:
curl -F "coolfiles=@fil1.gif;type=image/gif,fil2.txt,fil3.html" \
http://www.post.com/postit.cgi
If content-type is not specified, curl will try to guess from the extension
(it only knows a few), or use the previously specified type (from an earlier
file if several files are specified in a list) or finally using the default
type 'text/plain'.
If the content-type is not specified, curl will try to guess from the file
extension (it only knows a few), or use the previously specified type
(from an earlier file if several files are specified in a list) or else it
will use the default type 'text/plain'.
Emulate a fill-in form with -F. Let's say you fill in three fields in a
form. One field is a file name which to post, one field is your name and one
field is a file description. We want to post the file we have written named
"cooltext.txt". To let curl do the posting of this data instead of your
favourite browser, you have to check out the HTML of the form page to get to
know the names of the input fields. In our example, the input field names are
favourite browser, you have to read the HTML source of the form page and find
the names of the input fields. In our example, the input field names are
'file', 'yourname' and 'filedescription'.
curl -F "file=@cooltext.txt" -F "yourname=Daniel" \
-F "filedescription=Cool text file with cool text inside" \
http://www.post.com/postit.cgi
So, to send two files in one post you can do it in two ways:
To send two files in one post you can do it in two ways:
1. Send multiple files in a single "field" with a single field name:
@@ -272,11 +280,11 @@ POST (HTTP)
curl -F "docpicture=@dog.gif" -F "catpicture=@cat.gif"
REFERER
REFERRER
A HTTP request has the option to include information about which address
that referred to actual page, and curl allows the user to specify that
referrer to get specified on the command line. It is especially useful to
that referred to the actual page. Curl allows you to specify the
referrer to be used on the command line. It is especially useful to
fool or trick stupid servers or CGI scripts that rely on that information
being available or containing certain data.
@@ -345,13 +353,17 @@ COOKIES
Note that by specifying -b you enable the "cookie awareness" and with -L
you can make curl follow a location: (which often is used in combination
with cookies). So that if a site sends cookies and a location, you can
use a non-existing file to trig the cookie awareness like:
use a non-existing file to trigger the cookie awareness like:
curl -L -b empty-file www.example.com
curl -L -b empty.txt www.example.com
The file to read cookies from must be formatted using plain HTTP headers OR
as netscape's cookie file. Curl will determine what kind it is based on the
file contents.
file contents. In the above command, curl will parse the header and store
the cookies received from www.example.com. curl will send to the server the
stored cookies which match the request as it follows the location. The
file "empty.txt" may be a non-existant file.
PROGRESS METER
@@ -384,12 +396,12 @@ PROGRESS METER
SPEED LIMIT
Curl offers the user to set conditions regarding transfer speed that must
be met to let the transfer keep going. By using the switch -y and -Y you
can make curl abort transfers if the transfer speed doesn't exceed your
given lowest limit for a specified time.
Curl allows the user to set the transfer speed conditions that must be met
to let the transfer keep going. By using the -y and -Y switches you
can make curl abort transfers if the transfer speed is below the specified
lowest limit for a specified time.
To let curl abandon downloading this page if its slower than 3000 bytes per
To have curl abort the download if the speed is slower than 3000 bytes per
second for 1 minute, run:
curl -y 3000 -Y 60 www.far-away-site.com
@@ -455,9 +467,13 @@ EXTRA HEADERS
curl -H "X-you-and-me: yes" www.love.com
This can also be useful in case you want curl to send a different text in
a header than it normally does. The -H header you specify then replaces the
header curl would normally send.
This can also be useful in case you want curl to send a different text in a
header than it normally does. The -H header you specify then replaces the
header curl would normally send. If you replace an internal header with an
empty one, you prevent that header from being sent. To prevent the Host:
header from being used:
curl -H "Host:" www.server.com
FTP and PATH NAMES
@@ -598,7 +614,7 @@ RESUMING FILE TRANSFERS
(*1) = This requires that the ftp server supports the non-standard command
SIZE. If it doesn't, curl will say so.
(*2) = This requires that the wb server supports at least HTTP/1.1. If it
(*2) = This requires that the web server supports at least HTTP/1.1. If it
doesn't, curl will say so.
TIME CONDITIONS
@@ -726,16 +742,60 @@ KERBEROS4 FTP TRANSFER
There's no use for a password on the -u switch, but a blank one will make
curl ask for one and you already entered the real password to kauth.
MAILING LIST
TELNET
We have an open mailing list to discuss curl, its development and things
relevant to this.
The curl telnet support is basic and very easy to use. Curl passes all data
passed to it on stdin to the remote server. Connect to a remote telnet
server using a command line similar to:
To subscribe, mail curl-request@contactor.se with "subscribe <fill in your
email address>" in the body.
curl telnet://remote.server.com
To post to the list, mail curl@contactor.se.
And enter the data to pass to the server on stdin. The result will be sent
to stdout or to the file you specify with -o.
To unsubcribe, mail curl-request@contactor.se with "unsubscribe <your
subscribed email address>" in the body.
You might want the -N/--no-buffer option to switch off the buffered output
for slow connections or similar.
NOTE: the telnet protocol does not specify any way to login with a specified
user and password so curl can't do that automatically. To do that, you need
to track when the login prompt is received and send the username and
password accordingly.
PERSISTENT CONNECTIONS
Specifying multiple files on a single command line will make curl transfer
all of them, one after the other in the specified order.
libcurl will attempt to use persistent connections for the transfers so that
the second transfer to the same host can use the same connection that was
already initiated and left open by the previous transfer. This greatly
decreases connection time for all but the first transfer and makes far
better use of the network.
Note that curl cannot use persistent connections for transfers that span
separate curl invocations. Try to stuff as many URLs as possible on the
same command line if they use the same host, as that will make the
transfers faster. If you use an HTTP proxy for file transfers, practically
all transfers will be persistent.
Persistent connections were introduced in curl 7.7.
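For instance, a single invocation like this (the host and file names are just
examples) lets the second transfer re-use the connection opened for the first:
curl -O http://www.example.com/file1.html -O http://www.example.com/file2.html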
MAILING LISTS
For your convenience, we have several open mailing lists to discuss curl,
its development and things relevant to this.
To subscribe to the main curl list, mail curl-request@contactor.se with
"subscribe <fill in your email address>" in the body.
To subscribe to the curl-library users/developers list, follow the
instructions at http://curl.haxx.se/mail/
To subscribe to the curl-announce list, to only get information about new
releases, follow the instructions at http://curl.haxx.se/mail/
To subscribe to the curl-and-PHP list in which using curl with PHP is
discussed, follow the instructions at http://curl.haxx.se/mail/
Please direct curl questions, feature requests and trouble reports to one of
these mailing lists instead of mailing any individual.

View File

@@ -12,11 +12,17 @@ man_MANS = \
curl_easy_perform.3 \
curl_easy_setopt.3 \
curl_formparse.3 \
curl_formfree.3 \
curl_getdate.3 \
curl_getenv.3 \
curl_slist_append.3 \
curl_slist_free_all.3 \
curl_version.3
curl_version.3 \
curl_escape.3 \
curl_unescape.3
EXTRA_DIST = $(man_MANS)
EXTRA_DIST = $(man_MANS) \
MANUAL BUGS CONTRIBUTE FAQ FEATURES INTERNALS \
LIBCURL README.win32 RESOURCES TODO TheArtOfHttpScripting
SUBDIRS = examples

View File

@@ -102,6 +102,12 @@ Similar Tools
Kermit - http://www.columbia.edu/kermit/ftpclient
Pavuk - http://www.idata.sk/~ondrej/pavuk/
httpr - http://zwolak.dhs.org/httpr/
puf - http://www.inf.tu-dresden.de/~ob6/sw/puf.html
Related Software
----------------
ftpparse - http://cr.yp.to/ftpparse.html parses FTP LIST responses

View File

@@ -6,51 +6,49 @@
TODO
For the future
Things to do in project cURL. Please tell me what you think, contribute and
send me patches that improve things!
Ok, this is what I wanna do with Curl. Please tell me what you think, and
please don't hesitate to contribute and send me patches that improve this
product! (Yes, you may add things not mentioned here, these are just a
few teasers...)
To do for the next release:
* Make SSL session ids get used if multiple HTTPS documents from the same
host is requested.
* Improve the command line option parser to accept '-m300' as well as the '-m
300' convention. It should be able to work if '-m300' is considered to be
space separated to the next option.
* Document the undocumented libcurl functions: the printf clones (like
curl_msprintf, curl_mfprintf, curl_msnprintf, curl_maprintf and
curl_mvfprintf) and the string compare functions (curl_strequal
and curl_strnequal).
* Make the curl tool support URLs that start with @ that would then mean that
the following is a plain list with URLs to download. Thus @filename.txt
reads a list of URLs from a local file. A fancy option would then be to
support @http://whatever.com that would first load a list and then get the
URLs mentioned in the list. I figure -O or something would have to be
implied by such an action.
To do in a future release (random order):
* Make curl work with multiple URLs, even outside of {}-letters. I could also
imagine an optional fork()ed system that downloads each URL in its own
thread. It should of course have a maximum amount of simultaneous fork()s.
* Rewrite parts of the test suite. Make a (XML?) format to store all
test-data in a single file for a single test case. The current system makes far
too many separate files. We also need to have the test suite support
different behaviors, like when libcurl is compiled for IPv6 support and
thus performs a different set of FTP commands.
* Improve the regular progress meter when --continue is used. It should be
noticeable when there's a resume going on.
* Add configure options that disables certain protocols in libcurl to
decrease footprint. '--disable-[protocol]' where protocol is http, ftp,
telnet, ldap, dict or file.
* Extend the test suite to include telnet and https. The telnet could just do
ftp or http operations (for which we have test servers) and the https would
probably work against/with some of the openssl tools.
* Add a command line option that allows the output file to get the same time
stamp as the remote file. This requires some fiddling on FTP but comes
almost free for HTTP.
stamp as the remote file. libcurl already is capable of fetching the remote
file's date.
* Make the SSL layer option capable of using the Mozilla Security Services as
an alternative to OpenSSL:
http://www.mozilla.org/projects/security/pki/nss/
* Make sure the low-level interface works. highlevel.c should basically be
possible to write using that interface. Document the low-level interface
* Make the easy-interface support multiple file transfers. If they're done
to the same host, they should use persistent connections or similar.
* Make curl's SSL layer option capable of using other free SSL libraries.
Such as the Mozilla Security Services
(http://www.mozilla.org/projects/security/pki/nss/) and GNUTLS
(http://gnutls.hellug.gr/)
* Add asynchronous name resolving, as this enables full timeout support for
fork() systems.
* Non-blocking connect(), also to make timeouts work on windows.
* Move non-URL related functions that are used by both the lib and the curl
application to a separate "portability lib".
@@ -58,50 +56,40 @@ For the future
something being worked on in this area) and perl (we have seen the first
versions of this!) comes to mind. Python anyone?
* "Content-Encoding: compress/gzip/zlib"
* "Content-Encoding: compress/gzip/zlib" HTTP 1.1 clearly defines how to get
and decode compressed documents. There is the zlib that is pretty good at
decompressing stuff. This work was started in October 1999 but halted again
since it proved more work than we thought. It is still a good idea to
implement though.
HTTP 1.1 clearly defines how to get and decode compressed documents. There
is the zlib that is pretty good at decompressing stuff. This work was
started in October 1999 but halted again since it proved more work than we
thought. It is still a good idea to implement though.
* Authentication: NTLM. It would be cool to support that MS crap called NTLM
* Authentication: NTLM. Support for that MS crap called NTLM
authentication. MS proxies and servers sometime require that. Since that
protocol is a proprietary one, it involves reverse engineering and network
sniffing. This should however be a library-based functionality. There are a
few different efforts "out there" to make open source HTTP clients support
this and it should be possible to take advantage of other people's hard
work. http://modntlm.sourceforge.net/ is one.
work. http://modntlm.sourceforge.net/ is one. There's a web page at
http://www.innovation.ch/java/ntlm.html that contains detailed reverse-
engineered info.
* RFC2617 compliance, "Digest Access Authentication"
A valid test page seems to exist at:
http://hopf.math.nwu.edu/testpage/digest/
http://hopf.math.nwu.edu/testpage/digest/
And some friendly person's server source code is available at
http://hopf.math.nwu.edu/digestauth/index.html
http://hopf.math.nwu.edu/digestauth/index.html
Then there's the Apache mod_digest source code too of course. It seems as
if Netscape doesn't support this, and not many servers do, although this is
a much better authentication method than the more common "Basic". Basic
sends the password in cleartext over the network, while this "Digest" method
uses a challenge-response protocol which increases security quite a lot.
* Multiple Proxies?
Is there anyone that actually uses serial-proxies? I mean, send CONNECT to
the first proxy to connect to the second proxy to which you send CONNECT to
connect to the remote host (or even more iterations). Is there anyone
wanting curl to support it? (Not that it would be hard, just confusing...)
* Other proxies
Ftp-kind proxy, Socks5, whatever kind of proxies are there?
* IPv6 Awareness and support
Where ever it would fit. configure search for v6-versions of a few
functions and then use them instead is of course the first thing to do...
RFC 2428 "FTP Extensions for IPv6 and NATs" will be interesting. PORT
should be replaced with EPRT for IPv6, and EPSV instead of PASV.
* IPv6 Awareness and support. (This is partly done.) RFC 2428 "FTP
Extensions for IPv6 and NATs" is interesting. PORT should be replaced with
EPRT for IPv6 (done), and EPSV instead of PASV. HTTP proxies are left to
add support for.
* SSL for more protocols, like SSL-FTP...
(http://search.ietf.org/internet-drafts/draft-murray-auth-ftp-ssl-05.txt)
* HTTP POST resume using Range:

View File

@@ -2,18 +2,19 @@
.\" nroff -man curl.1
.\" Written by Daniel Stenberg
.\"
.TH curl 1 "4 January 2001" "Curl 7.5.2" "Curl Manual"
.TH curl 1 "24 March 2001" "Curl 7.7" "Curl Manual"
.SH NAME
curl \- get a URL with FTP, TELNET, LDAP, GOPHER, DICT, FILE, HTTP or
HTTPS syntax.
.SH SYNOPSIS
.B curl [options]
.I url
.I [URL...]
.SH DESCRIPTION
.B curl
is a client to get documents/files from servers, using any of the
supported protocols. The command is designed to work without user
interaction or any kind of interactivity.
is a client to get documents/files from or send documents to a server, using
any of the supported protocols (HTTP, HTTPS, FTP, GOPHER, DICT, TELNET, LDAP
or FILE). The command is designed to work without user interaction or any kind
of interactivity.
curl offers a busload of useful tricks like proxy support, user
authentication, ftp upload, HTTP post, SSL (https:) connections, cookies, file
@@ -37,6 +38,15 @@ It is possible to specify up to 9 sets or series for a URL, but no nesting is
supported at the moment:
http://www.any.org/archive[1996-1999]/volume[1-4]part{a,b,c,index}.html
Starting with curl 7.6, you can specify any amount of URLs on the command
line. They will be fetched in a sequential manner in the specified order.
Starting with curl 7.7, curl will attempt to re-use connections for multiple
file transfers, so that getting many files from the same server will not do
multiple connects/handshakes. This improves speed. Of course this is only done
on files specified on a single command line and cannot be used between
separate curl invokes.
.SH OPTIONS
.IP "-a/--append"
(FTP)
@@ -81,6 +91,14 @@ also be enforced by using an URL that ends with ";type=A". This option causes
data sent to stdout to be in text mode for win32 systems.
If this option is used twice, the second one will disable ASCII usage.
.IP "--connect-timeout <seconds>"
Maximum time in seconds that you allow the connection to the server to take.
This only limits the connection phase; once curl has connected, this option is
of no further use. This option doesn't work on win32 systems. See also the
.I "--max-time"
option.
If this option is used several times, the last one will be used.
.IP "-c/--continue"
.B Deprecated. Use '-C -' instead.
Continue/Resume a previous file transfer. This instructs curl to
@@ -101,15 +119,16 @@ HTTP resume is only possible with HTTP/1.1 or later servers.
If this option is used several times, the last one will be used.
.IP "-d/--data <data>"
(HTTP) Sends the specified data in a POST request to the HTTP server. Note
that the data is sent exactly as specified with no extra processing (with all
newlines cut off). The data is expected to be "url-encoded". This will cause
curl to pass the data to the server using the content-type
application/x-www-form-urlencoded. Compare to -F. If more than one -d/--data
option is used on the same command line, the data pieces specified will be
merged together with a separating &-letter. Thus, using '-d name=daniel -d
skill=lousy' would generate a post chunk that looks like
'name=daniel&skill=lousy'.
(HTTP) Sends the specified data in a POST request to the HTTP server, in a way
that emulates a user filling in an HTML form and pressing the submit
button. Note that the data is sent exactly as specified with no extra
processing (with all newlines cut off). The data is expected to be
"url-encoded". This will cause curl to pass the data to the server using the
content-type application/x-www-form-urlencoded. Compare to -F. If more than
one -d/--data option is used on the same command line, the data pieces
specified will be merged together with a separating &-letter. Thus, using '-d
name=daniel -d skill=lousy' would generate a post chunk that looks like
\&'name=daniel&skill=lousy'.
If you start the data with the letter @, the rest should be a file name to
read the data from, or - if you want curl to read the data from stdin. The
@@ -120,11 +139,13 @@ To post data purely binary, you should instead use the --data-binary option.
-d/--data is the same as --data-ascii.
If this option is used several times, the last one will be used.
If this option is used several times, the ones following the first will
append data.
.IP "--data-ascii <data>"
(HTTP) This is an alias for the -d/--data option.
If this option is used several times, the last one will be used.
If this option is used several times, the ones following the first will
append data.
.IP "--data-binary <data>"
(HTTP) This posts data in a similar manner as --data-ascii does, although when
using this option the entire contents of the posted data are kept as-is. If you
@@ -132,6 +153,9 @@ want to post a binary file without the strip-newlines feature of the
--data-ascii option, this is for you.
If this option is used several times, the last one will be used.
If this option is used several times, the ones following the first will
append data.
.IP "-D/--dump-header <file>"
(HTTP/FTP)
Write the HTTP headers to this file. Write the FTP file info to this
@@ -151,6 +175,11 @@ previous URL when it follows a Location: header. The ";auto" string can be
used alone, even if you don't set an initial referer.
If this option is used several times, the last one will be used.
.IP "--egd-file <file>"
(HTTPS) Specify the path name to the Entropy Gathering Daemon socket. The
socket is used to seed the random engine for SSL connections. See also the
.I "--random-file"
option.
.IP "-E/--cert <certificate[:password]>"
(HTTPS)
Tells curl to use the specified certificate file when getting a file
@@ -186,7 +215,7 @@ get attached in the post as a file upload, while the < makes a text field and
just get the contents for that text field from a file.
Example, to send your password file to the server, where
'password' is the name of the form-field to which /etc/passwd will be the
\&'password' is the name of the form-field to which /etc/passwd will be the
input:
.B curl
To read the file's content from stdin instead of a file, use - where the file
name should've been. This goes for both @ and < constructs.
This option can be used multiple times.
.IP "-g/--globoff"
This option switches off the "URL globbing parser". When you set this option,
you can specify URLs that contain the letters {}[] without having them
interpreted by curl itself. Note that these letters are not normal legal URL
contents but they should be encoded according to the URI standard. (Option
added in curl 7.6)
.IP "-h/--help"
Usage help.
.IP "-H/--header <header>"
@@ -268,6 +303,9 @@ If this option is used twice, the second will again disable location following.
Maximum time in seconds that you allow the whole operation to take. This is
useful for preventing your batch jobs from hanging for hours due to slow
networks or links going down. This doesn't work fully in win32 systems.
See also the
.I "--connect-timeout"
option.
If this option is used several times, the last one will be used.
.IP "-M/--manual"
@@ -287,7 +325,7 @@ directory.
A quick and very simple example of how to setup a
.I .netrc
to allow curl to ftp to the machine host.domain.com with user name
'myself' and password 'secret' should look similar to:
\&'myself' and password 'secret' should look similar to:
.B "machine host.domain.com login myself password secret"
@@ -311,11 +349,12 @@ or use several variables like:
curl http://{site,host}.host[1-5].com -o "#1_#2"
If this option is used several times, the last one will be used.
You may use this option as many times as the number of URLs you have.
.IP "-O/--remote-name"
Write output to a local file named like the remote file we get. (Only
the file part of the remote file is used, the path is cut off.)
You may use this option as many times as the number of URLs you have.
.IP "-p/--proxytunnel"
When an HTTP proxy is used, this option will cause non-HTTP protocols to
attempt to tunnel through the proxy instead of merely using it to do HTTP-like
@@ -361,6 +400,12 @@ to be run before and after the transfer. If the server returns failure for one
of the commands, the entire operation will be aborted.
This option can be used multiple times.
.IP "--random-file <file>"
(HTTPS) Specify the path name to a file containing what will be considered as
random data. The data is used to seed the random engine for SSL connections.
See also the
.I "--egd-file"
option.
.IP "-r/--range <range>"
(HTTP/FTP)
Retrieve a byte range (i.e. a partial document) from an HTTP/1.1 or FTP
@@ -409,11 +454,14 @@ If this option is used twice, the second will again disable mute.
When used with -s it makes curl show error message if it fails.
If this option is used twice, the second will again disable show error.
.IP "-t/--upload"
.B Deprecated. Use '-T -' instead.
Transfer the stdin data to the specified file. Curl will read
everything from stdin until EOF and store it with the supplied name. If
this is used on an http(s) server, the PUT command will be used.
.IP "-t/--telnet-option <OPT=val>"
Pass options to the telnet protocol. Supported options are:
TTYPE=<term> Sets the terminal type.
XDISPLOC=<X display> Sets the X display location.
NEW_ENV=<var,val> Sets an environment variable.
.IP "-T/--upload-file <file>"
Like -t, but this transfers the specified local file. If there is no
file part in the specified URL, Curl will append the local file
@@ -436,10 +484,14 @@ password is specified, curl will ask for it interactively.
If this option is used several times, the last one will be used.
.IP "--url <URL>"
Set the URL to fetch. This option is mostly handy when you wanna specify URL
in a config file.
Specify a URL to fetch. This option is mostly handy when you want to specify
URL(s) in a config file.
If this option is used several times, the last one will be used.
This option may be used any number of times. To control where this URL is written, use the
.I -o
or the
.I -O
options.
.IP "-v/--verbose"
Makes the fetching more verbose/talkative. Mostly usable for
debugging. Lines starting with '>' mean data sent by curl, '<'
@@ -738,7 +790,7 @@ If you do find bugs, mail them to curl-bug@haxx.se.
- Lars J. Aas <larsa@sim.no>
- J<>rn Hartroth <Joern.Hartroth@computer.org>
- Matthew Clarke <clamat@van.maves.ca>
- Linus Nielsen <Linus.Nielsen@haxx.se>
- Linus Nielsen Feltzing <linus@haxx.se>
- Felix von Leitner <felix@convergence.de>
- Dan Zitter <dzitter@zitter.net>
- Jongki Suwandi <Jongki.Suwandi@eng.sun.com>
@@ -765,6 +817,10 @@ If you do find bugs, mail them to curl-bug@haxx.se.
- T. Bharath <TBharath@responsenetworks.com>
- Alexander Kourakos <awk@users.sourceforge.net>
- James Griffiths <griffiths_james@yahoo.com>
- Loic Dachary <loic@senga.org>
- Robert Weaver <robert.weaver@sabre.com>
- Ingo Ralf Blum <ingoralfblum@ingoralfblum.com>
- Jun-ichiro itojun Hagino <itojun@iijlab.net>
.SH WWW
http://curl.haxx.se

View File

@@ -2,13 +2,13 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_easy_cleanup 3 "22 May 2000" "Curl 7.0" "libcurl Manual"
.TH curl_easy_cleanup 3 "5 March 2001" "libcurl 7.7" "libcurl Manual"
.SH NAME
curl_easy_cleanup - End a libcurl "easy" session
curl_easy_cleanup - End a libcurl session
.SH SYNOPSIS
.B #include <curl/easy.h>
.B #include <curl/curl.h>
.sp
.BI "curl_easy_cleanup(CURL *" handle ");
.BI "curl_easy_cleanup(CURL *" handle ");"
.ad
.SH DESCRIPTION
This function must be the last function to call for a curl session. It is the
@@ -17,6 +17,10 @@ opposite of the
function and must be called with the same
.I handle
as input as the curl_easy_init call returned.
This will effectively close all connections libcurl has used and possibly
has kept open until now. Don't call this function if you intend to transfer
more files (libcurl 7.7 or later).
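For example, a minimal sketch of typical call ordering (the URL is just an
example) where the cleanup call comes last, after all transfers on the handle:
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/");
    curl_easy_perform(curl);
    /* last call for this handle; also closes any connections kept open */
    curl_easy_cleanup(curl);
  }
  return 0;
}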
.SH RETURN VALUE
None
.SH "SEE ALSO"

View File

@@ -2,11 +2,11 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_easy_init 3 "22 November 2000" "Curl 7.5" "libcurl Manual"
.TH curl_easy_init 3 "5 March 2001" "libcurl 7.6.1" "libcurl Manual"
.SH NAME
curl_easy_getinfo - Extract information from a curl session (added in 7.4)
.SH SYNOPSIS
.B #include <curl/easy.h>
.B #include <curl/curl.h>
.sp
.BI "CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ... );"
.ad
@@ -81,6 +81,14 @@ than one request if FOLLOWLOCATION is true.
Pass a pointer to a long to receive the result of the certification
verification that was requested (using the CURLOPT_SSL_VERIFYPEER option to
curl_easy_setopt). (Added in 7.4.2)
.TP
.B CURLINFO_CONTENT_LENGTH_DOWNLOAD
Pass a pointer to a double to receive the content-length of the download. This
is the value read from the Content-Length: field. (Added in 7.6.1)
.TP
.B CURLINFO_CONTENT_LENGTH_UPLOAD
Pass a pointer to a double to receive the specified size of the upload.
(Added in 7.6.1)
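As an illustrative sketch (the URL is just an example), reading the download
content-length after a completed transfer could look like this:
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  double clength;
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/");
    if(curl_easy_perform(curl) == CURLE_OK &&
       curl_easy_getinfo(curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD,
                         &clength) == CURLE_OK)
      printf("Content-Length: %.0f bytes\n", clength);
    curl_easy_cleanup(curl);
  }
  return 0;
}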
.PP
.SH RETURN VALUE

View File

@@ -2,11 +2,11 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_easy_init 3 "26 September 2000" "Curl 7.0" "libcurl Manual"
.TH curl_easy_init 3 "5 March 2001" "libcurl 7.7" "libcurl Manual"
.SH NAME
curl_easy_init - Start a libcurl "easy" session
curl_easy_init - Start a libcurl session
.SH SYNOPSIS
.B #include <curl/easy.h>
.B #include <curl/curl.h>
.sp
.BI "CURL *curl_easy_init( );"
.ad
@@ -19,6 +19,10 @@ when the operation is complete.
On win32 systems, you need to init the winsock stuff manually, libcurl will
not do that for you. WSAStartup() and WSACleanup() should be used accordingly.
Using libcurl 7.7 and later, you should perform all your sequential file
transfers using the same curl handle. This enables libcurl to use persistent
connections where possible.
.SH RETURN VALUE
If this function returns NULL, something went wrong and you cannot use the
other curl functions.

View File

@@ -2,11 +2,11 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_easy_perform 3 "26 May 2000" "Curl 7.0" "libcurl Manual"
.TH curl_easy_perform 3 "5 Mar 2001" "libcurl 7.7" "libcurl Manual"
.SH NAME
curl_easy_perform - Do the actual transfer in a "easy" session
curl_easy_perform - Perform a file transfer
.SH SYNOPSIS
.B #include <curl/easy.h>
.B #include <curl/curl.h>
.sp
.BI "CURLcode curl_easy_perform(CURL *" handle ");
.ad
@@ -16,12 +16,29 @@ are made, and will perform the transfer as described in the options.
It must be called with the same
.I handle
as input as the curl_easy_init call returned.
libcurl version 7.7 or later (for older versions see below): You can do any
number of calls to curl_easy_perform() while using the same handle. If you
intend to transfer more than one file, you are even encouraged to do
so. libcurl will then attempt to re-use the same connection for the following
transfers, thus making the operations faster, less CPU intensive and using less
network resources. Just note that you will have to use
.I curl_easy_setopt
between the invocations to set options for the following curl_easy_perform.
You must never call this function simultaneously from two places using the
same handle. Let the function return first before invoking it another time. If
you want parallel transfers, you must use several curl handles.
Before libcurl version 7.7: You are only allowed to call this function once
using the same handle. If you want to do repeated calls, you must call
curl_easy_cleanup and curl_easy_init again first.
.SH RETURN VALUE
0 means everything was ok, non-zero means an error occurred as
.I <curl/curl.h>
defines. If the CURLOPT_ERRORBUFFER was set with
.I curl_easy_setopt
there willo be a readable error message in the error buffer when non-zero is
there will be a readable error message in the error buffer when non-zero is
returned.
.SH "SEE ALSO"
.BR curl_easy_init "(3), " curl_easy_setopt "(3), "

View File

@@ -2,13 +2,13 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_easy_setopt 3 "28 November 2000" "Curl 7.5" "libcurl Manual"
.TH curl_easy_setopt 3 "30 March 2001" "libcurl 7.7" "libcurl Manual"
.SH NAME
curl_easy_setopt - Set curl easy-session options
.SH SYNOPSIS
.B #include <curl/easy.h>
.B #include <curl/curl.h>
.sp
.BI "CURLcode curl_easy_setopt(CURL *" handle ", CURLoption "option ", ...);
.BI "CURLcode curl_easy_setopt(CURL *" handle ", CURLoption "option ", ...);"
.ad
.SH DESCRIPTION
curl_easy_setopt() is called to tell libcurl how to behave in a number of
@@ -20,7 +20,18 @@ followed by a parameter. That parameter can be a long, a function pointer or
an object pointer, all depending on what the option in question expects. Read
this manual carefully as bad input values may cause libcurl to behave badly!
You can only set one option in each function call. A typical application uses
many calls in the setup phase.
many curl_easy_setopt() calls in the setup phase.
NOTE: strings passed to libcurl as 'char *' arguments will not be copied by
the library. Instead you should keep them available until libcurl no longer
needs them. Failing to do so will cause very odd behaviour or even crashes.
Another note: the options set with this function call are valid for the
forthcoming data transfers that are performed when you invoke
.I curl_easy_perform .
The options are not in any way reset between transfers, so if you want
subsequent transfers with different options, you must change them between the
transfers.
The
.I "handle"
@@ -35,6 +46,12 @@ Data pointer to pass instead of FILE * to the file write function. Note that
if you specify the
.I CURLOPT_WRITEFUNCTION
, this is the pointer you'll get as input.
NOTE: If you're using libcurl as a win32 .DLL, you MUST use a
.I CURLOPT_WRITEFUNCTION
if you set the
.I CURLOPT_FILE
option.
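For instance, a sketch of pairing CURLOPT_FILE with a CURLOPT_WRITEFUNCTION;
the callback here simply mirrors fwrite() (an assumption based on the
prototype described in the next entry), and the local file name is made up:
#include <stdio.h>
#include <curl/curl.h>

/* fwrite-style callback; assumed to match the prototype described below */
static size_t write_data(void *ptr, size_t size, size_t nmemb, void *stream)
{
  return fwrite(ptr, size, nmemb, (FILE *)stream);
}

int main(void)
{
  CURL *curl = curl_easy_init();
  FILE *out = fopen("saved.html", "wb");   /* made-up local file name */
  if(curl && out) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/");
    curl_easy_setopt(curl, CURLOPT_FILE, out);
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  if(out)
    fclose(out);
  return 0;
}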
.TP
.B CURLOPT_WRITEFUNCTION
Function pointer that should match the following prototype:
@@ -53,6 +70,12 @@ Data pointer to pass instead of FILE * to the file read function. Note that if
you specify the
.I CURLOPT_READFUNCTION
, this is the pointer you'll get as input.
NOTE: If you're using libcurl as a win32 .DLL, you MUST use a
.I CURLOPT_READFUNCTION
if you set the
.I CURLOPT_INFILE
option.
.TP
.B CURLOPT_READFUNCTION
Function pointer that should match the following prototype:
@@ -74,14 +97,16 @@ libcurl what the expected size of the infile is.
.TP
.B CURLOPT_URL
The actual URL to deal with. The parameter should be a char * to a zero
terminated string. NOTE: this option is currently required!
terminated string. The string must remain present until curl no longer needs
it, as it doesn't copy the string. NOTE: this option is required to be set
before curl_easy_perform() is called.
.TP
.B CURLOPT_PROXY
If you need libcurl to use a http proxy to access the outside world, set the
proxy string with this option. The parameter should be a char * to a zero
terminated string. To specify port number in this string, append":[port]" to
terminated string. To specify port number in this string, append :[port] to
the end of the host name. The proxy string may be prefixed with
"[protocol]://" since any such prefix will be ignored.
[protocol]:// since any such prefix will be ignored.
.TP
.B CURLOPT_PROXYPORT
Set this long with this option to set the proxy port to use unless it is
@@ -177,9 +202,11 @@ prompted for it.
.TP
.B CURLOPT_RANGE
Pass a char * as parameter, which should contain the specified range you
want. It should be in the format "X-Y", where X or Y may be left out. The HTTP
want. It should be in the format "X-Y", where X or Y may be left out. HTTP
transfers also support several intervals, separated with commas as in
.I "X-Y,N-M".
.I "X-Y,N-M"
. Using this kind of multiple intervals will cause the HTTP server to send the
response document in pieces.
.TP
.B CURLOPT_ERRORBUFFER
Pass a char * to a buffer that the libcurl may store human readable error
@@ -190,7 +217,8 @@ library. The buffer must be at least CURL_ERROR_SIZE big.
Pass a long as parameter containing the maximum time in seconds that you allow
the libcurl transfer operation to take. Do note that normally, name lookups
may take a considerable time and that limiting the operation to less than a
few minutes risks aborting perfectly normal operations.
few minutes risks aborting perfectly normal operations. This option will cause
curl to use SIGALRM to enable time-outs for system calls.
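A small sketch combining CURLOPT_ERRORBUFFER with CURLOPT_TIMEOUT; the URL and
the 300 second limit are just example values:
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  char errbuf[CURL_ERROR_SIZE];   /* must be at least CURL_ERROR_SIZE big */
  errbuf[0] = '\0';
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/");
    curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, errbuf);
    curl_easy_setopt(curl, CURLOPT_TIMEOUT, 300L); /* whole operation, seconds */
    if(curl_easy_perform(curl) != CURLE_OK)
      fprintf(stderr, "transfer failed: %s\n", errbuf);
    curl_easy_cleanup(curl);
  }
  return 0;
}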
.TP
.B CURLOPT_POSTFIELDS
Pass a char * as parameter, which should be the full data to post in a HTTP
@@ -246,7 +274,11 @@ curl_slist' structs properly filled in. Use
.I curl_slist_append(3)
to create the list and
.I curl_slist_free_all(3)
to clean up an entire list.
to clean up an entire list. If you add a header that is otherwise generated
and used by libcurl internally, your added one will be used instead. If you
add a header with no contents as in 'Accept:', the internally used header will
just get disabled. Thus, using this option you can add new headers, replace
internal headers and remove internal headers.
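For example, a sketch that adds one custom header (a made-up header name) and
disables the internal Accept: header:
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  struct curl_slist *headers = NULL;

  headers = curl_slist_append(headers, "X-silly-header: yes"); /* made-up */
  headers = curl_slist_append(headers, "Accept:"); /* disables the internal one */

  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/");
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  curl_slist_free_all(headers);
  return 0;
}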
.TP
.B CURLOPT_HTTPPOST
Tells libcurl you want a multipart/formdata HTTP POST to be made and you
@@ -285,7 +317,9 @@ struct curl_slist structs properly filled in as described for
.I "CURLOPT_QUOTE"
.TP
.B CURLOPT_WRITEHEADER
Pass a FILE * to be used to write the header part of the received data to.
Pass a FILE * to be used to write the header part of the received data to. The
headers are guaranteed to be written one-by-one to this file handle and only
complete lines are written. Parsing headers should be easy enough using this.
.TP
.B CURLOPT_COOKIEFILE
Pass a pointer to a zero terminated string as parameter. It should contain the
@@ -325,7 +359,7 @@ name. (Added in libcurl 7.3)
.B CURLOPT_KRB4LEVEL
Pass a char * as parameter. Set the krb4 security level, this also enables
krb4 awareness. This is a string, 'clear', 'safe', 'confidential' or
'private'. If the string is set but doesn't match one of these, 'private'
\&'private'. If the string is set but doesn't match one of these, 'private'
will be used. Set the string to NULL to disable kerberos4. The kerberos
support only works for FTP. (Added in libcurl 7.3)
.TP
@@ -398,6 +432,59 @@ Pass a long. The set number will be the redirection limit. If that many
redirections have been followed, the next redirect will cause an error. This
option only makes sense if the CURLOPT_FOLLOWLOCATION is used at the same
time. (Added in 7.5)
.TP
.B CURLOPT_MAXCONNECTS
Pass a long. The set number will be the persistent connection cache size. The
set amount will be the maximum number of simultaneous connections that libcurl
may cache between file transfers. Default is 5, and there isn't much point in
changing this value unless you are perfectly aware of how this works and
changes libcurl's behaviour. Note: if you have already performed transfers
with this curl handle, setting a smaller MAXCONNECTS than before may cause
open connections to get closed unnecessarily. (Added in 7.7)
.TP
.B CURLOPT_CLOSEPOLICY
Pass a long. This option sets what policy libcurl should use when the
connection cache is filled and one of the open connections has to be closed to
make room for a new connection. This must be one of the CURLCLOSEPOLICY_*
defines. Use CURLCLOSEPOLICY_LEAST_RECENTLY_USED to make libcurl close the
connection that was least recently used, that connection is also least likely
to be capable of re-use. Use CURLCLOSEPOLICY_OLDEST to make libcurl close the
oldest connection, the one that was created first among the ones in the
connection cache. The other close policies are not supported yet. (Added in 7.7)
.TP
.B CURLOPT_FRESH_CONNECT
Pass a long. Set to non-zero to make the next transfer use a new connection by
force. If the connection cache is full before this connection, one of the
existing connections will be closed according to the set policy. This
option should be used with caution and only if you understand what it
does. Set to 0 to have libcurl attempt re-use of an existing connection.
(Added in 7.7)
.TP
.B CURLOPT_FORBID_REUSE
Pass a long. Set to non-zero to make the next transfer explicitly close the
connection when done. Normally, libcurl keeps all connections alive when done
with one transfer in case a succeeding one comes along that can re-use them.
This option should be used with caution and only if you understand what it
does. Set to 0 to have libcurl keep the connection open for possibly later
re-use. (Added in 7.7)
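A short sketch of tuning the 7.7 connection cache with the options above;
the cache size and URL are illustrative values only:
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/");
    /* cache at most 3 connections between transfers (example value) */
    curl_easy_setopt(curl, CURLOPT_MAXCONNECTS, 3L);
    /* when the cache is full, close the oldest cached connection */
    curl_easy_setopt(curl, CURLOPT_CLOSEPOLICY, (long)CURLCLOSEPOLICY_OLDEST);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}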
.TP
.B CURLOPT_RANDOM_FILE
Pass a char * to a zero terminated file name. The file will be used to read
from to seed the random engine for SSL. The more random the specified file is,
the more secure the SSL connection will become.
.TP
.B CURLOPT_EGDSOCKET
Pass a char * to the zero terminated path name to the Entropy Gathering Daemon
socket. It will be used to seed the random engine for SSL.
.TP
.B CURLOPT_CONNECTTIMEOUT
Pass a long. It should contain the maximum time in seconds that you allow the
connection to the server to take. This only limits the connection phase, once
it has connected, this option is of no more use. Set to zero to disable
connection timeout (it will then only timeout on the system's internal
timeouts). This option doesn't work in win32 systems. See also the
.I CURLOPT_TIMEOUT
option.
.PP
.SH RETURN VALUE
0 means the option was set properly, non-zero means an error as

28
docs/curl_escape.3 Normal file
View File

@@ -0,0 +1,28 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_escape 3 "22 March 2001" "libcurl 7.7" "libcurl Manual"
.SH NAME
curl_escape - URL encodes the given string
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "char *curl_escape( char *" url ", int "length " );"
.ad
.SH DESCRIPTION
This function will convert the given input string to a URL encoded string and
return that as a newly allocated string. All input characters that are not a-z,
A-Z or 0-9 will be converted to their "URL escaped" version. If a sequence of
%NN (where NN is a two-digit hexadecimal number) is found in the string to
encode, that three-character combination will be copied to the output
unmodified, assuming that it is an already encoded piece of data.
If the 'length' argument is set to 0, curl_escape() will use strlen() on the
input 'url' string to find out the size.
You must free() the returned string when you're done with it.
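A short usage sketch (the input string is arbitrary):
#include <stdio.h>
#include <stdlib.h>
#include <curl/curl.h>

int main(void)
{
  /* length 0 makes curl_escape() call strlen() on the input */
  char *encoded = curl_escape("a string with spaces & slashes/", 0);
  if(encoded) {
    printf("%s\n", encoded);
    free(encoded);   /* the returned string must be free()d */
  }
  return 0;
}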
.SH RETURN VALUE
A pointer to a zero terminated string or NULL if it failed.
.SH "SEE ALSO"
.I curl_unescape(), RFC 2396

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_formfree 3 "17 November 2000" "Curl 7.5" "libcurl Manual"
.TH curl_formfree 3 "5 March 2001" "libcurl 7.5" "libcurl Manual"
.SH NAME
curl_formfree - free a previously built multipart/formdata HTTP POST chain
.SH SYNOPSIS

View File

@@ -2,13 +2,13 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_formparse 3 "6 June 2000" "Curl 7.0" "libcurl Manual"
.TH curl_formparse 3 "5 March 2001" "libcurl 7.0" "libcurl Manual"
.SH NAME
curl_formparse - add a section to a multipart/formdata HTTP POST
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "CURLcode *curl_formparse(char *" string, "struct HttpPost **" firstitem,
.BI "CURLcode curl_formparse(char *" string, "struct HttpPost **" firstitem,
.BI "struct HttpPost ** "lastitem ");"
.ad
.SH DESCRIPTION
@@ -42,14 +42,14 @@ Add a form field named 'name' with the contents as read from the local files
named 'filename1' and 'filename2'. This is identical to the above, except that
you get the contents of several files in one section.
.TP
.B [name]=@[filename];[content-type]
.B [name]=@[filename];[type=<content-type>]
Whenever you specify a file to read from, you can optionally specify the
content-type as well. The content-type is passed to the server together with
the contents of the file. curl_formparse() will guess content-type for a
number of well-known extensions and otherwise it will set it to binary. You
can override the internal decision by using this option.
.TP
.B [name]=@[filename1,filename2,...];[content-type]
.B [name]=@[filename1,filename2,...];[type=<content-type>]
When you specify several files to read the contents from, you can set the
content-type for all of them in the same way as with a single file.
.PP

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_getdate 3 "2 June 2000" "Curl 7.0" "libcurl Manual"
.TH curl_getdate 3 "5 March 2001" "libcurl 7.0" "libcurl Manual"
.SH NAME
curl_getdate - Convert a date in an ASCII string to the number of seconds since
January 1, 1970

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_getenv 3 "2 June 2000" "Curl 7.0" "libcurl Manual"
.TH curl_getenv 3 "5 March 2001" "libcurl 7.0" "libcurl Manual"
.SH NAME
curl_getenv - return value for environment name
.SH SYNOPSIS

View File

@@ -2,14 +2,14 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_slist_append 3 "2 June 2000" "Curl 7.0" "libcurl Manual"
.TH curl_slist_append 3 "5 March 2001" "libcurl 7.0" "libcurl Manual"
.SH NAME
curl_slist_append - add a string to an slist
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "struct curl_slist *curl_slist_append(struct curl_slit *" list,
.BI "char * "string ");"
.BI "const char * "string ");"
.ad
.SH DESCRIPTION
curl_slist_append() appends a specified string to a linked list of

View File

@@ -2,13 +2,13 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_slist_free_all 3 "2 June 2000" "Curl 7.0" "libcurl Manual"
.TH curl_slist_free_all 3 "5 March 2001" "libcurl 7.0" "libcurl Manual"
.SH NAME
curl_slist_free_all - free an entire curl_slist list
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "void curl_slist_free_all(struct curl_slit *" list);
.BI "void curl_slist_free_all(struct curl_slist *" list);
.ad
.SH DESCRIPTION
curl_slist_free_all() removes all traces of a previously built curl_slist

27
docs/curl_unescape.3 Normal file
View File

@@ -0,0 +1,27 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_unescape 3 "22 March 2001" "libcurl 7.7" "libcurl Manual"
.SH NAME
curl_unescape - URL decodes the given string
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "char *curl_unescape( char *" url ", int "length " );"
.ad
.SH DESCRIPTION
This function will convert the given URL encoded input string to a "plain
string" and return that as a newly allocated string. All input characters that
are URL encoded (%XX where XX is a two-digit hexadecimal number, or +) will be
converted to their plain text versions (up to a '?' character; no characters to
the right of a '?' will be converted).
If the 'length' argument is set to 0, curl_unescape() will use strlen() on the
input 'url' string to find out the size.
You must free() the returned string when you're done with it.
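A short usage sketch:
#include <stdio.h>
#include <stdlib.h>
#include <curl/curl.h>

int main(void)
{
  char *plain = curl_unescape("this%20is%20encoded", 0);
  if(plain) {
    printf("%s\n", plain);   /* prints: this is encoded */
    free(plain);             /* the returned string must be free()d */
  }
  return 0;
}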
.SH RETURN VALUE
A pointer to a zero terminated string or NULL if it failed.
.SH "SEE ALSO"
.I curl_escape(), RFC 2396

View File

@@ -2,11 +2,11 @@
.\" nroff -man [file]
.\" Written by daniel@haxx.se
.\"
.TH curl_version 3 "2 June 2000" "Curl 7.0" "libcurl Manual"
.TH curl_version 3 "5 March 2001" "libcurl 7.0" "libcurl Manual"
.SH NAME
curl_version - returns the libcurl version string
.SH SYNOPSIS
.B #include <curl/easy.h>
.B #include <curl/curl.h>
.sp
.BI "char *curl_version( );"
.ad
@@ -14,9 +14,9 @@ curl_version - returns the libcurl version string
Returns a human readable string with the version number of libcurl and some of
its important components (like OpenSSL version).
Do note that this returns the actual running lib's version, you might have
installed a newer lib's include files in your system which may turn your
LIBCURL_VERSION #define value to differ from this result.
Note: this returns the actual running lib's version; you might have installed
a newer lib's include files on your system, which may make your LIBCURL_VERSION
#define value differ from this result.
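A trivial usage sketch (the printed string is only an example of the format):
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  /* prints something like "libcurl 7.7.1 (OpenSSL ...)" */
  printf("%s\n", curl_version());
  return 0;
}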
.SH RETURN VALUE
A pointer to a zero terminated string.
.SH "SEE ALSO"

12
docs/examples/Makefile.am Normal file
View File

@@ -0,0 +1,12 @@
#
# $Id$
#
AUTOMAKE_OPTIONS = foreign no-dependencies
EXTRA_DIST = \
README curlgtk.c sepheaders.c simple.c postit.c \
win32sockets.c persistant.c ftpget.c Makefile.example
all:
@echo "done"

View File

@@ -0,0 +1,41 @@
#############################################################################
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# $Id$
#
# What to call the final executable
TARGET = example
# Which object files that the executable consists of
OBJS= ftpget.o
# What compiler to use
CC = gcc
# Compiler flags, -g for debug, -c to make an object file
CFLAGS = -c -g
# This should point to a directory that holds libcurl, if it isn't
# in the system's standard lib dir
# We also set a -L to include the directory where we have the openssl
# libraries
LDFLAGS = -L/home/dast/lib -L/usr/local/ssl/lib
# We need -lcurl for the curl stuff
# We need -lsocket and -lnsl when on Solaris
# We need -lssl and -lcrypto when using libcurl with SSL support
# We need -ldl for dlopen() if that is in libdl
LIBS = -lcurl -lsocket -lnsl -lssl -lcrypto -ldl
# Link the target with all objects and libraries
$(TARGET) : $(OBJS)
$(CC) $(LDFLAGS) $(LIBS) -o $(TARGET) $(OBJS)
# Compile the source files into object files
ftpget.o : ftpget.c
$(CC) $(CFLAGS) $<

View File

@@ -6,3 +6,9 @@ advantage of libcurl.
If you end up with other small but still useful example sources, please mail
them for submission in future packages and on the web site.
The Makefile.example is an example makefile that could be used to build these
examples. Just edit the file according to your system and requirements first.
Try the php/examples/ directory for PHP programming snippets!

View File

@@ -1,4 +1,12 @@
/* curlgtk.c */
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* $Id$
*/
/* Copyright (c) 2000 David Odin (aka DindinX) for MandrakeSoft */
/* an attempt to use the curl library in concert with a gtk-threaded application */

44
docs/examples/ftpget.c Normal file
View File

@@ -0,0 +1,44 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* $Id$
*/
#include <stdio.h>
#include <curl/curl.h>
#include <curl/types.h>
#include <curl/easy.h>
/* to make this work under windows, use the win32-functions from the
win32socket.c file as well */
int main(int argc, char **argv)
{
CURL *curl;
CURLcode res;
FILE *ftpfile;
/* local file name to store the file as */
ftpfile = fopen("curl.tar.gz", "wb"); /* b is binary for win */
curl = curl_easy_init();
if(curl) {
/* Get curl 7.7 from sunet.se's FTP site: */
curl_easy_setopt(curl, CURLOPT_URL,
"ftp://ftp.sunet.se/pub/www/utilities/curl/curl-7.7.tar.gz");
curl_easy_setopt(curl, CURLOPT_FILE, ftpfile);
res = curl_easy_perform(curl);
/* always cleanup */
curl_easy_cleanup(curl);
}
fclose(ftpfile); /* close the local file */
return 0;
}

View File

@@ -0,0 +1,53 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* $Id$
*/
#include <stdio.h>
#include <unistd.h>
#include <curl/curl.h>
/* to make this work under windows, use the win32-functions from the
docs/examples/win32socket.c file as well */
/* This example REQUIRES libcurl 7.7 or later */
#if (LIBCURL_VERSION_NUM < 0x070700)
#error Too old libcurl version, upgrade or stay away.
#endif
int main(int argc, char **argv)
{
CURL *curl;
CURLcode res;
#ifdef MALLOCDEBUG
/* this sends all memory debug messages to a specified logfile */
curl_memdebug("memdump");
#endif
curl = curl_easy_init();
if(curl) {
curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
curl_easy_setopt(curl, CURLOPT_HEADER, 1);
/* get the first document */
curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/");
res = curl_easy_perform(curl);
/* get another document from the same server using the same
connection */
curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/docs/");
res = curl_easy_perform(curl);
/* always cleanup */
curl_easy_cleanup(curl);
}
return 0;
}

71
docs/examples/postit.c Normal file
View File

@@ -0,0 +1,71 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* $Id$
*
* Example code that uploads a file named 'foo' to a remote script that accepts
* "HTML form based" (as described in RFC 1867) uploads using HTTP POST.
*
* The imaginary form we'll fill in looks like:
*
* <form method="post" enctype="multipart/form-data" action="examplepost.cgi">
* Enter file: <input type="file" name="sendfile" size="40">
* Enter file name: <input type="text" name="filename" size="30">
* <input type="submit" value="send" name="submit">
* </form>
*
* This exact source code has not been verified to work.
*/
/* to make this work under windows, use the win32-functions from the
win32socket.c file as well */
#include <stdio.h>
#include <curl/curl.h>
#include <curl/types.h>
#include <curl/easy.h>
int main(int argc, char **argv)
{
CURL *curl;
CURLcode res;
struct HttpPost *formpost=NULL;
struct HttpPost *lastptr=NULL;
/* Fill in the file upload field */
curl_formparse("sendfile=@foo",
&formpost,
&lastptr);
/* Fill in the filename field */
curl_formparse("filename=foo",
&formpost,
&lastptr);
/* Fill in the submit field too, even if this is rarely needed */
curl_formparse("submit=send",
&formpost,
&lastptr);
curl = curl_easy_init();
if(curl) {
/* what URL that receives this POST */
curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/examplepost.cgi");
curl_easy_setopt(curl, CURLOPT_HTTPPOST, formpost);
res = curl_easy_perform(curl);
/* always cleanup */
curl_easy_cleanup(curl);
/* then cleanup the formpost chain */
curl_formfree(formpost);
}
return 0;
}

View File

@@ -1,3 +1,16 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* $Id$
*/
/* to make this work under windows, use the win32-functions from the
win32socket.c file as well */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

View File

@@ -1,9 +1,22 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* $Id$
*/
#include <stdio.h>
#include <curl/curl.h>
#include <curl/types.h>
#include <curl/easy.h>
/* to make this work under windows, use the win32-functions from the
win32socket.c file as well */
int main(int argc, char **argv)
{
CURL *curl;

View File

@@ -0,0 +1,40 @@
/*
* These are example functions doing socket init that Windows
* require. If you don't use windows, you can safely ignore this crap.
*/
static void win32_cleanup(void)
{
WSACleanup();
}
static CURLcode win32_init(void)
{
WORD wVersionRequested;
WSADATA wsaData;
int err;
wVersionRequested = MAKEWORD(1, 1);
err = WSAStartup(wVersionRequested, &wsaData);
if (err != 0)
/* Tell the user that we couldn't find a useable */
/* winsock.dll. */
return 1;
/* Confirm that the Windows Sockets DLL supports 1.1.*/
/* Note that if the DLL supports versions greater */
/* than 1.1 in addition to 1.1, it will still return */
/* 1.1 in wVersion since that is the version we */
/* requested. */
if ( LOBYTE( wsaData.wVersion ) != 1 ||
HIBYTE( wsaData.wVersion ) != 1 ) {
/* Tell the user that we couldn't find a useable */
/* winsock.dll. */
WSACleanup();
return 1;
}
return 0; /* 0 is ok */
}

View File

@@ -97,68 +97,57 @@ typedef int (*curl_passwd_callback)(void *clientp,
typedef enum {
CURLE_OK = 0,
CURLE_UNSUPPORTED_PROTOCOL,
CURLE_FAILED_INIT,
CURLE_URL_MALFORMAT,
CURLE_URL_MALFORMAT_USER,
CURLE_COULDNT_RESOLVE_PROXY,
CURLE_COULDNT_RESOLVE_HOST,
CURLE_COULDNT_CONNECT,
CURLE_FTP_WEIRD_SERVER_REPLY,
CURLE_FTP_ACCESS_DENIED,
CURLE_FTP_USER_PASSWORD_INCORRECT,
CURLE_FTP_WEIRD_PASS_REPLY,
CURLE_FTP_WEIRD_USER_REPLY,
CURLE_FTP_WEIRD_PASV_REPLY,
CURLE_FTP_WEIRD_227_FORMAT,
CURLE_FTP_CANT_GET_HOST,
CURLE_FTP_CANT_RECONNECT,
CURLE_FTP_COULDNT_SET_BINARY,
CURLE_PARTIAL_FILE,
CURLE_FTP_COULDNT_RETR_FILE,
CURLE_FTP_WRITE_ERROR,
CURLE_FTP_QUOTE_ERROR,
CURLE_HTTP_NOT_FOUND,
CURLE_WRITE_ERROR,
CURLE_UNSUPPORTED_PROTOCOL, /* 1 */
CURLE_FAILED_INIT, /* 2 */
CURLE_URL_MALFORMAT, /* 3 */
CURLE_URL_MALFORMAT_USER, /* 4 */
CURLE_COULDNT_RESOLVE_PROXY, /* 5 */
CURLE_COULDNT_RESOLVE_HOST, /* 6 */
CURLE_COULDNT_CONNECT, /* 7 */
CURLE_FTP_WEIRD_SERVER_REPLY, /* 8 */
CURLE_FTP_ACCESS_DENIED, /* 9 */
CURLE_FTP_USER_PASSWORD_INCORRECT, /* 10 */
CURLE_FTP_WEIRD_PASS_REPLY, /* 11 */
CURLE_FTP_WEIRD_USER_REPLY, /* 12 */
CURLE_FTP_WEIRD_PASV_REPLY, /* 13 */
CURLE_FTP_WEIRD_227_FORMAT, /* 14 */
CURLE_FTP_CANT_GET_HOST, /* 15 */
CURLE_FTP_CANT_RECONNECT, /* 16 */
CURLE_FTP_COULDNT_SET_BINARY, /* 17 */
CURLE_PARTIAL_FILE, /* 18 */
CURLE_FTP_COULDNT_RETR_FILE, /* 19 */
CURLE_FTP_WRITE_ERROR, /* 20 */
CURLE_FTP_QUOTE_ERROR, /* 21 */
CURLE_HTTP_NOT_FOUND, /* 22 */
CURLE_WRITE_ERROR, /* 23 */
CURLE_MALFORMAT_USER, /* 24 - user name is illegally specified */
CURLE_FTP_COULDNT_STOR_FILE, /* 25 - failed FTP upload */
CURLE_READ_ERROR, /* 26 - couldn't open/read from file */
CURLE_OUT_OF_MEMORY, /* 27 */
CURLE_OPERATION_TIMEOUTED, /* 28 - the timeout time was reached */
CURLE_FTP_COULDNT_SET_ASCII, /* 29 - TYPE A failed */
CURLE_FTP_PORT_FAILED, /* 30 - FTP PORT operation failed */
CURLE_FTP_COULDNT_USE_REST, /* 31 - the REST command failed */
CURLE_FTP_COULDNT_GET_SIZE, /* 32 - the SIZE command failed */
CURLE_HTTP_RANGE_ERROR, /* 33 - RANGE "command" didn't work */
CURLE_HTTP_POST_ERROR, /* 34 */
CURLE_SSL_CONNECT_ERROR, /* 35 - wrong when connecting with SSL */
CURLE_FTP_BAD_DOWNLOAD_RESUME, /* 36 - couldn't resume download */
CURLE_FILE_COULDNT_READ_FILE, /* 37 */
CURLE_LDAP_CANNOT_BIND, /* 38 */
CURLE_LDAP_SEARCH_FAILED, /* 39 */
CURLE_LIBRARY_NOT_FOUND, /* 40 */
CURLE_FUNCTION_NOT_FOUND, /* 41 */
CURLE_ABORTED_BY_CALLBACK, /* 42 */
CURLE_BAD_FUNCTION_ARGUMENT, /* 43 */
CURLE_BAD_CALLING_ORDER, /* 44 */
CURLE_HTTP_PORT_FAILED, /* 45 - HTTP Interface operation failed */
CURLE_BAD_PASSWORD_ENTERED, /* 46 - my_getpass() returns fail */
CURLE_TOO_MANY_REDIRECTS , /* 47 - catch endless re-direct loops */
CURLE_UNKNOWN_TELNET_OPTION, /* 48 - User specified an unknown option */
CURLE_TELNET_OPTION_SYNTAX , /* 49 - Malformed telnet option */
CURLE_MALFORMAT_USER, /* the user name is illegally specified */
CURLE_FTP_COULDNT_STOR_FILE, /* failed FTP upload */
CURLE_READ_ERROR, /* could open/read from file */
CURLE_OUT_OF_MEMORY,
CURLE_OPERATION_TIMEOUTED, /* the timeout time was reached */
CURLE_FTP_COULDNT_SET_ASCII, /* TYPE A failed */
CURLE_FTP_PORT_FAILED, /* FTP PORT operation failed */
CURLE_FTP_COULDNT_USE_REST, /* the REST command failed */
CURLE_FTP_COULDNT_GET_SIZE, /* the SIZE command failed */
CURLE_HTTP_RANGE_ERROR, /* The RANGE "command" didn't seem to work */
CURLE_HTTP_POST_ERROR,
CURLE_SSL_CONNECT_ERROR, /* something was wrong when connecting with SSL */
CURLE_FTP_BAD_DOWNLOAD_RESUME, /* couldn't resume download */
CURLE_FILE_COULDNT_READ_FILE,
CURLE_LDAP_CANNOT_BIND,
CURLE_LDAP_SEARCH_FAILED,
CURLE_LIBRARY_NOT_FOUND,
CURLE_FUNCTION_NOT_FOUND,
CURLE_ABORTED_BY_CALLBACK,
CURLE_BAD_FUNCTION_ARGUMENT,
CURLE_BAD_CALLING_ORDER,
CURLE_HTTP_PORT_FAILED, /* HTTP Interface operation failed */
CURLE_BAD_PASSWORD_ENTERED, /* when the my_getpass() returns fail */
CURLE_TOO_MANY_REDIRECTS , /* catch endless re-direct loops */
CURL_LAST
CURL_LAST /* never use! */
} CURLcode;
/* This is just to make older programs not break: */
@@ -166,10 +155,6 @@ typedef enum {
#define CURL_ERROR_SIZE 256
/* maximum URL length we deal with in headers */
#define URL_MAX_LENGTH 4096
#define URL_MAX_LENGTH_TXT "4095"
/* name is uppercase CURLOPT_<name>,
type is one of the defined CURLOPTTYPE_<type>
number is unique identifier */
@@ -410,6 +395,41 @@ typedef enum {
document! Pass a NULL to shut it off. */
CINIT(FILETIME, OBJECTPOINT, 69),
/* This points to a linked list of telnet options */
CINIT(TELNETOPTIONS, OBJECTPOINT, 70),
/* Max amount of cached alive connections */
CINIT(MAXCONNECTS, LONG, 71),
/* What policy to use when closing connections when the cache is filled
up */
CINIT(CLOSEPOLICY, LONG, 72),
/* Callback to use when CURLCLOSEPOLICY_CALLBACK is set */
CINIT(CLOSEFUNCTION, FUNCTIONPOINT, 73),
/* Set to explicitly use a new connection for the upcoming transfer.
Do not use this unless you're absolutely sure of this, as it makes the
operation slower and is less friendly for the network. */
CINIT(FRESH_CONNECT, LONG, 74),
/* Set to explicitly forbid the upcoming transfer's connection to be re-used
when done. Do not use this unless you're absolutely sure of this, as it
makes the operation slower and is less friendly for the network. */
CINIT(FORBID_REUSE, LONG, 75),
/* Set to a file name that contains random data for libcurl to use to
seed the random engine when doing SSL connects. */
CINIT(RANDOM_FILE, OBJECTPOINT, 76),
/* Set to the Entropy Gathering Daemon socket pathname */
CINIT(EGDSOCKET, OBJECTPOINT, 77),
/* Time-out connect operations after this amount of seconds, if connects
are OK within this time, then fine... This only aborts the connect
phase. [Only works on unix-style/SIGALRM operating systems] */
CINIT(CONNECTTIMEOUT, LONG, 78),
CURLOPT_LASTENTRY /* the last unused */
} CURLoption;
@@ -435,8 +455,10 @@ typedef enum {
NOTE: they return TRUE if the strings match *case insensitively*.
*/
extern int (strequal)(const char *s1, const char *s2);
extern int (strnequal)(const char *s1, const char *s2, size_t n);
extern int (curl_strequal)(const char *s1, const char *s2);
extern int (curl_strnequal)(const char *s1, const char *s2, size_t n);
#define strequal(a,b) curl_strequal(a,b)
#define strnequal(a,b,c) curl_strnequal(a,b,c)
/* external form function */
int curl_formparse(char *string,
@@ -453,9 +475,14 @@ char *curl_getenv(char *variable);
/* Returns a static ascii string of the libcurl version. */
char *curl_version(void);
/* Escape and unescape URL encoding in strings. The functions return a new
* allocated string or NULL if an error occurred. */
char *curl_escape(char *string, int length);
char *curl_unescape(char *string, int length);
/* This is the version number */
#define LIBCURL_VERSION "7.5.2"
#define LIBCURL_VERSION_NUM 0x070502
#define LIBCURL_VERSION "7.7.1"
#define LIBCURL_VERSION_NUM 0x070701
/* linked-list structure for the CURLOPT_QUOTE option (and other) */
struct curl_slist {
@@ -463,184 +490,8 @@ struct curl_slist {
struct curl_slist *next;
};
struct curl_slist *curl_slist_append(struct curl_slist *list, char *data);
void curl_slist_free_all(struct curl_slist *list);
/*
* NAME curl_init()
*
* DESCRIPTION
*
* Inits libcurl globally. This must be used before any libcurl calls can
* be used. This may install global plug-ins or whatever. (This does not
* do winsock inits in Windows.)
*
* EXAMPLE
*
* curl_init();
*
*/
CURLcode curl_init(void);
/*
* NAME curl_init()
*
* DESCRIPTION
*
* Frees libcurl globally. This must be used after all libcurl calls have
* been used. This may remove global plug-ins or whatever. (This does not
* do winsock cleanups in Windows.)
*
* EXAMPLE
*
* curl_free(curl);
*
*/
void curl_free(void);
/*
* NAME curl_open()
*
* DESCRIPTION
*
* Opens a general curl session. It does not try to connect or do anything
* on the network because of this call. The specified URL is only required
* to enable curl to figure out what protocol to "activate".
*
* A session should be looked upon as a series of requests to a single host. A
* session interacts with one host only, using one single protocol.
*
* The URL is not required. If set to "" or NULL, it can still be set later
* using the curl_setopt() function. If the curl_connect() function is called
 * without the URL being known, it will return an error.
*
* EXAMPLE
*
* CURLcode result;
* CURL *curl;
* result = curl_open(&curl, "http://curl.haxx.nu/libcurl/");
* if(result != CURL_OK) {
* return result;
* }
* */
CURLcode curl_open(CURL **curl, char *url);
/*
* NAME curl_setopt()
*
* DESCRIPTION
*
* Sets a particular option to the specified value.
*
* EXAMPLE
*
 * CURL *curl;
* curl_setopt(curl, CURL_HTTP_FOLLOW_LOCATION, TRUE);
*/
CURLcode curl_setopt(CURL *handle, CURLoption option, ...);
/*
* NAME curl_close()
*
* DESCRIPTION
*
* Closes a session previously opened with curl_open()
*
* EXAMPLE
*
* CURL *curl;
* CURLcode result;
*
* result = curl_close(curl);
*/
CURLcode curl_close(CURL *curl); /* the opposite of curl_open() */
CURLcode curl_read(CURLconnect *c_conn, char *buf, size_t buffersize,
size_t *n);
CURLcode curl_write(CURLconnect *c_conn, char *buf, size_t amount,
size_t *n);
/*
* NAME curl_connect()
*
* DESCRIPTION
*
* Connects to the peer server and performs the initial setup. This function
* writes a connect handle to its second argument that is a unique handle for
* this connect. This allows multiple connects from the same handle returned
* by curl_open().
*
* EXAMPLE
*
 * CURLcode result;
 * CURL *curl;
 * CURLconnect *connect;
* result = curl_connect(curl, &connect);
*/
CURLcode curl_connect(CURL *curl, CURLconnect **in_connect);
/*
* NAME curl_do()
*
* DESCRIPTION
*
* (Note: May 3rd 2000: this function does not currently allow you to
* specify a document, it will use the one set previously)
*
* This function asks for the particular document, file or resource that
* resides on the server we have connected to. You may specify a full URL,
* just an absolute path or even a relative path. That means, if you're just
* getting one file from the remote site, you can use the same URL as input
* for both curl_open() as well as for this function.
*
 * In the event there are host name, port number, user name or password parts
 * in the URL, you can use the 'flags' argument to ignore them completely or,
 * at your choice, make the function fail if you're trying to get a URL from a
 * different host than the one you connected to with curl_connect().
 *
 * You can only get one document at a time using the same connection. When one
 * document has been received you can, however, request another.
*
* When the transfer is done, curl_done() MUST be called.
*
* EXAMPLE
*
 * CURLcode result;
* char *url;
* CURLconnect *connect;
* result = curl_do(connect, url, CURL_DO_NONE); */
CURLcode curl_do(CURLconnect *in_conn);
/*
* NAME curl_done()
*
* DESCRIPTION
*
* When the transfer following a curl_do() call is done, this function should
* get called.
*
* EXAMPLE
*
 * CURLcode result;
* char *url;
* CURLconnect *connect;
* result = curl_done(connect); */
CURLcode curl_done(CURLconnect *connect);
/*
* NAME curl_disconnect()
*
* DESCRIPTION
*
* Disconnects from the peer server and performs connection cleanup.
*
* EXAMPLE
*
* CURLcode result;
* CURLconnect *connect;
* result = curl_disconnect(connect); */
CURLcode curl_disconnect(CURLconnect *connect);
struct curl_slist *curl_slist_append(struct curl_slist *, const char *);
void curl_slist_free_all(struct curl_slist *);
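Everything removed above is the pre-7.7 low-level API (curl_init/curl_open/curl_connect/curl_do/curl_done/curl_disconnect); as the easy.c diff further down shows, those steps now live behind the curl_easy_* calls as internal Curl_* functions. A rough sketch of the replacement call sequence for a plain URL fetch:

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();     /* covers what curl_init() + curl_open() did */
  CURLcode res = CURLE_OK;

  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.nu/libcurl/");
    res = curl_easy_perform(curl);   /* roughly the old connect/do/transfer/done sequence */
    curl_easy_cleanup(curl);         /* covers what curl_close() + curl_free() did */
  }
  return (int)res;
}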
/*
* NAME curl_getdate()
@@ -678,22 +529,28 @@ typedef enum {
CURLINFO_SSL_VERIFYRESULT = CURLINFO_LONG + 13,
CURLINFO_FILETIME = CURLINFO_LONG + 14,
CURLINFO_LASTONE = 15
CURLINFO_CONTENT_LENGTH_DOWNLOAD = CURLINFO_DOUBLE + 15,
CURLINFO_CONTENT_LENGTH_UPLOAD = CURLINFO_DOUBLE + 16,
CURLINFO_LASTONE = 17
} CURLINFO;
/*
* NAME curl_getinfo()
*
* DESCRIPTION
*
* Request internal information from the curl session with this function.
* The third argument MUST be a pointer to a long or a pointer to a char *.
* The data pointed to will be filled in accordingly and can be relied upon
* only if the function returns CURLE_OK.
 * This function is intended to be used *AFTER* a performed transfer; all
 * results are undefined before the transfer is completed.
*/
CURLcode curl_getinfo(CURL *curl, CURLINFO info, ...);
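Applications reach this through curl_easy_getinfo() from easy.h (the function above becomes the internal Curl_getinfo() in this release). Note that the newly added CURLINFO_DOUBLE entries are read into a double, not a long. A sketch, assuming a handle whose transfer has already completed and <stdio.h> included:

long httpcode = 0;
double clength = 0.0;

if(curl_easy_getinfo(curl, CURLINFO_HTTP_CODE, &httpcode) == CURLE_OK)
  printf("HTTP code: %ld\n", httpcode);

if(curl_easy_getinfo(curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &clength) == CURLE_OK)
  printf("Content-Length: %.0f\n", clength);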
/* unfortunately, the easy.h include file needs the options and info stuff
before it can be included! */
#include <curl/easy.h> /* nothing in curl is fun without the easy stuff */
typedef enum {
CURLCLOSEPOLICY_NONE, /* first, never use this */
CURLCLOSEPOLICY_OLDEST,
CURLCLOSEPOLICY_LEAST_RECENTLY_USED,
CURLCLOSEPOLICY_LEAST_TRAFFIC,
CURLCLOSEPOLICY_SLOWEST,
CURLCLOSEPOLICY_CALLBACK,
CURLCLOSEPOLICY_LAST /* last, never use this */
} curl_closepolicy;
#ifdef __cplusplus
}
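The curl_closepolicy values above are handed to CURLOPT_CLOSEPOLICY and decide which cached connection is sacrificed once CURLOPT_MAXCONNECTS is reached. A two-line sketch on an initialized handle:

curl_easy_setopt(curl, CURLOPT_MAXCONNECTS, 3L);
curl_easy_setopt(curl, CURLOPT_CLOSEPOLICY, (long)CURLCLOSEPOLICY_OLDEST);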


@@ -55,26 +55,28 @@
#include <stdarg.h>
int mprintf(const char *format, ...);
int mfprintf(FILE *fd, const char *format, ...);
int msprintf(char *buffer, const char *format, ...);
int msnprintf(char *buffer, size_t maxlength, const char *format, ...);
int mvprintf(const char *format, va_list args);
int mvfprintf(FILE *fd, const char *format, va_list args);
int mvsprintf(char *buffer, const char *format, va_list args);
int mvsnprintf(char *buffer, size_t maxlength, const char *format, va_list args);
char *maprintf(const char *format, ...);
char *mvaprintf(const char *format, va_list args);
int curl_mprintf(const char *format, ...);
int curl_mfprintf(FILE *fd, const char *format, ...);
int curl_msprintf(char *buffer, const char *format, ...);
int curl_msnprintf(char *buffer, size_t maxlength, const char *format, ...);
int curl_mvprintf(const char *format, va_list args);
int curl_mvfprintf(FILE *fd, const char *format, va_list args);
int curl_mvsprintf(char *buffer, const char *format, va_list args);
int curl_mvsnprintf(char *buffer, size_t maxlength, const char *format, va_list args);
char *curl_maprintf(const char *format, ...);
char *curl_mvaprintf(const char *format, va_list args);
#ifdef _MPRINTF_REPLACE
# define printf mprintf
# define fprintf mfprintf
# define sprintf msprintf
# define snprintf msnprintf
# define vprintf mvprintf
# define vfprintf mvfprintf
# define vsprintf mvsprintf
# define vsnprintf mvsnprintf
# define printf curl_mprintf
# define fprintf curl_mfprintf
# define sprintf curl_msprintf
# define snprintf curl_msnprintf
# define vprintf curl_mvprintf
# define vfprintf curl_mvfprintf
# define vsprintf curl_mvsprintf
# define vsnprintf curl_mvsnprintf
# define aprintf curl_maprintf
# define vaprintf curl_mvaprintf
#endif
#endif /* H_MPRINTF */
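The *printf family is now exported under curl_-prefixed names; defining _MPRINTF_REPLACE maps the plain names onto them, as the block above shows. The allocating variants return a malloc()ed string that the caller frees. A small sketch (the text and version string are illustrative):

#include <curl/mprintf.h>
#include <stdlib.h>

void mprintf_demo(void)
{
  char *line = curl_maprintf("libcurl/%s built %s", "7.7.1", __DATE__);
  if(line) {
    curl_mprintf("%s\n", line);
    free(line);
  }
}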


@@ -4,6 +4,10 @@
AUTOMAKE_OPTIONS = foreign
EXTRA_DIST = getdate.y \
Makefile.b32 Makefile.b32.resp Makefile.m32 Makefile.vc6 \
libcurl.def dllinit.c curllib.dsp curllib.dsw
lib_LTLIBRARIES = libcurl.la
# Some flags needed when trying to cause warnings ;-)
@@ -12,7 +16,7 @@ lib_LTLIBRARIES = libcurl.la
INCLUDES = -I$(top_srcdir)/include
libcurl_la_LDFLAGS = -version-info 1:0:0
libcurl_la_LDFLAGS = -version-info 2:0:0
# This flag accepts an argument of the form current[:revision[:age]]. So,
# passing -version-info 3:12:1 sets current to 3, revision to 12, and age to
# 1.
@@ -49,12 +53,15 @@ cookie.c formdata.h http.c sendf.c \
cookie.h ftp.c http.h sendf.h url.c \
dict.c ftp.h if2ip.c speedcheck.c url.h \
dict.h getdate.c if2ip.h speedcheck.h urldata.h \
download.c getdate.h ldap.c ssluse.c version.c \
download.h getenv.c ldap.h ssluse.h \
escape.c getenv.h mprintf.c telnet.c \
getdate.h ldap.c ssluse.c version.c \
getenv.c ldap.h ssluse.h \
escape.c mprintf.c telnet.c \
escape.h getpass.c netrc.c telnet.h \
getinfo.c highlevel.c strequal.c strequal.h easy.c \
security.h security.c krb4.c memdebug.c memdebug.h
getinfo.c transfer.c strequal.c strequal.h easy.c \
security.h security.c krb4.c krb4.h memdebug.c memdebug.h inet_ntoa_r.h \
http_chunks.c http_chunks.h
noinst_HEADERS = setup.h transfer.h
# Say $(srcdir), so GNU make does not report an ambiguity with the .y.c rule.
$(srcdir)/getdate.c: getdate.y


@@ -29,7 +29,7 @@ LIBCURLLIB = libcurl.lib
SOURCES = \
base64.c \
cookie.c \
download.c \
transfer.c \
escape.c \
formdata.c \
ftp.c \
@@ -54,7 +54,6 @@ SOURCES = \
getinfo.c \
version.c \
easy.c \
highlevel.c \
strequal.c
OBJECTS = $(SOURCES:.c=.obj)


@@ -1,6 +1,6 @@
+base64.obj &
+cookie.obj &
+download.obj &
+transfer.obj &
+escape.obj &
+formdata.obj &
+ftp.obj &
@@ -25,5 +25,4 @@
+getinfo.obj &
+version.obj &
+easy.obj &
+highlevel.obj &
+strequal.obj


@@ -1,357 +0,0 @@
# Makefile.in generated automatically by automake 1.4 from Makefile.am
# Copyright (C) 1994, 1995-8, 1999 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
#
# $Id$
#
SHELL = @SHELL@
srcdir = @srcdir@
top_srcdir = @top_srcdir@
VPATH = @srcdir@
prefix = @prefix@
exec_prefix = @exec_prefix@
bindir = @bindir@
sbindir = @sbindir@
libexecdir = @libexecdir@
datadir = @datadir@
sysconfdir = @sysconfdir@
sharedstatedir = @sharedstatedir@
localstatedir = @localstatedir@
libdir = @libdir@
infodir = @infodir@
mandir = @mandir@
includedir = @includedir@
oldincludedir = /usr/include
DESTDIR =
pkgdatadir = $(datadir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
top_builddir = ..
ACLOCAL = @ACLOCAL@
AUTOCONF = @AUTOCONF@
AUTOMAKE = @AUTOMAKE@
AUTOHEADER = @AUTOHEADER@
INSTALL = @INSTALL@
INSTALL_PROGRAM = @INSTALL_PROGRAM@ $(AM_INSTALL_PROGRAM_FLAGS)
INSTALL_DATA = @INSTALL_DATA@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
transform = @program_transform_name@
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
host_alias = @host_alias@
host_triplet = @host@
AS = @AS@
CC = @CC@
DLLTOOL = @DLLTOOL@
LIBTOOL = @LIBTOOL@
LN_S = @LN_S@
MAKEINFO = @MAKEINFO@
NROFF = @NROFF@
OBJDUMP = @OBJDUMP@
PACKAGE = @PACKAGE@
PERL = @PERL@
RANLIB = @RANLIB@
VERSION = @VERSION@
YACC = @YACC@
AUTOMAKE_OPTIONS = foreign
lib_LTLIBRARIES = libcurl.la
# Some flags needed when trying to cause warnings ;-)
# CFLAGS = -DMALLOCDEBUG -g # -Wall #-pedantic
INCLUDES = -I$(top_srcdir)/include
libcurl_la_LDFLAGS = -version-info 1:0:0
# This flag accepts an argument of the form current[:revision[:age]]. So,
# passing -version-info 3:12:1 sets current to 3, revision to 12, and age to
# 1.
#
# If either revision or age are omitted, they default to 0. Also note that age
# must be less than or equal to the current interface number.
#
# Here are a set of rules to help you update your library version information:
#
# 1.Start with version information of 0:0:0 for each libtool library.
#
# 2.Update the version information only immediately before a public release of
# your software. More frequent updates are unnecessary, and only guarantee
# that the current interface number gets larger faster.
#
# 3.If the library source code has changed at all since the last update, then
# increment revision (c:r:a becomes c:r+1:a).
#
# 4.If any interfaces have been added, removed, or changed since the last
# update, increment current, and set revision to 0.
#
# 5.If any interfaces have been added since the last public release, then
# increment age.
#
# 6.If any interfaces have been removed since the last public release, then
# set age to 0.
#
libcurl_la_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c \
base64.c file.h hostip.c progress.c timeval.h base64.h formdata.c \
hostip.h progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c \
http.h sendf.h url.c dict.c ftp.h if2ip.c speedcheck.c url.h dict.h \
getdate.c if2ip.h speedcheck.h urldata.h download.c getdate.h ldap.c \
ssluse.c version.c download.h getenv.c ldap.h ssluse.h escape.c getenv.h \
mprintf.c telnet.c escape.h getpass.c netrc.c telnet.h getinfo.c \
highlevel.c strequal.c strequal.h easy.c security.h security.c krb4.c \
memdebug.c memdebug.h
mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
CONFIG_HEADER = ../config.h ../src/config.h
CONFIG_CLEAN_FILES =
LTLIBRARIES = $(lib_LTLIBRARIES)
DEFS = @DEFS@ -I. -I$(srcdir) -I.. -I../src
CPPFLAGS = @CPPFLAGS@
LDFLAGS = @LDFLAGS@
LIBS = @LIBS@
libcurl_la_LIBADD =
libcurl_la_OBJECTS = file.lo timeval.lo base64.lo hostip.lo progress.lo \
formdata.lo cookie.lo http.lo sendf.lo ftp.lo url.lo dict.lo if2ip.lo \
speedcheck.lo getdate.lo download.lo ldap.lo ssluse.lo version.lo \
getenv.lo escape.lo mprintf.lo telnet.lo getpass.lo netrc.lo getinfo.lo \
highlevel.lo strequal.lo easy.lo security.lo krb4.lo memdebug.lo
CFLAGS = @CFLAGS@
COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
LTCOMPILE = $(LIBTOOL) --mode=compile $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
CCLD = $(CC)
LINK = $(LIBTOOL) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@
DIST_COMMON = Makefile.am Makefile.in
DISTFILES = $(DIST_COMMON) $(SOURCES) $(HEADERS) $(TEXINFOS) $(EXTRA_DIST)
TAR = gtar
GZIP_ENV = --best
SOURCES = $(libcurl_la_SOURCES)
OBJECTS = $(libcurl_la_OBJECTS)
all: all-redirect
.SUFFIXES:
.SUFFIXES: .S .c .lo .o .s
$(srcdir)/Makefile.in: Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4)
cd $(top_srcdir) && $(AUTOMAKE) --foreign --include-deps lib/Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
cd $(top_builddir) \
&& CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
mostlyclean-libLTLIBRARIES:
clean-libLTLIBRARIES:
-test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
distclean-libLTLIBRARIES:
maintainer-clean-libLTLIBRARIES:
install-libLTLIBRARIES: $(lib_LTLIBRARIES)
@$(NORMAL_INSTALL)
$(mkinstalldirs) $(DESTDIR)$(libdir)
@list='$(lib_LTLIBRARIES)'; for p in $$list; do \
if test -f $$p; then \
echo "$(LIBTOOL) --mode=install $(INSTALL) $$p $(DESTDIR)$(libdir)/$$p"; \
$(LIBTOOL) --mode=install $(INSTALL) $$p $(DESTDIR)$(libdir)/$$p; \
else :; fi; \
done
uninstall-libLTLIBRARIES:
@$(NORMAL_UNINSTALL)
list='$(lib_LTLIBRARIES)'; for p in $$list; do \
$(LIBTOOL) --mode=uninstall rm -f $(DESTDIR)$(libdir)/$$p; \
done
.c.o:
$(COMPILE) -c $<
.s.o:
$(COMPILE) -c $<
.S.o:
$(COMPILE) -c $<
mostlyclean-compile:
-rm -f *.o core *.core
clean-compile:
distclean-compile:
-rm -f *.tab.c
maintainer-clean-compile:
.c.lo:
$(LIBTOOL) --mode=compile $(COMPILE) -c $<
.s.lo:
$(LIBTOOL) --mode=compile $(COMPILE) -c $<
.S.lo:
$(LIBTOOL) --mode=compile $(COMPILE) -c $<
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
distclean-libtool:
maintainer-clean-libtool:
libcurl.la: $(libcurl_la_OBJECTS) $(libcurl_la_DEPENDENCIES)
$(LINK) -rpath $(libdir) $(libcurl_la_LDFLAGS) $(libcurl_la_OBJECTS) $(libcurl_la_LIBADD) $(LIBS)
tags: TAGS
ID: $(HEADERS) $(SOURCES) $(LISP)
list='$(SOURCES) $(HEADERS)'; \
unique=`for i in $$list; do echo $$i; done | \
awk ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
here=`pwd` && cd $(srcdir) \
&& mkid -f$$here/ID $$unique $(LISP)
TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) $(LISP)
tags=; \
here=`pwd`; \
list='$(SOURCES) $(HEADERS)'; \
unique=`for i in $$list; do echo $$i; done | \
awk ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
test -z "$(ETAGS_ARGS)$$unique$(LISP)$$tags" \
|| (cd $(srcdir) && etags $(ETAGS_ARGS) $$tags $$unique $(LISP) -o $$here/TAGS)
mostlyclean-tags:
clean-tags:
distclean-tags:
-rm -f TAGS ID
maintainer-clean-tags:
distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)/$(subdir)
subdir = lib
distdir: $(DISTFILES)
@for file in $(DISTFILES); do \
d=$(srcdir); \
if test -d $$d/$$file; then \
cp -pr $$d/$$file $(distdir)/$$file; \
else \
test -f $(distdir)/$$file \
|| ln $$d/$$file $(distdir)/$$file 2> /dev/null \
|| cp -p $$d/$$file $(distdir)/$$file || :; \
fi; \
done
info-am:
info: info-am
dvi-am:
dvi: dvi-am
check-am: all-am
check: check-am
installcheck-am:
installcheck: installcheck-am
install-exec-am: install-libLTLIBRARIES
install-exec: install-exec-am
install-data-am:
install-data: install-data-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
install: install-am
uninstall-am: uninstall-libLTLIBRARIES
uninstall: uninstall-am
all-am: Makefile $(LTLIBRARIES)
all-redirect: all-am
install-strip:
$(MAKE) $(AM_MAKEFLAGS) AM_INSTALL_PROGRAM_FLAGS=-s install
installdirs:
$(mkinstalldirs) $(DESTDIR)$(libdir)
mostlyclean-generic:
clean-generic:
distclean-generic:
-rm -f Makefile $(CONFIG_CLEAN_FILES)
-rm -f config.cache config.log stamp-h stamp-h[0-9]*
maintainer-clean-generic:
mostlyclean-am: mostlyclean-libLTLIBRARIES mostlyclean-compile \
mostlyclean-libtool mostlyclean-tags \
mostlyclean-generic
mostlyclean: mostlyclean-am
clean-am: clean-libLTLIBRARIES clean-compile clean-libtool clean-tags \
clean-generic mostlyclean-am
clean: clean-am
distclean-am: distclean-libLTLIBRARIES distclean-compile \
distclean-libtool distclean-tags distclean-generic \
clean-am
-rm -f libtool
distclean: distclean-am
maintainer-clean-am: maintainer-clean-libLTLIBRARIES \
maintainer-clean-compile maintainer-clean-libtool \
maintainer-clean-tags maintainer-clean-generic \
distclean-am
@echo "This command is intended for maintainers to use;"
@echo "it deletes files that may require special tools to rebuild."
maintainer-clean: maintainer-clean-am
.PHONY: mostlyclean-libLTLIBRARIES distclean-libLTLIBRARIES \
clean-libLTLIBRARIES maintainer-clean-libLTLIBRARIES \
uninstall-libLTLIBRARIES install-libLTLIBRARIES mostlyclean-compile \
distclean-compile clean-compile maintainer-clean-compile \
mostlyclean-libtool distclean-libtool clean-libtool \
maintainer-clean-libtool tags mostlyclean-tags distclean-tags \
clean-tags maintainer-clean-tags distdir info-am info dvi-am dvi check \
check-am installcheck-am installcheck install-exec-am install-exec \
install-data-am install-data install-am install uninstall-am uninstall \
all-redirect all-am all installdirs mostlyclean-generic \
distclean-generic clean-generic maintainer-clean-generic clean \
mostlyclean distclean maintainer-clean
# Say $(srcdir), so GNU make does not report an ambiguity with the .y.c rule.
$(srcdir)/getdate.c: getdate.y
cd $(srcdir) && \
$(YACC) $(YFLAGS) getdate.y; \
mv -f y.tab.c getdate.c
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:


@@ -30,16 +30,16 @@ libcurl_a_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c base64.c \
file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h progress.h \
cookie.c formdata.h http.c sendf.c cookie.h ftp.c http.h sendf.h url.c dict.c \
ftp.h if2ip.c speedcheck.c url.h dict.h getdate.c if2ip.h speedcheck.h \
urldata.h download.c getdate.h ldap.c ssluse.c version.c download.h getenv.c \
urldata.h transfer.c getdate.h ldap.c ssluse.c version.c transfer.h getenv.c \
ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h getpass.c netrc.c \
telnet.h getinfo.c highlevel.c strequal.c strequal.h easy.c security.h \
security.c krb4.c
telnet.h getinfo.c strequal.c strequal.h easy.c security.h \
security.c krb4.h krb4.c memdebug.h memdebug.c inet_ntoa_r.h http_chunks.h http_chunks.c
libcurl_a_OBJECTS = file.o timeval.o base64.o hostip.o progress.o \
formdata.o cookie.o http.o sendf.o ftp.o url.o dict.o if2ip.o \
speedcheck.o getdate.o download.o ldap.o ssluse.o version.o \
speedcheck.o getdate.o transfer.o ldap.o ssluse.o version.o \
getenv.o escape.o mprintf.o telnet.o getpass.o netrc.o getinfo.o \
highlevel.o strequal.o easy.o security.o krb4.o
strequal.o easy.o security.o krb4.o memdebug.o http_chunks.o
LIBRARIES = $(libcurl_a_LIBRARIES)
SOURCES = $(libcurl_a_SOURCES)


@@ -33,11 +33,12 @@ LINKSLIBS = libeay32.lib ssleay32.lib RSAglue.lib
RELEASE_OBJS= \
base64r.obj \
cookier.obj \
downloadr.obj \
transferr.obj \
escaper.obj \
formdatar.obj \
ftpr.obj \
httpr.obj \
http_chunksr.obj \
ldapr.obj \
dictr.obj \
telnetr.obj \
@@ -58,17 +59,17 @@ RELEASE_OBJS= \
getinfor.obj \
versionr.obj \
easyr.obj \
highlevelr.obj \
strequalr.obj
DEBUG_OBJS= \
base64d.obj \
cookied.obj \
downloadd.obj \
transferd.obj \
escaped.obj \
formdatad.obj \
ftpd.obj \
httpd.obj \
http_chunksd.obj \
ldapd.obj \
dictd.obj \
telnetd.obj \
@@ -89,17 +90,17 @@ DEBUG_OBJS= \
getinfod.obj \
versiond.obj \
easyd.obj \
highleveld.obj \
strequald.obj
RELEASE_SSL_OBJS= \
base64rs.obj \
cookiers.obj \
downloadrs.obj \
transferrs.obj \
escapers.obj \
formdatars.obj \
ftprs.obj \
httprs.obj \
http_chunksrs.obj \
ldaprs.obj \
dictrs.obj \
telnetrs.obj \
@@ -120,17 +121,17 @@ RELEASE_SSL_OBJS= \
getinfors.obj \
versionrs.obj \
easyrs.obj \
highlevelrs.obj \
strequalrs.obj
LINK_OBJS= \
base64.obj \
cookie.obj \
download.obj \
transfer.obj \
escape.obj \
formdata.obj \
ftp.obj \
http.obj \
http_chunks.obj \
ldap.obj \
dict.obj \
telnet.obj \
@@ -151,7 +152,6 @@ LINK_OBJS= \
getinfo.obj \
version.obj \
easy.obj \
highlevel.obj \
strequal.obj
all : release
@@ -170,8 +170,8 @@ base64r.obj: base64.c
$(CCR) $(CFLAGS) base64.c
cookier.obj: cookie.c
$(CCR) $(CFLAGS) cookie.c
downloadr.obj: download.c
$(CCR) $(CFLAGS) download.c
transferr.obj: transfer.c
$(CCR) $(CFLAGS) transfer.c
escaper.obj: escape.c
$(CCR) $(CFLAGS) escape.c
formdatar.obj: formdata.c
@@ -180,6 +180,8 @@ ftpr.obj: ftp.c
$(CCR) $(CFLAGS) ftp.c
httpr.obj: http.c
$(CCR) $(CFLAGS) http.c
http_chunksr.obj: http_chunks.c
$(CCR) $(CFLAGS) http_chunks.c
ldapr.obj: ldap.c
$(CCR) $(CFLAGS) ldap.c
dictr.obj: dict.c
@@ -220,8 +222,6 @@ versionr.obj: version.c
$(CCR) $(CFLAGS) version.c
easyr.obj: easy.c
$(CCR) $(CFLAGS) easy.c
highlevelr.obj: highlevel.c
$(CCR) $(CFLAGS) highlevel.c
strequalr.obj: strequal.c
$(CCR) $(CFLAGS) strequal.c
@@ -230,8 +230,8 @@ base64d.obj: base64.c
$(CCD) $(CFLAGS) base64.c
cookied.obj: cookie.c
$(CCD) $(CFLAGS) cookie.c
downloadd.obj: download.c
$(CCD) $(CFLAGS) download.c
transferd.obj: transfer.c
$(CCD) $(CFLAGS) transfer.c
escaped.obj: escape.c
$(CCD) $(CFLAGS) escape.c
formdatad.obj: formdata.c
@@ -240,6 +240,8 @@ ftpd.obj: ftp.c
$(CCD) $(CFLAGS) ftp.c
httpd.obj: http.c
$(CCD) $(CFLAGS) http.c
http_chunksd.obj: http_chunks.c
$(CCD) $(CFLAGS) http_chunks.c
ldapd.obj: ldap.c
$(CCD) $(CFLAGS) ldap.c
dictd.obj: dict.c
@@ -280,8 +282,6 @@ versiond.obj: version.c
$(CCD) $(CFLAGS) version.c
easyd.obj: easy.c
$(CCD) $(CFLAGS) easy.c
highleveld.obj: highlevel.c
$(CCD) $(CFLAGS) highlevel.c
strequald.obj: strequal.c
$(CCD) $(CFLAGS) strequal.c
@@ -291,16 +291,16 @@ base64rs.obj: base64.c
$(CCRS) $(CFLAGS) base64.c
cookiers.obj: cookie.c
$(CCRS) $(CFLAGS) cookie.c
downloadrs.obj: download.c
$(CCRS) $(CFLAGS) download.c
transferrs.obj: transfer.c
$(CCRS) $(CFLAGS) transfer.c
escapers.obj: escape.c
$(CCRS) $(CFLAGS) escape.c
formdatars.obj: formdata.c
$(CCRS) $(CFLAGS) formdata.c
ftprs.obj: ftp.c
$(CCRS) $(CFLAGS) ftp.c
httprs.obj: http.c
$(CCRS) $(CFLAGS) http.c
http_chunksrs.obj: http_chunks.c
$(CCRS) $(CFLAGS) http_chunks.c
ldaprs.obj: ldap.c
$(CCRS) $(CFLAGS) ldap.c
dictrs.obj: dict.c
@@ -341,8 +341,6 @@ versionrs.obj: version.c
$(CCRS) $(CFLAGS) version.c
easyrs.obj: easy.c
$(CCRS) $(CFLAGS) easy.c
highlevelrs.obj: highlevel.c
$(CCRS) $(CFLAGS) highlevel.c
strequalrs.obj: strequal.c
$(CCRS) $(CFLAGS) strequal.c


@@ -63,6 +63,7 @@
#define SYNCH 242 /* for telfunc calls */
#ifdef TELCMDS
static
char *telcmds[] = {
"EOF", "SUSP", "ABORT", "EOR",
"SE", "NOP", "DMARK", "BRK", "IP", "AO", "AYT", "EC",
@@ -124,6 +125,7 @@ extern char *telcmds[];
#define NTELOPTS (1+TELOPT_NEW_ENVIRON)
#ifdef TELOPTS
static
char *telopts[NTELOPTS+1] = {
"BINARY", "ECHO", "RCP", "SUPPRESS GO AHEAD", "NAME",
"STATUS", "TIMING MARK", "RCTE", "NAOL", "NAOP",


@@ -55,7 +55,7 @@ static int pos(char c)
}
#if 1
int base64_encode(const void *data, int size, char **str)
int Curl_base64_encode(const void *data, int size, char **str)
{
char *s, *p;
int i;
@@ -93,7 +93,7 @@ int base64_encode(const void *data, int size, char **str)
}
#endif
int base64_decode(const char *str, void *data)
int Curl_base64_decode(const char *str, void *data)
{
const char *p;
unsigned char *q;


@@ -34,6 +34,7 @@
#ifndef __BASE64_H
#define __BASE64_H
int base64_encode(const void *data, int size, char **str);
int Curl_base64_encode(const void *data, int size, char **str);
int Curl_base64_decode(const char *str, void *data);
#endif


@@ -100,9 +100,10 @@ Example set of cookies:
*
***************************************************************************/
struct Cookie *cookie_add(struct CookieInfo *c,
bool httpheader, /* TRUE if HTTP header-style line */
char *lineptr) /* first non-space of the line */
struct Cookie *
Curl_cookie_add(struct CookieInfo *c,
bool httpheader, /* TRUE if HTTP header-style line */
char *lineptr) /* first non-space of the line */
{
struct Cookie *clist;
char what[MAX_COOKIE_LINE];
@@ -347,7 +348,7 @@ struct Cookie *cookie_add(struct CookieInfo *c,
* called before any cookies are set. File may be NULL.
*
****************************************************************************/
struct CookieInfo *cookie_init(char *file)
struct CookieInfo *Curl_cookie_init(char *file)
{
char line[MAX_COOKIE_LINE];
struct CookieInfo *c;
@@ -375,7 +376,7 @@ struct CookieInfo *cookie_init(char *file)
while(*lineptr && isspace((int)*lineptr))
lineptr++;
cookie_add(c, TRUE, lineptr);
Curl_cookie_add(c, TRUE, lineptr);
}
else {
/* This might be a netscape cookie-file line, get it! */
@@ -383,7 +384,7 @@ struct CookieInfo *cookie_init(char *file)
while(*lineptr && isspace((int)*lineptr))
lineptr++;
cookie_add(c, FALSE, lineptr);
Curl_cookie_add(c, FALSE, lineptr);
}
}
if(fromfile)
@@ -405,8 +406,8 @@ struct CookieInfo *cookie_init(char *file)
*
****************************************************************************/
struct Cookie *cookie_getlist(struct CookieInfo *c,
char *host, char *path, bool secure)
struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
char *host, char *path, bool secure)
{
struct Cookie *newco;
struct Cookie *co;
@@ -473,7 +474,7 @@ struct Cookie *cookie_getlist(struct CookieInfo *c,
*
****************************************************************************/
void cookie_freelist(struct Cookie *co)
void Curl_cookie_freelist(struct Cookie *co)
{
struct Cookie *next;
if(co) {
@@ -493,7 +494,7 @@ void cookie_freelist(struct Cookie *co)
* Free a "cookie object" previous created with cookie_init().
*
****************************************************************************/
void cookie_cleanup(struct CookieInfo *c)
void Curl_cookie_cleanup(struct CookieInfo *c)
{
struct Cookie *co;
struct Cookie *next;


@@ -63,10 +63,10 @@ struct CookieInfo {
#define MAX_NAME 256
#define MAX_NAME_TXT "255"
struct Cookie *cookie_add(struct CookieInfo *, bool, char *);
struct CookieInfo *cookie_init(char *);
struct Cookie *cookie_getlist(struct CookieInfo *, char *, char *, bool);
void cookie_freelist(struct Cookie *);
void cookie_cleanup(struct CookieInfo *);
struct Cookie *Curl_cookie_add(struct CookieInfo *, bool, char *);
struct CookieInfo *Curl_cookie_init(char *);
struct Cookie *Curl_cookie_getlist(struct CookieInfo *, char *, char *, bool);
void Curl_cookie_freelist(struct Cookie *);
void Curl_cookie_cleanup(struct CookieInfo *);
#endif

lib/curllib.dsp (new file, 367 lines)

@@ -0,0 +1,367 @@
# Microsoft Developer Studio Project File - Name="curllib" - Package Owner=<4>
# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
CFG=curllib - Win32 Debug
!MESSAGE This is not a valid makefile. To build this project using NMAKE,
!MESSAGE use the Export Makefile command and run
!MESSAGE
!MESSAGE NMAKE /f "curllib.mak".
!MESSAGE
!MESSAGE You can specify a configuration when running NMAKE
!MESSAGE by defining the macro CFG on the command line. For example:
!MESSAGE
!MESSAGE NMAKE /f "curllib.mak" CFG="curllib - Win32 Debug"
!MESSAGE
!MESSAGE Possible choices for configuration are:
!MESSAGE
!MESSAGE "curllib - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
!MESSAGE "curllib - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
!MESSAGE
# Begin Project
# PROP AllowPerConfigDependencies 0
# PROP Scc_ProjName ""
# PROP Scc_LocalPath ""
CPP=cl.exe
MTL=midl.exe
RSC=rc.exe
!IF "$(CFG)" == "curllib - Win32 Release"
# PROP BASE Use_MFC 0
# PROP BASE Use_Debug_Libraries 0
# PROP BASE Output_Dir "Release"
# PROP BASE Intermediate_Dir "Release"
# PROP BASE Target_Dir ""
# PROP Use_MFC 0
# PROP Use_Debug_Libraries 0
# PROP Output_Dir "Release"
# PROP Intermediate_Dir "Release"
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /D "_USRDLL" /D "CURLLIB_EXPORTS" /YX /FD /c
# ADD CPP /nologo /MT /W3 /GX /O2 /I "C:\jdk1.3.0_01\include" /I "C:\jdk1.3.0_01\include\win32" /I "..\include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /D "_USRDLL" /D "CURLLIB_EXPORTS" /YX /FD /c
# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32
# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
# ADD BASE BSC32 /nologo
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /dll /machine:I386
# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib wsock32.lib /nologo /dll /machine:I386 /out:"Release/curl.dll"
!ELSEIF "$(CFG)" == "curllib - Win32 Debug"
# PROP BASE Use_MFC 0
# PROP BASE Use_Debug_Libraries 1
# PROP BASE Output_Dir "Debug"
# PROP BASE Intermediate_Dir "Debug"
# PROP BASE Target_Dir ""
# PROP Use_MFC 0
# PROP Use_Debug_Libraries 1
# PROP Output_Dir "Debug"
# PROP Intermediate_Dir "Debug"
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /D "_USRDLL" /D "CURLLIB_EXPORTS" /YX /FD /GZ /c
# ADD CPP /nologo /MTd /W3 /Gm /GX /ZI /Od /I "C:\jdk1.3.0_01\include" /I "C:\jdk1.3.0_01\include\win32" /I "..\include" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /D "_USRDLL" /D "CURLLIB_EXPORTS" /YX /FD /GZ /c
# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32
# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
# ADD BASE RSC /l 0x409 /d "_DEBUG"
# ADD RSC /l 0x409 /d "_DEBUG"
BSC32=bscmake.exe
# ADD BASE BSC32 /nologo
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /dll /debug /machine:I386 /pdbtype:sept
# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib wsock32.lib /nologo /dll /debug /machine:I386 /out:"Debug/curl.dll" /pdbtype:sept
!ENDIF
# Begin Target
# Name "curllib - Win32 Release"
# Name "curllib - Win32 Debug"
# Begin Group "Source Files"
# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat"
# Begin Source File
SOURCE=.\base64.c
# End Source File
# Begin Source File
SOURCE=.\cookie.c
# End Source File
# Begin Source File
SOURCE=.\dict.c
# End Source File
# Begin Source File
SOURCE=.\dllinit.c
# End Source File
# Begin Source File
SOURCE=.\easy.c
# End Source File
# Begin Source File
SOURCE=.\easyswig.c
# End Source File
# Begin Source File
SOURCE=.\easyswig_wrap.c
# End Source File
# Begin Source File
SOURCE=.\escape.c
# End Source File
# Begin Source File
SOURCE=.\file.c
# End Source File
# Begin Source File
SOURCE=.\formdata.c
# End Source File
# Begin Source File
SOURCE=.\ftp.c
# End Source File
# Begin Source File
SOURCE=.\getdate.c
# End Source File
# Begin Source File
SOURCE=.\getenv.c
# End Source File
# Begin Source File
SOURCE=.\getinfo.c
# End Source File
# Begin Source File
SOURCE=.\getpass.c
# End Source File
# Begin Source File
SOURCE=.\hostip.c
# End Source File
# Begin Source File
SOURCE=.\http.c
# End Source File
# Begin Source File
SOURCE=.\if2ip.c
# End Source File
# Begin Source File
SOURCE=.\krb4.c
# End Source File
# Begin Source File
SOURCE=.\ldap.c
# End Source File
# Begin Source File
SOURCE=.\libcurl.def
# End Source File
# Begin Source File
SOURCE=.\memdebug.c
# End Source File
# Begin Source File
SOURCE=.\mprintf.c
# End Source File
# Begin Source File
SOURCE=.\netrc.c
# End Source File
# Begin Source File
SOURCE=.\progress.c
# End Source File
# Begin Source File
SOURCE=.\security.c
# End Source File
# Begin Source File
SOURCE=.\sendf.c
# End Source File
# Begin Source File
SOURCE=.\speedcheck.c
# End Source File
# Begin Source File
SOURCE=.\ssluse.c
# End Source File
# Begin Source File
SOURCE=.\strequal.c
# End Source File
# Begin Source File
SOURCE=.\telnet.c
# End Source File
# Begin Source File
SOURCE=.\timeval.c
# End Source File
# Begin Source File
SOURCE=.\transfer.c
# End Source File
# Begin Source File
SOURCE=.\url.c
# End Source File
# Begin Source File
SOURCE=.\version.c
# End Source File
# End Group
# Begin Group "Header Files"
# PROP Default_Filter "h;hpp;hxx;hm;inl"
# Begin Source File
SOURCE=.\arpa_telnet.h
# End Source File
# Begin Source File
SOURCE=.\base64.h
# End Source File
# Begin Source File
SOURCE=.\cookie.h
# End Source File
# Begin Source File
SOURCE=.\dict.h
# End Source File
# Begin Source File
SOURCE=.\escape.h
# End Source File
# Begin Source File
SOURCE=.\file.h
# End Source File
# Begin Source File
SOURCE=.\formdata.h
# End Source File
# Begin Source File
SOURCE=.\ftp.h
# End Source File
# Begin Source File
SOURCE=.\getdate.h
# End Source File
# Begin Source File
SOURCE=.\getenv.h
# End Source File
# Begin Source File
SOURCE=.\getpass.h
# End Source File
# Begin Source File
SOURCE=.\hostip.h
# End Source File
# Begin Source File
SOURCE=.\http.h
# End Source File
# Begin Source File
SOURCE=.\if2ip.h
# End Source File
# Begin Source File
SOURCE=.\inet_ntoa_r.h
# End Source File
# Begin Source File
SOURCE=.\krb4.h
# End Source File
# Begin Source File
SOURCE=.\ldap.h
# End Source File
# Begin Source File
SOURCE=.\memdebug.h
# End Source File
# Begin Source File
SOURCE=.\netrc.h
# End Source File
# Begin Source File
SOURCE=.\progress.h
# End Source File
# Begin Source File
SOURCE=.\security.h
# End Source File
# Begin Source File
SOURCE=.\sendf.h
# End Source File
# Begin Source File
SOURCE=.\setup.h
# End Source File
# Begin Source File
SOURCE=.\speedcheck.h
# End Source File
# Begin Source File
SOURCE=.\ssluse.h
# End Source File
# Begin Source File
SOURCE=.\strequal.h
# End Source File
# Begin Source File
SOURCE=.\telnet.h
# End Source File
# Begin Source File
SOURCE=.\timeval.h
# End Source File
# Begin Source File
SOURCE=.\transfer.h
# End Source File
# Begin Source File
SOURCE=.\url.h
# End Source File
# Begin Source File
SOURCE=.\urldata.h
# End Source File
# End Group
# Begin Group "Resource Files"
# PROP Default_Filter "ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe"
# End Group
# End Target
# End Project

lib/curllib.dsw (new file, 29 lines)

@@ -0,0 +1,29 @@
Microsoft Developer Studio Workspace File, Format Version 6.00
# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
###############################################################################
Project: "curllib"=".\curllib.dsp" - Package Owner=<4>
Package=<5>
{{{
}}}
Package=<4>
{{{
}}}
###############################################################################
Global:
Package=<5>
{{{
}}}
Package=<3>
{{{
}}}
###############################################################################


@@ -71,7 +71,7 @@
#include "urldata.h"
#include <curl/curl.h>
#include "download.h"
#include "transfer.h"
#include "sendf.h"
#include "progress.h"
@@ -80,12 +80,12 @@
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
CURLcode dict_done(struct connectdata *conn)
CURLcode Curl_dict_done(struct connectdata *conn)
{
return CURLE_OK;
}
CURLcode dict(struct connectdata *conn)
CURLcode Curl_dict(struct connectdata *conn)
{
int nth;
char *word;
@@ -100,7 +100,7 @@ CURLcode dict(struct connectdata *conn)
char *path = conn->path;
long *bytecount = &conn->bytecount;
if(data->bits.user_passwd) {
if(conn->bits.user_passwd) {
/* AUTH is missing */
}
@@ -141,21 +141,21 @@ CURLcode dict(struct connectdata *conn)
nth = atoi(nthdef);
}
sendf(data->firstsocket, data,
"CLIENT " LIBCURL_NAME " " LIBCURL_VERSION "\n"
"MATCH "
"%s " /* database */
"%s " /* strategy */
"%s\n" /* word */
"QUIT\n",
Curl_sendf(conn->firstsocket, conn,
"CLIENT " LIBCURL_NAME " " LIBCURL_VERSION "\n"
"MATCH "
"%s " /* database */
"%s " /* strategy */
"%s\n" /* word */
"QUIT\n",
database,
strategy,
word
);
database,
strategy,
word
);
result = Transfer(conn, data->firstsocket, -1, FALSE, bytecount,
-1, NULL); /* no upload */
result = Curl_Transfer(conn, conn->firstsocket, -1, FALSE, bytecount,
-1, NULL); /* no upload */
if(result)
return result;
@@ -191,20 +191,20 @@ CURLcode dict(struct connectdata *conn)
nth = atoi(nthdef);
}
sendf(data->firstsocket, data,
"CLIENT " LIBCURL_NAME " " LIBCURL_VERSION "\n"
"DEFINE "
"%s " /* database */
"%s\n" /* word */
"QUIT\n",
database,
word
);
Curl_sendf(conn->firstsocket, conn,
"CLIENT " LIBCURL_NAME " " LIBCURL_VERSION "\n"
"DEFINE "
"%s " /* database */
"%s\n" /* word */
"QUIT\n",
database,
word
);
result = Curl_Transfer(conn, conn->firstsocket, -1, FALSE, bytecount,
-1, NULL); /* no upload */
result = Transfer(conn, data->firstsocket, -1, FALSE, bytecount,
-1, NULL); /* no upload */
if(result)
return result;
@@ -220,14 +220,14 @@ CURLcode dict(struct connectdata *conn)
if (ppath[i] == ':')
ppath[i] = ' ';
}
sendf(data->firstsocket, data,
"CLIENT " LIBCURL_NAME " " LIBCURL_VERSION "\n"
"%s\n"
"QUIT\n",
ppath);
Curl_sendf(conn->firstsocket, conn,
"CLIENT " LIBCURL_NAME " " LIBCURL_VERSION "\n"
"%s\n"
"QUIT\n",
ppath);
result = Transfer(conn, data->firstsocket, -1, FALSE, bytecount,
-1, NULL);
result = Curl_Transfer(conn, conn->firstsocket, -1, FALSE, bytecount,
-1, NULL);
if(result)
return result;


@@ -23,7 +23,7 @@
*
* $Id$
*****************************************************************************/
CURLcode dict(struct connectdata *conn);
CURLcode dict_done(struct connectdata *conn);
CURLcode Curl_dict(struct connectdata *conn);
CURLcode Curl_dict_done(struct connectdata *conn);
#endif


@@ -1,100 +0,0 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 2000, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* In order to be useful for every potential user, curl and libcurl are
* dual-licensed under the MPL and the MIT/X-derivate licenses.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the MPL or the MIT/X-derivate
* licenses. You may pick one of these licenses.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
*****************************************************************************/
#include "setup.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#include "urldata.h"
#include <curl/curl.h>
#ifdef __BEOS__
#include <net/socket.h>
#endif
#ifdef WIN32
#if !defined( __GNUC__) || defined(__MINGW32__)
#include <winsock.h>
#endif
#include <time.h> /* for the time_t typedef! */
#if defined(__GNUC__) && defined(TIME_WITH_SYS_TIME)
#include <sys/time.h>
#endif
#endif
#include "progress.h"
#include "speedcheck.h"
#include "sendf.h"
#include <curl/types.h>
/* --- download and upload a stream from/to a socket --- */
/* Parts of this function were brought to us by the friendly Mark Butler
<butlerm@xmission.com>. */
CURLcode
Transfer(CURLconnect *c_conn,
/* READ stuff */
int sockfd, /* socket to read from or -1 */
int size, /* -1 if unknown at this point */
bool getheader, /* TRUE if header parsing is wanted */
long *bytecountp, /* return number of bytes read or NULL */
/* WRITE stuff */
int writesockfd, /* socket to write to, it may very well be
the same we read from. -1 disables */
long *writebytecountp /* return number of bytes written or NULL */
)
{
struct connectdata *conn = (struct connectdata *)c_conn;
if(!conn)
return CURLE_BAD_FUNCTION_ARGUMENT;
/* now copy all input parameters */
conn->sockfd = sockfd;
conn->size = size;
conn->getheader = getheader;
conn->bytecountp = bytecountp;
conn->writesockfd = writesockfd;
conn->writebytecountp = writebytecountp;
return CURLE_OK;
}


@@ -72,7 +72,7 @@
#include "urldata.h"
#include <curl/curl.h>
#include "highlevel.h"
#include "transfer.h"
#include <curl/types.h>
#define _MPRINTF_REPLACE /* use our functions only */
@@ -83,15 +83,11 @@ CURL *curl_easy_init(void)
CURLcode res;
struct UrlData *data;
if(curl_init())
return NULL;
/* We use curl_open() with undefined URL so far */
res = curl_open((CURL **)&data, NULL);
res = Curl_open((CURL **)&data, NULL);
if(res != CURLE_OK)
return NULL;
data->interf = CURLI_EASY; /* mark it as an easy one */
/* SAC */
data->device = NULL;
@@ -119,16 +115,16 @@ CURLcode curl_easy_setopt(CURL *curl, CURLoption tag, ...)
if(tag < CURLOPTTYPE_OBJECTPOINT) {
/* This is a LONG type */
param_long = va_arg(arg, long);
curl_setopt(data, tag, param_long);
Curl_setopt(data, tag, param_long);
}
else if(tag < CURLOPTTYPE_FUNCTIONPOINT) {
/* This is a object pointer type */
param_obj = va_arg(arg, void *);
curl_setopt(data, tag, param_obj);
Curl_setopt(data, tag, param_obj);
}
else {
param_func = va_arg(arg, func_T );
curl_setopt(data, tag, param_func);
Curl_setopt(data, tag, param_func);
}
va_end(arg);
@@ -137,13 +133,12 @@ CURLcode curl_easy_setopt(CURL *curl, CURLoption tag, ...)
CURLcode curl_easy_perform(CURL *curl)
{
return curl_transfer(curl);
return Curl_perform(curl);
}
void curl_easy_cleanup(CURL *curl)
{
curl_close(curl);
curl_free();
Curl_close(curl);
}
CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...)
@@ -153,5 +148,5 @@ CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...)
va_start(arg, info);
paramp = va_arg(arg, void *);
return curl_getinfo(curl, info, paramp);
return Curl_getinfo(curl, info, paramp);
}
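The wrappers above pick the va_arg type from the option's numeric range: below CURLOPTTYPE_OBJECTPOINT it is a long, below CURLOPTTYPE_FUNCTIONPOINT an object pointer, otherwise a function pointer, and the value is then passed on to the renamed Curl_setopt(). A sketch with one option from each class (the callback name is made up):

#include <stdio.h>
#include <curl/curl.h>

static size_t my_write(char *ptr, size_t size, size_t nmemb, FILE *stream)
{
  return fwrite(ptr, size, nmemb, stream);   /* hypothetical write callback */
}

static void setup(CURL *curl, FILE *out)
{
  curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);              /* LONG class */
  curl_easy_setopt(curl, CURLOPT_FILE, out);                /* OBJECTPOINT class */
  curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, my_write);  /* FUNCTIONPOINT class */
}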


@@ -25,6 +25,7 @@
* allocated string or NULL if an error occurred. */
#include "setup.h"
#include <ctype.h>
#include <curl/curl.h>
#include <stdio.h>
@@ -36,74 +37,88 @@
#include "memdebug.h"
#endif
char *curl_escape(char *string)
char *curl_escape(char *string, int length)
{
int alloc=strlen(string)+1;
char *ns = malloc(alloc);
unsigned char in;
int newlen = alloc;
int index=0;
int alloc = (length?length:strlen(string))+1;
char *ns = malloc(alloc);
unsigned char in;
int newlen = alloc;
int index=0;
while(*string) {
in = *string;
if(' ' == in)
ns[index++] = '+';
else if(!(in >= 'a' && in <= 'z') &&
!(in >= 'A' && in <= 'Z') &&
!(in >= '0' && in <= '9')) {
/* encode it */
newlen += 2; /* the size grows with two, since this'll become a %XX */
if(newlen > alloc) {
alloc *= 2;
ns = realloc(ns, alloc);
if(!ns)
return NULL;
}
sprintf(&ns[index], "%%%02X", in);
index+=3;
while(length--) {
in = *string;
if(' ' == in)
ns[index++] = '+';
else if(!(in >= 'a' && in <= 'z') &&
!(in >= 'A' && in <= 'Z') &&
!(in >= '0' && in <= '9')) {
/* encode it */
if(('%' == in) &&
(length>=2) &&
isxdigit((int)string[1]) &&
isxdigit((int)string[2]) ) {
/*
* This is an already encoded letter, leave it!
*/
memcpy(&ns[index], string, 3);
string+=2;
}
else {
/* just copy this */
ns[index++]=in;
/* encode this now */
newlen += 2; /* the size grows with two, since this'll become a %XX */
if(newlen > alloc) {
alloc *= 2;
ns = realloc(ns, alloc);
if(!ns)
return NULL;
}
sprintf(&ns[index], "%%%02X", in);
}
string++;
}
ns[index]=0; /* terminate it */
return ns;
index+=3;
}
else {
/* just copy this */
ns[index++]=in;
}
string++;
}
ns[index]=0; /* terminate it */
return ns;
}
char *curl_unescape(char *string, int length)
{
int alloc = (length?length:strlen(string))+1;
char *ns = malloc(alloc);
unsigned char in;
int index=0;
int hex;
char querypart=FALSE; /* everything to the right of a '?' letter is
the "query part" where '+' should become ' '.
RFC 2316, section 3.10 */
int alloc = (length?length:strlen(string))+1;
char *ns = malloc(alloc);
unsigned char in;
int index=0;
unsigned int hex;
char querypart=FALSE; /* everything to the right of a '?' letter is
the "query part" where '+' should become ' '.
RFC 2316, section 3.10 */
while(--alloc > 0) {
in = *string;
if(querypart && ('+' == in))
in = ' ';
else if(!querypart && ('?' == in)) {
/* we have "walked in" to the query part */
querypart=TRUE;
while(--alloc > 0) {
in = *string;
if(querypart && ('+' == in))
in = ' ';
else if(!querypart && ('?' == in)) {
/* we have "walked in" to the query part */
querypart=TRUE;
}
else if('%' == in) {
/* encoded part */
if(sscanf(string+1, "%02X", &hex)) {
in = hex;
string+=2;
alloc-=2;
}
else if('%' == in) {
/* encoded part */
if(sscanf(string+1, "%02X", &hex)) {
in = hex;
string+=2;
alloc-=2;
}
}
ns[index++] = in;
string++;
}
ns[index]=0; /* terminate it */
return ns;
}
ns[index++] = in;
string++;
}
ns[index]=0; /* terminate it */
return ns;
}
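Reading the new loop above: '+' only turns into a space once a '?' has been passed, and %XX sequences are decoded anywhere. A tiny sketch of that distinction, with the results as the code above reads:

char *path  = curl_unescape("/a+b", 0);         /* stays "/a+b" */
char *query = curl_unescape("/a?x=1+2%21", 0);  /* becomes "/a?x=1 2!" */
free(path);
free(query);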


@@ -26,7 +26,7 @@
/* Escape and unescape URL encoding in strings. The functions return a new
* allocated string or NULL if an error occurred. */
char *curl_escape(char *string);
char *curl_escape(char *string, int length);
char *curl_unescape(char *string, int length);
#endif


@@ -91,29 +91,24 @@
#include "memdebug.h"
#endif
CURLcode file(struct connectdata *conn)
/* Emulate a connect-then-transfer protocol. We connect to the file here */
CURLcode Curl_file_connect(struct connectdata *conn)
{
/* This implementation ignores the host name in conformance with
RFC 1738. Only local files (reachable via the standard file system)
are supported. This means that files on remotely mounted directories
(via NFS, Samba, NT sharing) can be accessed through a file:// URL
*/
CURLcode res = CURLE_OK;
char *path = conn->path;
struct stat statbuf;
size_t expected_size=-1;
size_t nread;
struct UrlData *data = conn->data;
char *buf = data->buffer;
int bytecount = 0;
struct timeval start = tvnow();
struct timeval now = start;
char *actual_path = curl_unescape(conn->path, 0);
struct FILE *file;
int fd;
char *actual_path = curl_unescape(path, 0);
#if defined(WIN32) || defined(__EMX__)
int i;
#endif
file = (struct FILE *)malloc(sizeof(struct FILE));
if(!file)
return CURLE_OUT_OF_MEMORY;
memset(file, 0, sizeof(struct FILE));
conn->proto.file = file;
#if defined(WIN32) || defined(__EMX__)
/* change path separators from '/' to '\\' for Windows and OS/2 */
for (i=0; actual_path[i] != '\0'; ++i)
if (actual_path[i] == '/')
@@ -126,9 +121,37 @@ CURLcode file(struct connectdata *conn)
free(actual_path);
if(fd == -1) {
failf(data, "Couldn't open file %s", path);
failf(conn->data, "Couldn't open file %s", conn->path);
return CURLE_FILE_COULDNT_READ_FILE;
}
file->fd = fd;
return CURLE_OK;
}
/* This is the do-phase, separated from the connect-phase above */
CURLcode Curl_file(struct connectdata *conn)
{
/* This implementation ignores the host name in conformance with
RFC 1738. Only local files (reachable via the standard file system)
are supported. This means that files on remotely mounted directories
(via NFS, Samba, NT sharing) can be accessed through a file:// URL
*/
CURLcode res = CURLE_OK;
struct stat statbuf;
size_t expected_size=-1;
size_t nread;
struct UrlData *data = conn->data;
char *buf = data->buffer;
int bytecount = 0;
struct timeval start = Curl_tvnow();
struct timeval now = start;
int fd;
/* get the fd from the connection phase */
fd = conn->proto.file->fd;
if( -1 != fstat(fd, &statbuf)) {
/* we could stat it, then read out the size */
expected_size = statbuf.st_size;
@@ -139,7 +162,7 @@ CURLcode file(struct connectdata *conn)
it avoids problems with select() and recv() on file descriptors
in Winsock */
if(expected_size != -1)
pgrsSetDownloadSize(data, expected_size);
Curl_pgrsSetDownloadSize(data, expected_size);
while (res == CURLE_OK) {
nread = read(fd, buf, BUFSIZE-1);
@@ -155,16 +178,16 @@ CURLcode file(struct connectdata *conn)
to prevent CR/LF translation (this then goes to a binary mode
file descriptor). */
res = client_write(data, CLIENTWRITE_BODY, buf, nread);
res = Curl_client_write(data, CLIENTWRITE_BODY, buf, nread);
if(res)
return res;
now = tvnow();
if(pgrsUpdate(data))
now = Curl_tvnow();
if(Curl_pgrsUpdate(data))
res = CURLE_ABORTED_BY_CALLBACK;
}
now = tvnow();
if(pgrsUpdate(data))
now = Curl_tvnow();
if(Curl_pgrsUpdate(data))
res = CURLE_ABORTED_BY_CALLBACK;
close(fd);


@@ -23,6 +23,6 @@
*
* $Id$
*****************************************************************************/
CURLcode file(struct connectdata *conn);
CURLcode Curl_file(struct connectdata *conn);
CURLcode Curl_file_connect(struct connectdata *conn);
#endif
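As the comments in the file.c diff above point out, file:// URLs only reach the local filesystem, and the connect phase now merely opens the file while the do phase reads it out. A two-line sketch on an initialized easy handle (the path is made up):

CURLcode res;
curl_easy_setopt(curl, CURLOPT_URL, "file:///tmp/example.txt");  /* hypothetical local path */
res = curl_easy_perform(curl);  /* ends up in Curl_file_connect() and then Curl_file() */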


@@ -91,16 +91,10 @@ static void GetStr(char **string,
*
***************************************************************************/
int curl_formparse(char *input,
struct HttpPost **httppost,
struct HttpPost **last_post)
{
return FormParse(input, httppost, last_post);
}
#define FORM_FILE_SEPARATOR ','
#define FORM_TYPE_SEPARATOR ';'
static
int FormParse(char *input,
struct HttpPost **httppost,
struct HttpPost **last_post)
@@ -298,6 +292,13 @@ int FormParse(char *input,
return 0;
}
int curl_formparse(char *input,
struct HttpPost **httppost,
struct HttpPost **last_post)
{
return FormParse(input, httppost, last_post);
}
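curl_formparse() stays the public entry point; the diff only moves the wrapper below the static FormParse() it calls. It parses the same "name=contents" strings that curl's -F option takes and builds an HttpPost list for CURLOPT_HTTPPOST. A sketch on an initialized easy handle (the file path is made up):

struct HttpPost *post = NULL;
struct HttpPost *last = NULL;

curl_formparse("name=daniel", &post, &last);
curl_formparse("upload=@/tmp/upload.txt", &post, &last);  /* hypothetical file */
curl_easy_setopt(curl, CURLOPT_HTTPPOST, post);

/* ... perform the transfer ... */

curl_formfree(post);   /* free the whole post chain afterwards */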
static int AddFormData(struct FormData **formp,
void *line,
long length)
@@ -339,7 +340,7 @@ static int AddFormDataf(struct FormData **formp,
}
char *MakeFormBoundary(void)
char *Curl_FormBoundary(void)
{
char *retstring;
static int randomizer=0; /* this is just so that two boundaries within
@@ -367,7 +368,7 @@ char *MakeFormBoundary(void)
}
/* Used from http.c */
void FormFree(struct FormData *form)
void Curl_FormFree(struct FormData *form)
{
struct FormData *next;
do {
@@ -400,8 +401,8 @@ void curl_formfree(struct HttpPost *form)
} while((form=next)); /* continue */
}
struct FormData *getFormData(struct HttpPost *post,
int *sizep)
struct FormData *Curl_getFormData(struct HttpPost *post,
int *sizep)
{
struct FormData *form = NULL;
struct FormData *firstform;
@@ -415,7 +416,7 @@ struct FormData *getFormData(struct HttpPost *post,
if(!post)
return NULL; /* no input => no output! */
boundary = MakeFormBoundary();
boundary = Curl_FormBoundary();
/* Make the first line of the output */
AddFormDataf(&form,
@@ -439,7 +440,7 @@ struct FormData *getFormData(struct HttpPost *post,
/* If used, this is a link to more file names, we must then do
the magic to include several files with the same field name */
fileboundary = MakeFormBoundary();
fileboundary = Curl_FormBoundary();
size += AddFormDataf(&form,
"\r\nContent-Type: multipart/mixed,"
@@ -535,24 +536,11 @@ struct FormData *getFormData(struct HttpPost *post,
return firstform;
}
int FormInit(struct Form *form, struct FormData *formdata )
int Curl_FormInit(struct Form *form, struct FormData *formdata )
{
if(!formdata)
return 1; /* error */
#if 0
struct FormData *lastnode=formdata;
/* find the last node in the list */
while(lastnode->next) {
lastnode = lastnode->next;
}
/* Now, make sure that we'll send a nice terminating sequence at the end
* of the post. We *DONT* add this string to the size of the data since this
* is actually AFTER the data. */
AddFormDataf(&lastnode, "\r\n\r\n");
#endif
form->data = formdata;
form->sent = 0;
@@ -560,10 +548,10 @@ int FormInit(struct Form *form, struct FormData *formdata )
}
/* fread() emulation */
int FormReader(char *buffer,
size_t size,
size_t nitems,
FILE *mydata)
int Curl_FormReader(char *buffer,
size_t size,
size_t nitems,
FILE *mydata)
{
struct Form *form;
int wantedsize;
@@ -638,7 +626,7 @@ int main(int argc, char **argv)
}
}
form=getFormData(httppost, &size);
form=Curl_getFormData(httppost, &size);
FormInit(&formread, form);


@@ -36,23 +36,19 @@ struct Form {
been sent in a previous invoke */
};
int FormParse(char *string,
struct HttpPost **httppost,
struct HttpPost **last_post);
int Curl_FormInit(struct Form *form, struct FormData *formdata );
int FormInit(struct Form *form, struct FormData *formdata );
struct FormData *getFormData(struct HttpPost *post,
int *size);
struct FormData *Curl_getFormData(struct HttpPost *post,
int *size);
/* fread() emulation */
int FormReader(char *buffer,
size_t size,
size_t nitems,
FILE *mydata);
int Curl_FormReader(char *buffer,
size_t size,
size_t nitems,
FILE *mydata);
char *MakeFormBoundary(void);
char *Curl_FormBoundary(void);
void FormFree(struct FormData *);
void Curl_FormFree(struct FormData *);
#endif

lib/ftp.c (696 lines changed)

File diff suppressed because it is too large.


@@ -23,11 +23,16 @@
*
* $Id$
*****************************************************************************/
CURLcode ftp(struct connectdata *conn);
CURLcode ftp_done(struct connectdata *conn);
CURLcode ftp_connect(struct connectdata *conn);
CURLcode Curl_ftp(struct connectdata *conn);
CURLcode Curl_ftp_done(struct connectdata *conn);
CURLcode Curl_ftp_connect(struct connectdata *conn);
CURLcode Curl_ftp_disconnect(struct connectdata *conn);
struct curl_slist *curl_slist_append(struct curl_slist *list, char *data);
void curl_slist_free_all(struct curl_slist *list);
size_t Curl_ftpsendf(int fd, struct connectdata *, char *fmt, ...);
/* The kerberos stuff needs this: */
int Curl_GetFTPResponse(int sockfd, char *buf,
struct connectdata *conn,
int *ftpcode);
#endif


@@ -104,6 +104,11 @@
# include <string.h>
#endif
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#include "memdebug.h"
#endif
#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 7)
# define __attribute__(x)
#endif
@@ -127,44 +132,44 @@
then those parser generators need to be fixed instead of adding those
names to this list. */
#define yymaxdepth gd_maxdepth
#define yyparse gd_parse
#define yylex gd_lex
#define yyerror gd_error
#define yylval gd_lval
#define yychar gd_char
#define yydebug gd_debug
#define yypact gd_pact
#define yyr1 gd_r1
#define yyr2 gd_r2
#define yydef gd_def
#define yychk gd_chk
#define yypgo gd_pgo
#define yyact gd_act
#define yyexca gd_exca
#define yyerrflag gd_errflag
#define yynerrs gd_nerrs
#define yyps gd_ps
#define yypv gd_pv
#define yys gd_s
#define yy_yys gd_yys
#define yystate gd_state
#define yytmp gd_tmp
#define yyv gd_v
#define yy_yyv gd_yyv
#define yyval gd_val
#define yylloc gd_lloc
#define yyreds gd_reds /* With YYDEBUG defined */
#define yytoks gd_toks /* With YYDEBUG defined */
#define yylhs gd_yylhs
#define yylen gd_yylen
#define yydefred gd_yydefred
#define yydgoto gd_yydgoto
#define yysindex gd_yysindex
#define yyrindex gd_yyrindex
#define yygindex gd_yygindex
#define yytable gd_yytable
#define yycheck gd_yycheck
#define yymaxdepth Curl_gd_maxdepth
#define yyparse Curl_gd_parse
#define yylex Curl_gd_lex
#define yyerror Curl_gd_error
#define yylval Curl_gd_lval
#define yychar Curl_gd_char
#define yydebug Curl_gd_debug
#define yypact Curl_gd_pact
#define yyr1 Curl_gd_r1
#define yyr2 Curl_gd_r2
#define yydef Curl_gd_def
#define yychk Curl_gd_chk
#define yypgo Curl_gd_pgo
#define yyact Curl_gd_act
#define yyexca Curl_gd_exca
#define yyerrflag Curl_gd_errflag
#define yynerrs Curl_gd_nerrs
#define yyps Curl_gd_ps
#define yypv Curl_gd_pv
#define yys Curl_gd_s
#define yy_yys Curl_gd_yys
#define yystate Curl_gd_state
#define yytmp Curl_gd_tmp
#define yyv Curl_gd_v
#define yy_yyv Curl_gd_yyv
#define yyval Curl_gd_val
#define yylloc Curl_gd_lloc
#define yyreds Curl_gd_reds /* With YYDEBUG defined */
#define yytoks Curl_gd_toks /* With YYDEBUG defined */
#define yylhs Curl_gd_yylhs
#define yylen Curl_gd_yylen
#define yydefred Curl_gd_yydefred
#define yydgoto Curl_gd_yydgoto
#define yysindex Curl_gd_yysindex
#define yyrindex Curl_gd_yyrindex
#define yygindex Curl_gd_yygindex
#define yytable Curl_gd_yytable
#define yycheck Curl_gd_yycheck
static int yylex ();
static int yyerror ();
@@ -222,7 +227,7 @@ static int yyRelSeconds;
static int yyRelYear;
#line 205 "getdate.y"
#line 210 "getdate.y"
typedef union {
int Number;
enum _MERIDIAN Meridian;
@@ -305,11 +310,11 @@ static const short yyrhs[] = { -1,
#if YYDEBUG != 0
static const short yyrline[] = { 0,
221, 222, 225, 228, 231, 234, 237, 240, 243, 249,
255, 264, 270, 282, 285, 288, 294, 298, 302, 308,
312, 330, 336, 342, 346, 351, 355, 362, 370, 373,
376, 379, 382, 385, 388, 391, 394, 397, 400, 403,
406, 409, 412, 415, 418, 421, 424, 429, 462, 466
226, 227, 230, 233, 236, 239, 242, 245, 248, 254,
260, 269, 275, 287, 290, 293, 299, 303, 307, 313,
317, 335, 341, 347, 351, 356, 360, 367, 375, 378,
381, 384, 387, 390, 393, 396, 399, 402, 405, 408,
411, 414, 417, 420, 423, 426, 429, 434, 467, 471
};
#endif
@@ -390,7 +395,7 @@ static const short yycheck[] = { 0,
56
};
/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
#line 3 "/usr/local/share/bison.simple"
#line 3 "/usr/lib/bison.simple"
/* This file comes from bison-1.28. */
/* Skeleton output parser for bison,
@@ -604,7 +609,7 @@ __yy_memcpy (char *to, char *from, unsigned int count)
#endif
#endif
#line 217 "/usr/local/share/bison.simple"
#line 217 "/usr/lib/bison.simple"
/* The user can define YYPARSE_PARAM as the name of an argument to be passed
into yyparse. The argument should have type void *.
@@ -933,37 +938,37 @@ yyreduce:
switch (yyn) {
case 3:
#line 225 "getdate.y"
#line 230 "getdate.y"
{
yyHaveTime++;
;
break;}
case 4:
#line 228 "getdate.y"
#line 233 "getdate.y"
{
yyHaveZone++;
;
break;}
case 5:
#line 231 "getdate.y"
#line 236 "getdate.y"
{
yyHaveDate++;
;
break;}
case 6:
#line 234 "getdate.y"
#line 239 "getdate.y"
{
yyHaveDay++;
;
break;}
case 7:
#line 237 "getdate.y"
#line 242 "getdate.y"
{
yyHaveRel++;
;
break;}
case 9:
#line 243 "getdate.y"
#line 248 "getdate.y"
{
yyHour = yyvsp[-1].Number;
yyMinutes = 0;
@@ -972,7 +977,7 @@ case 9:
;
break;}
case 10:
#line 249 "getdate.y"
#line 254 "getdate.y"
{
yyHour = yyvsp[-3].Number;
yyMinutes = yyvsp[-1].Number;
@@ -981,7 +986,7 @@ case 10:
;
break;}
case 11:
#line 255 "getdate.y"
#line 260 "getdate.y"
{
yyHour = yyvsp[-3].Number;
yyMinutes = yyvsp[-1].Number;
@@ -993,7 +998,7 @@ case 11:
;
break;}
case 12:
#line 264 "getdate.y"
#line 269 "getdate.y"
{
yyHour = yyvsp[-5].Number;
yyMinutes = yyvsp[-3].Number;
@@ -1002,7 +1007,7 @@ case 12:
;
break;}
case 13:
#line 270 "getdate.y"
#line 275 "getdate.y"
{
yyHour = yyvsp[-5].Number;
yyMinutes = yyvsp[-3].Number;
@@ -1015,53 +1020,53 @@ case 13:
;
break;}
case 14:
#line 282 "getdate.y"
#line 287 "getdate.y"
{
yyTimezone = yyvsp[0].Number;
;
break;}
case 15:
#line 285 "getdate.y"
#line 290 "getdate.y"
{
yyTimezone = yyvsp[0].Number - 60;
;
break;}
case 16:
#line 289 "getdate.y"
#line 294 "getdate.y"
{
yyTimezone = yyvsp[-1].Number - 60;
;
break;}
case 17:
#line 294 "getdate.y"
#line 299 "getdate.y"
{
yyDayOrdinal = 1;
yyDayNumber = yyvsp[0].Number;
;
break;}
case 18:
#line 298 "getdate.y"
#line 303 "getdate.y"
{
yyDayOrdinal = 1;
yyDayNumber = yyvsp[-1].Number;
;
break;}
case 19:
#line 302 "getdate.y"
#line 307 "getdate.y"
{
yyDayOrdinal = yyvsp[-1].Number;
yyDayNumber = yyvsp[0].Number;
;
break;}
case 20:
#line 308 "getdate.y"
#line 313 "getdate.y"
{
yyMonth = yyvsp[-2].Number;
yyDay = yyvsp[0].Number;
;
break;}
case 21:
#line 312 "getdate.y"
#line 317 "getdate.y"
{
/* Interpret as YYYY/MM/DD if $1 >= 1000, otherwise as MM/DD/YY.
The goal in recognizing YYYY/MM/DD is solely to support legacy
@@ -1082,7 +1087,7 @@ case 21:
;
break;}
case 22:
#line 330 "getdate.y"
#line 335 "getdate.y"
{
/* ISO 8601 format. yyyy-mm-dd. */
yyYear = yyvsp[-2].Number;
@@ -1091,7 +1096,7 @@ case 22:
;
break;}
case 23:
#line 336 "getdate.y"
#line 341 "getdate.y"
{
/* e.g. 17-JUN-1992. */
yyDay = yyvsp[-2].Number;
@@ -1100,14 +1105,14 @@ case 23:
;
break;}
case 24:
#line 342 "getdate.y"
#line 347 "getdate.y"
{
yyMonth = yyvsp[-1].Number;
yyDay = yyvsp[0].Number;
;
break;}
case 25:
#line 346 "getdate.y"
#line 351 "getdate.y"
{
yyMonth = yyvsp[-3].Number;
yyDay = yyvsp[-2].Number;
@@ -1115,14 +1120,14 @@ case 25:
;
break;}
case 26:
#line 351 "getdate.y"
#line 356 "getdate.y"
{
yyMonth = yyvsp[0].Number;
yyDay = yyvsp[-1].Number;
;
break;}
case 27:
#line 355 "getdate.y"
#line 360 "getdate.y"
{
yyMonth = yyvsp[-1].Number;
yyDay = yyvsp[-2].Number;
@@ -1130,7 +1135,7 @@ case 27:
;
break;}
case 28:
#line 362 "getdate.y"
#line 367 "getdate.y"
{
yyRelSeconds = -yyRelSeconds;
yyRelMinutes = -yyRelMinutes;
@@ -1141,115 +1146,115 @@ case 28:
;
break;}
case 30:
#line 373 "getdate.y"
#line 378 "getdate.y"
{
yyRelYear += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 31:
#line 376 "getdate.y"
#line 381 "getdate.y"
{
yyRelYear += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 32:
#line 379 "getdate.y"
#line 384 "getdate.y"
{
yyRelYear += yyvsp[0].Number;
;
break;}
case 33:
#line 382 "getdate.y"
#line 387 "getdate.y"
{
yyRelMonth += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 34:
#line 385 "getdate.y"
#line 390 "getdate.y"
{
yyRelMonth += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 35:
#line 388 "getdate.y"
#line 393 "getdate.y"
{
yyRelMonth += yyvsp[0].Number;
;
break;}
case 36:
#line 391 "getdate.y"
#line 396 "getdate.y"
{
yyRelDay += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 37:
#line 394 "getdate.y"
#line 399 "getdate.y"
{
yyRelDay += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 38:
#line 397 "getdate.y"
#line 402 "getdate.y"
{
yyRelDay += yyvsp[0].Number;
;
break;}
case 39:
#line 400 "getdate.y"
#line 405 "getdate.y"
{
yyRelHour += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 40:
#line 403 "getdate.y"
#line 408 "getdate.y"
{
yyRelHour += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 41:
#line 406 "getdate.y"
#line 411 "getdate.y"
{
yyRelHour += yyvsp[0].Number;
;
break;}
case 42:
#line 409 "getdate.y"
#line 414 "getdate.y"
{
yyRelMinutes += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 43:
#line 412 "getdate.y"
#line 417 "getdate.y"
{
yyRelMinutes += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 44:
#line 415 "getdate.y"
#line 420 "getdate.y"
{
yyRelMinutes += yyvsp[0].Number;
;
break;}
case 45:
#line 418 "getdate.y"
#line 423 "getdate.y"
{
yyRelSeconds += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 46:
#line 421 "getdate.y"
#line 426 "getdate.y"
{
yyRelSeconds += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 47:
#line 424 "getdate.y"
#line 429 "getdate.y"
{
yyRelSeconds += yyvsp[0].Number;
;
break;}
case 48:
#line 430 "getdate.y"
#line 435 "getdate.y"
{
if (yyHaveTime && yyHaveDate && !yyHaveRel)
yyYear = yyvsp[0].Number;
@@ -1282,20 +1287,20 @@ case 48:
;
break;}
case 49:
#line 463 "getdate.y"
#line 468 "getdate.y"
{
yyval.Meridian = MER24;
;
break;}
case 50:
#line 467 "getdate.y"
#line 472 "getdate.y"
{
yyval.Meridian = yyvsp[0].Meridian;
;
break;}
}
/* the action file gets copied in in place of this dollarsign */
#line 543 "/usr/local/share/bison.simple"
#line 543 "/usr/lib/bison.simple"
yyvsp -= yylen;
yyssp -= yylen;
@@ -1515,7 +1520,7 @@ yyerrhandle:
}
return 1;
}
#line 472 "getdate.y"
#line 477 "getdate.y"
/* Include this file down here because bison inserts code above which


@@ -80,6 +80,11 @@
# include <string.h>
#endif
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#include "memdebug.h"
#endif
#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 7)
# define __attribute__(x)
#endif
@@ -103,44 +108,44 @@
then those parser generators need to be fixed instead of adding those
names to this list. */
#define yymaxdepth gd_maxdepth
#define yyparse gd_parse
#define yylex gd_lex
#define yyerror gd_error
#define yylval gd_lval
#define yychar gd_char
#define yydebug gd_debug
#define yypact gd_pact
#define yyr1 gd_r1
#define yyr2 gd_r2
#define yydef gd_def
#define yychk gd_chk
#define yypgo gd_pgo
#define yyact gd_act
#define yyexca gd_exca
#define yyerrflag gd_errflag
#define yynerrs gd_nerrs
#define yyps gd_ps
#define yypv gd_pv
#define yys gd_s
#define yy_yys gd_yys
#define yystate gd_state
#define yytmp gd_tmp
#define yyv gd_v
#define yy_yyv gd_yyv
#define yyval gd_val
#define yylloc gd_lloc
#define yyreds gd_reds /* With YYDEBUG defined */
#define yytoks gd_toks /* With YYDEBUG defined */
#define yylhs gd_yylhs
#define yylen gd_yylen
#define yydefred gd_yydefred
#define yydgoto gd_yydgoto
#define yysindex gd_yysindex
#define yyrindex gd_yyrindex
#define yygindex gd_yygindex
#define yytable gd_yytable
#define yycheck gd_yycheck
#define yymaxdepth Curl_gd_maxdepth
#define yyparse Curl_gd_parse
#define yylex Curl_gd_lex
#define yyerror Curl_gd_error
#define yylval Curl_gd_lval
#define yychar Curl_gd_char
#define yydebug Curl_gd_debug
#define yypact Curl_gd_pact
#define yyr1 Curl_gd_r1
#define yyr2 Curl_gd_r2
#define yydef Curl_gd_def
#define yychk Curl_gd_chk
#define yypgo Curl_gd_pgo
#define yyact Curl_gd_act
#define yyexca Curl_gd_exca
#define yyerrflag Curl_gd_errflag
#define yynerrs Curl_gd_nerrs
#define yyps Curl_gd_ps
#define yypv Curl_gd_pv
#define yys Curl_gd_s
#define yy_yys Curl_gd_yys
#define yystate Curl_gd_state
#define yytmp Curl_gd_tmp
#define yyv Curl_gd_v
#define yy_yyv Curl_gd_yyv
#define yyval Curl_gd_val
#define yylloc Curl_gd_lloc
#define yyreds Curl_gd_reds /* With YYDEBUG defined */
#define yytoks Curl_gd_toks /* With YYDEBUG defined */
#define yylhs Curl_gd_yylhs
#define yylen Curl_gd_yylen
#define yydefred Curl_gd_yydefred
#define yydgoto Curl_gd_yydgoto
#define yysindex Curl_gd_yysindex
#define yyrindex Curl_gd_yyrindex
#define yygindex Curl_gd_yygindex
#define yytable Curl_gd_yytable
#define yycheck Curl_gd_yycheck
static int yylex ();
static int yyerror ();


@@ -33,6 +33,7 @@
#include "memdebug.h"
#endif
static
char *GetEnv(char *variable)
{
#ifdef WIN32
@@ -40,12 +41,13 @@ char *GetEnv(char *variable)
char env[MAX_PATH]; /* MAX_PATH is from windef.h */
char *temp = getenv(variable);
env[0] = '\0';
ExpandEnvironmentStrings(temp, env, sizeof(env));
if (temp != NULL)
ExpandEnvironmentStrings(temp, env, sizeof(env));
#else
/* no length control */
char *env = getenv(variable);
#endif
return env?strdup(env):NULL;
return (env && env[0])?strdup(env):NULL;
}
char *curl_getenv(char *v)


@@ -1,29 +0,0 @@
#ifndef __GETENV_H
#define __GETENV_H
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 2000, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* In order to be useful for every potential user, curl and libcurl are
* dual-licensed under the MPL and the MIT/X-derivate licenses.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the MPL or the MIT/X-derivate
* licenses. You may pick one of these licenses.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
*****************************************************************************/
/* Unix and Win32 getenv function call */
char *GetEnv(char *variable);
#endif


@@ -31,7 +31,7 @@
#include <string.h>
#include <stdarg.h>
CURLcode curl_getinfo(CURL *curl, CURLINFO info, ...)
CURLcode Curl_getinfo(CURL *curl, CURLINFO info, ...)
{
va_list arg;
long *param_longp;
@@ -103,6 +103,12 @@ CURLcode curl_getinfo(CURL *curl, CURLINFO info, ...)
case CURLINFO_SSL_VERIFYRESULT:
*param_longp = data->ssl.certverifyresult;
break;
case CURLINFO_CONTENT_LENGTH_DOWNLOAD:
*param_doublep = data->progress.size_dl;
break;
case CURLINFO_CONTENT_LENGTH_UPLOAD:
*param_doublep = data->progress.size_ul;
break;
default:
return CURLE_BAD_FUNCTION_ARGUMENT;
}
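The hunk above adds CURLINFO_CONTENT_LENGTH_DOWNLOAD and CURLINFO_CONTENT_LENGTH_UPLOAD as curl_easy_getinfo() queries. A minimal hedged usage sketch (hypothetical URL, error handling left out):

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  double dl_len = 0.0;

  curl_easy_setopt(curl, CURLOPT_URL, "http://www.example.com/");
  curl_easy_perform(curl);

  /* both new values are returned as doubles */
  curl_easy_getinfo(curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &dl_len);
  printf("server announced %.0f body bytes\n", dl_len);

  curl_easy_cleanup(curl);
  return 0;
}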


@@ -66,6 +66,11 @@
# endif
#endif
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#include "memdebug.h"
#endif
/* no perror? make an fprintf! */
#ifndef HAVE_PERROR
# define perror(x) fprintf(stderr, "Error in: %s\n", x)


@@ -1,8 +1,35 @@
#ifndef __GETPASS_H
#define __GETPASS_H
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 2000, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* In order to be useful for every potential user, curl and libcurl are
* dual-licensed under the MPL and the MIT/X-derivate licenses.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the MPL or the MIT/X-derivate
* licenses. You may pick one of these licenses.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
*****************************************************************************/
#ifndef HAVE_GETPASS_R
/* If there's a system-provided function named like this, we trust it is
also found in one of the standard headers. */
/*
* Returning NULL will abort the continued operation!
*/
char* getpass_r(char *prompt, char* buffer, size_t buflen );
#endif
#endif


@@ -62,7 +62,7 @@
/* --- resolve name or IP-number --- */
char *MakeIP(unsigned long num,char *addr, int addr_len)
static char *MakeIP(unsigned long num,char *addr, int addr_len)
{
#if defined(HAVE_INET_NTOA) || defined(HAVE_INET_NTOA_R)
struct in_addr in;
@@ -83,14 +83,40 @@ char *MakeIP(unsigned long num,char *addr, int addr_len)
return (addr);
}
/* The original code to this function was stolen from the Dancer source code,
written by Bjorn Reese, it has since been patched and modified. */
#ifdef ENABLE_IPV6
struct addrinfo *Curl_getaddrinfo(struct UrlData *data,
char *hostname,
int port)
{
struct addrinfo hints, *res;
int error;
char sbuf[NI_MAXSERV];
memset(&hints, 0, sizeof(hints));
hints.ai_family = PF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_CANONNAME;
snprintf(sbuf, sizeof(sbuf), "%d", port);
error = getaddrinfo(hostname, sbuf, &hints, &res);
if (error) {
infof(data, "getaddrinfo(3) failed for %s\n", hostname);
return NULL;
}
return res;
}
#endif
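Curl_getaddrinfo() above hands back the raw addrinfo list from getaddrinfo(3). As a hedged sketch of what a caller then typically does with that list (connect_any is a made-up helper, not part of the diff):

#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <unistd.h>

static int connect_any(struct addrinfo *res)
{
  struct addrinfo *ai;
  int sockfd = -1;

  for(ai = res; ai; ai = ai->ai_next) {
    sockfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
    if(sockfd < 0)
      continue;
    if(connect(sockfd, ai->ai_addr, ai->ai_addrlen) == 0)
      break;                      /* connected, keep this socket */
    close(sockfd);
    sockfd = -1;
  }
  freeaddrinfo(res);              /* done with the resolver result */
  return sockfd;                  /* -1 if every address failed */
}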
/* The original code to this function was once stolen from the Dancer source
code, written by Bjorn Reese, it has since been patched and modified
considerably. */
#ifndef INADDR_NONE
#define INADDR_NONE (unsigned long) ~0
#endif
struct hostent *GetHost(struct UrlData *data,
char *hostname,
char **bufp)
struct hostent *Curl_gethost(struct UrlData *data,
char *hostname,
char **bufp)
{
struct hostent *h = NULL;
unsigned long in;


@@ -23,6 +23,13 @@
* $Id$
*****************************************************************************/
struct hostent *GetHost(struct UrlData *data, char *hostname, char **bufp );
struct addrinfo;
struct addrinfo *Curl_getaddrinfo(struct UrlData *data,
char *hostname,
int port);
struct hostent *Curl_gethost(struct UrlData *data,
char *hostname,
char **bufp);
#endif


@@ -87,14 +87,13 @@
#include "urldata.h"
#include <curl/curl.h>
#include "download.h"
#include "transfer.h"
#include "sendf.h"
#include "formdata.h"
#include "progress.h"
#include "base64.h"
#include "cookie.h"
#include "strequal.h"
#include "url.h"
#include "ssluse.h"
#define _MPRINTF_REPLACE /* use our functions only */
@@ -105,6 +104,145 @@
#include "memdebug.h"
#endif
/* ------------------------------------------------------------------------- */
/*
* The add_buffer series of functions are used to build one large memory chunk
* from repeated function invokes. Used so that the entire HTTP request can
* be sent in one go.
*/
static CURLcode
add_buffer(send_buffer *in, void *inptr, size_t size);
/*
* add_buffer_init() returns a fine buffer struct
*/
static
send_buffer *add_buffer_init(void)
{
send_buffer *blonk;
blonk=(send_buffer *)malloc(sizeof(send_buffer));
if(blonk) {
memset(blonk, 0, sizeof(send_buffer));
return blonk;
}
return NULL; /* failed, go home */
}
/*
* add_buffer_send() sends a buffer and frees all associated memory.
*/
static
size_t add_buffer_send(int sockfd, struct connectdata *conn, send_buffer *in)
{
size_t amount;
if(conn->data->bits.verbose) {
fputs("> ", conn->data->err);
/* this data _may_ contain binary stuff */
fwrite(in->buffer, in->size_used, 1, conn->data->err);
}
Curl_write(conn, sockfd, in->buffer, in->size_used, &amount);
if(in->buffer)
free(in->buffer);
free(in);
return amount;
}
/*
* add_bufferf() builds a buffer from the formatted input
*/
static
CURLcode add_bufferf(send_buffer *in, char *fmt, ...)
{
CURLcode result = CURLE_OUT_OF_MEMORY;
char *s;
va_list ap;
va_start(ap, fmt);
s = vaprintf(fmt, ap); /* this allocs a new string to append */
va_end(ap);
if(s) {
result = add_buffer(in, s, strlen(s));
free(s);
}
return result;
}
/*
* add_buffer() appends a memory chunk to the existing one
*/
static
CURLcode add_buffer(send_buffer *in, void *inptr, size_t size)
{
char *new_rb;
int new_size;
if(size > 0) {
if(!in->buffer ||
((in->size_used + size) > (in->size_max - 1))) {
new_size = (in->size_used+size)*2;
if(in->buffer)
/* we have a buffer, enlarge the existing one */
new_rb = (char *)realloc(in->buffer, new_size);
else
/* create a new buffer */
new_rb = (char *)malloc(new_size);
if(!new_rb)
return CURLE_OUT_OF_MEMORY;
in->buffer = new_rb;
in->size_max = new_size;
}
memcpy(&in->buffer[in->size_used], inptr, size);
in->size_used += size;
}
return CURLE_OK;
}
/* end of the add_buffer functions */
/* ------------------------------------------------------------------------- */
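Tying together the comment at the top of this block, a hedged sketch of the call pattern these helpers are written for (it mirrors what Curl_http() does further down in this file; 'conn' and 'ppath' are assumed to come from the caller):

send_buffer *req = add_buffer_init();

add_bufferf(req, "GET %s HTTP/1.1\r\n", ppath);   /* request line */
add_bufferf(req, "Host: %s\r\n", conn->name);     /* headers, one at a time */
add_buffer(req, "\r\n", 2);                       /* blank line ends headers */

/* sent on the socket in one go; the buffer and the struct are freed here */
add_buffer_send(conn->firstsocket, conn, req);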
/*
* Read everything until a newline.
*/
static
int GetLine(int sockfd, char *buf, struct connectdata *conn)
{
ssize_t nread;
int read_rc=1;
char *ptr;
struct UrlData *data=conn->data;
ptr=buf;
/* get us a full line, terminated with a newline */
for(nread=0;
(nread<BUFSIZE) && read_rc;
nread++, ptr++) {
if((CURLE_OK != Curl_read(conn, sockfd, ptr, 1, &nread)) ||
(nread <= 0) ||
(*ptr == '\n'))
break;
}
*ptr=0; /* zero terminate */
if(data->bits.verbose) {
fputs("< ", data->err);
fwrite(buf, 1, nread, data->err);
fputs("\n", data->err);
}
return nread>0?nread:0;
}
/*
* This function checks the linked list of custom HTTP headers for a particular
* header (prefix).
@@ -123,32 +261,34 @@ bool static checkheaders(struct UrlData *data, char *thisheader)
}
/*
* GetHTTPProxyTunnel() requires that we're connected to a HTTP proxy. This
* ConnectHTTPProxyTunnel() requires that we're connected to a HTTP proxy. This
* function will issue the necessary commands to get a seamless tunnel through
* this proxy. After that, the socket can be used just as a normal socket.
*/
CURLcode GetHTTPProxyTunnel(struct UrlData *data, int tunnelsocket,
char *hostname, int remote_port)
CURLcode Curl_ConnectHTTPProxyTunnel(struct connectdata *conn,
int tunnelsocket,
char *hostname, int remote_port)
{
int httperror=0;
int subversion=0;
struct UrlData *data=conn->data;
infof(data, "Establish HTTP proxy tunnel to %s:%d\n", hostname, remote_port);
/* OK, now send the connect statment */
sendf(tunnelsocket, data,
"CONNECT %s:%d HTTP/1.0\015\012"
"%s"
"%s"
"\r\n",
hostname, remote_port,
(data->bits.proxy_user_passwd)?data->ptr_proxyuserpwd:"",
(data->useragent?data->ptr_uagent:"")
);
/* OK, now send the connect request to the proxy */
Curl_sendf(tunnelsocket, conn,
"CONNECT %s:%d HTTP/1.0\015\012"
"%s"
"%s"
"\r\n",
hostname, remote_port,
(conn->bits.proxy_user_passwd)?conn->allocptr.proxyuserpwd:"",
(data->useragent?conn->allocptr.uagent:"")
);
/* wait for the proxy to send us a HTTP/1.0 200 OK header */
while(GetLine(tunnelsocket, data->buffer, data)) {
while(GetLine(tunnelsocket, data->buffer, conn)) {
if('\r' == data->buffer[0])
break; /* end of headers */
if(2 == sscanf(data->buffer, "HTTP/1.%d %d",
@@ -170,7 +310,10 @@ CURLcode GetHTTPProxyTunnel(struct UrlData *data, int tunnelsocket,
return CURLE_OK;
}
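For reference, a hedged illustration of the exchange Curl_ConnectHTTPProxyTunnel() performs on the wire (the host name and version string are made up):

/*
 *   > CONNECT www.example.com:443 HTTP/1.0
 *   > User-Agent: curl/7.7.1
 *   >
 *   < HTTP/1.0 200 Connection established
 *   <
 *
 * Once the blank line ending the proxy's reply headers arrives, the socket
 * is a raw tunnel to www.example.com:443; for HTTPS the SSL handshake is
 * then run across it, which is what Curl_http_connect() below does.
 */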
CURLcode http_connect(struct connectdata *conn)
/*
* HTTP stuff to do at connect-time.
*/
CURLcode Curl_http_connect(struct connectdata *conn)
{
struct UrlData *data;
CURLcode result;
@@ -186,22 +329,21 @@ CURLcode http_connect(struct connectdata *conn)
if (conn->protocol & PROT_HTTPS) {
if (data->bits.httpproxy) {
/* HTTPS through a proxy can only be done with a tunnel */
result = GetHTTPProxyTunnel(data, data->firstsocket,
data->hostname, data->remote_port);
result = Curl_ConnectHTTPProxyTunnel(conn, conn->firstsocket,
conn->hostname, conn->remote_port);
if(CURLE_OK != result)
return result;
}
/* now, perform the SSL initialization for this socket */
if(UrgSSLConnect (data)) {
if(Curl_SSLConnect(conn))
return CURLE_SSL_CONNECT_ERROR;
}
}
if(data->bits.user_passwd && !data->bits.this_is_a_follow) {
if(conn->bits.user_passwd && !data->bits.this_is_a_follow) {
/* Authorization: is requested, this is not a followed location, get the
original host name */
data->auth_host = strdup(data->hostname);
data->auth_host = strdup(conn->hostname);
}
return CURLE_OK;
@@ -209,26 +351,26 @@ CURLcode http_connect(struct connectdata *conn)
/* called from curl_close() when this struct is about to get wasted, free
protocol-specific resources */
CURLcode http_close(struct connectdata *conn)
CURLcode Curl_http_close(struct connectdata *conn)
{
if(conn->data->auth_host)
free(conn->data->auth_host);
return CURLE_OK;
}
CURLcode http_done(struct connectdata *conn)
CURLcode Curl_http_done(struct connectdata *conn)
{
struct UrlData *data;
long *bytecount = &conn->bytecount;
struct HTTP *http;
data=conn->data;
http=data->proto.http;
http=conn->proto.http;
if(data->bits.http_formpost) {
*bytecount = http->readbytecount + http->writebytecount;
FormFree(http->sendit); /* Now free that whole lot */
Curl_FormFree(http->sendit); /* Now free that whole lot */
data->fread = http->storefread; /* restore */
data->in = http->in; /* restore */
@@ -237,14 +379,11 @@ CURLcode http_done(struct connectdata *conn)
*bytecount = http->readbytecount + http->writebytecount;
}
free(http);
data->proto.http=NULL; /* it is gone */
return CURLE_OK;
}
CURLcode http(struct connectdata *conn)
CURLcode Curl_http(struct connectdata *conn)
{
struct UrlData *data=conn->data;
char *buf = data->buffer; /* this is a short cut to the buffer */
@@ -255,11 +394,20 @@ CURLcode http(struct connectdata *conn)
char *host = conn->name;
long *bytecount = &conn->bytecount;
http = (struct HTTP *)malloc(sizeof(struct HTTP));
if(!http)
return CURLE_OUT_OF_MEMORY;
memset(http, 0, sizeof(struct HTTP));
data->proto.http = http;
if(!conn->proto.http) {
/* Only allocate this struct if we don't already have it! */
http = (struct HTTP *)malloc(sizeof(struct HTTP));
if(!http)
return CURLE_OUT_OF_MEMORY;
memset(http, 0, sizeof(struct HTTP));
conn->proto.http = http;
}
else
http = conn->proto.http;
/* We default to persistant connections */
conn->bits.close = FALSE;
if ( (conn->protocol&(PROT_HTTP|PROT_FTP)) &&
data->bits.upload) {
@@ -270,43 +418,46 @@ CURLcode http(struct connectdata *conn)
have been used in the proxy connect, but if we have got a header with
the user-agent string specified, we erase the previously made string
here. */
if(checkheaders(data, "User-Agent:") && data->ptr_uagent) {
free(data->ptr_uagent);
data->ptr_uagent=NULL;
if(checkheaders(data, "User-Agent:") && conn->allocptr.uagent) {
free(conn->allocptr.uagent);
conn->allocptr.uagent=NULL;
}
if((data->bits.user_passwd) && !checkheaders(data, "Authorization:")) {
if((conn->bits.user_passwd) && !checkheaders(data, "Authorization:")) {
char *authorization;
/* To prevent the user+password to get sent to other than the original
host due to a location-follow, we do some weirdo checks here */
if(!data->bits.this_is_a_follow ||
!data->auth_host ||
strequal(data->auth_host, data->hostname)) {
strequal(data->auth_host, conn->hostname)) {
sprintf(data->buffer, "%s:%s", data->user, data->passwd);
if(base64_encode(data->buffer, strlen(data->buffer),
&authorization) >= 0) {
data->ptr_userpwd = maprintf( "Authorization: Basic %s\015\012",
authorization);
if(Curl_base64_encode(data->buffer, strlen(data->buffer),
&authorization) >= 0) {
if(conn->allocptr.userpwd)
free(conn->allocptr.userpwd);
conn->allocptr.userpwd = aprintf( "Authorization: Basic %s\015\012",
authorization);
free(authorization);
}
}
}
if((data->bits.set_range) && !checkheaders(data, "Range:")) {
data->ptr_rangeline = maprintf("Range: bytes=%s\015\012", data->range);
}
if((data->bits.http_set_referer) && !checkheaders(data, "Referer:")) {
data->ptr_ref = maprintf("Referer: %s\015\012", data->referer);
if(conn->allocptr.ref)
free(conn->allocptr.ref);
conn->allocptr.ref = aprintf("Referer: %s\015\012", data->referer);
}
if(data->cookie && !checkheaders(data, "Cookie:")) {
data->ptr_cookie = maprintf("Cookie: %s\015\012", data->cookie);
if(conn->allocptr.cookie)
free(conn->allocptr.cookie);
conn->allocptr.cookie = aprintf("Cookie: %s\015\012", data->cookie);
}
if(data->cookies) {
co = cookie_getlist(data->cookies,
host,
ppath,
conn->protocol&PROT_HTTPS?TRUE:FALSE);
co = Curl_cookie_getlist(data->cookies,
host,
ppath,
conn->protocol&PROT_HTTPS?TRUE:FALSE);
}
if ((data->bits.httpproxy) && !(conn->protocol&PROT_HTTPS)) {
/* The path sent to the proxy is in fact the entire URL */
@@ -315,17 +466,26 @@ CURLcode http(struct connectdata *conn)
if(data->bits.http_formpost) {
/* we must build the whole darned post sequence first, so that we have
a size of the whole shebang before we start to send it */
http->sendit = getFormData(data->httppost, &http->postsize);
http->sendit = Curl_getFormData(data->httppost, &http->postsize);
}
if(!checkheaders(data, "Host:")) {
if(((conn->protocol&PROT_HTTPS) && (data->remote_port == PORT_HTTPS)) ||
(!(conn->protocol&PROT_HTTPS) && (data->remote_port == PORT_HTTP)) )
/* if ptr_host is already set, it is almost OK since we only re-use
connections to the very same host and port, but when we use a HTTP
proxy we have a persistant connect and yet we must change the Host:
header! */
if(conn->allocptr.host)
free(conn->allocptr.host);
if(((conn->protocol&PROT_HTTPS) && (conn->remote_port == PORT_HTTPS)) ||
(!(conn->protocol&PROT_HTTPS) && (conn->remote_port == PORT_HTTP)) )
/* If (HTTPS on port 443) OR (non-HTTPS on port 80) then don't include
the port number in the host string */
data->ptr_host = maprintf("Host: %s\r\n", host);
conn->allocptr.host = aprintf("Host: %s\r\n", host);
else
data->ptr_host = maprintf("Host: %s:%d\r\n", host, data->remote_port);
conn->allocptr.host = aprintf("Host: %s:%d\r\n", host,
conn->remote_port);
}
if(!checkheaders(data, "Pragma:"))
@@ -334,6 +494,92 @@ CURLcode http(struct connectdata *conn)
if(!checkheaders(data, "Accept:"))
http->p_accept = "Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*\r\n";
if((data->bits.http_post ||
data->bits.http_formpost ||
data->bits.http_put) &&
data->resume_from) {
/**********************************************************************
* Resuming upload in HTTP means that we PUT or POST and that we have
* got a resume_from value set. The resume value has already created
* a Range: header that will be passed along. We need to "fast forward"
* the file the given number of bytes and decrease the assume upload
* file size before we continue this venture in the dark lands of HTTP.
*********************************************************************/
if(data->resume_from < 0 ) {
/*
* This is meant to get the size of the present remote-file by itself.
* We don't support this now. Bail out!
*/
data->resume_from = 0;
}
if(data->resume_from) {
/* do we still game? */
int passed=0;
/* Now, let's read off the proper amount of bytes from the
input. If we knew it was a proper file we could've just
fseek()ed but we only have a stream here */
do {
int readthisamountnow = (data->resume_from - passed);
int actuallyread;
if(readthisamountnow > BUFSIZE)
readthisamountnow = BUFSIZE;
actuallyread =
data->fread(data->buffer, 1, readthisamountnow, data->in);
passed += actuallyread;
if(actuallyread != readthisamountnow) {
failf(data, "Could only read %d bytes from the input\n",
passed);
return CURLE_READ_ERROR;
}
} while(passed != data->resume_from); /* loop until done */
/* now, decrease the size of the read */
if(data->infilesize>0) {
data->infilesize -= data->resume_from;
if(data->infilesize <= 0) {
failf(data, "File already completely uploaded\n");
return CURLE_PARTIAL_FILE;
}
}
/* we've passed, proceed as normal */
}
}
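For concreteness, a hedged worked example of the fast-forward loop above, with made-up numbers:

/* With resume_from = 10000 and BUFSIZE = 4096 (illustrative values), the
   loop asks the read callback for 4096 + 4096 + 1808 bytes, discards them,
   and ends with passed == 10000; infilesize is then reduced by those same
   10000 bytes so only the remaining part of the file gets uploaded. */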
if(data->bits.set_range) {
/*
* A range is selected. We use different headers whether we're downloading
* or uploading and we always let customized headers override our internal
* ones if any such are specified.
*/
if((data->httpreq == HTTPREQ_GET) &&
!checkheaders(data, "Range:")) {
conn->allocptr.rangeline = aprintf("Range: bytes=%s\r\n", data->range);
}
else if((data->httpreq != HTTPREQ_GET) &&
!checkheaders(data, "Content-Range:")) {
if(data->resume_from) {
/* This is because "resume" was selected */
long total_expected_size= data->resume_from + data->infilesize;
conn->allocptr.rangeline = aprintf("Content-Range: bytes %s%ld/%ld\r\n",
data->range, total_expected_size-1,
total_expected_size);
}
else {
/* Range was selected and then we just pass the incoming range and
append total size */
conn->allocptr.rangeline = aprintf("Content-Range: bytes %s/%d\r\n",
data->range, data->infilesize);
}
}
}
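A hedged worked example of the header arithmetic above (all values made up):

/* Resumed upload: with data->range = "500-", resume_from = 500 and
   infilesize = 1000 (already reduced by the fast-forward code), the
   total_expected_size is 1500 and the header built is

       Content-Range: bytes 500-1499/1500

   i.e. the last byte index is total_expected_size - 1.  A plain ranged
   download with data->range = "0-999" instead produces

       Range: bytes=0-999                                              */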
do {
send_buffer *req_buffer;
struct curl_slist *headers=data->headers;
@@ -344,7 +590,7 @@ CURLcode http(struct connectdata *conn)
/* add the main request stuff */
add_bufferf(req_buffer,
"%s " /* GET/HEAD/POST/PUT */
"%s HTTP/1.0\r\n" /* path */
"%s HTTP/1.1\r\n" /* path */
"%s" /* proxyuserpwd */
"%s" /* userpwd */
"%s" /* range */
@@ -360,15 +606,19 @@ CURLcode http(struct connectdata *conn)
(data->bits.http_post || data->bits.http_formpost)?"POST":
(data->bits.http_put)?"PUT":"GET"),
ppath,
(data->bits.proxy_user_passwd && data->ptr_proxyuserpwd)?data->ptr_proxyuserpwd:"",
(data->bits.user_passwd && data->ptr_userpwd)?data->ptr_userpwd:"",
(data->bits.set_range && data->ptr_rangeline)?data->ptr_rangeline:"",
(data->useragent && *data->useragent && data->ptr_uagent)?data->ptr_uagent:"",
(data->ptr_cookie?data->ptr_cookie:""), /* Cookie: <data> */
(data->ptr_host?data->ptr_host:""), /* Host: host */
(conn->bits.proxy_user_passwd &&
conn->allocptr.proxyuserpwd)?conn->allocptr.proxyuserpwd:"",
(conn->bits.user_passwd && conn->allocptr.userpwd)?
conn->allocptr.userpwd:"",
(data->bits.set_range && conn->allocptr.rangeline)?
conn->allocptr.rangeline:"",
(data->useragent && *data->useragent && conn->allocptr.uagent)?
conn->allocptr.uagent:"",
(conn->allocptr.cookie?conn->allocptr.cookie:""), /* Cookie: <data> */
(conn->allocptr.host?conn->allocptr.host:""), /* Host: host */
http->p_pragma?http->p_pragma:"",
http->p_accept?http->p_accept:"",
(data->bits.http_set_referer && data->ptr_ref)?data->ptr_ref:"" /* Referer: <data> <CRLF> */
(data->bits.http_set_referer && conn->allocptr.ref)?conn->allocptr.ref:"" /* Referer: <data> <CRLF> */
);
if(co) {
@@ -389,7 +639,7 @@ CURLcode http(struct connectdata *conn)
if(count) {
add_buffer(req_buffer, "\r\n", 2);
}
cookie_freelist(store); /* free the cookie list */
Curl_cookie_freelist(store); /* free the cookie list */
co=NULL;
}
@@ -451,7 +701,7 @@ CURLcode http(struct connectdata *conn)
}
if(data->bits.http_formpost) {
if(FormInit(&http->form, http->sendit)) {
if(Curl_FormInit(&http->form, http->sendit)) {
failf(data, "Internal HTTP POST error!\n");
return CURLE_HTTP_POST_ERROR;
}
@@ -461,24 +711,24 @@ CURLcode http(struct connectdata *conn)
data->fread =
(size_t (*)(char *, size_t, size_t, FILE *))
FormReader; /* set the read function to read from the
generated form data */
Curl_FormReader; /* set the read function to read from the
generated form data */
data->in = (FILE *)&http->form;
add_bufferf(req_buffer,
"Content-Length: %d\r\n", http->postsize-2);
/* set upload size to the progress meter */
pgrsSetUploadSize(data, http->postsize);
Curl_pgrsSetUploadSize(data, http->postsize);
data->request_size =
add_buffer_send(data->firstsocket, conn, req_buffer);
result = Transfer(conn, data->firstsocket, -1, TRUE,
add_buffer_send(conn->firstsocket, conn, req_buffer);
result = Curl_Transfer(conn, conn->firstsocket, -1, TRUE,
&http->readbytecount,
data->firstsocket,
conn->firstsocket,
&http->writebytecount);
if(result) {
FormFree(http->sendit); /* free that whole lot */
Curl_FormFree(http->sendit); /* free that whole lot */
return result;
}
}
@@ -494,16 +744,16 @@ CURLcode http(struct connectdata *conn)
add_bufferf(req_buffer, "\015\012");
/* set the upload size to the progress meter */
pgrsSetUploadSize(data, data->infilesize);
Curl_pgrsSetUploadSize(data, data->infilesize);
/* this sends the buffer and frees all the buffer resources */
data->request_size =
add_buffer_send(data->firstsocket, conn, req_buffer);
add_buffer_send(conn->firstsocket, conn, req_buffer);
/* prepare for transfer */
result = Transfer(conn, data->firstsocket, -1, TRUE,
result = Curl_Transfer(conn, conn->firstsocket, -1, TRUE,
&http->readbytecount,
data->firstsocket,
conn->firstsocket,
&http->writebytecount);
if(result)
return result;
@@ -544,10 +794,10 @@ CURLcode http(struct connectdata *conn)
/* issue the request */
data->request_size =
add_buffer_send(data->firstsocket, conn, req_buffer);
add_buffer_send(conn->firstsocket, conn, req_buffer);
/* HTTP GET/HEAD download: */
result = Transfer(conn, data->firstsocket, -1, TRUE, bytecount,
result = Curl_Transfer(conn, conn->firstsocket, -1, TRUE, bytecount,
-1, NULL); /* nothing to upload */
}
if(result)


@@ -25,13 +25,19 @@
*****************************************************************************/
/* ftp can use this as well */
CURLcode GetHTTPProxyTunnel(struct UrlData *data, int tunnelsocket,
char *hostname, int remote_port);
CURLcode Curl_ConnectHTTPProxyTunnel(struct connectdata *conn,
int tunnelsocket,
char *hostname, int remote_port);
/* protocol-specific functions set up to be called by the main engine */
CURLcode http(struct connectdata *conn);
CURLcode http_done(struct connectdata *conn);
CURLcode http_connect(struct connectdata *conn);
CURLcode http_close(struct connectdata *conn);
CURLcode Curl_http(struct connectdata *conn);
CURLcode Curl_http_done(struct connectdata *conn);
CURLcode Curl_http_connect(struct connectdata *conn);
CURLcode Curl_http_close(struct connectdata *conn);
/* The following functions are defined in http_chunks.c */
void Curl_httpchunk_init(struct connectdata *conn);
CHUNKcode Curl_httpchunk_read(struct connectdata *conn, char *datap,
ssize_t length, ssize_t *wrote);
#endif

222
lib/http_chunks.c Normal file

@@ -0,0 +1,222 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 2001, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* In order to be useful for every potential user, curl and libcurl are
* dual-licensed under the MPL and the MIT/X-derivate licenses.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the MPL or the MIT/X-derivate
* licenses. You may pick one of these licenses.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
*****************************************************************************/
#include "setup.h"
/* -- WIN32 approved -- */
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <ctype.h>
#include "urldata.h" /* it includes http_chunks.h */
#include "sendf.h" /* for the client write stuff */
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#include "memdebug.h"
#endif
/*
* Chunk format (simplified):
*
* <HEX SIZE>[ chunk extension ] CRLF
* <DATA>
*
* Highlights from RFC2616 section 3.6 say:
The chunked encoding modifies the body of a message in order to
transfer it as a series of chunks, each with its own size indicator,
followed by an OPTIONAL trailer containing entity-header fields. This
allows dynamically produced content to be transferred along with the
information necessary for the recipient to verify that it has
received the full message.
Chunked-Body = *chunk
last-chunk
trailer
CRLF
chunk = chunk-size [ chunk-extension ] CRLF
chunk-data CRLF
chunk-size = 1*HEX
last-chunk = 1*("0") [ chunk-extension ] CRLF
chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
chunk-ext-name = token
chunk-ext-val = token | quoted-string
chunk-data = chunk-size(OCTET)
trailer = *(entity-header CRLF)
The chunk-size field is a string of hex digits indicating the size of
the chunk. The chunked encoding is ended by any chunk whose size is
zero, followed by the trailer, which is terminated by an empty line.
*/
void Curl_httpchunk_init(struct connectdata *conn)
{
struct Curl_chunker *chunk = &conn->proto.http->chunk;
chunk->hexindex=0; /* start at 0 */
chunk->dataleft=0; /* no data left yet! */
chunk->state = CHUNK_HEX; /* we get hex first! */
}
/*
* chunk_read() returns a OK for normal operations, or a positive return code
* for errors. STOP means this sequence of chunks is complete. The 'wrote'
* argument is set to tell the caller how many bytes we actually passed to the
* client (for byte-counting and whatever).
*
* The states and the state-machine is further explained in the header file.
*/
CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
char *datap,
ssize_t length,
ssize_t *wrote)
{
CURLcode result;
struct Curl_chunker *ch = &conn->proto.http->chunk;
int piece;
*wrote = 0; /* nothing yet */
while(length) {
switch(ch->state) {
case CHUNK_HEX:
if(isxdigit((int)*datap)) {
if(ch->hexindex < MAXNUM_SIZE) {
ch->hexbuffer[ch->hexindex] = *datap;
datap++;
length--;
ch->hexindex++;
}
else {
return CHUNKE_TOO_LONG_HEX; /* longer hex than we support */
}
}
else {
if(0 == ch->hexindex) {
/* This is illegal data, we received junk where we expected
a hexadecimal digit. */
return CHUNKE_ILLEGAL_HEX;
}
/* length and datap are unmodified */
ch->hexbuffer[ch->hexindex]=0;
ch->datasize=strtoul(ch->hexbuffer, NULL, 16);
ch->state = CHUNK_POSTHEX;
}
break;
case CHUNK_POSTHEX:
/* In this state, we're waiting for CRLF to arrive. We support
this to allow so called chunk-extensions to show up here
before the CRLF comes. */
if(*datap == '\r')
ch->state = CHUNK_CR;
length--;
datap++;
break;
case CHUNK_CR:
/* waiting for the LF */
if(*datap == '\n') {
/* we're now expecting data to come, unless size was zero! */
if(0 == ch->datasize) {
ch->state = CHUNK_STOP; /* stop reading! */
if(1 == length) {
/* This was the final byte, return right now */
return CHUNKE_STOP;
}
}
else
ch->state = CHUNK_DATA;
}
else
/* previously we got a fake CR, go back to CR waiting! */
ch->state = CHUNK_CR;
datap++;
length--;
break;
case CHUNK_DATA:
/* we get pure and fine data
We expect another 'datasize' of data. We have 'length' right now,
it can be more or less than 'datasize'. Get the smallest piece.
*/
piece = (ch->datasize >= length)?length:ch->datasize;
/* Write the data portion available */
result = Curl_client_write(conn->data, CLIENTWRITE_BODY, datap, piece);
if(result)
return CHUNKE_WRITE_ERROR;
*wrote += piece;
ch->datasize -= piece; /* decrease amount left to expect */
datap += piece; /* move read pointer forward */
length -= piece; /* decrease space left in this round */
if(0 == ch->datasize)
/* end of data this round, we now expect a trailing CRLF */
ch->state = CHUNK_POSTCR;
break;
case CHUNK_POSTCR:
if(*datap == '\r') {
ch->state = CHUNK_POSTLF;
datap++;
length--;
}
else
return CHUNKE_BAD_CHUNK;
break;
case CHUNK_POSTLF:
if(*datap == '\n') {
/*
* The last one before we go back to hex state and start all
* over.
*/
Curl_httpchunk_init(conn);
datap++;
length--;
}
else
return CHUNKE_BAD_CHUNK;
break;
case CHUNK_STOP:
/* If we arrive here, there is data left in the end of the buffer
even if there's no more chunks to read */
ch->dataleft = length;
return CHUNKE_STOP; /* return stop */
default:
return CHUNKE_STATE_ERROR;
}
}
return CHUNKE_OK;
}
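A hedged sketch of the calling side, to make the comment above Curl_httpchunk_read() concrete ('buffer' and 'nread' are assumed to hold data just read from the socket):

ssize_t wrote;      /* bytes handed to the client write callback */
CHUNKcode res;

Curl_httpchunk_init(conn);        /* once, when the chunked body starts */

res = Curl_httpchunk_read(conn, buffer, nread, &wrote);
if(CHUNKE_STOP == res) {
  /* the terminating zero-size chunk was seen; any trailing bytes are
     reported in conn->proto.http->chunk.dataleft */
}
else if(CHUNKE_OK != res) {
  /* positive codes mean the stream was not valid chunked data */
}
/* CHUNKE_OK: more data is needed, call again with the next buffer */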

87
lib/http_chunks.h Normal file

@@ -0,0 +1,87 @@
#ifndef __HTTP_CHUNKS_H
#define __HTTP_CHUNKS_H
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 2001, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* In order to be useful for every potential user, curl and libcurl are
* dual-licensed under the MPL and the MIT/X-derivate licenses.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the MPL or the MIT/X-derivate
* licenses. You may pick one of these licenses.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
*****************************************************************************/
/*
* The longest possible hexadecimal number we support in a chunked transfer.
* Weird enough, RFC2616 doesn't set a maximum size! Since we use strtoul()
* to convert it, we "only" support 2^32 bytes chunk data.
*/
#define MAXNUM_SIZE 16
typedef enum {
CHUNK_FIRST, /* never use */
/* In this we await and buffer all hexadecimal digits until we get one
that isn't a hexadecimal digit. When done, we go POSTHEX */
CHUNK_HEX,
/* We have received the hexadecimal digit and we eat all characters until
we get a CRLF pair. When we see a CR we go to the CR state. */
CHUNK_POSTHEX,
/* A single CR has been found and we should get a LF right away in this
state or we go back to POSTHEX. When LF is received, we go to DATA.
If the size given was zero, we set state to STOP and return. */
CHUNK_CR,
/* We eat the amount of data specified. When done, we move on to the
POST_CR state. */
CHUNK_DATA,
/* POSTCR should get a CR and nothing else, then move to POSTLF */
CHUNK_POSTCR,
/* POSTLF should get a LF and nothing else, then move back to HEX as
the CRLF combination marks the end of a chunk */
CHUNK_POSTLF,
/* This is mainly used to really mark that we're out of the game.
NOTE: that there's a 'dataleft' field in the struct that will tell how
many bytes that were not passed to the client in the end of the last
buffer! */
CHUNK_STOP,
CHUNK_LAST /* never use */
} ChunkyState;
typedef enum {
CHUNKE_STOP = -1,
CHUNKE_OK = 0,
CHUNKE_TOO_LONG_HEX = 1,
CHUNKE_ILLEGAL_HEX,
CHUNKE_BAD_CHUNK,
CHUNKE_WRITE_ERROR,
CHUNKE_STATE_ERROR,
CHUNKE_LAST
} CHUNKcode;
struct Curl_chunker {
char hexbuffer[ MAXNUM_SIZE + 1];
int hexindex;
ChunkyState state;
size_t datasize;
size_t dataleft; /* untouched data amount at the end of the last buffer */
};
#endif
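To make the state comments above concrete, a hedged trace over a small made-up chunked body:

/* Input: "5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n"
 *
 *   HEX      buffers '5', hits '\r'         -> POSTHEX, datasize = 5
 *   POSTHEX  consumes '\r'                  -> CR
 *   CR       consumes '\n'                  -> DATA
 *   DATA     writes "hello" to the client   -> POSTCR
 *   POSTCR/POSTLF consume "\r\n"            -> back to HEX for "6\r\n world"
 *   ...the same again for the 6-byte chunk...
 *   HEX      buffers '0', hits '\r'         -> POSTHEX, datasize = 0
 *   POSTHEX/CR consume "\r\n"               -> STOP (CHUNKE_STOP returned)
 */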


@@ -32,7 +32,7 @@
#include <unistd.h>
#endif
#if ! defined(WIN32) && ! defined(__BEOS__)
#if ! defined(WIN32) && ! defined(__BEOS__) && !defined(__CYGWIN32__)
#ifdef NEED_REENTRANT
#define _REENTRANT
@@ -70,9 +70,14 @@
#include "inet_ntoa_r.h"
#endif
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#include "memdebug.h"
#endif
#define SYS_ERROR -1
char *if2ip(char *interface, char *buf, int buf_size)
char *Curl_if2ip(char *interface, char *buf, int buf_size)
{
int dummy;
char *ip=NULL;
@@ -90,6 +95,7 @@ char *if2ip(char *interface, char *buf, int buf_size)
strcpy(req.ifr_name, interface);
req.ifr_addr.sa_family = AF_INET;
if (SYS_ERROR == ioctl(dummy, SIOCGIFADDR, &req, sizeof(req))) {
sclose(dummy);
return NULL;
}
else {
@@ -104,7 +110,7 @@ char *if2ip(char *interface, char *buf, int buf_size)
ip[buf_size - 1] = 0;
#endif
}
close(dummy);
sclose(dummy);
}
return ip;
}


@@ -24,10 +24,10 @@
*****************************************************************************/
#include "setup.h"
#if ! defined(WIN32) && ! defined(__BEOS__)
extern char *if2ip(char *interface, char *buf, int buf_size);
#if ! defined(WIN32) && ! defined(__BEOS__) && !defined(__CYGWIN32__)
extern char *Curl_if2ip(char *interface, char *buf, int buf_size);
#else
#define if2ip(a,b,c) NULL
#define Curl_if2ip(a,b,c) NULL
#endif
#endif


@@ -47,6 +47,9 @@
#include <string.h>
#include <krb.h>
#include "ftp.h"
#include "sendf.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#include "memdebug.h"
@@ -95,7 +98,8 @@ strlcpy (char *dst, const char *src, size_t dst_sz)
else
return n + strlen (src);
}
#else
size_t strlcpy (char *dst, const char *src, size_t dst_sz);
#endif
static int
@@ -279,12 +283,15 @@ krb4_auth(void *app_data, struct connectdata *conn)
u_int32_t cs;
struct krb4_data *d = app_data;
struct sockaddr_in *localaddr = (struct sockaddr_in *)LOCAL_ADDR;
#if 0
struct sockaddr_in *remoteaddr = (struct sockaddr_in *)REMOTE_ADDR;
#endif
char *host = conn->hp->h_name;
size_t nread;
int l = sizeof(local_addr);
if(getsockname(conn->data->firstsocket, LOCAL_ADDR, &l) < 0)
if(getsockname(conn->firstsocket,
(struct sockaddr *)LOCAL_ADDR, &l) < 0)
perror("getsockname()");
checksum = getpid();
@@ -308,7 +315,7 @@ krb4_auth(void *app_data, struct connectdata *conn)
else {
if (natAddr.s_addr != localaddr->sin_addr.s_addr) {
printf("Using NAT IP address (%s) for kerberos 4\n",
inet_ntoa(natAddr));
(char *)inet_ntoa(natAddr));
localaddr->sin_addr = natAddr;
/*
@@ -327,15 +334,15 @@ krb4_auth(void *app_data, struct connectdata *conn)
/*printf("Local address is %s\n", inet_ntoa(localaddr->sin_addr));***/
/*printf("Remote address is %s\n", inet_ntoa(remoteaddr->sin_addr));***/
if(base64_encode(adat.dat, adat.length, &p) < 0) {
if(Curl_base64_encode(adat.dat, adat.length, &p) < 0) {
printf("Out of memory base64-encoding.\n");
return AUTH_CONTINUE;
}
/*ret = command("ADAT %s", p)*/
ftpsendf(conn->data->firstsocket, conn, "ADAT %s", p);
Curl_ftpsendf(conn->firstsocket, conn, "ADAT %s", p);
/* wait for feedback */
nread = GetLastResponse(conn->data->firstsocket,
conn->data->buffer, conn);
nread = Curl_GetFTPResponse(conn->firstsocket,
conn->data->buffer, conn, NULL);
if(nread < 0)
return /*CURLE_OPERATION_TIMEOUTED*/-1;
free(p);
@@ -351,7 +358,7 @@ krb4_auth(void *app_data, struct connectdata *conn)
return AUTH_ERROR;
}
p += 5;
len = base64_decode(p, adat.dat);
len = Curl_base64_decode(p, adat.dat);
if(len < 0){
printf("Failed to decode base64 from server.\n");
return AUTH_ERROR;
@@ -389,14 +396,11 @@ struct sec_client_mech krb4_client_mech = {
void krb_kauth(struct connectdata *conn)
{
int ret;
char buf[1024];
des_cblock key;
des_key_schedule schedule;
KTEXT_ST tkt, tktcopy;
char *name;
char *p;
int overbose;
char passwd[100];
int tmp;
size_t nread;
@@ -405,20 +409,19 @@ void krb_kauth(struct connectdata *conn)
save = set_command_prot(conn, prot_private);
/*ret = command("SITE KAUTH %s", name);***/
ftpsendf(conn->data->firstsocket, conn,
Curl_ftpsendf(conn->firstsocket, conn,
"SITE KAUTH %s", conn->data->user);
/* wait for feedback */
nread = GetLastResponse(conn->data->firstsocket, conn->data->buffer, conn);
nread = Curl_GetFTPResponse(conn->firstsocket, conn->data->buffer,
conn, NULL);
if(nread < 0)
return /*CURLE_OPERATION_TIMEOUTED*/;
if(/*ret != CONTINUE*/conn->data->buffer[0] != '3'){
/*verbose = overbose;***/
set_command_prot(conn, save);
/*code = -1;***/
return;
}
/*verbose = overbose;***/
p = strstr(/*reply_string***/conn->data->buffer, "T=");
if(!p){
printf("Bad reply from server.\n");
@@ -427,7 +430,7 @@ void krb_kauth(struct connectdata *conn)
return;
}
p += 2;
tmp = base64_decode(p, &tkt.dat);
tmp = Curl_base64_decode(p, &tkt.dat);
if(tmp < 0){
printf("Failed to decode base64 in reply.\n");
set_command_prot(conn, save);
@@ -440,7 +443,6 @@ void krb_kauth(struct connectdata *conn)
p = strstr(/*reply_string***/conn->data->buffer, "P=");
if(!p){
printf("Bad reply from server.\n");
/*verbose = overbose;***/
set_command_prot(conn, save);
/*code = -1;***/
return;
@@ -476,7 +478,7 @@ void krb_kauth(struct connectdata *conn)
memset(key, 0, sizeof(key));
memset(schedule, 0, sizeof(schedule));
memset(passwd, 0, sizeof(passwd));
if(base64_encode(tktcopy.dat, tktcopy.length, &p) < 0) {
if(Curl_base64_encode(tktcopy.dat, tktcopy.length, &p) < 0) {
failf(conn->data, "Out of memory base64-encoding.\n");
set_command_prot(conn, save);
/*code = -1;***/
@@ -484,10 +486,11 @@ void krb_kauth(struct connectdata *conn)
}
memset (tktcopy.dat, 0, tktcopy.length);
/*ret = command("SITE KAUTH %s %s", name, p);***/
ftpsendf(conn->data->firstsocket, conn,
Curl_ftpsendf(conn->firstsocket, conn,
"SITE KAUTH %s %s", name, p);
/* wait for feedback */
nread = GetLastResponse(conn->data->firstsocket, conn->data->buffer, conn);
nread = Curl_GetFTPResponse(conn->firstsocket, conn->data->buffer,
conn, NULL);
if(nread < 0)
return /*CURLE_OPERATION_TIMEOUTED*/;
free(p);


@@ -1,5 +1,5 @@
#ifndef __HIGHLEVEL_H
#define __HIGHLEVEL_H
#ifndef __KRB4_H
#define __KRB4_H
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
@@ -22,5 +22,6 @@
*
* $Id$
*****************************************************************************/
CURLcode curl_transfer(CURL *curl);
void krb_kauth(struct connectdata *conn);
#endif


@@ -117,18 +117,18 @@ static void * DynaGetFunction(char *name)
static int WriteProc(void *param, char *text, int len)
{
struct UrlData *data = (struct UrlData *)param;
client_write(data, CLIENTWRITE_BODY, text, 0);
Curl_client_write(data, CLIENTWRITE_BODY, text, 0);
return 0;
}
CURLcode ldap_done(struct connectdata *conn)
CURLcode Curl_ldap_done(struct connectdata *conn)
{
return CURLE_OK;
}
/***********************************************************************
*/
CURLcode ldap(struct connectdata *conn)
CURLcode Curl_ldap(struct connectdata *conn)
{
CURLcode status = CURLE_OK;
int rc;
@@ -171,10 +171,10 @@ CURLcode ldap(struct connectdata *conn)
DYNA_GET_FUNCTION(int (*)(void *, char *, void *, void *, char **, char **, int (*)(void *, char *, int), void *, char *, int, unsigned long), ldap_entry2text);
DYNA_GET_FUNCTION(int (*)(void *, char *, void *, void *, char **, char **, int (*)(void *, char *, int), void *, char *, int, unsigned long, char *, char *), ldap_entry2html);
server = ldap_open(data->hostname, data->port);
server = ldap_open(conn->hostname, conn->port);
if (server == NULL) {
failf(data, "LDAP: Cannot connect to %s:%d",
data->hostname, data->port);
conn->hostname, conn->port);
status = CURLE_COULDNT_CONNECT;
} else {
rc = ldap_simple_bind_s(server, data->user, data->passwd);


@@ -23,7 +23,7 @@
*
* $Id$
*****************************************************************************/
CURLcode ldap(struct connectdata *conn);
CURLcode ldap_done(struct connectdata *conn);
CURLcode Curl_ldap(struct connectdata *conn);
CURLcode Curl_ldap_done(struct connectdata *conn);
#endif /* __LDAP_H */


@@ -7,36 +7,26 @@ LIBRARY LIBCURL
DESCRIPTION 'curl libcurl - http://curl.haxx.se'
EXPORTS
curl_close @ 1 ;
curl_connect @ 2 ;
curl_disconnect @ 3 ;
curl_do @ 4 ;
curl_done @ 5 ;
curl_easy_cleanup @ 6 ;
curl_easy_getinfo @ 7 ;
curl_easy_init @ 8 ;
curl_easy_perform @ 9 ;
curl_easy_setopt @ 10 ;
curl_escape @ 11 ;
curl_formparse @ 12 ;
curl_free @ 13 ;
curl_getdate @ 14 ;
curl_getenv @ 15 ;
curl_init @ 16 ;
curl_open @ 17 ;
curl_read @ 18 ;
curl_setopt @ 19 ;
curl_slist_append @ 20 ;
curl_slist_free_all @ 21 ;
curl_transfer @ 22 ;
curl_unescape @ 23 ;
curl_version @ 24 ;
curl_write @ 25 ;
maprintf @ 26 ;
mfprintf @ 27 ;
mprintf @ 28 ;
msprintf @ 29 ;
msnprintf @ 30 ;
mvfprintf @ 31 ;
strequal @ 32 ;
strnequal @ 33 ;
curl_easy_cleanup @ 1 ;
curl_easy_getinfo @ 2 ;
curl_easy_init @ 3 ;
curl_easy_perform @ 4 ;
curl_easy_setopt @ 5 ;
curl_escape @ 6 ;
curl_formparse @ 7 ;
curl_formfree @ 8 ;
curl_getdate @ 9 ;
curl_getenv @ 10 ;
curl_slist_append @ 11 ;
curl_slist_free_all @ 12 ;
curl_unescape @ 13 ;
curl_version @ 14 ;
curl_maprintf @ 15 ;
curl_mfprintf @ 16 ;
curl_mprintf @ 17 ;
curl_msprintf @ 18 ;
curl_msnprintf @ 19 ;
curl_mvfprintf @ 20 ;
curl_strequal @ 21 ;
curl_strnequal @ 22 ;


@@ -72,7 +72,7 @@ void *curl_domalloc(size_t size, int line, char *source)
return mem;
}
char *curl_dostrdup(char *str, int line, char *source)
char *curl_dostrdup(const char *str, int line, char *source)
{
char *mem;
size_t len;
@@ -120,7 +120,7 @@ int curl_socket(int domain, int type, int protocol, int line, char *source)
return sockfd;
}
int curl_accept(int s, struct sockaddr *addr, int *addrlen,
int curl_accept(int s, struct sockaddr *addr, socklen_t *addrlen,
int line, char *source)
{
int sockfd=(accept)(s, addr, addrlen);
@@ -135,7 +135,7 @@ int curl_sclose(int sockfd, int line, char *source)
int res=sclose(sockfd);
fprintf(logfile?logfile:stderr, "FD %s:%d sclose(%d)\n",
source, line, sockfd);
return sockfd;
return res;
}
FILE *curl_fopen(char *file, char *mode, int line, char *source)


@@ -7,13 +7,13 @@
void *curl_domalloc(size_t size, int line, char *source);
void *curl_dorealloc(void *ptr, size_t size, int line, char *source);
void curl_dofree(void *ptr, int line, char *source);
char *curl_dostrdup(char *str, int line, char *source);
char *curl_dostrdup(const char *str, int line, char *source);
void curl_memdebug(char *logname);
/* file descriptor manipulators */
int curl_socket(int domain, int type, int protocol, int, char *);
int curl_sclose(int sockfd, int, char *);
int curl_accept(int s, struct sockaddr *addr, int *addrlen,
int curl_accept(int s, struct sockaddr *addr, socklen_t *addrlen,
int line, char *source);
/* FILE functions */


@@ -207,7 +207,7 @@ struct asprintf {
size_t alloc; /* length of alloc */
};
int msprintf(char *buffer, const char *format, ...);
int curl_msprintf(char *buffer, const char *format, ...);
static int dprintf_DollarString(char *input, char **end)
{
@@ -955,11 +955,11 @@ static int dprintf_formatf(
if(width >= 0) {
/* RECURSIVE USAGE */
fptr += msprintf(fptr, "%d", width);
fptr += curl_msprintf(fptr, "%d", width);
}
if(prec >= 0) {
/* RECURSIVE USAGE */
fptr += msprintf(fptr, ".%d", prec);
fptr += curl_msprintf(fptr, ".%d", prec);
}
if (p->flags & FLAGS_LONG)
strcat(fptr, "l");
@@ -1025,7 +1025,7 @@ static int addbyter(int output, FILE *data)
return -1;
}
int msnprintf(char *buffer, size_t maxlength, const char *format, ...)
int curl_msnprintf(char *buffer, size_t maxlength, const char *format, ...)
{
va_list ap_save; /* argument pointer */
int retcode;
@@ -1045,7 +1045,7 @@ int msnprintf(char *buffer, size_t maxlength, const char *format, ...)
return retcode;
}
int mvsnprintf(char *buffer, size_t maxlength, const char *format, va_list ap_save)
int curl_mvsnprintf(char *buffer, size_t maxlength, const char *format, va_list ap_save)
{
int retcode;
struct nsprintf info;
@@ -1092,7 +1092,7 @@ static int alloc_addbyter(int output, FILE *data)
}
char *maprintf(const char *format, ...)
char *curl_maprintf(const char *format, ...)
{
va_list ap_save; /* argument pointer */
int retcode;
@@ -1113,7 +1113,7 @@ char *maprintf(const char *format, ...)
return NULL;
}
char *mvaprintf(const char *format, va_list ap_save)
char *curl_mvaprintf(const char *format, va_list ap_save)
{
int retcode;
struct asprintf info;
@@ -1140,7 +1140,7 @@ static int storebuffer(int output, FILE *data)
return output; /* act like fputc() ! */
}
int msprintf(char *buffer, const char *format, ...)
int curl_msprintf(char *buffer, const char *format, ...)
{
va_list ap_save; /* argument pointer */
int retcode;
@@ -1153,7 +1153,7 @@ int msprintf(char *buffer, const char *format, ...)
extern int fputc(int, FILE *);
int mprintf(const char *format, ...)
int curl_mprintf(const char *format, ...)
{
int retcode;
va_list ap_save; /* argument pointer */
@@ -1163,7 +1163,7 @@ int mprintf(const char *format, ...)
return retcode;
}
int mfprintf(FILE *whereto, const char *format, ...)
int curl_mfprintf(FILE *whereto, const char *format, ...)
{
int retcode;
va_list ap_save; /* argument pointer */
@@ -1173,7 +1173,7 @@ int mfprintf(FILE *whereto, const char *format, ...)
return retcode;
}
int mvsprintf(char *buffer, const char *format, va_list ap_save)
int curl_mvsprintf(char *buffer, const char *format, va_list ap_save)
{
int retcode;
retcode = dprintf_formatf(&buffer, storebuffer, format, ap_save);
@@ -1181,12 +1181,12 @@ int mvsprintf(char *buffer, const char *format, va_list ap_save)
return retcode;
}
int mvprintf(const char *format, va_list ap_save)
int curl_mvprintf(const char *format, va_list ap_save)
{
return dprintf_formatf(stdout, fputc, format, ap_save);
}
int mvfprintf(FILE *whereto, const char *format, va_list ap_save)
int curl_mvfprintf(FILE *whereto, const char *format, va_list ap_save)
{
return dprintf_formatf(whereto, fputc, format, ap_save);
}


@@ -27,9 +27,26 @@
#include <stdlib.h>
#include <string.h>
#include "getenv.h"
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_PWD_H
#include <pwd.h>
#endif
#include <curl/curl.h>
#include "strequal.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#include "memdebug.h"
#endif
/* Debug this single source file with:
'make netrc' then run './netrc'!
@@ -51,15 +68,15 @@ enum {
#define LOGINSIZE 64
#define PASSWORDSIZE 64
int ParseNetrc(char *host,
char *login,
char *password)
int Curl_parsenetrc(char *host,
char *login,
char *password)
{
FILE *file;
char netrcbuffer[256];
int retcode=1;
char *home = GetEnv("HOME"); /* portable environment reader */
char *home = NULL;
int state=NOTHING;
char state_login=0;
@@ -67,11 +84,25 @@ int ParseNetrc(char *host,
#define NETRC DOT_CHAR "netrc"
if(!home)
return -1;
#if defined(HAVE_GETPWUID) && defined(HAVE_GETEUID)
struct passwd *pw;
pw= getpwuid(geteuid());
if (pw)
home = pw->pw_dir;
#else
void *pw=NULL;
#endif
if(NULL == pw) {
home = curl_getenv("HOME"); /* portable environment reader */
if(!home) {
return -1;
}
}
if(strlen(home)>(sizeof(netrcbuffer)-strlen(NETRC))) {
free(home);
if(NULL==pw)
free(home);
return -1;
}
@@ -139,7 +170,8 @@ int ParseNetrc(char *host,
fclose(file);
}
free(home);
if(NULL==pw)
free(home);
return retcode;
}
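Two things change in netrc.c: ParseNetrc() becomes Curl_parsenetrc(), and the home directory is now taken from getpwuid(geteuid()) when available, with curl_getenv("HOME") only as a fallback. Only the getenv result is heap-allocated, which is why every free(home) is now guarded by if(NULL==pw). A condensed sketch of that lookup order, assuming a POSIX system (the real code guards these calls with HAVE_* macros) and using a hypothetical helper name:
#include <stdlib.h>
#include <pwd.h>
#include <unistd.h>
#include <curl/curl.h>      /* for curl_getenv() */

static char *find_home(int *must_free)   /* find_home() is not curl's name */
{
  struct passwd *pw = getpwuid(geteuid());
  char *home = NULL;

  *must_free = 0;
  if(pw)
    home = pw->pw_dir;          /* owned by the passwd entry: never free()d */
  if(!home) {
    home = curl_getenv("HOME"); /* returns malloc()ed data */
    if(home)
      *must_free = 1;           /* mirrors the if(NULL==pw) free(home) above */
  }
  return home;
}

int main(void)
{
  int must_free;
  char *home = find_home(&must_free);
  if(home && must_free)
    free(home);
  return 0;
}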

View File

@@ -22,7 +22,7 @@
*
* $Id$
*****************************************************************************/
int ParseNetrc(char *host,
char *login,
char *password);
int Curl_parsenetrc(char *host,
char *login,
char *password);
#endif
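For callers, the only visible change is the prototype: Curl_parsenetrc(host, login, password) fills the two caller-supplied buffers from the user's .netrc. A hedged usage sketch; the buffer sizes follow the LOGINSIZE/PASSWORDSIZE defines shown earlier, and the idea that a zero return means a matching entry was found is inferred from the retcode handling, not stated in this diff:
#include <stdio.h>
#include "netrc.h"          /* declares Curl_parsenetrc() */

int main(void)
{
  char login[64] = "";      /* LOGINSIZE */
  char password[64] = "";   /* PASSWORDSIZE */

  /* assumed return convention: 0 when a matching .netrc entry was found */
  if(0 == Curl_parsenetrc("example.com", login, password))
    printf("user %s found in .netrc\n", login);
  else
    printf("no .netrc entry for example.com\n");
  return 0;
}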

View File

@@ -45,7 +45,7 @@
#include "progress.h"
void time2str(char *r, int t)
static void time2str(char *r, int t)
{
int h = (t/3600);
int m = (t-(h*3600))/60;
@@ -55,7 +55,7 @@ void time2str(char *r, int t)
/* The point of this function would be to return a string of the input data,
but never longer than 5 columns. Add suffix k, M, G when suitable... */
char *max5data(double bytes, char *max5)
static char *max5data(double bytes, char *max5)
{
#define ONE_KILOBYTE 1024
#define ONE_MEGABYTE (1024*1024)
@@ -91,16 +91,16 @@ char *max5data(double bytes, char *max5)
*/
void pgrsDone(struct UrlData *data)
void Curl_pgrsDone(struct UrlData *data)
{
if(!(data->progress.flags & PGRS_HIDE)) {
data->progress.lastshow=0;
pgrsUpdate(data); /* the final (forced) update */
Curl_pgrsUpdate(data); /* the final (forced) update */
fprintf(data->err, "\n");
}
}
void pgrsTime(struct UrlData *data, timerid timer)
void Curl_pgrsTime(struct UrlData *data, timerid timer)
{
switch(timer) {
default:
@@ -111,19 +111,19 @@ void pgrsTime(struct UrlData *data, timerid timer)
/* This is set at the start of a single fetch; there may be several
fetches within one operation, which is why we add all other times
relative to this one */
data->progress.t_startsingle = tvnow();
data->progress.t_startsingle = Curl_tvnow();
break;
case TIMER_NAMELOOKUP:
data->progress.t_nslookup += tvdiff(tvnow(),
data->progress.t_nslookup += Curl_tvdiff(Curl_tvnow(),
data->progress.t_startsingle);
break;
case TIMER_CONNECT:
data->progress.t_connect += tvdiff(tvnow(),
data->progress.t_connect += Curl_tvdiff(Curl_tvnow(),
data->progress.t_startsingle);
break;
case TIMER_PRETRANSFER:
data->progress.t_pretransfer += tvdiff(tvnow(),
data->progress.t_pretransfer += Curl_tvdiff(Curl_tvnow(),
data->progress.t_startsingle);
break;
case TIMER_POSTRANSFER:
@@ -132,22 +132,22 @@ void pgrsTime(struct UrlData *data, timerid timer)
}
}
void pgrsStartNow(struct UrlData *data)
void Curl_pgrsStartNow(struct UrlData *data)
{
data->progress.start = tvnow();
data->progress.start = Curl_tvnow();
}
void pgrsSetDownloadCounter(struct UrlData *data, double size)
void Curl_pgrsSetDownloadCounter(struct UrlData *data, double size)
{
data->progress.downloaded = size;
}
void pgrsSetUploadCounter(struct UrlData *data, double size)
void Curl_pgrsSetUploadCounter(struct UrlData *data, double size)
{
data->progress.uploaded = size;
}
void pgrsSetDownloadSize(struct UrlData *data, double size)
void Curl_pgrsSetDownloadSize(struct UrlData *data, double size)
{
if(size > 0) {
data->progress.size_dl = size;
@@ -155,7 +155,7 @@ void pgrsSetDownloadSize(struct UrlData *data, double size)
}
}
void pgrsSetUploadSize(struct UrlData *data, double size)
void Curl_pgrsSetUploadSize(struct UrlData *data, double size)
{
if(size > 0) {
data->progress.size_ul = size;
@@ -171,7 +171,7 @@ void pgrsSetUploadSize(struct UrlData *data, double size)
*/
int pgrsUpdate(struct UrlData *data)
int Curl_pgrsUpdate(struct UrlData *data)
{
struct timeval now;
int result;
@@ -203,6 +203,9 @@ int pgrsUpdate(struct UrlData *data)
even when not displayed! */
else if(!(data->progress.flags & PGRS_HEADERS_OUT)) {
if (!data->progress.callback) {
if(data->resume_from)
fprintf(data->err, "** Resuming transfer from byte position %d\n",
data->resume_from);
fprintf(data->err,
" %% Total %% Received %% Xferd Average Speed Time Curr.\n"
" Dload Upload Total Current Left Speed\n");
@@ -210,16 +213,16 @@ int pgrsUpdate(struct UrlData *data)
data->progress.flags |= PGRS_HEADERS_OUT; /* headers are shown */
}
now = tvnow(); /* what time is it */
now = Curl_tvnow(); /* what time is it */
if(data->progress.lastshow == tvlong(now))
/* The exact time spent so far */
data->progress.timespent = Curl_tvdiff (now, data->progress.start);
if(data->progress.lastshow == Curl_tvlong(now))
return 0; /* never update this more than once a second if the end isn't
reached */
data->progress.lastshow = now.tv_sec;
/* The exact time spent so far */
data->progress.timespent = tvdiff (now, data->progress.start);
/* The average download speed this far */
data->progress.dlspeed = data->progress.downloaded/(data->progress.timespent!=0.0?data->progress.timespent:1.0);
@@ -257,17 +260,13 @@ int pgrsUpdate(struct UrlData *data)
}
/* Figure out the estimated time of arrival for the upload */
if(data->progress.flags & PGRS_UL_SIZE_KNOWN) {
if(!data->progress.ulspeed)
data->progress.ulspeed=1;
if((data->progress.flags & PGRS_UL_SIZE_KNOWN) && data->progress.ulspeed){
ulestimate = data->progress.size_ul / data->progress.ulspeed;
ulpercen = (data->progress.uploaded / data->progress.size_ul)*100;
}
/* ... and the download */
if(data->progress.flags & PGRS_DL_SIZE_KNOWN) {
if(!data->progress.dlspeed)
data->progress.dlspeed=1;
if((data->progress.flags & PGRS_DL_SIZE_KNOWN) && data->progress.dlspeed) {
dlestimate = data->progress.size_dl / data->progress.dlspeed;
dlpercen = (data->progress.downloaded / data->progress.size_dl)*100;
}
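Besides the Curl_ prefixes, this hunk moves the timespent calculation ahead of the once-a-second check and replaces the old "force the speed to 1" fix-ups with explicit && guards so the ETA math never divides by zero. The throttle itself is the notable pattern; a minimal standalone illustration (the names here are hypothetical, not curl's):
#include <stdio.h>
#include <time.h>

static time_t last_shown;   /* plays the role of progress.lastshow */

static void show_progress(double downloaded, double total)
{
  time_t now = time(NULL);

  if(now == last_shown)
    return;                 /* never redraw more than once per second */
  last_shown = now;

  if(total > 0.0)           /* guard the division, as the new code does */
    printf("\r%3.0f%%", downloaded * 100.0 / total);
  fflush(stdout);
}

int main(void)
{
  double got;
  for(got = 0.0; got <= 1000.0; got += 100.0)
    show_progress(got, 1000.0);
  putchar('\n');
  return 0;
}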

View File

@@ -36,14 +36,14 @@ typedef enum {
TIMER_LAST /* must be last */
} timerid;
void pgrsDone(struct UrlData *data);
void pgrsStartNow(struct UrlData *data);
void pgrsSetDownloadSize(struct UrlData *data, double size);
void pgrsSetUploadSize(struct UrlData *data, double size);
void pgrsSetDownloadCounter(struct UrlData *data, double size);
void pgrsSetUploadCounter(struct UrlData *data, double size);
int pgrsUpdate(struct UrlData *data);
void pgrsTime(struct UrlData *data, timerid timer);
void Curl_pgrsDone(struct UrlData *data);
void Curl_pgrsStartNow(struct UrlData *data);
void Curl_pgrsSetDownloadSize(struct UrlData *data, double size);
void Curl_pgrsSetUploadSize(struct UrlData *data, double size);
void Curl_pgrsSetDownloadCounter(struct UrlData *data, double size);
void Curl_pgrsSetUploadCounter(struct UrlData *data, double size);
int Curl_pgrsUpdate(struct UrlData *data);
void Curl_pgrsTime(struct UrlData *data, timerid timer);
/* Don't show progress for sizes smaller than: */

Some files were not shown because too many files have changed in this diff.