Compare commits

...

470 Commits

Author SHA1 Message Date
Daniel Stenberg
5c2df3e1a4 7.10.7 2003-08-15 07:08:02 +00:00
Daniel Stenberg
6fc55467f4 removed lots of "added in [version]" where [version] is reasonably old 2003-08-15 06:35:41 +00:00
Daniel Stenberg
a147a07956 check for long long
changed the use of AC_CHECK_TYPE as the previous approach is deprecated
require 2.57 properly
2003-08-14 22:44:06 +00:00
Daniel Stenberg
a10581d459 Possible code for large file support, added within #if 0 so far. 2003-08-14 22:42:18 +00:00
Daniel Stenberg
cc2d6942bb new Russian mirror both web and download 2003-08-14 22:38:35 +00:00
Daniel Stenberg
3974c02bb1 support for the new memlimit stuff 2003-08-14 22:38:03 +00:00
Daniel Stenberg
09b5ddaea5 added one "added in blabla" and removed a few 2003-08-14 22:00:56 +00:00
Daniel Stenberg
acbcd68d89 Curl_SSL_InitSessions can return error, so check the return code and bail
out if necessary
2003-08-14 15:06:36 +00:00
Daniel Stenberg
4281470fca Curl_llist_destroy() checks the input for non-NULL 2003-08-14 15:06:08 +00:00
Daniel Stenberg
68a4aa6773 new proto for Curl_hash_init 2003-08-14 15:05:25 +00:00
Daniel Stenberg
905b160097 1. check allocs
2. don't leave allocated memory behind when returning error
2003-08-14 15:05:13 +00:00
Daniel Stenberg
52596c339b return failure when the host cache creation fails 2003-08-14 15:02:25 +00:00
Daniel Stenberg
73500267ee activate the new memory limit tests if requested
only set cookiejar if selected
2003-08-14 15:01:52 +00:00
Daniel Stenberg
e6011e33a6 return failure when an alloc function fails 2003-08-14 15:01:20 +00:00
Daniel Stenberg
3454319c17 prevent memory leak when going out of memory 2003-08-14 14:20:03 +00:00
Daniel Stenberg
02c78ecf81 allow out-of-memory testing by setting a limit. That number of memory
allocation calls will succeed, the following will return NULL!
2003-08-14 14:19:36 +00:00
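The two commits above describe the new out-of-memory testing: a limit is set, that many allocation calls succeed, and the following ones return NULL. A minimal sketch of the idea in C follows; it is not curl's actual memdebug code, and all names here are hypothetical.

```c
/* Hypothetical sketch of limit-based out-of-memory testing: the first
   `alloc_limit` allocations succeed, later ones fail on purpose so that
   error paths can be exercised by the test suite. */
#include <stdlib.h>

static long alloc_limit = -1;   /* -1 means "no limit set" */
static long alloc_count = 0;

void test_set_memlimit(long limit)
{
  alloc_limit = limit;
  alloc_count = 0;
}

void *test_malloc(size_t size)
{
  if(alloc_limit >= 0 && alloc_count >= alloc_limit)
    return NULL;                /* simulate an allocation failure */
  alloc_count++;
  return malloc(size);
}
```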
Daniel Stenberg
caca034302 better freeing when bailing out due to bad output glob 2003-08-14 13:38:19 +00:00
Daniel Stenberg
fb366ed35f free data on failure 2003-08-14 13:37:55 +00:00
Daniel Stenberg
b352ffca15 test87 verifies the new and better check for bad -o #[num] stuff 2003-08-14 13:37:32 +00:00
Daniel Stenberg
2d94856efd ignore the .pid files 2003-08-14 13:01:07 +00:00
Daniel Stenberg
ae66bd1284 ignore lib506 too 2003-08-14 13:00:34 +00:00
Daniel Stenberg
89d9d4e6c8 better report on why tests are skipped, and also show a count of the number
of test cases that were "considered".
2003-08-14 12:59:54 +00:00
Daniel Stenberg
fe60fc4730 In case the output urlglob file name returned is NULL, then there was
badness in the string and we help our user by returning an error.
2003-08-14 11:53:53 +00:00
Daniel Stenberg
46690d5e1c modified the #[num] code to be more robust, to return NULL on errors and
to support numbers larger than 9
2003-08-14 11:53:09 +00:00
Daniel Stenberg
beaea8cb25 corrected this test case 2003-08-14 11:51:22 +00:00
Daniel Stenberg
409ec90c85 test urlglobbing range requests 2003-08-14 11:50:58 +00:00
Daniel Stenberg
4d423eeabe test86 added 2003-08-14 11:50:40 +00:00
Daniel Stenberg
019e612225 David Byron's fix that makes this script use 'cygpath' instead of 'pwd' if
this runs on windows, to find out the current working directory.
2003-08-12 21:18:39 +00:00
Daniel Stenberg
6550d271f0 7.10.7-pre4 commit 2003-08-12 12:48:40 +00:00
Daniel Stenberg
c46da65263 nicer make target for the pdf conversion 2003-08-12 09:08:05 +00:00
Daniel Stenberg
b46745759b don't treat index.html as the generated HTML pages 2003-08-12 08:58:46 +00:00
Daniel Stenberg
9687571a84 added the new man pages 2003-08-12 08:58:23 +00:00
Daniel Stenberg
c13236de25 corrected return type 2003-08-12 08:58:15 +00:00
Daniel Stenberg
8ffbb6acd4 added the new curl_share_* man pages, the libcurl-easy, the libcurl-share,
made the generated pdf and html files get removed on 'make clean'. Made
the pdf conversion remove the temporary .ps files.
2003-08-12 08:51:23 +00:00
Daniel Stenberg
a3e5d81765 separated the easy-specific stuff into a new libcurl-easy.3 man page and
made the libcurl.3 one a more generic overview
2003-08-12 08:46:02 +00:00
Daniel Stenberg
e2aecfe80f added the asynchdns bit 2003-08-12 08:26:38 +00:00
Daniel Stenberg
a3c1248214 Bugfix from Serge Semashko that fixes a bug introduced when we applied his
NTLM patch. Test case 84 and 85 verify this.
2003-08-12 08:20:16 +00:00
Daniel Stenberg
b933639222 more auth tests 2003-08-12 08:19:23 +00:00
Daniel Stenberg
27619fc450 Added support for CURLINFO_HTTP_CONNECTCODE 2003-08-11 23:15:41 +00:00
Daniel Stenberg
96fecba190 bindlocal works for Windows! 2003-08-11 23:15:13 +00:00
Daniel Stenberg
50257d4f50 Check CURL_VERSION_ASYNCHDNS for feature output 2003-08-11 23:13:41 +00:00
Daniel Stenberg
3eb4ae031c Set the CURL_VERSION_ASYNCHDNS bit if USE_ARES is defined. 2003-08-11 23:13:09 +00:00
Daniel Stenberg
6a4ec3be81 Added CURLINFO_HTTP_CONNECTCODE
Added CURL_VERSION_ASYNCHDNS
2003-08-11 23:12:46 +00:00
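Both additions above are visible to applications: CURLINFO_HTTP_CONNECTCODE can be read with curl_easy_getinfo() and CURL_VERSION_ASYNCHDNS is a feature bit in curl_version_info(). A hedged sketch follows; the URL and proxy address are placeholders and error checking is omitted.

```c
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  long connectcode = 0;

  curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
  curl_easy_setopt(curl, CURLOPT_PROXY, "proxy.example.com:3128");
  curl_easy_perform(curl);

  /* response code of the proxy CONNECT request, 0 if no CONNECT was made */
  curl_easy_getinfo(curl, CURLINFO_HTTP_CONNECTCODE, &connectcode);
  printf("CONNECT response: %ld\n", connectcode);

  /* feature bit set when libcurl was built with asynchronous (ares) resolves */
  if(curl_version_info(CURLVERSION_NOW)->features & CURL_VERSION_ASYNCHDNS)
    printf("asynchronous DNS is supported\n");

  curl_easy_cleanup(curl);
  return 0;
}
```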
Daniel Stenberg
cc9ac6ad14 mention curl_version_info 2003-08-11 23:07:38 +00:00
Daniel Stenberg
644990a835 mention the pre3 release 2003-08-11 22:48:26 +00:00
Daniel Stenberg
d3b81ea3f7 Vincent Sanders's massive update of this example code. One could argue
whether this is still an "example" or a whole new API layer! ;-)
2003-08-11 21:34:52 +00:00
Daniel Stenberg
3660f67534 edits 2003-08-11 16:17:51 +00:00
Daniel Stenberg
203cc4a5c3 two more known bugs 2003-08-11 15:15:25 +00:00
Daniel Stenberg
c7be232fee added include "http.h" to prevent a warning 2003-08-11 14:55:30 +00:00
Daniel Stenberg
2617b379be define USE_ARES nicer if enabled 2003-08-11 13:18:06 +00:00
Daniel Stenberg
84ed5e755a use safefree instead 2003-08-11 12:30:21 +00:00
Daniel Stenberg
2f17615790 forgot the backslash 2003-08-11 12:26:18 +00:00
Daniel Stenberg
acfa131c8c memory leak fixed when re-using connections with proxy user+passwd 2003-08-11 12:25:30 +00:00
Daniel Stenberg
793d0e27e1 --proxy-ntlm added 2003-08-11 12:23:55 +00:00
Daniel Stenberg
fdf0c443c3 81 + 82 test NTLM proxy stuff 2003-08-11 12:23:33 +00:00
Daniel Stenberg
1b39b53321 remodeled the help text to avoid those annoying puts() problems when a
string reaches > 512 bytes...
2003-08-11 12:04:46 +00:00
Daniel Stenberg
1679993e3b CURLOPT_PROXYAUTH explained 2003-08-11 11:54:14 +00:00
Daniel Stenberg
4c831f8b68 CURLOPT_PROXYAUTH added by Serge Semashko 2003-08-11 11:48:01 +00:00
Daniel Stenberg
7a19923afa Serge Semashko added CURLOPT_PROXYAUTH support, and now NTLM for proxies
work.
2003-08-11 11:47:45 +00:00
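With CURLOPT_PROXYAUTH in place, an application can ask for NTLM against the proxy. A minimal sketch, with placeholder proxy address and credentials:

```c
#include <curl/curl.h>

/* sketch: enable NTLM authentication with an HTTP proxy */
static void use_ntlm_proxy(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_PROXY, "proxy.example.com:8080");
  curl_easy_setopt(curl, CURLOPT_PROXYUSERPWD, "user:secret");
  curl_easy_setopt(curl, CURLOPT_PROXYAUTH, CURLAUTH_NTLM);
}
```

On the command line, the corresponding switch is --proxy-ntlm, added in the commit a few entries below.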
Daniel Stenberg
3e122a765d Christian Beutenmueller corrected the CURLOPT_FILE referer, as we nowadays
call it CURLOPT_WRITEDATA.
2003-08-11 11:29:33 +00:00
Daniel Stenberg
d873ba8c9f added test80 2003-08-11 11:09:26 +00:00
Daniel Stenberg
8093338f39 tunnel through proxy, with both proxy and regular authentication 2003-08-11 11:09:03 +00:00
Daniel Stenberg
07660eea1e -Z and -@ no longer work; they are now officially freed up for other,
more frequently used options in a future release
2003-08-11 10:34:25 +00:00
Daniel Stenberg
a2b2d4cd5c added test 79, a basic test that fetches an FTP URL over an HTTP proxy 2003-08-11 10:12:35 +00:00
Daniel Stenberg
96e217b496 the new cookie functions that require 'data' passed in 2003-08-11 09:56:06 +00:00
Daniel Stenberg
2dd1518d63 support sending off cookies without contents 2003-08-11 09:55:48 +00:00
Daniel Stenberg
168703b7bf Added some infof() calls, that require the data pointer so now several
cookie functions need that.

I also fixed the cookie loader to properly load and deal with cookies without
contents (or rather with a blank content).
2003-08-11 09:55:11 +00:00
Daniel Stenberg
0f2d680f1f added Dirk Manske 2003-08-11 07:30:24 +00:00
Daniel Stenberg
b7930b6ebd removed the dashes 2003-08-11 07:28:42 +00:00
Daniel Stenberg
8fa43b469a documenting the share interface 2003-08-11 07:25:02 +00:00
Daniel Stenberg
894e52f61a removed the BUGS section 2003-08-11 07:24:21 +00:00
Daniel Stenberg
3c294691aa remove the BUGS section 2003-08-11 07:23:49 +00:00
Daniel Stenberg
acbf932861 fix lines that start with " 2003-08-11 07:23:19 +00:00
Daniel Stenberg
26f5c53be8 test case 506 added, written by Dirk Manske 2003-08-11 06:44:46 +00:00
Daniel Stenberg
8dd069604c Dirk Manske's bugfix for the share stuff 2003-08-11 06:30:02 +00:00
Daniel Stenberg
5dadbd094e don't claim the PASV connect is connected unless it *really* is! 2003-08-10 17:11:41 +00:00
Daniel Stenberg
514a8739b6 make sure the string is long enough 2003-08-08 17:56:47 +00:00
Daniel Stenberg
12e78a082e Gisle Vanem fixed a single-byte overflow 2003-08-08 17:18:21 +00:00
Daniel Stenberg
9273096a8a David Byron's fix for file:// URLs with drive letters included. 2003-08-08 17:12:04 +00:00
Daniel Stenberg
686c6133f8 chmod the cabundle file before we attempt to write to it, to make
make distcheck run fine
2003-08-08 11:13:18 +00:00
Daniel Stenberg
1d1276cc3a ftp create dirs work done 2003-08-08 11:05:18 +00:00
Daniel Stenberg
d987676ef0 added CURLOPT_FTP_CREATE_MISSING_DIRS 2003-08-08 11:04:35 +00:00
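A hedged sketch of the new option from an application's point of view: upload to an FTP path whose directories may not exist yet and let libcurl attempt to create them. The URL is a placeholder; the command-line equivalent is the --ftp-create-dirs flag from the nearby commits.

```c
#include <stdio.h>
#include <curl/curl.h>

/* sketch: upload a file, creating missing remote directories along the way */
static void upload_creating_dirs(CURL *curl, FILE *src)
{
  curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/new/dir/file.txt");
  curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
  curl_easy_setopt(curl, CURLOPT_READDATA, src);               /* read body from this FILE* */
  curl_easy_setopt(curl, CURLOPT_FTP_CREATE_MISSING_DIRS, 1L); /* issue MKD as needed */
}
```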
Daniel Stenberg
6e4658c89d ftp-create-dirs test when MKD fails 2003-08-08 10:32:08 +00:00
Daniel Stenberg
b7cbcf7434 --ftp-create-dirs 2003-08-08 10:26:08 +00:00
Daniel Stenberg
e347d06a49 introducing --ftp-create-dirs 2003-08-08 10:24:13 +00:00
Daniel Stenberg
2077e9365a --ftp-create-dirs test 2003-08-08 10:23:46 +00:00
Daniel Stenberg
6e3adc9b14 Support COUNT in the control file, to set the number of times the custom
REPLY is to be sent back before getting blanked and reverted to the built-in
action. Now, we can make CWD fail once and then succeed when retried.
2003-08-08 10:21:47 +00:00
Daniel Stenberg
7954eee639 re-arranged the cwd/mkd stuff a bit 2003-08-08 09:55:16 +00:00
Daniel Stenberg
f9f1f0e316 Early Ehlinger's CURLOPT_FTP_CREATE_MISSING_DIRS patch was applied 2003-08-08 09:13:19 +00:00
Daniel Stenberg
a9afe6aa84 new -z tests 2003-08-08 08:13:11 +00:00
Daniel Stenberg
6d36796135 corrected main meta data title 2003-08-08 07:07:15 +00:00
Daniel Stenberg
9e81fd5703 added CLEANFILES to make distcheck run fine 2003-08-07 14:14:54 +00:00
Daniel Stenberg
609059b6ec infilesize must be a long to work on 64bit archs 2003-08-07 13:20:58 +00:00
Daniel Stenberg
6af73f417a use 644 for the chmod 2003-08-07 06:43:11 +00:00
Daniel Stenberg
32468a0072 argh, it wasn't *that* easy to generate the ca-bundle header in the build
dir instead of the source dir, reverting that change
2003-08-06 23:59:15 +00:00
Daniel Stenberg
6800c45104 fixed syntax error 2003-08-06 23:56:24 +00:00
Daniel Stenberg
0d8c754ffd better cleaning up of memory in case of failures in the get-loop (it was
taken care of by the exit-free anyway but caused test case 75 and 76 to
report memory leaks).

Also re-indented a small section.
2003-08-06 23:48:08 +00:00
Daniel Stenberg
1b80276496 better cleaning up allocated memory in case of failures 2003-08-06 23:47:01 +00:00
Daniel Stenberg
bf9a138276 more tests 2003-08-06 23:45:59 +00:00
Daniel Stenberg
b3f9c636b9 new urlglob test 2003-08-06 23:10:58 +00:00
Daniel Stenberg
18975d44a6 minor cleanup 2003-08-06 23:10:36 +00:00
Daniel Stenberg
b201db5cec explain more how the test case number awareness is sent to the test server(s) 2003-08-06 22:47:55 +00:00
Daniel Stenberg
bbe23945e4 fix the treatment of the variable width specifier '*', which caused a bug
in the urlglobbing just now, fixed in the debian bug tracker as Bug#203827
2003-08-06 22:32:47 +00:00
Daniel Stenberg
bbdc0394ff make an uninstall hook in the same manner we already did an install hook,
as otherwise 'make distcheck' will fail
2003-08-06 22:15:12 +00:00
Daniel Stenberg
38a9b14965 chmod the hugehelp.c in the dist hook to make distcheck run fine 2003-08-06 22:14:39 +00:00
Daniel Stenberg
77ba0d3686 generate the ca-bundle.h in the build dir, and also make sure to chmod
the file in the dist-hook to make distcheck run fine
2003-08-06 22:14:16 +00:00
Daniel Stenberg
065c8d7a95 Domenico Andreoli fixed the section number in the main meta data 2003-08-06 21:23:42 +00:00
Daniel Stenberg
c704d1545c include "share.h" for the cookie sharing 2003-08-06 15:26:24 +00:00
Daniel Stenberg
62b65a5f20 make it build without ares support
make sure it sets async to false even when using ipv6 (this made test case 20
fail before)
2003-08-06 15:26:02 +00:00
Daniel Stenberg
665a7a3848 505 was missing 2003-08-06 13:49:20 +00:00
Daniel Stenberg
256b9f31e1 more fix 2003-08-06 13:22:29 +00:00
Daniel Stenberg
a3037e1173 updated the ares instruction 2003-08-06 13:21:19 +00:00
Daniel Stenberg
f3e7a5d755 LDFLAGS fix to make the GSSAPI build again 2003-08-05 15:22:15 +00:00
Daniel Stenberg
5f0cba7775 added README.ares 2003-08-05 14:54:15 +00:00
Daniel Stenberg
673759fe7e how to build with ares 2003-08-05 14:52:31 +00:00
Daniel Stenberg
b73612392d ares awareness/usage/support added. If configure --enable-ares is used, we
build libcurl to use ares for asynch name resolves.
2003-08-05 14:40:59 +00:00
Daniel Stenberg
f85935f0f9 Add --enable-ares support, which will make us build curl with ares for
asynch name resolves. Still very experimental, beware!
2003-08-05 13:37:29 +00:00
Daniel Stenberg
1e7e53c87e clean up the dir tree hierarchy in *_done() to make persistent connection
FTP use the correct directories!

Reported in bug report #783116
2003-08-05 13:04:10 +00:00
Daniel Stenberg
b9fdf3cc3b added test 146 for an FTP persistence test, as reported on the list 2003-08-05 13:00:00 +00:00
Daniel Stenberg
c462601362 persistent connection test 2003-08-05 12:59:23 +00:00
Daniel Stenberg
859877dcfc auth problems 2003-08-05 12:32:02 +00:00
Daniel Stenberg
c04ce95106 cleaned up after David Byron's comment on the libcurl list, aug 5 2003 2003-08-04 23:13:39 +00:00
Daniel Stenberg
98ee12bc35 Jan Sundin reported a case where curl ignored a cookie that browsers don't,
which turned out to be due to the number of dots in the 'domain'. I've now
made curl follow the original netscape cookie spec less strictly on that
part.
2003-08-04 23:05:57 +00:00
Daniel Stenberg
fdda786fa2 added test 73 2003-08-04 22:58:06 +00:00
Daniel Stenberg
831be4f4dd Verifies Jan Sundin's cookie bug, dated aug 4 2003. 2003-08-04 22:57:58 +00:00
Daniel Stenberg
41ae97e710 Dirk Manske's patch that introduces cookie support to the share interface. 2003-08-04 15:02:42 +00:00
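The share interface now covers cookies as well. A minimal sketch of sharing one cookie store between two easy handles; single-threaded use is assumed, so no lock callbacks are installed.

```c
#include <curl/curl.h>

int main(void)
{
  CURLSH *share = curl_share_init();
  CURL *one = curl_easy_init();
  CURL *two = curl_easy_init();

  /* share cookie data between all handles attached to this share object */
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);
  curl_easy_setopt(one, CURLOPT_SHARE, share);
  curl_easy_setopt(two, CURLOPT_SHARE, share);

  /* ... perform transfers; cookies received on one handle are visible
     to the other ... */

  curl_easy_cleanup(one);
  curl_easy_cleanup(two);
  curl_share_cleanup(share);
  return 0;
}
```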
Daniel Stenberg
f72ba7f79d Mark Fletcher provided an excellent bug report that identified a problem
with FOLLOWLOCATION and chunked transfer-encoding, as libcurl would not
properly ignore the body contents of a 3XX response that included the
Location: header.
2003-08-03 22:18:14 +00:00
Daniel Stenberg
296046510b serious info leakage! 2003-08-03 21:33:25 +00:00
Daniel Stenberg
db9f87f697 When proxy authentication is used in a CONNECT request (as used for all SSL
connects and otherwise enforced tunnel-thru-proxy requests), the same
authentication header is also wrongly sent to the remote host.

The name and password can then be captured by an evil host and possibly get
used for malicious purposes.
2003-08-02 23:36:35 +00:00
Daniel Stenberg
3270ea55dd updated as the second proxy-auth header was proof of a serious info leak
bug!!
2003-08-02 23:35:59 +00:00
Daniel Stenberg
a358ac24f4 Joerg Mueller-Tolk fixed a minor mistake 2003-08-01 14:20:48 +00:00
Daniel Stenberg
8bedd43b28 recent action 2003-08-01 12:33:19 +00:00
Daniel Stenberg
9ea2087ede David Byron's makefile fix to allow 7.10.6 to build fine using VC 2003-08-01 07:53:27 +00:00
Daniel Stenberg
9f7c634133 add a check for 'ar' since the lack of it bit Jared Ingersoll
we might need to check for some other tools too that on Solaris are put
in those weird dirs...
2003-07-30 15:10:26 +00:00
Daniel Stenberg
da20d68a12 removed silly target that only works when building from CVS 2003-07-30 14:26:36 +00:00
Daniel Stenberg
d3e512c738 Joerg Mueller-Tolk updated this to build fine with 7.10.6 2003-07-30 14:19:44 +00:00
Daniel Stenberg
339f84fe1f ftp proxy support would be nice 2003-07-30 13:41:59 +00:00
Daniel Stenberg
2d41b735ec updated to match the recent ftp patch that makes it check for resumability 2003-07-30 07:52:02 +00:00
Daniel Stenberg
e3b4dd08ff Daniel Noguerol made the ftp code output "Accept-Ranges: bytes" in a similar
style to other faked HTTP headers when NOBODY and HEADER are used.
2003-07-30 07:51:33 +00:00
Daniel Stenberg
6809a906bb Make sure to generate an uncompressed hugehelp.c file for inclusion in
the distribution archive, as it isn't certain that zlib is present everywhere. Those
who care much for compressed help should regenerate the file.
2003-07-30 07:33:41 +00:00
Daniel Stenberg
1c35cbcc07 Reverted the 'filetime' struct field back to a 'long' as time_t is sometimes
unsigned and we want this to be able to hold -1 for illegal/unset values.
2003-07-30 07:22:28 +00:00
Daniel Stenberg
5f8989a436 CURLDEBUG not MALLOCDEBUG 2003-07-29 11:07:38 +00:00
Daniel Stenberg
aa7b0648ff Fixes based on Gisle Vanem's input since this script failed due to
possibly crlf newlines.
2003-07-28 23:00:56 +00:00
Daniel Stenberg
2fbe61960f Digest *OR* Basic authorization test 2003-07-28 22:17:37 +00:00
Daniel Stenberg
bdb5e5a250 7.10.6 2003-07-28 12:13:48 +00:00
Daniel Stenberg
48a580e609 clear http->send_buffer when we have freed the memory it pointed to 2003-07-28 10:21:57 +00:00
Daniel Stenberg
1361fc69b9 updated to the new ftp dir parsing code that allows a preceding double
slash
2003-07-28 09:02:15 +00:00
Daniel Stenberg
93352e56d8 As noticed by Kevin Roth, we shall not speak of root dir when it isn't
necessarily the root...
2003-07-28 08:53:12 +00:00
Daniel Stenberg
d9246ff24d François Pons brought a patch that once again made curl deal with ftp and
"double slash" as indicating the root directory. In the RFC1738-fix of April
30, that ability was removed (since it is not the "right" way).
2003-07-28 08:50:02 +00:00
Daniel Stenberg
9301bc3444 use the correct 'test71' file name for the temp file 2003-07-28 08:23:46 +00:00
Daniel Stenberg
76352c4e2d got a bug report on -F in config files, so I wrote up this test to verify
that it works... and it did! ;-)
2003-07-28 08:21:07 +00:00
Daniel Stenberg
428f41bd12 having it in CVS causes us problems *grrr* 2003-07-25 09:46:07 +00:00
Daniel Stenberg
99c32e460f Andrés García updated with the added files etc 2003-07-25 08:59:55 +00:00
Daniel Stenberg
83f249cf65 With an unknown CA path, we undef the variable, to build properly without
SSL/CA.
2003-07-25 08:47:34 +00:00
Daniel Stenberg
2c2baa93ea only check for CA bundle path if built with SSL support
set a conditional for the makefile if we know the CA path or not
2003-07-25 08:47:10 +00:00
Daniel Stenberg
f0278ca114 Removed #include <sys/resource.h>, as pointed out by Henry Bland we don't
need it.
2003-07-25 08:30:58 +00:00
Daniel Stenberg
297b1b5013 the test compared numerically even though the value could contain a string, and I
lowered the number of retries to 10
2003-07-23 17:28:36 +00:00
Daniel Stenberg
e9f63bf4e8 When we re-use an existing connection we must make sure that we don't
accidentally re-use the connect_addr field, as that might no longer be
around. Fix verified by Tracy Boehrer who basically debugged and tracked down
this problem.
2003-07-23 17:06:21 +00:00
Daniel Stenberg
556ce1c6a1 minor code style fix 2003-07-23 12:55:24 +00:00
Daniel Stenberg
cc4ff62681 Split out the changes from the year 2002 into a separate file, named
CHANGES.2002.
2003-07-23 11:59:20 +00:00
Daniel Stenberg
0423fd9b55 SSLCERTS was moved into the docs/ directory 2003-07-23 11:39:05 +00:00
Daniel Stenberg
789ab20bf7 moved SSLCERTS into the docs/ directory 2003-07-23 11:38:19 +00:00
Daniel Stenberg
b47462bd68 Daniel Kouril's fix to make the GSS-Negotiate work fine. 2003-07-23 11:28:59 +00:00
Daniel Stenberg
1a94fee42d Juan F. Codagnone's fixes to build properly on Windows again 2003-07-23 08:21:21 +00:00
Daniel Stenberg
a91ce6a5d6 Plain default version of this file, to allow users to build easier from
CVS. This will be updated by the configure script, and a default is placed
here by the maketgz script.
2003-07-23 08:11:28 +00:00
Daniel Stenberg
981ffd9fce reversed the check for GSSAPI when requesting that auth 2003-07-22 11:15:46 +00:00
Daniel Stenberg
e76c960624 CURLDEBUG, not MALLOCDEBUG 2003-07-22 10:00:37 +00:00
Daniel Stenberg
416c92cc6f More support for NTLM on proxies, now proxy state and nonce is stored in
a separate struct properly.
2003-07-22 09:59:36 +00:00
Daniel Stenberg
fb731eb3e7 The NTLM functions now take a 'proxy' argument as well. 2003-07-22 09:58:57 +00:00
Daniel Stenberg
6f2a4d290f Added a separate struct for the proxyntlm data, as it will/can be different
than the remote server's. That is, both the server and the proxy can in
fact require NTLM auth.
2003-07-22 09:58:18 +00:00
Daniel Stenberg
cefc8ba938 CURLDEBUG is the symbol now 2003-07-22 09:57:09 +00:00
Daniel Stenberg
d0bd644eef Don't depend on the TIME_WITH_SYS_TIME define. win32 doesn't have sys/time.h
and I don't think we need it.
2003-07-22 08:23:16 +00:00
Daniel Stenberg
071c95128e moved the proxyuser and proxypasswd fields from the sessionhandle to the
connectdata to work as expected
2003-07-21 13:16:30 +00:00
Daniel Stenberg
1a192c489b adjusted to support NTLM for proxies 2003-07-21 13:16:01 +00:00
Daniel Stenberg
56014e74a0 krb4-fixes for the moved user+password fields within the structs 2003-07-21 09:19:48 +00:00
Daniel Stenberg
172271498d pre4-commit 2003-07-21 08:25:31 +00:00
Daniel Stenberg
f2882cb88c pre4 2003-07-21 08:25:21 +00:00
Daniel Stenberg
152f1fee40 the CWD-null bug fix 2003-07-21 07:54:20 +00:00
Daniel Stenberg
968234e6ae the fixed skip-blanks in the FTP CWD code called for this adjustment 2003-07-20 00:19:44 +00:00
Daniel Stenberg
5e133e2dff David Gardner pointed out in bug report 770755 that using the FTP command CWD
with a blank argument is a bad idea. Now skip blanks.
2003-07-20 00:18:11 +00:00
Daniel Stenberg
0049c09fc3 If NTLM is requested, only re-use connections that have the exact same
credentials.
2003-07-20 00:02:47 +00:00
Daniel Stenberg
a2a63c27f4 explains my fixes just committed 2003-07-19 23:58:21 +00:00
Daniel Stenberg
c50a601f1a modified to work fine with the new persistent-connection-capable test suite
HTTP server
2003-07-19 23:57:08 +00:00
Daniel Stenberg
bc0fd6db71 swsclose added 2003-07-19 23:56:44 +00:00
Daniel Stenberg
52b631fade Access the user and passwd fields from the connectdata struct now instead
of the sessionhandle struct, as that was not good.
2003-07-19 23:56:33 +00:00
Daniel Stenberg
2f0bc9d1f7 No longer stores user+password in the sessionhandle, now doing that in the
connectdata struct instead. Each being an allocated pointer.

The passwdgiven field was turned into a local variable in the only
function where it was being used.
2003-07-19 23:55:15 +00:00
Daniel Stenberg
5ef6520d4e fixed the CONNECT thing again 2003-07-19 23:54:15 +00:00
Daniel Stenberg
2c1925161e If the data contents contain the word 'swsclose', then this server will
disconnect the client after the response has been sent. This also happens
if the response is zero bytes long.

In all other cases (unless an error happens), it will now maintain the
connection to allow proper persistent connection testing. This was required
for the NTLM testing to work so I finally had to fix this. Of course most of
the existing HTTP tests will be adjusted to work with this new rule of test
file syntax for HTTP tests.

Also fixed the log function to deal with varargs for better logging.
2003-07-19 23:44:22 +00:00
Daniel Stenberg
0529b349d5 recent changes 2003-07-16 00:04:45 +00:00
Daniel Stenberg
b4620364a2 more fixes from Doug Kaufman for DJGPP builds for DOS 2003-07-15 23:47:25 +00:00
Daniel Stenberg
634aef3895 updated to work with Dan Winship's NTLM domain stuff fix 2003-07-15 23:38:06 +00:00
Daniel Stenberg
06c86d1a8c Moved the NTLM credentials to the connectdata struct instead, as NTLM
authenticates connections and not single requests. This should make it work
better when we mix requests from multiple hosts. Problem pointed out by
Cris Bailiff.
2003-07-15 23:36:50 +00:00
Daniel Stenberg
79749f8eb4 Fix to the endless loop of bad Basic authentication as reported in Cris
Bailiff's bug report 768275.
2003-07-15 23:06:02 +00:00
Daniel Stenberg
b036986b3e Dan Winship's patch added that makes use of DOMAIN\USER or DOMAIN/USER
for the user field. I changed it slightly to stay with strchr() only instead
of strpbrk() for portability reasons.
2003-07-15 22:58:36 +00:00
Daniel Stenberg
938f1d1da7 Dan Winship's fix to make the new auth stuff such as NTLM to work with
the multi interface
2003-07-15 22:46:01 +00:00
Daniel Stenberg
58b6b3df06 Dan Winship pointed out this flaw 2003-07-15 22:44:48 +00:00
Daniel Stenberg
f9c3347f7c re-use existing variable instead of declaring a new local one 2003-07-05 13:27:02 +00:00
Daniel Stenberg
5b72eb0b03 Some of Doug Kaufman's changes for the DOS port 2003-07-05 13:13:49 +00:00
Daniel Stenberg
6dd4c13bc0 the latest changes 2003-07-04 18:18:17 +00:00
Daniel Stenberg
e4e7db551f HAVE_SETVBUF removed, no longer used 2003-07-04 18:17:58 +00:00
Daniel Stenberg
ebfde8da56 removed 2003-07-04 18:15:53 +00:00
Daniel Stenberg
756bc0f4b7 Dan Grayson pointed out that we set the CURL_CA_BUNDLE variable wrongly in
the configure script. We set it differently now and generate the
lib/ca-bundle.h file entirely.
2003-07-04 18:15:25 +00:00
Daniel Stenberg
269d491b6a remove the usage of setvbuf() and use fflush() instead if no buffering should
be done on the output
2003-07-04 17:16:34 +00:00
Daniel Stenberg
449e5bc2ad CURLDEBUG not MALLOCDEBUG anymore 2003-07-04 16:37:16 +00:00
Daniel Stenberg
8736c11d84 adjusted to the NTLM updates 2003-07-04 16:36:31 +00:00
Daniel Stenberg
45fc760985 Peter Sylvester's patch was applied that introduces the following:
CURLOPT_SSL_CTX_FUNCTION to set a callback that gets called with OpenSSL's
   ssl_ctx pointer passed in, allowing the callback to act on it. If
   anything but CURLE_OK is returned, that will also be returned by libcurl
   all the way back. If this function changes the CURLOPT_URL, libcurl will
   detect this and instead go use the new URL.

   CURLOPT_SSL_CTX_DATA is a pointer you set to get passed to the callback set
   with CURLOPT_SSL_CTX_FUNCTION.
2003-07-04 16:29:23 +00:00
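A hedged sketch of the callback described above; the callback name is illustrative and its body is deliberately left empty, since what an application does with the SSL_CTX is up to it.

```c
#include <curl/curl.h>

/* called by libcurl right before the SSL connection is set up */
static CURLcode sslctx_callback(CURL *curl, void *ssl_ctx, void *userptr)
{
  (void)curl;
  (void)userptr;
  /* ssl_ctx is OpenSSL's SSL_CTX*; add extra certificates, ciphers, etc.
     Returning anything but CURLE_OK makes libcurl fail the transfer. */
  (void)ssl_ctx;
  return CURLE_OK;
}

static void setup(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, sslctx_callback);
  curl_easy_setopt(curl, CURLOPT_SSL_CTX_DATA, NULL); /* userptr for the callback */
}
```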
Daniel Stenberg
7968e3c2de David Byron's patch that allows a client to make the server quit with a
magic url.
2003-07-01 15:21:42 +00:00
Daniel Stenberg
964a41c75c new CVS info 2003-07-01 12:12:10 +00:00
Daniel Stenberg
5931d53637 Gisle Vanem found a lib handle leak in the ldap code 2003-07-01 10:12:52 +00:00
Daniel Stenberg
3ed3ae5bcf When I introduced the DIST_SUBDIRS usage, I broke the 'make install' for
include files and docs, so now I've added a custom install hook to run
make install for docs and install when data is installed at the top-level.
2003-06-27 14:37:38 +00:00
Sterling Hughes
6519cc70c5 revert out my bogus commit. ;-) 2003-06-26 21:30:48 +00:00
Sterling Hughes
505a4f27fa test commit 2003-06-26 21:17:29 +00:00
Daniel Stenberg
79144eba99 new tests 2003-06-26 11:45:04 +00:00
Daniel Stenberg
26e17d89c9 produce a skip-report at the end of all tests, and thus record and count
them properly
2003-06-26 11:44:01 +00:00
Daniel Stenberg
4322c1106f beautified and added comments all over 2003-06-26 11:42:54 +00:00
Daniel Stenberg
73071dfd4f mention the new flag bits we support 2003-06-26 11:41:24 +00:00
Daniel Stenberg
b7c14b3c27 mention that it copies the string you add 2003-06-26 11:41:08 +00:00
Daniel Stenberg
3130b44535 added lots, mostly the new auth-related option(s) 2003-06-26 11:40:44 +00:00
Daniel Stenberg
a2bd73334f added lots of auth stuff and updated other things too 2003-06-26 11:40:04 +00:00
Daniel Stenberg
1a393f5625 mention COOKIES, removed added entries, corrected the FPL-SSL link/reference 2003-06-26 11:38:53 +00:00
Daniel Stenberg
d4951e837e mention the other formats the docs come in 2003-06-26 11:37:13 +00:00
Daniel Stenberg
26f6365e93 adjusted to recent changes 2003-06-26 11:36:32 +00:00
Daniel Stenberg
3a552b1e63 we do support NTLM now... 2003-06-26 11:35:48 +00:00
Daniel Stenberg
69eb1790da CURLDEBUG is the symbol to use, no longer MALLOCDEBUG 2003-06-26 11:34:36 +00:00
Daniel Stenberg
a1af6f3614 adjusted the compressed generation to be more helpful in comments etc 2003-06-26 11:34:07 +00:00
Daniel Stenberg
3aced61465 support for the new auth stuff
more output on --version/-V
mention --manual on the help output text
2003-06-26 11:33:29 +00:00
Daniel Stenberg
6f02ddfce8 new httpauth support, changed filetime variable kind 2003-06-26 11:31:50 +00:00
Daniel Stenberg
c2faa39b62 added CURLOPT_HTTPAUTH support 2003-06-26 11:30:59 +00:00
Daniel Stenberg
2d3734b8b5 Adjusted to work properly with the new authentication stuff
Added code to deal with white spaces in relocation headers.
2003-06-26 11:30:26 +00:00
Daniel Stenberg
ed908b7f89 use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 11:28:26 +00:00
Daniel Stenberg
f7d795a364 use CURLDEBUG 2003-06-26 11:27:38 +00:00
Daniel Stenberg
8919b39d54 adjusted to use the same API as the OpenSSL version of the MD5 functions 2003-06-26 11:27:22 +00:00
Daniel Stenberg
84cedc094e added ntlm flag bits 2003-06-26 11:26:50 +00:00
Daniel Stenberg
3b2b2496d7 Many fixes, most of them based on comments by Eric Glass 2003-06-26 11:26:26 +00:00
Daniel Stenberg
445684c409 new proto for Curl_input_negotiate 2003-06-26 11:25:42 +00:00
Daniel Stenberg
898e067ccc kill warnings 2003-06-26 11:25:23 +00:00
Daniel Stenberg
12859e345f major adjustments to the new authentication support 2003-06-26 11:24:55 +00:00
Daniel Stenberg
89f4af695e include GSS in the debug string if available, support a few new flag
booleans
2003-06-26 11:22:48 +00:00
Daniel Stenberg
308bc9d919 use CURLDEBUG instead of MALLOCDEBUG for preprocessor conditions 2003-06-26 11:22:12 +00:00
Daniel Stenberg
db566c54ae use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 11:16:37 +00:00
Daniel Stenberg
81d403e207 one typecast less for the localtime(), use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:52:48 +00:00
Daniel Stenberg
2bd71d70ff use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:50:32 +00:00
Daniel Stenberg
1eef6f44ba CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:47:20 +00:00
Daniel Stenberg
204f03912f We now use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:45:15 +00:00
Daniel Stenberg
f8c3b3aa18 moved from former CVS 2003-06-26 06:21:29 +00:00
Daniel Stenberg
d4df981463 Added time_t 2003-06-26 06:19:37 +00:00
Daniel Stenberg
497c6d516d up to date with the actual situation 2003-06-25 23:40:48 +00:00
Daniel Stenberg
8288862b7e Cris Bailiff's patch that should make us do NTLM correctly. When we've
authenticated our connection, we can continue without any Authorization:
headers as long as our connection is maintained.
2003-06-13 10:15:55 +00:00
Daniel Stenberg
9aae16c236 stdout is good enough 2003-06-13 09:09:04 +00:00
Daniel Stenberg
80c194a70a work more on pids, less on pidfiles to be able to do better kills at the
end of the test where the pidfiles aren't found, but "our" server is running
2003-06-13 09:04:08 +00:00
Daniel Stenberg
c832b2db5b fixed NTLM test 67, added test 68 for bad NTLM name/password 2003-06-13 08:03:45 +00:00
Daniel Stenberg
27018882ec Cris Bailiff's bugfix 2003-06-13 07:56:38 +00:00
Daniel Stenberg
caf6e9c540 use more curlish strings, these should be able to change... 2003-06-13 07:14:46 +00:00
Daniel Stenberg
e727fb82f2 Marty Kuhrt's #include fixes for VMS 2003-06-13 06:48:04 +00:00
Daniel Stenberg
c78df56801 get and use only the first line of the curl --version output 2003-06-12 23:05:12 +00:00
Daniel Stenberg
d13202f43b modified 2003-06-12 23:03:08 +00:00
Daniel Stenberg
9d139a6b35 Make the HTTP auth stuff work, Dan Fandrich made --version output a list
of all supported protocols.
2003-06-12 23:02:36 +00:00
Daniel Stenberg
d2abe44e6f remove the dumpit file after use 2003-06-12 19:17:08 +00:00
Daniel Stenberg
bc67228576 corrected a comment 2003-06-12 17:40:56 +00:00
Daniel Stenberg
ecf32c964a CURLHTTP* renamed to CURLAUTH* and NEGOTIATE is now GSSNEGOTIATE as there's
a "plain" Negotiate as well.
2003-06-12 17:34:27 +00:00
Daniel Stenberg
e58f30b82a NTLM test case 2003-06-12 16:39:35 +00:00
Daniel Stenberg
654e3f1101 require the netrc_debug feature the same way we now can require SSL
present client-side
2003-06-12 16:38:14 +00:00
Daniel Stenberg
86689dc524 now test cases can be set to be dependent on the presence of "SSL" in the
client/library
2003-06-12 16:22:52 +00:00
Daniel Stenberg
5f62a0c1ca make it build with older OpenSSL 2003-06-12 13:55:40 +00:00
Daniel Stenberg
ad1bf0f389 attempt to make older OpenSSL versions work with the DES stuff 2003-06-12 13:18:10 +00:00
Daniel Stenberg
9c7703ace1 Based on Dan Fandrich's patch and gzip unpack function, we now compress
the 'hugehelp' text if libz and gzip are available at build time.
2003-06-12 12:54:34 +00:00
Daniel Stenberg
4a8155b53c store HAVE_LIBZ as an automake conditional 2003-06-12 12:53:38 +00:00
Daniel Stenberg
80d6d5c5c4 fixing details for NTLM 2003-06-11 16:14:45 +00:00
Daniel Stenberg
c624be8388 more how I envision it _should_ work, but it still doesn't... 2003-06-11 15:33:09 +00:00
Daniel Stenberg
09df1cd41e to support "redirects" after the full body is transferred 2003-06-11 15:31:40 +00:00
Daniel Stenberg
52c5b57200 made a nicer output for the decode test, as it served as a nice tool for me ;-) 2003-06-11 15:31:06 +00:00
Daniel Stenberg
5ea04a852e when we get the auth headers, we still need to read out the full body response
so that we can re-send requests on the same connection nicely
2003-06-11 15:30:30 +00:00
Daniel Stenberg
a2eef05198 correct mistakes 2003-06-11 14:05:13 +00:00
Daniel Stenberg
55f75af353 describe the NTLM mechanism too 2003-06-11 13:44:58 +00:00
Daniel Stenberg
fb6a51b8fd basic NTLM support 2003-06-11 13:44:31 +00:00
Daniel Stenberg
252cc2213e ntlm added 2003-06-11 13:42:53 +00:00
Daniel Stenberg
73c5f24fa4 Initial take at NTLM authentication. It doesn't really work at this point
but the infrastructure is there.
2003-06-11 13:38:55 +00:00
Daniel Stenberg
4c80e103a0 clarify the CUSTOMREQUEST and HTTPHEADER options slightly 2003-06-10 13:06:38 +00:00
Daniel Stenberg
39ea557360 CURLOPT_HTTPAUTH docu 2003-06-10 12:58:40 +00:00
Daniel Stenberg
d0cc92a01a Set auth type differently, we use one CURLOPT_HTTPAUTH instead as we plan
to add more methods in the future.
2003-06-10 12:49:16 +00:00
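A minimal sketch of the consolidated option: the CURLAUTH_* bits select the authentication method(s), and the credentials shown are placeholders.

```c
#include <curl/curl.h>

/* sketch: ask for HTTP Digest authentication via the single HTTPAUTH option */
static void use_digest(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_USERPWD, "user:secret");
  curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_DIGEST);
  /* or e.g. CURLAUTH_BASIC | CURLAUTH_NTLM to let libcurl pick one */
}
```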
Daniel Stenberg
d7980c1a45 Daniel Kouril for the HTTP negotiate stuff 2003-06-10 12:25:01 +00:00
Daniel Stenberg
e56ae1426c Daniel Kouril's patch that adds HTTP negotiation support to libcurl was
added.
2003-06-10 12:22:19 +00:00
Daniel Stenberg
696843c020 we fix more 2003-06-10 12:07:10 +00:00
Daniel Stenberg
6ff5621dd7 more generic 2003-06-10 12:05:12 +00:00
Daniel Stenberg
e7fb72a732 Pass the error stream pointer to the URL globber, so that it can report
errors correctly to the user, if need be.

Also fixed so that a missing ] in the globbing process no longer leads
to a core dump.
2003-06-10 09:42:22 +00:00
Daniel Stenberg
8d30d34e0c When doing very big GET requests over HTTPS, we need to add some extra
funky logic in order to make re-tries work fine with OpenSSL. This corrects
the problem David Orrell noticed.
2003-06-06 14:58:26 +00:00
Daniel Stenberg
bc7fe85f8a Just moved around some logic in Curl_write() to make it easier to debug. 2003-06-06 14:56:50 +00:00
Daniel Stenberg
89352d92c5 spellfix 2003-06-06 06:44:05 +00:00
Daniel Stenberg
c32390d84c Reversed the logic to only include the <sys/select.h> header on systems
known to really NEED it as another system that doesn't have it came up:
very old Linux libc5-based systems (in addition to all HPUX versions).

The only known system at this point is AIX.
2003-06-05 14:04:44 +00:00
Daniel Stenberg
45ca866a2d LDAP problem added as mention in bug report #735752 2003-06-03 08:10:53 +00:00
Daniel Stenberg
ceef206c21 include the time headers just like we used to do in the curl/curl.h header
once upon a time
2003-06-03 08:07:06 +00:00
Daniel Stenberg
7c6424f0a9 we want the time defines too 2003-06-03 08:06:23 +00:00
Daniel Stenberg
bc942de6f1 Content-Length: now overrides other means of knowing when the stream has
ended.
2003-06-03 07:53:18 +00:00
Daniel Stenberg
06984df5cb Make the Content-Length info override the Connection: close header, so that
libcurl will stop reading when the number of bytes has arrived and not wait
for a closed socket.
2003-06-02 14:57:08 +00:00
Daniel Stenberg
4f136a3a76 the 500-599 test case range 2003-06-02 14:48:27 +00:00
Daniel Stenberg
363bf3ba30 ignore more 2003-06-02 13:55:40 +00:00
Daniel Stenberg
acb895956a ignore 2003-06-02 13:53:13 +00:00
Daniel Stenberg
21e87b9bb3 David Byron's fix to get the progress-bar to use the local file size too when
doing a resumed download.
2003-06-02 13:42:42 +00:00
Daniel Stenberg
c896ebcf12 makefile fiddle
changed how http requests are sent - now in one chunk more often
HPUX include fix in the external headers
better SSL work-arounds for bad SSL servers
modified error message when CURLE_HTTP_RETURNED_ERROR is returned
2003-06-02 13:31:25 +00:00
Daniel Stenberg
d288222e80 work-around SSL implementation flaws better, pointed out in bug report
#745122.
2003-06-02 13:27:03 +00:00
Daniel Stenberg
4eb2a6c9a3 make a more descriptive error message when CURLE_HTTP_RETURNED_ERROR is
returned
2003-06-02 13:14:57 +00:00
Daniel Stenberg
2563731c4d haven't updated this in a loooong time 2003-05-28 10:24:20 +00:00
Daniel Stenberg
4e410111db Posting static data using POST and chunked encoded now also appends the
data to the initial request buffer, if the total post data is less than
100K.
2003-05-28 07:54:33 +00:00
Daniel Stenberg
5670563a26 include sys/time.h, we didn't have a time() proto anymore. Did one of the
changes in curl/curl.h make this occur?
2003-05-27 22:56:01 +00:00
Daniel Stenberg
6caa656d01 Documented which rules the public headers must follow when we write
preprocessor checks for conditions.
2003-05-27 12:51:46 +00:00
Daniel Stenberg
c12af7aed1 oops, removed a # too many 2003-05-27 12:51:15 +00:00
Daniel Stenberg
dcb6d1c01d remove usage of HAVE_* defines, we cannot and shall not depend on any such
defines in the public external header files
2003-05-27 12:45:51 +00:00
Daniel Stenberg
18234cbdac sys/select.h is not present on HPUX, avoid including it 2003-05-27 12:34:48 +00:00
Daniel Stenberg
06bf988dc1 made it work ;-) 2003-05-27 12:18:00 +00:00
Daniel Stenberg
55ff4c3f08 if cvs update fails, attempt again after 5 seconds and retry 50 times
before giving up
2003-05-27 12:03:24 +00:00
Daniel Stenberg
4915002168 Only build in lib and src by default, make the others dist-subdirs.
Make the test stuff get built when we run 'make test' instead.
2003-05-27 08:51:09 +00:00
Daniel Stenberg
5bd8d60e41 Rudy Koento experienced problems with curl's recent habit of POSTing data in
two separate send() calls, first the headers and then the data. I've now made
a fix so that for static and known content that isn't to be chunked-encoded,
everything is now sent in one single system call again. This is also better
for network performance reasons.
2003-05-27 08:33:08 +00:00
Daniel Stenberg
fc872808c5 runs on DOS now 2003-05-27 07:37:34 +00:00
Daniel Stenberg
0f4feda382 include file flaw and yet another socks5-fix 2003-05-27 06:41:06 +00:00
Daniel Stenberg
90b0f38316 Another socks5-fix. Using a socks-proxy is not the same as using an http
proxy, so we must make sure to better check for http proxies before we do
HTTP proxy stuff. This includes authorization and URI usage in the request
etc.
2003-05-27 06:28:25 +00:00
Daniel Stenberg
18f630ab21 CURLOPT_HTTPDIGEST is added 2003-05-27 06:25:56 +00:00
Daniel Stenberg
e97fd44151 language 2003-05-26 12:32:22 +00:00
Daniel Stenberg
b75679778f ftp ASCII transfers in general need fixing 2003-05-26 08:19:06 +00:00
Daniel Stenberg
35a84ad576 Chris Lewis mentioned that he doesn't get WIN32 defined, only _WIN32 so we
make an adjustment to catch this.
2003-05-26 07:57:53 +00:00
Daniel Stenberg
4ed28be75a even more 2003-05-23 11:24:39 +00:00
Daniel Stenberg
e2f4656a86 Ricardo Cadime found a socket leak when listing directories without
contents. Test cases 144 and 145 were added to verify the fix.

Now we deal with return code 450 properly and other codes also do proper
cleanup.
2003-05-23 11:14:09 +00:00
Daniel Stenberg
1e14da5c60 more ftp testing using NLST and no contents and bad return code 2003-05-23 11:10:35 +00:00
Daniel Stenberg
b2ef79ef3d Rudy Koento's problem fixed, test case 66 verifies this. 2003-05-23 09:47:57 +00:00
Daniel Stenberg
f488874ff5 test 66 returns one line of data with no header (HTTP) 2003-05-23 09:46:19 +00:00
Daniel Stenberg
23258648da --digest added, --compressed rephrased 2003-05-23 08:06:31 +00:00
Daniel Stenberg
6b84ebe501 include digest.h for proto 2003-05-23 06:44:24 +00:00
Daniel Stenberg
07dd067f73 DJGPP fix by Gisle Vanem 2003-05-23 06:43:14 +00:00
Daniel Stenberg
420744d048 more more more 2003-05-22 22:47:48 +00:00
Daniel Stenberg
01108e3a63 warning-free is better 2003-05-22 22:45:38 +00:00
Daniel Stenberg
8026b1e194 Introducing --digest 2003-05-22 22:40:01 +00:00
Daniel Stenberg
a39d77227f Better Digest stuff 2003-05-22 22:39:38 +00:00
Daniel Stenberg
9f69deec7d Added CURLOPT_HTTPDIGEST support
SOCKS5 fix as suggested by Jis in bugreport #741841.
2003-05-22 22:38:46 +00:00
Daniel Stenberg
e912f772e0 Document the <dataNUM> thing we use, 2003-05-22 22:37:00 +00:00
Daniel Stenberg
0102726aeb Digest support added 2003-05-22 22:36:39 +00:00
Daniel Stenberg
1e7aa04040 Digest testing added 2003-05-22 22:36:22 +00:00
Daniel Stenberg
00a7c6fe6b proper header added 2003-05-22 16:23:27 +00:00
Daniel Stenberg
87f8c0d471 hush the compiler 2003-05-22 16:12:30 +00:00
Daniel Stenberg
334d78cd18 Initial Digest support. At least partly working. 2003-05-22 16:09:54 +00:00
Daniel Stenberg
2356325592 David Balazic pointed out the lack of checks for a valid %XX code when
we unescape a string. We now check and decode only valid %XX strings.
2003-05-21 15:53:59 +00:00
Daniel Stenberg
d78ec593fa fix the makefile in packages/DOS too 2003-05-21 08:12:52 +00:00
Daniel Stenberg
d5043133e6 Gisle Vanem made curl build with djgpp on DOS. 2003-05-21 08:08:48 +00:00
Daniel Stenberg
509f69a457 Gisle Vanem's fix to make the 'curl -M' output nicer 2003-05-21 07:21:44 +00:00
Daniel Stenberg
662c659220 missing semicolon, by Gisle Vanem 2003-05-20 12:44:55 +00:00
Daniel Stenberg
9a6566e774 Gisle Vanem's code for not trusting h_aliases to always be non-NULL 2003-05-20 09:41:39 +00:00
Daniel Stenberg
4da0428d9e Remind about the gpg command lines 2003-05-20 06:33:13 +00:00
Daniel Stenberg
8ee1177206 support user name and password in proxy environment variables 2003-05-19 13:14:26 +00:00
Daniel Stenberg
e9154b2549 the proxy environment variables now may contain user name and password 2003-05-19 13:09:41 +00:00
Daniel Stenberg
d398a0dd58 remove debug output 2003-05-19 13:08:48 +00:00
Daniel Stenberg
7723a24297 setenv support added to allow test cases to require a set of environment
variables
2003-05-19 13:06:10 +00:00
Daniel Stenberg
95a4b8db68 7.10.5 commit 2003-05-19 11:45:10 +00:00
Daniel Stenberg
663c1898a3 known AIX ipv6 problems 2003-05-16 10:57:53 +00:00
Daniel Stenberg
465de793e8 Skip any preceding dots from the domain name of cookies when we keep them
in memory, only add it when we save the cookie. This makes all tailmatching
and domain string matching internally a lot easier.

This was also the reason for a remaining bug I introduced in my overhaul.
2003-05-15 22:28:19 +00:00
Daniel Stenberg
de9b76cef0 change the order of the in_addr_t tests, so that 'unsigned long' is tested
for first, as it seems to be what many systems use
2003-05-15 21:13:36 +00:00
Daniel Stenberg
1747a8d3d9 1. George Comninos' progress meter fix
2. I also added the pre-releases and dates to the log
2003-05-15 08:13:19 +00:00
Daniel Stenberg
1094e79749 documented CURLOPT_FTP_USE_EPRT 2003-05-14 09:03:51 +00:00
Daniel Stenberg
22569681bc George Comninos provided a fix that calls the progress meter when waiting
for FTP command responses takes >1 second.
2003-05-14 06:31:00 +00:00
Daniel Stenberg
e615d117a0 Setup and use CURL_INADDR_NONE all over instead of INADDR_NONE. We setup
the define accordingly in the hostip.h header to work nicely all over.
2003-05-13 12:12:17 +00:00
Daniel Stenberg
a51258b6bb before using if2ip(), check if the address is an ip address and skip it if
it is.
2003-05-13 12:11:31 +00:00
Daniel Stenberg
8894bd07b6 libtool 1.4.2 is enough 2003-05-13 09:38:09 +00:00
Daniel Stenberg
ec45a9e825 fix comment 2003-05-13 09:37:45 +00:00
Daniel Stenberg
871358a6e5 before checking for network interfaces using if2ip(), check that the given
name isn't an ip address
2003-05-12 13:06:48 +00:00
Daniel Stenberg
2e2e0fba60 no more complaining when I have 1.5 and it tests for 1.4.2 2003-05-12 13:05:11 +00:00
Daniel Stenberg
4a5139e3f4 fixes from the last week+ 2003-05-12 12:49:22 +00:00
Daniel Stenberg
8f85933d7c Dan F clarified the CURLOPT_ENCODING description after his changes to
allow "" to enable all support formats.
2003-05-12 12:47:35 +00:00
Daniel Stenberg
246f3a63f6 Dan Fandrich added --compressed docu 2003-05-12 12:46:45 +00:00
Daniel Stenberg
e99eff4eb0 setting ENCODING to "" means enable-all-you-support 2003-05-12 12:45:57 +00:00
Daniel Stenberg
c0197f19cf Dan Fandrich changed CURLOPT_ENCODING to select all supported encodings if
set to "".  This frees the application from having to know which encodings
 the library supports.
2003-05-12 12:45:14 +00:00
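A one-line sketch of the behavior described above: an empty string asks for every Content-Encoding the libcurl build supports, instead of naming "deflate" or "gzip" explicitly.

```c
#include <curl/curl.h>

/* sketch: accept any encoding this libcurl was built with */
static void accept_all_encodings(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_ENCODING, "");
}
```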
Daniel Stenberg
3994d67eea Dan Fandrich lowered the libtool requirement 2003-05-12 12:38:52 +00:00
Daniel Stenberg
9ead79c9d4 when we have accepted the server's connection in a PORT sequence, we set
the new socket to non-blocking
2003-05-12 12:37:35 +00:00
Daniel Stenberg
9371aed46c avoid the write loop 2003-05-12 12:37:05 +00:00
Daniel Stenberg
940707ad66 incoming proxy headers shall be sent to the debug function as HEADERs, not
DATA
2003-05-12 12:29:00 +00:00
Daniel Stenberg
e6c267fb4c oops, run libtoolize as the first tool 2003-05-09 08:17:41 +00:00
Daniel Stenberg
93538fccd6 run libtoolize too 2003-05-09 08:13:02 +00:00
Daniel Stenberg
83a7fad308 run libtoolize to generate these files 2003-05-09 08:12:46 +00:00
Daniel Stenberg
3c7e33388e CURLOPT_FTP_USE_EPRT added 2003-05-09 07:42:47 +00:00
Daniel Stenberg
7b0f35edb6 --disable-eprt added 2003-05-09 07:39:50 +00:00
Daniel Stenberg
94a157d0b0 support for CURLOPT_FTP_USE_EPRT added 2003-05-09 07:39:29 +00:00
Daniel Stenberg
ca04620253 AIX wants sys/select.h 2003-05-09 07:37:27 +00:00
Daniel Stenberg
073ef0b36a clarify on the curl name issue and that there may be other libcurl-based
tools that provide a GUI
2003-05-09 07:07:13 +00:00
Daniel Stenberg
c41c05d4f4 Kevin Delafield reported another case where we didn't correctly check for
EAGAIN but only EWOULDBLOCK, which caused badness on HPUX. We now also
check for, and act the same on, EINTR errors.
2003-05-06 08:19:36 +00:00
Daniel Stenberg
f1ea54e07a fixed the required tools' version numbers 2003-05-05 14:19:54 +00:00
Daniel Stenberg
a139ce901a the writable argv check now should not exit when building a cross-compiled
curl
2003-05-04 16:07:19 +00:00
Daniel Stenberg
7431957113 put back the libtool test, now for 1.5
require autoconf 2.57
require automake 1.7
2003-05-03 16:25:49 +00:00
Daniel Stenberg
1752d80915 If there is a custom Host: header specified, we use that host name to
extract the correct set of cookies to send. This functionality is verified
by test case 62.
2003-05-02 09:13:19 +00:00
Daniel Stenberg
aa7420e109 send correct cookies when using a custom Host: 2003-05-02 09:12:26 +00:00
Daniel Stenberg
a290d4b9db fixed the format slightly 2003-05-02 09:11:53 +00:00
Daniel Stenberg
19a4314e7f corrected a comment about gzip not being supported 2003-05-01 17:49:47 +00:00
Daniel Stenberg
d166e85e0a FTP URL with type=a 2003-05-01 17:48:59 +00:00
Daniel Stenberg
f213e857ab Andy Cedilnik fixed some compiler warnings 2003-05-01 13:37:36 +00:00
Daniel Stenberg
eb6130baa7 ourerrno became Curl_ourerrno() and is now available to all libcurl 2003-05-01 13:37:05 +00:00
Daniel Stenberg
f69ea2c68a Use the proper Curl_ourerrno() function instead of plain errno, for better
portability. Also use Andy Cedilnik's compiler warning fixes.
2003-05-01 13:36:28 +00:00
Daniel Stenberg
078441d477 the test numbers are now only for human readability, the numbers no longer
enforce protocol/server
2003-04-30 20:29:31 +00:00
Daniel Stenberg
95f6b15a67 no longer assume that the test number implies servers to run 2003-04-30 20:28:49 +00:00
Daniel Stenberg
ee29dbdb8f Each test case now specifies which server(s) it needs, without relying on the
test number.
2003-04-30 20:25:39 +00:00
Daniel Stenberg
15f3f4c93f we say welcome to test 142 2003-04-30 20:08:01 +00:00
Daniel Stenberg
6932e94e0e verify that curl fails fine when an FTP URL with a too deep dir hierarchy
is used
2003-04-30 20:07:37 +00:00
Daniel Stenberg
3ef06d7efe when making up the list of path parts, save the last entry pointing to NULL
as otherwise we'll go nuts
2003-04-30 20:04:17 +00:00
Daniel Stenberg
fb012b48e9 recent action 2003-04-30 20:01:22 +00:00
Daniel Stenberg
bc77bf217f if there's a cookiehost allocated, free that too 2003-04-30 19:58:36 +00:00
Daniel Stenberg
37d1e9351e ok, make the test run ok too 2003-04-30 19:56:53 +00:00
Daniel Stenberg
4494c0dee0 various new cookie tests with a custom Host: header set 2003-04-30 19:49:51 +00:00
Daniel Stenberg
26afc604ac modified to work with modified code 2003-04-30 17:16:25 +00:00
Daniel Stenberg
9aefcada19 modified to produce nicer output when a single test fails 2003-04-30 17:15:38 +00:00
Daniel Stenberg
69fc363760 make the diffs with 'diff -u' to make them nicer and easier to read 2003-04-30 17:15:00 +00:00
Daniel Stenberg
bea02ddebe stop parsing Host: host names at colons too 2003-04-30 17:12:29 +00:00
Daniel Stenberg
3fb257c39c modified to the new cookie function proto 2003-04-30 17:05:19 +00:00
Daniel Stenberg
7c96c5a39b extract host name from custom Host: headers to use for cookies 2003-04-30 17:04:53 +00:00
Daniel Stenberg
efd836d971 Many cookie fixes:
o Save domains in jars like Mozilla does. It means all domains set in
  Set-Cookie: headers are dot-prefixed.
o Save and use the 'tailmatch' field in the Mozilla/Netscape cookie jars (the
  second column).
o Reject cookies using illegal domains in the Set-Cookie: line. Concerns
  both domains with too few dots and domains that are outside the currently
  operating server host's domain.
o Set the path part by default to the one used in the request, if none was
  set in the Set-Cookie line.
2003-04-30 17:03:43 +00:00
Daniel Stenberg
836aaa1647 changes need for the new ftp path treatment and the new cookie code 2003-04-30 17:01:00 +00:00
Daniel Stenberg
bf2b3dbf3e David Balazic's patch to make the FTP operations "do right" according to
RFC1738, which means it'll use one CWD for each pathpart.
2003-04-30 16:59:42 +00:00
Daniel Stenberg
b4fa2ff995 two more platforms Rich Gray built curl on 2003-04-30 07:32:43 +00:00
Daniel Stenberg
2f9cabc30b Peter Kovacs provided a patch that makes the CURLINFO_CONNECT_TIME work fine
when using the multi interface (too).
2003-04-29 18:03:30 +00:00
Daniel Stenberg
63593f5597 mention configure --help 2003-04-29 16:55:17 +00:00
Daniel Stenberg
c0acaa5d2c CURLOPT_FTPPORT could support port number too 2003-04-28 17:29:32 +00:00
Daniel Stenberg
2e46f8d0a6 corrected the comment which wasn't correct 2003-04-28 13:48:16 +00:00
Daniel Stenberg
51da6aaa07 RSAglue.lib is no longer needed with recent OpenSSL versions 2003-04-25 15:08:46 +00:00
Daniel Stenberg
c8b79e36db Dan Fandrich added support for the gzip Content-Encoding for --compressed 2003-04-24 06:34:31 +00:00
Daniel Stenberg
208374bcc9 Bryan Kemp's reported problems with curl and PUT from stdin and a faked
content-length made me add test case 60, that does exactly this, but it
seems to run fine...
2003-04-23 12:09:58 +00:00
Daniel Stenberg
7f0a6e7203 last 10 days or so 2003-04-22 23:30:04 +00:00
Daniel Stenberg
54ebb9cfd4 libtool 1.5 stuff 2003-04-22 23:29:27 +00:00
Daniel Stenberg
49e9c1495b stop checking for libtool, we don't run that in this script 2003-04-22 23:26:00 +00:00
Daniel Stenberg
a84b0fbd52 Dan Fandrich corrected the error messages on "bad encoding". 2003-04-22 22:33:39 +00:00
Daniel Stenberg
c95814c04d Dan Fandrich's gzip bugfix 2003-04-22 22:32:02 +00:00
Daniel Stenberg
9f8123f1b8 Dan Fandrich's fix 2003-04-22 22:31:02 +00:00
Daniel Stenberg
8b23db4f4d Peter Sylvester pointed out that curl_easy_setopt() will always (wrongly)
return CURLE_OK no matter what happens.
2003-04-22 21:42:39 +00:00
Daniel Stenberg
d77cc13374 two dashes is enough 2003-04-16 12:46:20 +00:00
Daniel Stenberg
9a12db1aa2 typecast the setting of the size, as it might be an off_t which is bigger
than long and libcurl expects a long...
2003-04-15 14:18:37 +00:00
Daniel Stenberg
eb54d34bec If MALLOCDEBUG, include the lib's setup.h here so that the proper defines
are set before all system headers, as otherwise we get compiler warnings
on my Solaris at least.
2003-04-15 14:01:57 +00:00
Daniel Stenberg
4b1203d4c9 include config.h before all system headers, so that _FILE_OFFSET_BITS and
similar is set properly by us first
2003-04-15 13:32:26 +00:00
Daniel Stenberg
183a9c6244 extended the -F section 2003-04-15 09:58:27 +00:00
Daniel Stenberg
1f2294d585 treat uploaded .html files as text/html by default 2003-04-15 09:29:39 +00:00
Daniel Stenberg
0b839c4f77 return the same error for the sslv2 "certificate verify failed" code 2003-04-14 22:00:36 +00:00
Daniel Stenberg
1d4fd1fcae new wording by Kevin Roth 2003-04-14 14:54:18 +00:00
Daniel Stenberg
b1d8d72c16 ignore all stamp-h* 2003-04-14 13:09:44 +00:00
Daniel Stenberg
bafb68b844 With the recent fix of libcurl, it shall now return CURLE_SSL_CACERT when
it had problems with the CA cert and thus we offer a huge blurb of verbose
help to explain to the poor user why this happens.
2003-04-14 13:09:09 +00:00
Daniel Stenberg
21873b52e9 Restored the SSL error codes since they were broken in the 7.10.4 release,
also now attempt to detect and return the specific CACERT error code.
2003-04-14 12:53:29 +00:00
Daniel Stenberg
0aa8b82871 FTP CWD response fixed
gzip content-encoding added
chunked content-encoding fixed
2003-04-14 07:13:08 +00:00
Daniel Stenberg
f9781afafd clarified the CURLINFO_SIZE_DOWNLOAD somewhat on Juan F. Codagnone's
suggestion
2003-04-11 16:52:30 +00:00
Daniel Stenberg
fece361a55 Nic fixed so that Curl_client_write() must not be called with 0 length data.
I edited somewhat and removed trailing whitespaces.
2003-04-11 16:31:18 +00:00
Daniel Stenberg
7b51b2f128 Nic Hines fixed this bug when deflate or gzip contents were downloaded using
chunked encoding.
2003-04-11 16:23:43 +00:00
Daniel Stenberg
22d88fb28e ah, move the zero byte too or havoc will occur 2003-04-11 16:23:06 +00:00
Daniel Stenberg
f7c5b28e76 verify the new url parser fix 2003-04-11 16:22:27 +00:00
Daniel Stenberg
5760f2a307 support ? as separator instead of / even if no protocol was given 2003-04-11 16:08:41 +00:00
Daniel Stenberg
ee46efb5a5 these guys deserve a mentioning here as well 2003-04-11 08:57:19 +00:00
Daniel Stenberg
eb6ffebfc7 Dan the man on the list 2003-04-11 08:55:08 +00:00
Daniel Stenberg
c06c44f286 Dan Fandrich's added gzip support documented. 2003-04-11 08:51:24 +00:00
Daniel Stenberg
019c4088cf Dan Fandrich's gzip patch applied 2003-04-11 08:49:20 +00:00
Daniel Stenberg
0b0a88b78d when saving a cookie jar fails, you don't get an error code or anything,
just a warning in the verbose output stream
2003-04-11 08:19:06 +00:00
Daniel Stenberg
028e9cc56f According to RFC959, CWD is supposed to return 250 on success, but
there seem to be non-compliant FTP servers out there that return 200,
 so we accept any '2xy' response now.
2003-04-11 08:10:54 +00:00
Daniel Stenberg
e0d8615ece show a verbose warning message in case cookie-saving fails, after
Ralph Mitchell's notification.
2003-04-11 07:39:16 +00:00
Daniel Stenberg
c8ecbda40b new ftp tests 2003-04-10 11:43:47 +00:00
Daniel Stenberg
2324c10d43 another week has passed 2003-04-10 11:36:56 +00:00
Daniel Stenberg
89cfa76291 Vlad Krupin's URL parsing patch to fix the URL parsing when the URL has no
slash after the host name, but still a ? and following "parameters".
2003-04-10 09:44:39 +00:00
Daniel Stenberg
072070a22c oops, committed test code not meant to be here 2003-04-09 12:02:06 +00:00
Daniel Stenberg
3c3ad134ea the default debugfunction shows incoming headers as well 2003-04-09 11:57:06 +00:00
Daniel Stenberg
a4ffcfd4d5 timecond support added
made the Last-Modified (faked) header look correct using GMT always
2003-04-09 11:56:31 +00:00
Daniel Stenberg
136670c58a three new ftp tests 2003-04-09 11:55:24 +00:00
Daniel Stenberg
28169725fa <mdtm> added 2003-04-09 11:53:09 +00:00
Daniel Stenberg
5b13106f54 MDTM support added 2003-04-09 11:52:24 +00:00
Daniel Stenberg
1a2db0dfb1 James Bursa fixed a flaw in the content-type extracting code that could
miss the first letter
2003-04-08 14:48:38 +00:00
Daniel Stenberg
696f95bb0a share.c added 2003-04-08 10:35:35 +00:00
Daniel Stenberg
acec588fe3 --disable-eprt perhaps? 2003-04-07 06:41:24 +00:00
Daniel Stenberg
6ed0da8e98 Ryan Weaver's fix to prevent the CA bundle from being installed even when
building curl without SSL support!
2003-04-06 12:29:45 +00:00
Daniel Stenberg
7fd91d70bd adjusted the formpost testcases to the new boundary string construction 2003-04-04 12:30:35 +00:00
Daniel Stenberg
61788a0389 Changed how boundary strings are generated. This new way uses 28 dashes
and 12 following hexadecimal letters, which seems to be what IE uses.
This makes curl work more smoothly with sloppily written server apps.

Worked this out together with Martijn Broenland.
2003-04-04 12:24:01 +00:00
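A rough sketch of the boundary shape this commit describes (28 dashes followed by 12 hexadecimal characters). curl builds the boundary internally; the shell snippet below is only illustrative and assumes /dev/urandom and od are available:

  # illustrative only: 28 dashes + 12 hex characters (6 random bytes)
  boundary="----------------------------$(od -An -N6 -tx1 /dev/urandom | tr -d ' \n')"
  echo "$boundary"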
Daniel Stenberg
0821447b5b spell fix 2003-04-03 16:11:47 +00:00
Daniel Stenberg
3cba274ba6 kill a compiler warning on cygwin 2003-04-03 14:16:15 +00:00
Daniel Stenberg
df7bbcfd21 Added log output for when the writing of the input HTTP request is successful
or unsuccessful. Used to track down the recent cygwin test suite problems.
2003-04-03 13:43:15 +00:00
Daniel Stenberg
021d406f0c Modified how we log data to server.input, as we can't keep the file open
for long since that causes trouble on certain operating systems.
2003-04-03 13:42:06 +00:00
Daniel Stenberg
294569c502 new 2003-04-03 13:39:36 +00:00
300 changed files with 13351 additions and 11612 deletions

View File

@@ -9,3 +9,6 @@ config.status
curl-config
autom4te.cache
depcomp
config.guess
config.sub
ltmain.sh

2132
CHANGES

File diff suppressed because it is too large

1504
CHANGES.2002 Normal file

File diff suppressed because it is too large

View File

@@ -16,8 +16,8 @@ Compile and build instructions follow below.
CHANGES.$year contains changes for the particular year.
tests/memanalyze.pl
is for analyzing the output generated by curl if -DMALLOCDEBUG
is used when compiling
is for analyzing the output generated by curl if -DCURLDEBUG
is used when compiling (run configure with --enable-debug)
buildconf builds the makefiles and configure stuff
@@ -40,9 +40,9 @@ REQUIREMENTS
You need the following software installed:
o autoconf 2.50 (or later)
o automake 1.5 (or later)
o libtool 1.4 (or later)
o autoconf 2.57 (or later)
o automake 1.7 (or later)
o libtool 1.4.2 (or later)
o GNU m4 (required by autoconf)
o nroff + perl

View File

@@ -4,12 +4,13 @@
AUTOMAKE_OPTIONS = foreign
EXTRA_DIST = CHANGES COPYING maketgz SSLCERTS reconf Makefile.dist \
EXTRA_DIST = CHANGES COPYING maketgz reconf Makefile.dist \
curl-config.in build_vms.com curl-style.el sample.emacs testcurl.sh
bin_SCRIPTS = curl-config
SUBDIRS = docs lib src include tests packages
SUBDIRS = lib src
DIST_SUBDIRS = $(SUBDIRS) tests include packages docs
# create a root makefile in the distribution:
dist-hook:
@@ -25,10 +26,10 @@ pdf:
check: test
test:
@(cd tests; $(MAKE) quiet-test)
@(cd tests; $(MAKE) all quiet-test)
test-full:
@(cd tests; $(MAKE) full-test)
@(cd tests; $(MAKE) all full-test)
#
# Build source and binary rpms. For rpm-3.0 and above, the ~/.rpmmacros
@@ -77,3 +78,13 @@ pkgadd:
# resulting .tar.bz2 file will end up at packages/Win32/cygwin
cygwinbin:
$(MAKE) -C packages/Win32/cygwin cygwinbin
# We extend the standard install with a custom hook:
install-data-hook:
cd include && $(MAKE) install
cd docs && $(MAKE) install
# We extend the standard uninstall with a custom hook:
uninstall-hook:
cd include && $(MAKE) uninstall
cd docs && $(MAKE) uninstall

View File

@@ -59,6 +59,10 @@ vc-ssl-dll:
cd ..\src
nmake -f Makefile.vc6
djgpp:
make -C lib -f Makefile.dj
make -C src -f Makefile.dj
cygwin:
./configure
make

10
README
View File

@@ -31,6 +31,7 @@ WEB SITE
Visit the curl web site or mirrors for the latest news:
Sweden -- http://curl.haxx.se/
Russia -- http://curl.tsuren.net/
US -- http://curl.sf.net/
Australia -- http://curl.planetmirror.com/
@@ -44,20 +45,21 @@ DOWNLOAD
Australia -- http://curl.planetmirror.com/download/
US -- http://curl.sourceforge.net/download/
Hongkong -- http://www.execve.net/curl/
Russia -- http://curl.tsuren.net/download/
CVS
To download the very latest source off the CVS server do this:
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl login
cvs -d :pserver:cvsread@cvs.php.net:/repository login
(just press enter when asked for password)
(enter "phpfi" when asked for password)
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl co curl
cvs -d :pserver:cvsread@cvs.php.net:/repository co curl
(you'll get a directory named curl created, filled with the source code)
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl logout
cvs -d :pserver:cvsread@cvs.php.net:/repository logout
(you're off the hook!)

View File

@@ -152,10 +152,8 @@ AC_DEFUN([TYPE_IN_ADDR_T],
AC_MSG_CHECKING([for in_addr_t equivalent])
AC_CACHE_VAL([curl_cv_in_addr_t_equiv],
[
# Systems have either "struct sockaddr *" or
# "void *" as the second argument to getpeername
curl_cv_in_addr_t_equiv=
for t in int size_t unsigned long "unsigned long"; do
for t in "unsigned long" int size_t unsigned long; do
AC_TRY_COMPILE([
#include <sys/types.h>
#include <sys/socket.h>

View File

@@ -6,18 +6,19 @@ die(){
}
#--------------------------------------------------------------------------
# autoconf 2.50 or newer
# autoconf 2.57 or newer
#
need_autoconf="2.57"
ac_version=`${AUTOCONF:-autoconf} --version 2>/dev/null|head -1| sed -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'`
if test -z "$ac_version"; then
echo "buildconf: autoconf not found."
echo " You need autoconf version 2.50 or newer installed."
echo " You need autoconf version $need_autoconf or newer installed."
exit 1
fi
IFS=.; set $ac_version; IFS=' '
if test "$1" = "2" -a "$2" -lt "50" || test "$1" -lt "2"; then
if test "$1" = "2" -a "$2" -lt "57" || test "$1" -lt "2"; then
echo "buildconf: autoconf version $ac_version found."
echo " You need autoconf version 2.50 or newer installed."
echo " You need autoconf version $need_autoconf or newer installed."
echo " If you have a sufficient autoconf installed, but it"
echo " is not named 'autoconf', then try setting the"
echo " AUTOCONF environment variable."
@@ -48,18 +49,19 @@ fi
echo "buildconf: autoheader version $ah_version (ok)"
#--------------------------------------------------------------------------
# automake 1.5 or newer
# automake 1.7 or newer
#
am_version=`${AUTOMAKE:-automake} --version 2>/dev/null|head -1| sed -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'`
need_automake="1.7"
am_version=`${AUTOMAKE:-automake} --version 2>/dev/null|head -1| sed -e 's/^.* \([0-9]\)/\1/' -e 's/[a-z]* *$//'`
if test -z "$am_version"; then
echo "buildconf: automake not found."
echo " You need automake version 1.5 or newer installed."
echo " You need automake version $need_automake or newer installed."
exit 1
fi
IFS=.; set $am_version; IFS=' '
if test "$1" = "1" -a "$2" -lt "5" || test "$1" -lt "1"; then
if test "$1" = "1" -a "$2" -lt "7" || test "$1" -lt "1"; then
echo "buildconf: automake version $am_version found."
echo " You need automake version 1.5 or newer installed."
echo " You need automake version $need_automake or newer installed."
echo " If you have a sufficient automake installed, but it"
echo " is not named 'autommake', then try setting the"
echo " AUTOMAKE environment variable."
@@ -68,33 +70,37 @@ fi
echo "buildconf: automake version $am_version (ok)"
#--------------------------------------------------------------------------
# libtool 1.4 or newer
# libtool check
#
LIBTOOL_WANTED_MAJOR=1
LIBTOOL_WANTED_MINOR=4
LIBTOOL_WANTED_PATCH=
LIBTOOL_WANTED_VERSION=1.4
LIBTOOL_WANTED_PATCH=2
LIBTOOL_WANTED_VERSION=1.4.2
libtool=`which glibtool 2>/dev/null`
if test ! -x "$libtool"; then
libtool=`which libtool`
fi
lt_pversion=`$libtool --version 2>/dev/null|sed -e 's/^[^0-9]*//' -e 's/[- ].*//'`
#lt_pversion=`${LIBTOOL:-$libtool} --version 2>/dev/null|head -1| sed -e 's/^.* \([0-9]\)/\1/' -e 's/[a-z]* *$//'`
lt_pversion=`$libtool --version 2>/dev/null|head -1|sed -e 's/^[^0-9]*//g' -e 's/[- ].*//'`
if test -z "$lt_pversion"; then
echo "buildconf: libtool not found."
echo " You need libtool version $LIBTOOL_WANTED_VERSION or newer installed"
exit 1
fi
lt_version=`echo $lt_pversion|sed -e 's/\([a-z]*\)$/.\1/'`
lt_version=`echo $lt_pversion` #|sed -e 's/\([a-z]*\)$/.\1/'`
IFS=.; set $lt_version; IFS=' '
lt_status="good"
if test "$1" = "$LIBTOOL_WANTED_MAJOR"; then
if test "$2" -lt "$LIBTOOL_WANTED_MINOR"; then
lt_status="bad"
elif test ! -z "$LIBTOOL_WANTED_PATCH"; then
if test "$3" -lt "$LIBTOOL_WANTED_PATCH"; then
lt_status="bad"
if test -n "$3"; then
if test "$3" -lt "$LIBTOOL_WANTED_PATCH"; then
lt_status="bad"
fi
fi
fi
fi
@@ -104,15 +110,20 @@ if test $lt_status != "good"; then
exit 1
fi
echo "buildconf: libtool version $lt_pversion (ok)"
echo "buildconf: libtool version $lt_version (ok)"
# ------------------------------------------------------------
# run the correct scripts now
echo "buildconf: running libtoolize"
${LIBTOOLIZE:-libtoolize} --copy --automake || die "The command '${LIBTOOLIZE:-libtoolize} --copy --automake' failed"
echo "buildconf: running aclocal"
aclocal || die "The command 'aclocal' failed"
${ACLOCAL:-aclocal} || die "The command '${ACLOCAL:-aclocal}' failed"
echo "buildconf: running autoheader"
autoheader || die "The command 'autoheader' failed"
${AUTOHEADER:-autoheader} || die "The command '${AUTOHEADER:-autoheader}' failed"
echo "buildconf: running autoconf"
autoconf || die "The command 'autoconf' failed"
${AUTOCONF:-autoconf} || die "The command '${AUTOCONF:-autoconf}' failed"
echo "buildconf: running automake"
automake -a || die "The command 'automake -a' failed"
${AUTOMAKE:-automake} -a || die "The command '${AUTOMAKE:-automake} -a' failed"
exit 0

1363
config.guess vendored

File diff suppressed because it is too large

1470
config.sub vendored

File diff suppressed because it is too large

View File

@@ -1,9 +1,7 @@
dnl $Id$
dnl Process this file with autoconf to produce a configure script.
dnl Ensure that this file is processed with autoconf 2.50 or newer
dnl Don't even think about removing this check!
AC_PREREQ(2.50)
AC_PREREQ(2.57)
dnl We don't know the version number "statically" so we use a dash here
AC_INIT(curl, [-], [curl-bug@haxx.se])
@@ -14,12 +12,17 @@ This configure script may be copied, distributed and modified under the
terms of the curl license; see COPYING for more details])
AC_CONFIG_SRCDIR([lib/urldata.h])
AM_CONFIG_HEADER(lib/config.h src/config.h tests/server/config.h lib/ca-bundle.h)
AM_CONFIG_HEADER(lib/config.h src/config.h tests/server/config.h )
AM_MAINTAINER_MODE
dnl SED is needed by some of the tools
AC_PATH_PROG( SED, sed, , $PATH:/usr/bin:/usr/local/bin)
AC_SUBST(SED)
dnl AR is used by libtool, and try the odd Solaris path too
AC_PATH_PROG( AR, ar, , $PATH:/usr/bin:/usr/local/bin:/usr/ccs/bin)
AC_SUBST(AR)
dnl figure out the libcurl version
VERSION=`$SED -ne 's/^#define LIBCURL_VERSION "\(.*\)"/\1/p' ${srcdir}/include/curl/curl.h`
AM_INIT_AUTOMAKE(curl,$VERSION)
@@ -349,15 +352,17 @@ dnl Check if the operating system allows programs to write to their own argv[]
dnl **********************************************************************
AC_MSG_CHECKING([if argv can be written to])
AC_TRY_RUN([
AC_RUN_IFELSE([[
int main(int argc, char ** argv) {
argv[0][0] = ' ';
return (argv[0][0] == ' ')?0:1;
}
],
]],
AC_DEFINE(HAVE_WRITABLE_ARGV, 1, [Define this symbol if your OS supports changing the contents of argv])
AC_MSG_RESULT(yes),
AC_MSG_RESULT(no)
AC_MSG_RESULT(no),
AC_MSG_RESULT(no)
AC_MSG_WARN([the previous check could not be made, default was used])
)
dnl **********************************************************************
@@ -452,6 +457,63 @@ else
AC_MSG_RESULT(no)
fi
dnl **********************************************************************
dnl Check for GSS-API libraries
dnl **********************************************************************
AC_ARG_WITH(gssapi-includes,
AC_HELP_STRING([--with-gssapi-includes=DIR],
[Specify location of GSSAPI header]),
[ GSSAPI_INCS="-I$withval"
want_gss="yes" ]
)
AC_ARG_WITH(gssapi-libs,
AC_HELP_STRING([--with-gssapi-libs=DIR],
[Specify location of GSSAPI libs]),
[ GSSAPI_LIBS="-L$withval -lgssapi"
want_gss="yes" ]
)
AC_ARG_WITH(gssapi,
AC_HELP_STRING([--with-gssapi=DIR],
[Where to look for GSSAPI]),
[ GSSAPI_ROOT="$withval"
want_gss="yes" ]
)
AC_MSG_CHECKING([if GSSAPI support is requested])
if test x"$want_gss" = xyes; then
if test -z "$GSSAPI_INCS"; then
if test -f "$GSSAPI_ROOT/bin/krb5-config"; then
gss_cppflags=`$GSSAPI_ROOT/bin/krb5-config --cflags gssapi`
CPPFLAGS="$CPPFLAGS $gss_cppflags"
else
CPPFLAGS="$GSSAPI_ROOT/include"
fi
else
CPPFLAGS="$CPPFLAGS $GSSAPI_INCS"
fi
if test -z "$GSSAPI_LIB_DIR"; then
if test -f "$GSSAPI_ROOT/bin/krb5-config"; then
gss_ldflags=`$GSSAPI_ROOT/bin/krb5-config --libs gssapi`
LDFLAGS="$LDFLAGS $gss_ldflags"
else
LDFLAGS="$LDFLAGS $GSSAPI_ROOT/lib -lgssapi"
fi
else
LDFLAGS="$LDFLAGS $GSSAPI_LIB_DIR"
fi
AC_MSG_RESULT(yes)
AC_DEFINE(GSSAPI, 1, [if you have the gssapi libraries])
else
AC_MSG_RESULT(no)
fi
dnl Detect the pkg-config tool, as it may have extra info about the
dnl openssl installation we can use. I *believe* this is what we are
dnl expected to do on really recent Redhat Linux hosts.
@@ -484,6 +546,8 @@ dnl **********************************************************************
dnl Default to compiler & linker defaults for SSL files & libraries.
OPT_SSL=off
dnl Default to no CA bundle
ca="no"
AC_ARG_WITH(ssl,dnl
AC_HELP_STRING([--with-ssl=PATH],[where to look for SSL, PATH points to the SSL installation (default: /usr/local/ssl)])
AC_HELP_STRING([--without-ssl], [disable SSL]),
@@ -567,6 +631,36 @@ else
AC_SUBST(OPENSSL_ENABLED)
AC_MSG_CHECKING([CA cert bundle install path])
AC_ARG_WITH(ca-bundle,
AC_HELP_STRING([--with-ca-bundle=FILE], [File name to install the CA bundle as])
AC_HELP_STRING([--without-ca-bundle], [Don't install the CA bundle]),
[ ca="$withval" ],
[
if test "x$prefix" != xNONE; then
ca="\${prefix}/share/curl/curl-ca-bundle.crt"
else
ca="$ac_default_prefix/share/curl/curl-ca-bundle.crt"
fi
] )
if test X"$OPT_SSL" = Xno; then
ca="no"
fi
if test "x$ca" != "xno"; then
CURL_CA_BUNDLE='"'$ca'"'
AC_SUBST(CURL_CA_BUNDLE)
fi
AC_MSG_RESULT([$ca])
dnl these can only exist if openssl exists
AC_CHECK_FUNCS( RAND_status \
RAND_screen \
RAND_egd )
fi
if test X"$OPT_SSL" != Xoff &&
@@ -574,15 +668,10 @@ else
AC_MSG_ERROR([OpenSSL libs and/or directories were not found where specified!])
fi
dnl these can only exist if openssl exists
AC_CHECK_FUNCS( RAND_status \
RAND_screen \
RAND_egd )
fi
AM_CONDITIONAL(CABUNDLE, test x$ca != xno)
dnl **********************************************************************
dnl Check for the presence of ZLIB libraries and headers
dnl **********************************************************************
@@ -625,6 +714,9 @@ case "$OPT_ZLIB" in
;;
esac
dnl set variable for use in automakefile(s)
AM_CONDITIONAL(HAVE_LIBZ, test x"$HAVE_LIBZ" = x1)
dnl Default is to try the thread-safe versions of a few functions
OPT_THREAD=on
@@ -735,15 +827,14 @@ AC_C_CONST
AC_TYPE_SIZE_T
AC_HEADER_TIME
# mprintf() checks:
AC_CHECK_SIZEOF(off_t)
# check for 'long double'
# AC_CHECK_SIZEOF(long double, 8)
# check for 'long long'
# AC_CHECK_SIZEOF(long long, 4)
AC_CHECK_TYPE(long long,
[AC_DEFINE(HAVE_LONGLONG, 1, [if your compiler supports 'long long'])])
# check for ssize_t
AC_CHECK_TYPE(ssize_t, int)
AC_CHECK_TYPE(ssize_t, ,
AC_DEFINE(ssize_t, int, [the signed version of size_t]))
TYPE_SOCKLEN_T
TYPE_IN_ADDR_T
@@ -772,7 +863,6 @@ AC_CHECK_FUNCS( socket \
tcgetattr \
perror \
closesocket \
setvbuf \
sigaction \
signal \
getpass_r \
@@ -820,31 +910,6 @@ AC_PATH_PROGS( NROFF, gnroff nroff, ,
$PATH:/usr/bin/:/usr/local/bin )
AC_SUBST(NROFF)
AC_MSG_CHECKING([CA cert bundle install path])
AC_ARG_WITH(ca-bundle,
AC_HELP_STRING([--with-ca-bundle=FILE], [File name to install the CA bundle as])
AC_HELP_STRING([--without-ca-bundle], [Don't install the CA bundle]),
[ ca="$withval" ],
[
if test "x$prefix" != xNONE; then
ca="$prefix/share/curl/curl-ca-bundle.crt"
else
ca="$ac_default_prefix/share/curl/curl-ca-bundle.crt"
fi
] )
if test "x$ca" = "xno"; then
dnl let's not keep "no" as path name, blank it instead
ca=""
else
AC_DEFINE_UNQUOTED(CURL_CA_BUNDLE, "$ca", [CA bundle full path name])
fi
CURL_CA_BUNDLE="$ca"
AC_SUBST(CURL_CA_BUNDLE)
AC_MSG_RESULT([$ca])
AC_PROG_YACC
dnl AC_PATH_PROG( RANLIB, ranlib, /usr/bin/ranlib,
@@ -864,7 +929,7 @@ AC_HELP_STRING([--disable-debug],[Disable debug options]),
;;
*) AC_MSG_RESULT(yes)
CPPFLAGS="$CPPFLAGS -DMALLOCDEBUG"
CPPFLAGS="$CPPFLAGS -DCURLDEBUG"
CFLAGS="$CFLAGS -g"
if test "$GCC" = "yes"; then
CFLAGS="$CFLAGS -W -Wall -Wwrite-strings -pedantic -Wundef -Wpointer-arith -Wnested-externs"
@@ -887,6 +952,31 @@ AC_HELP_STRING([--disable-debug],[Disable debug options]),
AC_MSG_RESULT(no)
)
ares="no"
AC_MSG_CHECKING([whether to enable ares])
AC_ARG_ENABLE(ares,
AC_HELP_STRING([--enable-ares],[Enable using ares for name lookups])
AC_HELP_STRING([--disable-ares],[Disable using ares for name lookups]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
;;
*) AC_MSG_RESULT(yes)
if test "x$IPV6_ENABLED" = "x1"; then
AC_MSG_ERROR([ares doesn't work with ipv6, disable ipv6 to use ares])
fi
AC_DEFINE(USE_ARES, 1, [Define if you want to enable ares support])
ares="yes"
;;
esac ],
AC_MSG_RESULT(no)
)
AM_CONDITIONAL(ARES, test x$ares = xyes)
AC_CONFIG_FILES([Makefile \
docs/Makefile \
docs/examples/Makefile \
@@ -907,6 +997,7 @@ AC_CONFIG_FILES([Makefile \
packages/Linux/RPM/curl.spec \
packages/Linux/RPM/curl-ssl.spec \
packages/Solaris/Makefile \
packages/DOS/Makefile \
packages/EPM/curl.list \
packages/EPM/Makefile \
curl-config

View File

@@ -37,7 +37,7 @@
(setq tab-width 8
indent-tabs-mode nil ; Use spaces. Not tabs.
comment-column 40
c-font-lock-extra-types (append '("bool" "CURL" "CURLcode" "ssize_t" "size_t" "socklen_t" "fd_set"))
c-font-lock-extra-types (append '("bool" "CURL" "CURLcode" "ssize_t" "size_t" "socklen_t" "fd_set" "time_t"))
)
;; keybindings for C, C++, and Objective-C. We can put these in
;; c-mode-base-map because of inheritance ...

View File

@@ -1,4 +1,4 @@
Updated: February 25, 2003 (http://curl.haxx.se/docs/faq.html)
Updated: June 17, 2003 (http://curl.haxx.se/docs/faq.html)
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
@@ -97,6 +97,12 @@ FAQ
We spell it cURL or just curl. We pronounce it with an initial k sound:
[kurl].
NOTE: there are numerous sub-projects and related projects that also use the
word curl in the project names in various combinations, but you should take
notice that this FAQ is directed at the command-line tool named curl (and
libcurl the library), and may therefore not be valid for other curl
projects.
1.2 What is libcurl?
libcurl is a reliable and portable library which provides you with an easy
@@ -132,11 +138,9 @@ FAQ
better. We do however believe in a few rules when it comes to the future of
curl:
* Curl is to remain a command line tool. If you want GUIs or fancy scripting
capabilities, you're free to write another tool that uses libcurl and that
offers this. There's no point in having a single tool that does every
imaginable thing. That's also one of the great advantages of having the
core of curl as a library.
* Curl -- the command line tool -- is to remain a non-graphical command line
tool. If you want GUIs or fancy scripting capabilities, you should look
for another tool that uses libcurl.
* We do not add things to curl that other small and available tools already
do very fine at the side. Curl's output is fine to pipe into another
@@ -589,9 +593,11 @@ FAQ
4.9. Curl can't authenticate to the server that requires NTLM?
NTLM is a Microsoft proprietary protocol. Unfortunately, curl does not
currently support that. Proprietary formats are evil. You should not use
such ones.
This is supported in curl 7.10.6 or later. No earlier curl version knows
of this magic.
NTLM is a Microsoft proprietary protocol. Proprietary formats are evil. You
should not use such ones.
4.10 My HTTP request using HEAD, PUT or DELETE doesn't work!
@@ -772,6 +778,5 @@ FAQ
discussions and a large amount of people have contributed with source code
knowing that this is the license we use. This license puts the restrictions
we want on curl/libcurl and it does not spread to other programs or
libraries that use it. The recent dual license modification should make it
possible for everyone to use libcurl or curl in their projects, no matter
what license they already have in use.
libraries that use it. It should be possible for everyone to use libcurl or
curl in their projects, no matter what license they already have in use.

View File

@@ -17,27 +17,30 @@ Misc
- progress bar/time specs while downloading
- "standard" proxy environment variables support
- config file support
- compiles on win32 (reported built on 29 operating systems)
- compiles on win32 (reported builds on 40+ operating systems)
- redirectable stderr
- use selected network interface for outgoing traffic
- selectable network interface for outgoing traffic
- IPv6 support
- persistent connections
- socks5 support
- supports user name + password in proxy environment variables
- operations through proxy "tunnel" (using CONNECT)
HTTP
- HTTP/1.1 compliant
- HTTP/1.1 compliant (optionally uses 1.0)
- GET
- PUT
- HEAD
- POST
- multipart POST
- authentication
- multipart formpost (RFC1867-style)
- authentication (Basic, Digest, NTLM(*1), GSS-Negotiate(*3))
- resume (both GET and PUT)
- follow redirects
- maximum amount of redirects to follow
- custom HTTP request
- cookie get/send fully parsed
- understands the netscape cookie file format
- custom headers (that can replace/remove internally generated headers)
- reads/writes the netscape cookie file format
- custom headers (replace/remove internally generated headers)
- custom user-agent string
- custom referer string
- range
@@ -45,12 +48,16 @@ HTTP
- time conditions
- via http-proxy
- retrieve file modification date
- Content-Encoding support for deflate and gzip
- "Transfer-Encoding: chunked" support for "uploads"
HTTPS (*1)
- (all the HTTP features)
- using certificates
- verify server certificate
- via http-proxy
- select desired encryption
- force usage of a specific SSL version (SSLv2, SSLv3 or TLSv1)
FTP
- download
@@ -90,5 +97,6 @@ GOPHER
FILE
- URL support
*1 = requires OpenSSL
*2 = requires OpenLDAP
*1 = requires OpenSSL
*2 = requires OpenLDAP
*3 = requires a GSSAPI-compliant library, such as Heimdal or similar.

View File

@@ -31,6 +31,10 @@ UNIX
If you have checked out the sources from the CVS repository, read the
CVS-INFO on how to proceed.
Get a full listing of all available configure options by invoking it like:
./configure --help
If you want to install curl in a different file hierarchy than /usr/local,
you need to specify that already when running configure:
@@ -200,7 +204,14 @@ Win32
Before running nmake define the OPENSSL_PATH environment variable with
the root/base directory of OpenSSL, for example:
set OPENSSL_PATH=c:\openssl-0.9.6b
set OPENSSL_PATH=c:\openssl-0.9.7a
lib/Makefile.vc6 depends on zlib (http://www.gzip.org/zlib/) as well.
Please read the zlib documentation on how to compile zlib. Define the
ZLIB_PATH environment variable to the location of zlib.h and zlib.lib,
for example:
set ZLIB_PATH=c:\zlib-1.1.4
Then run 'nmake vc-ssl' or 'nmake vc-ssl-dll' in curl's root
directory. 'nmake vc-ssl' will create a libcurl static and dynamic
@@ -444,6 +455,7 @@ PORTS
- StrongARM NetBSD 1.4.1
- Ultrix 4.3a
- i386 BeOS
- i386 DOS
- i386 FreeBSD
- i386 HURD
- i386 Linux 1.3, 2.0, 2.2, 2.3, 2.4
@@ -454,10 +466,12 @@ PORTS
- i386 Solaris 2.7
- i386 Windows 95, 98, ME, NT, 2000
- i386 QNX 6
- i486 ncr-sysv4.3.03 (NCR MP-RAS)
- ia64 Linux 2.3.99
- m68k AmigaOS 3
- m68k Linux
- m68k OpenBSD
- m88k dg-dgux5.4R3.00
- s390 Linux
- XScale/PXA250 Linux 2.4

View File

@@ -3,6 +3,27 @@ join in and help us correct one or more of these! Also be sure to check the
changelog of the current development status, as one or more of these problems
may have been fixed since this was written!
* libcurl doesn't treat the content-length of compressed data properly, as
it seems HTTP servers send the *uncompressed* length in that header and
libcurl thinks of it as the *compressed* length. Some explanations are here:
http://curl.haxx.se/mail/lib-2003-06/0146.html
* Downloading 0 (zero) bytes files over FTP will not create a zero byte file
locally, which is because libcurl doesn't call the write callback with zero
bytes. Explained here: http://curl.haxx.se/mail/archive-2003-04/0143.html
* Using CURLOPT_FAILONERROR (-f/--fail) will make authentication to stop
working if you use anything but plain Basic auth.
* LDAP output is garbled. Hardly anyone seems to care about LDAP functionality
in curl/libcurl, which is why this report has been closed and set to be solved later.
If you feel this is something you want fixed, get in touch and we'll start
working.
http://sourceforge.net/tracker/index.php?func=detail&aid=735752&group_id=976&atid=100976
* IPv6 support on AIX 4.3.3 doesn't work due to a missing sockaddr_storage
struct. It has been reported to work on AIX 5.1 though.
* Running 'make test' on Mac OS X gives 4 errors. This seems to be related
to some kind of libtool problem:
http://curl.haxx.se/mail/archive-2002-03/0029.html and

View File

@@ -11,7 +11,7 @@ SIMPLE USAGE
curl http://www.netscape.com/
Get the root README file from funet's ftp-server:
Get the README file from the user's home directory at funet's ftp-server:
curl ftp://ftp.funet.fi/README
@@ -19,7 +19,7 @@ SIMPLE USAGE
curl http://www.weirdserver.com:8000/
Get a list of the root directory of an FTP site:
Get a list of a directory of an FTP site:
curl ftp://cool.haxx.se/
@@ -243,7 +243,7 @@ POST (HTTP)
To post to this, you enter a curl command line like:
curl -d "user=foobar&pass=12345&id=blablabla&dig=submit" (continues)
curl -d "user=foobar&pass=12345&id=blablabla&ding=submit" (continues)
http://www.formpost.com/getthis/post.cgi

View File

@@ -19,7 +19,7 @@ PDFPAGES = \
SUBDIRS = examples libcurl
EXTRA_DIST = MANUAL BUGS CONTRIBUTE FAQ FEATURES INTERNALS \
EXTRA_DIST = MANUAL BUGS CONTRIBUTE FAQ FEATURES INTERNALS SSLCERTS \
README.win32 RESOURCES TODO TheArtOfHttpScripting THANKS \
VERSIONS KNOWN_BUGS BINDINGS $(man_MANS) $(HTMLPAGES) \
HISTORY INSTALL libcurl-the-guide $(PDFPAGES)

View File

@@ -13,8 +13,8 @@ README.win32
are win32-based.
The unix-style man pages are tricky to read on windows, so therefore are all
those pages also converted to HTML and those are also included in the
release archives.
those pages converted to HTML as well as pdf, and included in the release
archives.
The main curl.1 man page is also "built-in" in the command line tool. Use a
command line similar to this in order to extract a separate text file:

View File

@@ -2,88 +2,93 @@ This project has been alive for several years. Countless people have provided
feedback that has improved curl. Here follows an (incomplete) list of people
who have contributed non-trivial parts:
- Daniel Stenberg <daniel@haxx.se>
- Rafael Sagula <sagula@inf.ufrgs.br>
- Sampo Kellomaki <sampo@iki.fi>
- Linas Vepstas <linas@linas.org>
- Bjorn Reese <breese@mail1.stofanet.dk>
- Johan Anderson <johan@homemail.com>
- Kjell Ericson <Kjell.Ericson@haxx.se>
- Troy Engel <tengel@sonic.net>
- Ryan Nelson <ryan@inch.com>
- Björn Stenberg <bjorn@haxx.se>
- Angus Mackay <amackay@gus.ml.org>
- Eric Young <eay@cryptsoft.com>
- Simon Dick <simond@totally.irrelevant.org>
- Oren Tirosh <oren@monty.hishome.net>
- Steven G. Johnson <stevenj@alum.mit.edu>
- Gilbert Ramirez Jr. <gram@verdict.uthscsa.edu>
- Andrés García <ornalux@redestb.es>
- Douglas E. Wegscheid <wegscd@whirlpool.com>
- Mark Butler <butlerm@xmission.com>
- Eric Thelin <eric@generation-i.com>
- Marc Boucher <marc@mbsi.ca>
- Greg Onufer <Greg.Onufer@Eng.Sun.COM>
- Doug Kaufman <dkaufman@rahul.net>
- David Eriksson <david@2good.com>
- Ralph Beckmann <rabe@uni-paderborn.de>
- T. Yamada <tai@imasy.or.jp>
- Lars J. Aas <larsa@sim.no>
- Jörn Hartroth <Joern.Hartroth@computer.org>
- Matthew Clarke <clamat@van.maves.ca>
- Linus Nielsen Feltzing <linus@haxx.se>
- Felix von Leitner <felix@convergence.de>
- Dan Zitter <dzitter@zitter.net>
- Jongki Suwandi <Jongki.Suwandi@eng.sun.com>
- Chris Maltby <chris@aurema.com>
- Ron Zapp <rzapper@yahoo.com>
- Paul Marquis <pmarquis@iname.com>
- Ellis Pritchard <ellis@citria.com>
- Damien Adant <dams@usa.net>
- Chris <cbayliss@csc.come>
- Marco G. Salvagno <mgs@whiz.cjb.net>
- Paul Marquis <pmarquis@iname.com>
- David LeBlanc <dleblanc@qnx.com>
- Rich Gray at Plus Technologies
- Luong Dinh Dung <u8luong@lhsystems.hu>
- Torsten Foertsch <torsten.foertsch@gmx.net>
- Kristian Köhntopp <kris@koehntopp.de>
- Fred Noz <FNoz@siac.com>
- Caolan McNamara <caolan@csn.ul.ie>
- Albert Chin-A-Young <china@thewrittenword.com>
- Stephen Kick <skick@epicrealm.com>
- Martin Hedenfalk <mhe@stacken.kth.se>
- Richard Prescott <rip at step.polymtl.ca>
- Jason S. Priebe <priebe@wral-tv.com>
- T. Bharath <TBharath@responsenetworks.com>
- Alexander Kourakos <awk@users.sourceforge.net>
- James Griffiths <griffiths_james@yahoo.com>
- Loic Dachary <loic@senga.org>
- Robert Weaver <robert.weaver@sabre.com>
- Ingo Ralf Blum <ingoralfblum@ingoralfblum.com>
- Jun-ichiro itojun Hagino <itojun@iijlab.net>
- Frederic Lepied <flepied@mandrakesoft.com>
- Georg Horn <horn@koblenz-net.de>
- Cris Bailiff <c.bailiff@awayweb.com>
- Sterling Hughes <sterling@designmultimedia.com>
- S. Moonesamy
- Ingo Wilken <iw@WWW.Ecce-Terram.DE>
- Pawel A. Gajda <mis@k2.net.pl>
- Patrick Bihan-Faou
- Nico Baggus <Nico.Baggus@mail.ing.nl>
- Sergio Ballestrero
- Andrew Francis <locust@familyhealth.com.au>
- Tomasz Lacki <Tomasz.Lacki@primark.pl>
- Georg Huettenegger <georg@ist.org>
- John Lask <johnlask@hotmail.com>
- Eric Lavigne <erlavigne@wanadoo.fr>
- Marcus Webster <marcus.webster@phocis.com>
- Götz Babin-Ebell <babinebell@trustcenter.de>
- Andreas Damm <andreas-sourceforge@radab.org>
- Jacky Lam <sylam@emsoftltd.com>
- James Gallagher <jgallagher@gso.uri.edu>
- Kjetil Jacobsen <kjetilja@cs.uit.no>
- Markus F.X.J. Oberhumer <markus@oberhumer.com>
- Miklos Nemeth <mnemeth@kfkisystems.com>
- Kevin Roth <kproth@users.sourceforge.net>
- Ralph Mitchell <rmitchell@eds.com>
Daniel Stenberg <daniel@haxx.se>
Rafael Sagula <sagula@inf.ufrgs.br>
Sampo Kellomaki <sampo@iki.fi>
Linas Vepstas <linas@linas.org>
Bjorn Reese <breese@mail1.stofanet.dk>
Johan Anderson <johan@homemail.com>
Kjell Ericson <Kjell.Ericson@haxx.se>
Troy Engel <tengel@sonic.net>
Ryan Nelson <ryan@inch.com>
Björn Stenberg <bjorn@haxx.se>
Angus Mackay <amackay@gus.ml.org>
Eric Young <eay@cryptsoft.com>
Simon Dick <simond@totally.irrelevant.org>
Oren Tirosh <oren@monty.hishome.net>
Steven G. Johnson <stevenj@alum.mit.edu>
Gilbert Ramirez Jr. <gram@verdict.uthscsa.edu>
Andrés García <ornalux@redestb.es>
Douglas E. Wegscheid <wegscd@whirlpool.com>
Mark Butler <butlerm@xmission.com>
Eric Thelin <eric@generation-i.com>
Marc Boucher <marc@mbsi.ca>
Greg Onufer <Greg.Onufer@Eng.Sun.COM>
Doug Kaufman <dkaufman@rahul.net>
David Eriksson <david@2good.com>
Ralph Beckmann <rabe@uni-paderborn.de>
T. Yamada <tai@imasy.or.jp>
Lars J. Aas <larsa@sim.no>
Jörn Hartroth <Joern.Hartroth@computer.org>
Matthew Clarke <clamat@van.maves.ca>
Linus Nielsen Feltzing <linus@haxx.se>
Felix von Leitner <felix@convergence.de>
Dan Zitter <dzitter@zitter.net>
Jongki Suwandi <Jongki.Suwandi@eng.sun.com>
Chris Maltby <chris@aurema.com>
Ron Zapp <rzapper@yahoo.com>
Paul Marquis <pmarquis@iname.com>
Ellis Pritchard <ellis@citria.com>
Damien Adant <dams@usa.net>
Chris <cbayliss@csc.come>
Marco G. Salvagno <mgs@whiz.cjb.net>
Paul Marquis <pmarquis@iname.com>
David LeBlanc <dleblanc@qnx.com>
Rich Gray at Plus Technologies
Luong Dinh Dung <u8luong@lhsystems.hu>
Torsten Foertsch <torsten.foertsch@gmx.net>
Kristian Köhntopp <kris@koehntopp.de>
Fred Noz <FNoz@siac.com>
Caolan McNamara <caolan@csn.ul.ie>
Albert Chin-A-Young <china@thewrittenword.com>
Stephen Kick <skick@epicrealm.com>
Martin Hedenfalk <mhe@stacken.kth.se>
Richard Prescott <rip at step.polymtl.ca>
Jason S. Priebe <priebe@wral-tv.com>
T. Bharath <TBharath@responsenetworks.com>
Alexander Kourakos <awk@users.sourceforge.net>
James Griffiths <griffiths_james@yahoo.com>
Loic Dachary <loic@senga.org>
Robert Weaver <robert.weaver@sabre.com>
Ingo Ralf Blum <ingoralfblum@ingoralfblum.com>
Jun-ichiro itojun Hagino <itojun@iijlab.net>
Frederic Lepied <flepied@mandrakesoft.com>
Georg Horn <horn@koblenz-net.de>
Cris Bailiff <c.bailiff@awayweb.com>
Sterling Hughes <sterling@designmultimedia.com>
S. Moonesamy
Ingo Wilken <iw@WWW.Ecce-Terram.DE>
Pawel A. Gajda <mis@k2.net.pl>
Patrick Bihan-Faou
Nico Baggus <Nico.Baggus@mail.ing.nl>
Sergio Ballestrero
Andrew Francis <locust@familyhealth.com.au>
Tomasz Lacki <Tomasz.Lacki@primark.pl>
Georg Huettenegger <georg@ist.org>
John Lask <johnlask@hotmail.com>
Eric Lavigne <erlavigne@wanadoo.fr>
Marcus Webster <marcus.webster@phocis.com>
Götz Babin-Ebell <babinebell@trustcenter.de>
Andreas Damm <andreas-sourceforge@radab.org>
Jacky Lam <sylam@emsoftltd.com>
James Gallagher <jgallagher@gso.uri.edu>
Kjetil Jacobsen <kjetilja@cs.uit.no>
Markus F.X.J. Oberhumer <markus@oberhumer.com>
Miklos Nemeth <mnemeth@kfkisystems.com>
Kevin Roth <kproth@users.sourceforge.net>
Ralph Mitchell <rmitchell@eds.com>
Dan Fandrich <dan@coneharvesters.com>
Jean-Philippe Barrette-LaPierre <jpb@rrette.com>
Richard Bramante <RBramante@on.com>
Daniel Kouril <kouril@ics.muni.cz>
Dirk Manske <dm@nettraffic.de>

View File

@@ -10,52 +10,39 @@ TODO
send us patches that improve things! Also check the http://curl.haxx.se/dev
web section for various technical development notes.
All bugs documented in the KNOWN_BUGS document are subject for fixing!
LIBCURL
* Introduce an interface to libcurl that allows applications to more easily
learn what cookies are received. Pushing interface that calls a
callback on each received cookie? Querying interface that asks about
existing cookies? We probably need both. Enable applications to modify
existing cookies as well.
* Make content encoding/decoding internally be made using a filter system.
existing cookies as well. http://curl.haxx.se/dev/COOKIES
* Introduce another callback interface for upload/download that makes one
less copy of data and thus a faster operation.
[http://curl.haxx.se/dev/no_copy_callbacks.txt]
* Add asynchronous name resolving (http://libdenise.sf.net/). This should be
made to work on most of the supported platforms, or otherwise it isn't
really interesting.
* Data sharing. Tell which easy handles within a multi handle that should
share cookies, connection cache, dns cache, ssl session cache. Full
suggestion found here: http://curl.haxx.se/dev/sharing.txt
* Mutexes. By adding mutex callback support, the 'data sharing' mentioned
above can be made between several easy handles running in different threads
too. The actual mutex implementations will be left for the application to
implement, libcurl will merely call 'getmutex' and 'leavemutex' callbacks.
Part of the sharing suggestion at: http://curl.haxx.se/dev/sharing.txt
* More data sharing. curl_share_* functions already exist and work, and they
can be extended to share more.
* Set the SO_KEEPALIVE socket option to make libcurl notice and disconnect
connections that have been idle for a very long time.
* Go through the code and verify that libcurl deals with big files >2GB and
>4GB all over. Bug reports (and source reviews) indicate that it doesn't
currently work properly.
>4GB all over. Bug reports (and source reviews) show that it doesn't
currently work.
* CURLOPT_MAXFILESIZE. Prevent downloads that are larger than the specified
size. CURLE_FILESIZE_EXCEEDED would then be returned. Gautam Mani
requested. That is, the download should not even begin but be aborted
immediately.
* Allow the http_proxy (and other) environment variables to contain user and
password as well in the style: http://proxyuser:proxypasswd@proxy:port
Berend Reitsma suggested.
LIBCURL - multi interface
* Add curl_multi_timeout() to make libcurl's ares-functionality better.
* Make sure we don't ever loop because non-blocking sockets return
EWOULDBLOCK or similar. This applies to FTP command sending, the SSL connection, etc.
@@ -73,52 +60,28 @@ TODO
FTP
* FTP ASCII upload does not follow RFC959 section 3.1.1.1: "The sender
converts the data from an internal character representation to the standard
8-bit NVT-ASCII representation (see the Telnet specification). The
receiver will convert the data from the standard form to his own internal
form."
* Support the most common FTP proxies, Philip Newton provided a list
allegedly from ncftp:
http://curl.haxx.se/mail/archive-2003-04/0126.html
* Make CURLOPT_FTPPORT support an additional port number on the IP/if/name,
like "blabla:[port]" or possibly even "blabla:[portfirst]-[portsecond]".
* FTP ASCII transfers do not follow RFC959. They don't convert the data
accordingly.
* Since USERPWD always override the user and password specified in URLs, we
might need another way to specify user+password for anonymous ftp logins.
* An option to only download remote FTP files if they're newer than the local
one is a good idea, and it would fit right into the same syntax as the
already working http ditto. It of course requires that 'MDTM' works,
and it isn't a standard FTP command.
* Add FTPS support with SSL for the data connection too. This should be made
according to the specs written in draft-murray-auth-ftp-ssl-08.txt,
"Securing FTP with TLS"
according to the specs written in draft-murray-auth-ftp-ssl-11.txt,
"Securing FTP with TLS", valid until September 27th 2003.
http://curl.haxx.se/rfc/draft-murray-auth-ftp-ssl-11.txt
HTTP
* If the "body" of the POST is < MSS it really aught to be sent along with
the headers. More generally, if the last chunk of the POST body is < MSS,
it should be sent with the previous chunk (which may be the POST headers).
So long as any one send is larger than MSS (or there is only one send when
< MSS :), the Nagle Algorithm will not be a problem on any stack where
Nagle is implemented correctly. (pointed out by Rick Jones)
* Authentication: NTLM. Support for that MS crap called NTLM
authentication. MS proxies and servers sometimes require that. Since that
protocol is a proprietary one, it involves reverse engineering and network
sniffing. This should however be a library-based functionality. There are a
few different efforts "out there" to make open source HTTP clients support
this and it should be possible to take advantage of other people's hard
work. http://modntlm.sourceforge.net/ is one. There's a web page at
http://www.innovation.ch/java/ntlm.html that contains detailed reverse-
engineered info.
* RFC2617 compliance, "Digest Access Authentication". A valid test page seems
to exist at: http://hopf.math.nwu.edu/testpage/digest/ And some friendly
person's server source code is available at
http://hopf.math.nwu.edu/digestauth/index.html Then there's the Apache
mod_digest source code too of course. It seems as if Netscape doesn't
support this, and not many servers do, although this is a much better
authentication method than the more common "Basic". Basic sends the
password in cleartext over the network, while this "Digest" method uses a
challenge-response protocol which increases security quite a lot.
* Digest, NTLM and GSS-Negotiate support for HTTP proxies. They all work
on direct-connections to the server.
* Pipelining. Sending multiple requests before the previous one(s) are done.
This could possibly be implemented using the multi interface to queue

View File

@@ -2,7 +2,7 @@
.\" nroff -man curl.1
.\" Written by Daniel Stenberg
.\"
.TH curl 1 "14 Feb 2003" "Curl 7.10.3" "Curl Manual"
.TH curl 1 "8 Aug 2003" "Curl 7.10.7" "Curl Manual"
.SH NAME
curl \- transfer a URL
.SH SYNOPSIS
@@ -10,14 +10,18 @@ curl \- transfer a URL
.I [URL...]
.SH DESCRIPTION
.B curl
is a client to get documents/files from or send documents to a server, using
any of the supported protocols (HTTP, HTTPS, FTP, GOPHER, DICT, TELNET, LDAP
or FILE). The command is designed to work without user interaction or any kind
of interactivity.
is a tool to transfer data from or to a server, using one of the supported
protocols (HTTP, HTTPS, FTP, FTPS, GOPHER, DICT, TELNET, LDAP or FILE). The
command is designed to work without user interaction.
curl offers a busload of useful tricks like proxy support, user
authentication, ftp upload, HTTP post, SSL (https:) connections, cookies, file
transfer resume and more.
transfer resume and more. As you will see below, the amount of features will
make your head spin!
curl is powered by libcurl for all transfer-related features. See
.BR libcurl (3)
for details.
.SH URL
The URL syntax is protocol dependent. You'll find a detailed description in
RFC 2396.
@@ -48,10 +52,8 @@ specified on a single command line and cannot be used between separate curl
invokes.
.SH OPTIONS
.IP "-a/--append"
(FTP)
When used in a ftp upload, this will tell curl to append to the target
file instead of overwriting it. If the file doesn't exist, it will
be created.
(FTP) When used in an FTP upload, this will tell curl to append to the target
file instead of overwriting it. If the file doesn't exist, it will be created.
If this option is used twice, the second one will disable append mode again.
.IP "-A/--user-agent <agent string>"
@@ -63,6 +65,16 @@ surround the string with single quote marks. This can also be set with the
If this option is set more than once, the last one will be the one that's
used.
.IP "--anyauth"
(HTTP) Tells curl to figure out authentication method by itself, and use the
most secure one the remote site claims it supports. This is done by first
doing a request and checking the response-headers, thus inducing an extra
network round-trip. This is used instead of setting a specific authentication
method, which you can do with \fI--digest\fP, \fI--ntlm\fP, and
\fI--negotiate\fP. (Added in 7.10.6)
If this option is used several times, the following occurrences make no
difference.
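A minimal usage sketch with a hypothetical protected URL; curl probes the server first and then picks the strongest scheme it offers:

  curl --anyauth -u name:password http://example.com/protected/page.html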
.IP "-b/--cookie <name=data>"
(HTTP)
Pass the data to the HTTP server as a cookie. It is supposedly the
@@ -90,12 +102,26 @@ also be enforced by using an URL that ends with ";type=A". This option causes
data sent to stdout to be in text mode for win32 systems.
If this option is used twice, the second one will disable ASCII usage.
.IP "--basic"
(HTTP) Tells curl to use HTTP Basic authentication. This is the default and
this option is usually pointless, unless you use it to override a previously
set option that sets a different authentication method (such as \fI--ntlm\fP,
\fI--digest\fP and \fI--negotiate\fP). (Added in 7.10.6)
If this option is used several times, the following occurrences make no
difference.
.IP "--ciphers <list of ciphers>"
(SSL) Specifies which ciphers to use in the connection. The list of ciphers
must be using valid ciphers. Read up on SSL cipher list details on this URL:
.I http://www.openssl.org/docs/apps/ciphers.html (Option added in curl 7.9)
.I http://www.openssl.org/docs/apps/ciphers.html
If this option is used several times, the last one will override the others.
.IP "--compressed"
(HTTP) Request a compressed response using one of the algorithms libcurl
supports, and return the uncompressed document. If this option is used and
the server sends an unsupported encoding, Curl will report an error.
If this option is used several times, each occurrence will toggle it on/off.
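For example (hypothetical URL), asking the server for a compressed response and saving the decompressed result:

  curl --compressed http://example.com/page.html -o page.html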
.IP "--connect-timeout <seconds>"
Maximum time in seconds that you allow the connection to the server to take.
This only limits the connection phase, once curl has connected this option is
@@ -108,7 +134,13 @@ operation. Curl writes all cookies previously read from a specified file as
well as all cookies received from remote server(s). If no cookies are known,
no file will be written. The file will be written using the Netscape cookie
file format. If you set the file name to a single dash, "-", the cookies will
be written to stdout. (Option added in curl 7.9)
be written to stdout.
.B NOTE
If the cookie jar can't be created or written to, the whole curl operation
won't fail or even report an error clearly. Using -v will get a warning
displayed, but that is the only visible feedback you get about this possibly
lethal situation.
If this option is used several times, the last specified file name will be
used.
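Two hedged examples with hypothetical URLs and paths: the first saves received cookies normally; the second points the jar at an unwritable location, and only the -v output reveals that the save failed:

  curl -c cookies.txt http://example.com/
  curl -v -c /nonexistent-dir/cookies.txt http://example.com/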
@@ -122,7 +154,7 @@ Use "-C -" to tell curl to automatically find out where/how to resume the
transfer. It then uses the given output/input files to figure that out.
If this option is used several times, the last one will be used.
.IP "---create-dirs"
.IP "--create-dirs"
When used in conjunction with the -o option, curl will create the necessary
local directory hierarchy as needed.
.IP "--crlf"
@@ -134,7 +166,7 @@ If this option is used twice, the second will again disable crlf converting.
that can emulate as if a user has filled in a HTML form and pressed the submit
button. Note that the data is sent exactly as specified with no extra
processing (with all newlines cut off). The data is expected to be
"url-encoded". This will cause curl to pass the data to the server using the
\&"url-encoded". This will cause curl to pass the data to the server using the
content-type application/x-www-form-urlencoded. Compare to -F. If more than
one -d/--data option is used on the same command line, the data pieces
specified will be merged together with a separating &-letter. Thus, using '-d
@@ -145,7 +177,7 @@ If you start the data with the letter @, the rest should be a file name to
read the data from, or - if you want curl to read the data from stdin. The
contents of the file must already be url-encoded. Multiple files can also be
specified. Posting data from a file named 'foobar' would thus be done with
"--data @foobar".
\&"--data @foobar".
To post data purely binary, you should instead use the --data-binary option.
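Two illustrative invocations against a hypothetical form handler: inline url-encoded data, and the same data read from a file via the @ prefix:

  curl -d "name=daniel&tool=curl" http://example.com/form.cgi
  curl -d @params.txt http://example.com/form.cgi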
@@ -166,9 +198,27 @@ want to post a binary file without the strip-newlines feature of the
If this option is used several times, the ones following the first will
append data.
.IP "--digest"
(HTTP) Enables HTTP Digest authentication. This is an authentication scheme that
prevents the password from being sent over the wire in clear text. Use this in
combination with the normal -u/--user option to set user name and
password. See also \fI--ntlm\fP, \fI--negotiate\fP and \fI--anyauth\fP for
related options. (Added in curl 7.10.6)
If this option is used several times, the following occurrences make no
difference.
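A hedged example with a hypothetical URL, combining --digest with -u as described above:

  curl --digest -u name:password http://example.com/protected/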
.IP "--disable-eprt"
(FTP) Tell curl to disable the use of the EPRT and LPRT commands when doing
active FTP transfers. Curl will normally always first attempt to use EPRT,
then LPRT before using PORT, but with this option, it will use PORT right
away. EPRT and LPRT are extensions to the original FTP protocol, and may not work
on all servers, but they enable more functionality in a better way than the
traditional PORT command. (Added in 7.10.5)
If this option is used several times, each occurrence will toggle this on/off.
.IP "--disable-epsv"
(FTP) Tell curl to disable the use of the EPSV command when doing passive FTP
downloads. Curl will normally always first attempt to use EPSV before PASV,
transfers. Curl will normally always first attempt to use EPSV before PASV,
but with this option, it will not try using EPSV.
If this option is used several times, each occurrence will toggle this on/off.
@@ -241,6 +291,12 @@ normal cases when a HTTP server fails to deliver a document, it returns a HTML
document stating so (which often also describes why and more). This flag will
prevent curl from outputting that and fail silently instead.
If this option is used twice, the second will again disable silent failure.
.IP "--ftp-create-dirs"
(FTP) When an FTP URL/operation uses a path that doesn't currently exist on
the server, the standard behaviour of curl is to fail. Using this option, curl
will instead attempt to create missing directories. (Added in 7.10.7)
If this option is used twice, the second will again disable directory creation.
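A sketch of an upload into a not-yet-existing directory tree, assuming a hypothetical FTP server and using -T (curl's upload-file option):

  curl --ftp-create-dirs -T local.txt ftp://ftp.example.com/new/dir/local.txt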
.IP "-F/--form <name=content>"
(HTTP) This lets curl emulate a filled in form in which a user has pressed the
@@ -256,12 +312,18 @@ Example, to send your password file to the server, where
\&'password' is the name of the form-field to which /etc/passwd will be the
input:
.B curl
-F password=@/etc/passwd www.mypasswords.com
\fBcurl\fP -F password=@/etc/passwd www.mypasswords.com
To read the file's content from stdin instead of a file, use - where the file
name should've been. This goes for both @ and < constructs.
You can also tell curl what Content-Type to use for the file upload part, by
using 'type=', in a manner similar to:
\fBcurl\fP -F "web=@index.html;type=text/html" url.com
See further examples and details in the MANUAL.
This option can be used multiple times.
.IP "-g/--globoff"
This option switches off the "URL globbing parser". When you set this option,
@@ -272,7 +334,7 @@ contents but they should be encoded according to the URI standard.
When used, this option will make all data specified with -d/--data or
--data-binary to be used in a HTTP GET request instead of the POST request
that otherwise would be used. The data will be appended to the URL with a '?'
separator. (Option added in curl 7.9)
separator.
If used in combination with -I, the POST data will instead be appended to the
URL with a HEAD request.
@@ -381,9 +443,18 @@ If this option is used twice, the second will again disable list only.
(HTTP/HTTPS) If the server reports that the requested page has a different
location (indicated with the header line Location:) this flag will let curl
attempt the request again on the new place. If used together with -i or -I,
headers from all requested pages will be shown. If this flag is used when
making a HTTP POST, curl will automatically switch to GET after the initial
POST has been done.
headers from all requested pages will be shown. If authentication is used,
curl will only send its credentials to the initial host, so if a redirect
takes curl to a different host, it won't intercept the user+password. See also
\fI--location-trusted\fP on how to change this.
If this option is used twice, the second will again disable location following.
.IP "--location-trusted"
(HTTP/HTTPS) Like \fI--location\fP, but will allow sending the name + password
to all hosts that the site may redirect to. This may or may not introduce a
security breach if the site redirects you to a site to which you'll send your
authentication info (which is plaintext in the case of HTTP Basic
authentication).
If this option is used twice, the second will again disable location following.
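Two hedged examples with hypothetical URLs: plain redirect following, and following redirects while allowing the credentials to be passed on to whatever host the redirect leads to:

  curl -L http://example.com/moved-page.html
  curl -L --location-trusted -u name:password http://example.com/protected/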
.IP "-m/--max-time <seconds>"
@@ -417,6 +488,19 @@ to allow curl to ftp to the machine host.domain.com with user name
.B "machine host.domain.com login myself password secret"
If this option is used twice, the second will again disable netrc usage.
.IP "--negotiate"
(HTTP) Enables GSS-Negotiate authentication. The GSS-Negotiate method was
designed by Microsoft and is used in their web applications. It is primarily
meant as a support for Kerberos5 authentication but may also be used along
with other authentication methods. For more information see IETF draft
draft-brezak-spnego-http-04.txt. (Added in 7.10.6)
\fBNOTE\fP that this option requires that the library was built with GSSAPI
support. This is not very common. Use \fIcurl --version\fP to see if your
version supports GSS-Negotiate.
If this option is used several times, the following occurrences make no
difference.
.IP "-N/--no-buffer"
Disables the buffering of the output stream. In normal work situations, curl
will use a standard buffered output stream that will have the effect that it
@@ -424,6 +508,19 @@ will output the data in chunks, not necessarily exactly when the data arrives.
Using this option will disable that buffering.
If this option is used twice, the second will again switch on buffering.
.IP "--ntlm"
(HTTP) Enables NTLM authentication. The NTLM authentication method was
designed by Microsoft and is used by IIS web servers. It is a proprietary
protocol, reverse-engineered by clever people and implemented in curl based
on their efforts. This kind of behavior should not be endorsed, you should
encourage everyone who uses NTLM to switch to a public and documented
authentication method instead. Such as Digest. (Added in 7.10.6)
\fBNOTE\fP that this option requires that the library was built with SSL
support. Use \fIcurl --version\fP to see if your version supports NTLM.
If this option is used several times, the following occurrences make no
difference.
.IP "-o/--output <file>"
Write output to <file> instead of stdout. If you are using {} or [] to fetch
multiple documents, you can use '#' followed by a number in the <file>
@@ -440,8 +537,8 @@ You may use this option as many times as you have number of URLs.
See also the --create-dirs option to create the local directories dynamically.
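An illustrative glob with hypothetical hosts; '#1' in the output name is replaced with the current glob string, producing one_file.html and two_file.html:

  curl "http://{one,two}.example.com/file.html" -o "#1_file.html"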
.IP "-O/--remote-name"
Write output to a local file named like the remote file we get. (Only
the file part of the remote file is used, the path is cut off.)
Write output to a local file named like the remote file we get. (Only the file
part of the remote file is used, the path is cut off.)
You may use this option as many times as you have number of URLs.
.IP "-p/--proxytunnel"
@@ -580,7 +677,7 @@ descriptive information, to the given output file. Use "-" as filename to have
the output sent to stdout.
If this option is used several times, the last one will be used. (Added in
curl 7.9.7)
7.9.7)
.IP "--trace-ascii <file>"
Enables a full trace dump of all incoming and outgoing data, including
descriptive information, to the given output file. Use "-" as filename to have
@@ -591,12 +688,15 @@ the ASCII part of the dump. It makes smaller output that might be easier to
read for untrained humans.
If this option is used several times, the last one will be used. (Added in
curl 7.9.7)
7.9.7)
.IP "-u/--user <user:password>"
Specify user and password to use when fetching. Read the MANUAL for detailed
examples of how to use this. If no password is specified, curl will ask for it
interactively.
You can also use the --digest option to enable Digest authentication when
communicating with HTTP 1.1 servers.
If this option is used several times, the last one will be used.
.IP "-U/--proxy-user <user:password>"
Specify user and password to use for Proxy authentication. If no
@@ -626,8 +726,17 @@ If you think this option still doesn't give you enough details, consider using
If this option is used twice, the second will again disable verbose.
.IP "-V/--version"
Displays the full version of curl, libcurl and other 3rd party libraries
linked with the executable.
Displays information about curl and the libcurl version it uses.
The first line includes the full version of curl, libcurl and other 3rd party
libraries linked with the executable.
The second line (starts with "Protocols:") shows all protocols that libcurl
reports to support.
The third line (starts with "Features:") shows specific features libcurl
reports to offer.
.IP "-w/--write-out <format>"
Defines what to display after a completed and successful operation. The format
is a string that may contain plain text mixed with any number of variables. The
@@ -706,7 +815,7 @@ at port 1080.
This option overrides existing environment variables that set the proxy to
use. If there's an environment variable setting a proxy, you can set proxy to
"" to override it.
\&"" to override it.
\fBNote\fP that all operations that are performed over a HTTP proxy will
transparently be converted to HTTP. It means that certain protocol-specific
@@ -928,8 +1037,6 @@ Unrecognized transfer encoding
.IP XX
There will appear more error codes here in future releases. The existing ones
are meant to never change.
.SH BUGS
If you do find bugs, mail them to curl-bug@haxx.se.
.SH AUTHORS / CONTRIBUTORS
Daniel Stenberg is the main author, but the whole list of contributors is
found in the separate THANKS file.

View File

@@ -9,7 +9,7 @@ EXTRA_DIST = README curlgtk.c sepheaders.c simple.c postit2.c \
multithread.c getinmemory.c ftpupload.c httpput.c \
simplessl.c ftpgetresp.c http-post.c post-callback.c \
multi-app.c multi-double.c multi-single.c multi-post.c \
fopen.c simplepost.c
fopen.c simplepost.c makefile.dj
all:
@echo "done"

View File

@@ -1,28 +1,53 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* $Id$
* This example source code introduces a C library buffered I/O interface to
* URL reads. It supports fopen(), fread(), fgets(), feof(), fclose() and
* rewind(). Supported functions have identical prototypes to their normal C
* lib namesakes and are preceded by url_ .
*
* This example source code introduces an fopen()/fread()/fclose() emulation
* for URL reads. Using an approach similar to this, you could replace your
* program's fopen() with this url_fopen() and fread() with url_fread() and
* it should be possible to read remote streams instead of (only) local files.
* Using this code you can replace your program's fopen() with url_fopen()
* and fread() with url_fread() and it becomes possible to read remote streams
* instead of (only) local files. Local files (i.e. those that can be directly
* fopened) will drop back to using the underlying clib implementations
*
* See the main() function at the bottom that shows a tiny app in action.
* See the main() function at the bottom that shows an app that retrieves from a
* specified url using fgets() and fread() and saves as two output files.
*
* This source code is a proof of concept. It will need further attention to
* become production-use useful and solid.
* Copyright (c) 2003 Simtec Electronics
*
* Re-implemented by Vincent Sanders <vince@kyllikki.org> with extensive
* reference to original curl example code
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This example requires libcurl 7.9.7 or later.
*/
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <stdlib.h>
#include <errno.h>
#include <curl/curl.h>
@@ -30,206 +55,511 @@
#error "too old libcurl version, get the latest!"
#endif
struct data {
int type;
union {
CURL *curl;
FILE *file;
} handle;
/* This is the documented biggest possible buffer chunk we can get from
libcurl in one single callback! */
char buffer[CURL_MAX_WRITE_SIZE];
enum fcurl_type_e { CFTYPE_NONE=0, CFTYPE_FILE=1, CFTYPE_CURL=2 };
char *readptr; /* read from here */
int bytes; /* bytes available from read pointer */
struct fcurl_data
{
enum fcurl_type_e type; /* type of handle */
union {
CURL *curl;
FILE *file;
} handle; /* handle */
CURLMcode m; /* stored from a previous url_fread() */
char *buffer; /* buffer to store cached data*/
int buffer_len; /* currently allocated buffers length */
int buffer_pos; /* end of data in buffer*/
int still_running; /* Is background url fetch still in progress */
};
typedef struct data URL_FILE;
typedef struct fcurl_data URL_FILE;
/* exported functions */
URL_FILE *url_fopen(char *url,const char *operation);
int url_fclose(URL_FILE *file);
int url_feof(URL_FILE *file);
size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file);
char * url_fgets(char *ptr, int size, URL_FILE *file);
void url_rewind(URL_FILE *file);
/* we use a global one for convenience */
CURLM *multi_handle;
static
size_t write_callback(char *buffer,
size_t size,
size_t nitems,
void *userp)
/* curl calls this routine to get more data */
static size_t
write_callback(char *buffer,
size_t size,
size_t nitems,
void *userp)
{
URL_FILE *url = (URL_FILE *)userp;
size *= nitems;
char *newbuff;
int rembuff;
memcpy(url->readptr, buffer, size);
url->readptr += size;
url->bytes += size;
URL_FILE *url = (URL_FILE *)userp;
size *= nitems;
fprintf(stderr, "callback %d size bytes\n", size);
rembuff=url->buffer_len - url->buffer_pos; /* remaining space in buffer */
return size;
if(size > rembuff)
{
/* not enough space in buffer */
newbuff=realloc(url->buffer,url->buffer_len + (size - rembuff));
if(newbuff==NULL)
{
fprintf(stderr,"callback buffer grow failed\n");
size=rembuff;
}
else
{
/* realloc succeeded, increase buffer size */
url->buffer_len+=size - rembuff;
url->buffer=newbuff;
/*printf("Callback buffer grown to %d bytes\n",url->buffer_len);*/
}
}
memcpy(&url->buffer[url->buffer_pos], buffer, size);
url->buffer_pos += size;
/*fprintf(stderr, "callback %d size bytes\n", size);*/
return size;
}
URL_FILE *url_fopen(char *url, char *operation)
/* use to attempt to fill the read buffer up to requested number of bytes */
static int
curl_fill_buffer(URL_FILE *file,int want,int waittime)
{
/* this code could check for URLs or types in the 'url' and
basically use the real fopen() for standard files */
fd_set fdread;
fd_set fdwrite;
fd_set fdexcep;
int maxfd;
struct timeval timeout;
int rc;
URL_FILE *file;
int still_running;
(void)operation;
/* only attempt to fill buffer if transactions still running and buffer
* doesn't exceed required size already
*/
if((!file->still_running) || (file->buffer_pos > want))
return 0;
file = (URL_FILE *)malloc(sizeof(URL_FILE));
if(!file)
return NULL;
/* attempt to fill buffer */
do
{
FD_ZERO(&fdread);
FD_ZERO(&fdwrite);
FD_ZERO(&fdexcep);
memset(file, 0, sizeof(URL_FILE));
/* set a suitable timeout to fail on */
timeout.tv_sec = 60; /* 1 minute */
timeout.tv_usec = 0;
file->type = 1; /* marked as URL, use 0 for plain file */
file->handle.curl = curl_easy_init();
/* get file descriptors from the transfers */
curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
curl_easy_setopt(file->handle.curl, CURLOPT_FILE, file);
curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, FALSE);
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
if(!multi_handle)
multi_handle = curl_multi_init();
switch(rc) {
case -1:
/* select error */
break;
curl_multi_add_handle(multi_handle, file->handle.curl);
case 0:
break;
while(CURLM_CALL_MULTI_PERFORM ==
curl_multi_perform(multi_handle, &still_running));
default:
/* timeout or readable/writable sockets */
/* note we *could* be more efficient and not wait for
* CURLM_CALL_MULTI_PERFORM to clear here and check it on re-entry
* but that gets messy */
while(curl_multi_perform(multi_handle, &file->still_running) ==
CURLM_CALL_MULTI_PERFORM);
/* if still_running would be 0 now, we should return NULL */
return file;
break;
}
} while(file->still_running && (file->buffer_pos < want));
return 1;
}
void url_fclose(URL_FILE *file)
/* use to remove want bytes from the front of a files buffer */
static int
curl_use_buffer(URL_FILE *file,int want)
{
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* sort out buffer */
if((file->buffer_pos - want) <=0)
{
/* ditch buffer - write will recreate */
if(file->buffer)
free(file->buffer);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
file->buffer=NULL;
file->buffer_pos=0;
file->buffer_len=0;
}
else
{
/* move rest down make it available for later */
memmove(file->buffer,
&file->buffer[want],
(file->buffer_pos - want));
file->buffer_pos -= want;
}
return 0;
}
size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
URL_FILE *
url_fopen(char *url,const char *operation)
{
fd_set fdread;
fd_set fdwrite;
fd_set fdexcep;
int maxfd;
struct timeval timeout;
int rc;
int still_running = 0;
/* this code could check for URLs or types in the 'url' and
basically use the real fopen() for standard files */
if(!file->bytes) { /* no data available at this point */
URL_FILE *file;
(void)operation;
file->readptr = file->buffer; /* reset read pointer */
file = (URL_FILE *)malloc(sizeof(URL_FILE));
if(!file)
return NULL;
if(CURLM_CALL_MULTI_PERFORM == file->m) {
while(CURLM_CALL_MULTI_PERFORM ==
curl_multi_perform(multi_handle, &still_running)) {
if(file->bytes) {
printf("(fread) WOAH! THis happened!\n");
break;
}
}
if(!still_running) {
printf("DONE RUNNING AROUND!\n");
return 0;
}
memset(file, 0, sizeof(URL_FILE));
if((file->handle.file=fopen(url,operation)))
{
file->type = CFTYPE_FILE; /* marked as FILE */
}
else
{
file->type = CFTYPE_CURL; /* marked as URL */
file->handle.curl = curl_easy_init();
curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
curl_easy_setopt(file->handle.curl, CURLOPT_FILE, file);
curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, FALSE);
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
if(!multi_handle)
multi_handle = curl_multi_init();
curl_multi_add_handle(multi_handle, file->handle.curl);
/* lets start the fetch */
while(curl_multi_perform(multi_handle, &file->still_running) ==
CURLM_CALL_MULTI_PERFORM );
if((file->buffer_pos == 0) && (!file->still_running))
{
/* if still_running is 0 now, we should return NULL */
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
free(file);
file = NULL;
}
}
return file;
}
int
url_fclose(URL_FILE *file)
{
int ret=0;/* default is good return */
switch(file->type)
{
case CFTYPE_FILE:
ret=fclose(file->handle.file); /* passthrough */
break;
case CFTYPE_CURL:
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
break;
default: /* unknown or unsupported type - oh dear */
ret=EOF;
errno=EBADF;
break;
}
if(file->buffer)
free(file->buffer);/* free any allocated buffer space */
free(file);
return ret;
}
int
url_feof(URL_FILE *file)
{
int ret=0;
switch(file->type)
{
case CFTYPE_FILE:
ret=feof(file->handle.file);
break;
case CFTYPE_CURL:
if((file->buffer_pos == 0) && (!file->still_running))
ret = 1;
break;
default: /* unknown or unsupported type - oh dear */
ret=-1;
errno=EBADF;
break;
}
return ret;
}
size_t
url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
{
size_t want;
switch(file->type)
{
case CFTYPE_FILE:
want=fread(ptr,size,nmemb,file->handle.file);
break;
case CFTYPE_CURL:
want = nmemb * size;
curl_fill_buffer(file,want,1);
/* check if there's data in the buffer - if not curl_fill_buffer()
* either errored or EOF */
if(!file->buffer_pos)
return 0;
/* ensure only available data is considered */
if(file->buffer_pos < want)
want = file->buffer_pos;
/* xfer data to caller */
memcpy(ptr, file->buffer, want);
curl_use_buffer(file,want);
want = want / size; /* number of items - nb correct op - checked
* with glibc code*/
/*printf("(fread) return %d bytes %d left\n", want,file->buffer_pos);*/
break;
default: /* unknown or unsupported type - oh dear */
want=0;
errno=EBADF;
break;
}
return want;
}
char *
url_fgets(char *ptr, int size, URL_FILE *file)
{
int want = size - 1;/* always need to leave room for zero termination */
int loop;
switch(file->type)
{
case CFTYPE_FILE:
ptr = fgets(ptr,size,file->handle.file);
break;
case CFTYPE_CURL:
curl_fill_buffer(file,want,1);
/* check if there's data in the buffer - if not fill either errored or
* EOF */
if(!file->buffer_pos)
return NULL;
/* ensure only available data is considered */
if(file->buffer_pos < want)
want = file->buffer_pos;
/*buffer contains data */
/* look for newline or eof */
for(loop=0;loop < want;loop++)
{
if(file->buffer[loop] == '\n')
{
want=loop+1;/* include newline */
break;
}
}
/* xfer data to caller */
memcpy(ptr, file->buffer, want);
ptr[want]=0;/* always null terminate */
curl_use_buffer(file,want);
/*printf("(fgets) return %d bytes %d left\n", want,file->buffer_pos);*/
break;
default: /* unknown or unsupported type - oh dear */
ptr=NULL;
errno=EBADF;
break;
}
return ptr;/*success */
}
void
url_rewind(URL_FILE *file)
{
switch(file->type)
{
case CFTYPE_FILE:
rewind(file->handle.file); /* passthrough */
break;
case CFTYPE_CURL:
/* halt transaction */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* restart */
curl_multi_add_handle(multi_handle, file->handle.curl);
/* ditch buffer - write will recreate - resets stream pos*/
if(file->buffer)
free(file->buffer);
file->buffer=NULL;
file->buffer_pos=0;
file->buffer_len=0;
break;
default: /* unknown or unsupported type - oh dear */
break;
}
}
/* Small main program to retrieve from a URL using fgets and fread saving the
* output to two test files (note the fgets method will corrupt binary files if
* they contain 0 chars) */
int
main(int argc, char *argv[])
{
URL_FILE *handle;
FILE *outf;
int nread;
char buffer[256];
char *url;
if(argc < 2)
{
url="http://192.168.7.3/testfile";/* default to testurl */
}
else
{
url=argv[1];/* use passed url */
}
/* copy from url line by line with fgets */
outf=fopen("fgets.test","w+");
if(!outf)
{
perror("couldnt open fgets output file\n");
return 1;
}
handle = url_fopen(url, "r");
if(!handle)
{
printf("couldn't url_fopen()\n");
fclose(outf);
return 2;
}
while(!url_feof(handle))
{
url_fgets(buffer,sizeof(buffer),handle);
fwrite(buffer,1,strlen(buffer),outf);
}
url_fclose(handle);
fclose(outf);
/* Copy from url with fread */
outf=fopen("fread.test","w+");
if(!outf)
{
perror("couldnt open fread output file\n");
return 1;
}
handle = url_fopen("testfile", "r");
if(!handle) {
printf("couldn't url_fopen()\n");
fclose(outf);
return 2;
}
do {
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
} while(nread);
FD_ZERO(&fdread);
FD_ZERO(&fdwrite);
FD_ZERO(&fdexcep);
/* set a suitable timeout to fail on */
timeout.tv_sec = 500; /* 5 minutes */
timeout.tv_usec = 0;
url_fclose(handle);
/* get file descriptors from the transfers */
curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
fclose(outf);
rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
switch(rc) {
case -1:
/* select error */
break;
case 0:
break;
default:
/* timeout or readable/writable sockets */
printf("select() returned %d!\n", rc);
do {
file->m = curl_multi_perform(multi_handle, &still_running);
if(file->bytes)
/* we have received data, return that now */
break;
} while(CURLM_CALL_MULTI_PERFORM == file->m);
/* Test rewind */
outf=fopen("rewind.test","w+");
if(!outf)
{
perror("couldnt open fread output file\n");
return 1;
}
if(!still_running)
printf("DONE RUNNING AROUND!\n");
break;
}
} while(still_running && (file->bytes <= 0));
}
else
printf("(fread) Skip network read\n");
handle = url_fopen("testfile", "r");
if(!handle) {
printf("couldn't url_fopen()\n");
fclose(outf);
return 2;
}
if(file->bytes) {
/* data already available, return that */
int want = size * nmemb;
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
url_rewind(handle);
if(file->bytes < want)
want = file->bytes;
buffer[0]='\n';
fwrite(buffer,1,1,outf);
memcpy(ptr, file->readptr, want);
file->readptr += want;
file->bytes -= want;
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
printf("(fread) return %d bytes\n", want);
return want;
}
return 0; /* no data available to return */
}
int main(int argc, char *argv[])
{
URL_FILE *handle;
int nread;
char buffer[256];
(void)argc;
(void)argv;
handle = url_fopen("http://curl.haxx.se/", "r");
if(!handle) {
printf("couldn't url_fopen()\n");
}
do {
nread = url_fread(buffer, sizeof(buffer), 1, handle);
printf("We got: %d bytes\n", nread);
} while(nread);
url_fclose(handle);
return 0;
url_fclose(handle);
fclose(outf);
return 0;/* all done */
}
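As a quick illustration of how an application might adopt these wrappers, here is a minimal sketch (not part of the patch above) that reads a resource line by line. It assumes the includes and url_* prototypes from the example; the helper name is hypothetical.

/* hypothetical helper built on the url_* wrappers above */
int print_remote_lines(char *url)
{
  char line[512];
  URL_FILE *in = url_fopen(url, "r"); /* falls back to fopen() for local names */
  if(!in)
    return 1;
  while(!url_feof(in)) {
    if(url_fgets(line, sizeof(line), in))
      fputs(line, stdout); /* url_fgets() always null terminates */
  }
  url_fclose(in);
  return 0;
}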

docs/examples/makefile.dj Normal file
View File

@@ -0,0 +1,31 @@
#
# Adapted for djgpp / Watt-32 / DOS by
# Gisle Vanem <giva@bgnett.no>
#
include ../../packages/DOS/common.dj
CFLAGS += -I../../include
LIBS = ../../lib/libcurl.a
ifeq ($(USE_SSL),1)
LIBS += $(OPENSSL_ROOT)/lib/libssl.a $(OPENSSL_ROOT)/lib/libcrypt.a
endif
LIBS += $(WATT32_ROOT)/lib/libwatt.a $(ZLIB_ROOT)/libz.a
PROGRAMS = fopen.exe ftpget.exe ftpgetre.exe ftpuploa.exe getinmem.exe \
http-pos.exe httpput.exe multi-ap.exe multi-do.exe \
multi-po.exe multi-si.exe persista.exe post-cal.exe \
postit2.exe sepheade.exe simple.exe simpless.exe
all: $(PROGRAMS)
.c.exe:
$(CC) $(CFLAGS) -o $@ $^ $(LIBS)
@echo
clean:
rm -f $(PROGRAMS)

View File

@@ -40,7 +40,7 @@ int main(int argc, char **argv)
FILE *headerfile;
const char *pCertFile = "testcert.pem";
const char *pCACertFile="cacert.pem"
const char *pCACertFile="cacert.pem";
const char *pKeyName;
const char *pKeyType;

View File

@@ -28,7 +28,6 @@ man_MANS = \
curl_mprintf.3 \
curl_global_init.3 \
curl_global_cleanup.3 \
libcurl.3 \
curl_multi_add_handle.3 \
curl_multi_cleanup.3 \
curl_multi_fdset.3 \
@@ -36,7 +35,11 @@ man_MANS = \
curl_multi_init.3 \
curl_multi_perform.3 \
curl_multi_remove_handle.3 \
curl_share_cleanup.3 curl_share_init.3 curl_share_setopt.3 \
libcurl.3 \
libcurl-easy.3 \
libcurl-multi.3 \
libcurl-share.3 \
libcurl-errors.3
HTMLPAGES = \
@@ -63,7 +66,6 @@ HTMLPAGES = \
curl_mprintf.html \
curl_global_init.html \
curl_global_cleanup.html \
libcurl.html \
curl_multi_add_handle.html \
curl_multi_cleanup.html \
curl_multi_fdset.html \
@@ -71,9 +73,12 @@ HTMLPAGES = \
curl_multi_init.html \
curl_multi_perform.html \
curl_multi_remove_handle.html \
curl_share_cleanup.html curl_share_init.html curl_share_setopt.html \
libcurl.html \
libcurl-multi.html \
libcurl-errors.html \
index.html
libcurl-easy.html \
libcurl-share.html \
libcurl-errors.html
PDFPAGES = \
curl_easy_cleanup.pdf \
@@ -99,7 +104,6 @@ PDFPAGES = \
curl_mprintf.pdf \
curl_global_init.pdf \
curl_global_cleanup.pdf \
libcurl.pdf \
curl_multi_add_handle.pdf \
curl_multi_cleanup.pdf \
curl_multi_fdset.pdf \
@@ -107,26 +111,31 @@ PDFPAGES = \
curl_multi_init.pdf \
curl_multi_perform.pdf \
curl_multi_remove_handle.pdf \
curl_share_cleanup.pdf curl_share_init.pdf curl_share_setopt.pdf \
libcurl.pdf \
libcurl-multi.pdf \
libcurl-easy.pdf \
libcurl-share.pdf \
libcurl-errors.pdf
EXTRA_DIST = $(man_MANS) $(HTMLPAGES) $(PDFPAGES)
CLEANFILES = $(HTMLPAGES) $(PDFPAGES)
EXTRA_DIST = $(man_MANS) $(HTMLPAGES) index.html $(PDFPAGES)
MAN2HTML= gnroff -man $< | man2html >$@
SUFFIXES = .1 .3 .html
SUFFIXES = .3 .html
html: $(HTMLPAGES)
.3.html:
$(MAN2HTML)
.1.html:
$(MAN2HTML)
pdf: $(PDFPAGES)
pdf:
for file in $(man_MANS); do \
foo=`echo $$file | sed -e 's/\.[0-9]$$//g'`; \
groff -Tps -man $$file >$$foo.ps; \
ps2pdf $$foo.ps $$foo.pdf; \
done
.3.pdf:
@(foo=`echo $@ | sed -e 's/\.[0-9]$$//g'`; \
groff -Tps -man $< >$$foo.ps; \
ps2pdf $$foo.ps $@; \
rm $$foo.ps; \
echo "converted $< to $@")

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_easy_init 3 "25 Apr 2002" "libcurl 7.9.7" "libcurl Manual"
.TH curl_easy_getinfo 3 "25 Apr 2002" "libcurl 7.9.7" "libcurl Manual"
.SH NAME
curl_easy_getinfo - Extract information from a curl session (added in 7.4)
.SH SYNOPSIS
@@ -78,7 +78,8 @@ uploaded.
.TP
.B CURLINFO_SIZE_DOWNLOAD
Pass a pointer to a double to receive the total amount of bytes that were
downloaded.
downloaded. The amount is only for the latest transfer and will be reset again
for each new transfer.
.TP
.B CURLINFO_SPEED_DOWNLOAD
Pass a pointer to a double to receive the average download speed that curl

View File

@@ -1,7 +1,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_easy_setopt 3 "3 Dec 2002" "libcurl 7.10.3" "libcurl Manual"
.TH curl_easy_setopt 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_easy_setopt - set options for a curl easy handle
.SH SYNOPSIS
@@ -75,7 +75,7 @@ of bytes actually taken care of. If that amount differs from the amount passed
to your function, it'll signal an error to the library and it will abort the
transfer and return \fICURLE_WRITE_ERROR\fP.
Set the \fIstream\fP argument with the \fBCURLOPT_FILE\fP option.
Set the \fIstream\fP argument with the \fBCURLOPT_WRITEDATA\fP option.
\fBNOTE:\fP you will be passed as much data as possible in all invokes, but
you cannot possibly make any assumptions. It may be one byte, it may be
@@ -143,12 +143,11 @@ operation and an error (CURLE_BAD_PASSWORD_ENTERED) will be returned.
is a zero-terminated string that is text that prefixes the input request.
\fIbuffer\fP is a pointer to data where the entered password should be stored
and \fIbuflen\fP is the maximum number of bytes that may be written in the
buffer. (Added in 7.4.2)
buffer.
.TP
.B CURLOPT_PASSWDDATA
Pass a void * to whatever data you want. The passed pointer will be the first
argument sent to the specified \fICURLOPT_PASSWDFUNCTION\fP function. (Added in
7.4.2)
argument sent to the specified \fICURLOPT_PASSWDFUNCTION\fP function.
.TP
.B CURLOPT_HEADERFUNCTION
Function pointer that should match the following prototype: \fIsize_t
@@ -161,7 +160,7 @@ multiplied with \fInmemb\fP. The pointer named \fIstream\fP will be the one
you passed to libcurl with the \fICURLOPT_WRITEHEADER\fP option. Return the
number of bytes actually written or return -1 to signal error to the library
(it will cause it to abort the transfer with a \fICURLE_WRITE_ERROR\fP return
code). (Added in 7.7.2)
code).
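For reference, a small sketch of a callback matching the prototype described above; this is an illustration, not part of the patch, and the handle setup around it is assumed.

#include <stdio.h>
#include <curl/curl.h>

/* matches size_t function(void *ptr, size_t size, size_t nmemb, void *stream) */
static size_t header_callback(void *ptr, size_t size, size_t nmemb, void *stream)
{
  /* write the header data to the FILE * supplied via CURLOPT_WRITEHEADER */
  return fwrite(ptr, size, nmemb, (FILE *)stream);
}

/* on an already created easy handle:
   curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, header_callback);
   curl_easy_setopt(curl, CURLOPT_WRITEHEADER, headerfile);
*/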
.TP
.B CURLOPT_WRITEHEADER
Pass a pointer to be used to write the header part of the received data to. If
@@ -248,23 +247,23 @@ default. (Added in 7.10)
Set the parameter to non-zero to get the library to tunnel all operations
through a given HTTP proxy. Note that there is a big difference between using
a proxy and tunneling through it. If you don't know what this means, you
probably don't want this tunneling option. (Added in 7.3)
probably don't want this tunneling option.
.TP
.B CURLOPT_INTERFACE
Pass a char * as parameter. This sets the interface name to use as outgoing
network interface. The name can be an interface name, an IP address or a host
name. (Added in 7.3)
name.
.TP
.B CURLOPT_DNS_CACHE_TIMEOUT
Pass a long, this sets the timeout in seconds. Name resolves will be kept in
memory for this number of seconds. Set to zero (0) to completely disable
caching, or set to -1 to make the cached entries remain forever. By default,
libcurl caches info for 60 seconds. (Added in 7.9.3)
libcurl caches info for 60 seconds.
.TP
.B CURLOPT_DNS_USE_GLOBAL_CACHE
Pass a long. If the value is non-zero, it tells curl to use a global DNS cache
that will survive between easy handle creations and deletions. This is not
thread-safe and this will use a global variable. (Added in 7.9.3)
thread-safe and this will use a global variable.
.TP
.B CURLOPT_BUFFERSIZE
Pass a long specifying your preferred size for the receive buffer in libcurl.
@@ -272,7 +271,7 @@ The main point of this would be that the write callback gets called more often
and with smaller chunks. This is just treated as a request, not an order. You
cannot be guaranteed to actually get the given size. (Added in 7.10)
.PP
.SH NAMES and PASSWORDS OPTIONS
.SH NAMES and PASSWORDS OPTIONS (Authentication)
.TP 0.4i
.B CURLOPT_NETRC
This parameter controls the preference of libcurl between using user names and
@@ -322,23 +321,87 @@ prompt function.
When using HTTP and CURLOPT_FOLLOWLOCATION, libcurl might perform several
requests to possibly different hosts. libcurl will only send this user and
password information to hosts using the initial host name, so if libcurl
follows locations to other hosts it will not send the user and password to
those. This is enforced to prevent accidental information leakage.
password information to hosts using the initial host name (unless
CURLOPT_UNRESTRICTED_AUTH is set), so if libcurl follows locations to other
hosts it will not send the user and password to those. This is enforced to
prevent accidental information leakage.
.TP
.B CURLOPT_PROXYUSERPWD
Pass a char * as parameter, which should be [user name]:[password] to use for
the connection to the HTTP proxy. If the password is left out, you will be
prompted for it. \fICURLOPT_PASSWDFUNCTION\fP can be used to set your own
prompt function.
.TP
.B CURLOPT_HTTPAUTH
Pass a long as parameter, which is set to a bitmask, to tell libcurl what
authentication method(s) you want it to use. The available bits are listed
below. If more than one bit is set, libcurl will first query the site to see
what authentication methods it supports and then pick the best one you allow
it to use. Note that for some methods, this will induce an extra network
round-trip. Set the actual name and password with the \fICURLOPT_USERPWD\fP
option. (Added in 7.10.6)
.RS
.TP 5
.B CURLAUTH_BASIC
HTTP Basic authentication. This is the default choice, and the only method
that is in wide-spread use and supported virtually everywhere. This is sending
the user name and password over the network in plain text, easily captured by
others.
.TP
.B CURLAUTH_DIGEST
HTTP Digest authentication. Digest authentication is defined in RFC2617 and
is a more secure way to do authentication over public networks than the
regular old-fashioned Basic method.
.TP
.B CURLAUTH_GSSNEGOTIATE
HTTP GSS-Negotiate authentication. The GSS-Negotiate method was designed by
Microsoft and is used in their web applications. It is primarily meant as a
support for Kerberos5 authentication but may also be used along with other
authentication methods. For more information see IETF draft
draft-brezak-spnego-http-04.txt.
.TP
.B CURLAUTH_NTLM
HTTP NTLM authentication. A proprietary protocol invented and used by
Microsoft. It uses a challenge-response and hash concept similar to Digest to
prevent the password from being eavesdropped.
.TP
.B CURLAUTH_ANY
This is a convenience macro that sets all bits and thus makes libcurl pick any
it finds suitable. libcurl will automatically select the one it finds most
secure.
.TP
.B CURLAUTH_ANYSAFE
This is a convenience macro that sets all bits except Basic and thus makes
libcurl pick any it finds suitable. libcurl will automatically select the one it
finds most secure.
.RE
.TP
.B CURLOPT_PROXYAUTH
Pass a long as parameter, which is set to a bitmask, to tell libcurl what
authentication method(s) you want it to use for your proxy authentication. If
more than one bit is set, libcurl will first query the site to see what
authentication methods it supports and then pick the best one you allow it to
use. Note that for some methods, this will induce an extra network
round-trip. Set the actual name and password with the
\fICURLOPT_PROXYUSERPWD\fP option. The bitmask can be constructed by or'ing
together the bits listed above for the \fICURLOPT_HTTPAUTH\fP option. As of
this writing, only Basic and NTLM work. (Added in 7.10.7)
.PP
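To make the new authentication bits concrete, here is a small hedged sketch (the URL and credentials are placeholders, not taken from the patch):

#include <curl/curl.h>

int fetch_protected(void)
{
  CURL *curl = curl_easy_init();
  CURLcode res = CURLE_OK;
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/protected/page");
    curl_easy_setopt(curl, CURLOPT_USERPWD, "myname:mypassword");
    /* let libcurl negotiate and pick the most secure method the server offers */
    curl_easy_setopt(curl, CURLOPT_HTTPAUTH, (long)CURLAUTH_ANY);
    res = curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return (int)res;
}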
.SH HTTP OPTIONS
.TP 0.4i
.B CURLOPT_ENCODING
Two encodings are supported \fIdentity\fP, which does nothing, and
\fIdeflate\fP to request the server to compress its response using the
zlib algorithm. This is not an order, the server may or may not do it.
See the special file lib/README.encoding for details.
Sets the contents of the Accept-Encoding: header sent in an HTTP
request, and enables decoding of a response when a Content-Encoding:
header is received. Three encodings are supported: \fIidentity\fP,
which does nothing, \fIdeflate\fP which requests the server to
compress its response using the zlib algorithm, and \fIgzip\fP which
requests the gzip algorithm. If a zero-length string is set, then an
Accept-Encoding: header containing all supported encodings is sent.
This is a request, not an order; the server may or may not do it. This
option must be set (to any non-NULL value) or else any unsolicited
encoding done by the server is ignored. See the special file
lib/README.encoding for details.
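A brief sketch of how the updated option might be used (handle creation omitted): an empty string asks for every encoding libcurl supports, and setting the option at all enables decoding of the response.

#include <curl/curl.h>

/* ask the server for any compressed encoding libcurl can decode */
void enable_compression(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_ENCODING, ""); /* "" = all supported encodings */
  /* or request one specific algorithm instead:
     curl_easy_setopt(curl, CURLOPT_ENCODING, "gzip"); */
}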
.TP
.B CURLOPT_FOLLOWLOCATION
A non-zero parameter tells the library to follow any Location: header that the
@@ -358,7 +421,7 @@ that this is meaningful only when setting \fICURLOPT_FOLLOWLOCATION\fP.
Pass a long. The set number will be the redirection limit. If that many
redirections have been followed, the next redirect will cause an error
(\fICURLE_TOO_MANY_REDIRECTS\fP). This option only makes sense if the
\fICURLOPT_FOLLOWLOCATION\fP is used at the same time. (Added in 7.5)
\fICURLOPT_FOLLOWLOCATION\fP is used at the same time.
.TP
.B CURLOPT_PUT
A non-zero parameter tells the library to use HTTP PUT to transfer data. The
@@ -390,8 +453,7 @@ the \fICURLOPT_HTTPPOST\fP option.
If you want to post data to the server without letting libcurl do a strlen()
to measure the data size, this option must be used. When this option is used
you can post fully binary data, which otherwise is likely to fail. If this
size is set to zero, the library will use strlen() to get the size. (Added in
libcurl 7.2)
size is set to zero, the library will use strlen() to get the size.
.TP
.B CURLOPT_HTTPPOST
Tells libcurl you want a multipart/formdata HTTP POST to be made and you
@@ -429,6 +491,10 @@ curl adds CRLF after each header item. Failure to comply with this will
result in strange bugs because the server will most likely ignore part
of the headers you specified.
The first line in a request (usually containing a GET or POST) is not a header
and cannot be replaced using this option. Only the lines following the
request-line are headers.
\fBNOTE:\fP The most commonly replaced headers have "shortcuts" in the options
CURLOPT_COOKIE, CURLOPT_USERAGENT and CURLOPT_REFERER.
.TP
@@ -477,7 +543,14 @@ internally known cookies to the specified file when \fIcurl_easy_cleanup(3)\fP
is called. If no cookies are known, no file will be created. Specify "-" to
instead have the cookies written to stdout. Using this option also enables
cookies for this session, so if you for example follow a location it will make
matching cookies get sent accordingly. (Added in 7.9)
matching cookies get sent accordingly.
.B NOTE
If the cookie jar file can't be created or written to (when
curl_easy_cleanup() is called), libcurl will not and cannot report an error
for this. Using CURLOPT_VERBOSE or CURLOPT_DEBUGFUNCTION will get a warning to
display, but that is the only visible feedback you get about this possibly
lethal situation.
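A minimal sketch of the cookie jar behaviour described above; the file name is a placeholder, and CURLOPT_VERBOSE is switched on only because, as noted, that is the only place a failed jar write becomes visible.

#include <curl/curl.h>

void enable_cookie_jar(CURL *curl)
{
  /* enables the cookie engine and writes cookies at curl_easy_cleanup() */
  curl_easy_setopt(curl, CURLOPT_COOKIEJAR, "cookies.txt");
  /* the only feedback for an unwritable jar is a verbose/debug warning */
  curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
}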
.TP
.B CURLOPT_TIMECONDITION
Pass a long as parameter. This defines how the CURLOPT_TIMEVALUE time value is
@@ -492,7 +565,7 @@ CURLOPT_TIMECONDITION.
.B CURLOPT_HTTPGET
Pass a long. If the long is non-zero, this forces the HTTP request to get back
to GET. Only really usable if POST, PUT or a custom request has been used
previously using the same curl handle. (Added in 7.8.1)
previously using the same curl handle.
.TP
.B CURLOPT_HTTP_VERSION
Pass a long, set to one of the values described below. They force libcurl to
@@ -560,11 +633,23 @@ and symbolic links.
A non-zero parameter tells the library to append to the remote file instead of
overwrite it. This is only useful when uploading to a ftp site.
.TP
.B CURLOPT_FTP_USE_EPRT
Pass a long. If the value is non-zero, it tells curl to use the EPRT (and
LPRT) command when doing active FTP downloads (which is enabled by
CURLOPT_FTPPORT). Using EPRT means that it will first attempt to use EPRT and
then LPRT before using PORT, but if you pass FALSE (zero) to this option, it
will not try using EPRT or LPRT, only plain PORT. (Added in 7.10.5)
.TP
.B CURLOPT_FTP_USE_EPSV
Pass a long. If the value is non-zero, it tells curl to use the EPSV command
when doing passive FTP downloads (which it always does by default). Using EPSV
means that it will first attempt to use EPSV before using PASV, but if you
pass FALSE (zero) to this option, it will not try using EPSV, only plain PASV.
.TP
.B CURLOPT_FTP_CREATE_MISSING_DIRS
Pass a long. If the value is non-zero, curl will attempt to create any remote
directory that it fails to CWD into. CWD is the command that changes working
directory. (Added in 7.10.7)
.PP
.SH PROTOCOL OPTIONS
.TP 0.4i
@@ -593,17 +678,25 @@ want the transfer to start from.
.TP
.B CURLOPT_CUSTOMREQUEST
Pass a pointer to a zero terminated string as parameter. It will be used
instead of GET or HEAD when doing the HTTP request. This is useful for doing
DELETE or other more or less obscure HTTP requests. Don't do this at will,
make sure your server supports the command first.
instead of GET or HEAD when doing an HTTP request, or instead of LIST or NLST
when doing an ftp directory listing. This is useful for doing DELETE or other
more or less obscure HTTP requests. Don't do this at will, make sure your
server supports the command first.
NOTE: many people have wrongly used this option to replace the entire request
with their own, including multiple headers and POST contents. While that might
work in many cases, it will cause libcurl to send invalid requests and it
could possibly confuse the remote server badly. Use \fICURLOPT_POST\fP and
\fICURLOPT_POSTFIELDS\fP to set POST data. Use \fICURLOPT_HTTPHEADER\fP to
replace or extend the set of headers sent by libcurl. Use
\fICURLOPT_HTTP_VERSION\fP to change HTTP version.
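As an illustration of the intended, narrow use of this option, a hedged sketch issuing an HTTP DELETE; the URL is a placeholder and the rest of the request is still built by libcurl.

#include <curl/curl.h>

int delete_resource(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/item/17");
  /* only the request method changes; headers and body handling stay as usual */
  curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "DELETE");
  return (int)curl_easy_perform(curl);
}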
.TP
.B CURLOPT_FILETIME
Pass a long. If it is a non-zero value, libcurl will attempt to get the
modification date of the remote document in this operation. This requires that
the remote server sends the time or replies to a time querying command. The
\fIcurl_easy_getinfo(3)\fP function with the \fICURLINFO_FILETIME\fP argument
can be used after a transfer to extract the received time (if any). (Added in
7.5)
can be used after a transfer to extract the received time (if any).
.TP
.B CURLOPT_NOBODY
A non-zero parameter tells the library to not include the body-part in the
@@ -654,7 +747,7 @@ open connections to increase.
\fBNOTE:\fP if you already have performed transfers with this curl handle,
setting a smaller MAXCONNECTS than before may cause open connections to get
closed unnecessarily. (Added in 7.7)
closed unnecessarily.
.TP
.B CURLOPT_CLOSEPOLICY
Pass a long. This option sets what policy libcurl should use when the
@@ -665,7 +758,7 @@ the connection that was least recently used, that connection is also least
likely to be capable of re-use. Use \fICURLCLOSEPOLICY_OLDEST\fP to make
libcurl close the oldest connection, the one that was created first among the
ones in the connection cache. The other close policies are not supported
yet. (Added in 7.7)
yet.
.TP
.B CURLOPT_FRESH_CONNECT
Pass a long. Set to non-zero to make the next transfer use a new (fresh)
@@ -673,7 +766,7 @@ connection by force. If the connection cache is full before this connection,
one of the existing connections will be closed as according to the selected or
default policy. This option should be used with caution and only if you
understand what it does. Set this to 0 to have libcurl attempt re-using an
existing connection (default behavior). (Added in 7.7)
existing connection (default behavior).
.TP
.B CURLOPT_FORBID_REUSE
Pass a long. Set to non-zero to make the next transfer explicitly close the
@@ -681,7 +774,7 @@ connection when done. Normally, libcurl keeps all connections alive when done
with one transfer in case there comes a succeeding one that can re-use them.
This option should be used with caution and only if you understand what it
does. Set to 0 to have libcurl keep the connection open for possibly later
re-use (default behavior). (Added in 7.7)
re-use (default behavior).
.TP
.B CURLOPT_CONNECTTIMEOUT
Pass a long. It should contain the maximum time in seconds that you allow the
@@ -718,12 +811,11 @@ a certificate but you need one to load your private key.
.B CURLOPT_SSLKEY
Pass a pointer to a zero terminated string as parameter. The string should be
the file name of your private key. The default format is "PEM" and can be
changed with \fICURLOPT_SSLKEYTYPE\fP. (Added in 7.9.3)
changed with \fICURLOPT_SSLKEYTYPE\fP.
.TP
.B CURLOPT_SSLKEYTYPE
Pass a pointer to a zero terminated string as parameter. The string should be
the format of your private key. Supported formats are "PEM", "DER" and "ENG".
(Added in 7.9.3)
\fBNOTE:\fPThe format "ENG" enables you to load the private key from a crypto
engine. in this case \fICURLOPT_SSLKEY\fP is used as an identifier passed to
@@ -734,19 +826,18 @@ Pass a pointer to a zero terminated string as parameter. It will be used as
the password required to use the \fICURLOPT_SSLKEY\fP private key. If the
password is not supplied, you will be prompted for
it. \fICURLOPT_PASSWDFUNCTION\fP can be used to set your own prompt function.
(Added in 7.9.3)
.TP
.B CURLOPT_SSL_ENGINE
Pass a pointer to a zero terminated string as parameter. It will be used as
the identifier for the crypto engine you want to use for your private
key. (Added in 7.9.3)
key.
\fBNOTE:\fP If the crypto device cannot be loaded,
\fICURLE_SSL_ENGINE_NOTFOUND\fP is returned.
.TP
.B CURLOPT_SSL_ENGINEDEFAULT
Sets the actual crypto engine as the default for (asymmetric) crypto
operations. (Added in 7.9.3)
operations.
\fBNOTE:\fP If the crypto device cannot be set,
\fICURLE_SSL_ENGINE_SETFAILED\fP is returned.
@@ -760,15 +851,15 @@ servers make this difficult, which is why you at times may have to use this option.
Pass a long that is set to a zero value to stop curl from verifying the peer's
certificate (starting with 7.10 this option is set to TRUE by default). Alternate
certificates to verify against can be specified with the CURLOPT_CAINFO option
(Added in 7.4.2) or a certificate directory can be specified with the
CURLOPT_CAPATH option (Added in 7.9.8). As of 7.10, curl installs a default
bundle. CURLOPT_SSL_VERIFYHOST may also need to be set to 1 or 0 if
or a certificate directory can be specified with the CURLOPT_CAPATH option
(Added in 7.9.8). As of 7.10, curl installs a default bundle.
CURLOPT_SSL_VERIFYHOST may also need to be set to 1 or 0 if
CURLOPT_SSL_VERIFYPEER is disabled (it defaults to 2).
.TP
.B CURLOPT_CAINFO
Pass a char * to a zero terminated string naming a file holding one or more
certificates to verify the peer with. This only makes sense when used in
combination with the CURLOPT_SSL_VERIFYPEER option. (Added in 7.4.2)
combination with the CURLOPT_SSL_VERIFYPEER option.
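A small sketch combining the peer verification options discussed here; the bundle path and URL are placeholders, and the handle is assumed to exist already.

#include <curl/curl.h>

void verify_peer(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
  curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1L);   /* check the certificate chain */
  curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2L);   /* and that the name matches */
  curl_easy_setopt(curl, CURLOPT_CAINFO, "/path/to/ca-bundle.crt");
}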
.TP
.B CURLOPT_CAPATH
Pass a char * to a zero terminated string naming a directory holding multiple
@@ -790,8 +881,7 @@ socket. It will be used to seed the random engine for SSL.
.B CURLOPT_SSL_VERIFYHOST
Pass a long. Set if we should verify the Common name from the peer certificate
in the SSL handshake, set 1 to check existence, 2 to ensure that it matches
the provided hostname. This is by default set to 2. (Added in 7.8.1, default
changed in 7.10)
the provided hostname. This is by default set to 2. (default changed in 7.10)
.TP
.B CURLOPT_SSL_CIPHER_LIST
Pass a char *, pointing to a zero terminated string holding the list of
@@ -810,7 +900,7 @@ Pass a char * as parameter. Set the krb4 security level, this also enables
krb4 awareness. This is a string, 'clear', 'safe', 'confidential' or
\&'private'. If the string is set but doesn't match one of these, 'private'
will be used. Set the string to NULL to disable kerberos4. The kerberos
support only works for FTP. (Added in 7.3)
support only works for FTP.
.PP
.SH OTHER OPTIONS
.TP 0.4i
@@ -825,7 +915,3 @@ error occurred as \fI<curl/curl.h>\fP defines. See the \fIlibcurl-errors.3\fP
man page for the full list with descriptions.
.SH "SEE ALSO"
.BR curl_easy_init "(3), " curl_easy_cleanup "(3), "
.SH BUGS
If you find any bugs, or just have questions, subscribe to one of the mailing
lists and post. We won't bite.

View File

@@ -2,16 +2,16 @@
.\" nroff -man [file]
.\" $Id:
.\"
.TH curl_free 3 "24 Sept 2002" "libcurl 7.10" "libcurl Manual"
.TH curl_free 3 "12 Aug 2003" "libcurl 7.10" "libcurl Manual"
.SH NAME
curl_free - reclaim memory that has been obtained through a libcurl call
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "void *curl_free( char *" ptr " );"
.BI "void curl_free( char *" ptr " );"
.ad
.SH DESCRIPTION
curl_free reclaims memory that has been obtained through a libcurl call.
Use curl_free() instead of free() to avoid anomalies that can result from differences in memory management between your application and libcurl.
.SH "SEE ALSO"
.I curl_unescape(), curl_free()
.I curl_unescape()

View File

@@ -0,0 +1,19 @@
.\" $Id$
.\"
.TH curl_share_cleanup 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_cleanup - Clean up a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "CURLSHcode curl_share_cleanup( );"
.ad
.SH DESCRIPTION
This function deletes a shared object. The share handle cannot be used anymore
when this function has been called.
.SH RETURN VALUE
If this function returns non-zero, the object was not properly deleted and it
still remains!
.SH "SEE ALSO"
.BR curl_share_init "(3), " curl_share_setopt "(3)"

View File

@@ -0,0 +1,21 @@
.\" $Id$
.\"
.TH curl_share_init 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_init - Create a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "CURLSH *curl_share_init( );"
.ad
.SH DESCRIPTION
This function returns a CURLSH handle to be used as input to all the other
share-functions, sometimes referred to as a share handle in some places in the
documentation. This init call MUST have a corresponding call to
\fIcurl_share_cleanup\fP when all operations using the share are complete.
.SH RETURN VALUE
If this function returns NULL, something went wrong and you got no share
object to use.
.SH "SEE ALSO"
.BR curl_share_cleanup "(3), " curl_share_setopt "(3)"

View File

@@ -0,0 +1,46 @@
.\" $Id$
.\"
.TH curl_share_setopt 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_setopt - Set options for a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
CURLSHcode curl_share_setopt(CURLSH *share, CURLSHoption option, parameter);
.ad
.SH DESCRIPTION
Set the \fIoption\fP to \fIparameter\fP for the given \fIshare\fP.
.SH OPTIONS
.TP 0.4i
.B CURLSHOPT_LOCKFUNC
The \fIparameter\fP must be a pointer to a function matching the following
prototype:
void lock_function(CURL *handle, curl_lock_data data, curl_lock_access access,
void *userptr);
\fIdata\fP defines what data libcurl wants to lock, and you must make sure that
only one lock is given at any time for each kind of data.
\fIaccess\fP defines what access type libcurl wants, shared or single.
\fIuserptr\fP is the pointer you set with \fICURLSHOPT_USERDATA\fP.
.TP
.B CURLSHOPT_UNLOCKFUNC
The \fIparameter\fP must be a pointer to a function matching the following
prototype: void unlock_function(CURL *handle, curl_lock_data data, void
*userptr);
.TP
.B CURLSHOPT_SHARE
The \fIparameter\fP specifies a type of data that should be shared, for
example CURL_LOCK_DATA_COOKIE or CURL_LOCK_DATA_DNS.
.TP
.B CURLSHOPT_UNSHARE
The \fIparameter\fP specifies a type of data that should no longer be shared.
.TP
.B CURLSHOPT_USERDATA
The \fIparameter\fP allows you to specify a pointer to data that will be
passed to the lock and unlock functions as \fIuserptr\fP.
.PP
.SH RETURN VALUE
If this function returns non-zero, something was wrong!
.SH "SEE ALSO"
.BR curl_share_cleanup "(3), " curl_share_init "(3)"

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_slist_append 3 "21 Feb 2003" "libcurl 7.10.4" "libcurl Manual"
.TH curl_slist_append 3 "19 Jun 2003" "libcurl 7.10.4" "libcurl Manual"
.SH NAME
curl_slist_append - add a string to an slist
.SH SYNOPSIS
@@ -15,7 +15,8 @@ curl_slist_append - add a string to an slist
curl_slist_append() appends a specified string to a linked list of
strings. The existing \fIlist\fP should be passed as the first argument while
the new list is returned from this function. The specified \fIstring\fP has
been appended when this function returns.
been appended when this function returns. curl_slist_append() copies the
string.
The list should be freed again (after usage) with \fBcurl_slist_free_all()\fP.
.SH RETURN VALUE
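Since curl_slist_append() copies the string, a typical pattern (illustrative only, header names are placeholders) is to build a list of custom headers, hand it to an easy handle and free it after the transfer:

#include <curl/curl.h>

void send_custom_headers(CURL *curl)
{
  struct curl_slist *headers = NULL;

  headers = curl_slist_append(headers, "X-Example: yes"); /* the string is copied */
  headers = curl_slist_append(headers, "Accept:");        /* blank value removes a default header */

  curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
  curl_easy_perform(curl);

  curl_slist_free_all(headers); /* free the whole list when done */
}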

View File

@@ -13,15 +13,7 @@ curl_version - returns the libcurl version string
.SH DESCRIPTION
Returns a human readable string with the version number of libcurl and some of
its important components (like OpenSSL version).
Note: this returns the actual running lib's version, you might have installed
a newer lib's include files in your system which may turn your LIBCURL_VERSION
#define value to differ from this result.
.SH RETURN VALUE
A pointer to a zero terminated string.
.SH "SEE ALSO"
The
.I LIBCURL_VERSION
#define in <curl/curl.h>
.SH BUGS
Surely there are some, you tell me!
.BR curl_version_info "(3)"

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_version_info 3 "30 Sep 2002" "libcurl 7.10" "libcurl Manual"
.TH curl_version_info 3 "12 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_version_info - returns run-time libcurl version info
.SH SYNOPSIS
@@ -69,6 +69,21 @@ supports SSL (HTTPS/FTPS)
.TP
.B CURL_VERSION_LIBZ
supports HTTP deflate using libz
.TP
.B CURL_VERSION_NTLM
supports HTTP NTLM (added in 7.10.6)
.TP
.B CURL_VERSION_GSSNEGOTIATE
supports HTTP GSS-Negotiate (added in 7.10.6)
.TP
.B CURL_VERSION_DEBUG
libcurl was built with extra debug capabilities built-in. This is mainly of
interest for libcurl hackers. (added in 7.10.6)
.TP
.B CURL_VERSION_ASYNCHDNS
libcurl was built with support for asynchronous name lookups, which allows
more exact timeouts (even on Windows) and less blocking when using the multi
interface. (added in 7.10.7)
.PP
\fIssl_version\fP is an ASCII string for the OpenSSL version used. If libcurl
has no SSL support, this is NULL.
@@ -83,11 +98,8 @@ libcurl has no libz support, this is NULL.
names protocols that libcurl supports (using lowercase letters). The protocol
names are the same as would be used in URLs. The array is terminated by a NULL
entry.
.SH RETURN VALUE
A pointer to a curl_version_info_data struct.
.SH "SEE ALSO"
\fIcurl_version(3)\fP
.SH BUGS
No known bugs.
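A small sketch of typical use, checking the run-time feature bits described above (illustrative only):

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);

  printf("running libcurl %s\n", info->version);
  if(info->features & CURL_VERSION_SSL)
    printf("SSL is supported (%s)\n", info->ssl_version);
  if(info->features & CURL_VERSION_LIBZ)
    printf("libz / deflate decoding is supported\n");
  return 0;
}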

View File

@@ -12,7 +12,9 @@
<h2>Overviews</h2>
<A HREF="libcurl.html">libcurl</A>
<br><a href="libcurl-easy.html">libcurl-easy</a>
<br><a href="libcurl-multi.html">libcurl-multi</a>
<br><a href="libcurl-share.html">libcurl-share</a>
<br><a href="libcurl-errors.html">libcurl-errors</a>
<br><a href="../libcurl-the-guide">libcurl-the-guide</a> (plain text)
@@ -40,6 +42,9 @@
<br><a href="curl_multi_init.html">curl_multi_init</a>
<br><a href="curl_multi_perform.html">curl_multi_perform</a>
<br><a href="curl_multi_remove_handle.html">curl_multi_remove_handle</a>
<br><a href="curl_share_cleanup.html">curl_share_cleanup</A>
<br><a href="curl_share_init.html">curl_share_init</A>
<br><a href="curl_share_setopt.html">curl_share_setopt</A>
<br><a href="curl_slist_append.html">curl_slist_append</A>
<br><a href="curl_slist_free_all.html">curl_slist_free_all</A>
<br><a href="curl_strequal.html">curl_strequal</A>

View File

@@ -0,0 +1,29 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" $Id$
.\"
.TH libcurl 3 "12 Aug 2003" "libcurl 7.10.7" "libcurl easy interface"
.SH NAME
libcurl-easy \- easy interface overview
.SH DESCRIPTION
When using libcurl's "easy" interface you init your session and get a handle
(often referred to as an "easy handle" in various docs and sources), which you
use as input to the easy interface functions you use. Use
\fIcurl_easy_init()\fP to get the handle.
You continue by setting all the options you want in the upcoming transfer, the
most important among them is the URL itself (you can't transfer anything
without a specified URL as you may have figured out yourself). You might want
to set some callbacks as well that will be called from the library when data
is available etc. \fIcurl_easy_setopt()\fP is used for all this.
When all is setup, you tell libcurl to perform the transfer using
\fIcurl_easy_perform()\fP. It will then do the entire operation and won't
return until it is done (successfully or not).
After the transfer has been made, you can set new options and make another
transfer, or if you're done, cleanup the session by calling
\fIcurl_easy_cleanup()\fP. If you want persistent connections, you don't
cleanup immediately, but instead run ahead and perform other transfers using
the same easy handle.
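For readers new to the easy interface, a minimal complete sketch of the sequence just described (the URL is only an example):

#include <curl/curl.h>

int main(void)
{
  CURL *curl;
  CURLcode res = CURLE_OK;

  curl_global_init(CURL_GLOBAL_ALL);   /* once per program */

  curl = curl_easy_init();             /* get the easy handle */
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/");
    res = curl_easy_perform(curl);     /* blocks until the transfer is done */
    curl_easy_cleanup(curl);           /* done with this handle */
  }
  curl_global_cleanup();
  return (int)res;
}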

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH libcurl-multi 5 "13 Oct 2001" "libcurl 7.10.1" "libcurl multi interface"
.TH libcurl-multi 3 "13 Oct 2001" "libcurl 7.10.1" "libcurl multi interface"
.SH NAME
libcurl-multi \- how to use the multi interface
.SH DESCRIPTION

View File

@@ -0,0 +1,46 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" $Id$
.\"
.TH libcurl-share 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl share interface"
.SH NAME
libcurl-share \- how to use the share interface
.SH DESCRIPTION
This is an overview on how to use the libcurl share interface in your C
programs. There are specific man pages for each function mentioned in
here.
All functions in the share interface are prefixed with curl_share.
.SH "OBJECTIVES"
The share interface was added to enable sharing of data between curl
\&"handles".
.SH "ONE SET OF DATA - MANY TRANSFERS"
You can have multiple easy handles share data between them. Have them update
and use the \fBsame\fP cookie database or DNS cache! This way, each single
transfer will take advantage of data updates made by the other transfer(s).
.SH "SHARE OBJECT"
You create a shared object with \fIcurl_share_init()\fP. It returns a handle
for a newly created one.
You tell the shared object what data you want it to share by using
\fIcurl_share_setopt()\fP. Currently you can only share DNS and/or COOKIE
data.
Since you can use this share from multiple threads, and libcurl has no
internal thread synchronization, you must provide mutex callbacks if you're
using this multi-threaded. You set lock and unlock functions with
\fIcurl_share_setopt()\fP too.
Then, to make an easy handle use this share, you set the CURLOPT_SHARE
option with \fIcurl_easy_setopt\fP, and pass in the share handle. You can make any
number of easy handles share the same share handle.
To make an easy handle stop using that particular share, you set CURLOPT_SHARE
to NULL for that easy handle. To make a handle stop sharing a particular type
of data, you can CURLSHOPT_UNSHARE it.
you can CURLSHOPT_UNSHARE it.
When you're done using the share, make sure that no easy handle is still using
it, and call \fIcurl_share_cleanup()\fP on the handle.
.SH "SEE ALSO"
.BR curl_share_init "(3), " curl_share_setopt "(3), " curl_share_cleanup "(3)"
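A single-threaded sketch of the flow just described; with only one thread the lock callbacks can be omitted, but a threaded program must set CURLSHOPT_LOCKFUNC/CURLSHOPT_UNLOCKFUNC as explained above. URLs are placeholders.

#include <curl/curl.h>

int main(void)
{
  CURLSH *share;
  CURL *first, *second;

  curl_global_init(CURL_GLOBAL_ALL);

  share = curl_share_init();
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);

  first = curl_easy_init();
  curl_easy_setopt(first, CURLOPT_URL, "http://example.com/a");
  curl_easy_setopt(first, CURLOPT_SHARE, share);
  curl_easy_perform(first);

  second = curl_easy_init();
  curl_easy_setopt(second, CURLOPT_URL, "http://example.com/b");
  curl_easy_setopt(second, CURLOPT_SHARE, share); /* re-uses the shared DNS/cookie data */
  curl_easy_perform(second);

  curl_easy_cleanup(first);
  curl_easy_cleanup(second);
  curl_share_cleanup(share); /* only after no easy handle uses it */

  curl_global_cleanup();
  return 0;
}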

View File

@@ -7,38 +7,37 @@
libcurl \- client-side URL transfers
.SH DESCRIPTION
This is an overview on how to use libcurl in your C programs. There are
specific man pages for each function mentioned in here. There's also the
libcurl-the-guide document for a complete tutorial to programming with
libcurl.
specific man pages for each function mentioned in here. There are also the
\fIlibcurl-easy\fP man page, the \fIlibcurl-multi\fP man page, the
\fIlibcurl-share\fP man page and the \fIlibcurl-the-guide\fP document for
further reading on how to do programming with libcurl.
There are a dozen custom bindings that bring libcurl access to your favourite
language. Look elsewhere for documentation on those.
There exist more than a dozen custom bindings that bring libcurl access to
your favourite language. Look elsewhere for documentation on those.
All applications that use libcurl should call \fIcurl_global_init()\fP exactly
once before any libcurl function can be used. After all usage of libcurl is
complete, it \fBmust\fP call \fIcurl_global_cleanup()\fP. In between those two
calls, you can use libcurl as described below.
When using libcurl's "easy" interface you init your session and get a handle,
which you use as input to the easy interface functions you use. Use
\fIcurl_easy_init()\fP to get the handle. There is also the so called "multi"
interface, try the \fIlibcurl-multi(3)\fP man page for an overview of that.
To transfer files, you always set up an "easy handle" using
\fIcurl_easy_init()\fP, but when you want the file(s) transfered you have the
option of using the "easy" interface, or the "multi" interface.
You continue by setting all the options you want in the upcoming transfer,
most important among them is the URL itself (you can't transfer anything
without a specified URL as you may have figured out yourself). You might want
to set some callbacks as well that will be called from the library when data
is available etc. \fIcurl_easy_setopt()\fP is there for this.
The easy interface is a synchronous interface with which you call
\fIcurl_easy_perform\fP and let it perform the transfer. When it is completed,
the function returns and you can continue. More details are found in the
\fIlibcurl-easy\fP man page.
When all is setup, you tell libcurl to perform the transfer using
\fIcurl_easy_perform()\fP. It will then do the entire operation and won't
return until it is done (successfully or not).
The multi interface on the other hand is an asynchronous interface, that you
call and that performs only a little piece of the transfer on each invoke. It
is perfect if you want to do things while the transfer is in progress, or
similar. The multi interface allows you to select() on libcurl action, and
even to easily download multiple files simultaneously using a single thread.
After the transfer has been made, you can set new options and make another
transfer, or if you're done, cleanup the session by calling
\fIcurl_easy_cleanup()\fP. If you want persistent connections, you don't
cleanup immediately, but instead run ahead and perform other transfers using
the same handle. See the chapter below for Persistent Connections.
You can have multiple easy handles share certain data, even if they are used
in different threads. This magic is setup using the share interface, as
described in the \fIlibcurl-share\fP man page.
There is also a series of other helpful functions to use. They are:
@@ -107,14 +106,15 @@ Persistent connections means that libcurl can re-use the same connection for
several transfers, if the conditions are right.
libcurl will *always* attempt to use persistent connections. Whenever you use
curl_easy_perform(), libcurl will attempt to use an existing connection to do
the transfer, and if none exists it'll open a new one that will be subject for
re-use on a possible following call to curl_easy_perform().
\fIcurl_easy_perform()\fP or \fIcurl_multi_perform()\fP, libcurl will attempt
to use an existing connection to do the transfer, and if none exists it'll
open a new one that will be subject for re-use on a possible following call to
\fIcurl_easy_perform()\fP or \fIcurl_multi_perform()\fP.
To allow libcurl to take full advantage of persistent connections, you should
do as many of your file transfers as possible using the same curl handle. When
you call curl_easy_cleanup(), all the possibly open connections held by
you call \fIcurl_easy_cleanup()\fP, all the possibly open connections held by
libcurl will be closed and forgotten.
Note that the options set with curl_easy_setopt() will be used in on every
repeat curl_easy_perform() call
Note that the options set with \fIcurl_easy_setopt()\fP will be used on
every repeated \fIcurl_easy_perform()\fP call.
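A short sketch of the re-use pattern described here: two transfers on the same easy handle, so the second one can pick up the connection the first one opened (the URLs are placeholders; options set once remain set).

#include <curl/curl.h>

int main(void)
{
  CURL *curl;

  curl_global_init(CURL_GLOBAL_ALL);
  curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/first");
    curl_easy_perform(curl);   /* opens a connection */

    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/second");
    curl_easy_perform(curl);   /* may re-use the same connection */

    curl_easy_cleanup(curl);   /* closes all cached connections */
  }
  curl_global_cleanup();
  return 0;
}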

View File

@@ -13,3 +13,26 @@ of environment. You should include files from here using...
... style and point the compiler's include path to the directory holding the
curl subdirectory. It makes it more likely to survive future modifications.
NOTE FOR LIBCURL HACKERS
All the include files in this tree are written and intended to be installed on
a system that may serve multiple platforms and multiple applications, all
using libcurl (possibly even different libcurl installations using different
versions). Therefore, all header files in here must obey these rules:
* They cannot depend on or use configure-generated results from libcurl's or
curl's directories. Other applications may not run configure as (lib)curl
does, and using platform dependent info here may break other platforms.
* We cannot assume anything else but very basic compiler features being
present. While libcurl requires an ANSI C compiler to build, some of the
earlier ANSI compilers clearly can't deal with some preprocessor operators.
* Newlines must remain unix-style for older compilers' sake.
* Comments must be written in the old-style /* unnested C-fashion */
To figure out how to do good and portable checks for features, operating
systems or specific hardware, a very good resource is Bjorn Reese's
collection at http://predef.sf.net/

View File

@@ -23,23 +23,39 @@
* $Id$
***************************************************************************/
/* If you have problems, all libcurl docs and details are found here:
http://curl.haxx.se/libcurl/
*/
/* This is the version number of the libcurl package from which this header
file origins: */
#define LIBCURL_VERSION "7.10.7"
/* This is the numeric version of the libcurl version number, meant for easier
parsing and comparisons by programs. The LIBCURL_VERSION_NUM define will
always follow this syntax:
0xXXYYZZ
Where XX, YY and ZZ are the main version, release and patch numbers in
hexadecimal. All three numbers are always represented using two digits. 1.2
would appear as "0x010200" while version 9.11.7 appears as "0x090b07".
This 6-digit hexadecimal number does not show pre-release number, and it is
always a greater number in a more recent release. It makes comparisons with
greater than and less than work.
*/
#define LIBCURL_VERSION_NUM 0x070a07
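For example, application code can use this property for a simple compile-time
check (an illustrative snippet, not part of the header itself):
    /* sketch: refuse to build against anything older than 7.10.7 */
    #include <curl/curl.h>

    #if LIBCURL_VERSION_NUM < 0x070a07
    #error "this program requires libcurl 7.10.7 or later"
    #endif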
#include <stdio.h>
/* The include stuff here is mainly for time_t! */
/* The include stuff here below is mainly for time_t! */
#ifdef vms
# include <types.h>
# include <time.h>
#else
# include <sys/types.h>
# ifdef TIME_WITH_SYS_TIME
# include <sys/time.h>
# include <time.h>
# else
# ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
# else
# include <time.h>
# endif
# endif
# include <time.h>
#endif /* defined (vms) */
#ifndef TRUE
@@ -55,39 +71,36 @@
extern "C" {
#endif
/* stupid #define trick to preserve functionality with older code, but
making it use our name space for the future */
/* silly trick to preserve functionality with older code, but making it use
our name space for the future */
#define HttpPost curl_httppost
struct curl_httppost {
struct curl_httppost *next; /* next entry in the list */
char *name; /* pointer to allocated name */
long namelength; /* length of name length */
char *contents; /* pointer to allocated data contents */
long contentslength; /* length of contents field */
/* CMC: Added support for buffer uploads */
char *buffer; /* pointer to allocated buffer contents */
long bufferlength; /* length of buffer field */
char *contenttype; /* Content-Type */
struct curl_httppost *next; /* next entry in the list */
char *name; /* pointer to allocated name */
long namelength; /* length of the name field */
char *contents; /* pointer to allocated data contents */
long contentslength; /* length of contents field */
char *buffer; /* pointer to allocated buffer contents */
long bufferlength; /* length of buffer field */
char *contenttype; /* Content-Type */
struct curl_slist* contentheader; /* list of extra headers for this form */
struct curl_httppost *more; /* if one field name has more than one file, this
link should link to following files */
long flags; /* as defined below */
#define HTTPPOST_FILENAME (1<<0) /* specified content is a file name */
#define HTTPPOST_READFILE (1<<1) /* specified content is a file name */
#define HTTPPOST_PTRNAME (1<<2) /* name is only stored pointer
do not free in formfree */
struct curl_httppost *more; /* if one field name has more than one
file, this link should link to following
files */
long flags; /* as defined below */
#define HTTPPOST_FILENAME (1<<0) /* specified content is a file name */
#define HTTPPOST_READFILE (1<<1) /* specified content is a file name */
#define HTTPPOST_PTRNAME (1<<2) /* name is only stored pointer
do not free in formfree */
#define HTTPPOST_PTRCONTENTS (1<<3) /* contents is only stored pointer
do not free in formfree */
#define HTTPPOST_BUFFER (1<<4) /* upload file from buffer */
#define HTTPPOST_PTRBUFFER (1<<5) /* upload file from pointer contents */
/* CMC: Added support for buffer uploads */
#define HTTPPOST_BUFFER (1<<4) /* upload file from buffer */
#define HTTPPOST_PTRBUFFER (1<<5) /* upload file from pointer contents */
char *showfilename; /* The file name to show. If not set, the actual
file name will be used (if this is a file part) */
char *showfilename; /* The file name to show. If not set, the
actual file name will be used (if this
is a file part) */
};
typedef int (*curl_progress_callback)(void *clientp,
@@ -130,7 +143,7 @@ typedef int (*curl_debug_callback)
curl_infotype type, /* what kind of data */
char *data, /* points to the data */
size_t size, /* size of the data pointed to */
void *userp); /* whatever the user please */
void *userptr); /* whatever the user pleases */
/* All possible error codes from all sorts of curl functions. Future versions
may return other values, stay prepared.
@@ -207,6 +220,11 @@ typedef enum {
CURL_LAST /* never use! */
} CURLcode;
typedef CURLcode (*curl_ssl_ctx_callback)(CURL *curl, /* easy handle */
void *ssl_ctx, /* actually an
OpenSSL SSL_CTX */
void *userptr);
/* Make a spelling correction for the operation timed-out define */
#define CURLE_OPERATION_TIMEDOUT CURLE_OPERATION_TIMEOUTED
#define CURLE_HTTP_NOT_FOUND CURLE_HTTP_RETURNED_ERROR
@@ -217,6 +235,14 @@ typedef enum {
CURLPROXY_SOCKS5 = 5
} curl_proxytype;
#define CURLAUTH_NONE 0 /* nothing */
#define CURLAUTH_BASIC (1<<0) /* Basic (default) */
#define CURLAUTH_DIGEST (1<<1) /* Digest */
#define CURLAUTH_GSSNEGOTIATE (1<<2) /* GSS-Negotiate */
#define CURLAUTH_NTLM (1<<3) /* NTLM */
#define CURLAUTH_ANY ~0 /* all types set */
#define CURLAUTH_ANYSAFE (~CURLAUTH_BASIC)
/* this was the error code 50 in 7.7.3 and a few earlier versions, this
is no longer used by libcurl but is instead #defined here only to not
make programs break */
@@ -266,6 +292,12 @@ typedef enum {
#define CINIT(name,type,number) CURLOPT_/**/name = type + number
#endif
/*
* This macro-mania below sets up the CURLOPT_[what] enum, to be used with
* curl_easy_setopt(). The first argument in the CINIT() macro is the [what]
* word.
*/
typedef enum {
CINIT(NOTHING, LONG, 0), /********* the first one is unused ************/
@@ -275,24 +307,19 @@ typedef enum {
/* The full URL to get/put */
CINIT(URL, OBJECTPOINT, 2),
/* Port number to connect to, if other than default. Specify the CONF_PORT
flag in the CURLOPT_FLAGS to activate this */
/* Port number to connect to, if other than default. */
CINIT(PORT, LONG, 3),
/* Name of proxy to use. Specify the CONF_PROXY flag in the CURLOPT_FLAGS to
activate this */
/* Name of proxy to use. */
CINIT(PROXY, OBJECTPOINT, 4),
/* Name and password to use when fetching. Specify the CONF_USERPWD flag in
the CURLOPT_FLAGS to activate this */
/* "name:password" to use when fetching. */
CINIT(USERPWD, OBJECTPOINT, 5),
/* Name and password to use with Proxy. Specify the CONF_PROXYUSERPWD
flag in the CURLOPT_FLAGS to activate this */
/* "name:password" to use with proxy. */
CINIT(PROXYUSERPWD, OBJECTPOINT, 6),
/* Range to get, specified as an ASCII string. Specify the CONF_RANGE flag
in the CURLOPT_FLAGS to activate this */
/* Range to get, specified as an ASCII string. */
CINIT(RANGE, OBJECTPOINT, 7),
/* not used */
@@ -413,7 +440,6 @@ typedef enum {
as described elsewhere. */
CINIT(WRITEINFO, OBJECTPOINT, 40),
/* Previous FLAG bits */
CINIT(VERBOSE, LONG, 41), /* talk a lot */
CINIT(HEADER, LONG, 42), /* throw the header out too */
CINIT(NOPROGRESS, LONG, 43), /* shut off the progress meter */
@@ -624,6 +650,33 @@ typedef enum {
and password to whatever host the server decides. */
CINIT(UNRESTRICTED_AUTH, LONG, 105),
/* Specifically switch on or off the FTP engine's use of the EPRT command (it
also disables the LPRT attempt). By default, those will always be attempted
before the good old traditional PORT command. */
CINIT(FTP_USE_EPRT, LONG, 106),
/* Set this to a bitmask value to enable the particular authentication
methods you like. Use this in combination with CURLOPT_USERPWD.
Note that setting multiple bits may cause extra network round-trips. */
CINIT(HTTPAUTH, LONG, 107),
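A brief usage sketch of that bitmask, assuming an already-initialized easy
handle named curl and placeholder credentials:
    /* sketch: allow Digest or NTLM but never Basic; libcurl negotiates
       which one to use, possibly costing an extra round-trip */
    curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_DIGEST | CURLAUTH_NTLM);
    curl_easy_setopt(curl, CURLOPT_USERPWD, "user:secret");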
/* Set the ssl context callback function, currently only for OpenSSL ssl_ctx
in second argument. The function must match the curl_ssl_ctx_callback
prototype. */
CINIT(SSL_CTX_FUNCTION, FUNCTIONPOINT, 108),
/* Set the userdata for the ssl context callback function's third
argument */
CINIT(SSL_CTX_DATA, OBJECTPOINT, 109),
/* FTP Option that causes missing dirs to be created on the remote server */
CINIT(FTP_CREATE_MISSING_DIRS, LONG, 110),
/* Set this to a bitmask value to enable the particular authentication
methods you like. Use this in combination with CURLOPT_PROXYUSERPWD.
Note that setting multiple bits may cause extra network round-trips. */
CINIT(PROXYAUTH, LONG, 111),
CURLOPT_LASTENTRY /* the last unused */
} CURLoption;
@@ -692,9 +745,9 @@ typedef enum {
#endif
/* These functions are in the libcurl, they're here for portable reasons and
they are used by the 'curl' client. They really should be moved to some kind
of "portability library" since it has nothing to do with file transfers and
/* These functions are in libcurl; they're here for portability reasons and
they are used by the 'curl' client. They really should be moved to some kind
of "portability library" since this has nothing to do with file transfers and
might be usable to other programs...
NOTE: they return TRUE if the strings match *case insensitively*.
@@ -702,9 +755,12 @@ typedef enum {
extern int (curl_strequal)(const char *s1, const char *s2);
extern int (curl_strnequal)(const char *s1, const char *s2, size_t n);
/* DEPRECATED function to build formdata */
#ifdef CURL_OLDSTYLE
/* DEPRECATED function to build formdata. Stop using this, it will cease
to exist. */
int curl_formparse(char *, struct curl_httppost **,
struct curl_httppost **_post);
#endif
/* name is uppercase CURLFORM_<name> */
#ifdef CFINIT
@@ -783,47 +839,122 @@ typedef enum {
CURL_FORMADD_LAST /* last */
} CURLFORMcode;
/*
* NAME curl_formadd()
*
* DESCRIPTION
*
* Pretty advanced function for building multi-part formposts. Each invocation
* adds one part; together the parts construct a full post. Then use
* CURLOPT_HTTPPOST to send it off to libcurl.
*/
CURLFORMcode curl_formadd(struct curl_httppost **httppost,
struct curl_httppost **last_post,
...);
struct curl_httppost **last_post,
...);
/* cleanup a form: */
/*
* NAME curl_formfree()
*
* DESCRIPTION
*
* Free a multipart formpost previously built with curl_formadd().
*/
void curl_formfree(struct curl_httppost *form);
/* Unix and Win32 getenv function call, this returns a malloc()'ed string that
MUST be free()ed after usage is complete. */
/*
* NAME curl_getenv()
*
* DESCRIPTION
*
* Returns a malloc()'ed string that MUST be curl_free()ed after usage is
* complete.
*/
char *curl_getenv(const char *variable);
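A short usage sketch of that contract:
    /* sketch: the returned string was allocated by libcurl and must be
       released with curl_free(), not plain free() */
    char *home = curl_getenv("HOME");
    if(home) {
      /* ... use the value ... */
      curl_free(home);
    }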
/* Returns a static ascii string of the libcurl version. */
/*
* NAME curl_version()
*
* DESCRIPTION
*
* Returns a static ascii string of the libcurl version.
*/
char *curl_version(void);
/* Escape and unescape URL encoding in strings. The functions return a new
* allocated string or NULL if an error occurred. */
/*
* NAME curl_escape()
*
* DESCRIPTION
*
* Escapes URL strings (converts all letters considered illegal in URLs to
* their %XX versions). This function returns a newly allocated string or NULL
* if an error occurred.
*/
char *curl_escape(const char *string, int length);
/*
* NAME curl_unescape()
*
* DESCRIPTION
*
* Unescapes URL encoding in strings (converts all %XX codes to their 8bit
* versions). This function returns a newly allocated string or NULL if an
* error occurred.
*/
char *curl_unescape(const char *string, int length);
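A short usage sketch for the pair, passing 0 as the length so strlen() is
used on the input, and releasing both results with curl_free():
    /* sketch: round-trip a string through the URL encoder/decoder;
       both results are newly allocated and must be freed */
    char *enc = curl_escape("name with spaces & symbols", 0);
    char *dec = enc ? curl_unescape(enc, 0) : NULL;
    if(enc)
      curl_free(enc);
    if(dec)
      curl_free(dec);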
/* 20020912 WJM. Provide for a de-allocation in the same translation unit
that did the allocation. Added in libcurl 7.10 */
/*
* NAME curl_free()
*
* DESCRIPTION
*
* Provided for de-allocation in the same translation unit that did the
* allocation. Added in libcurl 7.10
*/
void curl_free(void *p);
/* curl_global_init() should be invoked exactly once for each application that
uses libcurl */
/*
* NAME curl_global_init()
*
* DESCRIPTION
*
* curl_global_init() should be invoked exactly once for each application that
* uses libcurl
*/
CURLcode curl_global_init(long flags);
/* curl_global_cleanup() should be invoked exactly once for each application
that uses libcurl */
/*
* NAME curl_global_cleanup()
*
* DESCRIPTION
*
* curl_global_cleanup() should be invoked exactly once for each application
* that uses libcurl
*/
void curl_global_cleanup(void);
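An illustrative skeleton of the once-per-application pattern described in
these comments:
    /* sketch: one global init at program start, one cleanup at exit,
       with all easy-handle work in between */
    #include <curl/curl.h>

    int main(void)
    {
      CURL *curl;

      curl_global_init(CURL_GLOBAL_ALL);

      curl = curl_easy_init();
      if(curl) {
        /* ... set options, perform transfers ... */
        curl_easy_cleanup(curl);
      }

      curl_global_cleanup();
      return 0;
    }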
/* This is the version number */
#define LIBCURL_VERSION "7.10.4"
#define LIBCURL_VERSION_NUM 0x070a04
/* linked-list structure for the CURLOPT_QUOTE option (and other) */
struct curl_slist {
char *data;
struct curl_slist *next;
char *data;
struct curl_slist *next;
};
/*
* NAME curl_slist_append()
*
* DESCRIPTION
*
* Appends a string to a linked list. If no list exists, it will be created
* first. Returns the new list, after appending.
*/
struct curl_slist *curl_slist_append(struct curl_slist *, const char *);
/*
* NAME curl_slist_free_all()
*
* DESCRIPTION
*
* free a previously built curl_slist.
*/
void curl_slist_free_all(struct curl_slist *);
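A short sketch of building and freeing such a list, assuming an
already-initialized easy handle named curl (CURLOPT_HTTPHEADER is one option
that takes a curl_slist; the header values are just examples):
    /* sketch: build a two-entry list, hand it to an option, free it once
       the transfer is done (libcurl will not free it for you) */
    struct curl_slist *headers = NULL;

    headers = curl_slist_append(headers, "Accept: text/plain");
    headers = curl_slist_append(headers, "X-Example-Header: yes");

    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
    /* ... curl_easy_perform(curl) ... */

    curl_slist_free_all(headers);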
/*
@@ -861,22 +992,17 @@ typedef enum {
CURLINFO_REQUEST_SIZE = CURLINFO_LONG + 12,
CURLINFO_SSL_VERIFYRESULT = CURLINFO_LONG + 13,
CURLINFO_FILETIME = CURLINFO_LONG + 14,
CURLINFO_CONTENT_LENGTH_DOWNLOAD = CURLINFO_DOUBLE + 15,
CURLINFO_CONTENT_LENGTH_UPLOAD = CURLINFO_DOUBLE + 16,
CURLINFO_STARTTRANSFER_TIME = CURLINFO_DOUBLE + 17,
CURLINFO_CONTENT_TYPE = CURLINFO_STRING + 18,
CURLINFO_REDIRECT_TIME = CURLINFO_DOUBLE + 19,
CURLINFO_REDIRECT_COUNT = CURLINFO_LONG + 20,
CURLINFO_PRIVATE = CURLINFO_STRING + 21,
CURLINFO_HTTP_CONNECTCODE = CURLINFO_LONG + 22,
/* Fill in new entries below here! */
CURLINFO_CONTENT_TYPE = CURLINFO_STRING + 18,
CURLINFO_REDIRECT_TIME = CURLINFO_DOUBLE + 19,
CURLINFO_REDIRECT_COUNT = CURLINFO_LONG + 20,
CURLINFO_PRIVATE = CURLINFO_STRING + 21,
/* Fill in new entries here! */
CURLINFO_LASTONE = 22
CURLINFO_LASTONE = 23
} CURLINFO;
typedef enum {
@@ -991,8 +1117,19 @@ typedef struct {
#define CURL_VERSION_KERBEROS4 (1<<1)
#define CURL_VERSION_SSL (1<<2)
#define CURL_VERSION_LIBZ (1<<3)
#define CURL_VERSION_NTLM (1<<4)
#define CURL_VERSION_GSSNEGOTIATE (1<<5)
#define CURL_VERSION_DEBUG (1<<6) /* built with debug capabilities */
#define CURL_VERSION_ASYNCHDNS (1<<7)
/* returns a pointer to a static copy of the version info struct */
/*
* NAME curl_version_info()
*
* DESCRIPTION
*
* This function returns a pointer to a static copy of the version info
* struct. See above.
*/
curl_version_info_data *curl_version_info(CURLversion);
#ifdef __cplusplus

View File

@@ -45,10 +45,23 @@
file descriptors simultaneous easily.
*/
#if defined(_WIN32) && !defined(WIN32)
/* Chris Lewis mentioned that he doesn't get WIN32 defined, only _WIN32 so we
make this adjustment to catch this. */
#define WIN32 1
#endif
#if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
#include <winsock.h>
#else
#ifdef _AIX
/* HP-UX systems version 9, 10 and 11 lack sys/select.h and so do oldish
libc5-based Linux systems. Only include it on systems that are known to
require it! */
#include <sys/select.h>
#endif
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>

View File

@@ -23,9 +23,7 @@
* $Id$
***************************************************************************/
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
size_t fread (void *, size_t, size_t, FILE *);
size_t fwrite (const void *, size_t, size_t, FILE *);

View File

@@ -5,16 +5,23 @@
AUTOMAKE_OPTIONS = foreign nostdinc
EXTRA_DIST = getdate.y Makefile.b32 Makefile.b32.resp Makefile.m32 \
Makefile.vc6 Makefile.riscos libcurl.def curllib.dsp \
Makefile.vc6 Makefile.riscos libcurl.def curllib.dsp \
curllib.dsw config-vms.h config-win32.h config-riscos.h config-mac.h \
config.h.in ca-bundle.crt README.encoding README.memoryleak
config.h.in ca-bundle.crt README.encoding README.memoryleak \
README.ares makefile.dj config.dj
lib_LTLIBRARIES = libcurl.la
if ARES
ARESINC = -I$(top_srcdir)/ares
endif
# we use srcdir/include for the static global include files
# we use builddir/lib for the generated lib/config.h file to get found
# we use srcdir/lib for the lib-private header files
INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/lib -I$(top_srcdir)/lib
INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/lib -I$(top_srcdir)/lib $(ARESINC)
LDFLAGS += -L$(top_srcdir)/lib
VERSION=-version-info 2:2:0
@@ -47,15 +54,18 @@ VERSION=-version-info 2:2:0
#
if NO_UNDEFINED
# The -no-undefined flag is CRUCIAL for this to build fine on Cygwin. If we
# find a case in which we need to remove this flag, we should most likely
# write a configure check that detects when this flag is needed and when its
# not.
libcurl_la_LDFLAGS = -no-undefined $(VERSION)
# The -no-undefined flag is CRUCIAL for this to build fine on Cygwin.
UNDEF = -no-undefined
else
libcurl_la_LDFLAGS = $(VERSION)
UNDEF =
endif
if ARES
ARESLIB = -lares -L$(top_builddir)/ares
endif
libcurl_la_LDFLAGS = $(UNDEF) $(VERSION) $(ARESLIB)
libcurl_la_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c \
base64.c file.h hostip.c progress.c timeval.h base64.h formdata.c \
hostip.h progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c \
@@ -66,16 +76,29 @@ getpass.c netrc.c telnet.h getinfo.c getinfo.h transfer.c strequal.c \
strequal.h easy.c security.h security.c krb4.c krb4.h memdebug.c \
memdebug.h inet_ntoa_r.h http_chunks.c http_chunks.h strtok.c strtok.h \
connect.c connect.h llist.c llist.h hash.c hash.h multi.c \
content_encoding.c content_encoding.h share.c share.h
content_encoding.c content_encoding.h share.c share.h http_digest.c \
md5.c md5.h http_digest.h http_negotiate.c http_negotiate.h \
http_ntlm.c http_ntlm.h ca-bundle.h
noinst_HEADERS = setup.h transfer.h
BUILT_SOURCES = $(srcdir)/getdate.c $(srcdir)/ca-bundle.h
# Say $(srcdir), so GNU make does not report an ambiguity with the .y.c rule.
$(srcdir)/getdate.c: getdate.y
cd $(srcdir) && \
$(YACC) $(YFLAGS) getdate.y; \
mv -f y.tab.c getdate.c
$(srcdir)/ca-bundle.h: Makefile.in Makefile
chmod 0644 $@
echo "/* The file is generated automaticly */" > $@
if CABUNDLE
echo '#define CURL_CA_BUNDLE @CURL_CA_BUNDLE@' >> $@
else
echo '#undef CURL_CA_BUNDLE /* unknown */' >> $@
endif
install-data-hook:
@if test -n "@CURL_CA_BUNDLE@"; then \
$(mkinstalldirs) `dirname $(DESTDIR)@CURL_CA_BUNDLE@`; \
@@ -85,4 +108,5 @@ install-data-hook:
# this hook is mainly for non-unix systems to build even if configure
# isn't run
dist-hook:
cp $(srcdir)/ca-bundle.h.in $(distdir)/ca-bundle.h
chmod 0644 $(distdir)/ca-bundle.h
echo "/* ca bundle path set in here*/" > $(distdir)/ca-bundle.h

View File

@@ -26,24 +26,28 @@ ifdef SSL
DLL_LIBS = -L$(OPENSSL_PATH)/out -leay32 -lssl32
endif
ifdef ZLIB
INCLUDES += -I"$(ZLIB_PATH)"
CFLAGS += -DHAVE_LIBZ -DHAVE_ZLIB_H
DLL_LIBS += -L$(ZLIB_PATH) -lz
INCLUDES += -I"$(ZLIB_PATH)"
CFLAGS += -DHAVE_LIBZ -DHAVE_ZLIB_H
DLL_LIBS += -L$(ZLIB_PATH) -lz
endif
COMPILE = $(CC) $(INCLUDES) $(CFLAGS)
libcurl_a_LIBRARIES = libcurl.a
libcurl_a_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c base64.c \
file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h progress.h \
cookie.c formdata.h http.c sendf.c cookie.h ftp.c http.h sendf.h url.c dict.c \
ftp.h if2ip.c speedcheck.c url.h dict.h getdate.c if2ip.h speedcheck.h \
urldata.h transfer.c getdate.h ldap.c ssluse.c version.c transfer.h getenv.c \
ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h getpass.c netrc.c \
telnet.h getinfo.c strequal.c strequal.h easy.c security.h \
security.c krb4.h krb4.c memdebug.h memdebug.c inet_ntoa_r.h http_chunks.h http_chunks.c \
strtok.c connect.c hash.c llist.c multi.c share.c share.h\
content_encoding.h content_encoding.c
file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h \
progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c \
http.h sendf.h url.c dict.c ftp.h if2ip.c speedcheck.c url.h \
dict.h getdate.c if2ip.h speedcheck.h urldata.h transfer.c getdate.h \
ldap.c ssluse.c version.c transfer.h getenv.c \
ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h \
getpass.c netrc.c telnet.h getinfo.c strequal.c strequal.h easy.c \
security.h security.c krb4.h krb4.c memdebug.h memdebug.c \
inet_ntoa_r.h http_chunks.h http_chunks.c \
strtok.c connect.c hash.c llist.c multi.c share.c share.h \
content_encoding.h content_encoding.c http_digest.h http_digest.c \
http_negotiate.c http_negotiate.h http_ntlm.c http_ntlm.h md5.h \
md5.c
libcurl_a_OBJECTS = file.o timeval.o base64.o hostip.o progress.o \
formdata.o cookie.o http.o sendf.o ftp.o url.o dict.o if2ip.o \
@@ -51,7 +55,7 @@ libcurl_a_OBJECTS = file.o timeval.o base64.o hostip.o progress.o \
getenv.o escape.o mprintf.o telnet.o getpass.o netrc.o getinfo.o \
strequal.o easy.o security.o krb4.o memdebug.o http_chunks.o \
strtok.o connect.o hash.o llist.o multi.o share.o \
content_encoding.o
content_encoding.o http_digest.o http_negotiate.o http_ntlm.o md5.o
LIBRARIES = $(libcurl_a_LIBRARIES)
SOURCES = $(libcurl_a_SOURCES)

View File

@@ -33,7 +33,7 @@
LIB_NAME = libcurl
LIB_NAME_DEBUG = libcurld
!IFNDEF OPENSSL_PATH
OPENSSL_PATH = ../../openssl-0.9.6
OPENSSL_PATH = ../../openssl-0.9.7a
!ENDIF
#############################################################
@@ -48,7 +48,8 @@ LNKDLL = link.exe /DLL /def:libcurl.def
LNKLIB = link.exe -lib
LFLAGS = /nologo
LINKLIBS = ws2_32.lib winmm.lib
SSLLIBS = libeay32.lib ssleay32.lib RSAglue.lib
SSLLIBS = libeay32.lib ssleay32.lib
# RSAglue.lib was formerly needed in the SSLLIBS
CFGSET = FALSE
######################
@@ -201,7 +202,11 @@ X_OBJS= \
$(DIROBJ)\hash.obj \
$(DIROBJ)\llist.obj \
$(DIROBJ)\share.obj \
$(DIROBJ)\multi.obj
$(DIROBJ)\multi.obj \
$(DIROBJ)\http_digest.obj \
$(DIROBJ)\http_negotiate.obj \
$(DIROBJ)\http_ntlm.obj \
$(DIROBJ)\md5.obj
all : $(TARGET)

44
lib/README.ares Normal file
View File

@@ -0,0 +1,44 @@
$Id$
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
How To Build libcurl to use ares for asynch name resolves
=========================================================
ares:
ftp://athena-dist.mit.edu/pub/ATHENA/ares/ares-1.1.1.tar.gz
http://curl.haxx.se/dev/ares-1.1.1.tar.gz
ares patch:
http://curl.haxx.se/dev/ares2.diff
Mac OS X quirk:
ares 1.1.1 contains too old versions of config.guess and config.sub. Copy
the ones from the curl source tree into the ares source tree before you
run configure.
Build ares
==========
1. unpack the ares-1.1.1 archive
2. apply patch (if you're on Mac OS X or windows)
3. ./configure
4. make
Build libcurl to use ares
=========================
1. Move the ares source/build tree to a subdirectory in the curl root named
'ares'.
2. ./buildconf
3. ./configure --enable-ares
4. make
If the configure script detects IPv6 support, you need to explicitly disable
that (--disable-ipv6) since ares isn't IPv6 compatible (yet).
Please let me know how it builds, runs, works or whatever. I had to do some
fairly big changes in some code parts to get this to work.

View File

@@ -5,15 +5,15 @@
HTTP/1.1 [RFC 2616] specifies that a client may request that a server encode
its response. This is usually used to compress a response using one of a set
of commonly available compression techniques. These schemes are `deflate'
(the zlib algorithm), `gzip' and `compress' [sec 3.5, RFC 2616]. A client
requests that the sever perform an encoding by including an Accept-Encoding
header in the request document. The value of the header should be one of the
recognized tokens `deflate', ... (there's a way to register new
schemes/tokens, see sec 3.5 of the spec). A server MAY honor the client's
encoding request. When a response is encoded, the server includes a
Content-Encoding header in the response. The value of the Content-Encoding
header indicates which scheme was used to encode the data.
of commonly available compression techniques. These schemes are `deflate' (the
zlib algorithm), `gzip' and `compress' [sec 3.5, RFC 2616]. A client requests
that the server perform an encoding by including an Accept-Encoding header in
the request document. The value of the header should be one of the recognized
tokens `deflate', ... (there's a way to register new schemes/tokens, see sec
3.5 of the spec). A server MAY honor the client's encoding request. When a
response is encoded, the server includes a Content-Encoding header in the
response. The value of the Content-Encoding header indicates which scheme was
used to encode the data.
A client may tell a server that it can understand several different encoding
schemes. In this case the server may choose any one of those and use it to
@@ -24,11 +24,10 @@ information on the Accept-Encoding header.
* Current support for content encoding:
I added support for the 'deflate' content encoding to both libcurl and curl.
Both regular and chunked transfers should work although I've tested only the
former. The library zlib is required for this feature. Places where I
modified the source code are commented and typically include my initials and
the date (e.g., 08/29/02 jhrg).
The 'deflate' and 'gzip' content encodings are both supported by libcurl.
Both regular and chunked transfers should work fine. The zlib library is
required for this feature. 'deflate' support was added by James Gallagher,
and support for the 'gzip' encoding was added by Dan Fandrich.
* The libcurl interface:
@@ -39,15 +38,23 @@ To cause libcurl to request a content encoding use:
where <string> is the intended value of the Accept-Encoding header.
Currently, libcurl only understands how to process responses that use the
`deflate' Content-Encoding, so the only value for CURLOPT_ENCODING that will
work (besides "identity," which does nothing) is "deflate." If a response is
encoded using either the `gzip' or `compress' methods, libcurl will return an
error indicating that the response could not be decoded. If <string> is null
or empty no Accept-Encoding header is generated.
"deflate" or "gzip" Content-Encoding, so the only values for CURLOPT_ENCODING
that will work (besides "identity," which does nothing) are "deflate" and
"gzip" If a response is encoded using the "compress" or methods, libcurl will
return an error indicating that the response could not be decoded. If
<string> is NULL no Accept-Encoding header is generated. If <string> is a
zero-length string, then an Accept-Encoding header containing all supported
encodings will be generated.
CURLOPT_ENCODING must be set to a non-NULL value for content to be
automatically decoded. If it is not set and the server still sends encoded
content (despite not having been asked), the data is returned in its raw form
and the Content-Encoding type is not checked.
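A minimal sketch of that interface, assuming an already-initialized easy
handle named curl and a placeholder URL:
    /* sketch: a zero-length string asks for every encoding libcurl supports
       and turns on automatic decoding of the response */
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
    curl_easy_setopt(curl, CURLOPT_ENCODING, "");
    curl_easy_perform(curl);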
* The curl interface:
Use the --compressed option with curl to cause it to ask servers to compress
responses using deflate.
responses using deflate.
James Gallagher <jgallagher@gso.uri.edu>
Dan Fandrich <dan@coneharvesters.com>

View File

@@ -17,7 +17,7 @@ Single-threaded
Build
Rebuild libcurl with -DMALLOCDEBUG (usually, rerunning configure with
Rebuild libcurl with -DCURLDEBUG (usually, rerunning configure with
--enable-debug fixes this). 'make clean' first, then 'make' so that all
files actually are rebuilt properly. It will also make sense to build
libcurl with the debug option (usually -g to the compiler) so that debugging

View File

@@ -42,7 +42,7 @@
#include "base64.h"
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -220,6 +220,8 @@ int main(int argc, char **argv, char **envp)
#ifdef TEST_DECODE
/* decoding test harness. Read in a base64 string from stdin and write out the
* length returned by Curl_base64_decode, followed by the decoded data itself
*
* gcc -DTEST_DECODE base64.c -o base64 mprintf.o memdebug.o
*/
#include <stdio.h>
@@ -232,13 +234,31 @@ int main(int argc, char **argv, char **envp)
int base64Len;
unsigned char *data;
int dataLen;
int i, j;
base64 = (char *)suck(&base64Len);
data = (unsigned char *)malloc(base64Len * 3/4 + 8);
dataLen = Curl_base64_decode(base64, data);
fprintf(stderr, "%d\n", dataLen);
fwrite(data,1,dataLen,stdout);
for(i=0; i < dataLen; i+=0x10) {
printf("0x%02x: ", i);
for(j=0; j < 0x10; j++)
if((j+i) < dataLen)
printf("%02x ", data[i+j]);
else
printf(" ");
printf(" | ");
for(j=0; j < 0x10; j++)
if((j+i) < dataLen)
printf("%c", isgraph(data[i+j])?data[i+j]:'.');
else
break;
puts("");
}
free(base64); free(data);
return 0;

92
lib/config.dj Normal file
View File

@@ -0,0 +1,92 @@
#ifndef _CURL_CONFIG_DJGPP_H
#define _CURL_CONFIG_DJGPP_H
#define OS "djgpp"
#define PACKAGE "curl"
#define CURL_CA_BUNDLE "/dev/env/CURL_CA_BUNDLE"
#if (DJGPP_MINOR >= 4)
/* #define HAVE_DLOPEN 1 maybe not (DXE3) */
#endif
#if 1 /* use ioctlsocket() via fsext'ed fcntl() */
#define HAVE_O_NONBLOCK 1
#else
#define HAVE_IOCTLSOCKET 1
#endif
#define HAVE_ALARM 1
#define HAVE_ARPA_INET_H 1
#define HAVE_CLOSESOCKET 1
#define HAVE_FCNTL_H 1
#define HAVE_GETHOSTBYADDR 1
#define HAVE_GETHOSTNAME 1
#define HAVE_GETPASS 1
#define HAVE_GETSERVBYNAME 1
#define HAVE_GETTIMEOFDAY 1
#define HAVE_INET_ADDR 1
#define HAVE_INET_NTOA 1
#define HAVE_IO_H 1
#define HAVE_MALLOC_H 1
#define HAVE_MEMORY_H 1
#define HAVE_NETDB_H 1
#define HAVE_NETINET_IN_H 1
#define HAVE_NET_IF_H 1
#define HAVE_PERROR 1
#define HAVE_SELECT 1
#define HAVE_SETJMP_H 1
#define HAVE_SETVBUF 1
#define HAVE_SIGNAL 1
#define HAVE_SIGACTION 1
#define HAVE_SIGSETJMP 1
#define HAVE_SOCKET 1
#define HAVE_STRCASECMP 1
#define HAVE_STRDUP 1
#define HAVE_STRFTIME 1
#define HAVE_STRICMP 1
#define HAVE_STRSTR 1
#define HAVE_SYS_SOCKET_H 1
#define HAVE_SYS_STAT_H 1
#define HAVE_SYS_TYPES_H 1
#define HAVE_TERMIOS_H 1
#define HAVE_TIME_H 1
#define HAVE_UNAME 1
#define HAVE_UNISTD_H 1
#define HAVE_VPRINTF 1
#define RETSIGTYPE void
#define SIZEOF_LONG_DOUBLE 16
#define SIZEOF_LONG_LONG 8
#define STDC_HEADERS 1
#define TIME_WITH_SYS_TIME 1
#define BSD
#define USE_ZLIB
/* #define MALLOCDEBUG */
#ifdef HAVE_OPENSSL_ENGINE_H /* on cmd-line */
#define HAVE_OPENSSL_X509_H 1
#define HAVE_OPENSSL_SSL_H 1
#define HAVE_OPENSSL_RSA_H 1
#define HAVE_OPENSSL_PEM_H 1
#define HAVE_OPENSSL_ERR_H 1
#define HAVE_OPENSSL_CRYPTO_H 1
#define HAVE_LIBSSL 1
#define HAVE_LIBCRYPTO 1
#define OPENSSL_NO_KRB5 1
#endif
#define in_addr_t u_long
#define socklen_t int
#define ssize_t int
#include <stdlib.h>
#include <string.h>
#include <tcp.h> /* Watt-32 API */
#undef word
#endif /* _CURL_CONFIG_DJGPP_H */

View File

@@ -70,6 +70,7 @@
#define EINPROGRESS WSAEINPROGRESS
#define EWOULDBLOCK WSAEWOULDBLOCK
#define EISCONN WSAEISCONN
#define ENOTSOCK WSAENOTSOCK
#endif
#include "urldata.h"
@@ -77,12 +78,11 @@
#include "if2ip.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
static
int ourerrno(void)
int Curl_ourerrno(void)
{
#ifdef WIN32
return (int)GetLastError();
@@ -191,17 +191,8 @@ int waitconnect(int sockfd, /* socket */
static CURLcode bindlocal(struct connectdata *conn,
int sockfd)
{
#if !defined(WIN32)||defined(__CYGWIN32__)
/* We don't generally like checking for OS-versions, we should make this
HAVE_XXXX based, although at the moment I don't have a decent test for
this! */
#ifdef HAVE_INET_NTOA
#ifndef INADDR_NONE
#define INADDR_NONE (in_addr_t) ~0
#endif
struct SessionHandle *data = conn->data;
/*************************************************************
@@ -213,12 +204,20 @@ static CURLcode bindlocal(struct connectdata *conn,
size_t size;
char myhost[256] = "";
in_addr_t in;
int rc;
if(Curl_if2ip(data->set.device, myhost, sizeof(myhost))) {
/* First check if the given name is an IP address */
in=inet_addr(data->set.device);
if((in == CURL_INADDR_NONE) &&
Curl_if2ip(data->set.device, myhost, sizeof(myhost))) {
/*
* We now have the numerical IPv4-style x.y.z.w in the 'myhost' buffer
*/
h = Curl_resolv(data, myhost, 0);
rc = Curl_resolv(conn, myhost, 0, &h);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &h);
}
else {
if(strlen(data->set.device)>1) {
@@ -226,11 +225,14 @@ static CURLcode bindlocal(struct connectdata *conn,
* This was not an interface, resolve the name as a host name
* or IP number
*/
h = Curl_resolv(data, data->set.device, 0);
if(h) {
rc = Curl_resolv(conn, data->set.device, 0, &h);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &h);
if(h)
/* we know data->set.device is shorter than the myhost array */
strcpy(myhost, data->set.device);
}
}
}
@@ -247,7 +249,7 @@ static CURLcode bindlocal(struct connectdata *conn,
infof(data, "We bind local end to %s\n", myhost);
in=inet_addr(myhost);
if (INADDR_NONE != in) {
if (CURL_INADDR_NONE != in) {
if ( h ) {
Curl_addrinfo *addr = h->addr;
@@ -336,7 +338,6 @@ static CURLcode bindlocal(struct connectdata *conn,
} /* end of device selection support */
#endif /* end of HAVE_INET_NTOA */
#endif /* end of not WIN32 */
return CURLE_HTTP_PORT_FAILED;
}
@@ -350,7 +351,7 @@ int socketerror(int sockfd)
if( -1 == getsockopt(sockfd, SOL_SOCKET, SO_ERROR,
(void *)&err, &errSize))
err = ourerrno();
err = Curl_ourerrno();
return err;
}
@@ -414,7 +415,7 @@ CURLcode Curl_is_connected(struct connectdata *conn,
return CURLE_COULDNT_CONNECT;
}
else if(1 != rc) {
int error = ourerrno();
int error = Curl_ourerrno();
failf(data, "Failed connect to %s:%d, errno: %d",
conn->hostname, conn->port, error);
return CURLE_COULDNT_CONNECT;
@@ -526,7 +527,7 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
rc = connect(sockfd, ai->ai_addr, ai->ai_addrlen);
if(-1 == rc) {
int error=ourerrno();
int error=Curl_ourerrno();
switch (error) {
case EINPROGRESS:
@@ -645,7 +646,7 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
sizeof(serv_addr));
if(-1 == rc) {
int error=ourerrno();
int error=Curl_ourerrno();
switch (error) {
case EINPROGRESS:

View File

@@ -37,4 +37,6 @@ CURLcode Curl_connecthost(struct connectdata *conn,
Curl_ipconnect **addr, /* the one we used */
bool *connected /* truly connected? */
);
int Curl_ourerrno(void);
#endif

View File

@@ -1,8 +1,8 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
@@ -10,7 +10,7 @@
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
@@ -25,13 +25,26 @@
#ifdef HAVE_LIBZ
#include <stdlib.h>
#include <string.h>
#include "urldata.h"
#include <curl/curl.h>
#include <curl/types.h>
#include "sendf.h"
#define DSIZ 4096 /* buffer size for decompressed data */
#define DSIZ 0x10000 /* buffer size for decompressed data */
#define GZIP_MAGIC_0 0x1f
#define GZIP_MAGIC_1 0x8b
/* gzip flag byte */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
#define COMMENT 0x10 /* bit 4 set: file comment present */
#define RESERVED 0xE0 /* bits 5..7: reserved */
static CURLcode
process_zlib_error(struct SessionHandle *data, z_stream *z)
@@ -55,7 +68,7 @@ exit_zlib(z_stream *z, bool *zlib_init, CURLcode result)
}
CURLcode
Curl_unencode_deflate_write(struct SessionHandle *data,
Curl_unencode_deflate_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread)
{
@@ -63,7 +76,7 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
int result; /* Curl_client_write status */
char decomp[DSIZ]; /* Put the decompressed data here. */
z_stream *z = &k->z; /* zlib state structure */
/* Initialize zlib? */
if (!k->zlib_init) {
z->zalloc = (alloc_func)Z_NULL;
@@ -74,7 +87,7 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
k->zlib_init = 1;
}
/* Set the compressed input when this fucntion is called */
/* Set the compressed input when this function is called */
z->next_in = (Bytef *)k->str;
z->avail_in = nread;
@@ -87,11 +100,12 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
status = inflate(z, Z_SYNC_FLUSH);
if (status == Z_OK || status == Z_STREAM_END) {
result = Curl_client_write(data, CLIENTWRITE_BODY, decomp,
DSIZ - z->avail_out);
/* if !CURLE_OK, clean up, return */
if (result) {
return exit_zlib(z, &k->zlib_init, result);
if (DSIZ - z->avail_out) {
result = Curl_client_write(data, CLIENTWRITE_BODY, decomp,
DSIZ - z->avail_out);
/* if !CURLE_OK, clean up, return */
if (result)
return exit_zlib(z, &k->zlib_init, result);
}
/* Done?; clean up, return */
@@ -103,7 +117,233 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
}
/* Done with these bytes, exit */
if (status == Z_OK && z->avail_in == 0 && z->avail_out > 0)
if (status == Z_OK && z->avail_in == 0 && z->avail_out > 0)
return result;
}
else { /* Error; exit loop, handle below */
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
}
}
/* Skip over the gzip header */
static enum {
GZIP_OK,
GZIP_BAD,
GZIP_UNDERFLOW
} check_gzip_header(unsigned char const *data, ssize_t len, ssize_t *headerlen)
{
int method, flags;
const ssize_t totallen = len;
/* The shortest header is 10 bytes */
if (len < 10)
return GZIP_UNDERFLOW;
if ((data[0] != GZIP_MAGIC_0) || (data[1] != GZIP_MAGIC_1))
return GZIP_BAD;
method = data[2];
flags = data[3];
if (method != Z_DEFLATED || (flags & RESERVED) != 0) {
/* Can't handle this compression method or unknown flag */
return GZIP_BAD;
}
/* Skip over time, xflags, OS code and all previous bytes */
len -= 10;
data += 10;
if (flags & EXTRA_FIELD) {
ssize_t extra_len;
if (len < 2)
return GZIP_UNDERFLOW;
extra_len = (data[1] << 8) | data[0];
if (len < (extra_len+2))
return GZIP_UNDERFLOW;
len -= (extra_len + 2);
}
if (flags & ORIG_NAME) {
/* Skip over NUL-terminated file name */
while (len && *data) {
--len;
++data;
}
if (!len || *data)
return GZIP_UNDERFLOW;
/* Skip over the NUL */
--len;
++data;
}
if (flags & COMMENT) {
/* Skip over NUL-terminated comment */
while (len && *data) {
--len;
++data;
}
if (!len || *data)
return GZIP_UNDERFLOW;
/* Skip over the NUL */
--len;
++data;
}
if (flags & HEAD_CRC) {
if (len < 2)
return GZIP_UNDERFLOW;
len -= 2;
data += 2;
}
*headerlen = totallen - len;
return GZIP_OK;
}
CURLcode
Curl_unencode_gzip_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread)
{
int status; /* zlib status */
int result; /* Curl_client_write status */
char decomp[DSIZ]; /* Put the decompressed data here. */
z_stream *z = &k->z; /* zlib state structure */
/* Initialize zlib? */
if (!k->zlib_init) {
z->zalloc = (alloc_func)Z_NULL;
z->zfree = (free_func)Z_NULL;
z->opaque = 0; /* of dubious use 08/27/02 jhrg */
if (inflateInit2(z, -MAX_WBITS) != Z_OK)
return process_zlib_error(data, z);
k->zlib_init = 1; /* Initial call state */
}
/* This next mess is to get around the potential case where there isn't
enough data passed in to skip over the gzip header. If that happens,
we malloc a block and copy what we have then wait for the next call. If
there still isn't enough (this is definitely a worst-case scenario), we
make the block bigger, copy the next part in and keep waiting. */
/* Skip over gzip header? */
if (k->zlib_init == 1) {
/* Initial call state */
ssize_t hlen;
switch (check_gzip_header((unsigned char *)k->str, nread, &hlen)) {
case GZIP_OK:
z->next_in = (Bytef *)k->str + hlen;
z->avail_in = nread - hlen;
k->zlib_init = 3; /* Inflating stream state */
break;
case GZIP_UNDERFLOW:
/* We need more data so we can find the end of the gzip header.
It's possible that the memory block we malloc here will never be
freed if the transfer abruptly aborts after this point. Since it's
unlikely that circumstances will be right for this code path to be
followed in the first place, and it's even more unlikely for a transfer
to fail immediately afterwards, it should seldom be a problem. */
z->avail_in = nread;
z->next_in = malloc(z->avail_in);
if (z->next_in == NULL) {
return exit_zlib(z, &k->zlib_init, CURLE_OUT_OF_MEMORY);
}
memcpy(z->next_in, k->str, z->avail_in);
k->zlib_init = 2; /* Need more gzip header data state */
/* We don't have any data to inflate yet */
return CURLE_OK;
case GZIP_BAD:
default:
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
}
else if (k->zlib_init == 2) {
/* Need more gzip header data state */
ssize_t hlen;
unsigned char *oldblock = z->next_in;
z->avail_in += nread;
z->next_in = realloc(z->next_in, z->avail_in);
if (z->next_in == NULL) {
free(oldblock);
return exit_zlib(z, &k->zlib_init, CURLE_OUT_OF_MEMORY);
}
/* Append the new block of data to the previous one */
memcpy(z->next_in + z->avail_in - nread, k->str, nread);
switch (check_gzip_header(z->next_in, z->avail_in, &hlen)) {
case GZIP_OK:
/* This is the zlib stream data */
free(z->next_in);
/* Don't point into the malloced block since we just freed it */
z->next_in = (Bytef *)k->str + hlen + nread - z->avail_in;
z->avail_in = z->avail_in - hlen;
k->zlib_init = 3; /* Inflating stream state */
break;
case GZIP_UNDERFLOW:
/* We still don't have any data to inflate! */
return CURLE_OK;
case GZIP_BAD:
default:
free(z->next_in);
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
}
else {
/* Inflating stream state */
z->next_in = (Bytef *)k->str;
z->avail_in = nread;
}
if (z->avail_in == 0) {
/* We don't have any data to inflate; wait until next time */
return CURLE_OK;
}
/* because the buffer size is fixed, iteratively decompress
and transfer to the client via client_write. */
for (;;) {
/* (re)set buffer for decompressed output for every iteration */
z->next_out = (Bytef *)&decomp[0];
z->avail_out = DSIZ;
status = inflate(z, Z_SYNC_FLUSH);
if (status == Z_OK || status == Z_STREAM_END) {
if(DSIZ - z->avail_out) {
result = Curl_client_write(data, CLIENTWRITE_BODY, decomp,
DSIZ - z->avail_out);
/* if !CURLE_OK, clean up, return */
if (result)
return exit_zlib(z, &k->zlib_init, result);
}
/* Done?; clean up, return */
/* We should really check the gzip CRC here */
if (status == Z_STREAM_END) {
if (inflateEnd(z) == Z_OK)
return exit_zlib(z, &k->zlib_init, result);
else
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
/* Done with these bytes, exit */
if (status == Z_OK && z->avail_in == 0 && z->avail_out > 0)
return result;
}
else { /* Error; exit loop, handle below */

View File

@@ -20,7 +20,22 @@
*
* $Id$
***************************************************************************/
#include "setup.h"
/*
* Comma-separated list of all supported Content-Encodings ('identity' is implied)
*/
#ifdef HAVE_LIBZ
#define ALL_CONTENT_ENCODINGS "deflate, gzip"
#else
#define ALL_CONTENT_ENCODINGS "identity"
#endif
CURLcode Curl_unencode_deflate_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread);
CURLcode
Curl_unencode_gzip_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread);

View File

@@ -86,13 +86,15 @@ Example set of cookies:
#include <string.h>
#include <ctype.h>
#include "urldata.h"
#include "cookie.h"
#include "getdate.h"
#include "strequal.h"
#include "strtok.h"
#include "sendf.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -111,6 +113,17 @@ free_cookiemess(struct Cookie *co)
free(co);
}
static bool tailmatch(const char *little, const char *bigone)
{
unsigned int littlelen = strlen(little);
unsigned int biglen = strlen(bigone);
if(littlelen > biglen)
return FALSE;
return strequal(little, bigone+biglen-littlelen);
}
/****************************************************************************
*
* Curl_cookie_add()
@@ -120,10 +133,18 @@ free_cookiemess(struct Cookie *co)
***************************************************************************/
struct Cookie *
Curl_cookie_add(struct CookieInfo *c,
Curl_cookie_add(struct SessionHandle *data,
/* The 'data' pointer here may be NULL at times, and thus
must only be used very carefully for things that can deal
with data being NULL. Such as infof() and similar */
struct CookieInfo *c,
bool httpheader, /* TRUE if HTTP header-style line */
char *lineptr, /* first character of the line */
char *domain) /* default domain */
char *domain, /* default domain */
char *path) /* full path used when this cookie is set,
used to get default path for the cookie
unless set */
{
struct Cookie *clist;
char what[MAX_COOKIE_LINE];
@@ -134,6 +155,7 @@ Curl_cookie_add(struct CookieInfo *c,
struct Cookie *lastc=NULL;
time_t now = time(NULL);
bool replace_old = FALSE;
bool badcookie = FALSE; /* cookies are good by default. mmmmm yummy */
/* First, alloc and init a new struct for it */
co = (struct Cookie *)malloc(sizeof(struct Cookie));
@@ -186,8 +208,73 @@ Curl_cookie_add(struct CookieInfo *c,
co->path=strdup(whatptr);
}
else if(strequal("domain", name)) {
co->domain=strdup(whatptr);
co->field1= (whatptr[0]=='.')?2:1;
/* note that this name may or may not have a preceding dot, but
we don't care about that, we treat the names the same anyway */
char *ptr=whatptr;
int dotcount=1;
unsigned int i;
static const char *seventhree[]= {
"com", "edu", "net", "org", "gov", "mil", "int"
};
/* Count the dots, we need to make sure that there are THREE dots
in the normal domains, or TWO in the seventhree-domains. */
if('.' == whatptr[0])
/* don't count the initial dot, assume it */
ptr++;
do {
ptr = strchr(ptr, '.');
if(ptr) {
ptr++;
dotcount++;
}
} while(ptr);
for(i=0;
i<sizeof(seventhree)/sizeof(seventhree[0]); i++) {
if(tailmatch(seventhree[i], whatptr)) {
dotcount++; /* we allow one dot less for these */
break;
}
}
/* The original Netscape cookie spec defined that this domain name
MUST have three dots (or two if one of the seven holy TLDs),
but it seems that these kinds of cookies are in use "out there"
so we cannot be that strict. I've therefore lowered the check
to not allow less than two dots. */
if(dotcount < 2) {
/* Received and skipped a cookie with a domain using too few
dots. */
badcookie=TRUE; /* mark this as a bad cookie */
infof(data, "skipped cookie with illegal dotcount domain: %s",
whatptr);
}
else {
/* Now, we make sure that our host is within the given domain,
or the given domain is not valid and thus cannot be set. */
if(!domain || tailmatch(whatptr, domain)) {
char *ptr=whatptr;
if(ptr[0] == '.')
ptr++;
co->domain=strdup(ptr); /* dont prefix with dots internally */
co->tailmatch=TRUE; /* we always do that if the domain name was
given */
}
else {
/* we did not get a tailmatch and then the attempted set domain
is not a domain to which the current host belongs. Mark as
bad. */
badcookie=TRUE;
infof(data, "skipped cookie with bad tailmatch domain: %s",
whatptr);
}
}
}
else if(strequal("version", name)) {
co->version=strdup(whatptr);
@@ -249,8 +336,11 @@ Curl_cookie_add(struct CookieInfo *c,
semiptr=strchr(ptr, '\0');
} while(semiptr);
if(NULL == co->name) {
/* we didn't get a cookie name, this is an illegal line, bail out */
if(badcookie || (NULL == co->name)) {
/* we didn't get a cookie name or a bad one,
this is an illegal line, bail out */
if(co->expirestr)
free(co->expirestr);
if(co->domain)
free(co->domain);
if(co->path)
@@ -264,8 +354,20 @@ Curl_cookie_add(struct CookieInfo *c,
}
if(NULL == co->domain)
/* no domain given in the header line, set the default now */
/* no domain was given in the header line, set the default now */
co->domain=domain?strdup(domain):NULL;
if((NULL == co->path) && path) {
/* no path was given in the header line, set the default now */
char *endslash = strrchr(path, '/');
if(endslash) {
int pathlen = endslash-path+1; /* include the ending slash */
co->path=malloc(pathlen+1); /* one extra for the zero byte */
if(co->path) {
memcpy(co->path, path, pathlen);
co->path[pathlen]=0; /* zero terminate */
}
}
}
}
else {
/* This line is NOT a HTTP header style line, we do offer support for
@@ -297,9 +399,12 @@ Curl_cookie_add(struct CookieInfo *c,
/* Now loop through the fields and init the struct we already have
allocated */
for(ptr=firstptr, fields=0; ptr; ptr=strtok_r(NULL, "\t", &tok_buf), fields++) {
for(ptr=firstptr, fields=0; ptr;
ptr=strtok_r(NULL, "\t", &tok_buf), fields++) {
switch(fields) {
case 0:
if(ptr[0]=='.') /* skip preceding dots */
ptr++;
co->domain = strdup(ptr);
break;
case 1:
@@ -312,10 +417,8 @@ Curl_cookie_add(struct CookieInfo *c,
As far as I can see, it is set to true when the cookie says
.domain.com and to false when the domain is complete www.domain.com
We don't currently take advantage of this knowledge.
*/
co->field1=strequal(ptr, "TRUE")+1; /* store information */
co->tailmatch=strequal(ptr, "TRUE"); /* store information */
break;
case 2:
/* It turns out, that sometimes the file format allows the path
@@ -345,13 +448,16 @@ Curl_cookie_add(struct CookieInfo *c,
}
}
if(7 != fields) {
if(6 == fields) {
/* we got a cookie with blank contents, fix it */
co->value = strdup("");
}
else if(7 != fields) {
/* we did not find the sufficient number of fields to recognize this
as a valid line, abort and go home */
free_cookiemess(co);
return NULL;
}
}
if(!c->running && /* read from a file */
@@ -374,13 +480,8 @@ Curl_cookie_add(struct CookieInfo *c,
/* the names are identical */
if(clist->domain && co->domain) {
if(strequal(clist->domain, co->domain) ||
(clist->domain[0]=='.' &&
strequal(&(clist->domain[1]), co->domain)) ||
(co->domain[0]=='.' &&
strequal(clist->domain, &(co->domain[1]))) )
/* The domains are identical, or at least identical if you skip the
preceeding dot */
if(strequal(clist->domain, co->domain))
/* The domains are identical */
replace_old=TRUE;
}
else if(!clist->domain && !co->domain)
@@ -461,6 +562,12 @@ Curl_cookie_add(struct CookieInfo *c,
clist = clist->next;
}
if(c->running)
/* Only show this when NOT reading the cookies from a file */
infof(data, "%s cookie %s=\"%s\" for domain %s, path %s, expire %d\n",
replace_old?"Replaced":"Added", co->name, co->value,
co->domain, co->path, co->expires);
if(!replace_old) {
/* then make the last item point on this new one */
if(lastc)
@@ -470,7 +577,6 @@ Curl_cookie_add(struct CookieInfo *c,
}
c->numcookies++; /* one more cookie in the jar */
return co;
}
@@ -484,7 +590,8 @@ Curl_cookie_add(struct CookieInfo *c,
* If 'newsession' is TRUE, discard all "session cookies" on read from file.
*
****************************************************************************/
struct CookieInfo *Curl_cookie_init(char *file,
struct CookieInfo *Curl_cookie_init(struct SessionHandle *data,
char *file,
struct CookieInfo *inc,
bool newsession)
{
@@ -532,7 +639,7 @@ struct CookieInfo *Curl_cookie_init(char *file,
while(*lineptr && isspace((int)*lineptr))
lineptr++;
Curl_cookie_add(c, headerline, lineptr, NULL);
Curl_cookie_add(data, c, headerline, lineptr, NULL, NULL);
}
if(fromfile)
fclose(fp);
@@ -561,9 +668,6 @@ struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
struct Cookie *newco;
struct Cookie *co;
time_t now = time(NULL);
int hostlen=strlen(host);
int domlen;
struct Cookie *mainco=NULL;
if(!c || !c->cookies)
@@ -572,43 +676,42 @@ struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
co = c->cookies;
while(co) {
/* only process this cookie if it is not expired or had no expire
date AND that if the cookie requires we're secure we must only
continue if we are! */
/* only process this cookie if it is not expired or had no expire
date AND that if the cookie requires we're secure we must only
continue if we are! */
if( (co->expires<=0 || (co->expires> now)) &&
(co->secure?secure:TRUE) ) {
/* now check if the domain is correct */
if(!co->domain ||
(co->tailmatch && tailmatch(co->domain, host)) ||
(!co->tailmatch && strequal(host, co->domain)) ) {
/* the right part of the host matches the domain stuff in the
cookie data */
/* now check the left part of the path with the cookies path
requirement */
if(!co->path ||
checkprefix(co->path, path) ) {
/* now check if the domain is correct */
domlen=co->domain?strlen(co->domain):0;
if(!co->domain ||
((domlen<=hostlen) &&
strequal(host+(hostlen-domlen), co->domain)) ) {
/* the right part of the host matches the domain stuff in the
cookie data */
/* and now, we know this is a match and we should create an
entry for the return-linked-list */
newco = (struct Cookie *)malloc(sizeof(struct Cookie));
if(newco) {
/* first, copy the whole source cookie: */
memcpy(newco, co, sizeof(struct Cookie));
/* now check the left part of the path with the cookies path
requirement */
if(!co->path ||
checkprefix(co->path, path) ) {
/* and now, we know this is a match and we should create an
entry for the return-linked-list */
newco = (struct Cookie *)malloc(sizeof(struct Cookie));
if(newco) {
/* first, copy the whole source cookie: */
memcpy(newco, co, sizeof(struct Cookie));
/* then modify our next */
newco->next = mainco;
/* point the main to us */
mainco = newco;
}
}
}
}
co = co->next;
/* then modify our next */
newco->next = mainco;
/* point the main to us */
mainco = newco;
}
}
}
}
co = co->next;
}
return mainco; /* return the new list */
@@ -716,15 +819,19 @@ int Curl_cookie_output(struct CookieInfo *c, char *dumphere)
while(co) {
fprintf(out,
"%s\t" /* domain */
"%s\t" /* field1 */
"%s%s\t" /* domain */
"%s\t" /* tailmatch */
"%s\t" /* path */
"%s\t" /* secure */
"%u\t" /* expires */
"%s\t" /* name */
"%s\n", /* value */
/* Make sure all domains are prefixed with a dot if they allow
tailmatching. This is Mozilla-style. */
(co->tailmatch && co->domain && co->domain[0] != '.')? ".":"",
co->domain?co->domain:"unknown",
co->field1==2?"TRUE":"FALSE",
co->tailmatch?"TRUE":"FALSE",
co->path?co->path:"/",
co->secure?"TRUE":"FALSE",
(unsigned int)co->expires,
@@ -741,31 +848,4 @@ int Curl_cookie_output(struct CookieInfo *c, char *dumphere)
return 0;
}
#ifdef CURL_COOKIE_DEBUG
/*
* On my Solaris box, this command line builds this test program:
*
* gcc -g -o cooktest -DCURL_COOKIE_DEBUG -DHAVE_CONFIG_H -I.. -I../include cookie.c strequal.o getdate.o memdebug.o mprintf.o strtok.o -lnsl -lsocket
*
*/
int main(int argc, char **argv)
{
struct CookieInfo *c=NULL;
if(argc>1) {
c = Curl_cookie_init(argv[1], c);
Curl_cookie_add(c, TRUE, "PERSONALIZE=none;expires=Monday, 13-Jun-1988 03:04:55 GMT; domain=.fidelity.com; path=/ftgw; secure");
Curl_cookie_add(c, TRUE, "foobar=yes; domain=.haxx.se; path=/looser;");
c = Curl_cookie_init(argv[1], c);
Curl_cookie_output(c);
Curl_cookie_cleanup(c);
return 0;
}
return 1;
}
#endif
#endif /* CURL_DISABLE_HTTP */

View File

@@ -40,8 +40,7 @@ struct Cookie {
char *domain; /* domain = <this> */
long expires; /* expires = <this> */
char *expirestr; /* the plain text version */
char field1; /* read from a cookie file, 1 => FALSE, 2=> TRUE */
bool tailmatch; /* whether we do tail-matching of the domain name */
/* RFC 2109 keywords. Version=1 means 2109-compliant cookie sending */
char *version; /* Version = <value> */
@@ -69,14 +68,18 @@ struct CookieInfo {
#define MAX_NAME 256
#define MAX_NAME_TXT "255"
struct SessionHandle;
/*
* Add a cookie to the internal list of cookies. The domain argument is only
* used if the header boolean is TRUE.
* Add a cookie to the internal list of cookies. The domain and path arguments
* are only used if the header boolean is TRUE.
*/
struct Cookie *Curl_cookie_add(struct CookieInfo *, bool header, char *line,
char *domain);
struct CookieInfo *Curl_cookie_init(char *, struct CookieInfo *, bool);
struct Cookie *Curl_cookie_add(struct SessionHandle *data,
struct CookieInfo *, bool header, char *line,
char *domain, char *path);
struct CookieInfo *Curl_cookie_init(struct SessionHandle *data,
char *, struct CookieInfo *, bool);
struct Cookie *Curl_cookie_getlist(struct CookieInfo *, char *, char *, bool);
void Curl_cookie_freelist(struct Cookie *);
void Curl_cookie_cleanup(struct CookieInfo *);

View File

@@ -243,6 +243,10 @@ SOURCE=.\url.c
# End Source File
# Begin Source File
SOURCE=.\share.c
# End Source File
# Begin Source File
SOURCE=.\version.c
# End Source File
# End Group
@@ -385,6 +389,22 @@ SOURCE=.\url.h
SOURCE=.\urldata.h
# End Source File
# Begin Source File
SOURCE=.\http_digest.c
# End Source File
# Begin Source File
SOURCE=.\md5.c
# End Source File
# Begin Source File
SOURCE=.\http_digest.h
# End Source File
# Begin Source File
SOURCE=.\md5.h
# End Source File
# End Group
# Begin Group "Resource Files"

View File

@@ -44,7 +44,6 @@
#endif
#include <netinet/in.h>
#include <sys/time.h>
#include <sys/resource.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

View File

@@ -46,7 +46,6 @@
#endif
#include <netinet/in.h>
#include <sys/time.h>
#include <sys/resource.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
@@ -77,6 +76,7 @@
#include "url.h"
#include "getinfo.h"
#include "hostip.h"
#include "share.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
@@ -200,6 +200,7 @@ CURLcode curl_easy_setopt(CURL *curl, CURLoption tag, ...)
long param_long = 0;
void *param_obj = NULL;
struct SessionHandle *data = curl;
CURLcode ret=CURLE_FAILED_INIT;
va_start(arg, tag);
@@ -213,35 +214,45 @@ CURLcode curl_easy_setopt(CURL *curl, CURLoption tag, ...)
if(tag < CURLOPTTYPE_OBJECTPOINT) {
/* This is a LONG type */
param_long = va_arg(arg, long);
Curl_setopt(data, tag, param_long);
ret = Curl_setopt(data, tag, param_long);
}
else if(tag < CURLOPTTYPE_FUNCTIONPOINT) {
/* This is a object pointer type */
param_obj = va_arg(arg, void *);
Curl_setopt(data, tag, param_obj);
ret = Curl_setopt(data, tag, param_obj);
}
else {
param_func = va_arg(arg, func_T );
Curl_setopt(data, tag, param_func);
ret = Curl_setopt(data, tag, param_func);
}
va_end(arg);
return CURLE_OK;
return ret;
}
CURLcode curl_easy_perform(CURL *curl)
{
struct SessionHandle *data = (struct SessionHandle *)curl;
if (Curl_global_host_cache_use(data) && data->hostcache != Curl_global_host_cache_get()) {
if (data->hostcache) {
Curl_hash_destroy(data->hostcache);
}
data->hostcache = Curl_global_host_cache_get();
}
if ( ! (data->share && data->share->hostcache) ) {
if (!data->hostcache) {
data->hostcache = Curl_hash_alloc(7, Curl_freednsinfo);
if (Curl_global_host_cache_use(data) &&
data->hostcache != Curl_global_host_cache_get()) {
if (data->hostcache)
Curl_hash_destroy(data->hostcache);
data->hostcache = Curl_global_host_cache_get();
}
if (!data->hostcache) {
data->hostcache = Curl_hash_alloc(7, Curl_freednsinfo);
if(!data->hostcache)
/* While we possibly could survive and do good without a host cache,
the fact that creating it failed indicates that things are truly
screwed up and we should bail out! */
return CURLE_OUT_OF_MEMORY;
}
}
return Curl_perform(data);
@@ -250,8 +261,10 @@ CURLcode curl_easy_perform(CURL *curl)
void curl_easy_cleanup(CURL *curl)
{
struct SessionHandle *data = (struct SessionHandle *)curl;
if (!Curl_global_host_cache_use(data)) {
Curl_hash_destroy(data->hostcache);
if ( ! (data->share && data->share->hostcache) ) {
if ( !Curl_global_host_cache_use(data)) {
Curl_hash_destroy(data->hostcache);
}
}
Curl_close(data);
}
@@ -313,7 +326,8 @@ CURL *curl_easy_duphandle(CURL *incurl)
if(data->cookies)
/* If cookies are enabled in the parent handle, we enable them
in the clone as well! */
outcurl->cookies = Curl_cookie_init(data->cookies->filename,
outcurl->cookies = Curl_cookie_init(data,
data->cookies->filename,
outcurl->cookies,
data->set.cookiesession);
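
The two behavioural changes above are visible through the public API: curl_easy_setopt() now forwards the real Curl_setopt() result instead of always returning CURLE_OK, and curl_easy_perform() can fail early with CURLE_OUT_OF_MEMORY when the host cache cannot be created. A minimal, self-contained caller that checks for this (the URL is only a placeholder):

  #include <stdio.h>
  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    CURLcode rc;

    if(!curl)
      return 1;

    /* with the change above, a bad option is reported here instead of being
       silently swallowed and returned as CURLE_OK */
    rc = curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
    if(rc != CURLE_OK) {
      fprintf(stderr, "setopt failed: %d\n", (int)rc);
      curl_easy_cleanup(curl);
      return 1;
    }

    rc = curl_easy_perform(curl); /* may now return CURLE_OUT_OF_MEMORY if
                                     the host cache cannot be created */
    curl_easy_cleanup(curl);
    return (int)rc;
  }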

View File

@@ -33,7 +33,7 @@
#include <string.h>
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -79,6 +79,10 @@ char *curl_escape(const char *string, int length)
return ns;
}
#define ishex(in) ((in >= 'a' && in <= 'f') || \
(in >= 'A' && in <= 'F') || \
(in >= '0' && in <= '9'))
char *curl_unescape(const char *string, int length)
{
int alloc = (length?length:(int)strlen(string))+1;
@@ -93,13 +97,19 @@ char *curl_unescape(const char *string, int length)
while(--alloc > 0) {
in = *string;
if('%' == in) {
/* encoded part */
if(sscanf(string+1, "%02X", &hex)) {
in = hex;
string+=2;
alloc-=2;
}
if(('%' == in) && ishex(string[1]) && ishex(string[2])) {
/* this is two hexadecimal digits following a '%' */
char hexstr[3];
char *ptr;
hexstr[0] = string[1];
hexstr[1] = string[2];
hexstr[2] = 0;
hex = strtol(hexstr, &ptr, 16);
in = hex;
string+=2;
alloc-=2;
}
ns[index++] = in;
@@ -109,6 +119,9 @@ char *curl_unescape(const char *string, int length)
return ns;
}
/* For operating systems/environments that use different malloc/free
systems for the app and for this library, we provide a free that uses
the library's memory system */
void curl_free(void *p)
{
free(p);
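
The rewritten unescape loop only treats '%' as an escape introducer when two hexadecimal digits follow it. A small stand-alone demonstration of that rule (illustrative code, not part of the library):

  #include <stdio.h>
  #include <stdlib.h>

  /* same hex test as the ishex() macro above */
  #define ISHEX(c) (((c) >= 'a' && (c) <= 'f') || \
                    ((c) >= 'A' && (c) <= 'F') || \
                    ((c) >= '0' && (c) <= '9'))

  static void unescape_demo(const char *in)
  {
    char out[256];
    size_t o = 0;

    while(*in && o < sizeof(out)-1) {
      if(('%' == *in) && ISHEX(in[1]) && ISHEX(in[2])) {
        /* two hexadecimal digits follow the '%': decode them */
        char hexstr[3] = { in[1], in[2], 0 };
        out[o++] = (char)strtol(hexstr, NULL, 16);
        in += 3;
      }
      else
        out[o++] = *in++;  /* a lone '%' is copied through unchanged */
    }
    out[o] = 0;
    printf("%s\n", out);
  }

  int main(void)
  {
    unescape_demo("100%25%20done");  /* prints "100% done" */
    unescape_demo("50% off");        /* lone '%' kept as-is: "50% off" */
    return 0;
  }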

View File

@@ -48,7 +48,6 @@
#include <netinet/in.h>
#endif
#include <sys/time.h>
#include <sys/resource.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
@@ -88,18 +87,19 @@
#include <curl/mprintf.h>
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
/* Emulate a connect-then-transfer protocol. We connect to the file here */
CURLcode Curl_file_connect(struct connectdata *conn)
{
char *actual_path = curl_unescape(conn->path, 0);
char *real_path = curl_unescape(conn->path, 0);
struct FILE *file;
int fd;
#if defined(WIN32) || defined(__EMX__)
int i;
char *actual_path;
#endif
file = (struct FILE *)malloc(sizeof(struct FILE));
@@ -110,6 +110,29 @@ CURLcode Curl_file_connect(struct connectdata *conn)
conn->proto.file = file;
#if defined(WIN32) || defined(__EMX__)
/* If the first character is a slash, and there's
something that looks like a drive at the beginning of
the path, skip the slash. If we remove the initial
slash in all cases, paths without drive letters end up
relative to the current directory which isn't how
browsers work.
Some browsers accept | instead of : as the drive letter
separator, so we do too.
On other platforms, we need the slash to indicate an
absolute pathname. On Windows, absolute paths start
with a drive letter.
*/
actual_path = real_path;
if ((actual_path[0] == '/') &&
actual_path[1] &&
(actual_path[2] == ':' || actual_path[2] == '|'))
{
actual_path[2] = ':';
actual_path++;
}
/* change path separators from '/' to '\\' for Windows and OS/2 */
for (i=0; actual_path[i] != '\0'; ++i)
if (actual_path[i] == '/')
@@ -117,9 +140,9 @@ CURLcode Curl_file_connect(struct connectdata *conn)
fd = open(actual_path, O_RDONLY | O_BINARY); /* no CR/LF translation! */
#else
fd = open(actual_path, O_RDONLY);
fd = open(real_path, O_RDONLY);
#endif
free(actual_path);
free(real_path);
if(fd == -1) {
failf(conn->data, "Couldn't open file %s", conn->path);
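
The WIN32/__EMX__ block above rewrites file: paths that carry a drive letter. A stand-alone sketch of the same transformation, using a made-up input path:

  #include <stdio.h>

  int main(void)
  {
    char path[] = "/C|/docs/readme.txt";
    char *p = path;
    int i;

    /* strip the leading slash when a drive letter follows, and accept '|'
       as the drive separator just like the code above */
    if((p[0] == '/') && p[1] && (p[2] == ':' || p[2] == '|')) {
      p[2] = ':';
      p++;
    }
    /* change path separators from '/' to '\\' */
    for(i = 0; p[i]; i++)
      if(p[i] == '/')
        p[i] = '\\';

    printf("%s\n", p);   /* prints "C:\docs\readme.txt" */
    return 0;
  }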

View File

@@ -124,15 +124,12 @@ Content-Disposition: form-data; name="FILECONTENT"
#include "strequal.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
/* Length of the random boundary string. The risk of this being used
in binary data is very close to zero, 64^32 makes
6277101735386680763835789423207666416102355444464034512896
combinations... */
#define BOUNDARY_LENGTH 32
/* Length of the random boundary string. */
#define BOUNDARY_LENGTH 40
/* What kind of Content-Type to use on un-specified files with unrecognized
extensions. */
@@ -520,7 +517,7 @@ static const char * ContentTypeForFilename (const char *filename,
{".jpg", "image/jpeg"},
{".jpeg", "image/jpeg"},
{".txt", "text/plain"},
{".html", "text/plain"}
{".html", "text/html"}
};
if(prevtype)
@@ -1049,22 +1046,23 @@ char *Curl_FormBoundary(void)
the same form won't be identical */
int i;
static char table62[]=
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
static char table16[]="abcdef0123456789";
retstring = (char *)malloc(BOUNDARY_LENGTH);
retstring = (char *)malloc(BOUNDARY_LENGTH+1);
if(!retstring)
return NULL; /* failed */
srand(time(NULL)+randomizer++); /* seed */
strcpy(retstring, "curl"); /* bonus commercials 8*) */
strcpy(retstring, "----------------------------");
for(i=4; i<(BOUNDARY_LENGTH-1); i++) {
retstring[i] = table62[rand()%62];
}
retstring[BOUNDARY_LENGTH-1]=0; /* zero terminate */
for(i=strlen(retstring); i<BOUNDARY_LENGTH; i++)
retstring[i] = table16[rand()%16];
/* 28 dashes and 12 hexadecimal digits makes 16^12 (281474976710656)
combinations */
retstring[BOUNDARY_LENGTH]=0; /* zero terminate */
return retstring;
}
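
With the change above a boundary is 28 dashes followed by 12 characters drawn from a 16-character alphabet, which gives 16^12 (about 2.8*10^14) distinct strings of length BOUNDARY_LENGTH (40). A stand-alone sketch of the scheme (illustrative only, not the library function):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <time.h>

  #define BOUNDARY_LENGTH 40

  int main(void)
  {
    static const char table16[] = "abcdef0123456789";
    char boundary[BOUNDARY_LENGTH + 1];
    size_t i;

    srand((unsigned int)time(NULL));
    strcpy(boundary, "----------------------------");       /* 28 dashes */
    for(i = strlen(boundary); i < BOUNDARY_LENGTH; i++)
      boundary[i] = table16[rand() % 16];                    /* 12 hex chars */
    boundary[BOUNDARY_LENGTH] = 0;

    printf("%s\n", boundary);
    return 0;
  }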

395 lib/ftp.c
View File

@@ -94,17 +94,29 @@
#include <curl/mprintf.h>
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
/* Local API functions */
static CURLcode ftp_sendquote(struct connectdata *conn, struct curl_slist *quote);
static CURLcode ftp_sendquote(struct connectdata *conn,
struct curl_slist *quote);
static CURLcode ftp_cwd(struct connectdata *conn, char *path);
static CURLcode ftp_mkd(struct connectdata *conn, char *path);
static CURLcode cwd_and_mkd(struct connectdata *conn, char *path);
/* easy-to-use macro: */
#define FTPSENDF(x,y,z) if((result = Curl_ftpsendf(x,y,z))) return result
static void freedirs(struct FTP *ftp)
{
int i;
for (i=0; ftp->dirs[i]; i++){
free(ftp->dirs[i]);
ftp->dirs[i]=NULL;
}
}
/***********************************************************************
*
* AllowServerConnect()
@@ -158,6 +170,7 @@ static CURLcode AllowServerConnect(struct SessionHandle *data,
infof(data, "Connection accepted from server\n");
conn->secondarysocket = s;
Curl_nonblock(s, TRUE); /* enable non-blocking */
}
break;
}
@@ -237,7 +250,7 @@ CURLcode Curl_GetFTPResponse(ssize_t *nreadp, /* return number of bytes read */
if(!ftp->cache) {
readfd = rkeepfd; /* set every lap */
interval.tv_sec = timeout;
interval.tv_sec = 1; /* use 1 second timeout intervals */
interval.tv_usec = 0;
switch (select (sockfd+1, &readfd, NULL, NULL, &interval)) {
@@ -246,9 +259,10 @@ CURLcode Curl_GetFTPResponse(ssize_t *nreadp, /* return number of bytes read */
failf(data, "Transfer aborted due to select() error: %d", errno);
break;
case 0: /* timeout */
result = CURLE_OPERATION_TIMEDOUT;
failf(data, "Transfer aborted due to timeout");
break;
if(Curl_pgrsUpdate(conn))
return CURLE_ABORTED_BY_CALLBACK;
continue; /* just continue in our loop for the timeout duration */
default:
break;
}
@@ -407,9 +421,9 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
/* get some initial data into the ftp struct */
ftp->bytecountp = &conn->bytecount;
/* no need to duplicate them, the data struct won't change */
ftp->user = data->state.user;
ftp->passwd = data->state.passwd;
/* no need to duplicate them, this connectdata struct won't change */
ftp->user = conn->user;
ftp->passwd = conn->passwd;
ftp->response_time = 3600; /* set default response time-out */
if (data->set.tunnel_thru_httpproxy) {
@@ -510,7 +524,7 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
/* we may need to issue a KAUTH here to have access to the files
* do it if user supplied a password
*/
if(data->state.passwd && *data->state.passwd) {
if(conn->passwd && *conn->passwd) {
result = Curl_krb_kauth(conn);
if(result)
return result;
@@ -596,6 +610,14 @@ CURLcode Curl_ftp_done(struct connectdata *conn)
int ftpcode;
CURLcode result=CURLE_OK;
/* free the dir tree parts */
freedirs(ftp);
if(ftp->file) {
free(ftp->file);
ftp->file = NULL;
}
if(data->set.upload) {
if((-1 != data->set.infilesize) &&
(data->set.infilesize != *ftp->bytecountp) &&
@@ -707,33 +729,6 @@ CURLcode ftp_sendquote(struct connectdata *conn, struct curl_slist *quote)
return CURLE_OK;
}
/***********************************************************************
*
* ftp_cwd()
*
* Send 'CWD' to the remote server to Change Working Directory.
* It is the ftp version of the unix 'cd' command.
*/
static
CURLcode ftp_cwd(struct connectdata *conn, char *path)
{
ssize_t nread;
int ftpcode;
CURLcode result;
FTPSENDF(conn, "CWD %s", path);
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
if (result)
return result;
if (ftpcode != 250) {
failf(conn->data, "Couldn't cd to %s", path);
return CURLE_FTP_ACCESS_DENIED;
}
return CURLE_OK;
}
/***********************************************************************
*
* ftp_getfiletime()
@@ -933,6 +928,7 @@ ftp_pasv_verbose(struct connectdata *conn,
# endif
# else
(void)hostent_buf; /* avoid compiler warning */
answer = gethostbyaddr((char *) &address, sizeof(address), AF_INET);
# endif
#else
@@ -961,7 +957,7 @@ ftp_pasv_verbose(struct connectdata *conn,
#else
const int niflags = NI_NUMERICHOST | NI_NUMERICSERV;
#endif
port = 0; /* unused, prevent warning */
(void)port; /* prevent compiler warning */
if (getnameinfo(addr->ai_addr, addr->ai_addrlen,
nbuf, sizeof(nbuf), sbuf, sizeof(sbuf), niflags)) {
snprintf(nbuf, sizeof(nbuf), "?");
@@ -1076,7 +1072,8 @@ CURLcode ftp_use_port(struct connectdata *conn)
return CURLE_FTP_PORT_FAILED;
}
for (modep = (char **)mode; modep && *modep; modep++) {
for (modep = (char **)(data->set.ftp_use_eprt?&mode[0]:&mode[2]);
modep && *modep; modep++) {
int lprtaf, eprtaf;
int alen=0, plen=0;
@@ -1177,7 +1174,6 @@ CURLcode ftp_use_port(struct connectdata *conn)
return result;
if (ftpcode != 200) {
failf(data, "Server does not grok %s", *modep);
continue;
}
else
@@ -1186,6 +1182,7 @@ CURLcode ftp_use_port(struct connectdata *conn)
if (!*modep) {
sclose(portsock);
failf(data, "PORT command attempts failed");
return CURLE_FTP_PORT_FAILED;
}
/* we set the secondary socket variable to this for now, it
@@ -1206,13 +1203,25 @@ CURLcode ftp_use_port(struct connectdata *conn)
bool sa_filled_in = FALSE;
if(data->set.ftpport) {
if(Curl_if2ip(data->set.ftpport, myhost, sizeof(myhost))) {
h = Curl_resolv(data, myhost, 0);
in_addr_t in;
int rc;
/* First check if the given name is an IP address */
in=inet_addr(data->set.ftpport);
if((in == CURL_INADDR_NONE) &&
Curl_if2ip(data->set.ftpport, myhost, sizeof(myhost))) {
rc = Curl_resolv(conn, myhost, 0, &h);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &h);
}
else {
int len = strlen(data->set.ftpport);
if(len>1)
h = Curl_resolv(data, data->set.ftpport, 0);
if(len>1) {
rc = Curl_resolv(conn, data->set.ftpport, 0, &h);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &h);
}
if(h)
strcpy(myhost, data->set.ftpport); /* buffer overflow risk */
}
@@ -1351,6 +1360,7 @@ CURLcode ftp_use_pasv(struct connectdata *conn,
CURLcode result;
struct Curl_dns_entry *addr=NULL;
Curl_ipconnect *conninfo;
int rc;
/*
Here's the executive summary on what to do:
@@ -1475,14 +1485,20 @@ CURLcode ftp_use_pasv(struct connectdata *conn,
* We don't want to rely on a former host lookup that might've expired
* now, instead we remake the lookup here and now!
*/
addr = Curl_resolv(data, conn->proxyhost, conn->port);
rc = Curl_resolv(conn, conn->proxyhost, conn->port, &addr);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &addr);
connectport =
(unsigned short)conn->port; /* we connect to the proxy's port */
}
else {
/* normal, direct, ftp connection */
addr = Curl_resolv(data, newhostp, newport);
rc = Curl_resolv(conn, newhostp, newport, &addr);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &addr);
if(!addr) {
failf(data, "Can't resolve new host %s:%d", newhostp, newport);
return CURLE_FTP_CANT_GET_HOST;
@@ -1741,7 +1757,7 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
if(result)
return result;
/* Send any PREQUOTE strings after transfer type is set? (Wesley Laxton)*/
/* Send any PREQUOTE strings after transfer type is set? */
if(data->set.prequote) {
if ((result = ftp_sendquote(conn, data->set.prequote)) != CURLE_OK)
return result;
@@ -1918,8 +1934,14 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
return result;
}
else {
failf(data, "%s", buf+4);
return CURLE_FTP_COULDNT_RETR_FILE;
if(dirlist && (ftpcode == 450)) {
/* simply no matching files */
ftp->no_transfer = TRUE; /* don't think we should download anything */
}
else {
failf(data, "%s", buf+4);
return CURLE_FTP_COULDNT_RETR_FILE;
}
}
}
@@ -1954,26 +1976,61 @@ CURLcode ftp_perform(struct connectdata *conn,
if ((result = ftp_sendquote(conn, data->set.quote)) != CURLE_OK)
return result;
}
/* This is a re-used connection. Since we change directory to where the
transfer is taking place, we must now get back to the original dir
where we ended up after login: */
if (conn->bits.reuse && ftp->entrypath) {
if ((result = ftp_cwd(conn, ftp->entrypath)) != CURLE_OK)
if ((result = cwd_and_mkd(conn, ftp->entrypath)) != CURLE_OK)
return result;
}
/* change directory first! */
if(ftp->dir && ftp->dir[0]) {
if ((result = ftp_cwd(conn, ftp->dir)) != CURLE_OK)
{
int i; /* counter for loop */
for (i=0; ftp->dirs[i]; i++) {
/* RFC 1738 says empty components should be respected too, but
that is plain stupid since CWD can't be used with an empty argument */
if ((result = cwd_and_mkd(conn, ftp->dirs[i])) != CURLE_OK)
return result;
}
}
/* Requested time of file? */
if(data->set.get_filetime && ftp->file) {
/* Requested time of file or time-depended transfer? */
if((data->set.get_filetime || data->set.timecondition) &&
ftp->file) {
result = ftp_getfiletime(conn, ftp->file);
if(result)
return result;
switch( result )
{
case CURLE_FTP_COULDNT_RETR_FILE:
case CURLE_OK:
if(data->set.timecondition) {
if((data->info.filetime > 0) && (data->set.timevalue > 0)) {
switch(data->set.timecondition) {
case TIMECOND_IFMODSINCE:
default:
if(data->info.filetime < data->set.timevalue) {
infof(data, "The requested document is not new enough\n");
ftp->no_transfer = TRUE; /* mark this to not transfer data */
return CURLE_OK;
}
break;
case TIMECOND_IFUNMODSINCE:
if(data->info.filetime > data->set.timevalue) {
infof(data, "The requested document is not old enough\n");
ftp->no_transfer = TRUE; /* mark this to not transfer data */
return CURLE_OK;
}
break;
} /* switch */
}
else {
infof(data, "Skipping time comparison\n");
}
}
break;
default:
return result;
} /* switch */
}
/* If we have selected NOBODY and HEADER, it means that we only want file
@@ -1984,6 +2041,8 @@ CURLcode ftp_perform(struct connectdata *conn,
may not support it! It is however the only way we have to get a file's
size! */
ssize_t filesize;
ssize_t nread;
int ftpcode;
ftp->no_transfer = TRUE; /* this means no actual transfer is made */
@@ -2003,6 +2062,18 @@ CURLcode ftp_perform(struct connectdata *conn,
return result;
}
/* Determine if server can respond to REST command and therefore
whether it can do a range */
FTPSENDF(conn, "REST 0", NULL);
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
if ((CURLE_OK == result) && (ftpcode == 350)) {
result = Curl_client_write(data, CLIENTWRITE_BOTH,
(char *)"Accept-ranges: bytes\r\n", 0);
if(result)
return result;
}
/* If we asked for a time of the file and we actually got one as
well, we "emulate" a HTTP-style header in our output. */
@@ -2011,12 +2082,12 @@ CURLcode ftp_perform(struct connectdata *conn,
struct tm *tm;
#ifdef HAVE_LOCALTIME_R
struct tm buffer;
tm = (struct tm *)localtime_r(&data->info.filetime, &buffer);
tm = (struct tm *)localtime_r((time_t *)&data->info.filetime, &buffer);
#else
tm = localtime((unsigned long *)&data->info.filetime);
tm = localtime((time_t *)&data->info.filetime);
#endif
/* format: "Tue, 15 Nov 1994 12:45:26 GMT" */
strftime(buf, BUFSIZE-1, "Last-Modified: %a, %d %b %Y %H:%M:%S %Z\r\n",
strftime(buf, BUFSIZE-1, "Last-Modified: %a, %d %b %Y %H:%M:%S GMT\r\n",
tm);
result = Curl_client_write(data, CLIENTWRITE_BOTH, buf, 0);
if(result)
@@ -2043,7 +2114,7 @@ CURLcode ftp_perform(struct connectdata *conn,
else {
/* We have chosen (this is default) to use the PASV command */
result = ftp_use_pasv(conn, connected);
if(connected)
if(!result && *connected)
infof(data, "Connected the data stream with PASV!\n");
}
@@ -2061,34 +2132,71 @@ CURLcode ftp_perform(struct connectdata *conn,
*/
CURLcode Curl_ftp(struct connectdata *conn)
{
CURLcode retcode;
bool connected;
CURLcode retcode=CURLE_OK;
bool connected=0;
struct SessionHandle *data = conn->data;
struct FTP *ftp;
int dirlength=0; /* 0 forces strlen() */
char *slash_pos; /* position of the first '/' char in curpos */
char *cur_pos=conn->ppath; /* current position in ppath. point at the begin
of next path component */
int path_part=0;/* current path component */
/* the ftp struct is already inited in ftp_connect() */
ftp = conn->proto.ftp;
conn->size = -1; /* make sure this is unknown at this point */
/* We split the path into dir and file parts *before* we URLdecode
it */
ftp->file = strrchr(conn->ppath, '/');
if(ftp->file) {
if(ftp->file != conn->ppath)
dirlength=ftp->file-conn->ppath; /* don't count the trailing slash */
Curl_pgrsSetUploadCounter(data, 0);
Curl_pgrsSetDownloadCounter(data, 0);
Curl_pgrsSetUploadSize(data, 0);
Curl_pgrsSetDownloadSize(data, 0);
ftp->file++; /* point to the first letter in the file name part or
remain NULL */
}
else {
ftp->file = conn->ppath; /* there's only a file part */
/* fixed : initialize ftp->dirs[xxx] to NULL !
is done in Curl_ftp_connect() */
/* parse the URL path into separate path components */
while((slash_pos=strchr(cur_pos, '/'))) {
/* 1 or 0 to indicate absolute directory */
bool absolute_dir = (cur_pos - conn->ppath > 0) && (path_part == 0);
/* seek out the next path component */
if (slash_pos-cur_pos) {
/* we skip empty path components, like "x//y" since the FTP command CWD
requires a parameter and a non-existent parameter a) doesn't work on
many servers and b) has no effect on the others. */
ftp->dirs[path_part] = curl_unescape(cur_pos - absolute_dir,
slash_pos - cur_pos + absolute_dir);
if (!ftp->dirs[path_part]) { /* run out of memory ... */
failf(data, "no memory");
freedirs(ftp);
return CURLE_OUT_OF_MEMORY;
}
}
else {
cur_pos = slash_pos + 1; /* jump to the rest of the string */
continue;
}
if(!retcode) {
cur_pos = slash_pos + 1; /* jump to the rest of the string */
if(++path_part >= (CURL_MAX_FTP_DIRDEPTH-1)) {
/* too deep, we need the last entry to be kept NULL at all
times to signal end of list */
failf(data, "too deep dir hierarchy");
freedirs(ftp);
return CURLE_URL_MALFORMAT;
}
}
}
ftp->file = cur_pos; /* the rest is the file name */
if(*ftp->file) {
ftp->file = curl_unescape(ftp->file, 0);
if(NULL == ftp->file) {
freedirs(ftp);
failf(data, "no memory");
return CURLE_OUT_OF_MEMORY;
}
@@ -2096,26 +2204,23 @@ CURLcode Curl_ftp(struct connectdata *conn)
else
ftp->file=NULL; /* instead of point to a zero byte, we make it a NULL
pointer */
ftp->urlpath = conn->ppath;
if(dirlength) {
ftp->dir = curl_unescape(ftp->urlpath, dirlength);
if(NULL == ftp->dir) {
if(ftp->file)
free(ftp->file);
failf(data, "no memory");
return CURLE_OUT_OF_MEMORY; /* failure */
}
}
else
ftp->dir = NULL;
retcode = ftp_perform(conn, &connected);
if(CURLE_OK == retcode) {
if(connected)
retcode = Curl_ftp_nextconnect(conn);
else
if(retcode && (conn->secondarysocket >= 0)) {
/* Failure detected, close the second socket if it was created already */
sclose(conn->secondarysocket);
conn->secondarysocket = -1;
}
if(ftp->no_transfer)
/* no data to transfer */
retcode=Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
else if(!connected)
/* since we didn't connect now, we want do_more to get called */
conn->bits.do_more = TRUE;
}
@@ -2187,16 +2292,112 @@ CURLcode Curl_ftp_disconnect(struct connectdata *conn)
if(ftp) {
if(ftp->entrypath)
free(ftp->entrypath);
if(ftp->cache)
if(ftp->cache) {
free(ftp->cache);
if(ftp->file)
ftp->cache = NULL;
}
if(ftp->file) {
free(ftp->file);
if(ftp->dir)
free(ftp->dir);
ftp->file = ftp->dir = NULL; /* zero */
ftp->file = NULL; /* zero */
}
freedirs(ftp);
}
return CURLE_OK;
}
/***********************************************************************
*
* ftp_mkd()
*
* Makes a directory on the FTP server.
*
* Calls failf()
*/
CURLcode ftp_mkd(struct connectdata *conn, char *path)
{
CURLcode result=CURLE_OK;
int ftpcode; /* for ftp status */
ssize_t nread;
/* Create a directory on the remote server */
FTPSENDF(conn, "MKD %s", path);
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
if(result)
return result;
switch(ftpcode) {
case 257:
/* success! */
infof( conn->data , "Created remote directory %s\n" , path );
break;
case 550:
failf(conn->data, "Permission denied to make directory %s", path);
result = CURLE_FTP_ACCESS_DENIED;
break;
default:
failf(conn->data, "unrecognized MKD response: %d", ftpcode );
result = CURLE_FTP_ACCESS_DENIED;
break;
}
return result;
}
/***********************************************************************
*
* ftp_cwd()
*
* Send 'CWD' to the remote server to Change Working Directory. It is the ftp
* version of the unix 'cd' command. This function is only called from the
* cwd_and_mkd() function these days.
*
* This function does NOT call failf().
*/
static
CURLcode ftp_cwd(struct connectdata *conn, char *path)
{
ssize_t nread;
int ftpcode;
CURLcode result;
FTPSENDF(conn, "CWD %s", path);
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
if (!result) {
/* According to RFC959, CWD is supposed to return 250 on success, but
there seem to be non-compliant FTP servers out there that return 200,
so we accept any '2xy' code here. */
if (ftpcode/100 != 2)
result = CURLE_FTP_ACCESS_DENIED;
}
return result;
}
/***********************************************************************
*
* cwd_and_mkd()
*
* Change to the given directory. If the directory is not present, and we
* have been told to allow it, then create the directory and cd to it.
*
*/
static CURLcode cwd_and_mkd(struct connectdata *conn, char *path)
{
CURLcode result;
result = ftp_cwd(conn, path);
if (result) {
if(conn->data->set.ftp_create_missing_dirs) {
result = ftp_mkd(conn, path);
if (result)
/* ftp_mkd() calls failf() itself */
return result;
result = ftp_cwd(conn, path);
}
if(result)
failf(conn->data, "Couldn't cd to %s", path);
}
return result;
}
#endif /* CURL_DISABLE_FTP */
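
For reference, a stand-alone sketch of how the rewritten Curl_ftp() path handling behaves: every non-empty component between slashes becomes one cwd_and_mkd() argument and the remainder is the file name. The path below is a made-up example and the printf calls merely stand in for the FTP commands:

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
    char path[] = "subdir//deeper/file.txt";
    char *cur = path;
    char *slash;

    while((slash = strchr(cur, '/')) != NULL) {
      if(slash != cur) {             /* skip empty components like "//" */
        *slash = 0;
        printf("CWD %s\n", cur);     /* cwd_and_mkd() would CWD, then retry
                                        with MKD+CWD if the option allows */
      }
      cur = slash + 1;
    }
    printf("file: %s\n", *cur ? cur : "(none)");
    return 0;
  }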

View File

@@ -35,7 +35,7 @@
#include <unixlib.h>
#endif
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif

View File

@@ -36,8 +36,10 @@
#endif
/* Make this the last #include */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#else
#include <stdlib.h>
#endif
/*
@@ -104,6 +106,9 @@ CURLcode Curl_getinfo(struct SessionHandle *data, CURLINFO info, ...)
case CURLINFO_HTTP_CODE:
*param_longp = data->info.httpcode;
break;
case CURLINFO_HTTP_CONNECTCODE:
*param_longp = data->info.httpproxycode;
break;
case CURLINFO_FILETIME:
*param_longp = data->info.filetime;
break;
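
A small usage fragment for the new CURLINFO_HTTP_CONNECTCODE tag ('curl' is assumed to be an easy handle on which a transfer through an HTTP proxy has already been performed):

  long connectcode = 0;
  long httpcode = 0;
  curl_easy_getinfo(curl, CURLINFO_HTTP_CONNECTCODE, &connectcode);
  curl_easy_getinfo(curl, CURLINFO_HTTP_CODE, &httpcode);
  printf("proxy CONNECT response: %ld, document response: %ld\n",
         connectcode, httpcode);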

View File

@@ -89,7 +89,7 @@ char *getpass_r(const char *prompt, char *buffer, size_t buflen)
#endif
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -99,7 +99,7 @@ char *getpass_r(const char *prompt, char *buffer, size_t buflen)
char infp_fclose = 0;
FILE *outfp;
RETSIGTYPE (*sigint)();
#ifndef __EMX__
#ifdef SIGTSTP
RETSIGTYPE (*sigtstp)();
#endif
size_t bytes_read;
@@ -117,9 +117,7 @@ char *getpass_r(const char *prompt, char *buffer, size_t buflen)
#endif
sigint = signal(SIGINT, SIG_IGN);
/* 20000318 mgs
* this is needed by the emx system, SIGTSTP is not a supported signal */
#ifndef __EMX__
#ifdef SIGTSTP
sigtstp = signal(SIGTSTP, SIG_IGN);
#endif
@@ -181,7 +179,7 @@ char *getpass_r(const char *prompt, char *buffer, size_t buflen)
#endif
signal(SIGINT, sigint);
#ifndef __EMX__
#ifdef SIGTSTP
signal(SIGTSTP, sigtstp);
#endif

View File

@@ -29,7 +29,7 @@
#include "hash.h"
#include "llist.h"
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
/* this must be the last include file */
#include "memdebug.h"
#endif
@@ -64,8 +64,9 @@ _hash_element_dtor (void *user, void *element)
free(e);
}
void
Curl_hash_init (curl_hash *h, int slots, curl_hash_dtor dtor)
/* return 1 on error, 0 is fine */
int
Curl_hash_init(curl_hash *h, int slots, curl_hash_dtor dtor)
{
int i;
@@ -74,21 +75,35 @@ Curl_hash_init (curl_hash *h, int slots, curl_hash_dtor dtor)
h->slots = slots;
h->table = (curl_llist **) malloc(slots * sizeof(curl_llist *));
for (i = 0; i < slots; ++i) {
h->table[i] = Curl_llist_alloc((curl_llist_dtor) _hash_element_dtor);
if(h->table) {
for (i = 0; i < slots; ++i) {
h->table[i] = Curl_llist_alloc((curl_llist_dtor) _hash_element_dtor);
if(!h->table[i]) {
while(i--)
Curl_llist_destroy(h->table[i], NULL);
free(h->table);
return 1; /* failure */
}
}
return 0; /* fine */
}
else
return 1; /* failure */
}
curl_hash *
Curl_hash_alloc (int slots, curl_hash_dtor dtor)
Curl_hash_alloc(int slots, curl_hash_dtor dtor)
{
curl_hash *h;
h = (curl_hash *) malloc(sizeof(curl_hash));
if (NULL == h)
return NULL;
Curl_hash_init(h, slots, dtor);
if (h) {
if(Curl_hash_init(h, slots, dtor)) {
/* failure */
free(h);
h = NULL;
}
}
return h;
}
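
A short, hedged sketch of the calling pattern the new return values enable (this mirrors what easy.c above does; it is a fragment, not a stand-alone program):

  /* Curl_hash_init() now reports failure with a non-zero return, and
     Curl_hash_alloc() returns NULL when either the struct or its slot lists
     cannot be allocated */
  curl_hash *h = Curl_hash_alloc(7, Curl_freednsinfo);
  if(!h)
    /* nothing to clean up: Curl_hash_alloc() already freed the partial table */
    return CURLE_OUT_OF_MEMORY;
  /* ... use the hash ... */
  Curl_hash_destroy(h);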

View File

@@ -45,7 +45,7 @@ typedef struct _curl_hash_element {
} curl_hash_element;
void Curl_hash_init(curl_hash *, int, curl_hash_dtor);
int Curl_hash_init(curl_hash *, int, curl_hash_dtor);
curl_hash *Curl_hash_alloc(int, curl_hash_dtor);
int Curl_hash_add(curl_hash *, char *, size_t, const void *);
int Curl_hash_delete(curl_hash *h, char *key, size_t key_len);

View File

@@ -65,6 +65,7 @@
#include "hostip.h"
#include "hash.h"
#include "share.h"
#include "url.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
@@ -74,17 +75,20 @@
#endif
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
static curl_hash hostname_cache;
static int host_cache_initialized;
static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
char *hostname,
int port,
char **bufp);
static Curl_addrinfo *my_getaddrinfo(struct connectdata *conn,
char *hostname,
int port,
int *waitp);
#if !defined(HAVE_GETHOSTBYNAME_R) || defined(USE_ARES)
static struct hostent* pack_hostent(char** buf, struct hostent* orig);
#endif
void Curl_global_host_cache_init(void)
{
@@ -135,15 +139,14 @@ create_hostcache_id(char *server, int port, ssize_t *entry_len)
char *id = NULL;
/* Get the length of the new entry id */
*entry_len = *entry_len + /* Hostname length */
1 + /* The ':' separator */
_num_chars(port); /* The number of characters the port will take up */
*entry_len = *entry_len + /* Hostname length */
1 + /* ':' separator */
_num_chars(port); /* number of characters the port will take up */
/* Allocate the new entry id */
id = malloc(*entry_len + 1);
if (!id) {
if (!id)
return NULL;
}
/* Create the new entry */
/* If sprintf() doesn't return the entry length, that signals failure */
@@ -192,57 +195,26 @@ hostcache_prune(curl_hash *hostcache, int cache_timeout, int now)
hostcache_timestamp_remove);
}
#if defined(MALLOCDEBUG) && defined(AGGRESIVE_TEST)
/* Called from Curl_done() to check that there's no DNS cache entry with
a non-zero counter left. */
void Curl_scan_cache_used(void *user, void *ptr)
{
struct Curl_dns_entry *e = ptr;
(void)user; /* prevent compiler warning */
if(e->inuse) {
fprintf(stderr, "*** WARNING: locked DNS cache entry detected: %s\n",
e->entry_id);
/* perform a segmentation fault to draw attention */
*(void **)0 = 0;
}
}
#endif
/* Macro to save redundant free'ing of entry_id */
#define HOSTCACHE_RETURN(dns) \
{ \
free(entry_id); \
if(data->share) \
{ \
Curl_share_unlock(data, CURL_LOCK_DATA_DNS); \
} \
return dns; \
}
#ifdef HAVE_SIGSETJMP
/* Beware this is a global and unique instance */
sigjmp_buf curl_jmpenv;
#endif
struct Curl_dns_entry *Curl_resolv(struct SessionHandle *data,
char *hostname,
int port)
{
char *entry_id = NULL;
struct Curl_dns_entry *dns = NULL;
ssize_t entry_len;
time_t now;
char *bufp;
#ifdef HAVE_SIGSETJMP
/* this allows us to time-out from the name resolver, as the timeout
will generate a signal and we will siglongjmp() from that here */
if(!data->set.no_signal && sigsetjmp(curl_jmpenv, 1)) {
/* this is coming from a siglongjmp() */
failf(data, "name lookup time-outed");
return NULL;
}
#endif
/* When calling Curl_resolv() has resulted in a response with a returned
address, we call this function to store the information in the dns
cache etc */
static struct Curl_dns_entry *
cache_resolv_response(struct SessionHandle *data,
Curl_addrinfo *addr,
char *hostname,
int port)
{
char *entry_id;
int entry_len;
struct Curl_dns_entry *dns;
time_t now;
/* Create an entry id, based upon the hostname and port */
entry_len = strlen(hostname);
@@ -251,60 +223,124 @@ struct Curl_dns_entry *Curl_resolv(struct SessionHandle *data,
if (!entry_id)
return NULL;
if(data->share)
{
Curl_share_lock(data, CURL_LOCK_DATA_DNS, CURL_LOCK_ACCESS_SINGLE);
}
/* See if its already in our dns cache */
dns = Curl_hash_pick(data->hostcache, entry_id, entry_len+1);
/* Create a new cache entry */
dns = (struct Curl_dns_entry *) malloc(sizeof(struct Curl_dns_entry));
if (!dns) {
Curl_addrinfo *addr = my_getaddrinfo(data, hostname, port, &bufp);
if (!addr) {
HOSTCACHE_RETURN(NULL);
}
/* Create a new cache entry */
dns = (struct Curl_dns_entry *) malloc(sizeof(struct Curl_dns_entry));
if (!dns) {
Curl_freeaddrinfo(addr);
HOSTCACHE_RETURN(NULL);
}
dns->inuse = 0;
dns->addr = addr;
/* Save it in our host cache */
Curl_hash_add(data->hostcache, entry_id, entry_len+1, (const void *) dns);
Curl_freeaddrinfo(addr);
free(entry_id);
return NULL;
}
dns->inuse = 0;
dns->addr = addr;
/* Store it in our dns cache */
Curl_hash_add(data->hostcache, entry_id, entry_len+1,
(const void *) dns);
time(&now);
dns->timestamp = now;
dns->inuse++; /* mark entry as in-use */
#ifdef MALLOCDEBUG
dns->entry_id = entry_id;
#endif
/* Remove outdated and unused entries from the hostcache */
hostcache_prune(data->hostcache,
data->set.dns_cache_timeout,
now);
HOSTCACHE_RETURN(dns);
/* free the allocated entry_id again */
free(entry_id);
return dns;
}
/* Resolve a name and return a pointer in the 'entry' argument if one
is available.
Return codes:
-1 = error, no pointer
0 = OK, pointer provided
1 = waiting for response, no pointer
*/
int Curl_resolv(struct connectdata *conn,
char *hostname,
int port,
struct Curl_dns_entry **entry)
{
char *entry_id = NULL;
struct Curl_dns_entry *dns = NULL;
ssize_t entry_len;
int wait;
struct SessionHandle *data = conn->data;
/* default to failure */
int rc = -1;
*entry = NULL;
#ifdef HAVE_SIGSETJMP
/* this allows us to time-out from the name resolver, as the timeout
will generate a signal and we will siglongjmp() from that here */
if(!data->set.no_signal && sigsetjmp(curl_jmpenv, 1)) {
/* this is coming from a siglongjmp() */
failf(data, "name lookup timed out");
return -1;
}
#endif
/* Create an entry id, based upon the hostname and port */
entry_len = strlen(hostname);
entry_id = create_hostcache_id(hostname, port, &entry_len);
/* If we can't create the entry id, fail */
if (!entry_id)
return -1;
if(data->share)
Curl_share_lock(data, CURL_LOCK_DATA_DNS, CURL_LOCK_ACCESS_SINGLE);
/* See if its already in our dns cache */
dns = Curl_hash_pick(data->hostcache, entry_id, entry_len+1);
/* free the allocated entry_id again */
free(entry_id);
if (!dns) {
/* The entry was not in the cache. Resolve it to IP address */
/* If my_getaddrinfo() returns NULL, 'wait' might be set to a non-zero
value indicating that we need to wait for the response to the resolve
call */
Curl_addrinfo *addr = my_getaddrinfo(conn, hostname, port, &wait);
if (!addr) {
if(wait)
/* the response to our resolve call will come asynchronously at
a later time, good or bad */
rc = 1;
}
else
/* we got a response, store it in the cache */
dns = cache_resolv_response(data, addr, hostname, port);
}
if(data->share)
Curl_share_unlock(data, CURL_LOCK_DATA_DNS);
*entry = dns;
return rc;
}
void Curl_resolv_unlock(struct SessionHandle *data, struct Curl_dns_entry *dns)
{
if(data->share)
{
Curl_share_lock(data, CURL_LOCK_DATA_DNS, CURL_LOCK_ACCESS_SINGLE);
}
dns->inuse--;
if(data->share)
{
Curl_share_unlock(data, CURL_LOCK_DATA_DNS);
}
}
/*
@@ -317,7 +353,7 @@ void Curl_freeaddrinfo(Curl_addrinfo *p)
#ifdef ENABLE_IPV6
freeaddrinfo(p);
#else
free(p);
free(p); /* works fine for the ARES case too */
#endif
}
@@ -335,9 +371,226 @@ void Curl_freednsinfo(void *freethis)
/* --- resolve name or IP-number --- */
#ifdef ENABLE_IPV6
/* Allocate enough memory to hold the full name information structs and
* everything. OSF1 is known to require at least 8872 bytes. The buffer
* required for storing all possible aliases and IP numbers is according to
* Stevens' Unix Network Programming 2nd edition, p. 304: 8192 bytes!
*/
#define CURL_NAMELOOKUP_SIZE 9000
#ifdef MALLOCDEBUG
#ifdef USE_ARES
CURLcode Curl_multi_ares_fdset(struct connectdata *conn,
fd_set *read_fd_set,
fd_set *write_fd_set,
int *max_fdp)
{
int max = ares_fds(conn->data->state.areschannel,
read_fd_set, write_fd_set);
*max_fdp = max;
return CURLE_OK;
}
/* called to check if the name is resolved now */
CURLcode Curl_is_resolved(struct connectdata *conn, bool *done)
{
fd_set read_fds, write_fds;
static const struct timeval tv={0,0};
int count;
struct SessionHandle *data = conn->data;
int nfds = ares_fds(data->state.areschannel, &read_fds, &write_fds);
count = select(nfds, &read_fds, &write_fds, NULL,
(struct timeval *)&tv);
if(count)
ares_process(data->state.areschannel, &read_fds, &write_fds);
if(conn->async.done) {
*done = TRUE;
if(!conn->async.dns)
return CURLE_COULDNT_RESOLVE_HOST;
}
else
*done = FALSE;
return CURLE_OK;
}
/* This is a function that locks and waits until the name resolve operation
has completed.
If 'entry' is non-NULL, make it point to the resolved dns entry
Return CURLE_COULDNT_RESOLVE_HOST if the host was not resolved, and
CURLE_OPERATION_TIMEDOUT if a time-out occurred.
*/
CURLcode Curl_wait_for_resolv(struct connectdata *conn,
struct Curl_dns_entry **entry)
{
CURLcode rc=CURLE_OK;
struct SessionHandle *data = conn->data;
/* Wait for the name resolve query to complete. */
while (1) {
int nfds=0;
fd_set read_fds, write_fds;
struct timeval *tvp, tv;
int count;
FD_ZERO(&read_fds);
FD_ZERO(&write_fds);
nfds = ares_fds(data->state.areschannel, &read_fds, &write_fds);
if (nfds == 0)
break;
tvp = ares_timeout(data->state.areschannel,
NULL, /* pass in our maximum time here */
&tv);
count = select(nfds, &read_fds, &write_fds, NULL, tvp);
if (count < 0 && errno != EINVAL)
break;
ares_process(data->state.areschannel, &read_fds, &write_fds);
}
/* Operation complete, if the lookup was successful we now have the entry
in the cache. */
/* this destroys the channel and we cannot use it anymore after this */
ares_destroy(data->state.areschannel);
if(entry)
*entry = conn->async.dns;
if(!conn->async.dns) {
/* a name was not resolved */
if(conn->async.done)
rc = CURLE_COULDNT_RESOLVE_HOST;
else
rc = CURLE_OPERATION_TIMEDOUT;
/* close the connection, since we can't return failure here without
cleaning up this connection properly */
Curl_disconnect(conn);
}
return rc;
}
/* this function gets called by ares when we got the name resolved */
static void host_callback(void *arg, /* "struct connectdata *" */
int status,
struct hostent *hostent)
{
struct connectdata *conn = (struct connectdata *)arg;
struct Curl_dns_entry *dns = NULL;
conn->async.done = TRUE;
conn->async.status = status;
if(ARES_SUCCESS == status) {
/* we got a resolved name in 'hostent' */
char *bufp = (char *)malloc(CURL_NAMELOOKUP_SIZE);
if(bufp) {
/* pack_hostent() copies to and shrinks the target buffer */
struct hostent *he = pack_hostent(&bufp, hostent);
dns = cache_resolv_response(conn->data, he,
conn->async.hostname, conn->async.port);
}
}
conn->async.dns = dns;
/* The input hostent struct will be freed by ares when we return from this
function */
}
/*
* Return name information about the given hostname and port number. If
* successful, the 'hostent' is returned and the fourth argument will point to
* memory we need to free after use. That memory *MUST* be freed with
* Curl_freeaddrinfo(), nothing else.
*/
static Curl_addrinfo *my_getaddrinfo(struct connectdata *conn,
char *hostname,
int port,
int *waitp)
{
int rc;
char *bufp;
struct SessionHandle *data = conn->data;
rc = ares_init(&data->state.areschannel);
*waitp = FALSE;
if(!rc) {
/* only if success */
bufp = strdup(hostname);
if(bufp) {
Curl_safefree(conn->async.hostname);
conn->async.hostname = bufp;
conn->async.port = port;
conn->async.done = FALSE; /* not done */
conn->async.status = 0; /* clear */
conn->async.dns = NULL; /* clear */
ares_gethostbyname(data->state.areschannel, hostname, PF_INET,
host_callback, conn);
*waitp = TRUE; /* please wait for the response */
}
else
ares_destroy(data->state.areschannel);
}
return NULL; /* no struct yet */
}
#else
/* For builds without ARES, Curl_resolv() can never return wait==TRUE,
so this function will never be called. If it still gets called, we
return failure at once. */
CURLcode Curl_wait_for_resolv(struct connectdata *conn,
struct Curl_dns_entry **entry)
{
(void)conn;
*entry=NULL;
return CURLE_COULDNT_RESOLVE_HOST;
}
CURLcode Curl_multi_ares_fdset(struct connectdata *conn,
fd_set *read_fd_set,
fd_set *write_fd_set,
int *max_fdp)
{
(void)conn;
(void)read_fd_set;
(void)write_fd_set;
(void)max_fdp;
return CURLE_OK;
}
CURLcode Curl_is_resolved(struct connectdata *conn, bool *done)
{
(void)conn;
*done = TRUE;
return CURLE_OK;
}
#endif
#if defined(ENABLE_IPV6) && !defined(USE_ARES)
#ifdef CURLDEBUG
/* These two are strictly for memory tracing and are using the same
* style as the family otherwise present in memdebug.c. I put these ones
* here since they require a bunch of struct types I didn't wanna include
@@ -380,15 +633,18 @@ void curl_freeaddrinfo(struct addrinfo *freethis,
* memory we need to free after use. That memory *MUST* be freed with
* Curl_freeaddrinfo(), nothing else.
*/
static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
static Curl_addrinfo *my_getaddrinfo(struct connectdata *conn,
char *hostname,
int port,
char **bufp)
int *waitp)
{
struct addrinfo hints, *res;
int error;
char sbuf[NI_MAXSERV];
int s, pf = PF_UNSPEC;
struct SessionHandle *data = conn->data;
*waitp=0; /* don't wait, we have the response now */
/* see if we have an IPv6 stack */
s = socket(PF_INET6, SOCK_DGRAM, 0);
@@ -413,20 +669,17 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
infof(data, "getaddrinfo(3) failed for %s:%d\n", hostname, port);
return NULL;
}
*bufp=(char *)res; /* make it point to the result struct */
return res;
}
#else /* following code is IPv4-only */
#ifndef HAVE_GETHOSTBYNAME_R
#if !defined(HAVE_GETHOSTBYNAME_R) || defined(USE_ARES)
static void hostcache_fixoffset(struct hostent *h, int offset);
/**
/*
* Performs a "deep" copy of a hostent into a buffer (returns a pointer to the
* copy). Make absolutely sure the destination buffer is big enough!
*
* Keith McGuigan
* 10/3/2001 */
*/
static struct hostent* pack_hostent(char** buf, struct hostent* orig)
{
char *bufptr;
@@ -455,18 +708,22 @@ static struct hostent* pack_hostent(char** buf, struct hostent* orig)
copy->h_aliases = (char**)bufptr;
/* Figure out how many aliases there are */
for (i = 0; orig->h_aliases[i] != NULL; ++i);
for (i = 0; orig->h_aliases && orig->h_aliases[i]; ++i);
/* Reserve room for the array */
bufptr += (i + 1) * sizeof(char*);
/* Clone all known aliases */
for(i = 0; (str = orig->h_aliases[i]); i++) {
len = strlen(str) + 1;
strncpy(bufptr, str, len);
copy->h_aliases[i] = bufptr;
bufptr += len;
if(orig->h_aliases) {
for(i = 0; (str = orig->h_aliases[i]); i++) {
len = strlen(str) + 1;
strncpy(bufptr, str, len);
copy->h_aliases[i] = bufptr;
bufptr += len;
}
}
/* if(!orig->h_aliases) i was already set to 0 */
/* Terminate the alias list with a NULL */
copy->h_aliases[i] = NULL;
@@ -511,6 +768,25 @@ static struct hostent* pack_hostent(char** buf, struct hostent* orig)
}
#endif
static void hostcache_fixoffset(struct hostent *h, int offset)
{
int i=0;
h->h_name=(char *)((long)h->h_name+offset);
h->h_aliases=(char **)((long)h->h_aliases+offset);
while(h->h_aliases[i]) {
h->h_aliases[i]=(char *)((long)h->h_aliases[i]+offset);
i++;
}
h->h_addr_list=(char **)((long)h->h_addr_list+offset);
i=0;
while(h->h_addr_list[i]) {
h->h_addr_list[i]=(char *)((long)h->h_addr_list[i]+offset);
i++;
}
}
#ifndef USE_ARES
static char *MakeIP(unsigned long num, char *addr, int addr_len)
{
#if defined(HAVE_INET_NTOA) || defined(HAVE_INET_NTOA_R)
@@ -532,48 +808,26 @@ static char *MakeIP(unsigned long num, char *addr, int addr_len)
return (addr);
}
#ifndef INADDR_NONE
#define INADDR_NONE (in_addr_t) ~0
#endif
static void hostcache_fixoffset(struct hostent *h, int offset)
{
int i=0;
h->h_name=(char *)((long)h->h_name+offset);
h->h_aliases=(char **)((long)h->h_aliases+offset);
while(h->h_aliases[i]) {
h->h_aliases[i]=(char *)((long)h->h_aliases[i]+offset);
i++;
}
h->h_addr_list=(char **)((long)h->h_addr_list+offset);
i=0;
while(h->h_addr_list[i]) {
h->h_addr_list[i]=(char *)((long)h->h_addr_list[i]+offset);
i++;
}
}
/* The original code to this function was once stolen from the Dancer source
code, written by Bjorn Reese, it has since been patched and modified
considerably. */
static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
static Curl_addrinfo *my_getaddrinfo(struct connectdata *conn,
char *hostname,
int port,
char **bufp)
int *waitp)
{
struct hostent *h = NULL;
in_addr_t in;
int ret; /* this variable is unused on several platforms but used on some */
struct SessionHandle *data = conn->data;
#define CURL_NAMELOOKUP_SIZE 9000
/* Allocate enough memory to hold the full name information structs and
* everything. OSF1 is known to require at least 8872 bytes. The buffer
* required for storing all possible aliases and IP numbers is according to
* Stevens' Unix Network Programming 2nd edition, p. 304: 8192 bytes! */
port=0; /* unused in IPv4 code */
(void)port; /* unused in IPv4 code */
ret = 0; /* to prevent the compiler warning */
if ( (in=inet_addr(hostname)) != INADDR_NONE ) {
*waitp = 0; /* don't wait, we act synchronously */
in=inet_addr(hostname);
if (in != CURL_INADDR_NONE) {
struct in_addr *addrentry;
struct namebuf {
struct hostent hostentry;
@@ -583,7 +837,6 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
} *buf = (struct namebuf *)malloc(sizeof(struct namebuf));
if(!buf)
return NULL; /* major failure */
*bufp = (char *)buf;
h = &buf->hostentry;
h->h_addr_list = &buf->h_addr_list[0];
@@ -604,7 +857,6 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
int *buf = (int *)malloc(CURL_NAMELOOKUP_SIZE);
if(!buf)
return NULL; /* major failure */
*bufp=(char *)buf;
/* Workaround for gethostbyname_r bug in qnx nto. It is also _required_
for some of these functions. */
@@ -630,7 +882,7 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
step_size+=200;
}
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
infof(data, "gethostbyname_r() uses %d bytes\n", step_size);
#endif
@@ -640,7 +892,6 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
offset=(long)h-(long)buf;
hostcache_fixoffset(h, offset);
buf=(int *)h;
*bufp=(char *)buf;
}
else
#endif
@@ -680,7 +931,7 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
if(!h) /* failure */
res=1;
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
infof(data, "gethostbyname_r() uses %d bytes\n", step_size);
#endif
if(!res) {
@@ -689,7 +940,6 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
offset=(long)h-(long)buf;
hostcache_fixoffset(h, offset);
buf=(int *)h;
*bufp=(char *)buf;
}
else
#endif
@@ -732,13 +982,11 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
infof(data, "gethostbyname_r(2) failed for %s\n", hostname);
h = NULL; /* set return code to NULL */
free(buf);
*bufp=NULL;
}
#else
else {
if ((h = gethostbyname(hostname)) == NULL ) {
infof(data, "gethostbyname(2) failed for %s\n", hostname);
*bufp=NULL;
}
else
{
@@ -747,7 +995,6 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
static one we got a pointer to might get removed when we don't
want/expect that */
h = pack_hostent(&buf, h);
*bufp=(char *)buf;
}
#endif
}
@@ -755,3 +1002,5 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
}
#endif /* end of IPv4-specific code */
#endif /* end of !USE_ARES */
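
A fragment (not stand-alone code) summarizing the new resolver calling convention, modelled on the ftp.c hunks earlier in this compare: Curl_resolv() returns -1 on error, 0 when the entry is available at once, and 1 when an asynchronous lookup was started and Curl_wait_for_resolv() must be used to wait for it.

  struct Curl_dns_entry *dns = NULL;
  int rc = Curl_resolv(conn, hostname, port, &dns);
  if(rc == 1)
    /* the lookup runs asynchronously (ares); block until it completes */
    rc = Curl_wait_for_resolv(conn, &dns);
  if(!dns)
    return CURLE_COULDNT_RESOLVE_HOST;  /* or a protocol-specific error */
  /* ... connect using dns->addr, then release the entry ... */
  Curl_resolv_unlock(conn->data, dns);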

View File

@@ -29,6 +29,7 @@
struct addrinfo;
struct hostent;
struct SessionHandle;
struct connectdata;
void Curl_global_host_cache_init(void);
void Curl_global_host_cache_dtor(void);
@@ -41,9 +42,6 @@ struct Curl_dns_entry {
time_t timestamp;
long inuse; /* use-counter, make very sure you decrease this
when you're done using the address you received */
#ifdef MALLOCDEBUG
char *entry_id;
#endif
};
/*
@@ -54,10 +52,18 @@ struct Curl_dns_entry {
* use, or we'll leak memory!
*/
struct Curl_dns_entry *Curl_resolv(struct SessionHandle *data,
char *hostname,
int port);
int Curl_resolv(struct connectdata *conn,
char *hostname,
int port,
struct Curl_dns_entry **dnsentry);
CURLcode Curl_is_resolved(struct connectdata *conn, bool *done);
CURLcode Curl_wait_for_resolv(struct connectdata *conn,
struct Curl_dns_entry **dnsentry);
CURLcode Curl_multi_ares_fdset(struct connectdata *conn,
fd_set *read_fd_set,
fd_set *write_fd_set,
int *max_fdp);
/* unlock a previously resolved dns entry */
void Curl_resolv_unlock(struct SessionHandle *data, struct Curl_dns_entry *dns);
@@ -70,7 +76,7 @@ void Curl_freeaddrinfo(Curl_addrinfo *freeaddr);
/* free cached name info */
void Curl_freednsinfo(void *freethis);
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
void curl_freeaddrinfo(struct addrinfo *freethis,
int line, const char *source);
int curl_getaddrinfo(char *hostname, char *service,
@@ -79,4 +85,11 @@ int curl_getaddrinfo(char *hostname, char *service,
int line, const char *source);
#endif
#ifndef INADDR_NONE
#define CURL_INADDR_NONE (in_addr_t) ~0
#else
#define CURL_INADDR_NONE INADDR_NONE
#endif
#endif
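
A fragment showing why CURL_INADDR_NONE is introduced: INADDR_NONE is not defined on every platform, so callers (such as the PORT handling in ftp.c) can test inet_addr() results against this macro uniformly. 'ipstring' below is an assumed variable holding the candidate address text.

  in_addr_t in = inet_addr(ipstring);
  if(in == CURL_INADDR_NONE) {
    /* not a dotted IPv4 address; fall back to a name lookup */
  }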

View File

@@ -54,7 +54,6 @@
#endif
#endif
#include <sys/resource.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
@@ -89,15 +88,23 @@
#include "cookie.h"
#include "strequal.h"
#include "ssluse.h"
#include "http_digest.h"
#include "http_ntlm.h"
#include "http_negotiate.h"
#include "url.h"
#include "share.h"
#include "http.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
static CURLcode Curl_output_basic_proxy(struct connectdata *conn);
/* fread() emulation to provide POST and/or request data */
static int readmoredata(char *buffer,
size_t size,
@@ -190,6 +197,7 @@ CURLcode add_buffer_send(send_buffer *in,
char *ptr;
int size;
struct HTTP *http = conn->proto.http;
int sendsize;
/* The looping below is required since we use non-blocking sockets, but due
to the circumstances we will just loop and try again and again etc */
@@ -197,7 +205,28 @@ CURLcode add_buffer_send(send_buffer *in,
ptr = in->buffer;
size = in->size_used;
res = Curl_write(conn, sockfd, ptr, size, &amount);
if(conn->protocol & PROT_HTTPS) {
/* We never send more than CURL_MAX_WRITE_SIZE bytes in one single chunk
when we speak HTTPS, as if only a fraction of it is sent now, this data
needs to fit into the normal read-callback buffer later on and that
buffer is using this size.
*/
sendsize= (size > CURL_MAX_WRITE_SIZE)?CURL_MAX_WRITE_SIZE:size;
/* OpenSSL is very picky and we must send the SAME buffer pointer to the
library when we attempt to re-send this buffer. Sending the same data
is not enough, we must use the exact same address. For this reason, we
must copy the data to the uploadbuffer first, since that is the buffer
we will be using if this send is retried later.
*/
memcpy(conn->data->state.uploadbuffer, ptr, sendsize);
ptr = conn->data->state.uploadbuffer;
}
else
sendsize = size;
res = Curl_write(conn, sockfd, ptr, sendsize, &amount);
if(CURLE_OK == res) {
@@ -213,7 +242,8 @@ CURLcode add_buffer_send(send_buffer *in,
and wait until it might work again. */
size -= amount;
ptr += amount;
ptr = in->buffer + amount;
/* backup the currently set pointers */
http->backup.fread = conn->fread;
@@ -403,6 +433,13 @@ CURLcode Curl_ConnectHTTPProxyTunnel(struct connectdata *conn,
infof(data, "Establish HTTP proxy tunnel to %s:%d\n", hostname, remote_port);
/*
* This code currently only supports Basic authentication for this CONNECT
* request to a proxy.
*/
if(conn->bits.proxy_user_passwd)
Curl_output_basic_proxy(conn);
/* OK, now send the connect request to the proxy */
result =
Curl_sendf(tunnelsocket, conn,
@@ -491,16 +528,16 @@ CURLcode Curl_ConnectHTTPProxyTunnel(struct connectdata *conn,
/* a newline is CRLF in ftp-talk, so the CR is ignored as
the line isn't really terminated until the LF comes */
/* output debug output if that is requested */
if(data->set.verbose)
Curl_debug(data, CURLINFO_DATA_IN, line_start, perline);
if('\r' == line_start[0]) {
/* end of headers */
keepon=FALSE;
break; /* breaks out of loop, not switch */
}
/* output debug output if that is requested */
if(data->set.verbose)
Curl_debug(data, CURLINFO_HEADER_IN, line_start, perline);
if(2 == sscanf(line_start, "HTTP/1.%d %d",
&subversion,
&httperror)) {
@@ -519,6 +556,8 @@ CURLcode Curl_ConnectHTTPProxyTunnel(struct connectdata *conn,
if(error)
return CURLE_RECV_ERROR;
data->info.httpproxycode = httperror;
if(200 != httperror) {
if(407 == httperror)
/* Added Nov 6 1998 */
@@ -527,6 +566,14 @@ CURLcode Curl_ConnectHTTPProxyTunnel(struct connectdata *conn,
failf(data, "Received error code %d from proxy", httperror);
return CURLE_RECV_ERROR;
}
/* If a proxy-authorization header was used for the proxy, then we should
make sure that it isn't accidentally used for the document request
after we've connected. So let's free and clear it here. */
Curl_safefree(conn->allocptr.proxyuserpwd);
conn->allocptr.proxyuserpwd = NULL;
Curl_http_auth_stage(data, 401); /* move on to the host auth */
infof (data, "Proxy replied to CONNECT request\n");
return CURLE_OK;
@@ -591,11 +638,15 @@ CURLcode Curl_http_done(struct connectdata *conn)
conn->fread = data->set.fread; /* restore */
conn->fread_in = data->set.in; /* restore */
if (http == NULL)
return CURLE_OK;
if(http->send_buffer) {
send_buffer *buff = http->send_buffer;
free(buff->buffer);
free(buff);
http->send_buffer = NULL; /* clear the pointer */
}
if(HTTPREQ_POST_FORM == data->set.httpreq) {
@@ -616,6 +667,56 @@ CURLcode Curl_http_done(struct connectdata *conn)
return CURLE_OK;
}
static CURLcode Curl_output_basic(struct connectdata *conn)
{
char *authorization;
struct SessionHandle *data=conn->data;
sprintf(data->state.buffer, "%s:%s", conn->user, conn->passwd);
if(Curl_base64_encode(data->state.buffer, strlen(data->state.buffer),
&authorization) >= 0) {
if(conn->allocptr.userpwd)
free(conn->allocptr.userpwd);
conn->allocptr.userpwd = aprintf( "Authorization: Basic %s\015\012",
authorization);
free(authorization);
}
else
return CURLE_OUT_OF_MEMORY;
return CURLE_OK;
}
static CURLcode Curl_output_basic_proxy(struct connectdata *conn)
{
char *authorization;
struct SessionHandle *data=conn->data;
sprintf(data->state.buffer, "%s:%s", conn->proxyuser, conn->proxypasswd);
if(Curl_base64_encode(data->state.buffer, strlen(data->state.buffer),
&authorization) >= 0) {
Curl_safefree(conn->allocptr.proxyuserpwd);
conn->allocptr.proxyuserpwd =
aprintf("Proxy-authorization: Basic %s\015\012", authorization);
free(authorization);
}
else
return CURLE_OUT_OF_MEMORY;
return CURLE_OK;
}
void Curl_http_auth_stage(struct SessionHandle *data,
int stage)
{
if(stage == 401)
data->state.authwant = data->set.httpauth;
else if(stage == 407)
data->state.authwant = data->set.proxyauth;
else
return; /* bad input stage */
data->state.authstage = stage;
data->state.authavail = CURLAUTH_NONE;
}
CURLcode Curl_http(struct connectdata *conn)
{
struct SessionHandle *data=conn->data;
@@ -626,6 +727,15 @@ CURLcode Curl_http(struct connectdata *conn)
char *ppath = conn->ppath; /* three previous function arguments */
char *host = conn->name;
const char *te = ""; /* transfer-encoding */
char *ptr;
char *request;
if(!data->state.authstage) {
if(conn->bits.httpproxy && conn->bits.proxy_user_passwd)
Curl_http_auth_stage(data, 407);
else
Curl_http_auth_stage(data, 401);
}
if(!conn->proto.http) {
/* Only allocate this struct if we don't already have it! */
@@ -646,6 +756,13 @@ CURLcode Curl_http(struct connectdata *conn)
data->set.upload) {
data->set.httpreq = HTTPREQ_PUT;
}
request = data->set.customrequest?
data->set.customrequest:
(data->set.no_body?(char *)"HEAD":
((HTTPREQ_POST == data->set.httpreq) ||
(HTTPREQ_POST_FORM == data->set.httpreq))?(char *)"POST":
(HTTPREQ_PUT == data->set.httpreq)?(char *)"PUT":(char *)"GET");
/* The User-Agent string has been built in url.c already, because it might
have been used in the proxy connect, but if we have got a header with
@@ -656,27 +773,73 @@ CURLcode Curl_http(struct connectdata *conn)
conn->allocptr.uagent=NULL;
}
if((conn->bits.user_passwd) && !checkheaders(data, "Authorization:")) {
char *authorization;
/* To prevent the user+password to get sent to other than the original
host due to a location-follow, we do some weirdo checks here */
if(!data->state.this_is_a_follow ||
!data->state.auth_host ||
curl_strequal(data->state.auth_host, conn->hostname) ||
data->set.http_disable_hostname_check_before_authentication) {
/* To prevent the user+password to get sent to other than the original
host due to a location-follow, we do some weirdo checks here */
if(!data->state.this_is_a_follow ||
!data->state.auth_host ||
curl_strequal(data->state.auth_host, conn->hostname) ||
data->set.http_disable_hostname_check_before_authentication) {
sprintf(data->state.buffer, "%s:%s",
data->state.user, data->state.passwd);
if(Curl_base64_encode(data->state.buffer, strlen(data->state.buffer),
&authorization) >= 0) {
if(conn->allocptr.userpwd)
free(conn->allocptr.userpwd);
conn->allocptr.userpwd = aprintf( "Authorization: Basic %s\015\012",
authorization);
free(authorization);
/* Send proxy authentication header if needed */
if (data->state.authstage == 407) {
#ifdef USE_SSLEAY
if(data->state.authwant == CURLAUTH_NTLM) {
result = Curl_output_ntlm(conn, TRUE);
if(result)
return result;
}
else
#endif
if((data->state.authwant == CURLAUTH_BASIC) && /* Basic */
conn->bits.proxy_user_passwd &&
!checkheaders(data, "Proxy-authorization:")) {
result = Curl_output_basic_proxy(conn);
if(result)
return result;
/* Switch to web authentication after proxy authentication is done */
Curl_http_auth_stage(data, 401);
}
}
/* Send web authentication header if needed */
if (data->state.authstage == 401) {
#ifdef GSSAPI
if((data->state.authwant == CURLAUTH_GSSNEGOTIATE) &&
data->state.negotiate.context &&
!GSS_ERROR(data->state.negotiate.status)) {
result = Curl_output_negotiate(conn);
if (result)
return result;
}
else
#endif
#ifdef USE_SSLEAY
if(data->state.authwant == CURLAUTH_NTLM) {
result = Curl_output_ntlm(conn, FALSE);
if(result)
return result;
}
else
#endif
{
if((data->state.authwant == CURLAUTH_DIGEST) &&
data->state.digest.nonce) {
result = Curl_output_digest(conn,
(unsigned char *)request,
(unsigned char *)ppath);
if(result)
return result;
}
else if((data->state.authwant == CURLAUTH_BASIC) && /* Basic */
conn->bits.user_passwd &&
!checkheaders(data, "Authorization:")) {
result = Curl_output_basic(conn);
if(result)
return result;
}
}
}
}
if((data->change.referer) && !checkheaders(data, "Referer:")) {
if(conn->allocptr.ref)
free(conn->allocptr.ref);
@@ -714,30 +877,30 @@ CURLcode Curl_http(struct connectdata *conn)
}
}
if(data->cookies) {
co = Curl_cookie_getlist(data->cookies,
host, ppath,
(bool)(conn->protocol&PROT_HTTPS?TRUE:FALSE));
}
if (data->change.proxy && *data->change.proxy &&
!data->set.tunnel_thru_httpproxy &&
!(conn->protocol&PROT_HTTPS)) {
/* The path sent to the proxy is in fact the entire URL */
ppath = data->change.url;
}
if(HTTPREQ_POST_FORM == data->set.httpreq) {
/* we must build the whole darned post sequence first, so that we have
a size of the whole shebang before we start to send it */
result = Curl_getFormData(&http->sendit, data->set.httppost,
&http->postsize);
if(CURLE_OK != result) {
/* Curl_getFormData() doesn't use failf() */
failf(data, "failed creating formpost data");
return result;
}
}
ptr = checkheaders(data, "Host:");
if(ptr) {
/* If we have a given custom Host: header, we extract the host name
in order to possibly use it for cookie reasons later on. */
char *start = ptr+strlen("Host:");
char *ptr;
while(*start && isspace((int)*start ))
start++;
ptr = start; /* start host-scanning here */
if(!checkheaders(data, "Host:")) {
/* scan through the string to find the end (space or colon) */
while(*ptr && !isspace((int)*ptr) && !(':'==*ptr))
ptr++;
if(ptr != start) {
int len=ptr-start;
conn->allocptr.cookiehost = malloc(len+1);
if(!conn->allocptr.cookiehost)
return CURLE_OUT_OF_MEMORY;
memcpy(conn->allocptr.cookiehost, start, len);
conn->allocptr.cookiehost[len]=0;
}
}
else {
/* if ptr_host is already set, it is almost OK since we only re-use
connections to the very same host and port, but when we use an HTTP
proxy we have a persistent connection and yet we must change the Host:
@@ -765,6 +928,34 @@ CURLcode Curl_http(struct connectdata *conn)
conn->remote_port);
}
if(data->cookies) {
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
co = Curl_cookie_getlist(data->cookies,
conn->allocptr.cookiehost?
conn->allocptr.cookiehost:host, ppath,
(bool)(conn->protocol&PROT_HTTPS?TRUE:FALSE));
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
}
if (conn->bits.httpproxy &&
!data->set.tunnel_thru_httpproxy &&
!(conn->protocol&PROT_HTTPS)) {
/* The path sent to the proxy is in fact the entire URL */
ppath = data->change.url;
}
if(HTTPREQ_POST_FORM == data->set.httpreq) {
/* we must build the whole darned post sequence first, so that we have
a size of the whole shebang before we start to send it */
result = Curl_getFormData(&http->sendit, data->set.httppost,
&http->postsize);
if(CURLE_OK != result) {
/* Curl_getFormData() doesn't use failf() */
failf(data, "failed creating formpost data");
return result;
}
}
if(!checkheaders(data, "Pragma:"))
http->p_pragma = "Pragma: no-cache\r\n";
@@ -861,13 +1052,14 @@ CURLcode Curl_http(struct connectdata *conn)
}
}
do {
{
/* Use 1.1 unless the user specifically asked for 1.0 */
const char *httpstring=
data->set.httpversion==CURL_HTTP_VERSION_1_0?"1.0":"1.1";
send_buffer *req_buffer;
struct curl_slist *headers=data->set.headers;
size_t postsize;
/* initialize a dynamic send-buffer */
req_buffer = add_buffer_init();
@@ -875,7 +1067,7 @@ CURLcode Curl_http(struct connectdata *conn)
/* add the main request stuff */
add_bufferf(req_buffer,
"%s " /* GET/HEAD/POST/PUT */
"%s HTTP/%s\r\n" /* path */
"%s HTTP/%s\r\n" /* path + HTTP version */
"%s" /* proxyuserpwd */
"%s" /* userpwd */
"%s" /* range */
@@ -888,16 +1080,12 @@ CURLcode Curl_http(struct connectdata *conn)
"%s" /* referer */
"%s",/* transfer-encoding */
data->set.customrequest?data->set.customrequest:
(data->set.no_body?"HEAD":
((HTTPREQ_POST == data->set.httpreq) ||
(HTTPREQ_POST_FORM == data->set.httpreq))?"POST":
(HTTPREQ_PUT == data->set.httpreq)?"PUT":"GET"),
ppath, httpstring,
(conn->bits.proxy_user_passwd &&
conn->allocptr.proxyuserpwd)?conn->allocptr.proxyuserpwd:"",
(conn->bits.user_passwd && conn->allocptr.userpwd)?
conn->allocptr.userpwd:"",
request,
ppath,
httpstring,
(conn->bits.httpproxy && conn->allocptr.proxyuserpwd)?
conn->allocptr.proxyuserpwd:"",
conn->allocptr.userpwd?conn->allocptr.userpwd:"",
(conn->bits.use_range && conn->allocptr.rangeline)?
conn->allocptr.rangeline:"",
(data->set.useragent && *data->set.useragent && conn->allocptr.uagent)?
@@ -917,7 +1105,7 @@ CURLcode Curl_http(struct connectdata *conn)
struct Cookie *store=co;
/* now loop through all cookies that matched */
while(co) {
if(co->value && strlen(co->value)) {
if(co->value) {
if(0 == count) {
add_bufferf(req_buffer, "Cookie: ");
}
@@ -1105,6 +1293,11 @@ CURLcode Curl_http(struct connectdata *conn)
case HTTPREQ_POST:
/* this is the simple POST, using x-www-form-urlencoded style */
/* store the size of the postfields */
postsize = data->set.postfieldsize?
data->set.postfieldsize:
(data->set.postfields?strlen(data->set.postfields):0);
if(!conn->bits.upload_chunky) {
/* We only set Content-Length and allow a custom Content-Length if
we don't upload data chunked, as RFC2616 forbids us to set both
@@ -1113,11 +1306,7 @@ CURLcode Curl_http(struct connectdata *conn)
if(!checkheaders(data, "Content-Length:"))
/* we allow replacing this header, although it isn't very wise to
actually set your own */
add_bufferf(req_buffer,
"Content-Length: %d\r\n",
data->set.postfieldsize?
data->set.postfieldsize:
(data->set.postfields?strlen(data->set.postfields):0) );
add_bufferf(req_buffer, "Content-Length: %d\r\n", postsize);
}
if(!checkheaders(data, "Content-Type:"))
@@ -1126,21 +1315,38 @@ CURLcode Curl_http(struct connectdata *conn)
add_buffer(req_buffer, "\r\n", 2);
/* and here we setup the pointers to the actual data */
if(data->set.postfields) {
if(data->set.postfieldsize)
http->postsize = data->set.postfieldsize;
else
http->postsize = strlen(data->set.postfields);
http->postdata = data->set.postfields;
http->sending = HTTPSEND_BODY;
if(postsize < (100*1024)) {
/* The post data is less than 100K, so append it to the headers. This
limit is no magic number, it is only set to prevent really huge POSTs
from getting the data duplicated with malloc() and family. */
conn->fread = (curl_read_callback)readmoredata;
conn->fread_in = (void *)conn;
if(!conn->bits.upload_chunky)
/* We're not sending it 'chunked', append it to the request
already now to reduce the number of send() calls */
add_buffer(req_buffer, data->set.postfields, postsize);
else {
/* Append the POST data chunky-style */
add_bufferf(req_buffer, "%x\r\n", postsize);
add_buffer(req_buffer, data->set.postfields, postsize);
add_buffer(req_buffer, "\r\n0\r\n", 5); /* end of a chunked
transfer stream */
}
}
else {
/* A huge POST coming up, do data separate from the request */
http->postsize = postsize;
http->postdata = data->set.postfields;
/* set the upload size to the progress meter */
Curl_pgrsSetUploadSize(data, http->postsize);
http->sending = HTTPSEND_BODY;
conn->fread = (curl_read_callback)readmoredata;
conn->fread_in = (void *)conn;
/* set the upload size to the progress meter */
Curl_pgrsSetUploadSize(data, http->postsize);
}
}
else
/* set the upload size to the progress meter */
@@ -1156,8 +1362,8 @@ CURLcode Curl_http(struct connectdata *conn)
result =
Curl_Transfer(conn, conn->firstsocket, -1, TRUE,
&http->readbytecount,
conn->firstsocket,
&http->writebytecount);
http->postdata?conn->firstsocket:-1,
http->postdata?&http->writebytecount:NULL);
break;
default:
@@ -1178,8 +1384,7 @@ CURLcode Curl_http(struct connectdata *conn)
}
if(result)
return result;
} while (0); /* this is just a left-over from the multiple document download
attempts */
}
return CURLE_OK;
}
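The postfields branch above appends small bodies straight into the request buffer, either as-is with a Content-Length header or wrapped in HTTP/1.1 chunked framing. A stand-alone sketch of that framing, using a made-up form body (illustration only, not part of the patch):
#include <stdio.h>
#include <string.h>
int main(void)
{
  const char *body = "name=daniel&project=curl"; /* invented example body */
  /* chunk-size line in hex, then the data, then the zero-sized final chunk,
     mirroring the "%x\r\n" + data + "\r\n0\r\n" sequence appended above */
  printf("%x\r\n", (unsigned int)strlen(body));
  fwrite(body, 1, strlen(body), stdout);
  printf("\r\n0\r\n");
  return 0;
}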


@@ -42,5 +42,6 @@ CURLcode Curl_http_connect(struct connectdata *conn);
void Curl_httpchunk_init(struct connectdata *conn);
CHUNKcode Curl_httpchunk_read(struct connectdata *conn, char *datap,
ssize_t length, ssize_t *wrote);
void Curl_http_auth_stage(struct SessionHandle *data, int stage);
#endif
#endif


@@ -39,7 +39,7 @@
#include <curl/mprintf.h>
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -102,8 +102,9 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
size_t length,
size_t *wrote)
{
CURLcode result;
CURLcode result=CURLE_OK;
struct Curl_chunker *ch = &conn->proto.http->chunk;
struct Curl_transfer_keeper *k = &conn->keep;
int piece;
*wrote = 0; /* nothing yet */
@@ -180,21 +181,29 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
switch (conn->keep.content_encoding) {
case IDENTITY:
#endif
result = Curl_client_write(conn->data, CLIENTWRITE_BODY, datap,
piece);
if(!k->ignorebody)
result = Curl_client_write(conn->data, CLIENTWRITE_BODY, datap,
piece);
#ifdef HAVE_LIBZ
break;
case DEFLATE:
/* update conn->keep.str to point to the chunk data. */
conn->keep.str = datap;
result = Curl_unencode_deflate_write(conn->data, &conn->keep, piece);
break;
case GZIP:
/* update conn->keep.str to point to the chunk data. */
conn->keep.str = datap;
result = Curl_unencode_gzip_write(conn->data, &conn->keep, piece);
break;
case COMPRESS:
default:
failf (conn->data,
"Unrecognized content encoding type. "
"libcurl understands `identity' and `deflate' "
"libcurl understands `identity', `deflate' and `gzip' "
"content encodings.");
return CHUNKE_BAD_ENCODING;
}

lib/http_digest.c Normal file

@@ -0,0 +1,231 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
***************************************************************************/
#include "setup.h"
#ifndef CURL_DISABLE_HTTP
/* -- WIN32 approved -- */
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <ctype.h>
#include "urldata.h"
#include "sendf.h"
#include "strequal.h"
#include "md5.h"
#include "http_digest.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
/* The last #include file should be: */
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
/* Test example header:
WWW-Authenticate: Digest realm="testrealm", nonce="1053604598"
*/
CURLdigest Curl_input_digest(struct connectdata *conn,
char *header) /* rest of the www-authenticate:
header */
{
bool more = TRUE;
struct SessionHandle *data=conn->data;
/* skip initial whitespaces */
while(*header && isspace((int)*header))
header++;
if(checkprefix("Digest", header)) {
header += strlen("Digest");
/* clear off any former leftovers and init to defaults */
Curl_digest_cleanup(data);
while(more) {
char value[32];
char content[128];
int totlen=0;
while(*header && isspace((int)*header))
header++;
/* how big can these strings be? */
if(2 == sscanf(header, "%31[^=]=\"%127[^\"]\"",
value, content)) {
if(strequal(value, "nonce")) {
data->state.digest.nonce = strdup(content);
}
else if(strequal(value, "cnonce")) {
data->state.digest.cnonce = strdup(content);
}
else if(strequal(value, "realm")) {
data->state.digest.realm = strdup(content);
}
else if(strequal(value, "algorithm")) {
if(strequal(content, "MD5-sess"))
data->state.digest.algo = CURLDIGESTALGO_MD5SESS;
/* else, remain using the default md5 */
}
else {
/* unknown specifier, ignore it! */
}
totlen = strlen(value)+strlen(content)+3;
}
else
break; /* we're done here */
header += totlen;
if(',' == *header)
/* allow the list to be comma-separated */
header++;
}
if(!data->state.digest.nonce)
return CURLDIGEST_BAD;
}
else
/* else not a digest, get out */
return CURLDIGEST_NONE;
return CURLDIGEST_FINE;
}
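A stand-alone sketch of the name="value" scanning loop above, run against the example header from the comment at the top of this file (hypothetical demo, not part of the patch):
#include <stdio.h>
#include <string.h>
int main(void)
{
  const char *header = "realm=\"testrealm\", nonce=\"1053604598\"";
  char value[32];
  char content[128];
  while(2 == sscanf(header, "%31[^=]=\"%127[^\"]\"", value, content)) {
    printf("%s => %s\n", value, content);
    header += strlen(value) + strlen(content) + 3; /* name, '=' and two quotes */
    if(',' == *header)
      header++;
    while(' ' == *header)
      header++;
  }
  return 0; /* prints "realm => testrealm" then "nonce => 1053604598" */
}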
/* convert an md5 chunk to an RFC 2617 (section 3.1.3) suitable ASCII string */
static void md5_to_ascii(unsigned char *source, /* 16 bytes */
unsigned char *dest) /* 33 bytes */
{
int i;
for(i=0; i<16; i++)
sprintf((char *)&dest[i*2], "%02x", source[i]);
}
CURLcode Curl_output_digest(struct connectdata *conn,
unsigned char *request,
unsigned char *uripath)
{
/* We have a Digest setup for this, use it!
Now, to get all the details for this sorted out, I must urge you, dear friend,
to read up on RFC 2617 section 3.2.2. */
unsigned char md5buf[16]; /* 16 bytes/128 bits */
unsigned char ha1[33]; /* 32 digits and 1 zero byte */
unsigned char ha2[33];
unsigned char request_digest[33];
unsigned char *md5this;
struct SessionHandle *data = conn->data;
/*
if the algorithm is "MD5" or unspecified (which then defaults to MD5):
A1 = unq(username-value) ":" unq(realm-value) ":" passwd
if the algorithm is "MD5-sess" then:
A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd )
":" unq(nonce-value) ":" unq(cnonce-value)
*/
if(data->state.digest.algo == CURLDIGESTALGO_MD5SESS) {
md5this = (unsigned char *)
aprintf("%s:%s:%s:%s:%s",
conn->user,
data->state.digest.realm,
conn->passwd,
data->state.digest.nonce,
data->state.digest.cnonce);
}
else {
md5this = (unsigned char *)
aprintf("%s:%s:%s",
conn->user,
data->state.digest.realm,
conn->passwd);
}
Curl_md5it(md5buf, md5this);
free(md5this); /* free this again */
md5_to_ascii(md5buf, ha1);
/*
A2 = Method ":" digest-uri-value
(The "Method" value is the HTTP request method as specified in section
5.1.1 of RFC 2616)
*/
md5this = (unsigned char *)aprintf("%s:%s", request, uripath);
Curl_md5it(md5buf, md5this);
free(md5this); /* free this again */
md5_to_ascii(md5buf, ha2);
md5this = (unsigned char *)aprintf("%s:%s:%s", ha1, data->state.digest.nonce,
ha2);
Curl_md5it(md5buf, md5this);
free(md5this); /* free this again */
md5_to_ascii(md5buf, request_digest);
/* for test case 64 (snooped from a Mozilla 1.3a request)
Authorization: Digest username="testuser", realm="testrealm", \
nonce="1053604145", uri="/64", response="c55f7f30d83d774a3d2dcacf725abaca"
*/
conn->allocptr.userpwd =
aprintf( "Authorization: Digest "
"username=\"%s\", "
"realm=\"%s\", "
"nonce=\"%s\", "
"uri=\"%s\", "
"response=\"%s\"\r\n",
conn->user,
data->state.digest.realm,
data->state.digest.nonce,
uripath, /* this is the PATH part of the URL */
request_digest );
return CURLE_OK;
}
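The whole computation above, restated as a compact stand-alone sketch for the no-qop case: HA1 = MD5(user:realm:passwd), HA2 = MD5(method:uri), response = MD5(HA1:nonce:HA2). It borrows OpenSSL's MD5() for brevity; the user, realm, nonce and uri come from the test case 64 comment above, while "secret" is an invented password (so the printed response will not match that comment):
#include <stdio.h>
#include <string.h>
#include <openssl/md5.h>
/* MD5 a string and hex-encode the digest, as md5_to_ascii() does above */
static void md5_hex(const char *input, char output[33])
{
  unsigned char digest[16];
  int i;
  MD5((const unsigned char *)input, strlen(input), digest);
  for(i = 0; i < 16; i++)
    sprintf(&output[i*2], "%02x", digest[i]);
}
int main(void)
{
  char a1[256], a2[256], final[256];
  char ha1[33], ha2[33], response[33];
  sprintf(a1, "%s:%s:%s", "testuser", "testrealm", "secret"); /* user:realm:passwd */
  md5_hex(a1, ha1);
  sprintf(a2, "%s:%s", "GET", "/64");                         /* method:uri */
  md5_hex(a2, ha2);
  sprintf(final, "%s:%s:%s", ha1, "1053604145", ha2);         /* HA1:nonce:HA2 */
  md5_hex(final, response);
  printf("response=\"%s\"\n", response);
  return 0;
}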
void Curl_digest_cleanup(struct SessionHandle *data)
{
if(data->state.digest.nonce)
free(data->state.digest.nonce);
data->state.digest.nonce = NULL;
if(data->state.digest.cnonce)
free(data->state.digest.cnonce);
data->state.digest.cnonce = NULL;
if(data->state.digest.realm)
free(data->state.digest.realm);
data->state.digest.realm = NULL;
data->state.digest.algo = CURLDIGESTALGO_MD5; /* default algorithm */
}
#endif

lib/http_digest.h Normal file

@@ -0,0 +1,48 @@
#ifndef __HTTP_DIGEST_H
#define __HTTP_DIGEST_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
***************************************************************************/
typedef enum {
CURLDIGEST_NONE, /* not a digest */
CURLDIGEST_BAD, /* a digest, but one we don't like */
CURLDIGEST_FINE, /* a digest we act on */
CURLDIGEST_LAST /* last entry in this enum, don't use */
} CURLdigest;
enum {
CURLDIGESTALGO_MD5,
CURLDIGESTALGO_MD5SESS
};
/* this is for digest header input */
CURLdigest Curl_input_digest(struct connectdata *conn, char *header);
/* this is for creating digest header output */
CURLcode Curl_output_digest(struct connectdata *conn,
unsigned char *request,
unsigned char *uripath);
void Curl_digest_cleanup(struct SessionHandle *data);
#endif

lib/http_negotiate.c Normal file

@@ -0,0 +1,216 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
***************************************************************************/
#include "setup.h"
#ifdef GSSAPI
#ifndef CURL_DISABLE_HTTP
/* -- WIN32 approved -- */
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>
#include "urldata.h"
#include "sendf.h"
#include "strequal.h"
#include "base64.h"
#include "http_negotiate.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
/* The last #include file should be: */
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
static int
get_gss_name(struct connectdata *conn, gss_name_t *server)
{
OM_uint32 major_status, minor_status;
gss_buffer_desc token = GSS_C_EMPTY_BUFFER;
char name[2048];
/* The GSSAPI implementation by Globus (known as GSI) requires the name to be
of the form "<service>/<fqdn>" instead of <service>@<fqdn> (i.e. slash instead
of at-sign). Also, GSI servers are often identified as 'host', not 'khttp'.
Change the following lines if you want to use GSI */
token.length = strlen("khttp@") + strlen(conn->hostname) + 1;
if (token.length + 1 > sizeof(name))
return EMSGSIZE;
sprintf(name, "khttp@%s", conn->hostname);
token.value = (void *) name;
major_status = gss_import_name(&minor_status,
&token,
GSS_C_NT_HOSTBASED_SERVICE,
server);
return GSS_ERROR(major_status) ? -1 : 0;
}
static void
log_gss_error(struct connectdata *conn, OM_uint32 error_status, char *prefix)
{
OM_uint32 maj_stat, min_stat;
OM_uint32 msg_ctx = 0;
gss_buffer_desc status_string;
char buf[1024];
size_t len;
snprintf(buf, sizeof(buf), "%s", prefix);
len = strlen(buf);
do {
maj_stat = gss_display_status (&min_stat,
error_status,
GSS_C_MECH_CODE,
GSS_C_NO_OID,
&msg_ctx,
&status_string);
if (sizeof(buf) > len + status_string.length + 1) {
sprintf(buf + len, ": %s", (char*) status_string.value);
len += status_string.length;
}
gss_release_buffer(&min_stat, &status_string);
} while (!GSS_ERROR(maj_stat) && msg_ctx != 0);
infof(conn->data, buf);
}
int Curl_input_negotiate(struct connectdata *conn, char *header)
{
struct negotiatedata *neg_ctx = &conn->data->state.negotiate;
OM_uint32 major_status, minor_status, minor_status2;
gss_buffer_desc input_token = GSS_C_EMPTY_BUFFER;
gss_buffer_desc output_token = GSS_C_EMPTY_BUFFER;
int ret;
size_t len;
while(*header && isspace((int)*header))
header++;
if(!checkprefix("GSS-Negotiate", header))
return -1;
if (neg_ctx->context && neg_ctx->status == GSS_S_COMPLETE) {
/* We successfully finished our part of the authentication, but the server
* rejected it (since we're here again). Exit with an error since we
* can't invent anything better */
Curl_cleanup_negotiate(conn->data);
return -1;
}
if (neg_ctx->server_name == NULL &&
(ret = get_gss_name(conn, &neg_ctx->server_name)))
return ret;
header += strlen("GSS-Negotiate");
while(*header && isspace((int)*header))
header++;
len = strlen(header);
if (len > 0) {
int rawlen;
input_token.length = (len+3)/4 * 3;
input_token.value = malloc(input_token.length);
if (input_token.value == NULL)
return ENOMEM;
rawlen = Curl_base64_decode(header, input_token.value);
if (rawlen < 0)
return -1;
input_token.length = rawlen;
}
major_status = gss_init_sec_context(&minor_status,
GSS_C_NO_CREDENTIAL,
&neg_ctx->context,
neg_ctx->server_name,
GSS_C_NO_OID,
GSS_C_DELEG_FLAG,
0,
GSS_C_NO_CHANNEL_BINDINGS,
&input_token,
NULL,
&output_token,
NULL,
NULL);
if (input_token.length > 0)
gss_release_buffer(&minor_status2, &input_token);
neg_ctx->status = major_status;
if (GSS_ERROR(major_status)) {
/* Curl_cleanup_negotiate(conn->data) ??? */
log_gss_error(conn, minor_status,
(char *)"gss_init_sec_context() failed: ");
return -1;
}
if (output_token.length == 0) {
return -1;
}
neg_ctx->output_token = output_token;
/* conn->bits.close = FALSE; */
return 0;
}
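A note on the buffer sizing above: (len+3)/4 * 3 is an upper bound on how many bytes a base64 string of len characters can decode to, since every four input characters carry at most three bytes. For example, a 28-character token gives (28+3)/4 * 3 = 21 bytes, and Curl_base64_decode() then reports the exact decoded length.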
CURLcode Curl_output_negotiate(struct connectdata *conn)
{
struct negotiatedata *neg_ctx = &conn->data->state.negotiate;
OM_uint32 minor_status;
char *encoded = NULL;
int len = Curl_base64_encode(neg_ctx->output_token.value,
neg_ctx->output_token.length,
&encoded);
if (len < 0)
return CURLE_OUT_OF_MEMORY;
conn->allocptr.userpwd =
aprintf("Authorization: GSS-Negotiate %s\r\n", encoded);
free(encoded);
gss_release_buffer(&minor_status, &neg_ctx->output_token);
return (conn->allocptr.userpwd == NULL) ? CURLE_OUT_OF_MEMORY : CURLE_OK;
}
void Curl_cleanup_negotiate(struct SessionHandle *data)
{
OM_uint32 minor_status;
struct negotiatedata *neg_ctx = &data->state.negotiate;
if (neg_ctx->context != GSS_C_NO_CONTEXT)
gss_delete_sec_context(&minor_status, &neg_ctx->context, GSS_C_NO_BUFFER);
if (neg_ctx->output_token.length != 0)
gss_release_buffer(&minor_status, &neg_ctx->output_token);
if (neg_ctx->server_name != GSS_C_NO_NAME)
gss_release_name(&minor_status, &neg_ctx->server_name);
memset(neg_ctx, 0, sizeof(*neg_ctx));
}
#endif
#endif

39
lib/http_negotiate.h Normal file
View File

@@ -0,0 +1,39 @@
#ifndef __HTTP_NEGOTIATE_H
#define __HTTP_NEGOTIATE_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
***************************************************************************/
#ifdef GSSAPI
/* this is for Negotiate header input */
int Curl_input_negotiate(struct connectdata *conn, char *header);
/* this is for creating Negotiate header output */
CURLcode Curl_output_negotiate(struct connectdata *conn);
void Curl_cleanup_negotiate(struct SessionHandle *data);
#endif
#endif

lib/http_ntlm.c Normal file

@@ -0,0 +1,575 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
***************************************************************************/
#include "setup.h"
/* NTLM details:
http://davenport.sourceforge.net/ntlm.html
http://www.innovation.ch/java/ntlm.html
*/
#ifndef CURL_DISABLE_HTTP
#ifdef USE_SSLEAY
/* We need OpenSSL for the crypto lib to provide us with MD4 and DES */
/* -- WIN32 approved -- */
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <ctype.h>
#include "urldata.h"
#include "sendf.h"
#include "strequal.h"
#include "base64.h"
#include "http_ntlm.h"
#include "url.h"
#include "http.h" /* for Curl_http_auth_stage() */
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
#include <openssl/des.h>
#include <openssl/md4.h>
#include <openssl/ssl.h>
#if OPENSSL_VERSION_NUMBER < 0x00907001L
#define DES_key_schedule des_key_schedule
#define DES_cblock des_cblock
#define DES_set_odd_parity des_set_odd_parity
#define DES_set_key des_set_key
#define DES_ecb_encrypt des_ecb_encrypt
/* This is how things were done in the old days */
#define DESKEY(x) x
#define DESKEYARG(x) x
#else
/* Modern version */
#define DESKEYARG(x) *x
#define DESKEY(x) &x
#endif
/* The last #include file should be: */
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
/* Define this to make the type-3 message include the NT response message */
#undef USE_NTRESPONSES
/*
(*) = A "security buffer" is a triplet consisting of two shorts and one
long:
1. a 'short' containing the length of the buffer in bytes
2. a 'short' containing the allocated space for the buffer in bytes
3. a 'long' containing the offset to the start of the buffer from the
beginning of the NTLM message, in bytes.
*/
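Written out as a struct, the security buffer described above would look roughly like this (a sketch with assumed names; the code below emits the three fields by hand through the SHORTPAIR/LONGQUARTET macros instead):
struct ntlm_security_buffer {
  unsigned short length;    /* length of the buffer contents, in bytes */
  unsigned short allocated; /* allocated space for the buffer, in bytes */
  unsigned int   offset;    /* offset of the buffer data from the start of
                               the NTLM message, in bytes (32 bits on the wire) */
};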
CURLntlm Curl_input_ntlm(struct connectdata *conn,
bool proxy, /* if proxy or not */
char *header) /* rest of the www-authenticate:
header */
{
/* point to the correct struct with this */
struct ntlmdata *ntlm;
ntlm = proxy?&conn->proxyntlm:&conn->ntlm;
/* skip initial whitespaces */
while(*header && isspace((int)*header))
header++;
if(checkprefix("NTLM", header)) {
unsigned char buffer[256];
header += strlen("NTLM");
while(*header && isspace((int)*header))
header++;
if(*header) {
/* We got a type-2 message here:
Index Description Content
0 NTLMSSP Signature Null-terminated ASCII "NTLMSSP"
(0x4e544c4d53535000)
8 NTLM Message Type long (0x02000000)
12 Target Name security buffer(*)
20 Flags long
24 Challenge 8 bytes
(32) Context (optional) 8 bytes (two consecutive longs)
(40) Target Information (optional) security buffer(*)
32 (48) start of data block
*/
int size = Curl_base64_decode(header, buffer);
ntlm->state = NTLMSTATE_TYPE2; /* we got a type-2 */
if(size >= 48)
/* the nonce of interest is index [24 .. 31], 8 bytes */
memcpy(ntlm->nonce, &buffer[24], 8);
/* at index decimal 20, there's a 32bit NTLM flag field */
}
else {
if(ntlm->state >= NTLMSTATE_TYPE1)
return CURLNTLM_BAD;
ntlm->state = NTLMSTATE_TYPE1; /* we should send away a type-1 */
}
}
return CURLNTLM_FINE;
}
/*
* Turns a 56 bit key into the 64 bit, odd parity key and sets the key. The
* key schedule ks is also set.
*/
static void setup_des_key(unsigned char *key_56,
DES_key_schedule DESKEYARG(ks))
{
DES_cblock key;
key[0] = key_56[0];
key[1] = ((key_56[0] << 7) & 0xFF) | (key_56[1] >> 1);
key[2] = ((key_56[1] << 6) & 0xFF) | (key_56[2] >> 2);
key[3] = ((key_56[2] << 5) & 0xFF) | (key_56[3] >> 3);
key[4] = ((key_56[3] << 4) & 0xFF) | (key_56[4] >> 4);
key[5] = ((key_56[4] << 3) & 0xFF) | (key_56[5] >> 5);
key[6] = ((key_56[5] << 2) & 0xFF) | (key_56[6] >> 6);
key[7] = (key_56[6] << 1) & 0xFF;
DES_set_odd_parity(&key);
DES_set_key(&key, ks);
}
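A stand-alone illustration of the bit-shuffling in setup_des_key() above: each output byte takes the left-over low bits of the previous 56-bit key byte plus the top bits of the next one, after which DES_set_odd_parity() overwrites the lowest bit of every byte (demo code with invented key bytes):
#include <stdio.h>
int main(void)
{
  unsigned char key_56[7] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd };
  unsigned char key[8];
  int i;
  key[0] = key_56[0];
  for(i = 1; i < 7; i++)   /* same shifts as the unrolled code above */
    key[i] = (unsigned char)(((key_56[i-1] << (8 - i)) & 0xFF) | (key_56[i] >> i));
  key[7] = (unsigned char)((key_56[6] << 1) & 0xFF);
  for(i = 0; i < 8; i++)
    printf("%02x%s", key[i], (i < 7) ? " " : "\n");
  return 0;
}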
/*
* takes a 21 byte array and treats it as 3 56-bit DES keys. The
* 8 byte plaintext is encrypted with each key and the resulting 24
* bytes are stored in the results array.
*/
static void calc_resp(unsigned char *keys,
unsigned char *plaintext,
unsigned char *results)
{
DES_key_schedule ks;
setup_des_key(keys, DESKEY(ks));
DES_ecb_encrypt((DES_cblock*) plaintext, (DES_cblock*) results,
DESKEY(ks), DES_ENCRYPT);
setup_des_key(keys+7, DESKEY(ks));
DES_ecb_encrypt((DES_cblock*) plaintext, (DES_cblock*) (results+8),
DESKEY(ks), DES_ENCRYPT);
setup_des_key(keys+14, DESKEY(ks));
DES_ecb_encrypt((DES_cblock*) plaintext, (DES_cblock*) (results+16),
DESKEY(ks), DES_ENCRYPT);
}
/*
* Set up lanmanager and nt hashed passwords
*/
static void mkhash(char *password,
unsigned char *nonce, /* 8 bytes */
unsigned char *lmresp /* must fit 0x18 bytes */
#ifdef USE_NTRESPONSES
, unsigned char *ntresp /* must fit 0x18 bytes */
#endif
)
{
unsigned char lmbuffer[21];
#ifdef USE_NTRESPONSES
unsigned char ntbuffer[21];
#endif
unsigned char *pw;
static const unsigned char magic[] = {
0x4B, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25
};
int i;
int len = strlen(password);
/* make it fit at least 14 bytes */
pw = malloc(len<7?14:len*2);
if(!pw)
return; /* this will lead to a badly generated package */
if (len > 14)
len = 14;
for (i=0; i<len; i++)
pw[i] = toupper(password[i]);
for (; i<14; i++)
pw[i] = 0;
{
/* create LanManager hashed password */
DES_key_schedule ks;
setup_des_key(pw, DESKEY(ks));
DES_ecb_encrypt((DES_cblock *)magic, (DES_cblock *)lmbuffer,
DESKEY(ks), DES_ENCRYPT);
setup_des_key(pw+7, DESKEY(ks));
DES_ecb_encrypt((DES_cblock *)magic, (DES_cblock *)(lmbuffer+8),
DESKEY(ks), DES_ENCRYPT);
memset(lmbuffer+16, 0, 5);
}
/* create LM responses */
calc_resp(lmbuffer, nonce, lmresp);
#ifdef USE_NTRESPONSES
{
/* create NT hashed password */
MD4_CTX MD4;
len = strlen(password);
for (i=0; i<len; i++) {
pw[2*i] = password[i];
pw[2*i+1] = 0;
}
MD4_Init(&MD4);
MD4_Update(&MD4, pw, 2*len);
MD4_Final(ntbuffer, &MD4);
memset(ntbuffer+16, 0, 8);
}
calc_resp(ntbuffer, nonce, ntresp);
#endif
free(pw);
}
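In other words, mkhash() builds the classic LanManager hash: the password is upper-cased and padded to 14 bytes, split into two 7-byte halves, and each half DES-encrypts the constant "KGS!@#$%" (the magic[] bytes above spell exactly that string); the two 8-byte results plus five zero bytes form the 21-byte buffer handed to calc_resp(). With USE_NTRESPONSES defined, the NT hash is MD4 over the password with a zero byte inserted after every character (a naive two-byte Unicode encoding).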
#define SHORTPAIR(x) ((x) & 0xff), ((x) >> 8)
#define LONGQUARTET(x) ((x) & 0xff), (((x) >> 8)&0xff), \
(((x) >>16)&0xff), ((x)>>24)
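For reference, what these macros expand to when used as %c arguments further down (worked example, values taken from the type-1 code below):
/*
  SHORTPAIR(0x18)                          ->  0x18, 0x00              (16-bit little-endian)
  LONGQUARTET(NTLMFLAG_NEGOTIATE_OEM |
              NTLMFLAG_NEGOTIATE_NTLM_KEY) ->  0x02, 0x02, 0x00, 0x00  (32-bit little-endian 0x0202)
*/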
/* this is for creating ntlm header output */
CURLcode Curl_output_ntlm(struct connectdata *conn,
bool proxy)
{
const char *domain=""; /* empty */
const char *host=""; /* empty */
int domlen=strlen(domain);
int hostlen = strlen(host);
int hostoff; /* host name offset */
int domoff; /* domain name offset */
int size;
char *base64=NULL;
unsigned char ntlmbuf[256]; /* enough, unless the host/domain is very long */
/* point to the address of the pointer that holds the string to send to the
server, either for the plain host or for an HTTP proxy */
char **allocuserpwd;
/* point to the name and password for this */
char *userp;
char *passwdp;
/* point to the correct struct with this */
struct ntlmdata *ntlm;
if(proxy) {
allocuserpwd = &conn->allocptr.proxyuserpwd;
userp = conn->proxyuser;
passwdp = conn->proxypasswd;
ntlm = &conn->proxyntlm;
}
else {
allocuserpwd = &conn->allocptr.userpwd;
userp = conn->user;
passwdp = conn->passwd;
ntlm = &conn->ntlm;
}
switch(ntlm->state) {
case NTLMSTATE_TYPE1:
default: /* for the weird cases we (re)start here */
hostoff = 32;
domoff = hostoff + hostlen;
/* Create and send a type-1 message:
Index Description Content
0 NTLMSSP Signature Null-terminated ASCII "NTLMSSP"
(0x4e544c4d53535000)
8 NTLM Message Type long (0x01000000)
12 Flags long
16 Supplied Domain security buffer(*)
24 Supplied Workstation security buffer(*)
32 start of data block
*/
snprintf((char *)ntlmbuf, sizeof(ntlmbuf), "NTLMSSP%c"
"\x01%c%c%c" /* 32-bit type = 1 */
"%c%c%c%c" /* 32-bit NTLM flag field */
"%c%c" /* domain length */
"%c%c" /* domain allocated space */
"%c%c" /* domain name offset */
"%c%c" /* 2 zeroes */
"%c%c" /* host length */
"%c%c" /* host allocated space */
"%c%c" /* host name offset */
"%c%c" /* 2 zeroes */
"%s" /* host name */
"%s", /* domain string */
0, /* trailing zero */
0,0,0, /* part of type-1 long */
LONGQUARTET(
NTLMFLAG_NEGOTIATE_OEM| /* 2 */
NTLMFLAG_NEGOTIATE_NTLM_KEY /* 200 */
/* equals 0x0202 */
),
SHORTPAIR(domlen),
SHORTPAIR(domlen),
SHORTPAIR(domoff),
0,0,
SHORTPAIR(hostlen),
SHORTPAIR(hostlen),
SHORTPAIR(hostoff),
0,0,
host, domain);
/* initial packet length */
size = 32 + hostlen + domlen;
/* now keeper of the base64 encoded package size */
size = Curl_base64_encode(ntlmbuf, size, &base64);
if(size >0 ) {
Curl_safefree(*allocuserpwd);
*allocuserpwd = aprintf("%sAuthorization: NTLM %s\r\n",
proxy?"Proxy-":"",
base64);
free(base64);
}
else
return CURLE_OUT_OF_MEMORY; /* FIX TODO */
break;
case NTLMSTATE_TYPE2:
/* We received the type-2 already, create a type-3 message:
Index Description Content
0 NTLMSSP Signature Null-terminated ASCII "NTLMSSP"
(0x4e544c4d53535000)
8 NTLM Message Type long (0x03000000)
12 LM/LMv2 Response security buffer(*)
20 NTLM/NTLMv2 Response security buffer(*)
28 Domain Name security buffer(*)
36 User Name security buffer(*)
44 Workstation Name security buffer(*)
(52) Session Key (optional) security buffer(*)
(60) Flags (optional) long
52 (64) start of data block
*/
{
int lmrespoff;
int ntrespoff;
int useroff;
unsigned char lmresp[0x18]; /* fixed-size */
#ifdef USE_NTRESPONSES
unsigned char ntresp[0x18]; /* fixed-size */
#endif
const char *user;
int userlen;
user = strchr(userp, '\\');
if(!user)
user = strchr(userp, '/');
if (user) {
domain = userp;
domlen = user - domain;
user++;
}
else
user = userp;
userlen = strlen(user);
mkhash(passwdp, &ntlm->nonce[0], lmresp
#ifdef USE_NTRESPONSES
, ntresp
#endif
);
domoff = 64; /* always */
useroff = domoff + domlen;
hostoff = useroff + userlen;
lmrespoff = hostoff + hostlen;
ntrespoff = lmrespoff + 0x18;
/* Create the big type-3 message binary blob */
size = snprintf((char *)ntlmbuf, sizeof(ntlmbuf),
"NTLMSSP%c"
"\x03%c%c%c" /* type-3, 32 bits */
"%c%c%c%c" /* LanManager length + allocated space */
"%c%c" /* LanManager offset */
"%c%c" /* 2 zeroes */
"%c%c" /* NT-response length */
"%c%c" /* NT-response allocated space */
"%c%c" /* NT-response offset */
"%c%c" /* 2 zeroes */
"%c%c" /* domain length */
"%c%c" /* domain allocated space */
"%c%c" /* domain name offset */
"%c%c" /* 2 zeroes */
"%c%c" /* user length */
"%c%c" /* user allocated space */
"%c%c" /* user offset */
"%c%c" /* 2 zeroes */
"%c%c" /* host length */
"%c%c" /* host allocated space */
"%c%c" /* host offset */
"%c%c%c%c%c%c" /* 6 zeroes */
"\xff\xff" /* message length */
"%c%c" /* 2 zeroes */
"\x01\x82" /* flags */
"%c%c" /* 2 zeroes */
/* domain string */
/* user string */
/* host string */
/* LanManager response */
/* NT response */
,
0, /* zero termination */
0,0,0, /* type-3 long, the 24 upper bits */
SHORTPAIR(0x18), /* LanManager response length, twice */
SHORTPAIR(0x18),
SHORTPAIR(lmrespoff),
0x0, 0x0,
#ifdef USE_NTRESPONSES
SHORTPAIR(0x18), /* NT-response length, twice */
SHORTPAIR(0x18),
#else
0x0, 0x0,
0x0, 0x0,
#endif
SHORTPAIR(ntrespoff),
0x0, 0x0,
SHORTPAIR(domlen),
SHORTPAIR(domlen),
SHORTPAIR(domoff),
0x0, 0x0,
SHORTPAIR(userlen),
SHORTPAIR(userlen),
SHORTPAIR(useroff),
0x0, 0x0,
SHORTPAIR(hostlen),
SHORTPAIR(hostlen),
SHORTPAIR(hostoff),
0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0,
0x0, 0x0);
/* size is now 64 */
size=64;
ntlmbuf[62]=ntlmbuf[63]=0;
memcpy(&ntlmbuf[size], domain, domlen);
size += domlen;
memcpy(&ntlmbuf[size], user, userlen);
size += userlen;
/* we append the binary hashes to the end of the blob */
if(size < ((int)sizeof(ntlmbuf) - 0x18)) {
memcpy(&ntlmbuf[size], lmresp, 0x18);
size += 0x18;
}
#ifdef USE_NTRESPONSES
if(size < ((int)sizeof(ntlmbuf) - 0x18)) {
memcpy(&ntlmbuf[size], ntresp, 0x18);
size += 0x18;
}
#endif
ntlmbuf[56] = size & 0xff;
ntlmbuf[57] = size >> 8;
/* convert the binary blob into base64 */
size = Curl_base64_encode(ntlmbuf, size, &base64);
if(size >0 ) {
Curl_safefree(*allocuserpwd);
*allocuserpwd = aprintf("%sAuthorization: NTLM %s\r\n",
proxy?"Proxy-":"",
base64);
free(base64);
}
else
return CURLE_OUT_OF_MEMORY; /* FIX TODO */
ntlm->state = NTLMSTATE_TYPE3; /* we sent a type-3 */
/* Switch to web authentication after proxy authentication is done */
if (proxy)
Curl_http_auth_stage(conn->data, 401);
}
break;
case NTLMSTATE_TYPE3:
/* connection is already authenticated,
* don't send a header in future requests */
if(*allocuserpwd) {
free(*allocuserpwd);
*allocuserpwd=NULL;
}
break;
}
return CURLE_OK;
}
#endif /* USE_SSLEAY */
#endif /* !CURL_DISABLE_HTTP */

lib/http_ntlm.h Normal file

@@ -0,0 +1,143 @@
#ifndef __HTTP_NTLM_H
#define __HTTP_NTLM_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
***************************************************************************/
typedef enum {
CURLNTLM_NONE, /* not a ntlm */
CURLNTLM_BAD, /* an ntlm, but one we don't like */
CURLNTLM_FIRST, /* the first 401-reply we got with NTLM */
CURLNTLM_FINE, /* an ntlm we act on */
CURLNTLM_LAST /* last entry in this enum, don't use */
} CURLntlm;
/* this is for ntlm header input */
CURLntlm Curl_input_ntlm(struct connectdata *conn, bool proxy, char *header);
/* this is for creating ntlm header output */
CURLcode Curl_output_ntlm(struct connectdata *conn, bool proxy);
void Curl_ntlm_cleanup(struct SessionHandle *data);
/* Flag bits definitions based on http://davenport.sourceforge.net/ntlm.html */
#define NTLMFLAG_NEGOTIATE_UNICODE (1<<0)
/* Indicates that Unicode strings are supported for use in security buffer
data. */
#define NTLMFLAG_NEGOTIATE_OEM (1<<1)
/* Indicates that OEM strings are supported for use in security buffer data. */
#define NTLMFLAG_REQUEST_TARGET (1<<2)
/* Requests that the server's authentication realm be included in the Type 2
message. */
/* unknown (1<<3) */
#define NTLMFLAG_NEGOTIATE_SIGN (1<<4)
/* Specifies that authenticated communication between the client and server
should carry a digital signature (message integrity). */
#define NTLMFLAG_NEGOTIATE_SEAL (1<<5)
/* Specifies that authenticated communication between the client and server
should be encrypted (message confidentiality). */
#define NTLMFLAG_NEGOTIATE_DATAGRAM_STYLE (1<<6)
/* unknown purpose */
#define NTLMFLAG_NEGOTIATE_LM_KEY (1<<7)
/* Indicates that the LAN Manager session key should be used for signing and
sealing authenticated communications. */
#define NTLMFLAG_NEGOTIATE_NETWARE (1<<8)
/* unknown purpose */
#define NTLMFLAG_NEGOTIATE_NTLM_KEY (1<<9)
/* Indicates that NTLM authentication is being used. */
/* unknown (1<<10) */
/* unknown (1<<11) */
#define NTLMFLAG_NEGOTIATE_DOMAIN_SUPPLIED (1<<12)
/* Sent by the client in the Type 1 message to indicate that a desired
authentication realm is included in the message. */
#define NTLMFLAG_NEGOTIATE_WORKSTATION_SUPPLIED (1<<13)
/* Sent by the client in the Type 1 message to indicate that the client
workstation's name is included in the message. */
#define NTLMFLAG_NEGOTIATE_LOCAL_CALL (1<<14)
/* Sent by the server to indicate that the server and client are on the same
machine. Implies that the client may use a pre-established local security
context rather than responding to the challenge. */
#define NTLMFLAG_NEGOTIATE_ALWAYS_SIGN (1<<15)
/* Indicates that authenticated communication between the client and server
should be signed with a "dummy" signature. */
#define NTLMFLAG_TARGET_TYPE_DOMAIN (1<<16)
/* Sent by the server in the Type 2 message to indicate that the target
authentication realm is a domain. */
#define NTLMFLAG_TARGET_TYPE_SERVER (1<<17)
/* Sent by the server in the Type 2 message to indicate that the target
authentication realm is a server. */
#define NTLMFLAG_TARGET_TYPE_SHARE (1<<18)
/* Sent by the server in the Type 2 message to indicate that the target
authentication realm is a share. Presumably, this is for share-level
authentication. Usage is unclear. */
#define NTLMFLAG_NEGOTIATE_NTLM2_KEY (1<<19)
/* Indicates that the NTLM2 signing and sealing scheme should be used for
protecting authenticated communications. */
#define NTLMFLAG_REQUEST_INIT_RESPONSE (1<<20)
/* unknown purpose */
#define NTLMFLAG_REQUEST_ACCEPT_RESPONSE (1<<21)
/* unknown purpose */
#define NTLMFLAG_REQUEST_NONNT_SESSION_KEY (1<<22)
/* unknown purpose */
#define NTLMFLAG_NEGOTIATE_TARGET_INFO (1<<23)
/* Sent by the server in the Type 2 message to indicate that it is including a
Target Information block in the message. */
/* unknown (1<24) */
/* unknown (1<25) */
/* unknown (1<26) */
/* unknown (1<27) */
/* unknown (1<28) */
#define NTLMFLAG_NEGOTIATE_128 (1<<29)
/* Indicates that 128-bit encryption is supported. */
#define NTLMFLAG_NEGOTIATE_KEY_EXCHANGE (1<<30)
/* unknown purpose */
#define NTLMFLAG_NEGOTIATE_56 (1<<31)
/* Indicates that 56-bit encryption is supported. */
#endif


@@ -73,10 +73,14 @@
#endif
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
#ifdef DJGPP
#define IOCTL_3_ARGS
#endif
#define SYS_ERROR -1
char *Curl_if2ip(char *interface, char *buf, int buf_size)


@@ -64,7 +64,7 @@
#endif
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -322,7 +322,7 @@ CURLcode Curl_krb_kauth(struct connectdata *conn)
save = Curl_set_command_prot(conn, prot_private);
result = Curl_ftpsendf(conn, "SITE KAUTH %s", conn->data->state.user);
result = Curl_ftpsendf(conn, "SITE KAUTH %s", conn->user);
if(result)
return result;
@@ -363,7 +363,7 @@ CURLcode Curl_krb_kauth(struct connectdata *conn)
for(; *p && *p != ' ' && *p != '\r' && *p != '\n'; p++);
*p = 0;
des_string_to_key (conn->data->state.passwd, &key);
des_string_to_key (conn->passwd, &key);
des_key_sched(&key, schedule);
des_pcbc_encrypt((void *)tkt.dat, (void *)tktcopy.dat,


@@ -74,7 +74,7 @@ static void DynaOpen(void)
* liblber.so automatically, but since it does not we will
* handle it here by opening liblber.so as global.
*/
dlopen("liblber.so",
liblber = dlopen("liblber.so",
#ifdef RTLD_LAZY_GLOBAL /* It turns out some systems use this: */
RTLD_LAZY_GLOBAL
#else
@@ -178,8 +178,8 @@ CURLcode Curl_ldap(struct connectdata *conn)
status = CURLE_COULDNT_CONNECT;
} else {
rc = ldap_simple_bind_s(server,
conn->bits.user_passwd?data->state.user:NULL,
conn->bits.user_passwd?data->state.passwd:NULL);
conn->bits.user_passwd?conn->user:NULL,
conn->bits.user_passwd?conn->passwd:NULL);
if (rc != 0) {
failf(data, "LDAP: %s", ldap_err2string(rc));
status = CURLE_LDAP_CANNOT_BIND;


@@ -28,7 +28,7 @@
#include "llist.h"
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
/* this must be the last include file */
#include "memdebug.h"
#endif
@@ -159,10 +159,10 @@ Curl_llist_count(curl_llist *list)
void
Curl_llist_destroy(curl_llist *list, void *user)
{
while (list->size > 0) {
Curl_llist_remove(list, CURL_LLIST_TAIL(list), user);
}
if(list) {
while (list->size > 0)
Curl_llist_remove(list, CURL_LLIST_TAIL(list), user);
free(list);
list = NULL;
free(list);
}
}

lib/makefile.dj Normal file

@@ -0,0 +1,157 @@
#
# Adapted for djgpp2 / Watt-32 / DOS by
# Gisle Vanem <giva@bgnett.no>
#
DEPEND_PREREQ = config.h getdate.c
include ../packages/DOS/common.dj
ifeq ($(USE_SSL),1)
CFLAGS += -I$(OPENSSL_ROOT)
endif
SOURCES = base64.c connect.c content_.c cookie.c dict.c \
easy.c escape.c file.c formdata.c ftp.c \
getdate.c getenv.c getinfo.c getpass.c hash.c \
hostip.c http.c http_chu.c if2ip.c krb4.c \
ldap.c llist.c memdebug.c mprintf.c multi.c \
netrc.c progress.c security.c sendf.c share.c \
speedche.c ssluse.c strequal.c strtok.c telnet.c \
timeval.c transfer.c url.c version.c
OBJECTS = $(SOURCES:.c=.o)
CURL_LIB = libcurl.a
all: config.h $(CURL_LIB)
$(CURL_LIB): $(OBJECTS)
ar rs $@ $?
config.h: config.dj
@echo '#include "./config.dj"' > $@
getdate.c: getdate.y
$(YACC) -o $@ $^
clean:
- rm -f $(OBJECTS) $(CURL_LIB) Makefile.bak config.h getdate.c
# DO NOT DELETE THIS LINE
base64.o: base64.c setup.h config.h config.dj ../include/curl/mprintf.h \
base64.h
connect.o: connect.c setup.h config.h config.dj urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h sendf.h if2ip.h
content_.o: content_.c setup.h config.h config.dj
cookie.o: cookie.c setup.h config.h config.dj cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h getdate.h strequal.h strtok.h
dict.o: dict.c setup.h config.h config.dj urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h transfer.h sendf.h progress.h strequal.h \
../include/curl/mprintf.h
easy.o: easy.c setup.h config.h config.dj strequal.h urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h transfer.h ssluse.h url.h getinfo.h \
../include/curl/mprintf.h
escape.o: escape.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h
file.o: file.c setup.h config.h config.dj urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h progress.h sendf.h escape.h ../include/curl/mprintf.h
formdata.o: formdata.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h \
formdata.h strequal.h
ftp.o: ftp.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h \
urldata.h cookie.h formdata.h timeval.h http_chunks.h hostip.h hash.h \
llist.h sendf.h if2ip.h progress.h transfer.h escape.h http.h ftp.h \
strequal.h ssluse.h connect.h ../include/curl/mprintf.h
getdate.o: getdate.c setup.h config.h config.dj getdate.h
getenv.o: getenv.c setup.h config.h config.dj
getinfo.o: getinfo.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h \
urldata.h cookie.h formdata.h timeval.h http_chunks.h hostip.h hash.h \
llist.h
getpass.o: getpass.c setup.h config.h config.dj
hash.o: hash.c setup.h config.h config.dj hash.h llist.h
hostip.o: hostip.c setup.h config.h config.dj urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h sendf.h share.h ../include/curl/mprintf.h
http.o: http.c setup.h config.h config.dj urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h transfer.h sendf.h progress.h base64.h strequal.h \
ssluse.h ../include/curl/mprintf.h
http_chu.o: http_chu.c setup.h config.h config.dj urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h sendf.h content_encoding.h ../include/curl/mprintf.h
if2ip.o: if2ip.c setup.h config.h config.dj
krb4.o: krb4.c setup.h config.h config.dj
ldap.o: ldap.c setup.h config.h config.dj urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h sendf.h escape.h transfer.h ../include/curl/mprintf.h
llist.o: llist.c setup.h config.h config.dj llist.h
memdebug.o: memdebug.c
mprintf.o: mprintf.c setup.h config.h config.dj
multi.o: multi.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h \
urldata.h cookie.h formdata.h timeval.h http_chunks.h hostip.h hash.h \
llist.h transfer.h url.h connect.h progress.h
netrc.o: netrc.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h \
strequal.h strtok.h
progress.o: progress.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h \
urldata.h cookie.h formdata.h timeval.h http_chunks.h hostip.h hash.h \
llist.h sendf.h progress.h ../include/curl/mprintf.h
security.o: security.c setup.h config.h config.dj
sendf.o: sendf.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h \
urldata.h cookie.h formdata.h timeval.h http_chunks.h hostip.h hash.h \
llist.h sendf.h connect.h ../include/curl/mprintf.h
share.o: share.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h \
urldata.h cookie.h formdata.h timeval.h http_chunks.h hostip.h hash.h \
llist.h share.h
speedche.o: speedche.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h \
urldata.h cookie.h formdata.h timeval.h http_chunks.h hostip.h hash.h \
llist.h sendf.h speedcheck.h
ssluse.o: ssluse.c setup.h config.h config.dj urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h sendf.h url.h
strequal.o: strequal.c setup.h config.h config.dj
strtok.o: strtok.c setup.h config.h config.dj
telnet.o: telnet.c setup.h config.h config.dj urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h transfer.h sendf.h ../include/curl/mprintf.h \
arpa_telnet.h
timeval.o: timeval.c timeval.h setup.h config.h config.dj
transfer.o: transfer.c setup.h config.h config.dj strequal.h urldata.h \
cookie.h ../include/curl/curl.h ../include/curl/types.h \
../include/curl/easy.h ../include/curl/multi.h formdata.h timeval.h \
http_chunks.h hostip.h hash.h llist.h netrc.h content_encoding.h \
transfer.h sendf.h speedcheck.h getpass.h progress.h getdate.h http.h \
url.h getinfo.h ssluse.h ../include/curl/mprintf.h
url.o: url.c setup.h config.h config.dj urldata.h cookie.h \
../include/curl/curl.h ../include/curl/types.h ../include/curl/easy.h \
../include/curl/multi.h formdata.h timeval.h http_chunks.h hostip.h \
hash.h llist.h netrc.h base64.h ssluse.h if2ip.h transfer.h sendf.h \
getpass.h progress.h strequal.h escape.h strtok.h share.h \
content_encoding.h ftp.h dict.h telnet.h http.h file.h ldap.h url.h \
connect.h ca-bundle.h ../include/curl/mprintf.h
version.o: version.c setup.h config.h config.dj ../include/curl/curl.h \
../include/curl/types.h ../include/curl/easy.h ../include/curl/multi.h \
urldata.h cookie.h formdata.h timeval.h http_chunks.h hostip.h hash.h \
llist.h

lib/md5.c Normal file

@@ -0,0 +1,356 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
***************************************************************************/
#include "setup.h"
#ifndef USE_SSLEAY
/* This code segment is only used if OpenSSL is not provided, since if it is
we use the MD5 function provided there instead. No good duplicating
code! */
/* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
rights reserved.
License to copy and use this software is granted provided that it
is identified as the "RSA Data Security, Inc. MD5 Message-Digest
Algorithm" in all material mentioning or referencing this software
or this function.
License is also granted to make and use derivative works provided
that such works are identified as "derived from the RSA Data
Security, Inc. MD5 Message-Digest Algorithm" in all material
mentioning or referencing the derived work.
RSA Data Security, Inc. makes no representations concerning either
the merchantability of this software or the suitability of this
software for any particular purpose. It is provided "as is"
without express or implied warranty of any kind.
These notices must be retained in any copies of any part of this
documentation and/or software.
*/
#include <string.h>
/* UINT4 defines a four byte word */
typedef unsigned long int UINT4;
/* MD5 context. */
struct md5_ctx {
UINT4 state[4]; /* state (ABCD) */
UINT4 count[2]; /* number of bits, modulo 2^64 (lsb first) */
unsigned char buffer[64]; /* input buffer */
};
typedef struct md5_ctx MD5_CTX;
static void MD5_Init(struct md5_ctx *);
static void MD5_Update(struct md5_ctx *, unsigned char *, unsigned int);
static void MD5_Final(unsigned char [16], struct md5_ctx *);
/* Constants for MD5Transform routine.
*/
#define S11 7
#define S12 12
#define S13 17
#define S14 22
#define S21 5
#define S22 9
#define S23 14
#define S24 20
#define S31 4
#define S32 11
#define S33 16
#define S34 23
#define S41 6
#define S42 10
#define S43 15
#define S44 21
static void MD5Transform(UINT4 [4], unsigned char [64]);
static void Encode(unsigned char *, UINT4 *, unsigned int);
static void Decode(UINT4 *, unsigned char *, unsigned int);
#define MD5_memcpy(dst,src,len) memcpy(dst,src,len)
#define MD5_memset(dst,val,len) memset(dst,val,len)
static unsigned char PADDING[64] = {
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* F, G, H and I are basic MD5 functions.
*/
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits.
*/
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
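A quick worked example of the rotate, assuming UINT4 really is 32 bits wide (illustration only):
/* ROTATE_LEFT(0x80000001, 1) = 0x00000002 | 0x00000001 = 0x00000003,
   i.e. the bit shifted out at the top re-enters at the bottom. */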
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
Rotation is separate from addition to prevent recomputation.
*/
#define FF(a, b, c, d, x, s, ac) { \
(a) += F ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) { \
(a) += G ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) { \
(a) += H ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) { \
(a) += I ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
/* MD5 initialization. Begins an MD5 operation, writing a new context.
*/
static void MD5_Init (context)
struct md5_ctx *context; /* context */
{
context->count[0] = context->count[1] = 0;
/* Load magic initialization constants.
*/
context->state[0] = 0x67452301;
context->state[1] = 0xefcdab89;
context->state[2] = 0x98badcfe;
context->state[3] = 0x10325476;
}
/* MD5 block update operation. Continues an MD5 message-digest
operation, processing another message block, and updating the
context.
*/
static void MD5_Update (context, input, inputLen)
struct md5_ctx *context; /* context */
unsigned char *input; /* input block */
unsigned int inputLen; /* length of input block */
{
unsigned int i, index, partLen;
/* Compute number of bytes mod 64 */
index = (unsigned int)((context->count[0] >> 3) & 0x3F);
/* Update number of bits */
if ((context->count[0] += ((UINT4)inputLen << 3))
< ((UINT4)inputLen << 3))
context->count[1]++;
context->count[1] += ((UINT4)inputLen >> 29);
partLen = 64 - index;
/* Transform as many times as possible. */
if (inputLen >= partLen) {
MD5_memcpy((void *)&context->buffer[index], (void *)input, partLen);
MD5Transform(context->state, context->buffer);
for (i = partLen; i + 63 < inputLen; i += 64)
MD5Transform(context->state, &input[i]);
index = 0;
}
else
i = 0;
/* Buffer remaining input */
MD5_memcpy((void *)&context->buffer[index], (void *)&input[i],
inputLen-i);
}
/* MD5 finalization. Ends an MD5 message-digest operation, writing the
message digest and zeroizing the context.
*/
static void MD5_Final (digest, context)
unsigned char digest[16]; /* message digest */
struct md5_ctx *context; /* context */
{
unsigned char bits[8];
unsigned int index, padLen;
/* Save number of bits */
Encode (bits, context->count, 8);
/* Pad out to 56 mod 64. */
index = (unsigned int)((context->count[0] >> 3) & 0x3f);
padLen = (index < 56) ? (56 - index) : (120 - index);
MD5_Update (context, PADDING, padLen);
/* Append length (before padding) */
MD5_Update (context, bits, 8);
/* Store state in digest */
Encode (digest, context->state, 16);
/* Zeroize sensitive information. */
MD5_memset ((void *)context, 0, sizeof (*context));
}
/* MD5 basic transformation. Transforms state based on block. */
static void MD5Transform (state, block)
UINT4 state[4];
unsigned char block[64];
{
UINT4 a = state[0], b = state[1], c = state[2], d = state[3], x[16];
Decode (x, block, 64);
/* Round 1 */
FF (a, b, c, d, x[ 0], S11, 0xd76aa478); /* 1 */
FF (d, a, b, c, x[ 1], S12, 0xe8c7b756); /* 2 */
FF (c, d, a, b, x[ 2], S13, 0x242070db); /* 3 */
FF (b, c, d, a, x[ 3], S14, 0xc1bdceee); /* 4 */
FF (a, b, c, d, x[ 4], S11, 0xf57c0faf); /* 5 */
FF (d, a, b, c, x[ 5], S12, 0x4787c62a); /* 6 */
FF (c, d, a, b, x[ 6], S13, 0xa8304613); /* 7 */
FF (b, c, d, a, x[ 7], S14, 0xfd469501); /* 8 */
FF (a, b, c, d, x[ 8], S11, 0x698098d8); /* 9 */
FF (d, a, b, c, x[ 9], S12, 0x8b44f7af); /* 10 */
FF (c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */
FF (b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */
FF (a, b, c, d, x[12], S11, 0x6b901122); /* 13 */
FF (d, a, b, c, x[13], S12, 0xfd987193); /* 14 */
FF (c, d, a, b, x[14], S13, 0xa679438e); /* 15 */
FF (b, c, d, a, x[15], S14, 0x49b40821); /* 16 */
/* Round 2 */
GG (a, b, c, d, x[ 1], S21, 0xf61e2562); /* 17 */
GG (d, a, b, c, x[ 6], S22, 0xc040b340); /* 18 */
GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */
GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa); /* 20 */
GG (a, b, c, d, x[ 5], S21, 0xd62f105d); /* 21 */
GG (d, a, b, c, x[10], S22, 0x2441453); /* 22 */
GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */
GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8); /* 24 */
GG (a, b, c, d, x[ 9], S21, 0x21e1cde6); /* 25 */
GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */
GG (c, d, a, b, x[ 3], S23, 0xf4d50d87); /* 27 */
GG (b, c, d, a, x[ 8], S24, 0x455a14ed); /* 28 */
GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */
GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8); /* 30 */
GG (c, d, a, b, x[ 7], S23, 0x676f02d9); /* 31 */
GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */
/* Round 3 */
HH (a, b, c, d, x[ 5], S31, 0xfffa3942); /* 33 */
HH (d, a, b, c, x[ 8], S32, 0x8771f681); /* 34 */
HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */
HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */
HH (a, b, c, d, x[ 1], S31, 0xa4beea44); /* 37 */
HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9); /* 38 */
HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60); /* 39 */
HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */
HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */
HH (d, a, b, c, x[ 0], S32, 0xeaa127fa); /* 42 */
HH (c, d, a, b, x[ 3], S33, 0xd4ef3085); /* 43 */
HH (b, c, d, a, x[ 6], S34, 0x4881d05); /* 44 */
HH (a, b, c, d, x[ 9], S31, 0xd9d4d039); /* 45 */
HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */
HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */
HH (b, c, d, a, x[ 2], S34, 0xc4ac5665); /* 48 */
/* Round 4 */
II (a, b, c, d, x[ 0], S41, 0xf4292244); /* 49 */
II (d, a, b, c, x[ 7], S42, 0x432aff97); /* 50 */
II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */
II (b, c, d, a, x[ 5], S44, 0xfc93a039); /* 52 */
II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */
II (d, a, b, c, x[ 3], S42, 0x8f0ccc92); /* 54 */
II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */
II (b, c, d, a, x[ 1], S44, 0x85845dd1); /* 56 */
II (a, b, c, d, x[ 8], S41, 0x6fa87e4f); /* 57 */
II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */
II (c, d, a, b, x[ 6], S43, 0xa3014314); /* 59 */
II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */
II (a, b, c, d, x[ 4], S41, 0xf7537e82); /* 61 */
II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */
II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb); /* 63 */
II (b, c, d, a, x[ 9], S44, 0xeb86d391); /* 64 */
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
/* Zeroize sensitive information. */
MD5_memset ((void *)x, 0, sizeof (x));
}
/* Encodes input (UINT4) into output (unsigned char). Assumes len is
a multiple of 4.
*/
static void Encode (unsigned char *output,
UINT4 *input,
unsigned int len)
{
unsigned int i, j;
for (i = 0, j = 0; j < len; i++, j += 4) {
output[j] = (unsigned char)(input[i] & 0xff);
output[j+1] = (unsigned char)((input[i] >> 8) & 0xff);
output[j+2] = (unsigned char)((input[i] >> 16) & 0xff);
output[j+3] = (unsigned char)((input[i] >> 24) & 0xff);
}
}
/* Decodes input (unsigned char) into output (UINT4). Assumes len is
a multiple of 4.
*/
static void Decode (UINT4 *output,
unsigned char *input,
unsigned int len)
{
unsigned int i, j;
for (i = 0, j = 0; j < len; i++, j += 4)
output[i] = ((UINT4)input[j]) | (((UINT4)input[j+1]) << 8) |
(((UINT4)input[j+2]) << 16) | (((UINT4)input[j+3]) << 24);
}
#else
/* If OpenSSL is present */
#include <openssl/md5.h>
#include <string.h>
#endif
void Curl_md5it(unsigned char *outbuffer, /* 16 bytes */
unsigned char *input)
{
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, input, strlen((char *)input));
MD5_Final(outbuffer, &ctx);
}
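
As a quick orientation, here is a minimal caller sketch (not part of the file above): it assumes the Curl_md5it() prototype from md5.h and simply prints the 16-byte digest as 32 hex digits. The print_md5() name is invented for illustration.

/* hypothetical caller: hash a NUL-terminated string and print the digest */
#include <stdio.h>

void print_md5(const char *msg)
{
  unsigned char digest[16];
  int i;

  Curl_md5it(digest, (unsigned char *)msg);  /* fills all 16 bytes */
  for(i = 0; i < 16; i++)
    printf("%02x", digest[i]);
  putchar('\n');
}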


@@ -1,31 +1,29 @@
#ifndef __CA_BUNDLE_H
#define __CA_BUNDLE_H
/*****************************************************************************
#ifndef __MD5_H
#define __MD5_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* In order to be useful for every potential user, curl and libcurl are
* dual-licensed under the MPL and the MIT/X-derivate licenses.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the MPL or the MIT/X-derivate
* licenses. You may pick one of these licenses.
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
*****************************************************************************/
***************************************************************************/
void Curl_md5it(unsigned char *output,
unsigned char *input);
#ifndef CURL_CA_BUNDLE
/* Set this to the full path file name of the ca cert bundle */
#undef CURL_CA_BUNDLE
#endif
#endif /* __CA_BUNDLE_H */


@@ -1,4 +1,4 @@
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
@@ -62,7 +62,10 @@ struct memdebug {
* Don't use these with multithreaded test programs!
*/
FILE *logfile;
#define logfile curl_debuglogfile
FILE *curl_debuglogfile;
static bool memlimit; /* enable memory limit */
static long memsize; /* set number of mallocs allowed */
/* this sets the log file name */
void curl_memdebug(const char *logname)
@@ -73,12 +76,47 @@ void curl_memdebug(const char *logname)
logfile = stderr;
}
/* This function sets the number of malloc() calls that should return
successfully! */
void curl_memlimit(long limit)
{
memlimit = TRUE;
memsize = limit;
}
/* returns TRUE if this isn't allowed! */
static bool countcheck(const char *func, int line, const char *source)
{
/* if source is NULL, then the call is made internally and this check
should not be made */
if(memlimit && source) {
if(!memsize) {
if(logfile && source)
fprintf(logfile, "LIMIT %s:%d %s reached memlimit\n",
source, line, func);
return TRUE; /* RETURN ERROR! */
}
else
memsize--; /* countdown */
/* log the countdown */
if(logfile && source)
fprintf(logfile, "LIMIT %s:%d %ld ALLOCS left\n",
source, line, memsize);
}
return FALSE; /* allow this */
}
void *curl_domalloc(size_t wantedsize, int line, const char *source)
{
struct memdebug *mem;
size_t size;
if(countcheck("malloc", line, source))
return NULL;
/* alloc at least 64 bytes */
size = sizeof(struct memdebug)+wantedsize;
@@ -106,6 +144,9 @@ char *curl_dostrdup(const char *str, int line, const char *source)
exit(2);
}
if(countcheck("strdup", line, source))
return NULL;
len=strlen(str)+1;
mem=curl_domalloc(len, 0, NULL); /* NULL prevents logging */
@@ -125,6 +166,9 @@ void *curl_dorealloc(void *ptr, size_t wantedsize,
size_t size = sizeof(struct memdebug)+wantedsize;
if(countcheck("realloc", line, source))
return NULL;
mem = (struct memdebug *)((char *)ptr - offsetof(struct memdebug, mem));
mem=(struct memdebug *)(realloc)(mem, size);
@@ -220,4 +264,4 @@ int curl_fclose(FILE *file, int line, const char *source)
#ifdef VMS
int VOID_VAR_MEMDEBUG;
#endif
#endif /* MALLOCDEBUG */
#endif /* CURLDEBUG */
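
To illustrate what the new memory-limit hooks are for, here is a hedged sketch of an out-of-memory test driver; it is not part of this commit, the URL is a placeholder, and it assumes a CURLDEBUG build that exports curl_memdebug() and curl_memlimit() as declared in memdebug.h.

/* hypothetical torture driver: allow exactly 'allowed' allocations to
   succeed, run a transfer, and expect a clean failure once the limit
   is hit rather than a crash or a leak */
#include <curl/curl.h>

extern void curl_memdebug(const char *logname);
extern void curl_memlimit(long limit);

static int torture_once(long allowed)
{
  CURL *easy;
  CURLcode rc = CURLE_OUT_OF_MEMORY;

  curl_memdebug("memdump");  /* log every alloc/free to this file */
  curl_memlimit(allowed);    /* allocation number allowed+1 returns NULL */

  easy = curl_easy_init();   /* may already fail under a low limit */
  if(easy) {
    curl_easy_setopt(easy, CURLOPT_URL, "http://127.0.0.1/");
    rc = curl_easy_perform(easy);
    curl_easy_cleanup(easy);
  }
  return (CURLE_OK == rc);   /* nonzero once the limit is high enough */
}

Calling torture_once() with 1, 2, 3, ... allocations would exercise every allocation failure path in turn.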


@@ -1,4 +1,4 @@
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
@@ -39,6 +39,8 @@
#include <memory.h>
#endif
#define logfile curl_debuglogfile
extern FILE *logfile;
/* memory functions */
@@ -47,6 +49,7 @@ void *curl_dorealloc(void *ptr, size_t size, int line, const char *source);
void curl_dofree(void *ptr, int line, const char *source);
char *curl_dostrdup(const char *str, int line, const char *source);
void curl_memdebug(const char *logname);
void curl_memlimit(long limit);
/* file descriptor manipulators */
int curl_socket(int domain, int type, int protocol, int, const char *);


@@ -48,7 +48,7 @@
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -171,6 +171,7 @@ static BOOL dprintf_IsQualifierNoDollar(char c)
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
case 'h': case 'l': case 'L': case 'Z': case 'q':
case '*':
return TRUE;
default:
return FALSE;
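
Accepting '*' as a qualifier suggests that widths and precisions supplied as arguments, printf-style, are tolerated by the formatter. A hedged one-liner of the kind of call this enables, using the public curl_mprintf() from <curl/mprintf.h>; the helper name is invented:

#include <curl/mprintf.h>

/* print at most 'maxlen' characters of 'name'; the precision comes from a
   run-time argument via the '*' qualifier */
int print_prefix(const char *name, int maxlen)
{
  return curl_mprintf("%.*s\n", maxlen, name);
}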


@@ -31,6 +31,9 @@
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <curl/curl.h>
@@ -38,9 +41,10 @@
#include "transfer.h"
#include "url.h"
#include "connect.h"
#include "progress.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -52,7 +56,8 @@ struct Curl_message {
typedef enum {
CURLM_STATE_INIT,
CURLM_STATE_CONNECT, /* connect has been sent off */
CURLM_STATE_CONNECT, /* resolve/connect has been sent off */
CURLM_STATE_WAITRESOLVE, /* we're awaiting the resolve to finalize */
CURLM_STATE_WAITCONNECT, /* we're awaiting the connect to finalize */
CURLM_STATE_DO, /* send off the request (part 1) */
CURLM_STATE_DO_MORE, /* send off the request (part 2) */
@@ -235,6 +240,14 @@ CURLMcode curl_multi_fdset(CURLM *multi_handle,
switch(easy->state) {
default:
break;
case CURLM_STATE_WAITRESOLVE:
/* waiting for a resolve to complete */
Curl_multi_ares_fdset(easy->easy_conn, read_fd_set, write_fd_set,
&this_max_fd);
if(this_max_fd > *max_fd)
*max_fd = this_max_fd;
break;
case CURLM_STATE_WAITCONNECT:
case CURLM_STATE_DO_MORE:
{
@@ -289,6 +302,7 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
CURLMcode result=CURLM_OK;
struct Curl_message *msg = NULL;
bool connected;
bool async;
*running_handles = 0; /* bump this once for every living handle */
@@ -298,7 +312,7 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
easy=multi->easy.next;
while(easy) {
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
fprintf(stderr, "HANDLE %p: State: %x\n",
(char *)easy, easy->state);
#endif
@@ -316,6 +330,7 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
easy->easy_handle->state.used_interface = Curl_if_multi;
}
break;
case CURLM_STATE_CONNECT:
if (Curl_global_host_cache_use(easy->easy_handle)) {
easy->easy_handle->hostcache = Curl_global_host_cache_get();
@@ -328,16 +343,47 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
}
/* Connect. We get a connection identifier filled in. */
easy->result = Curl_connect(easy->easy_handle, &easy->easy_conn);
Curl_pgrsTime(easy->easy_handle, TIMER_STARTSINGLE);
easy->result = Curl_connect(easy->easy_handle, &easy->easy_conn, &async);
/* after the connect has been sent off, go WAITCONNECT */
if(CURLE_OK == easy->result) {
easy->state = CURLM_STATE_WAITCONNECT;
result = CURLM_CALL_MULTI_PERFORM;
if(async)
/* We're now waiting for an asynchronous name lookup */
easy->state = CURLM_STATE_WAITRESOLVE;
else {
/* after the connect has been sent off, go WAITCONNECT */
easy->state = CURLM_STATE_WAITCONNECT;
result = CURLM_CALL_MULTI_PERFORM;
}
}
break;
case CURLM_STATE_WAITRESOLVE:
/* awaiting an asynch name resolve to complete */
{
bool done;
/* check if we have the name resolved by now */
easy->result = Curl_is_resolved(easy->easy_conn, &done);
if(done) {
/* Perform the next step in the connection phase, and then move on
to the WAITCONNECT state */
easy->result = Curl_async_resolved(easy->easy_conn);
easy->state = CURLM_STATE_WAITCONNECT;
}
if(CURLE_OK != easy->result) {
/* failure detected */
easy->easy_conn = NULL; /* no more connection */
break;
}
}
break;
case CURLM_STATE_WAITCONNECT:
/* awaiting a completion of an asynch connect */
{
bool connected;
easy->result = Curl_is_connected(easy->easy_conn,
@@ -435,8 +481,9 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
/* When we follow redirects, we must go back to the CONNECT state */
if(easy->easy_conn->newurl) {
easy->result = Curl_follow(easy->easy_handle,
strdup(easy->easy_conn->newurl));
char *newurl = easy->easy_conn->newurl;
easy->easy_conn->newurl = NULL;
easy->result = Curl_follow(easy->easy_handle, newurl);
if(CURLE_OK == easy->result) {
easy->state = CURLM_STATE_CONNECT;
result = CURLM_CALL_MULTI_PERFORM;
@@ -468,11 +515,12 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
}
if(CURLM_STATE_COMPLETED != easy->state) {
if(CURLE_OK != easy->result)
if(CURLE_OK != easy->result) {
/*
* If an error was returned, and we aren't in completed state now,
* then we go to completed and consider this transfer aborted. */
easy->state = CURLM_STATE_COMPLETED;
}
else
/* this one still lives! */
(*running_handles)++;
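
For context, this is the application-side loop that curl_multi_fdset() and curl_multi_perform() are designed to serve; a minimal sketch using only the public multi API, not code from this change. While a handle sits in the new CURLM_STATE_WAITRESOLVE state, the descriptors returned by curl_multi_fdset() now include the resolver's sockets as well.

#include <curl/curl.h>
#include <sys/select.h>

static void drive(CURLM *multi)
{
  int running = 1;

  while(running) {
    fd_set rd, wr, ex;
    int maxfd = -1;
    struct timeval tv = { 1, 0 };  /* arbitrary one-second timeout */

    /* keep calling perform as long as it asks to be called again */
    while(CURLM_CALL_MULTI_PERFORM == curl_multi_perform(multi, &running))
      ;

    FD_ZERO(&rd);
    FD_ZERO(&wr);
    FD_ZERO(&ex);
    curl_multi_fdset(multi, &rd, &wr, &ex, &maxfd);
    if(maxfd >= 0)
      select(maxfd + 1, &rd, &wr, &ex, &tv);
  }
}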


@@ -46,7 +46,7 @@
#include "strtok.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -119,7 +119,7 @@ int Curl_parsenetrc(char *host,
sprintf(netrcbuffer, "%s%s%s", home, DIR_CHAR, NETRC);
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
{
/* This is a hack to allow testing.
* If compiled with --enable-debug and CURL_DEBUG_NETRC is defined,
@@ -141,7 +141,7 @@ int Curl_parsenetrc(char *host,
free(override);
}
}
#endif /* MALLOCDEBUG */
#endif /* CURLDEBUG */
file = fopen(netrcbuffer, "r");
if(file) {


@@ -172,18 +172,20 @@ void Curl_pgrsSetUploadCounter(struct SessionHandle *data, double size)
void Curl_pgrsSetDownloadSize(struct SessionHandle *data, double size)
{
if(size > 0) {
data->progress.size_dl = size;
data->progress.size_dl = size;
if(size > 0)
data->progress.flags |= PGRS_DL_SIZE_KNOWN;
}
else
data->progress.flags &= ~PGRS_DL_SIZE_KNOWN;
}
void Curl_pgrsSetUploadSize(struct SessionHandle *data, double size)
{
if(size > 0) {
data->progress.size_ul = size;
data->progress.size_ul = size;
if(size > 0)
data->progress.flags |= PGRS_UL_SIZE_KNOWN;
}
else
data->progress.flags &= ~PGRS_UL_SIZE_KNOWN;
}
/* EXAMPLE OUTPUT to follow:


@@ -60,7 +60,7 @@
#include "ftp.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif


@@ -46,6 +46,7 @@
#include <curl/curl.h>
#include "urldata.h"
#include "sendf.h"
#include "connect.h" /* for the Curl_ourerrno() proto */
#define _MPRINTF_REPLACE /* use the internal *printf() functions */
#include <curl/mprintf.h>
@@ -55,7 +56,7 @@
#endif
#include <string.h>
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -93,7 +94,6 @@ struct curl_slist *curl_slist_append(struct curl_slist *list,
new_item->data = strdup(data);
}
if (new_item == NULL || new_item->data == NULL) {
fprintf(stderr, "Cannot allocate memory for QUOTE list.\n");
return NULL;
}
@@ -132,8 +132,8 @@ void curl_slist_free_all(struct curl_slist *list)
void Curl_infof(struct SessionHandle *data, const char *fmt, ...)
{
va_list ap;
if(data->set.verbose) {
if(data && data->set.verbose) {
va_list ap;
char print_buffer[1024 + 1];
va_start(ap, fmt);
vsnprintf(print_buffer, 1024, fmt, ap);
@@ -228,6 +228,8 @@ CURLcode Curl_write(struct connectdata *conn, int sockfd,
ssize_t *written)
{
ssize_t bytes_written;
CURLcode retcode;
(void)conn;
#ifdef USE_SSLEAY
/* SSL_write() is said to return 'int' while write() and send() returns
@@ -242,12 +244,28 @@ CURLcode Curl_write(struct connectdata *conn, int sockfd,
switch(err) {
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
/* this is basically the EWOULDBLOCK equivalent */
/* The operation did not complete; the same TLS/SSL I/O function
should be called again later. This is basically an EWOULDBLOCK
equivalent. */
*written = 0;
return CURLE_OK;
case SSL_ERROR_SYSCALL:
failf(conn->data, "SSL_write() returned SYSCALL, errno = %d\n", errno);
failf(conn->data, "SSL_write() returned SYSCALL, errno = %d\n",
Curl_ourerrno());
return CURLE_SEND_ERROR;
case SSL_ERROR_SSL:
{
/* A failure in the SSL library occurred, usually a
protocol error. The OpenSSL error queue contains more
information on the error. */
char error_buffer[120]; /* OpenSSL documents that this must be at least
120 bytes long. */
int sslerror = ERR_get_error();
failf(conn->data, "SSL_write() error: %s\n",
ERR_error_string(sslerror, error_buffer));
return CURLE_SEND_ERROR;
}
break;
}
/* a true error */
failf(conn->data, "SSL_write() return error %d\n", err);
@@ -267,27 +285,31 @@ CURLcode Curl_write(struct connectdata *conn, int sockfd,
bytes_written = swrite(sockfd, mem, len);
}
if(-1 == bytes_written) {
#ifdef WIN32
if(WSAEWOULDBLOCK == GetLastError())
int err = Curl_ourerrno();
if(
#ifdef WSAEWOULDBLOCK
/* This is how Windows does it */
(WSAEWOULDBLOCK == err)
#else
/* As pointed out by Christophe Demory on March 11 2003, errno
may be EWOULDBLOCK or on some systems EAGAIN when it returned
due to its inability to send off data without blocking. We
therefore treat both error codes the same here */
if((EWOULDBLOCK == errno) || (EAGAIN == errno))
/* As pointed out by Christophe Demory on March 11 2003, errno
may be EWOULDBLOCK or on some systems EAGAIN when it returned
due to its inability to send off data without blocking. We
therefore treat both error codes the same here */
(EWOULDBLOCK == err) || (EAGAIN == err) || (EINTR == err)
#endif
{
)
/* this is just a case of EWOULDBLOCK */
*written=0;
return CURLE_OK;
}
bytes_written=0;
}
#ifdef USE_SSLEAY
}
#endif
*written = bytes_written;
return (-1 != bytes_written)?CURLE_OK:CURLE_SEND_ERROR;
retcode = (-1 != bytes_written)?CURLE_OK:CURLE_SEND_ERROR;
return retcode;
}
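
The retry classification above is worth isolating; a condensed sketch follows. The would_block() name is invented, and Curl_ourerrno() is assumed (per the surrounding change) to return the Windows socket error on WIN32 and errno elsewhere.

#ifdef WIN32
#include <winsock2.h>   /* WSAEWOULDBLOCK */
#else
#include <errno.h>      /* EWOULDBLOCK, EAGAIN, EINTR */
#endif

/* nonzero when a failed send/recv should simply be retried later */
static int would_block(int err)
{
#ifdef WSAEWOULDBLOCK
  return (WSAEWOULDBLOCK == err);
#else
  return (EWOULDBLOCK == err) || (EAGAIN == err) || (EINTR == err);
#endif
}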
/* client_write() sends data to the write callback(s)
@@ -345,6 +367,7 @@ int Curl_read(struct connectdata *conn,
ssize_t *n)
{
ssize_t nread;
(void)conn;
*n=0; /* reset amount to zero */
#ifdef USE_SSLEAY
@@ -363,6 +386,17 @@ int Curl_read(struct connectdata *conn,
case SSL_ERROR_WANT_WRITE:
/* there's data pending, re-invoke SSL_read() */
return -1; /* basically EWOULDBLOCK */
case SSL_ERROR_SYSCALL:
/* openssl/ssl.h says "look at error stack/return value/errno" */
{
char error_buffer[120]; /* OpenSSL documents that this must be at least
120 bytes long. */
int sslerror = ERR_get_error();
failf(conn->data, "SSL read: %s, errno %d",
ERR_error_string(sslerror, error_buffer),
Curl_ourerrno() );
}
return CURLE_RECV_ERROR;
default:
failf(conn->data, "SSL read error: %d", err);
return CURLE_RECV_ERROR;
@@ -379,10 +413,11 @@ int Curl_read(struct connectdata *conn,
nread = sread (sockfd, buf, buffersize);
if(-1 == nread) {
int err = Curl_ourerrno();
#ifdef WIN32
if(WSAEWOULDBLOCK == GetLastError())
if(WSAEWOULDBLOCK == err)
#else
if(EWOULDBLOCK == errno)
if((EWOULDBLOCK == err) || (EAGAIN == err) || (EINTR == err))
#endif
return -1;
}
@@ -408,6 +443,7 @@ int Curl_debug(struct SessionHandle *data, curl_infotype type,
switch(type) {
case CURLINFO_TEXT:
case CURLINFO_HEADER_OUT:
case CURLINFO_HEADER_IN:
fwrite(s_infotype[type], 2, 1, data->set.err);
fwrite(ptr, size, 1, data->set.err);
break;
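
Since the default handler above now also forwards CURLINFO_HEADER_IN, an application that installs its own debug callback sees the same types. A sketch using the standard CURLOPT_DEBUGFUNCTION mechanism (public libcurl API, not introduced by this change):

#include <curl/curl.h>
#include <stdio.h>

static int my_trace(CURL *handle, curl_infotype type, char *data,
                    size_t size, void *userp)
{
  (void)handle;
  (void)userp;
  if(CURLINFO_HEADER_IN == type || CURLINFO_HEADER_OUT == type)
    fwrite(data, size, 1, stderr);   /* dump headers in both directions */
  return 0;
}

/* installed (for example) with:
     curl_easy_setopt(easy, CURLOPT_DEBUGFUNCTION, my_trace);
     curl_easy_setopt(easy, CURLOPT_VERBOSE, 1);  */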


@@ -135,20 +135,40 @@ defined(HAVE_LIBSSL) && defined(HAVE_LIBCRYPTO)
#define HAVE_ALARM
#endif
#define PATH_CHAR ";"
#define DIR_CHAR "\\"
#define DOT_CHAR "_"
#else
#ifdef DJGPP
#define sclose(x) close_s(x)
#define sread(x,y,z) read_s(x,y,z)
#define swrite(x,y,z) write_s(x,y,z)
#define select(n,r,w,x,t) select_s(n,r,w,x,t)
#define ioctl(x,y,z) ioctlsocket(x,y,(char *)(z))
#define IOCTL_3_ARGS
#include <tcp.h>
#ifdef word
#undef word
#endif
#else
#define sclose(x) close(x)
#define sread(x,y,z) recv(x,y,z,0)
#define swrite(x,y,z) send(x,y,z,0)
#define HAVE_ALARM
#define PATH_CHAR ":"
#endif
#define DIR_CHAR "/"
#define DOT_CHAR "."
#ifdef DJGPP
#undef DOT_CHAR
#define DOT_CHAR "_"
#endif
#ifdef HAVE_STRCASECMP
/* this is for "-ansi -Wall -pedantic" to stop complaining! */
extern int (strcasecmp)(const char *s1, const char *s2);
@@ -165,7 +185,7 @@ int fileno( FILE *stream);
* Information regarding a single IP within a Curl_addrinfo MUST be stored in
* a Curl_ipconnect struct.
*/
#ifdef ENABLE_IPV6
#if defined(ENABLE_IPV6) && !defined(USE_ARES)
typedef struct addrinfo Curl_addrinfo;
typedef struct addrinfo Curl_ipconnect;
#else
@@ -173,6 +193,21 @@ typedef struct hostent Curl_addrinfo;
typedef struct in_addr Curl_ipconnect;
#endif
#if 0
#if (SIZEOF_OFF_T > 4)
/* off_t is bigger than 4 bytes, and that makes it our preferred variable
type for filesizes */
typedef off_t filesize_t;
#else
#ifdef HAVE_LONGLONG
/* we have long long, use this for filesizes internally */
typedef long long filesize_t;
#else
/* small off_t and no long long, no support for large files :-( */
typedef long filesize_t;
#endif /* didn't have long long */
#endif /* sizeof wasn't bigger than 4 */
#endif /* 0 */
#endif /* __CONFIG_H */
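
The disabled #if 0 block above picks a filesize_t type by preferring a wide off_t and falling back to long long. A standalone sketch (not curl code) that reports which branch a given platform would take:

#include <sys/types.h>
#include <stdio.h>

int main(void)
{
  if(sizeof(off_t) > 4)
    puts("off_t is wider than 32 bits: filesize_t could simply be off_t");
  else
    puts("off_t is 32 bits: filesize_t would need long long, if available");
  return 0;
}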

Some files were not shown because too many files have changed in this diff.