Compare commits


919 Commits

Author SHA1 Message Date
Daniel Stenberg
5c2df3e1a4 7.10.7 2003-08-15 07:08:02 +00:00
Daniel Stenberg
6fc55467f4 removed lots of "added in [version]" where [version] is reasonably old 2003-08-15 06:35:41 +00:00
Daniel Stenberg
a147a07956 check for long long
changed the use of AC_CHECK_TYPE as the previous approach is deprecated
require 2.57 properly
2003-08-14 22:44:06 +00:00
Daniel Stenberg
a10581d459 Possible code for large file support, added within #if 0 so far. 2003-08-14 22:42:18 +00:00
Daniel Stenberg
cc2d6942bb new Russian mirror both web and download 2003-08-14 22:38:35 +00:00
Daniel Stenberg
3974c02bb1 support for the new memlimit stuff 2003-08-14 22:38:03 +00:00
Daniel Stenberg
09b5ddaea5 added one "added in blabla" and removed a few 2003-08-14 22:00:56 +00:00
Daniel Stenberg
acbcd68d89 Curl_SSL_InitSessions can return error, so check the return code and bail
out if necessary
2003-08-14 15:06:36 +00:00
Daniel Stenberg
4281470fca Curl_llist_destroy() checks the input for non-NULL 2003-08-14 15:06:08 +00:00
Daniel Stenberg
68a4aa6773 new proto for Curl_hash_init 2003-08-14 15:05:25 +00:00
Daniel Stenberg
905b160097 1. check allocs
2. don't leave allocated memory behind when returning error
2003-08-14 15:05:13 +00:00
Daniel Stenberg
52596c339b return failure when the host cache creation fails 2003-08-14 15:02:25 +00:00
Daniel Stenberg
73500267ee activate the new memory limit tests if requested
only set cookiejar if selected
2003-08-14 15:01:52 +00:00
Daniel Stenberg
e6011e33a6 return failure when an alloc function fails 2003-08-14 15:01:20 +00:00
Daniel Stenberg
3454319c17 prevent memory leak when going out of memory 2003-08-14 14:20:03 +00:00
Daniel Stenberg
02c78ecf81 allow out-of-memory testing by setting a limit. That number of memory
allocation calls will succeed; the following ones will return NULL!
2003-08-14 14:19:36 +00:00
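
The memory-limit commits above describe a simple technique: let a fixed number of allocation calls succeed and fail every one after that, so out-of-memory code paths can be exercised deterministically. Below is a minimal, hypothetical C sketch of that idea; `set_alloc_limit()` and `limited_malloc()` are illustrative names, not curl's actual memdebug interface.

```c
#include <stdlib.h>

/* Hypothetical allocation budget: how many malloc() calls may still
   succeed. A negative value means "no limit". */
static long alloc_budget = -1;

/* Arm the limiter: after 'limit' successful allocations, fail the rest. */
void set_alloc_limit(long limit)
{
  alloc_budget = limit;
}

/* Wrapper used instead of plain malloc() in code under test. Once the
   budget is used up it returns NULL, so out-of-memory paths get covered. */
void *limited_malloc(size_t size)
{
  if(alloc_budget == 0)
    return NULL;          /* simulated out-of-memory */
  if(alloc_budget > 0)
    alloc_budget--;       /* one more successful allocation spent */
  return malloc(size);
}
```

A test harness can then run the same operation with the limit set to 1, 2, 3, ... until it completes, hitting each failure point along the way.
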
Daniel Stenberg
caca034302 better freeing when bailing out due to bad output glob 2003-08-14 13:38:19 +00:00
Daniel Stenberg
fb366ed35f free data on failure 2003-08-14 13:37:55 +00:00
Daniel Stenberg
b352ffca15 test87 verifies the new and better check for bad -o #[num] stuff 2003-08-14 13:37:32 +00:00
Daniel Stenberg
2d94856efd ignore the .pid files 2003-08-14 13:01:07 +00:00
Daniel Stenberg
ae66bd1284 ignore lib506 too 2003-08-14 13:00:34 +00:00
Daniel Stenberg
89d9d4e6c8 better report on why tests are skipped, and also show a count of the number
of test cases that were "considered".
2003-08-14 12:59:54 +00:00
Daniel Stenberg
fe60fc4730 In case the output urlglob file name returned is NULL, then there was
badness in the string and we help our user by returning an error.
2003-08-14 11:53:53 +00:00
Daniel Stenberg
46690d5e1c modified the #[num] code to be more robust, to return NULL on errors and
to support numbers larger than 9
2003-08-14 11:53:09 +00:00
Daniel Stenberg
beaea8cb25 corrected this test case 2003-08-14 11:51:22 +00:00
Daniel Stenberg
409ec90c85 test urlglobbing range requests 2003-08-14 11:50:58 +00:00
Daniel Stenberg
4d423eeabe test86 added 2003-08-14 11:50:40 +00:00
Daniel Stenberg
019e612225 David Byron's fix that makes this script use 'cygpath' instead of 'pwd' if
this runs on windows, to find out the current working directory.
2003-08-12 21:18:39 +00:00
Daniel Stenberg
6550d271f0 7.10.7-pre4 commit 2003-08-12 12:48:40 +00:00
Daniel Stenberg
c46da65263 nicer make target for the pdf conversion 2003-08-12 09:08:05 +00:00
Daniel Stenberg
b46745759b don't treat index.html as the generated HTML pages 2003-08-12 08:58:46 +00:00
Daniel Stenberg
9687571a84 added the new man pages 2003-08-12 08:58:23 +00:00
Daniel Stenberg
c13236de25 corrected return type 2003-08-12 08:58:15 +00:00
Daniel Stenberg
8ffbb6acd4 added the new curl_share_* man pages, the libcurl-easy, the libcurl-share,
made the generated pdf and html files get removed on 'make clean'. Made
the pdf conversion remove the temporary .ps files.
2003-08-12 08:51:23 +00:00
Daniel Stenberg
a3e5d81765 separated the easy-specific stuff into a new libcurl-easy.3 man page and
made the libcurl.3 one a more generic overview
2003-08-12 08:46:02 +00:00
Daniel Stenberg
e2aecfe80f added the asynchdns bit 2003-08-12 08:26:38 +00:00
Daniel Stenberg
a3c1248214 Bugfix from Serge Semashko that fixes a bug introduced when we applied his
NTLM patch. Test case 84 and 85 verify this.
2003-08-12 08:20:16 +00:00
Daniel Stenberg
b933639222 more auth tests 2003-08-12 08:19:23 +00:00
Daniel Stenberg
27619fc450 Added support for CURLINFO_HTTP_CONNECTCODE 2003-08-11 23:15:41 +00:00
Daniel Stenberg
96fecba190 bindlocal works for Windows! 2003-08-11 23:15:13 +00:00
Daniel Stenberg
50257d4f50 Check CURL_VERSION_ASYNCHDNS for feature output 2003-08-11 23:13:41 +00:00
Daniel Stenberg
3eb4ae031c Set the CURL_VERSION_ASYNCHDNS bit if USE_ARES is defined. 2003-08-11 23:13:09 +00:00
Daniel Stenberg
6a4ec3be81 Added CURLINFO_HTTP_CONNECTCODE
Added CURL_VERSION_ASYNCHDNS
2003-08-11 23:12:46 +00:00
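
The commit above adds two public libcurl symbols: CURLINFO_HTTP_CONNECTCODE (the response code of the proxy CONNECT request) and the CURL_VERSION_ASYNCHDNS feature bit. A minimal usage sketch; the URL and proxy address are placeholders.

```c
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  /* Feature bits: check whether this libcurl was built with ares-based
     asynchronous name resolving (CURL_VERSION_ASYNCHDNS). */
  curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
  if(info->features & CURL_VERSION_ASYNCHDNS)
    printf("asynchronous DNS is available\n");

  CURL *curl = curl_easy_init();
  if(curl) {
    long connectcode = 0;
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    curl_easy_setopt(curl, CURLOPT_PROXY, "proxy.example.com:3128"); /* placeholder */
    if(curl_easy_perform(curl) == CURLE_OK) {
      /* The response code of the proxy CONNECT request itself. */
      curl_easy_getinfo(curl, CURLINFO_HTTP_CONNECTCODE, &connectcode);
      printf("CONNECT response code: %ld\n", connectcode);
    }
    curl_easy_cleanup(curl);
  }
  return 0;
}
```
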
Daniel Stenberg
cc9ac6ad14 mention curl_version_info 2003-08-11 23:07:38 +00:00
Daniel Stenberg
644990a835 mention the pre3 release 2003-08-11 22:48:26 +00:00
Daniel Stenberg
d3b81ea3f7 Vincent Sanders's massive update of this example code. One could argue
whether this is still an "example" or a whole new API layer! ;-)
2003-08-11 21:34:52 +00:00
Daniel Stenberg
3660f67534 edits 2003-08-11 16:17:51 +00:00
Daniel Stenberg
203cc4a5c3 two more known bugs 2003-08-11 15:15:25 +00:00
Daniel Stenberg
c7be232fee added include "http.h" to prevent a warning 2003-08-11 14:55:30 +00:00
Daniel Stenberg
2617b379be define USE_ARES nicer if enabled 2003-08-11 13:18:06 +00:00
Daniel Stenberg
84ed5e755a use safefree instead 2003-08-11 12:30:21 +00:00
Daniel Stenberg
2f17615790 forgot the backslash 2003-08-11 12:26:18 +00:00
Daniel Stenberg
acfa131c8c memory leak fixed when re-using connections with proxy user+passwd 2003-08-11 12:25:30 +00:00
Daniel Stenberg
793d0e27e1 --proxy-ntlm added 2003-08-11 12:23:55 +00:00
Daniel Stenberg
fdf0c443c3 81 + 82 test NTLM proxy stuff 2003-08-11 12:23:33 +00:00
Daniel Stenberg
1b39b53321 remodeled the help text to avoid those annoying puts() problems when a
string reaches > 512 bytes...
2003-08-11 12:04:46 +00:00
Daniel Stenberg
1679993e3b CURLOPT_PROXYAUTH explained 2003-08-11 11:54:14 +00:00
Daniel Stenberg
4c831f8b68 CURLOPT_PROXYAUTH added by Serge Semashko 2003-08-11 11:48:01 +00:00
Daniel Stenberg
7a19923afa Serge Semashko added CURLOPT_PROXYAUTH support, and now NTLM for proxies
work.
2003-08-11 11:47:45 +00:00
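
With CURLOPT_PROXYAUTH an application selects which authentication method(s) to use against the proxy, separately from the remote host. A minimal sketch; the proxy address and credentials are placeholders.

```c
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
    /* Talk through an NTLM-authenticating proxy. */
    curl_easy_setopt(curl, CURLOPT_PROXY, "proxy.example.com:8080");
    curl_easy_setopt(curl, CURLOPT_PROXYUSERPWD, "user:secret");
    /* CURLOPT_PROXYAUTH picks the auth method(s) for the proxy,
       analogous to CURLOPT_HTTPAUTH for the remote host. */
    curl_easy_setopt(curl, CURLOPT_PROXYAUTH, (long)CURLAUTH_NTLM);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
```
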
Daniel Stenberg
3e122a765d Christian Beutenmueller corrected the CURLOPT_FILE reference, as we nowadays
call it CURLOPT_WRITEDATA.
2003-08-11 11:29:33 +00:00
Daniel Stenberg
d873ba8c9f added test80 2003-08-11 11:09:26 +00:00
Daniel Stenberg
8093338f39 tunnel through proxy, with both proxy and regular authentication 2003-08-11 11:09:03 +00:00
Daniel Stenberg
07660eea1e -Z and -@ no longer work; they are now officially available for other, more
frequently used options in a future release
2003-08-11 10:34:25 +00:00
Daniel Stenberg
a2b2d4cd5c added test 79, a basic test that fetches an FTP URL over an HTTP proxy 2003-08-11 10:12:35 +00:00
Daniel Stenberg
96e217b496 the new cookie functions that require 'data' passed in 2003-08-11 09:56:06 +00:00
Daniel Stenberg
2dd1518d63 support sending off cookies without contents 2003-08-11 09:55:48 +00:00
Daniel Stenberg
168703b7bf Added some infof() calls that require the data pointer, so now several
cookie functions need that.

I also fixed the cookie loader to properly load and deal with cookies without
contents (or rather with a blank content).
2003-08-11 09:55:11 +00:00
Daniel Stenberg
0f2d680f1f added Dirk Manske 2003-08-11 07:30:24 +00:00
Daniel Stenberg
b7930b6ebd removed the dashes 2003-08-11 07:28:42 +00:00
Daniel Stenberg
8fa43b469a documenting the share interface 2003-08-11 07:25:02 +00:00
Daniel Stenberg
894e52f61a removed the BUGS section 2003-08-11 07:24:21 +00:00
Daniel Stenberg
3c294691aa remove the BUGS section 2003-08-11 07:23:49 +00:00
Daniel Stenberg
acbf932861 fix lines that start with " 2003-08-11 07:23:19 +00:00
Daniel Stenberg
26f5c53be8 test case 506 added, written by Dirk Manske 2003-08-11 06:44:46 +00:00
Daniel Stenberg
8dd069604c Dirk Manske's bugfix for the share stuff 2003-08-11 06:30:02 +00:00
Daniel Stenberg
5dadbd094e don't claim the PASV connect is connected unless it *really* is! 2003-08-10 17:11:41 +00:00
Daniel Stenberg
514a8739b6 make sure the string is long enough 2003-08-08 17:56:47 +00:00
Daniel Stenberg
12e78a082e Gisle Vanem fixed a single-byte overflow 2003-08-08 17:18:21 +00:00
Daniel Stenberg
9273096a8a David Byron's fix for file:// URLs with drive letters included. 2003-08-08 17:12:04 +00:00
Daniel Stenberg
686c6133f8 chmod the cabundle file before we attempt to write to it, to make
make distcheck run fine
2003-08-08 11:13:18 +00:00
Daniel Stenberg
1d1276cc3a ftp create dirs work done 2003-08-08 11:05:18 +00:00
Daniel Stenberg
d987676ef0 added CURLOPT_FTP_CREATE_MISSING_DIRS 2003-08-08 11:04:35 +00:00
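
A minimal sketch of an FTP upload using the new option; the URL and local file name are placeholders. With CURLOPT_FTP_CREATE_MISSING_DIRS enabled, libcurl creates directories that are missing instead of failing the transfer.

```c
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  FILE *src = fopen("local.dat", "rb"); /* placeholder local file */
  CURL *curl = curl_easy_init();
  if(curl && src) {
    /* Upload to a path whose directories may not exist yet. */
    curl_easy_setopt(curl, CURLOPT_URL,
                     "ftp://ftp.example.com/new/dirs/upload.dat");
    curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
    curl_easy_setopt(curl, CURLOPT_READDATA, src);
    curl_easy_setopt(curl, CURLOPT_FTP_CREATE_MISSING_DIRS, 1L);
    curl_easy_perform(curl);
  }
  if(curl)
    curl_easy_cleanup(curl);
  if(src)
    fclose(src);
  return 0;
}
```
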
Daniel Stenberg
6e4658c89d ftp-create-dirs test when MKD fails 2003-08-08 10:32:08 +00:00
Daniel Stenberg
b7cbcf7434 --ftp-create-dirs 2003-08-08 10:26:08 +00:00
Daniel Stenberg
e347d06a49 introducing --ftp-create_dirs 2003-08-08 10:24:13 +00:00
Daniel Stenberg
2077e9365a --ftp-create-dirs test 2003-08-08 10:23:46 +00:00
Daniel Stenberg
6e3adc9b14 Support COUNT in the control file, to set the number of times the custom
REPLY is to be sent back before getting blanked and reverted to the built-in
action. Now, we can make CWD fail once and then succeed when retried.
2003-08-08 10:21:47 +00:00
Daniel Stenberg
7954eee639 re-arranged the cwd/mkd stuff a bit 2003-08-08 09:55:16 +00:00
Daniel Stenberg
f9f1f0e316 Early Ehlinger's CURLOPT_FTP_CREATE_MISSING_DIRS patch was applied 2003-08-08 09:13:19 +00:00
Daniel Stenberg
a9afe6aa84 new -z tests 2003-08-08 08:13:11 +00:00
Daniel Stenberg
6d36796135 corrected main meta data title 2003-08-08 07:07:15 +00:00
Daniel Stenberg
9e81fd5703 added CLEANFILES to make distcheck run fine 2003-08-07 14:14:54 +00:00
Daniel Stenberg
609059b6ec infilesize must be a long to work on 64bit archs 2003-08-07 13:20:58 +00:00
Daniel Stenberg
6af73f417a use 644 for the chmod 2003-08-07 06:43:11 +00:00
Daniel Stenberg
32468a0072 argh, it wasn't *that* easy to generate the ca-bundle header in the build
dir instead of the source dir, reverting that change
2003-08-06 23:59:15 +00:00
Daniel Stenberg
6800c45104 fixed syntax error 2003-08-06 23:56:24 +00:00
Daniel Stenberg
0d8c754ffd better cleaning up of memory in case of failures in the get-loop (it was
taken care of by the exit-free anyway but caused test cases 75 and 76 to
report memory leaks).

Also re-indented a small section.
2003-08-06 23:48:08 +00:00
Daniel Stenberg
1b80276496 better cleaning up allocated memory in case of failures 2003-08-06 23:47:01 +00:00
Daniel Stenberg
bf9a138276 more tests 2003-08-06 23:45:59 +00:00
Daniel Stenberg
b3f9c636b9 new urlglob test 2003-08-06 23:10:58 +00:00
Daniel Stenberg
18975d44a6 minor cleanup 2003-08-06 23:10:36 +00:00
Daniel Stenberg
b201db5cec explain more how the test case number awareness is sent to the test server(s) 2003-08-06 22:47:55 +00:00
Daniel Stenberg
bbe23945e4 fix the treatment of the variable width specifier '*', which caused a bug
in the urlglobbing just now, fixed in the debian bug tracker as Bug#203827
2003-08-06 22:32:47 +00:00
Daniel Stenberg
bbdc0394ff make an uninstall hook in the same manner we already did an install hook
as otherwise it will make distcheck fail
2003-08-06 22:15:12 +00:00
Daniel Stenberg
38a9b14965 chmod the hugehelp.c in the dist hook to make distcheck run fine 2003-08-06 22:14:39 +00:00
Daniel Stenberg
77ba0d3686 generate the ca-bundle.h in the build dir, and also make sure to chmod
the file in the dist-hook to make distcheck run fine
2003-08-06 22:14:16 +00:00
Daniel Stenberg
065c8d7a95 Domenico Andreoli fixed the section number in the main meta data 2003-08-06 21:23:42 +00:00
Daniel Stenberg
c704d1545c include "share.h" for the cookie sharing 2003-08-06 15:26:24 +00:00
Daniel Stenberg
62b65a5f20 make it build without ares support
make sure it sets async to false even when using ipv6 (made test case 20 fail
before)
2003-08-06 15:26:02 +00:00
Daniel Stenberg
665a7a3848 505 was missing 2003-08-06 13:49:20 +00:00
Daniel Stenberg
256b9f31e1 more fix 2003-08-06 13:22:29 +00:00
Daniel Stenberg
a3037e1173 updated the ares instruction 2003-08-06 13:21:19 +00:00
Daniel Stenberg
f3e7a5d755 LDFLAGS fix to make the GSSAPI build again 2003-08-05 15:22:15 +00:00
Daniel Stenberg
5f0cba7775 added README.ares 2003-08-05 14:54:15 +00:00
Daniel Stenberg
673759fe7e how to build with ares 2003-08-05 14:52:31 +00:00
Daniel Stenberg
b73612392d ares awareness/usage/support added. If configure --enable-ares is used, we
build libcurl to use ares for asynch name resolves.
2003-08-05 14:40:59 +00:00
Daniel Stenberg
f85935f0f9 Add --enable-ares support, which will make us build curl with ares for
asynch name resolves. Still very experimental, beware!
2003-08-05 13:37:29 +00:00
Daniel Stenberg
1e7e53c87e clean up the dir tree hierarchy in *_done() to make persistent connection
FTP use the correct directories!

Reported in bug report #783116
2003-08-05 13:04:10 +00:00
Daniel Stenberg
b9fdf3cc3b added test 146 for an ftp persistence test, as reported on the list 2003-08-05 13:00:00 +00:00
Daniel Stenberg
c462601362 persistent connection test 2003-08-05 12:59:23 +00:00
Daniel Stenberg
859877dcfc auth problems 2003-08-05 12:32:02 +00:00
Daniel Stenberg
c04ce95106 cleaned up after David Byron's comment on the libcurl list, aug 5 2003 2003-08-04 23:13:39 +00:00
Daniel Stenberg
98ee12bc35 Jan Sundin reported a case where curl ignored a cookie that browsers don't,
which turned out to be due to the number of dots in the 'domain'. I've now
made curl follow the original netscape cookie spec less strictly on that
part.
2003-08-04 23:05:57 +00:00
Daniel Stenberg
fdda786fa2 added test 73 2003-08-04 22:58:06 +00:00
Daniel Stenberg
831be4f4dd Verifies Jan Sundin's cookie bug, dated aug 4 2003. 2003-08-04 22:57:58 +00:00
Daniel Stenberg
41ae97e710 Dirk Manske's patch that introduces cookie support to the share interface. 2003-08-04 15:02:42 +00:00
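
With cookie data added to the share interface, several easy handles can read and write one common cookie store. A minimal single-threaded sketch (a multi-threaded program would also install CURLSHOPT_LOCKFUNC/UNLOCKFUNC callbacks); the URLs are placeholders.

```c
#include <curl/curl.h>

int main(void)
{
  /* One share object holds the cookies; every easy handle that sets
     CURLOPT_SHARE to it sees (and adds to) the same cookie store. */
  CURLSH *share = curl_share_init();
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);

  CURL *first = curl_easy_init();
  CURL *second = curl_easy_init();
  if(first && second) {
    curl_easy_setopt(first, CURLOPT_URL, "http://example.com/login");
    curl_easy_setopt(first, CURLOPT_COOKIEFILE, ""); /* enable cookie engine */
    curl_easy_setopt(first, CURLOPT_SHARE, share);
    curl_easy_perform(first);

    /* The second handle reuses cookies collected by the first one. */
    curl_easy_setopt(second, CURLOPT_URL, "http://example.com/account");
    curl_easy_setopt(second, CURLOPT_COOKIEFILE, "");
    curl_easy_setopt(second, CURLOPT_SHARE, share);
    curl_easy_perform(second);
  }
  if(first)
    curl_easy_cleanup(first);
  if(second)
    curl_easy_cleanup(second);
  curl_share_cleanup(share);
  return 0;
}
```
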
Daniel Stenberg
f72ba7f79d Mark Fletcher provided an excellent bug report that identified a problem
with FOLLOWLOCATION and chunked transfer-encoding, as libcurl would not
properly ignore the body contents of a 3XX response that included the
Location: header.
2003-08-03 22:18:14 +00:00
Daniel Stenberg
296046510b serious info leakage! 2003-08-03 21:33:25 +00:00
Daniel Stenberg
db9f87f697 When proxy authentication is used in a CONNECT request (as used for all SSL
connects and otherwise enforced tunnel-thru-proxy requests), the same
authentication header is also wrongly sent to the remote host.

The name and password can then be captured by an evil host and possibly get
used for malicious purposes.
2003-08-02 23:36:35 +00:00
Daniel Stenberg
3270ea55dd updated as the second proxy-auth header was proof of a serious info leak
bug!!
2003-08-02 23:35:59 +00:00
Daniel Stenberg
a358ac24f4 Joerg Mueller-Tolk fixed a minor mistake 2003-08-01 14:20:48 +00:00
Daniel Stenberg
8bedd43b28 recent action 2003-08-01 12:33:19 +00:00
Daniel Stenberg
9ea2087ede David Byron's makefile fix to allow 7.10.6 to build fine using VC 2003-08-01 07:53:27 +00:00
Daniel Stenberg
9f7c634133 add a check for 'ar' since the lack of it bit Jared Ingersoll
we might need to check for some other tools too that on Solaris are put
in those weird dirs...
2003-07-30 15:10:26 +00:00
Daniel Stenberg
da20d68a12 removed silly target that only works when building from CVS 2003-07-30 14:26:36 +00:00
Daniel Stenberg
d3e512c738 Joerg Mueller-Tolk updated this to build fine with 7.10.6 2003-07-30 14:19:44 +00:00
Daniel Stenberg
339f84fe1f ftp proxy support would be nice 2003-07-30 13:41:59 +00:00
Daniel Stenberg
2d41b735ec updated to match the recent ftp patch that makes it check for resumability 2003-07-30 07:52:02 +00:00
Daniel Stenberg
e3b4dd08ff Daniel Noguerol made the ftp code output "Accept-Ranges: bytes" in a similar
style to other faked HTTP headers when NOBODY and HEADER are used.
2003-07-30 07:51:33 +00:00
Daniel Stenberg
6809a906bb Make sure to generate an uncompressed hugehelp.c file for inclusion in
the distribution archive, as it isn't certain that zlib is present everywhere.
Those who care much for compressed help should regenerate the file.
2003-07-30 07:33:41 +00:00
Daniel Stenberg
1c35cbcc07 Reverted the 'filetime' struct field back to a 'long' as time_t is sometimes
unsigned and we want this to be able to hold -1 for illegal/unset values.
2003-07-30 07:22:28 +00:00
Daniel Stenberg
5f8989a436 CURLDEBUG not MALLOCDEBUG 2003-07-29 11:07:38 +00:00
Daniel Stenberg
aa7b0648ff Fixes based on Gisle Vanem's input since this script failed due to
possibly crlf newlines.
2003-07-28 23:00:56 +00:00
Daniel Stenberg
2fbe61960f Digest *OR* Basic authorization test 2003-07-28 22:17:37 +00:00
Daniel Stenberg
bdb5e5a250 7.10.6 2003-07-28 12:13:48 +00:00
Daniel Stenberg
48a580e609 clear http->send_buffer when we have freed the memory it pointed to 2003-07-28 10:21:57 +00:00
Daniel Stenberg
1361fc69b9 updated to the new ftp dir parsing code that allows a preceding double
slash
2003-07-28 09:02:15 +00:00
Daniel Stenberg
93352e56d8 As noticed by Kevin Roth, we shall not speak of root dir when it isn't
necessarily the root...
2003-07-28 08:53:12 +00:00
Daniel Stenberg
d9246ff24d François Pons brought a patch that once again made curl deal with ftp and
"double slash" as indicating the root directory. In the RFC1738-fix of April
30, that ability was removed (since it is not the "right" way).
2003-07-28 08:50:02 +00:00
Daniel Stenberg
9301bc3444 use the correct 'test71' file name for the temp file 2003-07-28 08:23:46 +00:00
Daniel Stenberg
76352c4e2d got a bug report on -F in config files, so I wrote up this test to verify
that it works... and it did! ;-)
2003-07-28 08:21:07 +00:00
Daniel Stenberg
428f41bd12 having it in CVS causes us problems *grrr* 2003-07-25 09:46:07 +00:00
Daniel Stenberg
99c32e460f Andrés García updated with the added files etc 2003-07-25 08:59:55 +00:00
Daniel Stenberg
83f249cf65 With an unknown CA path, we undef the variable. To build properly without
SSL/CA.
2003-07-25 08:47:34 +00:00
Daniel Stenberg
2c2baa93ea only check for CA bundle path if built with SSL support
set a conditional for the makefile if we know the CA path or not
2003-07-25 08:47:10 +00:00
Daniel Stenberg
f0278ca114 Removed #include <sys/resource.h>, as pointed out by Henry Bland we don't
need it.
2003-07-25 08:30:58 +00:00
Daniel Stenberg
297b1b5013 the test compared numerically even though it could contain a string, and I
lowered the number of retries to 10
2003-07-23 17:28:36 +00:00
Daniel Stenberg
e9f63bf4e8 When we re-use an existing connection we must make sure that we don't
accidentally re-use the connect_addr field, as that might no longer be
around. Fix verified by Tracy Boehrer who basically debugged and tracked down
this problem.
2003-07-23 17:06:21 +00:00
Daniel Stenberg
556ce1c6a1 minor code style fix 2003-07-23 12:55:24 +00:00
Daniel Stenberg
cc4ff62681 Split out the changes from the year 2002 into a separate file, named
CHANGES.2002.
2003-07-23 11:59:20 +00:00
Daniel Stenberg
0423fd9b55 SSLCERTS was moved into the docs/ directory 2003-07-23 11:39:05 +00:00
Daniel Stenberg
789ab20bf7 moved SSLCERTS into the docs/ directory 2003-07-23 11:38:19 +00:00
Daniel Stenberg
b47462bd68 Daniel Kouril's fix to make the GSS-Negotiate work fine. 2003-07-23 11:28:59 +00:00
Daniel Stenberg
1a94fee42d Juan F. Codagnone's fixes to build properly on Windows again 2003-07-23 08:21:21 +00:00
Daniel Stenberg
a91ce6a5d6 Plain default version of this file, to allow users to build easier from
CVS. This will be updated by the configure script, and a default is placed
here by the maketgz script.
2003-07-23 08:11:28 +00:00
Daniel Stenberg
981ffd9fce reversed the check for GSSAPI when requesting that auth 2003-07-22 11:15:46 +00:00
Daniel Stenberg
e76c960624 CURLDEBUG, not MALLOCDEBUG 2003-07-22 10:00:37 +00:00
Daniel Stenberg
416c92cc6f More support for NTLM on proxies, now proxy state and nonce is stored in
a separate struct properly.
2003-07-22 09:59:36 +00:00
Daniel Stenberg
fb731eb3e7 The NTLM functions now take a 'proxy' argument as well. 2003-07-22 09:58:57 +00:00
Daniel Stenberg
6f2a4d290f Added a separate struct for the proxyntlm data, as it will/can be different
than the remote server's. That is, both the server and the proxy can in
fact require NTLM auth.
2003-07-22 09:58:18 +00:00
Daniel Stenberg
cefc8ba938 CURLDEBUG is the symbol now 2003-07-22 09:57:09 +00:00
Daniel Stenberg
d0bd644eef Don't depend on the TIME_WITH_SYS_TIME define. win32 doesn't have sys/time.h
and I don't think we need it.
2003-07-22 08:23:16 +00:00
Daniel Stenberg
071c95128e moved the proxyuser and proxypasswd fields from the sessionhandle to the
connectdata to work as expected
2003-07-21 13:16:30 +00:00
Daniel Stenberg
1a192c489b adjusted to support NTLM for proxies 2003-07-21 13:16:01 +00:00
Daniel Stenberg
56014e74a0 krb4-fixes for the moved user+password fields within the structs 2003-07-21 09:19:48 +00:00
Daniel Stenberg
172271498d pre4-commit 2003-07-21 08:25:31 +00:00
Daniel Stenberg
f2882cb88c pre4 2003-07-21 08:25:21 +00:00
Daniel Stenberg
152f1fee40 the CWD-null bug fix 2003-07-21 07:54:20 +00:00
Daniel Stenberg
968234e6ae the fixed skip-blanks in the FTP CWD code called for this adjustment 2003-07-20 00:19:44 +00:00
Daniel Stenberg
5e133e2dff David Gardner pointed out in bug report 770755 that using the FTP command CWD
with a blank argument is a bad idea. Now skip blanks.
2003-07-20 00:18:11 +00:00
Daniel Stenberg
0049c09fc3 If NTLM is requested, only re-use connections that have the exact same
credentials.
2003-07-20 00:02:47 +00:00
Daniel Stenberg
a2a63c27f4 explains my fixes just committed 2003-07-19 23:58:21 +00:00
Daniel Stenberg
c50a601f1a modified to work fine with the new persistent connection working test suite
HTTP server
2003-07-19 23:57:08 +00:00
Daniel Stenberg
bc0fd6db71 swsclose added 2003-07-19 23:56:44 +00:00
Daniel Stenberg
52b631fade Access the user and passwd fields from the connectdata struct now instead
of the sessionhandle struct, as that was not good.
2003-07-19 23:56:33 +00:00
Daniel Stenberg
2f0bc9d1f7 No longer stores user+password in the sessionhandle, now doing that in the
connectdata struct instead. Each being an allocated pointer.

The passwdgiven field was turned into a local variable in the only
function where it was being used.
2003-07-19 23:55:15 +00:00
Daniel Stenberg
5ef6520d4e fixed the CONNECT thing again 2003-07-19 23:54:15 +00:00
Daniel Stenberg
2c1925161e If the data contents contain the word 'swsclose', then this server will
disconnect the client after the response has been sent. This also happens
if the response is zero bytes long.

In all other cases (unless an error happens), it will now maintain the
connection to allow proper persistent connection testing. This was required
for the NTLM testing to work so I finally had to fix this. Of course most of
the existing HTTP tests will be adjusted to work with this new rule of test
file syntax for HTTP tests.

Also fixed the log function to deal with varargs for better logging.
2003-07-19 23:44:22 +00:00
Daniel Stenberg
0529b349d5 recent changes 2003-07-16 00:04:45 +00:00
Daniel Stenberg
b4620364a2 more fixes from Doug Kaufman for DJGPP builds for DOS 2003-07-15 23:47:25 +00:00
Daniel Stenberg
634aef3895 updated to work with Dan Winship's NTLM domain stuff fix 2003-07-15 23:38:06 +00:00
Daniel Stenberg
06c86d1a8c Moved the NTLM credentials to the connectdata struct instead, as NTLM
authenticates connections and not single requests. This should make it work
better when we mix requests from multiple hosts. Problem pointed out by
Cris Bailiff.
2003-07-15 23:36:50 +00:00
Daniel Stenberg
79749f8eb4 Fix to the endless loop of bad Basic authentication as reported in Cris
Bailiff's bug report 768275.
2003-07-15 23:06:02 +00:00
Daniel Stenberg
b036986b3e Dan Winship's patch added that makes use of DOMAIN\USER or DOMAIN/USER
for the user field. I changed it slightly to stay with strchr() only instead
of strpbrk() for portability reasons.
2003-07-15 22:58:36 +00:00
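
The idea in the commit above is to split the user field at the first '\' or '/' into a domain part and a user part, using two strchr() calls rather than strpbrk(). A conceptual sketch of that split, not the actual libcurl code; the helper name is made up.

```c
#include <stdio.h>
#include <string.h>

/* Split "DOMAIN\user" or "DOMAIN/user" into its two parts, using two
   strchr() calls for portability. Conceptual sketch only. */
static void split_domain_user(const char *input,
                              char *domain, size_t dlen,
                              char *user, size_t ulen)
{
  const char *sep = strchr(input, '\\');
  if(!sep)
    sep = strchr(input, '/');

  if(sep) {
    size_t n = (size_t)(sep - input);
    if(n >= dlen)
      n = dlen - 1;
    memcpy(domain, input, n);
    domain[n] = '\0';
    snprintf(user, ulen, "%s", sep + 1);
  }
  else {
    domain[0] = '\0';                  /* no domain part given */
    snprintf(user, ulen, "%s", input);
  }
}

int main(void)
{
  char domain[64], user[64];
  split_domain_user("EXAMPLE\\alice", domain, sizeof(domain),
                    user, sizeof(user));
  printf("domain='%s' user='%s'\n", domain, user); /* domain='EXAMPLE' user='alice' */
  return 0;
}
```
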
Daniel Stenberg
938f1d1da7 Dan Winship's fix to make the new auth stuff such as NTLM work with
the multi interface
2003-07-15 22:46:01 +00:00
Daniel Stenberg
58b6b3df06 Dan Winship pointed out this flaw 2003-07-15 22:44:48 +00:00
Daniel Stenberg
f9c3347f7c re-use existing variable instead of declaring a new local one 2003-07-05 13:27:02 +00:00
Daniel Stenberg
5b72eb0b03 Some of Doug Kaufman's changes for the DOS port 2003-07-05 13:13:49 +00:00
Daniel Stenberg
6dd4c13bc0 the latest changes 2003-07-04 18:18:17 +00:00
Daniel Stenberg
e4e7db551f HAVE_SETVBUF removed, no longer used 2003-07-04 18:17:58 +00:00
Daniel Stenberg
ebfde8da56 removed 2003-07-04 18:15:53 +00:00
Daniel Stenberg
756bc0f4b7 Dan Grayson pointed out that we set the CURL_CA_BUNDLE variable wrongly in
the configure script. We set it differently now and generate the
lib/ca-bundle.h file entirely.
2003-07-04 18:15:25 +00:00
Daniel Stenberg
269d491b6a remove the usage of setvbuf() and use fflush() instead if no buffering should
be done on the output
2003-07-04 17:16:34 +00:00
Daniel Stenberg
449e5bc2ad CURLDEBUG not MALLOCDEBUG anymore 2003-07-04 16:37:16 +00:00
Daniel Stenberg
8736c11d84 adjusted to the NTLM updates 2003-07-04 16:36:31 +00:00
Daniel Stenberg
45fc760985 Peter Sylvester's patch was applied that introduces the following:
CURLOPT_SSL_CTX_FUNCTION to set a callback that gets called with the
   OpenSSL's ssl_ctx pointer passed in and allow a callback to act on it. If
   anything but CURLE_OK is returned, that will also be returned by libcurl
   all the way back. If this function changes the CURLOPT_URL, libcurl will
   detect this and instead go use the new URL.

   CURLOPT_SSL_CTX_DATA is a pointer you set to get passed to the callback set
   with CURLOPT_SSL_CTX_FUNCTION.
2003-07-04 16:29:23 +00:00
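
A minimal sketch of the new callback pair. The callback receives OpenSSL's SSL_CTX as a void pointer right before the handshake and can adjust it; returning anything other than CURLE_OK aborts the transfer with that code. The URL and user data are placeholders.

```c
#include <curl/curl.h>

/* Called just before the SSL handshake; 'ssl_ctx' is OpenSSL's SSL_CTX*
   passed as a void pointer. */
static CURLcode sslctx_cb(CURL *curl, void *ssl_ctx, void *userptr)
{
  (void)curl;
  (void)ssl_ctx;   /* cast to SSL_CTX * and adjust it here (add certs, etc.) */
  (void)userptr;   /* whatever was set with CURLOPT_SSL_CTX_DATA */
  return CURLE_OK;
}

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    int mydata = 0; /* placeholder application data */
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, sslctx_cb);
    curl_easy_setopt(curl, CURLOPT_SSL_CTX_DATA, &mydata);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
```
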
Daniel Stenberg
7968e3c2de David Byron's patch that allows a client to make the server quit with a
magic url.
2003-07-01 15:21:42 +00:00
Daniel Stenberg
964a41c75c new CVS info 2003-07-01 12:12:10 +00:00
Daniel Stenberg
5931d53637 Gisle Vanem found a lib handle leak in the ldap code 2003-07-01 10:12:52 +00:00
Daniel Stenberg
3ed3ae5bcf When I introduced the DIST_SUBDIRS usage, I broke the 'make install' for
include files and docs, so now I've added a custom install hook to run
make install for docs and install when data is installed at the top-level.
2003-06-27 14:37:38 +00:00
Sterling Hughes
6519cc70c5 revert out my bogus commit. ;-) 2003-06-26 21:30:48 +00:00
Sterling Hughes
505a4f27fa test commit 2003-06-26 21:17:29 +00:00
Daniel Stenberg
79144eba99 new tests 2003-06-26 11:45:04 +00:00
Daniel Stenberg
26e17d89c9 produce a skip-report at the end of all tests, and thus record and count
them properly
2003-06-26 11:44:01 +00:00
Daniel Stenberg
4322c1106f beautified and added comments all over 2003-06-26 11:42:54 +00:00
Daniel Stenberg
73071dfd4f mention the new flag bits we support 2003-06-26 11:41:24 +00:00
Daniel Stenberg
b7c14b3c27 mention that it copies the string you add 2003-06-26 11:41:08 +00:00
Daniel Stenberg
3130b44535 added lots, mostly the new auth-related option(s) 2003-06-26 11:40:44 +00:00
Daniel Stenberg
a2bd73334f added lots of auth stuff and updated other things too 2003-06-26 11:40:04 +00:00
Daniel Stenberg
1a393f5625 mention COOKIES, removed added entries, corrected the FPL-SSL link/reference 2003-06-26 11:38:53 +00:00
Daniel Stenberg
d4951e837e mention the other formats the docs come in 2003-06-26 11:37:13 +00:00
Daniel Stenberg
26f6365e93 adjusted to recent changes 2003-06-26 11:36:32 +00:00
Daniel Stenberg
3a552b1e63 we do support NTLM now... 2003-06-26 11:35:48 +00:00
Daniel Stenberg
69eb1790da CURLDEBUG is the symbol to use, no longer MALLOCDEBUG 2003-06-26 11:34:36 +00:00
Daniel Stenberg
a1af6f3614 adjusted the compressed generation to be more helpful in comments etc 2003-06-26 11:34:07 +00:00
Daniel Stenberg
3aced61465 support for the new auth stuff
more output on --version/-V
mention --manual on the help output text
2003-06-26 11:33:29 +00:00
Daniel Stenberg
6f02ddfce8 new httpauth support, changed filetime variable kind 2003-06-26 11:31:50 +00:00
Daniel Stenberg
c2faa39b62 added CURLOPT_HTTPAUTH support 2003-06-26 11:30:59 +00:00
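
CURLOPT_HTTPAUTH takes a bitmask of CURLAUTH_* bits (CURLAUTH_BASIC, CURLAUTH_DIGEST, CURLAUTH_GSSNEGOTIATE, CURLAUTH_NTLM, or CURLAUTH_ANY). A minimal sketch with placeholder URL and credentials.

```c
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/protected");
    curl_easy_setopt(curl, CURLOPT_USERPWD, "user:secret"); /* placeholder */
    /* With more than one bit set, libcurl negotiates and picks the
       strongest method the server offers among those allowed here. */
    curl_easy_setopt(curl, CURLOPT_HTTPAUTH,
                     (long)(CURLAUTH_DIGEST | CURLAUTH_NTLM));
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
```
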
Daniel Stenberg
2d3734b8b5 Adjusted to work properly with the new authentication stuff
Added code to deal with white spaces in relocation headers.
2003-06-26 11:30:26 +00:00
Daniel Stenberg
ed908b7f89 use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 11:28:26 +00:00
Daniel Stenberg
f7d795a364 use CURLDEBUG 2003-06-26 11:27:38 +00:00
Daniel Stenberg
8919b39d54 adjusted to use the same API as the OpenSSL version of the MD5 functions 2003-06-26 11:27:22 +00:00
Daniel Stenberg
84cedc094e added ntlm flag bits 2003-06-26 11:26:50 +00:00
Daniel Stenberg
3b2b2496d7 Many fixes, most of them based on comments by Eric Glass 2003-06-26 11:26:26 +00:00
Daniel Stenberg
445684c409 new proto for Curl_input_negotiate 2003-06-26 11:25:42 +00:00
Daniel Stenberg
898e067ccc kill warnings 2003-06-26 11:25:23 +00:00
Daniel Stenberg
12859e345f major adjustments to the new authentication support 2003-06-26 11:24:55 +00:00
Daniel Stenberg
89f4af695e include GSS in the debug string if available, support a few new flag
booleans
2003-06-26 11:22:48 +00:00
Daniel Stenberg
308bc9d919 use CURLDEBUG instead of MALLOCDEBUG for preprocessor conditions 2003-06-26 11:22:12 +00:00
Daniel Stenberg
db566c54ae use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 11:16:37 +00:00
Daniel Stenberg
81d403e207 one typecast less for the localtime(), use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:52:48 +00:00
Daniel Stenberg
2bd71d70ff use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:50:32 +00:00
Daniel Stenberg
1eef6f44ba CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:47:20 +00:00
Daniel Stenberg
204f03912f We now use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:45:15 +00:00
Daniel Stenberg
f8c3b3aa18 moved from former CVS 2003-06-26 06:21:29 +00:00
Daniel Stenberg
d4df981463 Added time_t 2003-06-26 06:19:37 +00:00
Daniel Stenberg
497c6d516d up to date with the actual situation 2003-06-25 23:40:48 +00:00
Daniel Stenberg
8288862b7e Cris Bailiff's patch that should make us do NTLM correctly. When we've
authenticated our connection, we can continue without any Authorization:
headers as long as our connection is maintained.
2003-06-13 10:15:55 +00:00
Daniel Stenberg
9aae16c236 stdout is good enough 2003-06-13 09:09:04 +00:00
Daniel Stenberg
80c194a70a work more on pids, less on pidfiles to be able to do better kills at the
end of the test where the pidfiles aren't found, but "our" server is running
2003-06-13 09:04:08 +00:00
Daniel Stenberg
c832b2db5b fixed NTLM test 67, added test 68 for bad NTLM name/password 2003-06-13 08:03:45 +00:00
Daniel Stenberg
27018882ec Cris Bailiff's bugfix 2003-06-13 07:56:38 +00:00
Daniel Stenberg
caf6e9c540 use more curlish strings, these should be able to change... 2003-06-13 07:14:46 +00:00
Daniel Stenberg
e727fb82f2 Marty Kuhrt's #include fixes for VMS 2003-06-13 06:48:04 +00:00
Daniel Stenberg
c78df56801 get and use only the first line of the curl --version output 2003-06-12 23:05:12 +00:00
Daniel Stenberg
d13202f43b modified 2003-06-12 23:03:08 +00:00
Daniel Stenberg
9d139a6b35 Make the HTTP auth stuff work, Dan Fandrich made --version output a list
of all supported protocols.
2003-06-12 23:02:36 +00:00
Daniel Stenberg
d2abe44e6f remove the dumpit file after use 2003-06-12 19:17:08 +00:00
Daniel Stenberg
bc67228576 corrected a comment 2003-06-12 17:40:56 +00:00
Daniel Stenberg
ecf32c964a CURLHTTP* renamed to CURLAUTH* and NEGOTIATE is now GSSNEGOTIATE as there's
a "plain" Negotiate as well.
2003-06-12 17:34:27 +00:00
Daniel Stenberg
e58f30b82a NTLM test case 2003-06-12 16:39:35 +00:00
Daniel Stenberg
654e3f1101 require the netrc_debug feature the same way we now can require SSL
present client-side
2003-06-12 16:38:14 +00:00
Daniel Stenberg
86689dc524 now test cases can be set to be dependent on the presence of "SSL" in the
client/library
2003-06-12 16:22:52 +00:00
Daniel Stenberg
5f62a0c1ca make it build with older OpenSSL 2003-06-12 13:55:40 +00:00
Daniel Stenberg
ad1bf0f389 attempt to make older OpenSSL versions work with the DES stuff 2003-06-12 13:18:10 +00:00
Daniel Stenberg
9c7703ace1 Based on Dan Fandrich's patch and gzip unpack function, we now compress
the 'hugehelp' text if libz and gzip are available at build time.
2003-06-12 12:54:34 +00:00
Daniel Stenberg
4a8155b53c store HAVE_LIBZ as an automake conditional 2003-06-12 12:53:38 +00:00
Daniel Stenberg
80d6d5c5c4 fixing details for NTLM 2003-06-11 16:14:45 +00:00
Daniel Stenberg
c624be8388 more how I envision it _should_ work, but it still doesn't... 2003-06-11 15:33:09 +00:00
Daniel Stenberg
09df1cd41e to support "redirects" after the full body is transferred 2003-06-11 15:31:40 +00:00
Daniel Stenberg
52c5b57200 made a nicer output for the decode test, as it served as a nice tool for me ;-) 2003-06-11 15:31:06 +00:00
Daniel Stenberg
5ea04a852e when we get the auth headers, we still need to read out the full body response
so that we can re-send requests on the same connection nicely
2003-06-11 15:30:30 +00:00
Daniel Stenberg
a2eef05198 correct mistakes 2003-06-11 14:05:13 +00:00
Daniel Stenberg
55f75af353 describe the NTLM mechanism too 2003-06-11 13:44:58 +00:00
Daniel Stenberg
fb6a51b8fd basic NTLM support 2003-06-11 13:44:31 +00:00
Daniel Stenberg
252cc2213e ntlm added 2003-06-11 13:42:53 +00:00
Daniel Stenberg
73c5f24fa4 Initial take at NTLM authentication. It doesn't really work at this point
but the infrastructure is there.
2003-06-11 13:38:55 +00:00
Daniel Stenberg
4c80e103a0 clarify the CUSTOMREQUEST and HTTPHEADER options slightly 2003-06-10 13:06:38 +00:00
Daniel Stenberg
39ea557360 CURLOPT_HTTPAUTH docu 2003-06-10 12:58:40 +00:00
Daniel Stenberg
d0cc92a01a Set auth type differently, we use one CURLOPT_HTTPAUTH instead as we plan
to add more methods in the future.
2003-06-10 12:49:16 +00:00
Daniel Stenberg
d7980c1a45 Daniel Kouril for the HTTP negotiate stuff 2003-06-10 12:25:01 +00:00
Daniel Stenberg
e56ae1426c Daniel Kouril's patch that adds HTTP negotiation support to libcurl was
added.
2003-06-10 12:22:19 +00:00
Daniel Stenberg
696843c020 we fix more 2003-06-10 12:07:10 +00:00
Daniel Stenberg
6ff5621dd7 more generic 2003-06-10 12:05:12 +00:00
Daniel Stenberg
e7fb72a732 Pass the error stream pointer to the URL globber, so that it can report
errors correctly to the user, if need be.

Also fixed so that a missing ] in the globbing process no longer leads
to a core dump.
2003-06-10 09:42:22 +00:00
Daniel Stenberg
8d30d34e0c When doing very big GET requests over HTTPS, we need to add some extra
funky logic in order to make re-tries work fine with OpenSSL. This corrects
the problem David Orrell noticed.
2003-06-06 14:58:26 +00:00
Daniel Stenberg
bc7fe85f8a Just moved around some logic in Curl_write() to make it easier to debug. 2003-06-06 14:56:50 +00:00
Daniel Stenberg
89352d92c5 spellfix 2003-06-06 06:44:05 +00:00
Daniel Stenberg
c32390d84c Reversed the logic to only include the <sys/select.h> header on systems
known to really NEED it as another system that doesn't have it came up:
very old Linux libc5-based systems (in addition to all HPUX versions).

The only known system at this point is AIX.
2003-06-05 14:04:44 +00:00
Daniel Stenberg
45ca866a2d LDAP problem added as mention in bug report #735752 2003-06-03 08:10:53 +00:00
Daniel Stenberg
ceef206c21 include the time headers just like we used to do in the curl/curl.h header
once upon a time
2003-06-03 08:07:06 +00:00
Daniel Stenberg
7c6424f0a9 we want the time defines too 2003-06-03 08:06:23 +00:00
Daniel Stenberg
bc942de6f1 Content-Length: now overrides other means of knowing when the stream has
ended.
2003-06-03 07:53:18 +00:00
Daniel Stenberg
06984df5cb Make the Content-Length info override the Connection: close header, so that
libcurl will stop reading when the given number of bytes has arrived and not wait
for a closed socket.
2003-06-02 14:57:08 +00:00
Daniel Stenberg
4f136a3a76 the 500-599 test case range 2003-06-02 14:48:27 +00:00
Daniel Stenberg
363bf3ba30 ignore more 2003-06-02 13:55:40 +00:00
Daniel Stenberg
acb895956a ignore 2003-06-02 13:53:13 +00:00
Daniel Stenberg
21e87b9bb3 David Byron's fix to get the progress-bar use the local size too when
doing a resumed download.
2003-06-02 13:42:42 +00:00
Daniel Stenberg
c896ebcf12 makefile fiddle
changed how http requests are sent - now in one chunk more often
HPUX include fix in the external headers
better SSL work-arounds for bad SSL servers
modified error message when CURLE_HTTP_RETURNED_ERROR is returned
2003-06-02 13:31:25 +00:00
Daniel Stenberg
d288222e80 work-around SSL implementation flaws better, pointed out in bug report
#745122.
2003-06-02 13:27:03 +00:00
Daniel Stenberg
4eb2a6c9a3 make a more descriptive error message when CURLE_HTTP_RETURNED_ERROR is
returned
2003-06-02 13:14:57 +00:00
Daniel Stenberg
2563731c4d haven't updated this in a loooong time 2003-05-28 10:24:20 +00:00
Daniel Stenberg
4e410111db Posting static data using POST and chunked encoding now also appends the
data to the initial request buffer, if the total post data is less than
100K.
2003-05-28 07:54:33 +00:00
Daniel Stenberg
5670563a26 include sys/time.h, we didn't have a time() proto anymore. Did one of the
changes in curl/curl.h make this occur?
2003-05-27 22:56:01 +00:00
Daniel Stenberg
6caa656d01 Documented which rules the public headers must follow when we write
preprocessor checks for conditions.
2003-05-27 12:51:46 +00:00
Daniel Stenberg
c12af7aed1 oops, removed a # too many 2003-05-27 12:51:15 +00:00
Daniel Stenberg
dcb6d1c01d remove usage of HAVE_* defines, we cannot and shall not depend on any such
defines in the public external header files
2003-05-27 12:45:51 +00:00
Daniel Stenberg
18234cbdac sys/select.h is not present on HPUX, avoid including it 2003-05-27 12:34:48 +00:00
Daniel Stenberg
06bf988dc1 made it work ;-) 2003-05-27 12:18:00 +00:00
Daniel Stenberg
55ff4c3f08 if cvs update fails, attempt again after 5 seconds and retry 50 times
before giving up
2003-05-27 12:03:24 +00:00
Daniel Stenberg
4915002168 Only build in lib and src by default, make the others dist-subdirs.
Make the test stuff get built when we run 'make test' instead.
2003-05-27 08:51:09 +00:00
Daniel Stenberg
5bd8d60e41 Rudy Koento experienced problems with curl's recent habit of POSTing data in
two separate send() calls, first the headers and then the data. I've now made
a fix that for static and known content that isn't to be chunked-encoded,
everything is now sent in one single system call again. This is also better
for network performance reasons.
2003-05-27 08:33:08 +00:00
Daniel Stenberg
fc872808c5 runs on DOS now 2003-05-27 07:37:34 +00:00
Daniel Stenberg
0f4feda382 include file flaw and yet another socks5-fix 2003-05-27 06:41:06 +00:00
Daniel Stenberg
90b0f38316 Another socks5-fix. Make sure that when we use a socks-proxy, it is not the
same as using an http proxy, so we must make sure to better check for http
proxies before we do HTTP proxy stuff. This included authorization and
URI usage in the request etc.
2003-05-27 06:28:25 +00:00
Daniel Stenberg
18f630ab21 CURLOPT_HTTPDIGEST is added 2003-05-27 06:25:56 +00:00
Daniel Stenberg
e97fd44151 language 2003-05-26 12:32:22 +00:00
Daniel Stenberg
b75679778f ftp ASCII transfers in general need fixing 2003-05-26 08:19:06 +00:00
Daniel Stenberg
35a84ad576 Chris Lewis mentioned that he doesn't get WIN32 defined, only _WIN32 so we
make an adjustment to catch this.
2003-05-26 07:57:53 +00:00
Daniel Stenberg
4ed28be75a even more 2003-05-23 11:24:39 +00:00
Daniel Stenberg
e2f4656a86 Ricardo Cadime found a socket leak when listing directories without
contents. Test cases 144 and 145 were added to verify the fix.

Now we deal with return code 450 properly and other codes also do proper
cleanup.
2003-05-23 11:14:09 +00:00
Daniel Stenberg
1e14da5c60 more ftp testing using NLST and no contents and bad return code 2003-05-23 11:10:35 +00:00
Daniel Stenberg
b2ef79ef3d Rudy Koento's problem fixed, test case 66 verifies this. 2003-05-23 09:47:57 +00:00
Daniel Stenberg
f488874ff5 test 66 returns one line of data with no header (HTTP) 2003-05-23 09:46:19 +00:00
Daniel Stenberg
23258648da --digest added, --compressed rephrased 2003-05-23 08:06:31 +00:00
Daniel Stenberg
6b84ebe501 include digest.h for proto 2003-05-23 06:44:24 +00:00
Daniel Stenberg
07dd067f73 DJGPP fix by Gisle Vanem 2003-05-23 06:43:14 +00:00
Daniel Stenberg
420744d048 more more more 2003-05-22 22:47:48 +00:00
Daniel Stenberg
01108e3a63 warning-free is better 2003-05-22 22:45:38 +00:00
Daniel Stenberg
8026b1e194 Introducing --digest 2003-05-22 22:40:01 +00:00
Daniel Stenberg
a39d77227f Better Digest stuff 2003-05-22 22:39:38 +00:00
Daniel Stenberg
9f69deec7d Added CURLOPT_HTTPDIGEST support
SOCKS5 fix as suggested by Jis in bugreport #741841.
2003-05-22 22:38:46 +00:00
Daniel Stenberg
e912f772e0 Document the <dataNUM> thing we use, 2003-05-22 22:37:00 +00:00
Daniel Stenberg
0102726aeb Digest support added 2003-05-22 22:36:39 +00:00
Daniel Stenberg
1e7aa04040 Digest testing added 2003-05-22 22:36:22 +00:00
Daniel Stenberg
00a7c6fe6b proper header added 2003-05-22 16:23:27 +00:00
Daniel Stenberg
87f8c0d471 hush the compiler 2003-05-22 16:12:30 +00:00
Daniel Stenberg
334d78cd18 Initial Digest support. At least partly working. 2003-05-22 16:09:54 +00:00
Daniel Stenberg
2356325592 David Balazic pointed out the lack of checks for a valid %XX code when
we unescape a string. We now check and decode only valid %XX strings.
2003-05-21 15:53:59 +00:00
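
The fix described above amounts to decoding a %XX sequence only when both characters after the '%' are hexadecimal digits, and passing everything else through untouched. A conceptual sketch of that check, not curl's own unescape code.

```c
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Decode %XX escapes, but only when both hex digits are valid; anything
   else is copied through unchanged. Conceptual sketch only. */
static char *unescape_valid(const char *in)
{
  size_t len = strlen(in);
  char *out = malloc(len + 1);
  size_t o = 0;
  if(!out)
    return NULL;
  for(size_t i = 0; i < len; i++) {
    if(in[i] == '%' && i + 2 < len &&
       isxdigit((unsigned char)in[i + 1]) &&
       isxdigit((unsigned char)in[i + 2])) {
      char hex[3] = { in[i + 1], in[i + 2], '\0' };
      out[o++] = (char)strtol(hex, NULL, 16);
      i += 2;                /* consumed the two hex digits */
    }
    else
      out[o++] = in[i];      /* not a valid %XX sequence, keep as-is */
  }
  out[o] = '\0';
  return out;
}

int main(void)
{
  char *s = unescape_valid("a%20b%2Gc"); /* %2G is invalid and kept literally */
  if(s) {
    puts(s);                 /* prints: a b%2Gc */
    free(s);
  }
  return 0;
}
```
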
Daniel Stenberg
d78ec593fa fix the makefile in packages/DOS too 2003-05-21 08:12:52 +00:00
Daniel Stenberg
d5043133e6 Gisle Vanem made curl build with djgpp on DOS. 2003-05-21 08:08:48 +00:00
Daniel Stenberg
509f69a457 Gisle Vanem's fix to make the 'curl -M' output nicer 2003-05-21 07:21:44 +00:00
Daniel Stenberg
662c659220 missing semicolon, by Gisle Vanem 2003-05-20 12:44:55 +00:00
Daniel Stenberg
9a6566e774 Gisle Vanem's code for not trusting h_aliases to always be non-NULL 2003-05-20 09:41:39 +00:00
Daniel Stenberg
4da0428d9e Remind about the gpg command lines 2003-05-20 06:33:13 +00:00
Daniel Stenberg
8ee1177206 support user name and password in proxy environment variables 2003-05-19 13:14:26 +00:00
Daniel Stenberg
e9154b2549 the proxy environment variables now may contain user name and password 2003-05-19 13:09:41 +00:00
Daniel Stenberg
d398a0dd58 remove debug output 2003-05-19 13:08:48 +00:00
Daniel Stenberg
7723a24297 setenv support added to allow test cases to require a set of environment
variables
2003-05-19 13:06:10 +00:00
Daniel Stenberg
95a4b8db68 7.10.5 commit 2003-05-19 11:45:10 +00:00
Daniel Stenberg
663c1898a3 known AIX ipv6 problems 2003-05-16 10:57:53 +00:00
Daniel Stenberg
465de793e8 Skip any preceding dots from the domain name of cookies when we keep them
in memory, only add it when we save the cookie. This makes all tailmatching
and domain string matching internally a lot easier.

This was also the reason for a remaining bug I introduced in my overhaul.
2003-05-15 22:28:19 +00:00
Daniel Stenberg
de9b76cef0 change the order of the in_addr_t tests, so that 'unsigned long' is tested
for first, as it seems to be what many systems use
2003-05-15 21:13:36 +00:00
Daniel Stenberg
1747a8d3d9 1. George Comninos' progress meter fix
2. I also added the pre-releases and dates to the log
2003-05-15 08:13:19 +00:00
Daniel Stenberg
1094e79749 documented CURLOPT_FTP_USE_EPRT 2003-05-14 09:03:51 +00:00
Daniel Stenberg
22569681bc George Comninos provided a fix that calls the progress meter when waiting
for FTP command responses takes >1 second.
2003-05-14 06:31:00 +00:00
Daniel Stenberg
e615d117a0 Setup and use CURL_INADDR_NONE all over instead of INADDR_NONE. We setup
the define accordingly in the hostip.h header to work nicely all over.
2003-05-13 12:12:17 +00:00
Daniel Stenberg
a51258b6bb before using if2ip(), check if the address is an ip address and skip it if
it is.
2003-05-13 12:11:31 +00:00
Daniel Stenberg
8894bd07b6 libtool 1.4.2 is enough 2003-05-13 09:38:09 +00:00
Daniel Stenberg
ec45a9e825 fix comment 2003-05-13 09:37:45 +00:00
Daniel Stenberg
871358a6e5 before checking for network interfaces using if2ip(), check that the given
name isn't an ip address
2003-05-12 13:06:48 +00:00
Daniel Stenberg
2e2e0fba60 no more complaining when I have 1.5 and it tests for 1.4.2 2003-05-12 13:05:11 +00:00
Daniel Stenberg
4a5139e3f4 fixes from the last week+ 2003-05-12 12:49:22 +00:00
Daniel Stenberg
8f85933d7c Dan F clarified the CURLOPT_ENCODING description after his changes to
allow "" to enable all support formats.
2003-05-12 12:47:35 +00:00
Daniel Stenberg
246f3a63f6 Dan Fandrich added --compressed docu 2003-05-12 12:46:45 +00:00
Daniel Stenberg
e99eff4eb0 setting ENCODING to "" means enable-all-you-support 2003-05-12 12:45:57 +00:00
Daniel Stenberg
c0197f19cf Dan Fandrich changed CURLOPT_ENCODING to select all supported encodings if
set to "".  This frees the application from having to know which encodings
 the library supports.
2003-05-12 12:45:14 +00:00
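
A minimal sketch of the "" convention: an empty string requests every Content-Encoding this libcurl build supports and lets libcurl decode the response, while a specific name such as "gzip" restricts it to that one encoding. The URL is a placeholder.

```c
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
    /* "" = accept all encodings the library supports (e.g. deflate, gzip)
       and decode the body automatically. */
    curl_easy_setopt(curl, CURLOPT_ENCODING, "");
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
```
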
Daniel Stenberg
3994d67eea Dan Fandrich lowered the libtool requirement 2003-05-12 12:38:52 +00:00
Daniel Stenberg
9ead79c9d4 when we have accepted the server's connection in a PORT sequence, we set
the new socket to non-blocking
2003-05-12 12:37:35 +00:00
Daniel Stenberg
9371aed46c avoid the write loop 2003-05-12 12:37:05 +00:00
Daniel Stenberg
940707ad66 incoming proxy headers shall be sent to the debug function as HEADERs, not
DATA
2003-05-12 12:29:00 +00:00
Daniel Stenberg
e6c267fb4c oops, run libtoolize as the first tool 2003-05-09 08:17:41 +00:00
Daniel Stenberg
93538fccd6 run libtoolize too 2003-05-09 08:13:02 +00:00
Daniel Stenberg
83a7fad308 run libtoolize to generate these files 2003-05-09 08:12:46 +00:00
Daniel Stenberg
3c7e33388e CURLOPT_FTP_USE_EPRT added 2003-05-09 07:42:47 +00:00
Daniel Stenberg
7b0f35edb6 --disable-eprt added 2003-05-09 07:39:50 +00:00
Daniel Stenberg
94a157d0b0 support for CURLOPT_FTP_USE_EPRT added 2003-05-09 07:39:29 +00:00
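
A minimal sketch of disabling EPRT from an application, mirroring the new --disable-eprt switch; the URL is a placeholder.

```c
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/file.txt");
    /* Use active FTP ("-" means pick the default interface address)... */
    curl_easy_setopt(curl, CURLOPT_FTPPORT, "-");
    /* ...but skip the EPRT/LPRT commands and go straight to plain PORT,
       for servers that choke on EPRT. */
    curl_easy_setopt(curl, CURLOPT_FTP_USE_EPRT, 0L);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
```
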
Daniel Stenberg
ca04620253 AIX wants sys/select.h 2003-05-09 07:37:27 +00:00
Daniel Stenberg
073ef0b36a clarify the curl name issue and that there may be other libcurl-based
tools that provide a GUI
2003-05-09 07:07:13 +00:00
Daniel Stenberg
c41c05d4f4 Kevin Delafield reported another case where we didn't correctly check for
EAGAIN but only EWOULDBLOCK, which caused badness on HPUX. We also check for
and act the same on EINTR errors as well now.
2003-05-06 08:19:36 +00:00
Daniel Stenberg
f1ea54e07a fixed the required tools' version numbers 2003-05-05 14:19:54 +00:00
Daniel Stenberg
a139ce901a the writable argv check now should not exit when building a cross-compiled
curl
2003-05-04 16:07:19 +00:00
Daniel Stenberg
7431957113 put back the libtool test, now for 1.5
require autoconf 2.57
require automake 1.7
2003-05-03 16:25:49 +00:00
Daniel Stenberg
1752d80915 If there is a custom Host: header specified, we use that host name to
extract the correct set of cookies to send. This functionality is verified
by test case 62.
2003-05-02 09:13:19 +00:00
Daniel Stenberg
aa7420e109 send correct cookies when using a custom Host: 2003-05-02 09:12:26 +00:00
Daniel Stenberg
a290d4b9db fixed the format slightly 2003-05-02 09:11:53 +00:00
Daniel Stenberg
19a4314e7f corrected a comment about gzip not being supported 2003-05-01 17:49:47 +00:00
Daniel Stenberg
d166e85e0a FTP URL with type=a 2003-05-01 17:48:59 +00:00
Daniel Stenberg
f213e857ab Andy Cedilnik fixed some compiler warnings 2003-05-01 13:37:36 +00:00
Daniel Stenberg
eb6130baa7 ourerrno became Curl_ourerrno() and is now available to all libcurl 2003-05-01 13:37:05 +00:00
Daniel Stenberg
f69ea2c68a Use the proper Curl_ourerrno() function instead of plain errno, for better
portability. Also use Andy Cedilnik's compiler warning fixes.
2003-05-01 13:36:28 +00:00
Daniel Stenberg
078441d477 the test numbers are now only for human readability, the numbers no longer
enforce protocol/server
2003-04-30 20:29:31 +00:00
Daniel Stenberg
95f6b15a67 no longer assume that the test number implies servers to run 2003-04-30 20:28:49 +00:00
Daniel Stenberg
ee29dbdb8f Each test case now specifies which server(s) it needs, without relying on the
test number.
2003-04-30 20:25:39 +00:00
Daniel Stenberg
15f3f4c93f we say welcome to test 142 2003-04-30 20:08:01 +00:00
Daniel Stenberg
6932e94e0e verify that curl fails fine when an FTP URL with a too deep dir hierarchy
is used
2003-04-30 20:07:37 +00:00
Daniel Stenberg
3ef06d7efe when making up the list of path parts, save the last entry pointing to NULL
as otherwise we'll go nuts
2003-04-30 20:04:17 +00:00
Daniel Stenberg
fb012b48e9 recent action 2003-04-30 20:01:22 +00:00
Daniel Stenberg
bc77bf217f if there's a cookiehost allocated, free that too 2003-04-30 19:58:36 +00:00
Daniel Stenberg
37d1e9351e ok, make the test run ok too 2003-04-30 19:56:53 +00:00
Daniel Stenberg
4494c0dee0 various new cookie tests with a custom Host: header set 2003-04-30 19:49:51 +00:00
Daniel Stenberg
26afc604ac modified to work with modified code 2003-04-30 17:16:25 +00:00
Daniel Stenberg
9aefcada19 modified to produce nicer output when a single test fails 2003-04-30 17:15:38 +00:00
Daniel Stenberg
69fc363760 make the diffs with 'diff -u' to make them nicer and easier to read 2003-04-30 17:15:00 +00:00
Daniel Stenberg
bea02ddebe stop parsing Host: host names at colons too 2003-04-30 17:12:29 +00:00
Daniel Stenberg
3fb257c39c modified to the new cookie function proto 2003-04-30 17:05:19 +00:00
Daniel Stenberg
7c96c5a39b extract host name from custom Host: headers to use for cookies 2003-04-30 17:04:53 +00:00
Daniel Stenberg
efd836d971 Many cookie fixes:
  o Save domains in jars like Mozilla does. It means all domains set in
    Set-Cookie: headers are dot-prefixed.
  o Save and use the 'tailmatch' field in the Mozilla/Netscape cookie jars (the
    second column).
  o Reject cookies using illegal domains in the Set-Cookie: line. Concerns
    both domains with too few dots or domains that are outside the currently
    operating server host's domain.
  o Set the path part by default to the one used in the request, if none was
    set in the Set-Cookie line.
2003-04-30 17:03:43 +00:00
Daniel Stenberg
836aaa1647 changes need for the new ftp path treatment and the new cookie code 2003-04-30 17:01:00 +00:00
Daniel Stenberg
bf2b3dbf3e David Balazic's patch to make the FTP operations "do right" according to
RFC1738, which means it'll use one CWD for each pathpart.
2003-04-30 16:59:42 +00:00
Daniel Stenberg
b4fa2ff995 two more platforms Rich Gray built curl on 2003-04-30 07:32:43 +00:00
Daniel Stenberg
2f9cabc30b Peter Kovacs provided a patch that makes the CURLINFO_CONNECT_TIME work fine
when using the multi interface (too).
2003-04-29 18:03:30 +00:00
Daniel Stenberg
63593f5597 mention configure --help 2003-04-29 16:55:17 +00:00
Daniel Stenberg
c0acaa5d2c CURLOPT_FTPPORT could support port number too 2003-04-28 17:29:32 +00:00
Daniel Stenberg
2e46f8d0a6 corrected the comment which wasn't correct 2003-04-28 13:48:16 +00:00
Daniel Stenberg
51da6aaa07 RSAglue.lib is no longer needed with recent OpenSSL versions 2003-04-25 15:08:46 +00:00
Daniel Stenberg
c8b79e36db Dan Fandrich added support for the gzip Content-Encoding for --compressed 2003-04-24 06:34:31 +00:00
Daniel Stenberg
208374bcc9 Bryan Kemp's reported problems with curl and PUT from stdin and a faked
content-length made me add test case 60, that does exactly this, but it
seems to run fine...
2003-04-23 12:09:58 +00:00
Daniel Stenberg
7f0a6e7203 last 10 days or so 2003-04-22 23:30:04 +00:00
Daniel Stenberg
54ebb9cfd4 libtool 1.5 stuff 2003-04-22 23:29:27 +00:00
Daniel Stenberg
49e9c1495b stop checking for libtool, we don't run that in this script 2003-04-22 23:26:00 +00:00
Daniel Stenberg
a84b0fbd52 Dan Fandrich corrected the error messages on "bad encoding". 2003-04-22 22:33:39 +00:00
Daniel Stenberg
c95814c04d Dan Fandrich's gzip bugfix 2003-04-22 22:32:02 +00:00
Daniel Stenberg
9f8123f1b8 Dan Fandrich's fix 2003-04-22 22:31:02 +00:00
Daniel Stenberg
8b23db4f4d Peter Sylvester pointed out that curl_easy_setopt() will always (wrongly)
return CURLE_OK no matter what happens.
2003-04-22 21:42:39 +00:00
Daniel Stenberg
d77cc13374 two dashes is enough 2003-04-16 12:46:20 +00:00
Daniel Stenberg
9a12db1aa2 typecast the setting of the size, as it might be an off_t which is bigger
than long and libcurl expects a long...
2003-04-15 14:18:37 +00:00
Daniel Stenberg
eb54d34bec If MALLOCDEBUG, include the lib's setup.h here so that the proper defines
are set before all system headers, as otherwise we get compiler warnings
on my Solaris at least.
2003-04-15 14:01:57 +00:00
Daniel Stenberg
4b1203d4c9 include config.h before all system headers, so that _FILE_OFFSET_BITS and
similar is set properly by us first
2003-04-15 13:32:26 +00:00
Daniel Stenberg
183a9c6244 extended the -F section 2003-04-15 09:58:27 +00:00
Daniel Stenberg
1f2294d585 treat uploaded .html files as text/html by default 2003-04-15 09:29:39 +00:00
Daniel Stenberg
0b839c4f77 return the same error for the sslv2 "certificate verify failed" code 2003-04-14 22:00:36 +00:00
Daniel Stenberg
1d4fd1fcae new wording by Kevin Roth 2003-04-14 14:54:18 +00:00
Daniel Stenberg
b1d8d72c16 ignore all stamp-h* 2003-04-14 13:09:44 +00:00
Daniel Stenberg
bafb68b844 With the recent fix of libcurl, it shall now return CURLE_SSL_CACERT when
it had problems with the CA cert and thus we offer a huge blurb of verbose
help to explain to the poor user why this happens.
2003-04-14 13:09:09 +00:00
Daniel Stenberg
21873b52e9 Restored the SSL error codes since they were broken in the 7.10.4 release,
also now attempt to detect and return the specific CACERT error code.
2003-04-14 12:53:29 +00:00
Daniel Stenberg
0aa8b82871 FTP CWD response fixed
gzip content-encoding added
chunked content-encoding fixed
2003-04-14 07:13:08 +00:00
Daniel Stenberg
f9781afafd clarified the CURLINFO_SIZE_DOWNLOAD somewhat on Juan F. Codagnone's
suggestion
2003-04-11 16:52:30 +00:00
Daniel Stenberg
fece361a55 Nic fixed so that Curl_client_write() must not be called with 0 length data.
I edited somewhat and removed trailing whitespaces.
2003-04-11 16:31:18 +00:00
Daniel Stenberg
7b51b2f128 Nic Hines fixed this bug when deflate or gzip contents were downloaded using
chunked encoding.
2003-04-11 16:23:43 +00:00
Daniel Stenberg
22d88fb28e ah, move the zero byte too or havoc will occur 2003-04-11 16:23:06 +00:00
Daniel Stenberg
f7c5b28e76 verify the new url parser fix 2003-04-11 16:22:27 +00:00
Daniel Stenberg
5760f2a307 support ? as separator instead of / even if no protocol was given 2003-04-11 16:08:41 +00:00
Daniel Stenberg
ee46efb5a5 these guys deserve a mentioning here as well 2003-04-11 08:57:19 +00:00
Daniel Stenberg
eb6ffebfc7 Dan the man on the list 2003-04-11 08:55:08 +00:00
Daniel Stenberg
c06c44f286 Dan Fandrich's added gzip support documented. 2003-04-11 08:51:24 +00:00
Daniel Stenberg
019c4088cf Dan Fandrich's gzip patch applied 2003-04-11 08:49:20 +00:00
Daniel Stenberg
0b0a88b78d when saving a cookie jar fails, you don't get an error code or anything,
just a warning in the verbose output stream
2003-04-11 08:19:06 +00:00
Daniel Stenberg
028e9cc56f According to RFC959, CWD is supposed to return 250 on success, but
there seem to be non-compliant FTP servers out there that return 200,
so we accept any '2xy' response now.
2003-04-11 08:10:54 +00:00
Daniel Stenberg
e0d8615ece show a verbose warning message in case cookie-saving fails, after
Ralph Mitchell's notification.
2003-04-11 07:39:16 +00:00
Daniel Stenberg
c8ecbda40b new ftp tests 2003-04-10 11:43:47 +00:00
Daniel Stenberg
2324c10d43 another week has passed 2003-04-10 11:36:56 +00:00
Daniel Stenberg
89cfa76291 Vlad Krupin's URL parsing patch to fix the URL parsing when the URL has no
slash after the host name, but still a ? and following "parameters".
2003-04-10 09:44:39 +00:00
Daniel Stenberg
072070a22c oops, committed test code not meant to be here 2003-04-09 12:02:06 +00:00
Daniel Stenberg
3c3ad134ea the default debugfunction shows incoming headers as well 2003-04-09 11:57:06 +00:00
Daniel Stenberg
a4ffcfd4d5 timecond support added
made the Last-Modified (faked) header look correct using GMT always
2003-04-09 11:56:31 +00:00
Daniel Stenberg
136670c58a three new ftp tests 2003-04-09 11:55:24 +00:00
Daniel Stenberg
28169725fa <mdtm> added 2003-04-09 11:53:09 +00:00
Daniel Stenberg
5b13106f54 MDTM support added 2003-04-09 11:52:24 +00:00
Daniel Stenberg
1a2db0dfb1 James Bursa fixed a flaw in the content-type extracting code that could
miss the first letter
2003-04-08 14:48:38 +00:00
Daniel Stenberg
696f95bb0a share.c added 2003-04-08 10:35:35 +00:00
Daniel Stenberg
acec588fe3 --disable-eprt perhaps? 2003-04-07 06:41:24 +00:00
Daniel Stenberg
6ed0da8e98 Ryan Weaver's fix to prevent the ca bundle from getting installed even when
building curl without SSL support!
2003-04-06 12:29:45 +00:00
Daniel Stenberg
7fd91d70bd adjusted the formpost testcases to the new boundary string construction 2003-04-04 12:30:35 +00:00
Daniel Stenberg
61788a0389 Changed how boundary strings are generated. This new way uses 28 dashes
and 12 following hexadecimal letters, which seems to be what IE uses.
This makes curl work smoother with more stupidly written server apps.

Worked this out together with Martijn Broenland.
2003-04-04 12:24:01 +00:00
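
As a rough illustration of the boundary format described in the entry above (28 dashes
followed by 12 hexadecimal characters), a minimal C sketch; the rand()-based digit source
is only an assumption for the example, not necessarily what libcurl itself uses:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
      char boundary[28 + 12 + 1];
      int i;
      for(i = 0; i < 28; i++)          /* 28 leading dashes */
        boundary[i] = '-';
      for(i = 0; i < 12; i++)          /* 12 hexadecimal characters */
        sprintf(&boundary[28 + i], "%x", rand() % 16);
      printf("%s\n", boundary);
      return 0;
    }
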
Daniel Stenberg
0821447b5b spell fix 2003-04-03 16:11:47 +00:00
Daniel Stenberg
3cba274ba6 kill a compiler warning on cygwin 2003-04-03 14:16:15 +00:00
Daniel Stenberg
df7bbcfd21 Added log output for when the writing of the input HTTP request is successful
or unsuccessful. Used to track down the recent cygwin test suite problems.
2003-04-03 13:43:15 +00:00
Daniel Stenberg
021d406f0c Modified how we log data to server.input, as we can't keep the file open
for very long since that makes it troublesome on certain operating systems.
2003-04-03 13:42:06 +00:00
Daniel Stenberg
294569c502 new 2003-04-03 13:39:36 +00:00
Daniel Stenberg
bfd00ac2ed 7.10.4 commit 2003-04-02 07:48:56 +00:00
Daniel Stenberg
735a4714f4 Version 7.10.4 2003-04-02 07:42:39 +00:00
Daniel Stenberg
827fd47198 documented the new killserver tag 2003-04-01 08:43:09 +00:00
Daniel Stenberg
e26b917661 kill the ftp server afterwards, it is just so messed up 2003-04-01 08:42:37 +00:00
Daniel Stenberg
92872a2a3c log when we've returned verification that we are the test server 2003-04-01 08:42:14 +00:00
Daniel Stenberg
16ddb09cb4 support the new <killserver> tag 2003-04-01 08:41:49 +00:00
Daniel Stenberg
d37031f14e ignore lib505 too 2003-04-01 07:13:28 +00:00
Daniel Stenberg
b4e84ca7d2 lib505.c is a new test case for ftp uploading with rename 2003-04-01 07:13:04 +00:00
Daniel Stenberg
47970b9e6f Added support for the RNFR/RNTO commands 2003-04-01 07:10:08 +00:00
Daniel Stenberg
f65f120d50 updated 2003-03-31 22:16:12 +00:00
Daniel Stenberg
df00ec3c82 move the ssl config clone call to before the connectionexists call and then
also subsequently free the ssl struct if the connection struct is to be
deleted
2003-03-31 21:43:05 +00:00
Sterling Hughes
ad6fca28f9 testing, ignore this commit 2003-03-31 15:59:17 +00:00
Daniel Stenberg
fd33923496 7.10.4-pre6 commit 2003-03-31 14:02:43 +00:00
Daniel Stenberg
a55649dc82 added dist-hook that clears the tests/log dir properly as otherwise
'make distcheck' doesn't pass
2003-03-31 11:37:47 +00:00
Daniel Stenberg
9558f229db Fixup after talks with Richard Bramante. We should now make better
comparisons before re-using SSL connections and re-using SSL connection IDs.
2003-03-31 05:13:26 +00:00
Daniel Stenberg
7917bfb1c9 --location-trusted added, which does a normal location plus the new
CURLOPT_UNRESTRICTED_AUTH option set TRUE.

Patch by Guillaume Cottenceau.
2003-03-31 04:42:20 +00:00
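
A minimal, hedged sketch of the libcurl side of what --location-trusted enables; the URL
and credentials are made up for illustration, while the options are the ones named in the
entry above:

    #include <curl/curl.h>

    int main(void)
    {
      CURL *curl = curl_easy_init();
      if(curl) {
        curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/start"); /* hypothetical */
        curl_easy_setopt(curl, CURLOPT_USERPWD, "user:secret");          /* hypothetical */
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        /* like --location-trusted: keep sending the credentials even when a
           redirect takes us to a different host */
        curl_easy_setopt(curl, CURLOPT_UNRESTRICTED_AUTH, 1L);
        curl_easy_perform(curl);
        curl_easy_cleanup(curl);
      }
      return 0;
    }
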
Daniel Stenberg
25f611ca42 Guillaume Cottenceau's patch that adds CURLOPT_UNRESTRICTED_AUTH that
disables the host name check in the FOLLOWLOCATION code. With that option
set, libcurl will send user+password to all hosts.
2003-03-31 04:41:05 +00:00
Daniel Stenberg
e6eb49e7e0 10 days of fixes 2003-03-31 04:05:03 +00:00
Daniel Stenberg
9a075f53dc clarify USERPWD somewhat more 2003-03-31 04:04:40 +00:00
Daniel Stenberg
4b3f800c03 Frankie Fong filed bug report #708708 which identified a problem with
ConnectionExists() when first doing a proxy connect to an HTTPS site and then
switching over to an HTTP connection to the same host.

This fix corrects the problem.
2003-03-31 03:42:01 +00:00
Daniel Stenberg
82bc76b243 Dan Shearer's fix from bug report #618892, which makes 'curl -O' output
an error message about a missing URL.
2003-03-29 11:03:30 +00:00
Daniel Stenberg
18b9b04907 send as much as possible of the POST at once 2003-03-28 12:56:07 +00:00
Daniel Stenberg
87f1f08b36 added section titles and a CONTACT paragraph asking people to use the mailing
lists
2003-03-27 15:09:33 +00:00
Daniel Stenberg
7d7ebbe9f7 fixed the pkg-config stuff for rh9 2003-03-26 19:05:24 +00:00
Daniel Stenberg
5111ce782f add URLs to patch and diff 2003-03-26 11:48:03 +00:00
Daniel Stenberg
20b0e563ce mention the URL to the mailing lists 2003-03-26 11:44:04 +00:00
Daniel Stenberg
8b6cf239a3 attempt to extract openssl information using pkg-config 2003-03-25 22:40:43 +00:00
Daniel Stenberg
bcc285cffd Renamed configure.in to configure.ac, as this is supposedly the new
preferred name for it.
2003-03-25 15:56:33 +00:00
Daniel Stenberg
d5ba030942 use init and copyright to get a better header in the generated script 2003-03-25 15:54:06 +00:00
Daniel Stenberg
803f43592a white space and indent fix 2003-03-25 14:23:12 +00:00
Daniel Stenberg
904b9ccaa3 ignore getdate.c 2003-03-24 23:11:16 +00:00
Daniel Stenberg
89721ff04a Richard Bramante provided a fix for a handle re-use problem seen when you
change options on an SSL-enabled connection between requests.
2003-03-24 23:10:38 +00:00
Daniel Stenberg
6164823921 Removed the "TC TrustCenter, Germany, Class 0 CA." certificate:
"It is a DEMO certificate and was never intended to be in any list of trusted
CA certificates."

(quote by Götz Babin-Ebell, trustcenter.de)
2003-03-24 11:06:57 +00:00
Daniel Stenberg
f8b3c47f99 all those changes 2003-03-24 10:47:06 +00:00
Daniel Stenberg
56dd2da962 Hopefully this change addresses these two bug reports: 707003 and 706624.
We need to make sure that when we init a 'connectdata' struct and then
afterwards check for and re-use another one, we must be careful so that the
newly set values are transmitted and used in the surviving connectdata struct.
2003-03-21 08:09:48 +00:00
Daniel Stenberg
68bb74d172 lib/getdate.c.cvs may need a rename if you don't have yacc or bison 2003-03-20 15:12:43 +00:00
Daniel Stenberg
bf5e12c8e0 if the cvs update fails, don't continue further 2003-03-20 14:38:42 +00:00
Daniel Stenberg
de11f5e53a make the ENGINE depend on the USE_SSLEAY define too 2003-03-19 21:28:39 +00:00
Daniel Stenberg
b125e8e23a set binary mode for some file handling and it might work better on some
cygwin installations (using DOS-style files somehow?)
2003-03-19 09:26:29 +00:00
Daniel Stenberg
b28b616eb2 typecast the conversion from const char * to char * 2003-03-19 09:16:59 +00:00
Daniel Stenberg
4534ca238d Rename getdate.c to getdate.c.cvs, since the "normal" build procedure does
imply that yacc/bison exists and can generate this file. Those without one
of those tools can then checkout and rename the getdate.c.cvs file.
2003-03-19 09:09:40 +00:00
Daniel Stenberg
cee0e94294 clarify that 22 can be returned on --fail for all HTTP errors being 400
or above
2003-03-18 10:01:51 +00:00
Daniel Stenberg
8319ea7078 more defensive checking as platforms differ... 2003-03-17 17:20:26 +00:00
Daniel Stenberg
5334a58f9b Andy Cedilnik's corrections 2003-03-17 12:38:08 +00:00
Daniel Stenberg
2616bdc4cd it just never ends 2003-03-16 18:41:00 +00:00
Daniel Stenberg
c149b3f797 regenerated from getdate.y 2003-03-16 16:20:56 +00:00
Daniel Stenberg
d349eb3d43 Juan F. Codagnone pointed out a missing thing from the March 2 fix 2003-03-16 16:15:24 +00:00
Daniel Stenberg
9b43ade1c0 typecast getpid() to int to prevent compiler warning 2003-03-16 10:46:52 +00:00
Daniel Stenberg
ad05d0a8d9 figure out select()'s argument types 2003-03-15 21:04:01 +00:00
Daniel Stenberg
e6bfbe9683 Gisle Vanem's fix to get this working nicely on windows 2003-03-15 21:02:20 +00:00
Daniel Stenberg
c871efca4b Gisle Vanem fixed a name collision with the structure "CONTEXT" in <winnt.h> 2003-03-15 21:00:47 +00:00
Daniel Stenberg
12c72b419c missing newline added 2003-03-15 17:26:59 +00:00
Daniel Stenberg
a15b7691ca no server needed 2003-03-15 17:12:46 +00:00
Daniel Stenberg
01618d323b allow 'none' as server 2003-03-15 17:11:13 +00:00
Daniel Stenberg
bb6d0e37e3 Sort out the ENGINE problems people seem to be having. Now we put all ENGINE
related stuff within HAVE_OPENSSL_ENGINE_H and we don't make any private
typedef or similar if the header is missing...
2003-03-15 16:51:43 +00:00
Daniel Stenberg
24a6100897 * use the pid returned back from test-servers and kill them before starting
them the first time
* verify that the server we start really comes up fine and works as
  expected before continuing
* count test cases where the server can't be run (for whatever reason)
* prefix lots of messages with RUN: to make it easier to realize which script
  is saying what when running tests verbose
* remove the generic sleep(1) from each test, makes the suite fly! ;-)

I hope these changes will make the tests run somewhat more reliably on more
platforms.
2003-03-15 16:43:58 +00:00
Daniel Stenberg
0251563c98 report pid back in the WE ROOLZ message 2003-03-15 16:39:15 +00:00
Daniel Stenberg
72673a351b removed the "banner" when the server is starting 2003-03-15 16:05:47 +00:00
Daniel Stenberg
96b7131844 detect lack of perl before running tests 2003-03-15 15:08:55 +00:00
Daniel Stenberg
dcc2f16416 Rick Jones' minor thing to build better on HPUX 11 2003-03-15 14:47:09 +00:00
Daniel Stenberg
c2b8a04000 Nico Baggus little adjustment to build with OpenSSL 0.9.7 (the ENGINE thing) 2003-03-14 17:21:06 +00:00
Daniel Stenberg
d65587b06c improved "deeper" check 2003-03-14 12:44:56 +00:00
Daniel Stenberg
1ab0134522 No longer halts operation if select or socket are missing, since in most
cases this is wrong... and if they're truly missing, we won't succeed to
link later on anyway.
2003-03-13 23:02:33 +00:00
Daniel Stenberg
afffce80f0 Philippe Raoult needed this to build on FreeBSD 2003-03-13 21:41:02 +00:00
Daniel Stenberg
70b80b0160 Extra function-find magic for platforms that don't like the way the
default AC_CHECK_FUNCS() works. HPUX 11 is one of them.
2003-03-13 17:06:55 +00:00
Daniel Stenberg
64067a04b5 output what cvs returned, see if we can make the script bail out when
cvs update fails
2003-03-13 15:56:31 +00:00
Daniel Stenberg
38cf0268c0 use include path from BUILD dir since we want the config.h 2003-03-13 15:54:46 +00:00
Daniel Stenberg
2d2034703f Things are moving along... 2003-03-12 14:29:47 +00:00
Daniel Stenberg
571ceeff90 When we append stuff to the URL, we must make sure the text is properly
URL encoded before. Test case 58 added to verify this.
2003-03-12 14:20:16 +00:00
Daniel Stenberg
34c4ba4321 -m on curl on windows with telnet doesn't work 2003-03-12 14:14:52 +00:00
Daniel Stenberg
babb372eb9 This verifies that my fix for bug report #700275 works. 2003-03-12 14:04:23 +00:00
Daniel Stenberg
5b9b82c1cd test58 added 2003-03-12 14:03:53 +00:00
Daniel Stenberg
075c534270 improved the header checks
--enable-libgcc
check for a sed before using it
2003-03-12 13:42:07 +00:00
Daniel Stenberg
f317f8b149 Add include files to prevent warnings on some (HPUX) systems. 2003-03-12 08:54:45 +00:00
Daniel Stenberg
f8d552dde5 include "config.h" from the lib's private dir 2003-03-12 08:54:11 +00:00
Daniel Stenberg
606f72bb13 Setup include path to the lib dir to enable inclusion of "config.h" 2003-03-12 08:53:44 +00:00
Daniel Stenberg
327e6a2b4f Made set_local_option() properly static as reported by Rick Jones 2003-03-12 08:44:00 +00:00
Daniel Stenberg
be8f6c7f5c Prefix defines and symbols with CURL_ to reduce the risk of colliding with
various system's other defines.
2003-03-12 08:40:45 +00:00
Daniel Stenberg
073448e0ea support a few more common typedefs 2003-03-12 08:11:24 +00:00
Daniel Stenberg
f136f435b5 Massive use of AC_HELP_STRING() all over makes the --help output so much
nicer!
2003-03-12 08:07:30 +00:00
Daniel Stenberg
ebea6b487b another week of changes, especially libtool gave us an adventure to remember 2003-03-11 19:22:10 +00:00
Daniel Stenberg
e5b7dc56e6 syntax error 2003-03-11 19:12:07 +00:00
Daniel Stenberg
c2d8025a0a Christophe Demory fixed the check to work better for non-blocking on HP-UX
systems. Bug report #701749.
2003-03-11 19:07:41 +00:00
Daniel Stenberg
853e240e1d Use ssize_t instead of 'int' to make the 64 bit sparc compiler happier.
Fix by Richard Gorton.
2003-03-11 18:58:21 +00:00
Daniel Stenberg
8755a6d1ac Richard Gorton improved the random_the_seed() function for systems where
we don't find/know of a good random source. This way, we get a better
randomness which in turn should make SSL connections more secure.
2003-03-11 18:55:34 +00:00
Daniel Stenberg
9f723061cb don't check for netinet/if_ether.h, we don't include it and it causes
configure warnings on many systems
2003-03-11 17:16:02 +00:00
Daniel Stenberg
652683fc04 Martin C. Martin's fix to produce an error message in case of failure
in the Curl_is_connected() function.
2003-03-11 16:28:23 +00:00
Daniel Stenberg
83a463891d added things to fix at the next major release/change 2003-03-10 20:46:54 +00:00
Daniel Stenberg
957b618fdc clarify 2003-03-10 20:43:59 +00:00
Daniel Stenberg
ebe5191b63 no the data is not freed, this is left for the app to do when needed 2003-03-10 17:01:11 +00:00
Daniel Stenberg
c426234df7 AAAARG
libtool 1.4.3 is scary as hell and caused just about every build on all sorts
of platforms to stop working, thanks to the fact that it requires a SED
variable somehow set by the configure script. It works fine on my linux
running autoconf 2.57 and automake 1.7 but others seem not to do as fine.

Reverting back to the ltmain.sh we had previously, which I believe is 1.4.2
including handmade patches for FreeBSD.

ALERT ALERT ALERT before we try 1.4.3 or similar versions again, check the
${SED} stuff and similar carefully.
2003-03-10 14:52:33 +00:00
Daniel Stenberg
8c3a10392e Include sys/types.h as well. Ray DeGennaro reports successful compiling on
AIX when this fix is applied and I cannot see how this will break any
systems.
2003-03-10 12:25:32 +00:00
Daniel Stenberg
d0e44946e9 figure out the path to a 'sed' as otherwise libtool gets crazy 2003-03-07 13:36:32 +00:00
Daniel Stenberg
46a593d968 libtoolize 1.4.3 brought these 2003-03-07 09:03:09 +00:00
Daniel Stenberg
eb0cc34951 Removed define, risc os build, POST-GET bug fixed, AIX 4.3 problems solved
and two makefiles fixed.
2003-03-04 06:41:50 +00:00
Daniel Stenberg
1c7dfda4bc output the md5sum as the last step 2003-03-03 23:26:39 +00:00
Daniel Stenberg
3c0e4a2fa1 Added share.obj 2003-03-03 22:39:34 +00:00
Daniel Stenberg
c753072ae1 moved the disable-thread warning to the switch code so that the AIX 4.3
automatic disable won't cause a warning
2003-03-03 22:31:58 +00:00
Daniel Stenberg
243942a7aa Detect AIX 4.3 or later, and if found disable the check for the thread-safe
*_r() functions as they're not needed (and in fact mess things up for us).
Brought to our attention by the friendly Troels Walsted Hansen in bug report
#696217.
2003-03-03 22:30:25 +00:00
Daniel Stenberg
8d5ac8b43c AIX 4.3 or later should use gethostbyname() and not the *_r() version. 2003-03-03 22:23:48 +00:00
Daniel Stenberg
17962b3d2e Added typecast to please the MSVC compiler. 2003-03-03 06:45:27 +00:00
Daniel Stenberg
f70acd5979 another typecast added to please the borland compiler 2003-03-03 06:42:52 +00:00
Daniel Stenberg
ffe5c46224 Add (void) on our uses of the swrite() macro when we don't read the return
code as this causes compiler warnings. We *should* fix the code to deal with
the return codes instead...
2003-03-03 06:40:36 +00:00
Daniel Stenberg
3242ea5f66 Init postdata properly before issuing a request, so that there isn't any
lingering POST-stuff that confuses GET requests. Juan F. Codagnone reported
this problem in bug report #653859.
2003-03-02 17:43:42 +00:00
Daniel Stenberg
39a282bffc moved a variable declaration to remove a compiler warning with the MSVC
compiler, mentioned by Andi Jahja
2003-03-02 17:20:59 +00:00
Daniel Stenberg
29583004ce include the engine stuff 2003-02-28 15:50:05 +00:00
Daniel Stenberg
0a1a185874 Andres Garcia Garcia updated to build with the most recent OpenSSL and
the recent libcurl changes.
2003-02-28 15:49:32 +00:00
Daniel Stenberg
8f809e2a93 James Bursa made it compile on RISC OS as well. 2003-02-28 13:11:10 +00:00
Daniel Stenberg
f216059b49 James Bursa wrote a section about cross-compiling for RISC OS 2003-02-28 13:10:54 +00:00
Daniel Stenberg
9121b1f41d the strequal and strnequal should now be called with the proper curl_ prefix 2003-02-28 12:20:08 +00:00
Daniel Stenberg
60e015d0c1 Removed the defines for strequal() and strnequal(). 2003-02-28 12:17:56 +00:00
Daniel Stenberg
7e049fca61 recent stuff 2003-02-28 08:40:57 +00:00
Daniel Stenberg
0f0e4de6a4 mention what happens if size is set to -1 2003-02-28 07:55:01 +00:00
Daniel Stenberg
bc1102922b spell out that POSTFIELDS should be url-encoded in most cases 2003-02-28 07:53:31 +00:00
Daniel Stenberg
a3d3642a30 spell better 2003-02-27 23:10:38 +00:00
Daniel Stenberg
248eb47329 Updated to better reflect reality. Also shows what the CURLMsg struct
looks like.
2003-02-27 14:25:54 +00:00
Daniel Stenberg
52ebf50607 It appears that there are FTP-servers that return size 0 for files
when SIZE is used on the file while being in BINARY mode. To work
around that (stupid) behavior, we attempt to parse the RETR response
even if the SIZE returned size zero.

Debugging help from Salvatore Sorrentino on February 26, 2003.
2003-02-27 12:50:54 +00:00
Daniel Stenberg
d34a4b126e test138 is for RETR without size and without a working SIZE 2003-02-26 17:09:47 +00:00
Daniel Stenberg
de96719a45 support <size>-1</size> to completely disable the SIZE command 2003-02-26 17:05:36 +00:00
Daniel Stenberg
9876ed09fe added support for RETRNOSIZE in the control file to tell RETR to not
include size in the 150-reply
2003-02-26 16:57:00 +00:00
Daniel Stenberg
17cbbe3dc5 added a test case for RETR that doesn't get the size in the 150-reply 2003-02-26 16:56:00 +00:00
Daniel Stenberg
b995af17eb added index.html 2003-02-26 13:46:38 +00:00
Daniel Stenberg
6589579850 random updates 2003-02-26 13:01:29 +00:00
Daniel Stenberg
5ddc260fc2 No longer loop to read multiple times before returning back from the transfer
function, as this could easily end up looping for a very long time (more or
less until the whole transfer was done) and no library-using app would want
that.

Found thanks to a report by Kyle Sallee.
2003-02-26 12:42:25 +00:00
Daniel Stenberg
db5c9cd4c0 updated, now features less mentions about older versions 2003-02-25 08:52:32 +00:00
Daniel Stenberg
9b6d010aef better sslcerts link 2003-02-25 08:36:29 +00:00
Daniel Stenberg
065b87e949 7.10.4-pre2 commit 2003-02-24 18:14:48 +00:00
Daniel Stenberg
a6206a3aef Fixes to bring back the "Expect: 100-continue" functionality. If the
header is used, we must wait for a 100-code (or timeout), before we send the
data. The timeout is merely 1000 ms at this point. We may have reason to set
a longer timeout in the future.
2003-02-24 16:53:53 +00:00
Daniel Stenberg
30639ed72b Kjetil Jacobsen found out that setting CURLOPT_MAXCONNECTS to a value higher
than 5 could cause a segfault.
2003-02-24 14:50:20 +00:00
Daniel Stenberg
9d02a39e13 fixed language for limit-rate 2003-02-24 13:28:32 +00:00
Daniel Stenberg
fc0af0d4d1 daily was weekly, added a little thing about feb 2003 2003-02-24 08:18:17 +00:00
Daniel Stenberg
a683416081 added an EXAMPLE section 2003-02-21 15:19:50 +00:00
Daniel Stenberg
9faf57ee8d how to disable FTP PORT 2003-02-17 23:23:11 +00:00
Daniel Stenberg
44b9ccb7e2 This script clearly fails to remove the build dir at times when it exits,
so we now remove everything matching "build-*" when the script starts.
2003-02-17 09:15:26 +00:00
Daniel Stenberg
de003d9cf8 mention --trace and --trace-ascii in the -v/--verbose section to remind
people how to get even more details shown
2003-02-17 09:02:51 +00:00
Daniel Stenberg
8a2a523c70 mention more cacert magic 2003-02-14 22:28:12 +00:00
Daniel Stenberg
c3dfe50aaf Fix Curl_is_connected() even more to deal with waitconnect() return codes
even better (also based on input from Martin).
2003-02-14 09:11:51 +00:00
Daniel Stenberg
9659d862c7 Matthew Clarke built curl on AIX 3.2.5 2003-02-14 09:06:07 +00:00
Daniel Stenberg
69ab4cd391 include <sys/socket.h> to compile the fd_set stuff properly on all systems 2003-02-14 09:03:03 +00:00
Daniel Stenberg
498f3985b3 geterrno() renamed to ourerrno() to prevent the name clash that occurred in
AIX 3.2.5 and possibly other OSF-like system headers.
2003-02-14 09:01:01 +00:00
Daniel Stenberg
977175d4fd Martin C. Martin's fix for multi-interface connects to non-listening ports. 2003-02-14 08:02:55 +00:00
Daniel Stenberg
3ddc7b9390 Christopher R. Palmer fixed Curl_base64_encode() to deal with zeroes in the
data to encode.
2003-02-13 18:30:10 +00:00
Daniel Stenberg
10e9bf623f language 2003-02-08 14:36:18 +00:00
Daniel Stenberg
48a5c64e94 include stdarg.h since we use va_* stuff 2003-02-06 19:28:17 +00:00
Daniel Stenberg
140606ccd5 I made curl run fine on a XScale/PXA250 2003-02-05 08:09:33 +00:00
Daniel Stenberg
f56d006f93 Re-arranged the SSL connection code (again). The recent fix was not a very
good one. This should work fine again.
2003-02-05 07:43:05 +00:00
Jean-Philippe Barette-LaPierre
beb13a1d3e added the sharing of DNS cache 2003-02-04 23:48:46 +00:00
Daniel Stenberg
fa47138327 VMS has setjmp.h 2003-02-04 22:28:36 +00:00
Daniel Stenberg
9421d4510a Nico Baggus updated build script for VMS 2003-02-04 22:28:19 +00:00
Daniel Stenberg
ff8abfca85 assume zlib 1.1.4 - pointed out by Kevin Roth 2003-02-04 18:24:55 +00:00
Daniel Stenberg
5c858965b8 HAVE_LIBZ is the actual name of the define we use 2003-02-04 18:23:41 +00:00
Daniel Stenberg
e3f83cb17a make it more obvious what this is by not even trying to show a manual 2003-02-04 18:22:31 +00:00
Daniel Stenberg
de6008e01a James Bursa corrected a bad comment 2003-02-04 18:12:41 +00:00
Daniel Stenberg
6417e696df fixes during the last couple of days 2003-02-04 12:33:13 +00:00
Daniel Stenberg
5d28f3781b Improved error reporting in case of bad SSL_connect()s, and we also no
longer use the SSL functions that store the error message in a static buffer
since that is not very multi-thread friendly.
2003-02-04 12:29:57 +00:00
Daniel Stenberg
10026bb62e scan through the PATH as well, to find stunnel 2003-02-03 22:15:33 +00:00
Daniel Stenberg
21c16f923c Julian Noble pointed out that capath is indeed working fine on Windows
these days since the c_rehash tool is written (fixed) to do the proper
action even on file systems that don't support symlinks.
2003-02-03 21:36:52 +00:00
Daniel Stenberg
32cef52f0d Kevin Roth corrected the zlib stuff to work better. 2003-01-31 07:07:28 +00:00
Daniel Stenberg
e7dd7c54ff don't check for the CA cert bundle if --insecure is used 2003-01-30 14:48:07 +00:00
Daniel Stenberg
b0b50bd12a typecast the argument to isspace() to an int to prevent warnings on some
compilers
2003-01-30 06:06:24 +00:00
Daniel Stenberg
f2c6057490 curl now uses stricter VERIFYHOST by default and only uses a lesser check
if --insecure is used. Reported by Hamish Mackenzie.
2003-01-30 05:15:57 +00:00
Daniel Stenberg
169b2eeb94 Fixes bug #669059. We now extract the Content-Type better and more accurate. 2003-01-30 05:04:02 +00:00
Daniel Stenberg
f81d027f60 test case 57 - verifies that the Content-Type extraction does not stop on
the first space anymore but cuts off the trailing spaces only.

Bug report #669059.
2003-01-30 05:03:19 +00:00
Daniel Stenberg
872eeb7339 changes from the last week or so 2003-01-29 13:56:04 +00:00
Daniel Stenberg
7f67a28c2a HAVE_WRITABLE_ARGV is set if argv[] is writable on the system, and then
we attempt to hide some of the more sensitive command line arguments
2003-01-29 13:16:03 +00:00
Daniel Stenberg
30a46e1135 John McGowan found a problem where the DEBUGFUNCTION was called with bad
data on uploads.
2003-01-29 12:52:45 +00:00
Daniel Stenberg
3a01478ce8 add the new emacs file and removed the former one 2003-01-29 12:15:16 +00:00
Daniel Stenberg
53d71fab60 example showing what a .emacs using curl-style.el could look like, thanks
to Mats Lidell for awesome elisp hacking!
2003-01-29 12:14:37 +00:00
Daniel Stenberg
be891f112c this is the former emacs file we no longer use, go with curl-style.el and
be happy!
2003-01-29 11:55:16 +00:00
Daniel Stenberg
89934239d7 reset conn->size to -1 on the ftp-do function to make it not go on to
ftp_done() with the previous transfer's value, as Dave Halbakken found out.
He also verified that this fix corrected the problem.
2003-01-29 10:54:39 +00:00
Daniel Stenberg
8986037fdd previous changes 2003-01-29 10:17:10 +00:00
Daniel Stenberg
a7c72b7abf removed the local variables for emacs and vim, use the new sample.emacs
way for emacs, and vim users should provide a similar non-polluting style
2003-01-29 10:14:20 +00:00
Daniel Stenberg
409ac80710 removed weirdo {{{ and }}} comments
removed emacs local-variables stuff
2003-01-29 10:12:06 +00:00
Daniel Stenberg
fc7bebdf55 the README.curl is named MANUAL these days 2003-01-28 16:33:05 +00:00
Daniel Stenberg
ca52549557 revised and better 2003-01-28 08:03:13 +00:00
Daniel Stenberg
8948a65654 removed -Wcast-align from --enable-debug with gcc, it just gives too many
warnings that I can't be concerned about at this point.
2003-01-27 14:26:06 +00:00
Daniel Stenberg
b4e33cfcc7 Removed the long-living compiler warnings on the des_pcbc_encrypt() function
calls!
2003-01-27 14:19:22 +00:00
Daniel Stenberg
86742e8334 tests that were not run due to restraints (the netrc-tests) were counted
as skipped twice, and thus the total number of tests appeared wrong
2003-01-27 13:51:35 +00:00
Daniel Stenberg
173b35eaf8 made it work
made it cause less compiler warnings
made it require 7.9.7 to build
2003-01-27 10:25:20 +00:00
Daniel Stenberg
2b054e5309 Bertrand Demiddelaer found and fixed this memory leak. 2003-01-24 11:13:59 +00:00
Daniel Stenberg
a302ff1605 string.h keeps the proto for memset() on some platforms, used for FD_ZERO 2003-01-23 19:41:30 +00:00
Daniel Stenberg
f7bb4e6138 added a default to the switch() in order to prevent a compiler warning 2003-01-23 12:00:15 +00:00
Daniel Stenberg
5c5489916b fix the configure option query 2003-01-23 07:37:21 +00:00
Daniel Stenberg
5627cf7167 mention what kind of error you may get if this is not followed 2003-01-23 06:15:26 +00:00
Daniel Stenberg
c05dae4a68 spell 2003-01-23 06:09:35 +00:00
Daniel Stenberg
57e61e3743 This is the new Emacs style for curl hacking, based on work written by
Mats Lidell in project Rockbox.
2003-01-23 06:00:02 +00:00
Daniel Stenberg
a6c395c156 Duncan Wilcox reported a crash with --interface on FreeBSD when ipv6-enabled
and this has been verified to correct the problem.
2003-01-23 05:38:20 +00:00
Daniel Stenberg
543e0b1e0f oops, broken comment fixed 2003-01-22 18:50:51 +00:00
Daniel Stenberg
64b0ff875f extern C this to work in C++ conditions 2003-01-22 18:30:58 +00:00
Daniel Stenberg
a034208a00 reversed the actions on the cmp check for detecting if we're re-running
a test on the same CVS setup as previous, as they seemed to be wrong.

We're not actually using the result for anything at this point though.
2003-01-22 12:29:19 +00:00
Daniel Stenberg
5f1251586b use LANG set to C to prevent localized dates etc 2003-01-22 09:46:33 +00:00
Daniel Stenberg
6f6cffdc32 pass the options to configure properly 2003-01-22 07:57:52 +00:00
Daniel Stenberg
21a98ef264 check for empty confopts before asking for it 2003-01-22 07:41:35 +00:00
Daniel Stenberg
aa90436435 put the configure options in the setup file as well
make -i
show lib/config.h
and some initial checks to prevent this running multiple times without the
CVS having changed
2003-01-22 06:59:52 +00:00
Daniel Stenberg
61225052f9 updated copyright years 2003-01-21 17:25:58 +00:00
Daniel Stenberg
dee3163d95 when a chunked error is noticed, store the error number in the error string
to enable better error-tracking
2003-01-21 16:03:38 +00:00
Daniel Stenberg
8b0668b99e skip the chmod 2003-01-21 15:09:20 +00:00
Daniel Stenberg
8471a82c85 run 'make test-full' instead of 'make test' to get more details in case of
errors
2003-01-21 10:36:35 +00:00
Daniel Stenberg
ed4dff63b9 make test-full in the root dir should run verbose tests but not stop on
single failures
2003-01-21 10:35:34 +00:00
Daniel Stenberg
838e776542 use 'make test-full' instead of only 'make test' as it gives a lot of more
info in case of failures
2003-01-21 10:33:29 +00:00
Daniel Stenberg
39c12790bc Added a 'test-full' target to run the tests in verbose mode. 2003-01-21 10:32:40 +00:00
Daniel Stenberg
126e6d6645 pass srcdir to the ftps-server as well 2003-01-21 10:29:06 +00:00
Daniel Stenberg
5796a1b282 runtests.pl now passes the sourcedir path to the httpsserver.pl script 2003-01-21 10:14:25 +00:00
Daniel Stenberg
90982529fc automake 1.5 should be enough 2003-01-21 09:36:15 +00:00
Daniel Stenberg
aba51d6b60 use process id in build directory name to do better 2003-01-20 20:20:51 +00:00
Daniel Stenberg
49bc4567bb first attempt at script for distributed testing on various unix hosts 2003-01-20 20:07:49 +00:00
Daniel Stenberg
2ac52705c6 output summary with easily identifiable string prefixes 2003-01-20 15:43:50 +00:00
Daniel Stenberg
d0eb56dd97 made this script detect proper versions of the tools we need to build a full
curl on a unix host from CVS
2003-01-20 15:24:54 +00:00
Daniel Stenberg
b9c60df04b added description in all AC_DEFINE() calls 2003-01-20 15:16:56 +00:00
Daniel Stenberg
8c236e4dfa not used anymore 2003-01-20 15:16:17 +00:00
Daniel Stenberg
154a59f21f Five more names we owe a big THANKS for their donations to the project. 2003-01-20 14:49:57 +00:00
Daniel Stenberg
9689e1c548 today's patches and Markus' correction 2003-01-20 14:40:06 +00:00
Daniel Stenberg
5a83976c99 Markus F.X.J. Oberhumer's patch that reduces memory usage quite a bit by
only allocating the scratch memory buffer once it is needed and not always
in the handle.
2003-01-20 12:52:34 +00:00
Daniel Stenberg
b5276a9a69 given passwords in netrc must be respected accordingly 2003-01-20 12:00:46 +00:00
Daniel Stenberg
30377baa5e steps I *MUST* perform when I release a package 2003-01-20 11:29:40 +00:00
Daniel Stenberg
aa8b7dd336 reverted bad header replacement 2003-01-16 21:10:10 +00:00
Daniel Stenberg
f26a338a54 copyright year update in the source header 2003-01-16 21:08:12 +00:00
Daniel Stenberg
c4383f1d99 fixes Marcus brought 2003-01-16 21:07:50 +00:00
Daniel Stenberg
4527995e66 Allow CURLINFO_PRIVATE to be NULL, patch by Markus Oberhumer 2003-01-16 10:59:53 +00:00
Daniel Stenberg
b0fbb98f41 Markus Oberhumer fixed the -cflags option 2003-01-16 10:58:49 +00:00
Daniel Stenberg
63667dfd96 no TABs in source code 2003-01-15 11:44:33 +00:00
Daniel Stenberg
77c388c928 removed a TAB 2003-01-15 11:43:03 +00:00
Daniel Stenberg
a69b814ded Kevin fixed the bad list address 2003-01-15 08:04:09 +00:00
Daniel Stenberg
c51ada766d previous legal file, no longer accurate nor used 2003-01-14 12:55:08 +00:00
Daniel Stenberg
ef2709f97c COPYING is the name of the file 2003-01-14 12:54:11 +00:00
Daniel Stenberg
bf9b9ca29d 7.10.3 commit 2003-01-14 12:42:26 +00:00
Daniel Stenberg
64f224bb22 more 2003-01-13 12:08:39 +00:00
Daniel Stenberg
285a8fe4d0 there is SOCKS support these days 2003-01-13 06:35:31 +00:00
Daniel Stenberg
3773d76dfd Steve Oliphant pointed out that test case 105 did not work anymore and this
was due to a missing fix for the password prompting
2003-01-10 16:19:32 +00:00
Daniel Stenberg
94c5c7bd6d added test 136 2003-01-09 16:48:51 +00:00
Daniel Stenberg
12cfc4c0b0 verify -u username: with ftp to use a blank password 2003-01-09 16:47:55 +00:00
Daniel Stenberg
9a2de6e6ee if userpwd is "username:", this now implies a blank password while only
"username" will cause libcurl to prompt for password. Bryan Kemp noticed.

test case 136 is added for this
2003-01-09 16:47:09 +00:00
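
A small sketch of the behavior the entry above describes, using the standard easy
interface; the FTP URL is a made-up example:

    #include <curl/curl.h>

    int main(void)
    {
      CURL *curl = curl_easy_init();
      if(curl) {
        curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/file.txt"); /* hypothetical */
        /* trailing colon: user "username" with an explicitly blank password */
        curl_easy_setopt(curl, CURLOPT_USERPWD, "username:");
        /* with just "username" (no colon), the password is prompted for
           instead, per the entry above */
        curl_easy_perform(curl);
        curl_easy_cleanup(curl);
      }
      return 0;
    }
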
Daniel Stenberg
2ede47b8c8 Wai (Simon) Liu provided the HTTP200ALIASES paragraph. 2003-01-09 15:04:55 +00:00
Daniel Stenberg
76e107506f Philippe Raoult's added note for HTTPHEADER 2003-01-09 14:58:54 +00:00
Daniel Stenberg
6f35ed51dc This fixed yet another connect problem with the multi interface and ipv4
stack. Kjetil Jacobsen reported and verified the fix.
2003-01-09 14:52:51 +00:00
Daniel Stenberg
c94ba66310 removed 2003-01-09 11:57:50 +00:00
Daniel Stenberg
a15133f5cf removed unused code 2003-01-09 11:50:34 +00:00
Daniel Stenberg
cc09e9d4c2 fix 2003-01-09 11:43:08 +00:00
Daniel Stenberg
16e0da2c4b call curl_multi_perform() correctly 2003-01-09 11:42:07 +00:00
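
For context, a hedged sketch of a correct curl_multi_perform() calling pattern of that
era: call again as long as it returns CURLM_CALL_MULTI_PERFORM, then loop until no
transfers remain; the URL is hypothetical and select()-based waiting is omitted here:

    #include <curl/curl.h>

    int main(void)
    {
      CURL *easy = curl_easy_init();
      CURLM *multi = curl_multi_init();
      int running = 0;

      curl_easy_setopt(easy, CURLOPT_URL, "http://example.com/"); /* hypothetical */
      curl_multi_add_handle(multi, easy);

      do {
        /* call again right away as long as libcurl asks for it */
        while(curl_multi_perform(multi, &running) == CURLM_CALL_MULTI_PERFORM)
          ;
      } while(running);

      curl_multi_remove_handle(multi, easy);
      curl_easy_cleanup(easy);
      curl_multi_cleanup(multi);
      return 0;
    }
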
Daniel Stenberg
ed22f75241 proper indent 2003-01-09 11:31:49 +00:00
Daniel Stenberg
ba25cad6e2 pass a file name to memanalyze to read from instead of using stdin 2003-01-09 11:26:57 +00:00
Daniel Stenberg
abb01123cb share.h is now a used header file 2003-01-09 11:19:51 +00:00
Daniel Stenberg
e2d249f8c5 fixed to deal with file names that contain colons, as in Windows 2003-01-09 11:03:02 +00:00
Daniel Stenberg
4a2ac166fa 7.10.3-pre4 2003-01-09 10:36:24 +00:00
Daniel Stenberg
5fab55383d rename the curl share error enum prefix 2003-01-09 10:26:29 +00:00
Daniel Stenberg
f152f23a68 Updated more and now looks, and the API possibly works, almost like what the
design document specifies. There is still no code inside that uses this.
2003-01-09 10:21:03 +00:00
Daniel Stenberg
24e78b3571 7+8 jan 2003 2003-01-09 09:53:08 +00:00
Daniel Stenberg
9a239edb52 updated to use the modified share-types 2003-01-08 15:50:52 +00:00
Daniel Stenberg
abcc5c5a82 cleaned up the share data types and prototypes to be more in line with what
the design draft mentioned and what I think is fit
2003-01-08 15:50:06 +00:00
Daniel Stenberg
cb5ba675a7 mkdir() fix for win32 2003-01-08 15:04:42 +00:00
Daniel Stenberg
2288086695 nah, include test.h instead 2003-01-08 09:37:35 +00:00
Daniel Stenberg
61421b7a8f include curl.h without directory 2003-01-08 09:33:19 +00:00
Jean-Philippe Barette-LaPierre
6a7e53a7c7 fixed a very, very rare and very, very little memory leak 2003-01-08 02:27:47 +00:00
Daniel Stenberg
ca134d5522 Philippe Raoult's fix to handle wildcard certificate name checks 2003-01-07 16:33:11 +00:00
Daniel Stenberg
ec24efda74 Simon Liu's HTTP200ALIASES-patch! 2003-01-07 16:15:53 +00:00
Daniel Stenberg
7f0f10e498 stuff 2003-01-07 15:40:01 +00:00
Daniel Stenberg
aa5af100b4 clarified error code 19 2003-01-07 15:39:38 +00:00
Daniel Stenberg
37ae32f688 Only output valid filetime.
Return file-error if 550 is returned when trying MDTM
2003-01-07 11:25:44 +00:00
Daniel Stenberg
d0cffdec5d when sending an error message to the debugfunction, we append a newline so
that the output looks better
2003-01-07 11:23:52 +00:00
Daniel Stenberg
0f34521612 fixed the create_dir_hierarchy() to not use uninited memory, as noticed by
Matthew Blain.
2003-01-07 09:35:57 +00:00
Daniel Stenberg
e69362df22 Matthew Blain's improvements for debug builds 2003-01-07 09:31:45 +00:00
Daniel Stenberg
3de8f6f38e better ignore 2003-01-07 09:30:05 +00:00
Daniel Stenberg
5359bc8083 ignore lib504 too 2003-01-07 09:27:32 +00:00
Daniel Stenberg
eb6a14fe10 updated 2003-01-07 07:54:14 +00:00
Daniel Stenberg
2912537533 indent fix 2003-01-06 12:41:33 +00:00
Sterling Hughes
cfb32da198 fix bug (?) :-)
previously, if you called curl_easy_perform and then set the global dns
cache, the global cache wouldn't be used.  I don't see this really happening
in practice, but this code allows you to do it.
2003-01-06 06:17:15 +00:00
Daniel Stenberg
9b4f92130f return -1 even if SSL_pending() doesn't return non-zero, as we don't really
care how many bytes are readable NOW. Philippe Raoult reported the
bug in 7.10.3-pre3.
2002-12-29 16:27:31 +00:00
Daniel Stenberg
5a2ab686a6 Marc Herbert's suggestion: mention that insecure is ignored if cacert or capath
is used.
2002-12-29 16:23:52 +00:00
Daniel Stenberg
3b8583b014 example configure command line 2002-12-20 16:00:56 +00:00
Daniel Stenberg
ed29552b1e Use AM_MAINTAINER_MODE which thus makes less maintainer stuff in the default
makefile when --enable-maintainer-mode is not used.
2002-12-20 15:54:24 +00:00
Daniel Stenberg
a2ada3cf96 7.10.3-commit 2002-12-20 09:03:38 +00:00
Daniel Stenberg
88825a1187 fixes 2002-12-19 16:37:07 +00:00
Daniel Stenberg
264e7fc58b removed fruitless attempts to overload some targets 2002-12-19 16:36:35 +00:00
Daniel Stenberg
1698015e3c Curl_base64_decode() fixed by Matthew B 2002-12-19 16:02:51 +00:00
Daniel Stenberg
39dc14c002 Fixed the usage of SSL_read() to properly return -1 if the EWOULDBLOCK
situation occurs, which it previously didn't!

This was reported by Evan Jordan in bug report #653022.

Also, if ERROR_SYSCALL is returned from SSL_write(), include the errno number
in the error string for easier error detection.
2002-12-19 15:45:15 +00:00
Daniel Stenberg
04c499a5fc CURLOPT_DNS_USE_GLOBAL_CACHE is not thread-safe 2002-12-19 15:22:36 +00:00
Daniel Stenberg
efbe930a69 CURLE_HTTP_NOT_FOUND => CURLE_HTTP_RETURNED_ERROR 2002-12-18 16:51:02 +00:00
Daniel Stenberg
747f87f61e Removed weird special multi interface condition that caused bug report
#651464.
2002-12-17 10:05:00 +00:00
Daniel Stenberg
5a4c56fc44 don't install the test programs 2002-12-17 09:40:13 +00:00
Daniel Stenberg
81f45ba92a writefunction data is not zero terminated 2002-12-16 17:33:21 +00:00
Daniel Stenberg
a5dc4e32f2 removed junk 2002-12-16 15:32:37 +00:00
Daniel Stenberg
2b839853ec Added test case 504, using multi interface and a local proxy without anything
listening on the port we use.
2002-12-16 15:30:10 +00:00
Daniel Stenberg
66b6cd68ed better desc 2002-12-16 15:05:31 +00:00
Daniel Stenberg
0ef3d90838 mistake, this only requires http 2002-12-16 14:50:10 +00:00
Daniel Stenberg
5cc50f9b27 the hostip.c commit 2002-12-16 11:40:57 +00:00
Daniel Stenberg
e879e26a5b EAGAIN on older (correct) glibc versions indicates a problem and not the need
for a bigger buffer and this is indeed badness for us. Making this work
on both old and new glibc versions requires an ugly loop that in its worst
form causes 45 bad loops when using the correct glibc and a non-resolving
host name... :-/

We want a better fix. Badly.
2002-12-16 11:33:44 +00:00
Daniel Stenberg
96d84150e1 changes from last week 2002-12-16 10:55:18 +00:00
Daniel Stenberg
2aa0c6c488 cut off -O properly when building for debug
setup the Makefile in tests/libtest/
2002-12-16 10:31:25 +00:00
Daniel Stenberg
811138386f documented the %-variables 2002-12-13 16:25:39 +00:00
Daniel Stenberg
c433cf7459 fixed another space issue 2002-12-13 16:24:57 +00:00
Daniel Stenberg
e0d6ebc2f2 please mr CVS ignore these 2002-12-13 16:24:04 +00:00
Daniel Stenberg
4938991ab8 set up arg2 to point to argv[2] to be used at will by programs 2002-12-13 16:22:57 +00:00
Daniel Stenberg
13722f536e added 503 2002-12-13 16:22:17 +00:00
Daniel Stenberg
57f0e3292d used this to verify bug report 651460 2002-12-13 16:21:18 +00:00
Daniel Stenberg
da5ae565ab added support for CONNECT, both good and bad 2002-12-13 16:20:07 +00:00
Daniel Stenberg
87c5066242 test case 503 entered the dir 2002-12-13 16:17:27 +00:00
Daniel Stenberg
b528bde470 conn->bits.tcpconnect now keeps track of if this connection is connected
or not
2002-12-13 16:15:19 +00:00
Daniel Stenberg
57572e550f include files without the curl/ to reduce the risk of us including the wrong
set of include files during tests
2002-12-13 14:14:35 +00:00
Daniel Stenberg
3aea0d3d68 Evan Jordan's fix for a memory leak. Bug report 650989. 2002-12-13 14:08:49 +00:00
Daniel Stenberg
9ae920c1b6 make a little work-around for file:// in _is_connected() and voila, now the
multi interface works with file:// URLs fine (previously it crashed). This
won't make it work on Windows though...
2002-12-13 13:47:58 +00:00
Daniel Stenberg
dff406a360 one slash too many 2002-12-13 13:41:28 +00:00
Daniel Stenberg
d346ba5c3c lib502.c for multi interface tests on a single URL without select() 2002-12-13 13:40:25 +00:00
Daniel Stenberg
978541adc2 test 502, multi interface with file:// 2002-12-13 13:39:39 +00:00
Daniel Stenberg
637bce2707 bail out on crap received, makes test case 402 *NOT* ruin the test series
anymore!
2002-12-12 18:07:10 +00:00
Daniel Stenberg
07e3dc2ee2 missing space added, now runs old tests fine again 2002-12-12 16:46:45 +00:00
Daniel Stenberg
ead065d803 remove test piece 2002-12-12 13:44:26 +00:00
Daniel Stenberg
0150bff7b4 make ftps and https invoke both necessary servers 2002-12-12 13:42:21 +00:00
Daniel Stenberg
0f493b6038 fixes 2002-12-12 13:40:16 +00:00
Daniel Stenberg
f26b709c50 link the test tools this way instead 2002-12-12 13:39:02 +00:00
Daniel Stenberg
ae10d9cf22 no more 2002-12-12 13:36:50 +00:00
Daniel Stenberg
81af9674ed corrected 2002-12-12 12:49:29 +00:00
Daniel Stenberg
b63df7991a new subdir added 'libtest' 2002-12-12 12:20:33 +00:00
Daniel Stenberg
a79990465c supports the new 'tool' and 'server' tags 2002-12-12 12:20:06 +00:00
Daniel Stenberg
ad6bd530ac describe the new sections added for (better) libcurl testing 2002-12-12 12:15:02 +00:00
Daniel Stenberg
c1b369fd4c 500 + 501 added 2002-12-12 12:13:18 +00:00
Daniel Stenberg
01fcd3c2d5 run tiny specific libcurl-testing tools 2002-12-12 12:12:01 +00:00
Daniel Stenberg
7196d784d3 The first ever attempts to do pure libcurl test cases 2002-12-12 12:11:16 +00:00
Daniel Stenberg
0f0aaf51e0 Deal with HTML where ' is used instead of "
Cut off name from option
2002-12-12 11:43:59 +00:00
Daniel Stenberg
b5f493c55a moved the includes to outside the extern "C" stuff
decreased the write buffer size to 16KB to perform a lot better on Windows(!)
2002-12-11 11:42:40 +00:00
Daniel Stenberg
0aa031beb9 recent fluff 2002-12-10 13:11:24 +00:00
Daniel Stenberg
db6ff224f8 The initial HTTP request can now be sent in multiple parts, as part of the
regular transfer process. This required some new tweaks, like for example
we need to be able to tell the transfer loop to not chunky-encode uploads
while we're transferring the rest of the request...
2002-12-10 13:10:00 +00:00
Daniel Stenberg
b3c7cd61f3 send_buffer is no more here 2002-12-10 13:08:22 +00:00
Daniel Stenberg
9ae05c4d91 added test56, nearly 100KB big! 2002-12-10 13:01:05 +00:00
Daniel Stenberg
264e6f6efd Test case for sending insanely big HTTP requests. Mainly done this way to
make sure that it isn't all sent off in one single send() but instead
really tests the multiple-part-send logic.
2002-12-10 13:00:32 +00:00
Daniel Stenberg
ec7bccf671 more logging, now logs the full response too, basic support for dealing
with chunked transfer-encoding uploads added
2002-12-10 12:59:16 +00:00
Daniel Stenberg
49f75ee8ce A normal POST now provides data to the main transfer loop via the usual
read callback, and thus won't put a lot of stress on the request sending
code (which currently does an ugly loop).
2002-12-09 16:05:57 +00:00
Daniel Stenberg
4bcc866c52 The fread() callback pointer and associated pointer is now stored in the
connectdata struct instead, and is no longer modified within the 'set' struct
as previously (which was a really BAAAD thing).
2002-12-09 15:37:54 +00:00
Daniel Stenberg
c65e088caf Added a default headers section and also made some minor details more
up-to-date with recent changes.
2002-12-09 14:39:01 +00:00
Daniel Stenberg
6ca4116555 better errno include and no extern 2002-12-05 19:39:17 +00:00
Daniel Stenberg
f6cdb820af read and write as much as possible until end of data or EWOULDBLOCK before
returning back to the select() loop. Consider this a test so far.
2002-12-05 14:26:30 +00:00
Daniel Stenberg
081e5a82ff deal with spaces in name and value tags a lot better! 2002-12-05 12:54:08 +00:00
Daniel Stenberg
2ad2a4bd9f changed proto for Curl_krb_kauth() 2002-12-05 11:26:20 +00:00
Daniel Stenberg
645e700da3 Solaris needs errno as an extern int. 2002-12-05 11:25:36 +00:00
Daniel Stenberg
92aea29a30 make WIN32 defined for Borland properly, as told by Alexander J. Oss 2002-12-04 11:06:17 +00:00
Daniel Stenberg
e1c01af929 called SSLCERTS now 2002-12-04 09:53:09 +00:00
Daniel Stenberg
7ef749497d 7.10.3-pre2 2002-12-04 09:09:26 +00:00
Daniel Stenberg
d72aa49126 The waiting for the 226 or 250 line expected to come after a transfer is
complete is now only made for 60 seconds and if no data was received during
those 60 seconds, we store a special error message (preparing to make this
a special error code) as this most likely means that the control connection
has died while we were transferring data.
2002-12-04 08:56:55 +00:00
Daniel Stenberg
e92bd312ec missing } 2002-12-03 12:41:10 +00:00
Daniel Stenberg
b097c2cfb0 clarified 2002-12-03 12:40:12 +00:00
Daniel Stenberg
a39cdc80b7 Jeff pointed out this flaw in the example 2002-12-03 12:34:43 +00:00
Daniel Stenberg
a47250810e -@ is no longer an official shortcut for --create-dirs 2002-12-03 11:13:12 +00:00
Daniel Stenberg
1f50f3031f don't officially use -@ for --create-dirs, only use the long form 2002-12-03 11:12:18 +00:00
Daniel Stenberg
75145dd753 clarify the DEBUGFUNCTION data not being zero terminated 2002-12-03 10:37:20 +00:00
Daniel Stenberg
d0b97f7e1f Curl_GetFTPResponse() takes a different set of parameters and now returns a
proper CURLcode. The default timeout for reading one response is now also
possible to change while running.
2002-12-03 10:25:31 +00:00
Daniel Stenberg
199a0311e2 updated to reality 2002-12-03 09:32:57 +00:00
Daniel Stenberg
fa446f860f Nicolas Berloquin's fix of his previous dir creation patch 2002-12-03 08:07:52 +00:00
Daniel Stenberg
7a74303f3c Nicolas Berloquin's description of his -@/--create-dirs fix 2002-12-02 14:40:54 +00:00
Daniel Stenberg
7d9eabb981 Nicolas Berloquin's added code for dealing with -@/--create-dirs to create
the necessary directories as specified with -o.
2002-12-02 14:37:59 +00:00
Daniel Stenberg
ff5308a5af if the PWD reply parser failed, we leaked memory 2002-12-02 07:18:24 +00:00
Daniel Stenberg
3f8ba3a986 clarified SSL_VERIFYPEER and SSL_VERIFYHOST a bit, thanks to Soren Spies 2002-12-02 06:47:16 +00:00
Daniel Stenberg
4a555de1b2 wrapped the line for PRIVATE nicer 2002-12-01 11:23:06 +00:00
Daniel Stenberg
d27e4a08f9 more to ignore 2002-12-01 11:21:36 +00:00
Daniel Stenberg
bf678a1ca9 only use Content-Length: header if not transferring data chunked 2002-12-01 11:20:41 +00:00
Daniel Stenberg
13a903de28 mention CVS-INFO for more info when checked out from CVS
removed old section about problems with old autoconfs, I don't think that
happens anymore
2002-11-30 16:00:10 +00:00
Daniel Stenberg
a3c14c031e stuff done since the 7.10.2 release 2002-11-29 08:29:21 +00:00
Daniel Stenberg
e90d528026 let the Curl_FormReader() return 0 when it reaches end of data so that the
chunked transfer works
2002-11-29 08:12:20 +00:00
Daniel Stenberg
d64dd77993 fix the hash init to call the correct dns cleanup function 2002-11-28 15:48:54 +00:00
Daniel Stenberg
113850a748 added compareheader proto 2002-11-28 15:48:23 +00:00
Daniel Stenberg
1c49a00d64 compareheader() was moved over to http.c and got a Curl_ prefix
The chunked transfer upload never stopped due to a silly add before we checked
for >0!
2002-11-28 15:46:22 +00:00
Daniel Stenberg
eef6c83503 Moved the compareheader function into this file and added Curl_ prefix
We now check if the chunked transfer-encoding header has been added "by force"
and if so, we enabled the chunky upload!
2002-11-28 15:45:06 +00:00
Daniel Stenberg
ceb5648eb7 mention how to generate patches 2002-11-28 14:07:14 +00:00
Daniel Stenberg
a0eadb76ea bad use of AM_CONDITIONAL removed and now configure runs better when used
with --disable-ipv6 --without-zlib
2002-11-28 13:29:42 +00:00
Daniel Stenberg
065852e46c execve.net is an official download mirror in HK 2002-11-27 11:59:52 +00:00
Daniel Stenberg
e5e2fb8274 Dan Becker fixed a minor memory leak on persistent connections using
FOLLOWLOCATION and CURLOPT_USERPWD.
2002-11-26 17:32:15 +00:00
Daniel Stenberg
0210b3c893 removed extra space from trace output 'Send data' 2002-11-26 17:13:30 +00:00
Daniel Stenberg
7df5677b46 fixed Curl_freeaddrinfo() to only free addrinfo, and added Curl_freednsinfo()
for freeing single dns cache entries
2002-11-26 09:41:54 +00:00
sm
2e71876b28 Removed MFC dependency in Release Build when using VC++ IDE 2002-11-26 02:12:27 +00:00
Daniel Stenberg
11576b1142 Nedelcho Stanev's work-around for SFU 3.0 2002-11-24 19:30:21 +00:00
Daniel Stenberg
ce011b8a2d bug fix for the problem Juan Ignacio Hervás discovered today 2002-11-22 16:59:40 +00:00
Daniel Stenberg
12cfb4f7ee this fix seems to make the '305 306' test case combination to run ok finally! 2002-11-22 13:48:24 +00:00
Daniel Stenberg
9e1123debe don't use curl.haxx.se 2002-11-22 07:39:15 +00:00
Daniel Stenberg
c7354142c0 dead code removal 2002-11-21 15:11:26 +00:00
Daniel Stenberg
dee84f448f new name, supports <textarea> and the <option> tags within <select> better 2002-11-21 15:09:04 +00:00
Daniel Stenberg
1607711603 4.12 Why do I get "certificate verify failed" ? 2002-11-20 19:17:43 +00:00
Daniel Stenberg
8bca5e05b8 Kjetil Jacobsen's patch that introduces CURLOPT_PRIVATE and CURLINFO_PRIVATE
for storage and retrieval of private data in the curl handle.
2002-11-20 19:11:22 +00:00
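
A small, hedged sketch of the private-pointer pair described above; the struct is a
made-up example of application data:

    #include <curl/curl.h>

    struct job { int id; };              /* hypothetical application data */

    int main(void)
    {
      struct job j = { 42 };
      char *p = NULL;
      CURL *curl = curl_easy_init();
      if(curl) {
        /* store a private pointer in the handle... */
        curl_easy_setopt(curl, CURLOPT_PRIVATE, &j);
        /* ...and read it back later, e.g. when the transfer has completed */
        curl_easy_getinfo(curl, CURLINFO_PRIVATE, &p);
        /* p now points at &j again */
        curl_easy_cleanup(curl);
      }
      return 0;
    }
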
Daniel Stenberg
f68505ee23 Karol Pietrzak pointed out that simply including the include dir in --cflags
is not a good thing, as recent gccs for example complain if it is /usr/include

Right now, we just output "" until we think of something better.
2002-11-20 19:04:34 +00:00
Daniel Stenberg
d2174da641 7.10.2 2002-11-18 22:10:06 +00:00
Daniel Stenberg
255b1e68d0 as requested, CURLE_OPERATION_TIMEDOUT is now the same as
CURLE_OPERATION_TIMEOUTED
2002-11-18 21:58:46 +00:00
Daniel Stenberg
fbee6b87f5 fflush() the trace stream on each call 2002-11-15 14:15:28 +00:00
Daniel Stenberg
3836a70f97 removed nroff mistake 2002-11-15 14:13:46 +00:00
Daniel Stenberg
e0ec9fa294 no more dllinit.o usage 2002-11-15 14:13:05 +00:00
Daniel Stenberg
80fe50590f recent fixes 2002-11-15 14:11:45 +00:00
Daniel Stenberg
ae18d1c55a attempts to filter off optimize flags when --enable-debug is used 2002-11-15 14:11:20 +00:00
Daniel Stenberg
75194373e0 language 2002-11-14 09:55:00 +00:00
Daniel Stenberg
f3875048f6 clarified that strings need to be kept around until the handle is closed or
until the pointers are set to another value
2002-11-14 09:54:10 +00:00
Daniel Stenberg
210af986ad dllinit.c is removed 2002-11-13 22:16:44 +00:00
Daniel Stenberg
c03044f492 not used and we don't have permission to distribute this! 2002-11-13 22:16:16 +00:00
Daniel Stenberg
522b85ae21 4.11 Why does my HTTP range requests return the full document? 2002-11-12 20:00:02 +00:00
Daniel Stenberg
208e56dbe9 removed dllinit.c as MSVC doesn't need it 2002-11-12 08:15:38 +00:00
Daniel Stenberg
42acb00c81 moved the bools in the connectdata struct into the substruct named
ConnectBits where the other bools already are
2002-11-11 23:03:03 +00:00
Daniel Stenberg
ca6e770837 The test for DNS cache entries left locked is now only built if
AGGRESIVE_TEST is also defined, as an addition to MALLOCDEBUG. It doesn't
work for multi interface usage and should only be used with careful
consideration.
2002-11-11 22:51:09 +00:00
Daniel Stenberg
775968003c changed header 2002-11-11 22:41:45 +00:00
Daniel Stenberg
323d3e9b5d include SSLCERTS and not UPGRADE. We leave UPGRADE a while in CVS, but it
should be removed soonish.
2002-11-11 22:38:32 +00:00
Daniel Stenberg
16f9755e73 UPGRADE was renamed into this "SSLCERTS" 2002-11-11 22:37:59 +00:00
Daniel Stenberg
66eb98bb0a unlock dns cache entries with a function call instead of a variable fiddle 2002-11-11 22:36:00 +00:00
Daniel Stenberg
299546f5c0 Dave Halbakken added curl_version_info 2002-11-11 21:57:14 +00:00
Daniel Stenberg
7be9b4c418 transfer-encoding: chunked was implemented 2002-11-11 10:00:48 +00:00
Daniel Stenberg
03c22b4576 Now supports "Transfer-Encoding: chunked" for HTTP PUT operations where the
size of the uploaded file is unknown.
2002-11-11 08:40:37 +00:00
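
A hedged sketch of the upload case the entry above refers to: an HTTP PUT where the size
is not known up front, so no CURLOPT_INFILESIZE is set; the URL and data source are
assumptions made for the example:

    #include <stdio.h>
    #include <curl/curl.h>

    static size_t read_cb(char *buf, size_t size, size_t nitems, void *stream)
    {
      /* hand data to libcurl from an ordinary FILE*, total size unknown */
      return fread(buf, size, nitems, (FILE *)stream);
    }

    int main(void)
    {
      FILE *f = stdin;                   /* assumed data source */
      CURL *curl = curl_easy_init();
      if(curl) {
        curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/upload"); /* hypothetical */
        curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
        curl_easy_setopt(curl, CURLOPT_READFUNCTION, read_cb);
        curl_easy_setopt(curl, CURLOPT_READDATA, f);
        /* no CURLOPT_INFILESIZE: with an unknown upload size this is where the
           chunked Transfer-Encoding support described above comes into play */
        curl_easy_perform(curl);
        curl_easy_cleanup(curl);
      }
      return 0;
    }
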
Daniel Stenberg
ef749fa9ce Bug report #634625 identified how curl returned timeout immediately when
CURLOPT_CONNECTTIMEOUT was used and provided a fix.
2002-11-07 08:45:10 +00:00
Daniel Stenberg
a23c92596e recent changes 2002-11-06 08:30:08 +00:00
Daniel Stenberg
abb1497c98 output all test case numbers with three digits 2002-11-06 08:29:48 +00:00
Daniel Stenberg
7a8594da43 language fix 2002-11-06 08:29:26 +00:00
Daniel Stenberg
cbf28daed9 Lehel Bernadt's fix to prevent debug messages from getting sent on errors
when debug wasn't enabled
2002-11-05 11:11:10 +00:00
Daniel Stenberg
0ff1ca30c3 ipv4-fixes for the new Curl_dns_entry struct and Curl_resolv() proto 2002-11-05 11:07:49 +00:00
Daniel Stenberg
2cff251863 Curl_resolv() now returns a different struct, and it contains a reference
counter so that the caller needs to decrease that counter when done with
the returned data.

If compiled with MALLOCDEBUG I've added some extra checking that the counter
is decreased before a handle is closed etc.
2002-11-05 10:51:41 +00:00
Daniel Stenberg
73d996bf26 Soren Spies filled in some info about Mac OS X 10.2 2002-10-31 13:25:03 +00:00
Daniel Stenberg
5bc78cb724 Disabling the DNS cache (by setting the timeout to 0) made libcurl leak
memory. Avery Fay brought the example code that proved this.
2002-10-31 13:09:11 +00:00
Daniel Stenberg
cdba92ac3c when using checkprefix(), the first argument must be the prefix! 2002-10-28 22:19:23 +00:00
Daniel Stenberg
6d28f92ffe Transfer-Encoding: needs 17 bytes passed, not 18 2002-10-28 21:52:27 +00:00
Daniel Stenberg
01387f42c5 kromJx@crosswinds.net's fix that now uses checkprefix() instead of
strnequal() when the third argument was strlen(first argument) anyway.
This makes it less prone to errors. (Slightly edited by me)
2002-10-28 21:52:00 +00:00
Daniel Stenberg
8f52b731f4 the malloc debug system assumes single thread 2002-10-28 21:05:14 +00:00
Daniel Stenberg
d442088ed3 kromJx@crosswinds.net fixed typos 2002-10-28 20:58:28 +00:00
Daniel Stenberg
22a323890a works now with autoconf 2.54 2002-10-28 20:39:23 +00:00
Daniel Stenberg
163bba1410 Kevin Roth's patch that checks for the CA cert file at two more places if the
--cacert option is not used.

1. An environment variable named CURL_CA_BUNDLE may contain the full file
name to the file.

2. On Windows, the cert file may be named curl-ca-bundle.crt and put in the
same dir as curl is located (or the CWD) and curl will then use that file
instead.
2002-10-28 19:49:58 +00:00
Daniel Stenberg
db1c618fcf Kevin Roth's patch. $(RM) instead of @erase, and it also passes on the
USE_SSLEAY variable
2002-10-28 19:39:58 +00:00
Daniel Stenberg
01bdfa7b6d Kevin Roth's fixes that use $(RM) instead of @erase and modified SSL version 2002-10-28 19:38:46 +00:00
Daniel Stenberg
6a88c8d845 prevent compiler warning 2002-10-28 19:21:30 +00:00
Daniel Stenberg
b8a6913e09 prevent compiler warnings 2002-10-28 19:20:59 +00:00
Daniel Stenberg
744d8c1006 fixes 2002-10-28 19:17:49 +00:00
Daniel Stenberg
c2e2c98d81 fixed the cygwin check for -no-undefined 2002-10-23 14:45:28 +00:00
Daniel Stenberg
3fa353a2d3 improved the check for an ISO cpp by checking specifically for __BORLANDC__
too, as Emiliano Ida has confirmed it to work
2002-10-23 14:15:29 +00:00
Daniel Stenberg
c27c9f80d2 kromJx@crosswinds.net made it run properly with stunnel >=4.0 2002-10-23 14:07:34 +00:00
Daniel Stenberg
b5a74715cf bad headers can come in two kinds, we either treat everything as one big
badly assumed header, or we think that part of the buffer is a bad header
and the rest is treated as a normal body part
2002-10-23 13:48:37 +00:00
Daniel Stenberg
13ee2901f4 another week, 7 fixes 2002-10-21 14:04:26 +00:00
Daniel Stenberg
32c03eadd6 glibc 2.2.93 gethostbyname_r() no longer returns ERANGE if the given buffer
size isn't big enough. For some reason they now return EAGAIN.

Redhat 8 ships with this glibc version.
2002-10-21 13:20:30 +00:00
Daniel Stenberg
0fa512f26d Nikita Schmidt's fix to debian bug report #165382. This is verified with
the new test case 55.
2002-10-21 12:07:02 +00:00
Daniel Stenberg
219d88518c Added test 55, follow location with a single slash in the original path.
This caused curl 7.10.1 to crash.
2002-10-21 12:02:44 +00:00
Daniel Stenberg
ecf3aee43a check for cygwin and if built on that, enable the no-undefined option for
libtool. Otherwise disable it.
2002-10-21 06:49:42 +00:00
Daniel Stenberg
7f08cab73e test 54 added, blank Location: field 2002-10-21 06:18:51 +00:00
Daniel Stenberg
c4e9ef199e --enable-debug now checks if gcc is used before it sets all those gcc-
specific options. This should make this option work on more platforms with
other compilers.
2002-10-21 05:52:05 +00:00
Daniel Stenberg
9e612b5550 make very sure that we return 'done' properly when a transfer is done, as
otherwise the multi interface gets problems
2002-10-18 15:28:33 +00:00
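For context, a hedged sketch of how an application typically drives the multi interface: it keeps calling curl_multi_perform() until the running-handle counter reaches zero, which is why a transfer that never reports itself as done stalls the loop (error handling and the select() step are trimmed):

  #include <curl/curl.h>

  int fetch(const char *url)
  {
    CURL *easy = curl_easy_init();
    CURLM *multi = curl_multi_init();
    int still_running = 0;

    curl_easy_setopt(easy, CURLOPT_URL, url);
    curl_multi_add_handle(multi, easy);

    do {
      /* a real program would wait on the fds from curl_multi_fdset()
         between rounds instead of spinning */
      while(curl_multi_perform(multi, &still_running) ==
            CURLM_CALL_MULTI_PERFORM)
        ;
    } while(still_running);

    curl_multi_remove_handle(multi, easy);
    curl_easy_cleanup(easy);
    curl_multi_cleanup(multi);
    return 0;
  }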
Daniel Stenberg
203633d34d return call_multi when we follow a location 2002-10-18 15:27:49 +00:00
Daniel Stenberg
45bd009bb1 if we found no string on the Location: line, don't try to follow it 2002-10-18 13:51:00 +00:00
Daniel Stenberg
ee656415c4 moved comments to first column and automake stopped complaining 2002-10-18 07:55:38 +00:00
Daniel Stenberg
156aad198f Make the COOKIESESSION option work better by creating a list of cookie files
when they are given with curl_easy_setopt(), and then parsing them all on the first
curl_easy_perform() call instead.
2002-10-17 07:10:39 +00:00
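A hedged usage sketch of the behaviour described above (the URL and file names are placeholders): CURLOPT_COOKIEFILE can be given several times to build up the list, the files are only read on the first curl_easy_perform(), and CURLOPT_COOKIESESSION marks the transfer as a new cookie session:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
      curl_easy_setopt(curl, CURLOPT_COOKIEFILE, "first-cookies.txt");
      curl_easy_setopt(curl, CURLOPT_COOKIEFILE, "second-cookies.txt");
      curl_easy_setopt(curl, CURLOPT_COOKIESESSION, 1L);
      curl_easy_perform(curl);   /* the cookie files are parsed here */
      curl_easy_cleanup(curl);
    }
    return 0;
  }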
Daniel Stenberg
b1ffb79a50 junk cookies test53 added 2002-10-17 07:03:26 +00:00
Daniel Stenberg
d6654bfe00 mucho fixed 2002-10-16 09:53:38 +00:00
Daniel Stenberg
eefdd67d22 Added new mirror 2002-10-15 14:18:31 +00:00
Daniel Stenberg
86a86d7afd Andrés García's corrections 2002-10-15 08:39:30 +00:00
Daniel Stenberg
b6dac2b484 ignore .ps and .pdf files too 2002-10-14 07:47:40 +00:00
Daniel Stenberg
e6367abae9 generate and include PDF versions of the docs in the release archive 2002-10-14 07:39:49 +00:00
Daniel Stenberg
fc4d1d9a60 my first take at a memory leak detection document 2002-10-13 10:34:33 +00:00
Daniel Stenberg
94bae20776 some more 2002-10-13 10:28:38 +00:00
Daniel Stenberg
bb8c8d273c added more info 2002-10-13 10:18:10 +00:00
Daniel Stenberg
ee600ace37 three silly bugs 2002-10-12 12:35:30 +00:00
Daniel Stenberg
da86e32eb4 -y and -Y were switched in the examples 2002-10-12 12:14:09 +00:00
Daniel Stenberg
b5bbc04ad1 return error properly when a non-blocking connect fails using the multi
interface
2002-10-12 11:18:08 +00:00
Daniel Stenberg
265c58611f When we receive a "bad header" we must make sure not to write down the data
part as well, as then we would write the same data twice.
2002-10-11 20:55:08 +00:00
Daniel Stenberg
25c973a39e fix bad free() that caused segfault 2002-10-11 17:44:36 +00:00
391 changed files with 20047 additions and 17118 deletions

View File

@@ -9,3 +9,6 @@ config.status
curl-config
autom4te.cache
depcomp
config.guess
config.sub
ltmain.sh

2041
CHANGES

File diff suppressed because it is too large

1504
CHANGES.2002 Normal file

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
COPYRIGHT AND PERMISSION NOTICE
Copyright (c) 1996 - 2002, Daniel Stenberg, <daniel@haxx.se>.
Copyright (c) 1996 - 2003, Daniel Stenberg, <daniel@haxx.se>.
All rights reserved.

View File

@@ -15,8 +15,9 @@ Compile and build instructions follow below.
CHANGES.0 contains ancient changes.
CHANGES.$year contains changes for the particular year.
memanalyze.pl is for analyzing the output generated by curl if -DMALLOCDEBUG
is used when compiling
tests/memanalyze.pl
is for analyzing the output generated by curl if -DCURLDEBUG
is used when compiling (run configure with --enable-debug)
buildconf builds the makefiles and configure stuff
@@ -30,25 +31,39 @@ To build after having extracted everything from CVS, do this:
./configure
make
Daniel uses a ./configure line similar to this for easier development:
./configure --disable-shared --enable-debug --enable-maintainer-mode
REQUIREMENTS
You need the following software installed:
o autoconf 2.50 (or later)
o automake 1.5 (or later)
o libtool 1.4 (or later)
o autoconf 2.57 (or later)
o automake 1.7 (or later)
o libtool 1.4.2 (or later)
o GNU m4 (required by autoconf)
o nroff + perl (if you don't have nroff and perl and you for some reason
don't want to install them, you can rename the source file
src/hugehelp.c.cvs to src/hugehelp.c and avoid having to generate this
file. This will of course give you an older version of the file that isn't
up-to-date. That file was checked in once and won't be updated very
regularly.)
o nroff + perl
If you don't have nroff and perl and you for some reason don't want to
install them, you can rename the source file src/hugehelp.c.cvs to
src/hugehelp.c and avoid having to generate this file. This will of course
give you an older version of the file that isn't up-to-date. That file was
checked in once and won't be updated very regularly.
o yacc/bison
If you don't have yacc or bison, you must rename the lib/getdate.c.cvs file
to lib/getdate.c to be able to build libcurl. yacc/bison is normally used
to generate the lib/getdate.c file from the lib/getdate.y source file.
MAC OS X
For Mac OS X users, Guido Neitzer write down the following step-by-step guide:
With Mac OS X 10.2 and the associated Developer Tools, the installed versions
of the build tools are adequate. For Mac OS X 10.1 users, Guido Neitzer
wrote the following step-by-step guide:
1. Install fink (http://fink.sourceforge.net)
2. Update fink to the newest version (with the installed fink)

25
LEGAL
View File

@@ -1,25 +0,0 @@
Copyright (C) 1998-2002, Daniel Stenberg, <daniel@haxx.se>, et al.
Everyone is permitted to copy and distribute verbatim copies of this license
document, but changing it is not allowed.
In order to be useful for every potential user, the curl and libcurl are
dual-licensed under the MPL and the MIT/X-derivate licenses.
You may opt to use, copy, modify, merge, publish, distribute and/or sell
copies of the Software, and permit persons to whom the Software is furnished
to do so, under the terms of the MPL or the MIT/X-derivate licenses. You may
pick one of these licenses. The files MITX.txt and MPL-1.1.txt contain the
license texts.
As a courtesy to the open-source and free software community, we ask you to
dual-license any modifications that you make as well, under the terms of this
document.
Please remember to always keep the licensing information included in
individual source files up-to-date, so as to avoid misleading anyone as to
the status of these files.
I will use a submission policy according to which I will only enter
contributions into the CVS tree if the contributor agrees to both licenses
and this dual-license approach.

View File

@@ -1,21 +0,0 @@
COPYRIGHT AND PERMISSION NOTICE
Copyright (c) 1996 - 2002, Daniel Stenberg, <daniel@haxx.se>.
All rights reserved.
Permission to use, copy, modify, and distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright
notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of a copyright holder shall not
be used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization of the copyright holder.

View File

@@ -1,470 +0,0 @@
MOZILLA PUBLIC LICENSE
Version 1.1
---------------
1. Definitions.
1.0.1. "Commercial Use" means distribution or otherwise making the
Covered Code available to a third party.
1.1. "Contributor" means each entity that creates or contributes to
the creation of Modifications.
1.2. "Contributor Version" means the combination of the Original
Code, prior Modifications used by a Contributor, and the Modifications
made by that particular Contributor.
1.3. "Covered Code" means the Original Code or Modifications or the
combination of the Original Code and Modifications, in each case
including portions thereof.
1.4. "Electronic Distribution Mechanism" means a mechanism generally
accepted in the software development community for the electronic
transfer of data.
1.5. "Executable" means Covered Code in any form other than Source
Code.
1.6. "Initial Developer" means the individual or entity identified
as the Initial Developer in the Source Code notice required by Exhibit
A.
1.7. "Larger Work" means a work which combines Covered Code or
portions thereof with code not governed by the terms of this License.
1.8. "License" means this document.
1.8.1. "Licensable" means having the right to grant, to the maximum
extent possible, whether at the time of the initial grant or
subsequently acquired, any and all of the rights conveyed herein.
1.9. "Modifications" means any addition to or deletion from the
substance or structure of either the Original Code or any previous
Modifications. When Covered Code is released as a series of files, a
Modification is:
A. Any addition to or deletion from the contents of a file
containing Original Code or previous Modifications.
B. Any new file that contains any part of the Original Code or
previous Modifications.
1.10. "Original Code" means Source Code of computer software code
which is described in the Source Code notice required by Exhibit A as
Original Code, and which, at the time of its release under this
License is not already Covered Code governed by this License.
1.10.1. "Patent Claims" means any patent claim(s), now owned or
hereafter acquired, including without limitation, method, process,
and apparatus claims, in any patent Licensable by grantor.
1.11. "Source Code" means the preferred form of the Covered Code for
making modifications to it, including all modules it contains, plus
any associated interface definition files, scripts used to control
compilation and installation of an Executable, or source code
differential comparisons against either the Original Code or another
well known, available Covered Code of the Contributor's choice. The
Source Code can be in a compressed or archival form, provided the
appropriate decompression or de-archiving software is widely available
for no charge.
1.12. "You" (or "Your") means an individual or a legal entity
exercising rights under, and complying with all of the terms of, this
License or a future version of this License issued under Section 6.1.
For legal entities, "You" includes any entity which controls, is
controlled by, or is under common control with You. For purposes of
this definition, "control" means (a) the power, direct or indirect,
to cause the direction or management of such entity, whether by
contract or otherwise, or (b) ownership of more than fifty percent
(50%) of the outstanding shares or beneficial ownership of such
entity.
2. Source Code License.
2.1. The Initial Developer Grant.
The Initial Developer hereby grants You a world-wide, royalty-free,
non-exclusive license, subject to third party intellectual property
claims:
(a) under intellectual property rights (other than patent or
trademark) Licensable by Initial Developer to use, reproduce,
modify, display, perform, sublicense and distribute the Original
Code (or portions thereof) with or without Modifications, and/or
as part of a Larger Work; and
(b) under Patents Claims infringed by the making, using or
selling of Original Code, to make, have made, use, practice,
sell, and offer for sale, and/or otherwise dispose of the
Original Code (or portions thereof).
(c) the licenses granted in this Section 2.1(a) and (b) are
effective on the date Initial Developer first distributes
Original Code under the terms of this License.
(d) Notwithstanding Section 2.1(b) above, no patent license is
granted: 1) for code that You delete from the Original Code; 2)
separate from the Original Code; or 3) for infringements caused
by: i) the modification of the Original Code or ii) the
combination of the Original Code with other software or devices.
2.2. Contributor Grant.
Subject to third party intellectual property claims, each Contributor
hereby grants You a world-wide, royalty-free, non-exclusive license
(a) under intellectual property rights (other than patent or
trademark) Licensable by Contributor, to use, reproduce, modify,
display, perform, sublicense and distribute the Modifications
created by such Contributor (or portions thereof) either on an
unmodified basis, with other Modifications, as Covered Code
and/or as part of a Larger Work; and
(b) under Patent Claims infringed by the making, using, or
selling of Modifications made by that Contributor either alone
and/or in combination with its Contributor Version (or portions
of such combination), to make, use, sell, offer for sale, have
made, and/or otherwise dispose of: 1) Modifications made by that
Contributor (or portions thereof); and 2) the combination of
Modifications made by that Contributor with its Contributor
Version (or portions of such combination).
(c) the licenses granted in Sections 2.2(a) and 2.2(b) are
effective on the date Contributor first makes Commercial Use of
the Covered Code.
(d) Notwithstanding Section 2.2(b) above, no patent license is
granted: 1) for any code that Contributor has deleted from the
Contributor Version; 2) separate from the Contributor Version;
3) for infringements caused by: i) third party modifications of
Contributor Version or ii) the combination of Modifications made
by that Contributor with other software (except as part of the
Contributor Version) or other devices; or 4) under Patent Claims
infringed by Covered Code in the absence of Modifications made by
that Contributor.
3. Distribution Obligations.
3.1. Application of License.
The Modifications which You create or to which You contribute are
governed by the terms of this License, including without limitation
Section 2.2. The Source Code version of Covered Code may be
distributed only under the terms of this License or a future version
of this License released under Section 6.1, and You must include a
copy of this License with every copy of the Source Code You
distribute. You may not offer or impose any terms on any Source Code
version that alters or restricts the applicable version of this
License or the recipients' rights hereunder. However, You may include
an additional document offering the additional rights described in
Section 3.5.
3.2. Availability of Source Code.
Any Modification which You create or to which You contribute must be
made available in Source Code form under the terms of this License
either on the same media as an Executable version or via an accepted
Electronic Distribution Mechanism to anyone to whom you made an
Executable version available; and if made available via Electronic
Distribution Mechanism, must remain available for at least twelve (12)
months after the date it initially became available, or at least six
(6) months after a subsequent version of that particular Modification
has been made available to such recipients. You are responsible for
ensuring that the Source Code version remains available even if the
Electronic Distribution Mechanism is maintained by a third party.
3.3. Description of Modifications.
You must cause all Covered Code to which You contribute to contain a
file documenting the changes You made to create that Covered Code and
the date of any change. You must include a prominent statement that
the Modification is derived, directly or indirectly, from Original
Code provided by the Initial Developer and including the name of the
Initial Developer in (a) the Source Code, and (b) in any notice in an
Executable version or related documentation in which You describe the
origin or ownership of the Covered Code.
3.4. Intellectual Property Matters
(a) Third Party Claims.
If Contributor has knowledge that a license under a third party's
intellectual property rights is required to exercise the rights
granted by such Contributor under Sections 2.1 or 2.2,
Contributor must include a text file with the Source Code
distribution titled "LEGAL" which describes the claim and the
party making the claim in sufficient detail that a recipient will
know whom to contact. If Contributor obtains such knowledge after
the Modification is made available as described in Section 3.2,
Contributor shall promptly modify the LEGAL file in all copies
Contributor makes available thereafter and shall take other steps
(such as notifying appropriate mailing lists or newsgroups)
reasonably calculated to inform those who received the Covered
Code that new knowledge has been obtained.
(b) Contributor APIs.
If Contributor's Modifications include an application programming
interface and Contributor has knowledge of patent licenses which
are reasonably necessary to implement that API, Contributor must
also include this information in the LEGAL file.
(c) Representations.
Contributor represents that, except as disclosed pursuant to
Section 3.4(a) above, Contributor believes that Contributor's
Modifications are Contributor's original creation(s) and/or
Contributor has sufficient rights to grant the rights conveyed by
this License.
3.5. Required Notices.
You must duplicate the notice in Exhibit A in each file of the Source
Code. If it is not possible to put such notice in a particular Source
Code file due to its structure, then You must include such notice in a
location (such as a relevant directory) where a user would be likely
to look for such a notice. If You created one or more Modification(s)
You may add your name as a Contributor to the notice described in
Exhibit A. You must also duplicate this License in any documentation
for the Source Code where You describe recipients' rights or ownership
rights relating to Covered Code. You may choose to offer, and to
charge a fee for, warranty, support, indemnity or liability
obligations to one or more recipients of Covered Code. However, You
may do so only on Your own behalf, and not on behalf of the Initial
Developer or any Contributor. You must make it absolutely clear than
any such warranty, support, indemnity or liability obligation is
offered by You alone, and You hereby agree to indemnify the Initial
Developer and every Contributor for any liability incurred by the
Initial Developer or such Contributor as a result of warranty,
support, indemnity or liability terms You offer.
3.6. Distribution of Executable Versions.
You may distribute Covered Code in Executable form only if the
requirements of Section 3.1-3.5 have been met for that Covered Code,
and if You include a notice stating that the Source Code version of
the Covered Code is available under the terms of this License,
including a description of how and where You have fulfilled the
obligations of Section 3.2. The notice must be conspicuously included
in any notice in an Executable version, related documentation or
collateral in which You describe recipients' rights relating to the
Covered Code. You may distribute the Executable version of Covered
Code or ownership rights under a license of Your choice, which may
contain terms different from this License, provided that You are in
compliance with the terms of this License and that the license for the
Executable version does not attempt to limit or alter the recipient's
rights in the Source Code version from the rights set forth in this
License. If You distribute the Executable version under a different
license You must make it absolutely clear that any terms which differ
from this License are offered by You alone, not by the Initial
Developer or any Contributor. You hereby agree to indemnify the
Initial Developer and every Contributor for any liability incurred by
the Initial Developer or such Contributor as a result of any such
terms You offer.
3.7. Larger Works.
You may create a Larger Work by combining Covered Code with other code
not governed by the terms of this License and distribute the Larger
Work as a single product. In such a case, You must make sure the
requirements of this License are fulfilled for the Covered Code.
4. Inability to Comply Due to Statute or Regulation.
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Code due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description
must be included in the LEGAL file described in Section 3.4 and must
be included with all distributions of the Source Code. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Application of this License.
This License applies to code to which the Initial Developer has
attached the notice in Exhibit A and to related Covered Code.
6. Versions of the License.
6.1. New Versions.
Netscape Communications Corporation ("Netscape") may publish revised
and/or new versions of the License from time to time. Each version
will be given a distinguishing version number.
6.2. Effect of New Versions.
Once Covered Code has been published under a particular version of the
License, You may always continue to use it under the terms of that
version. You may also choose to use such Covered Code under the terms
of any subsequent version of the License published by Netscape. No one
other than Netscape has the right to modify the terms applicable to
Covered Code created under this License.
6.3. Derivative Works.
If You create or use a modified version of this License (which you may
only do in order to apply it to code which is not already Covered Code
governed by this License), You must (a) rename Your license so that
the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
"MPL", "NPL" or any confusingly similar phrase do not appear in your
license (except to note that your license differs from this License)
and (b) otherwise make it clear that Your version of the license
contains terms which differ from the Mozilla Public License and
Netscape Public License. (Filling in the name of the Initial
Developer, Original Code or Contributor in the notice described in
Exhibit A shall not of themselves be deemed to be modifications of
this License.)
7. DISCLAIMER OF WARRANTY.
COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
8. TERMINATION.
8.1. This License and the rights granted hereunder will terminate
automatically if You fail to comply with terms herein and fail to cure
such breach within 30 days of becoming aware of the breach. All
sublicenses to the Covered Code which are properly granted shall
survive any termination of this License. Provisions which, by their
nature, must remain in effect beyond the termination of this License
shall survive.
8.2. If You initiate litigation by asserting a patent infringement
claim (excluding declatory judgment actions) against Initial Developer
or a Contributor (the Initial Developer or Contributor against whom
You file such action is referred to as "Participant") alleging that:
(a) such Participant's Contributor Version directly or indirectly
infringes any patent, then any and all rights granted by such
Participant to You under Sections 2.1 and/or 2.2 of this License
shall, upon 60 days notice from Participant terminate prospectively,
unless if within 60 days after receipt of notice You either: (i)
agree in writing to pay Participant a mutually agreeable reasonable
royalty for Your past and future use of Modifications made by such
Participant, or (ii) withdraw Your litigation claim with respect to
the Contributor Version against such Participant. If within 60 days
of notice, a reasonable royalty and payment arrangement are not
mutually agreed upon in writing by the parties or the litigation claim
is not withdrawn, the rights granted by Participant to You under
Sections 2.1 and/or 2.2 automatically terminate at the expiration of
the 60 day notice period specified above.
(b) any software, hardware, or device, other than such Participant's
Contributor Version, directly or indirectly infringes any patent, then
any rights granted to You by such Participant under Sections 2.1(b)
and 2.2(b) are revoked effective as of the date You first made, used,
sold, distributed, or had made, Modifications made by that
Participant.
8.3. If You assert a patent infringement claim against Participant
alleging that such Participant's Contributor Version directly or
indirectly infringes any patent where such claim is resolved (such as
by license or settlement) prior to the initiation of patent
infringement litigation, then the reasonable value of the licenses
granted by such Participant under Sections 2.1 or 2.2 shall be taken
into account in determining the amount or value of any payment or
license.
8.4. In the event of termination under Sections 8.1 or 8.2 above,
all end user license agreements (excluding distributors and resellers)
which have been validly granted by You or any distributor hereunder
prior to termination shall survive termination.
9. LIMITATION OF LIABILITY.
UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
(INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
10. U.S. GOVERNMENT END USERS.
The Covered Code is a "commercial item," as that term is defined in
48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
software" and "commercial computer software documentation," as such
terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
all U.S. Government End Users acquire Covered Code with only those
rights set forth herein.
11. MISCELLANEOUS.
This License represents the complete agreement concerning subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. This License shall be governed by
California law provisions (except to the extent applicable law, if
any, provides otherwise), excluding its conflict-of-law provisions.
With respect to disputes in which at least one party is a citizen of,
or an entity chartered or registered to do business in the United
States of America, any litigation relating to this License shall be
subject to the jurisdiction of the Federal Courts of the Northern
District of California, with venue lying in Santa Clara County,
California, with the losing party responsible for costs, including
without limitation, court costs and reasonable attorneys' fees and
expenses. The application of the United Nations Convention on
Contracts for the International Sale of Goods is expressly excluded.
Any law or regulation which provides that the language of a contract
shall be construed against the drafter shall not apply to this
License.
12. RESPONSIBILITY FOR CLAIMS.
As between Initial Developer and the Contributors, each party is
responsible for claims and damages arising, directly or indirectly,
out of its utilization of rights under this License and You agree to
work with Initial Developer and Contributors to distribute such
responsibility on an equitable basis. Nothing herein is intended or
shall be deemed to constitute any admission of liability.
13. MULTIPLE-LICENSED CODE.
Initial Developer may designate portions of the Covered Code as
"Multiple-Licensed". "Multiple-Licensed" means that the Initial
Developer permits you to utilize portions of the Covered Code under
Your choice of the NPL or the alternative licenses, if any, specified
by the Initial Developer in the file described in Exhibit A.
EXHIBIT A -Mozilla Public License.
``The contents of this file are subject to the Mozilla Public License
Version 1.1 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
License for the specific language governing rights and limitations
under the License.
The Original Code is ______________________________________.
The Initial Developer of the Original Code is ________________________.
Portions created by ______________________ are Copyright (C) ______
_______________________. All Rights Reserved.
Contributor(s): ______________________________________.
Alternatively, the contents of this file may be used under the terms
of the _____ license (the "[___] License"), in which case the
provisions of [______] License are applicable instead of those
above. If you wish to allow use of your version of this file only
under the terms of the [____] License and not to allow others to use
your version of this file under the MPL, indicate your decision by
deleting the provisions above and replace them with the notice and
other provisions required by the [___] License. If you do not delete
the provisions above, a recipient may use your version of this file
under either the MPL or the [___] License."
[NOTE: The text of this Exhibit A may differ slightly from the text of
the notices in the Source Code files of the Original Code. You should
use the text of this Exhibit A rather than the text found in the
Original Code Source Code for Your Modifications.]

View File

@@ -4,24 +4,32 @@
AUTOMAKE_OPTIONS = foreign
EXTRA_DIST = CHANGES COPYING maketgz UPGRADE reconf Makefile.dist \
curl-config.in build_vms.com curl-mode.el
EXTRA_DIST = CHANGES COPYING maketgz reconf Makefile.dist \
curl-config.in build_vms.com curl-style.el sample.emacs testcurl.sh
bin_SCRIPTS = curl-config
SUBDIRS = docs lib src include tests packages
SUBDIRS = lib src
DIST_SUBDIRS = $(SUBDIRS) tests include packages docs
# create a root makefile in the distribution:
dist-hook:
rm -rf $(top_builddir)/tests/log
cp $(srcdir)/Makefile.dist $(distdir)/Makefile
html:
cd docs; make html
pdf:
cd docs; make pdf
check: test
test:
@(cd tests; $(MAKE) quiet-test)
@(cd tests; $(MAKE) all quiet-test)
test-full:
@(cd tests; $(MAKE) all full-test)
#
# Build source and binary rpms. For rpm-3.0 and above, the ~/.rpmmacros
@@ -70,3 +78,13 @@ pkgadd:
# resulting .tar.bz2 file will end up at packages/Win32/cygwin
cygwinbin:
$(MAKE) -C packages/Win32/cygwin cygwinbin
# We extend the standard install with a custom hook:
install-data-hook:
cd include && $(MAKE) install
cd docs && $(MAKE) install
# We extend the standard uninstall with a custom hook:
uninstall-hook:
cd include && $(MAKE) uninstall
cd docs && $(MAKE) uninstall

View File

@@ -59,6 +59,10 @@ vc-ssl-dll:
cd ..\src
nmake -f Makefile.vc6
djgpp:
make -C lib -f Makefile.dj
make -C src -f Makefile.dj
cygwin:
./configure
make

41
README
View File

@@ -11,39 +11,60 @@ README
MANUAL document. Find out how to install Curl by reading the INSTALL
document.
libcurl is a library that Curl is using to do its job. It is readily
libcurl is the library curl is using to do its job. It is readily
available to be used by your software. Read the libcurl.3 man page to
find out how!
learn how!
You find answers to the most frequent questions we get in the FAQ document.
Study the LEGAL file for distribution terms and similar.
Study the COPYING file for distribution terms and similar.
Visit the curl web site or mirror for the latest news:
CONTACT
http://curl.haxx.se/
http://curl.sf.net/
If you have problems, questions, ideas or suggestions, please contact us
by posting to a suitable mailing list. See http://curl.haxx.se/mail/
Many major contributors to the project are listed in the THANKS document.
WEB SITE
Visit the curl web site or mirrors for the latest news:
Sweden -- http://curl.haxx.se/
Russia -- http://curl.tsuren.net/
US -- http://curl.sf.net/
Australia -- http://curl.planetmirror.com/
DOWNLOAD
The official download mirror sites are:
Sweden -- ftp://ftp.sunet.se/pub/www/utilities/curl/
Sweden -- http://cool.haxx.se/curl/
Germany -- ftp://ftp.fu-berlin.de/pub/unix/network/curl/
Australia -- http://curl.planetmirror.com/download/
US -- http://curl.sourceforge.net/download/
Hongkong -- http://www.execve.net/curl/
Russia -- http://curl.tsuren.net/download/
CVS
To download the very latest source off the CVS server do this:
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl login
cvs -d :pserver:cvsread@cvs.php.net:/repository login
(just press enter when asked for password)
(enter "phpfi" when asked for password)
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl co curl
cvs -d :pserver:cvsread@cvs.php.net:/repository co curl
(you'll get a directory named curl created, filled with the source code)
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl logout
cvs -d :pserver:cvsread@cvs.php.net:/repository logout
(you're off the hook!)
NOTICE
Curl contains pieces of source code that is Copyright (c) 1998, 1999
Kungliga Tekniska Högskolan. This notice is included here to comply with the
distribution terms.

34
UPGRADE
View File

@@ -1,34 +0,0 @@
Upgrading to curl/libcurl 7.10 from any previous version
========================================================
libcurl 7.10 performs peer SSL certificate verification by default. This is
done by installing a default CA cert bundle on 'make install' (or similar),
that CA bundle package is used by default on operations against SSL servers.
Alas, if you communicate with HTTPS servers using certificates that are signed
by CAs present in the bundle, you will not notice any changed behavior and you
will seamlessly get a higher security level on your SSL connections since you
can be sure that the remote server really is the one it claims to be.
If the remote server uses a self-signed certificate, or if you don't install
curl's CA cert bundle or if it uses a certificate signed by a CA that isn't
included in the bundle, then you need to do one of the following:
1. Tell libcurl to *not* verify the peer. With libcurl you disable this with
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);
With the curl command tool, you disable this with -k/--insecure.
2. Get a CA certificate that can verify the remote server and use the proper
option to point out this CA cert for verification when connecting. For
libcurl hackers: curl_easy_setopt(curl, CURLOPT_CAPATH, capath);
With the curl command tool: --cacert [file]
This upgrade procedure has been deemed The Right Thing even though it adds
this extra trouble for some users, since it adds security to a majority of the
SSL connections that previously weren't really secure.
It turned out many people were using previous versions of curl/libcurl without
realizing the need for the CA cert options to get truly secure SSL
connections.
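A minimal sketch of the two alternatives from the removed UPGRADE text above, using CURLOPT_CAINFO for the --cacert case (the CURLOPT_CAPATH call mentioned in the text takes a directory of certificates instead); the URL and bundle file name are placeholders:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
      /* alternative 1, like -k/--insecure: do not verify the peer
         curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L); */
      /* alternative 2, like --cacert: verify against a given CA bundle */
      curl_easy_setopt(curl, CURLOPT_CAINFO, "ca-bundle.crt");
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    return 0;
  }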

View File

@@ -1,90 +0,0 @@
/* Name of this package! */
#undef PACKAGE
/* Version number of this archive. */
#undef VERSION
/* Define if you have the getpass function. */
#undef HAVE_GETPASS
/* Define cpu-machine-OS */
#undef OS
/* Define if you have the gethostbyaddr_r() function with 5 arguments */
#undef HAVE_GETHOSTBYADDR_R_5
/* Define if you have the gethostbyaddr_r() function with 7 arguments */
#undef HAVE_GETHOSTBYADDR_R_7
/* Define if you have the gethostbyaddr_r() function with 8 arguments */
#undef HAVE_GETHOSTBYADDR_R_8
/* Define if you have the gethostbyname_r() function with 3 arguments */
#undef HAVE_GETHOSTBYNAME_R_3
/* Define if you have the gethostbyname_r() function with 5 arguments */
#undef HAVE_GETHOSTBYNAME_R_5
/* Define if you have the gethostbyname_r() function with 6 arguments */
#undef HAVE_GETHOSTBYNAME_R_6
/* Define if you have the inet_ntoa_r function declared. */
#undef HAVE_INET_NTOA_R_DECL
/* Define if you need the _REENTRANT define for some functions */
#undef NEED_REENTRANT
/* Define if you have the Kerberos4 libraries (including -ldes) */
#undef KRB4
/* Define if you want to enable IPv6 support */
#undef ENABLE_IPV6
/* Define this to 'int' if ssize_t is not an available typedefed type */
#undef ssize_t
/* Define this to 'int' if socklen_t is not an available typedefed type */
#undef socklen_t
/* Define this as a suitable file to read random data from */
#undef RANDOM_FILE
/* Define this to your Entropy Gathering Daemon socket pathname */
#undef EGD_SOCKET
/* Define if you have a working OpenSSL installation */
#undef OPENSSL_ENABLED
/* Define the one correct non-blocking socket method below */
#undef HAVE_FIONBIO
#undef HAVE_IOCTLSOCKET
#undef HAVE_IOCTLSOCKET_CASE
#undef HAVE_O_NONBLOCK
#undef HAVE_DISABLED_NONBLOCKING
/* Define this to 'int' if in_addr_t is not an available typedefed type */
#undef in_addr_t
/* Define to disable DICT */
#undef CURL_DISABLE_DICT
/* Define to disable FILE */
#undef CURL_DISABLE_FILE
/* Define to disable FTP */
#undef CURL_DISABLE_FTP
/* Define to disable GOPHER */
#undef CURL_DISABLE_GOPHER
/* Define to disable HTTP */
#undef CURL_DISABLE_HTTP
/* Define to disable LDAP */
#undef CURL_DISABLE_LDAP
/* Define to disable TELNET */
#undef CURL_DISABLE_TELNET
/* Define if you have zlib present */
#undef HAVE_LIBZ
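As a hedged sketch (not the libcurl source) of how the non-blocking socket defines listed above are meant to be consumed, exactly one of them selects how a socket is switched to non-blocking mode:

  #include <sys/types.h>
  #ifdef HAVE_O_NONBLOCK
  #include <fcntl.h>
  #endif
  #ifdef HAVE_FIONBIO
  #include <sys/ioctl.h>
  #endif

  static int set_nonblocking(int sockfd)
  {
  #if defined(HAVE_O_NONBLOCK)
    int flags = fcntl(sockfd, F_GETFL, 0);
    return fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
  #elif defined(HAVE_FIONBIO)
    int on = 1;
    return ioctl(sockfd, FIONBIO, &on);
  #elif defined(HAVE_IOCTLSOCKET)
    unsigned long on = 1;                  /* Winsock; windows headers assumed */
    return ioctlsocket(sockfd, FIONBIO, &on);
  #else
    /* HAVE_IOCTLSOCKET_CASE and HAVE_DISABLED_NONBLOCKING are omitted from
       this sketch */
    (void)sockfd;
    return 0;
  #endif
  }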

View File

@@ -38,7 +38,7 @@ AC_DEFUN(CURL_CHECK_NONBLOCKING_SOCKET,
],[
dnl the O_NONBLOCK test was fine
nonblock="O_NONBLOCK"
AC_DEFINE(HAVE_O_NONBLOCK)
AC_DEFINE(HAVE_O_NONBLOCK, 1, [use O_NONBLOCK for non-blocking sockets])
],[
dnl the code was bad, try a different program now, test 2
@@ -52,7 +52,7 @@ dnl the code was bad, try a different program now, test 2
],[
dnl FIONBIO test was good
nonblock="FIONBIO"
AC_DEFINE(HAVE_FIONBIO)
AC_DEFINE(HAVE_FIONBIO, 1, [use FIONBIO for non-blocking sockets])
],[
dnl FIONBIO test was also bad
dnl the code was bad, try a different program now, test 3
@@ -66,7 +66,7 @@ dnl the code was bad, try a different program now, test 3
],[
dnl ioctlsocket test was good
nonblock="ioctlsocket"
AC_DEFINE(HAVE_IOCTLSOCKET)
AC_DEFINE(HAVE_IOCTLSOCKET, 1, [use ioctlsocket() for non-blocking sockets])
],[
dnl ioctlsocket didnt compile!
@@ -79,11 +79,11 @@ dnl ioctlsocket didnt compile!
],[
dnl ioctlsocket test was good
nonblock="IoctlSocket"
AC_DEFINE(HAVE_IOCTLSOCKET_CASE)
AC_DEFINE(HAVE_IOCTLSOCKET_CASE, 1, [use Ioctlsocket() for non-blocking sockets])
],[
dnl ioctlsocket didnt compile!
nonblock="nada"
AC_DEFINE(HAVE_DISABLED_NONBLOCKING)
AC_DEFINE(HAVE_DISABLED_NONBLOCKING, 1, [disabled non-blocking sockets])
])
dnl end of fourth test
@@ -152,10 +152,8 @@ AC_DEFUN([TYPE_IN_ADDR_T],
AC_MSG_CHECKING([for in_addr_t equivalent])
AC_CACHE_VAL([curl_cv_in_addr_t_equiv],
[
# Systems have either "struct sockaddr *" or
# "void *" as the second argument to getpeername
curl_cv_in_addr_t_equiv=
for t in int size_t unsigned long "unsigned long"; do
for t in "unsigned long" int size_t unsigned long; do
AC_TRY_COMPILE([
#include <sys/types.h>
#include <sys/socket.h>
@@ -272,15 +270,15 @@ AC_DEFUN(CURL_CHECK_INET_NTOA_R,
AC_MSG_CHECKING(whether inet_ntoa_r is declared)
AC_EGREP_CPP(inet_ntoa_r,[
#include <arpa/inet.h>],[
AC_DEFINE(HAVE_INET_NTOA_R_DECL)
AC_DEFINE(HAVE_INET_NTOA_R_DECL, 1, [inet_ntoa_r() is declared])
AC_MSG_RESULT(yes)],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING(whether inet_ntoa_r with -D_REENTRANT is declared)
AC_EGREP_CPP(inet_ntoa_r,[
#define _REENTRANT
#include <arpa/inet.h>],[
AC_DEFINE(HAVE_INET_NTOA_R_DECL)
AC_DEFINE(NEED_REENTRANT)
AC_DEFINE(HAVE_INET_NTOA_R_DECL, 1, [inet_ntoa_r() is declared])
AC_DEFINE(NEED_REENTRANT, 1, [need REENTRANT defined])
AC_MSG_RESULT(yes)],
AC_MSG_RESULT(no))])])
])
@@ -302,7 +300,7 @@ struct hostent_data hdata;
int rc;
rc = gethostbyaddr_r(address, length, type, &h, &hdata);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_5)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_5, 1, [gethostbyaddr_r() takes 5 args])
ac_cv_gethostbyaddr_args=5],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING(if gethostbyaddr_r with -D_REENTRANT takes 5 arguments)
@@ -318,8 +316,8 @@ struct hostent_data hdata;
int rc;
rc = gethostbyaddr_r(address, length, type, &h, &hdata);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_5)
AC_DEFINE(NEED_REENTRANT)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_5, 1, [gethostbyaddr_r() takes 5 args])
AC_DEFINE(NEED_REENTRANT, 1, [need REENTRANT])
ac_cv_gethostbyaddr_args=5],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING(if gethostbyaddr_r takes 7 arguments)
@@ -337,7 +335,7 @@ struct hostent * hp;
hp = gethostbyaddr_r(address, length, type, &h,
buffer, 8192, &h_errnop);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_7)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_7, 1, [gethostbyaddr_r() takes 7 args] )
ac_cv_gethostbyaddr_args=7],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING(if gethostbyaddr_r takes 8 arguments)
@@ -356,7 +354,7 @@ int rc;
rc = gethostbyaddr_r(address, length, type, &h,
buffer, 8192, &hp, &h_errnop);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_8)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_8, 1, [gethostbyaddr_r() takes 8 args])
ac_cv_gethostbyaddr_args=8],[
AC_MSG_RESULT(no)
have_missing_r_funcs="$have_missing_r_funcs gethostbyaddr_r"])])])])])
@@ -380,7 +378,7 @@ gethostbyname_r(const char *, struct hostent *, struct hostent_data *);],[
struct hostent_data data;
gethostbyname_r(NULL, NULL, NULL);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_3)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_3, 1, [gethostbyname_r() takes 3 args])
ac_cv_gethostbyname_args=3],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([if gethostbyname_r with -D_REENTRANT takes 3 arguments])
@@ -398,8 +396,8 @@ gethostbyname_r(const char *,struct hostent *, struct hostent_data *);],[
struct hostent_data data;
gethostbyname_r(NULL, NULL, NULL);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_3)
AC_DEFINE(NEED_REENTRANT)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_3, 1, [gethostbyname_r() takes 3 args])
AC_DEFINE(NEED_REENTRANT, 1, [needs REENTRANT])
ac_cv_gethostbyname_args=3],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([if gethostbyname_r takes 5 arguments])
@@ -413,7 +411,7 @@ struct hostent *
gethostbyname_r(const char *, struct hostent *, char *, int, int *);],[
gethostbyname_r(NULL, NULL, NULL, 0, NULL);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_5)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_5, 1, [gethostbyname_r() takes 5 args])
ac_cv_gethostbyname_args=5],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([if gethostbyname_r takes 6 arguments])
@@ -428,7 +426,7 @@ gethostbyname_r(const char *, struct hostent *, char *, size_t,
struct hostent **, int *);],[
gethostbyname_r(NULL, NULL, NULL, 0, NULL, NULL);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_6)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_6, 1, [gethostbyname_r() takes 6 args])
ac_cv_gethostbyname_args=6],[
AC_MSG_RESULT(no)
have_missing_r_funcs="$have_missing_r_funcs gethostbyname_r"],
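A hedged sketch of what these probes feed into: code that picks the matching gethostbyname_r() call form at compile time from the detected argument count (simplified, with an invented wrapper name; not the actual libcurl source):

  #define _GNU_SOURCE            /* glibc prototype for gethostbyname_r() */
  #include <netdb.h>

  static struct hostent *my_gethostbyname_r(const char *name,
                                            struct hostent *h,
                                            char *buf, int buflen)
  {
    struct hostent *result = NULL;
    int h_errnop = 0;

  #if defined(HAVE_GETHOSTBYNAME_R_6)
    if(gethostbyname_r(name, h, buf, buflen, &result, &h_errnop))
      result = NULL;                          /* glibc/Linux style */
  #elif defined(HAVE_GETHOSTBYNAME_R_5)
    result = gethostbyname_r(name, h, buf, buflen, &h_errnop);  /* Solaris */
  #elif defined(HAVE_GETHOSTBYNAME_R_3)
    {
      struct hostent_data data;               /* AIX/HP-UX style */
      if(gethostbyname_r(name, h, &data) == 0)
        result = h;
    }
    (void)buf; (void)buflen;
  #else
    (void)h; (void)buf; (void)buflen;
    result = gethostbyname(name);             /* plain, non-reentrant call */
  #endif
    (void)h_errnop;
    return result;
  }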

View File

@@ -1,69 +1,69 @@
$!
$
$ on control_y then goto Common_Exit!
$ orig = f$environment("DEFAULT")
$ loc = f$environment("PROCEDURE")
$ def = f$parse("X.X;1",loc) - "X.X;1"
$
$ set def 'def'
$ cc_qual = "/define=HAVE_CONFIG_H=1/include=(""../include/"",""../"",""../../openssl-0_9_6c/include/"")"
$ if p1 .eqs. "LISTING" then cc_qual = cc_qual + "/LIST/MACHINE"
$ if p1 .eqs. "DEBUG" then cc_qual = cc_qual + "/LIST/MACHINE/DEBUG"
$ msg_qual = ""
$ call build "[.lib]" "*.c"
$ call build "[.src]" "*.c"
$ call build "[.src]" "*.msg"
$!
$
$ on control_y then goto Common_Exit!
$ orig = f$environment("DEFAULT")
$ loc = f$environment("PROCEDURE")
$ def = f$parse("X.X;1",loc) - "X.X;1"
$
$ set def 'def'
$ cc_qual = "/define=HAVE_CONFIG_H=1/include=(""../include/"",""../"",""../../openssl-0_9_7/include/"")"
$ if p1 .eqs. "LISTING" then cc_qual = cc_qual + "/LIST/MACHINE"
$ if p1 .eqs. "DEBUG" then cc_qual = cc_qual + "/LIST/MACHINE/DEBUG"
$ msg_qual = ""
$ call build "[.lib]" "*.c"
$ call build "[.src]" "*.c"
$ call build "[.src]" "*.msg"
$ link /exe=curl.exe [.src]curl/lib/include=main,[.lib]curl/lib, -
[-.openssl-0_9_6c.axp.exe.ssl]libssl/lib, -
[-.openssl-0_9_6c.axp.exe.crypto]libcrypto/lib
$
$
$ goto Common_Exit
$build: subroutine
$ set noon
$ set default 'p1'
$ search = p2
$ reset = f$search("reset")
$ if f$search("CURL.OLB") .eqs. ""
$ then
$ LIB/CREATE/OBJECT CURL.OLB
$ endif
$ reset = f$search("reset",1)
$Loop:
$ file = f$search(search,1)
$ if file .eqs. "" then goto EndLoop
$ obj = f$search(f$parse(".OBJ;",file),2)
$ if (obj .nes. "")
$ then
$ if (f$cvtime(f$file(file,"rdt")) .gts. f$cvtime(f$file(obj,"rdt")))
$ then
$ call compile 'file'
$ lib/object curl.OLB 'f$parse(".obj;",file)'
$ else
$! write sys$output "File: ''file' is up to date"
$ endif
$ else
$! write sys$output "Object for file: ''file' does not exist"
$ call compile 'file'
$ lib/object curl.OLB 'f$parse(".obj;",file)'
$ endif
$ goto Loop
$EndLoop:
$ purge
$ set def 'def'
$ endsubroutine ! Build
$
$compile: subroutine
$ set noon
$ file = p1
$ qual = p2+p3+p4+p5+p6+p7+p8
$ typ = f$parse(file,,,"TYPE") - "."
$ cmd_c = "CC "+cc_qual
$ cmd_msg = "MESSAGE "+msg_qual
$ x = cmd_'typ'
$ 'x' 'file'
$ ENDSUBROUTINE ! Compile
$
$Common_Exit:
$ set default 'orig'
$ exit
[-.openssl-0_9_7.axp.exe.ssl]libssl/lib, -
[-.openssl-0_9_7.axp.exe.crypto]libcrypto/lib
$
$
$ goto Common_Exit
$build: subroutine
$ set noon
$ set default 'p1'
$ search = p2
$ reset = f$search("reset")
$ if f$search("CURL.OLB") .eqs. ""
$ then
$ LIB/CREATE/OBJECT CURL.OLB
$ endif
$ reset = f$search("reset",1)
$Loop:
$ file = f$search(search,1)
$ if file .eqs. "" then goto EndLoop
$ obj = f$search(f$parse(".OBJ;",file),2)
$ if (obj .nes. "")
$ then
$ if (f$cvtime(f$file(file,"rdt")) .gts. f$cvtime(f$file(obj,"rdt")))
$ then
$ call compile 'file'
$ lib/object curl.OLB 'f$parse(".obj;",file)'
$ else
$! write sys$output "File: ''file' is up to date"
$ endif
$ else
$! write sys$output "Object for file: ''file' does not exist"
$ call compile 'file'
$ lib/object curl.OLB 'f$parse(".obj;",file)'
$ endif
$ goto Loop
$EndLoop:
$ purge
$ set def 'def'
$ endsubroutine ! Build
$
$compile: subroutine
$ set noon
$ file = p1
$ qual = p2+p3+p4+p5+p6+p7+p8
$ typ = f$parse(file,,,"TYPE") - "."
$ cmd_c = "CC "+cc_qual
$ cmd_msg = "MESSAGE "+msg_qual
$ x = cmd_'typ'
$ 'x' 'file'
$ ENDSUBROUTINE ! Compile
$
$Common_Exit:
$ set default 'orig'
$ exit

126
buildconf
View File

@@ -5,7 +5,125 @@ die(){
exit
}
aclocal || die "The command 'aclocal' failed"
autoheader || die "The command 'autoheader' failed"
autoconf || die "The command 'autoconf' failed"
automake -a || die "The command 'automake $MAKEFILES' failed"
#--------------------------------------------------------------------------
# autoconf 2.57 or newer
#
need_autoconf="2.57"
ac_version=`${AUTOCONF:-autoconf} --version 2>/dev/null|head -1| sed -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'`
if test -z "$ac_version"; then
echo "buildconf: autoconf not found."
echo " You need autoconf version $need_autoconf or newer installed."
exit 1
fi
IFS=.; set $ac_version; IFS=' '
if test "$1" = "2" -a "$2" -lt "57" || test "$1" -lt "2"; then
echo "buildconf: autoconf version $ac_version found."
echo " You need autoconf version $need_autoconf or newer installed."
echo " If you have a sufficient autoconf installed, but it"
echo " is not named 'autoconf', then try setting the"
echo " AUTOCONF environment variable."
exit 1
fi
echo "buildconf: autoconf version $ac_version (ok)"
#--------------------------------------------------------------------------
# autoheader 2.50 or newer
#
ah_version=`${AUTOHEADER:-autoheader} --version 2>/dev/null|head -1| sed -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'`
if test -z "$ah_version"; then
echo "buildconf: autoheader not found."
echo " You need autoheader version 2.50 or newer installed."
exit 1
fi
IFS=.; set $ah_version; IFS=' '
if test "$1" = "2" -a "$2" -lt "50" || test "$1" -lt "2"; then
echo "buildconf: autoheader version $ah_version found."
echo " You need autoheader version 2.50 or newer installed."
echo " If you have a sufficient autoheader installed, but it"
echo " is not named 'autoheader', then try setting the"
echo " AUTOHEADER environment variable."
exit 1
fi
echo "buildconf: autoheader version $ah_version (ok)"
#--------------------------------------------------------------------------
# automake 1.7 or newer
#
need_automake="1.7"
am_version=`${AUTOMAKE:-automake} --version 2>/dev/null|head -1| sed -e 's/^.* \([0-9]\)/\1/' -e 's/[a-z]* *$//'`
if test -z "$am_version"; then
echo "buildconf: automake not found."
echo " You need automake version $need_automake or newer installed."
exit 1
fi
IFS=.; set $am_version; IFS=' '
if test "$1" = "1" -a "$2" -lt "7" || test "$1" -lt "1"; then
echo "buildconf: automake version $am_version found."
echo " You need automake version $need_automake or newer installed."
echo " If you have a sufficient automake installed, but it"
echo " is not named 'autommake', then try setting the"
echo " AUTOMAKE environment variable."
exit 1
fi
echo "buildconf: automake version $am_version (ok)"
#--------------------------------------------------------------------------
# libtool check
#
LIBTOOL_WANTED_MAJOR=1
LIBTOOL_WANTED_MINOR=4
LIBTOOL_WANTED_PATCH=2
LIBTOOL_WANTED_VERSION=1.4.2
libtool=`which glibtool 2>/dev/null`
if test ! -x "$libtool"; then
libtool=`which libtool`
fi
#lt_pversion=`${LIBTOOL:-$libtool} --version 2>/dev/null|head -1| sed -e 's/^.* \([0-9]\)/\1/' -e 's/[a-z]* *$//'`
lt_pversion=`$libtool --version 2>/dev/null|head -1|sed -e 's/^[^0-9]*//g' -e 's/[- ].*//'`
if test -z "$lt_pversion"; then
echo "buildconf: libtool not found."
echo " You need libtool version $LIBTOOL_WANTED_VERSION or newer installed"
exit 1
fi
lt_version=`echo $lt_pversion` #|sed -e 's/\([a-z]*\)$/.\1/'`
IFS=.; set $lt_version; IFS=' '
lt_status="good"
if test "$1" = "$LIBTOOL_WANTED_MAJOR"; then
if test "$2" -lt "$LIBTOOL_WANTED_MINOR"; then
lt_status="bad"
elif test ! -z "$LIBTOOL_WANTED_PATCH"; then
if test -n "$3"; then
if test "$3" -lt "$LIBTOOL_WANTED_PATCH"; then
lt_status="bad"
fi
fi
fi
fi
if test $lt_status != "good"; then
echo "buildconf: libtool version $lt_pversion found."
echo " You need libtool version $LIBTOOL_WANTED_VERSION or newer installed"
exit 1
fi
echo "buildconf: libtool version $lt_version (ok)"
# ------------------------------------------------------------
# run the correct scripts now
echo "buildconf: running libtoolize"
${LIBTOOLIZE:-libtoolize} --copy --automake || die "The command '${LIBTOOLIZE:-libtoolize} --copy --automake' failed"
echo "buildconf: running aclocal"
${ACLOCAL:-aclocal} || die "The command '${ACLOCAL:-aclocal}' failed"
echo "buildconf: running autoheader"
${AUTOHEADER:-autoheader} || die "The command '${AUTOHEADER:-autoheader}' failed"
echo "buildconf: running autoconf"
${AUTOCONF:-autoconf} || die "The command '${AUTOCONF:-autoconf}' failed"
echo "buildconf: running automake"
${AUTOMAKE:-automake} -a || die "The command '${AUTOMAKE:-automake} -a' failed"
exit 0

1317
config.guess vendored

File diff suppressed because it is too large

1411
config.sub vendored

File diff suppressed because it is too large

View File

@@ -1,22 +1,37 @@
dnl $Id$
dnl Process this file with autoconf to produce a configure script.
dnl Ensure that this file is processed with autoconf 2.50 or newer
dnl Don't even think about removing this check!
AC_PREREQ(2.50)
AC_PREREQ(2.57)
dnl We don't know the version number "staticly" so we use a dash here
AC_INIT(curl, [-], [curl-bug@haxx.se])
dnl configure script copyright
AC_COPYRIGHT([Copyright (c) 1998 - 2003 Daniel Stenberg, <daniel@haxx.se>
This configure script may be copied, distributed and modified under the
terms of the curl license; see COPYING for more details])
dnl First some basic init macros
AC_INIT
AC_CONFIG_SRCDIR([lib/urldata.h])
AM_CONFIG_HEADER(lib/config.h src/config.h tests/server/config.h lib/ca-bundle.h)
AM_CONFIG_HEADER(lib/config.h src/config.h tests/server/config.h )
AM_MAINTAINER_MODE
dnl SED is needed by some of the tools
AC_PATH_PROG( SED, sed, , $PATH:/usr/bin:/usr/local/bin)
AC_SUBST(SED)
dnl AR is used by libtool, and try the odd Solaris path too
AC_PATH_PROG( AR, ar, , $PATH:/usr/bin:/usr/local/bin:/usr/ccs/bin)
AC_SUBST(AR)
dnl figure out the libcurl version
VERSION=`sed -ne 's/^#define LIBCURL_VERSION "\(.*\)"/\1/p' ${srcdir}/include/curl/curl.h`
VERSION=`$SED -ne 's/^#define LIBCURL_VERSION "\(.*\)"/\1/p' ${srcdir}/include/curl/curl.h`
AM_INIT_AUTOMAKE(curl,$VERSION)
AC_MSG_CHECKING([curl version])
AC_MSG_RESULT($VERSION)
dnl
dnl we extract the numerical version for curl-config only
VERSIONNUM=`sed -ne 's/^#define LIBCURL_VERSION_NUM 0x\(.*\)/\1/p' ${srcdir}/include/curl/curl.h`
VERSIONNUM=`$SED -ne 's/^#define LIBCURL_VERSION_NUM 0x\(.*\)/\1/p' ${srcdir}/include/curl/curl.h`
AC_SUBST(VERSIONNUM)
dnl Solaris pkgadd support definitions
@@ -34,7 +49,7 @@ dnl
AC_CANONICAL_HOST
dnl Get system canonical name
AC_DEFINE_UNQUOTED(OS, "${host}")
AC_DEFINE_UNQUOTED(OS, "${host}", [cpu-machine-OS])
dnl Check for AIX weirdos
AC_AIX
@@ -51,6 +66,17 @@ AC_LIBTOOL_WIN32_DLL
dnl libtool setup
AM_PROG_LIBTOOL
case $host in
*-*-cygwin | *-*-mingw* | *-*-pw32*)
need_no_undefined=yes
;;
*)
need_no_undefined=no
;;
esac
AM_CONDITIONAL(NO_UNDEFINED, test x$need_no_undefined = xyes)
dnl The install stuff has already been taken care of by the automake stuff
dnl AC_PROG_INSTALL
AC_PROG_MAKE_SET
@@ -60,14 +86,14 @@ dnl switch off particular protocols
dnl
AC_MSG_CHECKING([whether to support http])
AC_ARG_ENABLE(http,
[ --enable-http Enable HTTP support
--disable-http Disable HTTP support],
AC_HELP_STRING([--enable-http],[Enable HTTP support])
AC_HELP_STRING([--disable-http],[Disable HTTP support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
AC_DEFINE(CURL_DISABLE_HTTP)
AC_DEFINE(CURL_DISABLE_HTTP, 1, [to disable HTTP])
AC_MSG_WARN([disable HTTP disables FTP over proxy and GOPHER too])
AC_DEFINE(CURL_DISABLE_GOPHER)
AC_DEFINE(CURL_DISABLE_GOPHER, 1, [to disable GOPHER])
AC_SUBST(CURL_DISABLE_HTTP)
AC_SUBST(CURL_DISABLE_GOPHER)
;;
@@ -78,12 +104,12 @@ AC_ARG_ENABLE(http,
)
AC_MSG_CHECKING([whether to support ftp])
AC_ARG_ENABLE(ftp,
[ --enable-ftp Enable FTP support
--disable-ftp Disable FTP support],
AC_HELP_STRING([--enable-ftp],[Enable FTP support])
AC_HELP_STRING([--disable-ftp],[Disable FTP support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
AC_DEFINE(CURL_DISABLE_FTP)
AC_DEFINE(CURL_DISABLE_FTP, 1, [to disable FTP])
AC_SUBST(CURL_DISABLE_FTP)
;;
*) AC_MSG_RESULT(yes)
@@ -93,12 +119,12 @@ AC_ARG_ENABLE(ftp,
)
AC_MSG_CHECKING([whether to support gopher])
AC_ARG_ENABLE(gopher,
[ --enable-gopher Enable GOPHER support
--disable-gopher Disable GOPHER support],
AC_HELP_STRING([--enable-gopher],[Enable GOPHER support])
AC_HELP_STRING([--disable-gopher],[Disable GOPHER support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
AC_DEFINE(CURL_DISABLE_GOPHER)
AC_DEFINE(CURL_DISABLE_GOPHER, 1, [to disable GOPHER])
AC_SUBST(CURL_DISABLE_GOPHER)
;;
*) AC_MSG_RESULT(yes)
@@ -108,12 +134,12 @@ AC_ARG_ENABLE(gopher,
)
AC_MSG_CHECKING([whether to support file])
AC_ARG_ENABLE(file,
[ --enable-file Enable FILE support
--disable-file Disable FILE support],
AC_HELP_STRING([--enable-file],[Enable FILE support])
AC_HELP_STRING([--disable-file],[Disable FILE support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
AC_DEFINE(CURL_DISABLE_FILE)
AC_DEFINE(CURL_DISABLE_FILE, 1, [to disable FILE])
AC_SUBST(CURL_DISABLE_FILE)
;;
*) AC_MSG_RESULT(yes)
@@ -123,12 +149,12 @@ AC_ARG_ENABLE(file,
)
AC_MSG_CHECKING([whether to support ldap])
AC_ARG_ENABLE(ldap,
[ --enable-ldap Enable LDAP support
--disable-ldap Disable LDAP support],
AC_HELP_STRING([--enable-ldap],[Enable LDAP support])
AC_HELP_STRING([--disable-ldap],[Disable LDAP support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
AC_DEFINE(CURL_DISABLE_LDAP)
AC_DEFINE(CURL_DISABLE_LDAP, 1, [to disable LDAP])
AC_SUBST(CURL_DISABLE_LDAP)
;;
*) AC_MSG_RESULT(yes)
@@ -138,12 +164,12 @@ AC_ARG_ENABLE(ldap,
)
AC_MSG_CHECKING([whether to support dict])
AC_ARG_ENABLE(dict,
[ --enable-dict Enable DICT support
--disable-dict Disable DICT support],
AC_HELP_STRING([--enable-dict],[Enable DICT support])
AC_HELP_STRING([--disable-dict],[Disable DICT support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
AC_DEFINE(CURL_DISABLE_DICT)
AC_DEFINE(CURL_DISABLE_DICT, 1, [to disable DICT])
AC_SUBST(CURL_DISABLE_DICT)
;;
*) AC_MSG_RESULT(yes)
@@ -153,12 +179,12 @@ AC_ARG_ENABLE(dict,
)
AC_MSG_CHECKING([whether to support telnet])
AC_ARG_ENABLE(telnet,
[ --enable-telnet Enable TELNET support
--disable-telnet Disable TELNET support],
AC_HELP_STRING([--enable-telnet],[Enable TELNET support])
AC_HELP_STRING([--disable-telnet],[Disable TELNET support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
AC_DEFINE(CURL_DISABLE_TELNET)
AC_DEFINE(CURL_DISABLE_TELNET, 1, [to disable TELNET])
AC_SUBST(CURL_DISABLE_TELNET)
;;
*) AC_MSG_RESULT(yes)
@@ -174,8 +200,8 @@ dnl **********************************************************************
AC_MSG_CHECKING([whether to enable ipv6])
AC_ARG_ENABLE(ipv6,
[ --enable-ipv6 Enable ipv6 (with ipv4) support
--disable-ipv6 Disable ipv6 support],
AC_HELP_STRING([--enable-ipv6],[Enable ipv6 (with ipv4) support])
AC_HELP_STRING([--disable-ipv6],[Disable ipv6 support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
@@ -215,11 +241,11 @@ dnl Checks for libraries.
dnl **********************************************************************
dnl gethostbyname in the nsl lib?
AC_CHECK_FUNC(gethostbyname, , AC_CHECK_LIB(nsl, gethostbyname))
AC_CHECK_FUNC(gethostbyname, , [ AC_CHECK_LIB(nsl, gethostbyname) ])
if test "$ac_cv_lib_nsl_gethostbyname" != "yes" -a "$ac_cv_func_gethostbyname" != "yes"; then
dnl gethostbyname in the socket lib?
AC_CHECK_FUNC(gethostbyname, , AC_CHECK_LIB(socket, gethostbyname))
AC_CHECK_FUNC(gethostbyname, , [ AC_CHECK_LIB(socket, gethostbyname) ])
fi
dnl At least one system has been identified to require BOTH nsl and
@@ -244,7 +270,7 @@ if test "$ac_cv_lib_nsl_gethostbyname" = "$ac_cv_func_gethostbyname"; then
fi
dnl resolve lib?
AC_CHECK_FUNC(strcasecmp, , AC_CHECK_LIB(resolve, strcasecmp))
AC_CHECK_FUNC(strcasecmp, , [ AC_CHECK_LIB(resolve, strcasecmp) ])
if test "$ac_cv_lib_resolve_strcasecmp" = "$ac_cv_func_strcasecmp"; then
AC_CHECK_LIB(resolve, strcasecmp,
@@ -254,21 +280,36 @@ if test "$ac_cv_lib_resolve_strcasecmp" = "$ac_cv_func_strcasecmp"; then
fi
dnl socket lib?
AC_CHECK_FUNC(connect, , AC_CHECK_LIB(socket, connect))
AC_CHECK_FUNC(connect, , [ AC_CHECK_LIB(socket, connect) ])
dnl dl lib?
AC_CHECK_FUNC(dlclose, , AC_CHECK_LIB(dl, dlopen))
AC_CHECK_FUNC(dlclose, , [ AC_CHECK_LIB(dl, dlopen) ])
AC_MSG_CHECKING([whether to use libgcc])
AC_ARG_ENABLE(libgcc,
AC_HELP_STRING([--enable-libgcc],[use libgcc when linking]),
[ case "$enableval" in
yes)
LIBS="$LIBS -lgcc"
AC_MSG_RESULT(yes)
;;
*) AC_MSG_RESULT(no)
;;
esac ],
AC_MSG_RESULT(no)
)
dnl **********************************************************************
dnl Check how non-blocking sockets are set
dnl **********************************************************************
AC_ARG_ENABLE(nonblocking,
[ --enable-nonblocking Makes the script detect how to do it
--disable-nonblocking Makes the script disable non-blocking sockets],
AC_HELP_STRING([--enable-nonblocking],[Enable detecting how to do it])
AC_HELP_STRING([--disable-nonblocking],[Disable non-blocking socket detection]),
[
if test "$enableval" = "no" ; then
AC_MSG_WARN([non-blocking sockets disabled])
AC_DEFINE(HAVE_DISABLED_NONBLOCKING)
AC_DEFINE(HAVE_DISABLED_NONBLOCKING, 1,
[to disable NON-BLOCKING connections])
else
CURL_CHECK_NONBLOCKING_SOCKET
fi
@@ -282,44 +323,62 @@ dnl Check for the random seed preferences
dnl **********************************************************************
AC_ARG_WITH(egd-socket,
[ --with-egd-socket=FILE Entropy Gathering Daemon socket pathname],
AC_HELP_STRING([--with-egd-socket=FILE],
[Entropy Gathering Daemon socket pathname]),
[ EGD_SOCKET="$withval" ]
)
if test -n "$EGD_SOCKET" ; then
AC_DEFINE_UNQUOTED(EGD_SOCKET, "$EGD_SOCKET")
AC_DEFINE_UNQUOTED(EGD_SOCKET, "$EGD_SOCKET",
[your Entropy Gathering Daemon socket pathname] )
fi
dnl Check for user-specified random device
AC_ARG_WITH(random,
[ --with-random=FILE read randomness from FILE (default=/dev/urandom)],
AC_HELP_STRING([--with-random=FILE],[read randomness from FILE (default=/dev/urandom)]),
[ RANDOM_FILE="$withval" ],
[
dnl Check for random device
AC_CHECK_FILE("/dev/urandom",
[
RANDOM_FILE="/dev/urandom";
]
)
AC_CHECK_FILE("/dev/urandom", [ RANDOM_FILE="/dev/urandom"] )
]
)
if test -n "$RANDOM_FILE" ; then
AC_SUBST(RANDOM_FILE)
AC_DEFINE_UNQUOTED(RANDOM_FILE, "$RANDOM_FILE")
AC_DEFINE_UNQUOTED(RANDOM_FILE, "$RANDOM_FILE",
[a suitable file to read random data from])
fi
dnl **********************************************************************
dnl Check if the operating system allows programs to write to their own argv[]
dnl **********************************************************************
AC_MSG_CHECKING([if argv can be written to])
AC_RUN_IFELSE([[
int main(int argc, char ** argv) {
argv[0][0] = ' ';
return (argv[0][0] == ' ')?0:1;
}
]],
AC_DEFINE(HAVE_WRITABLE_ARGV, 1, [Define this symbol if your OS supports changing the contents of argv])
AC_MSG_RESULT(yes),
AC_MSG_RESULT(no),
AC_MSG_RESULT(no)
AC_MSG_WARN([the previous check could not be made default was used])
)
dnl **********************************************************************
dnl Check for the presence of Kerberos4 libraries and headers
dnl **********************************************************************
AC_ARG_WITH(krb4-includes,
[ --with-krb4-includes[=DIR] Specify location of kerberos4 headers],[
AC_HELP_STRING([--with-krb4-includes=DIR],
[Specify location of kerberos4 headers]),[
CPPFLAGS="$CPPFLAGS -I$withval"
KRB4INC="$withval"
want_krb4=yes
])
AC_ARG_WITH(krb4-libs,
[ --with-krb4-libs[=DIR] Specify location of kerberos4 libs],[
AC_HELP_STRING([--with-krb4-libs=DIR],[Specify location of kerberos4 libs]),[
LDFLAGS="$LDFLAGS -L$withval"
KRB4LIB="$withval"
want_krb4=yes
@@ -328,7 +387,7 @@ AC_ARG_WITH(krb4-libs,
OPT_KRB4=off
AC_ARG_WITH(krb4,dnl
[ --with-krb4[=DIR] where to look for Kerberos4],[
AC_HELP_STRING([--with-krb4=DIR],[where to look for Kerberos4]),[
OPT_KRB4="$withval"
if test X"$OPT_KRB4" != Xyes
then
@@ -366,7 +425,7 @@ then
AC_CHECK_HEADERS(des.h)
dnl resolv lib?
AC_CHECK_FUNC(res_search, , AC_CHECK_LIB(resolv, res_search))
AC_CHECK_FUNC(res_search, , [AC_CHECK_LIB(resolv, res_search)])
dnl Check for the Kerberos4 library
AC_CHECK_LIB(krb, krb_net_read,
@@ -382,7 +441,8 @@ then
AC_CHECK_FUNCS(krb_get_our_ip_for_realm)
dnl add define KRB4
AC_DEFINE(KRB4)
AC_DEFINE(KRB4, 1,
[if you have the Kerberos4 libraries (including -ldes)])
dnl substitute it too!
KRB4_ENABLED=1
@@ -397,6 +457,88 @@ else
AC_MSG_RESULT(no)
fi
dnl **********************************************************************
dnl Check for GSS-API libraries
dnl **********************************************************************
AC_ARG_WITH(gssapi-includes,
AC_HELP_STRING([--with-gssapi-includes=DIR],
[Specify location of GSSAPI header]),
[ GSSAPI_INCS="-I$withval"
want_gss="yes" ]
)
AC_ARG_WITH(gssapi-libs,
AC_HELP_STRING([--with-gssapi-libs=DIR],
[Specify location of GSSAPI libs]),
[ GSSAPI_LIBS="-L$withval -lgssapi"
want_gss="yes" ]
)
AC_ARG_WITH(gssapi,
AC_HELP_STRING([--with-gssapi=DIR],
[Where to look for GSSAPI]),
[ GSSAPI_ROOT="$withval"
want_gss="yes" ]
)
AC_MSG_CHECKING([if GSSAPI support is requested])
if test x"$want_gss" = xyes; then
if test -z "$GSSAPI_INCS"; then
if test -f "$GSSAPI_ROOT/bin/krb5-config"; then
gss_cppflags=`$GSSAPI_ROOT/bin/krb5-config --cflags gssapi`
CPPFLAGS="$CPPFLAGS $gss_cppflags"
else
CPPFLAGS="$GSSAPI_ROOT/include"
fi
else
CPPFLAGS="$CPPFLAGS $GSSAPI_INCS"
fi
if test -z "$GSSAPI_LIB_DIR"; then
if test -f "$GSSAPI_ROOT/bin/krb5-config"; then
gss_ldflags=`$GSSAPI_ROOT/bin/krb5-config --libs gssapi`
LDFLAGS="$LDFLAGS $gss_ldflags"
else
LDFLAGS="$LDFLAGS $GSSAPI_ROOT/lib -lgssapi"
fi
else
LDFLAGS="$LDFLAGS $GSSAPI_LIB_DIR"
fi
AC_MSG_RESULT(yes)
AC_DEFINE(GSSAPI, 1, [if you have the gssapi libraries])
else
AC_MSG_RESULT(no)
fi
dnl Detect the pkg-config tool, as it may have extra info about the
dnl openssl installation we can use. I *believe* this is what we are
dnl expected to do on really recent Redhat Linux hosts.
AC_PATH_PROG( PKGCONFIG, pkg-config, no, $PATH:/usr/bin:/usr/local/bin)
if test "$PKGCONFIG" != "no" ; then
AC_MSG_CHECKING([for OpenSSL options using pkg-config])
$PKGCONFIG --exists openssl
SSL_EXISTS=$?
if test "$SSL_EXISTS" -eq "0"; then
SSL_LIBS=`$PKGCONFIG --libs-only-l openssl 2>/dev/null`
SSL_LDFLAGS=`$PKGCONFIG --libs-only-L openssl 2>/dev/null`
SSL_CPPFLAGS=`$PKGCONFIG --cflags-only-I openssl 2>/dev/null`
LIBS="$LIBS $SSL_LIBS"
CPPFLAGS="$CPPFLAGS $SSL_CPPFLAGS"
LDFLAGS="$LDFLAGS $SSL_LDFLAGS"
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no])
fi
fi
dnl **********************************************************************
dnl Check for the presence of SSL libraries and headers
@@ -404,8 +546,10 @@ dnl **********************************************************************
dnl Default to compiler & linker defaults for SSL files & libraries.
OPT_SSL=off
dnl Default to no CA bundle
ca="no"
AC_ARG_WITH(ssl,dnl
AC_HELP_STRING([--with-ssl=PATH], [where to look for SSL, PATH points to the SSL installation (default: /usr/local/ssl)])
AC_HELP_STRING([--with-ssl=PATH],[where to look for SSL, PATH points to the SSL installation (default: /usr/local/ssl)])
AC_HELP_STRING([--without-ssl], [disable SSL]),
OPT_SSL=$withval)
@@ -483,10 +627,40 @@ else
dnl If the ENGINE library seems to be around, check for the OpenSSL engine
dnl header, it is kind of "separated" from the main SSL check
AC_CHECK_FUNC(ENGINE_init, AC_CHECK_HEADERS(openssl/engine.h))
AC_CHECK_FUNC(ENGINE_init, [ AC_CHECK_HEADERS(openssl/engine.h) ])
AC_SUBST(OPENSSL_ENABLED)
AC_MSG_CHECKING([CA cert bundle install path])
AC_ARG_WITH(ca-bundle,
AC_HELP_STRING([--with-ca-bundle=FILE], [File name to install the CA bundle as])
AC_HELP_STRING([--without-ca-bundle], [Don't install the CA bundle]),
[ ca="$withval" ],
[
if test "x$prefix" != xNONE; then
ca="\${prefix}/share/curl/curl-ca-bundle.crt"
else
ca="$ac_default_prefix/share/curl/curl-ca-bundle.crt"
fi
] )
if test X"$OPT_SSL" = Xno; then
ca="no"
fi
if test "x$ca" != "xno"; then
CURL_CA_BUNDLE='"'$ca'"'
AC_SUBST(CURL_CA_BUNDLE)
fi
AC_MSG_RESULT([$ca])
dnl these can only exist if openssl exists
AC_CHECK_FUNCS( RAND_status \
RAND_screen \
RAND_egd )
fi
if test X"$OPT_SSL" != Xoff &&
@@ -494,15 +668,10 @@ else
AC_MSG_ERROR([OpenSSL libs and/or directories were not found where specified!])
fi
dnl these can only exist if openssl exists
AC_CHECK_FUNCS( RAND_status \
RAND_screen \
RAND_egd )
fi
AM_CONDITIONAL(CABUNDLE, test x$ca != xno)
dnl **********************************************************************
dnl Check for the presence of ZLIB libraries and headers
dnl **********************************************************************
@@ -513,9 +682,9 @@ _cppflags=$CPPFLAGS
_ldflags=$LDFLAGS
OPT_ZLIB="/usr/local"
AC_ARG_WITH(zlib,
AC_HELP_STRING([--with-zlib=PATH], [search for zlib in PATH])
AC_HELP_STRING([--without-zlib], [disable use of zlib]),
[OPT_ZLIB="$withval"])
AC_HELP_STRING([--with-zlib=PATH],[search for zlib in PATH])
AC_HELP_STRING([--without-zlib],[disable use of zlib]),
[OPT_ZLIB="$withval"])
case "$OPT_ZLIB" in
no)
@@ -532,8 +701,7 @@ case "$OPT_ZLIB" in
AC_CHECK_HEADER(zlib.h,[
AC_CHECK_LIB(z, gzread,
[AM_CONDITIONAL(CONTENT_ENCODING, true)
HAVE_LIBZ="1"
[HAVE_LIBZ="1"
AC_SUBST(HAVE_LIBZ)
LIBS="$LIBS -lz"
AC_DEFINE(HAVE_ZLIB_H, 1, [if you have the zlib.h header file])
@@ -546,16 +714,35 @@ case "$OPT_ZLIB" in
;;
esac
dnl set variable for use in automakefile(s)
AM_CONDITIONAL(HAVE_LIBZ, test x"$HAVE_LIBZ" = x1)
dnl Default is to try the thread-safe versions of a few functions
OPT_THREAD=on
dnl detect AIX 4.3 or later
dnl see full docs on this reasoning in the lib/hostip.c source file
AC_MSG_CHECKING([AIX 4.3 or later])
AC_PREPROC_IFELSE([
#if defined(_AIX) && defined(_AIX43)
printf("just fine");
#else
#error "this is not AIX 4.3 or later"
#endif
],
[ AC_MSG_RESULT([yes])
OPT_THREAD=off ],
[ AC_MSG_RESULT([no]) ]
)
AC_ARG_ENABLE(thread,dnl
[ --disable-thread tell configure to not look for thread-safe functions],
AC_HELP_STRING([--disable-thread],[don't look for thread-safe functions]),
OPT_THREAD=off
AC_MSG_WARN(libcurl will not get built using thread-safe functions)
)
if test X"$OPT_THREAD" = Xoff
then
AC_MSG_WARN(libcurl will not get built using thread-safe functions)
AC_DEFINE(DISABLED_THREADSAFE, 1, \
Set to explicitly specify we don't want to use thread-safe functions)
else
@@ -582,21 +769,23 @@ dnl **********************************************************************
dnl Checks for header files.
AC_HEADER_STDC
AC_CHECK_HEADERS( \
dnl First check for the very most basic headers. Then we can use these
dnl ones as default-headers when checking for the rest!
AC_CHECK_HEADERS(
sys/types.h \
sys/time.h \
sys/select.h \
sys/socket.h \
unistd.h \
malloc.h \
stdlib.h \
arpa/inet.h \
net/if.h \
netinet/in.h \
netinet/if_ether.h \
netdb.h \
sys/select.h \
sys/socket.h \
sys/sockio.h \
sys/stat.h \
sys/types.h \
sys/time.h \
sys/param.h \
termios.h \
termio.h \
@@ -611,7 +800,26 @@ AC_CHECK_HEADERS( \
utime.h \
sys/utime.h \
sys/poll.h \
setjmp.h
setjmp.h,
dnl to do if not found
[],
dnl to do if found
[],
dnl default includes
[
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
]
)
dnl Checks for typedefs, structures, and compiler characteristics.
@@ -619,19 +827,20 @@ AC_C_CONST
AC_TYPE_SIZE_T
AC_HEADER_TIME
# mprintf() checks:
AC_CHECK_SIZEOF(off_t)
# check for 'long double'
# AC_CHECK_SIZEOF(long double, 8)
# check for 'long long'
# AC_CHECK_SIZEOF(long long, 4)
AC_CHECK_TYPE(long long,
[AC_DEFINE(HAVE_LONGLONG, 1, [if your compiler supports 'long long'])])
# check for ssize_t
AC_CHECK_TYPE(ssize_t, int)
AC_CHECK_TYPE(ssize_t, ,
AC_DEFINE(ssize_t, int, [the signed version of size_t]))
TYPE_SOCKLEN_T
TYPE_IN_ADDR_T
AC_FUNC_SELECT_ARGTYPES
dnl Checks for library functions.
dnl AC_PROG_GCC_TRADITIONAL
AC_TYPE_SIGNAL
@@ -654,7 +863,6 @@ AC_CHECK_FUNCS( socket \
tcgetattr \
perror \
closesocket \
setvbuf \
sigaction \
signal \
getpass_r \
@@ -664,7 +872,21 @@ AC_CHECK_FUNCS( socket \
dlopen \
utime \
sigsetjmp \
poll
poll,
dnl if found
[],
dnl if not found, $ac_func is the name we check for
func="$ac_func"
AC_MSG_CHECKING([deeper for $func])
AC_TRY_LINK( [],
[ $func ();],
AC_MSG_RESULT(yes!)
eval "ac_cv_func_$func=yes"
def=`echo "HAVE_$func" | tr 'a-z' 'A-Z'`
AC_DEFINE_UNQUOTED($def, 1, [If you have $func]),
AC_MSG_RESULT(but still no)
)
)
dnl sigsetjmp() might be a macro and no function so if it isn't found already
@@ -675,20 +897,11 @@ if test "$ac_cv_func_sigsetjmp" != "yes"; then
[sigjmp_buf jmpenv;
sigsetjmp(jmpenv, 1);],
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SIGSETJMP),
AC_DEFINE(HAVE_SIGSETJMP, 1, [If you have sigsetjmp]),
AC_MSG_RESULT(no)
)
fi
dnl removed 'getpass' check on October 26, 2000
if test "$ac_cv_func_select" != "yes"; then
AC_MSG_ERROR(Can't work without an existing select() function)
fi
if test "$ac_cv_func_socket" != "yes"; then
AC_MSG_ERROR(Can't work without an existing socket() function)
fi
AC_PATH_PROG( PERL, perl, ,
$PATH:/usr/local/bin/perl:/usr/bin/:/usr/local/bin )
AC_SUBST(PERL)
@@ -697,32 +910,6 @@ AC_PATH_PROGS( NROFF, gnroff nroff, ,
$PATH:/usr/bin/:/usr/local/bin )
AC_SUBST(NROFF)
AC_MSG_CHECKING([CA cert bundle install path])
AC_ARG_WITH(ca-bundle,
AC_HELP_STRING([--with-ca-bundle=FILE], [File name to install the CA bundle as])
AC_HELP_STRING([--without-ca-bundle], [Don't install the CA bundle]),
[ ca="$withval" ],
[
if test "x$prefix" != xNONE; then
ca="$prefix/share/curl/curl-ca-bundle.crt"
else
ca="$ac_default_prefix/share/curl/curl-ca-bundle.crt"
fi
] )
if test "x$ca" = "xno"; then
dnl let's not keep "no" as path name, blank it instead
ca=""
else
AC_DEFINE_UNQUOTED(CURL_CA_BUNDLE, "$ca", [CA bundle full path name])
fi
CURL_CA_BUNDLE="$ca"
AC_SUBST(CURL_CA_BUNDLE)
AC_MSG_RESULT([$ca])
AC_PROG_YACC
dnl AC_PATH_PROG( RANLIB, ranlib, /usr/bin/ranlib,
@@ -734,21 +921,62 @@ dnl lame option to switch on debug options
dnl
AC_MSG_CHECKING([whether to enable debug options])
AC_ARG_ENABLE(debug,
[ --enable-debug Enable pedantic debug options
--disable-debug Disable debug options],
AC_HELP_STRING([--enable-debug],[Enable pedantic debug options])
AC_HELP_STRING([--disable-debug],[Disable debug options]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
;;
*) AC_MSG_RESULT(yes)
CPPFLAGS="$CPPFLAGS -DMALLOCDEBUG"
CFLAGS="-W -Wall -Wwrite-strings -pedantic -Wundef -Wpointer-arith -Wcast-align -Wnested-externs -g"
CPPFLAGS="$CPPFLAGS -DCURLDEBUG"
CFLAGS="$CFLAGS -g"
if test "$GCC" = "yes"; then
CFLAGS="$CFLAGS -W -Wall -Wwrite-strings -pedantic -Wundef -Wpointer-arith -Wnested-externs"
fi
dnl strip off optimizer flags
NEWFLAGS=""
for flag in $CFLAGS; do
case "$flag" in
-O*)
dnl echo "cut off $flag"
;;
*)
NEWFLAGS="$NEWFLAGS $flag"
;;
esac
done
CFLAGS=$NEWFLAGS
;;
esac ],
AC_MSG_RESULT(no)
)
ares="no"
AC_MSG_CHECKING([whether to enable ares])
AC_ARG_ENABLE(ares,
AC_HELP_STRING([--enable-ares],[Enable using ares for name lookups])
AC_HELP_STRING([--disable-ares],[Disable using ares for name lookups]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
;;
*) AC_MSG_RESULT(yes)
if test "x$IPV6_ENABLED" = "x1"; then
AC_MSG_ERROR([ares doesn't work with ipv6, disable ipv6 to use ares])
fi
AC_DEFINE(USE_ARES, 1, [Define if you want to enable ares support])
ares="yes"
;;
esac ],
AC_MSG_RESULT(no)
)
AM_CONDITIONAL(ARES, test x$ares = xyes)
AC_CONFIG_FILES([Makefile \
docs/Makefile \
docs/examples/Makefile \
@@ -760,6 +988,7 @@ AC_CONFIG_FILES([Makefile \
tests/Makefile \
tests/data/Makefile \
tests/server/Makefile \
tests/libtest/Makefile \
packages/Makefile \
packages/Win32/Makefile \
packages/Win32/cygwin/Makefile \
@@ -768,6 +997,7 @@ AC_CONFIG_FILES([Makefile \
packages/Linux/RPM/curl.spec \
packages/Linux/RPM/curl-ssl.spec \
packages/Solaris/Makefile \
packages/DOS/Makefile \
packages/EPM/curl.list \
packages/EPM/Makefile \
curl-config


@@ -107,7 +107,11 @@ while test $# -gt 0; do
;;
--cflags)
echo -I@includedir@
if test "X@includedir@" = "X/usr/include"; then
echo ""
else
echo "-I@includedir@"
fi
;;
--libs)
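As an aside, a typical way to consume these flags when compiling a program against libcurl is command substitution; app.c is only a placeholder name here:

  gcc -o app app.c `curl-config --cflags` `curl-config --libs`

With the change above, the --cflags output stays empty when the headers already live in /usr/include, so no redundant -I flag ends up on the compiler command line.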


@@ -1,21 +0,0 @@
;;;; Emacs Lisp help for writing curl code. ;;;;
;;; In C files, put something like this to load this file automatically:
;;
;; /* -----------------------------------------------------------------
;; * local variables:
;; * eval: (load-file "../curl-mode.el")
;; * end:
;; */
;;
;; (note: make sure to get the path right in the argument to load-file).
;;; The curl hacker's C conventions
;;; we use intent-level 2
(setq c-basic-offset 2)
;;; never ever use tabs to indent!
(setq indent-tabs-mode nil)
;;; I like this, stolen from Subversion! ;-)
(setq angry-mob-with-torches-and-pitchforks t)

curl-style.el Normal file

@@ -0,0 +1,50 @@
;;;; Emacs Lisp help for writing curl code. ;;;;
;;;; $Id$
;;; The curl hacker's C conventions.
;;; After loading this file and adding the mode-hook, you can put
;;; something like this in C files to use the curl style
;;; automatically:
;;
;; /* -----------------------------------------------------------------
;; * local variables:
;; * eval: (set c-file-style "curl")
;; * end:
;; */
;;
(defconst curl-c-style
'((c-basic-offset . 2)
(c-comment-only-line-offset . 0)
(c-hanging-braces-alist . ((substatement-open before after)))
(c-offsets-alist . ((topmost-intro . 0)
(topmost-intro-cont . 0)
(substatement . +)
(substatement-open . 0)
(statement-case-intro . +)
(statement-case-open . 0)
(case-label . 0)
))
)
"Curl C Programming Style")
;; Customizations for all of c-mode, c++-mode, and objc-mode
(defun curl-c-mode-common-hook ()
"Curl C mode hook"
;; add curl style and set it for the current buffer
(c-add-style "curl" curl-c-style t)
(setq tab-width 8
indent-tabs-mode nil ; Use spaces. Not tabs.
comment-column 40
c-font-lock-extra-types (append '("bool" "CURL" "CURLcode" "ssize_t" "size_t" "socklen_t" "fd_set" "time_t"))
)
;; keybindings for C, C++, and Objective-C. We can put these in
;; c-mode-base-map because of inheritance ...
(define-key c-mode-base-map "\M-q" 'c-fill-paragraph)
(setq c-recognize-knr-p nil)
)
;; Set this is in your .emacs if you want to use the c-mode-hook as
;; defined here right out of the box.
; (add-hook 'c-mode-common-hook 'curl-c-mode-common-hook)


@@ -1,3 +1,5 @@
Makefile
Makefile.in
*html
*ps
*pdf


@@ -8,7 +8,7 @@ $Id$
BUGS
Curl and libcurl have grown substantially since the beginning. At the time
of writing (end of April 2002), there are 32000 lines of source code, and by
of writing (end of March 2003), there are 35000 lines of source code, and by
the time you read this it has probably grown even more.
Of course there are lots of bugs left. And lots of misfeatures.
@@ -19,18 +19,21 @@ BUGS
WHERE TO REPORT
If you can't fix a bug yourself and submit a fix for it, try to report an as
detailed report as possible to the curl mailing list to allow one of us to
detailed report as possible to a curl mailing list to allow one of us to
have a go at a solution. You should also post your bug/problem at curl's bug
tracking system over at
http://sourceforge.net/bugs/?group_id=976
(but please read the section below first before doing that)
(but please read the sections below first before doing that)
If you feel you need to ask around first, find a suitable mailing list and
post there. The lists are available on http://curl.haxx.se/mail/
WHAT TO REPORT
When reporting a bug, you should include information that will help us
understand what's wrong what you expected to happen and how to repeat the
understand what's wrong, what you expected to happen and how to repeat the
bad behavior. You therefore need to tell us:
- your operating system's name and version number (uname -a under a unix


@@ -42,7 +42,8 @@ Naming
understandable and be named according to what they're used for. File-local
functions should be made static. We like lower case names.
See the INTERNALS document on how we name non-exported library-global symbols.
See the INTERNALS document on how we name non-exported library-global
symbols.
Indenting
@@ -55,7 +56,7 @@ Indenting
Commenting
Comment your source code extensively using C comments (/* comment */), do not
Comment your source code extensively using C comments (/* comment */), DO NOT
use C++ comments (// this style). Commented code is quality code and enables
future modifications much more. Uncommented code runs a much bigger risk of being
completely replaced when someone wants to extend things, since other persons'
@@ -64,7 +65,7 @@ Commenting
General Style
Keep your functions small. If they're small you avoid a lot of mistakes and
you don't accidentally mix up variables.
you don't accidentally mix up variables etc.
Non-clobbering All Over
@@ -78,11 +79,11 @@ Non-clobbering All Over
Platform Dependent Code
Use #ifdef HAVE_FEATURE to do conditional code. We avoid checking for
particular operting systems or hardware in the #ifdef lines. The HAVE_FEATURE
shall be generated by the configure script for unix-like systems and they are
hard-coded in the config-[system].h files for the others.
particular operating systems or hardware in the #ifdef lines. The
HAVE_FEATURE shall be generated by the configure script for unix-like systems
and they are hard-coded in the config-[system].h files for the others.
Separate Patches Doing Different Things
Separate Patches
It is annoying when you get a huge patch from someone that is said to fix 511
odd problems, but discussions and opinions don't agree with 510 of them - or
@@ -128,3 +129,31 @@ Test Cases
in the test suite. Every feature that is added should get at least one valid
test case that verifies that it works as documented. If every submitter also
post a few test cases, it won't end up as a heavy burden on a single person!
How To Make a Patch
Keep a copy of the unmodified curl sources. Make your changes in a separate
source tree. When you think you have something that you want to offer the
curl community, use GNU diff to generate patches.
If you have modified a single file, try something like:
diff -u unmodified-file.c my-changed-one.c > my-fixes.diff
If you have modified several files, possibly in different directories, you
can use diff recursively:
diff -ur curl-original-dir curl-modified-sources-dir > my-fixes.diff
The GNU diff and GNU patch tools exist for virtually all platforms, including
all kinds of unixes and Windows:
For unix-like operating systems:
http://www.fsf.org/software/patch/patch.html
http://www.gnu.org/directory/diffutils.html
For Windows:
http://gnuwin32.sourceforge.net/packages/patch.htm
http://gnuwin32.sourceforge.net/packages/diffutils.htm

docs/FAQ

@@ -1,4 +1,4 @@
Updated: October 8, 2002 (http://curl.haxx.se/docs/faq.html)
Updated: June 17, 2003 (http://curl.haxx.se/docs/faq.html)
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
@@ -58,6 +58,8 @@ FAQ
4.8 I found a bug!
4.9 Curl can't authenticate to the server that requires NTLM?
4.10 My HTTP request using HEAD, PUT or DELETE doesn't work!
4.11 Why do my HTTP range requests return the full document?
4.12 Why do I get "certificate verify failed" ?
5. libcurl Issues
5.1 Is libcurl thread-safe?
@@ -95,6 +97,12 @@ FAQ
We spell it cURL or just curl. We pronounce it with an initial k sound:
[kurl].
NOTE: there are numerous sub-projects and related projects that also use the
word curl in the project names in various combinations, but you should take
notice that this FAQ is directed at the command-line tool named curl (and
libcurl the library), and may therefore not be valid for other curl
projects.
1.2 What is libcurl?
libcurl is a reliable and portable library which provides you with an easy
@@ -130,11 +138,9 @@ FAQ
better. We do however believe in a few rules when it comes to the future of
curl:
* Curl is to remain a command line tool. If you want GUIs or fancy scripting
capabilities, you're free to write another tool that uses libcurl and that
offers this. There's no point in having a single tool that does every
imaginable thing. That's also one of the great advantages of having the
core of curl as a library.
* Curl -- the command line tool -- is to remain a non-graphical command line
tool. If you want GUIs or fancy scripting capabilities, you should look
for another tool that uses libcurl.
* We do not add things to curl that other small and available tools already
do very fine at the side. Curl's output is fine to pipe into another
@@ -173,20 +179,21 @@ FAQ
Project cURL is entirely free and open. No person gets paid for developing
curl. We do this voluntarily on our spare time.
We get some help from companies. Contactor Data hosts the curl web site and
the main mailing list, Haxx owns the curl web site's domain and
sourceforge.net hosts several project tools we take advantage from like the
bug tracker, mailing lists and more.
We get some help from companies. Contactor Data hosts the curl web site,
Haxx owns the curl web site's domain and sourceforge.net hosts several
project services we take advantage from, like the bug tracker, mailing lists
and more.
If you want to support our project with a donation or similar, one way of
doing that would be to buy "gift certificates" at useful online shopping
sites, such as amazon.com or thinkgeek.com. Another way would be to sponsor
us through a banner-program or even better: by helping us coding,
documenting, testing etc.
documenting, testing etc. You're welcome to send us a buck using paypal, as
described here: http://curl.haxx.se/donation.html
1.7 What about CURL from curl.com?
During the summer 2001, curl.com has been busy advertising their client-side
During the summer 2001, curl.com was busy advertising their client-side
programming language for the web, named CURL.
We are in no way associated with curl.com or their CURL programming
@@ -201,17 +208,17 @@ FAQ
1.8 I have a problem who do I mail?
Please do not attempt to mail any single individual unless you really need
to. Keep curl-related questions on a suitable mailing list. All available
mailing lists are listed in the MANUAL document and online at
Please do not mail any single individual unless you really need to. Keep
curl-related questions on a suitable mailing list. All available mailing
lists are listed in the MANUAL document and online at
http://curl.haxx.se/mail/
Keeping curl-related questions and discussions on mailing lists allows others
to join in and help, to share their ideas, contribute their suggestions and
spread their wisdom. Keeping discussions on public mailing lists also allows
for others to learn from this (both current and future users thanks to the
web based archives of the mailing lists), thus saving us from having to
repeat ourselves even more. Thanks for respecting this.
Keeping curl-related questions and discussions on mailing lists allows
others to join in and help, to share their ideas, contribute their
suggestions and spread their wisdom. Keeping discussions on public mailing
lists also allows for others to learn from this (both current and future
users thanks to the web based archives of the mailing lists), thus saving us
from having to repeat ourselves even more. Thanks for respecting this.
2. Install Related Problems
@@ -273,8 +280,8 @@ FAQ
2.4. Does cURL support Socks (RFC 1928) ?
No. Nobody has wanted it that badly yet. We appreciate patches that bring
this functionality.
There is limited support for SOCKS5 for curl built with IPv6 support
disabled.
3. Usage problems
@@ -366,9 +373,10 @@ FAQ
http://curl.haxx.se/libcurl/
In December 2001, there are interfaces available for the following
languages: C/C++, Cocoa, Dylan, Java, Perl, PHP, Python, Rexx, Ruby, Scheme
and Tcl. By the time you read this, additional ones may have appeared!
In February 2003, there are interfaces available for the following
languages: Basic, C, C++, Cocoa, Dylan, Euphoria, Java, Lua, Object-Pascal,
Pascal, Perl, PHP, PostgreSQL, Python, Rexx, Ruby, Scheme and Tcl. By the
time you read this, additional ones may have appeared!
3.10 What about SOAP, WebDAV, XML-RPC or similar protocols over HTTP?
@@ -377,8 +385,8 @@ FAQ
XML-RPC are all such ones. You can use -X to set custom requests and -H to
set custom headers (or replace internally generated ones).
Using libcurl or PHP's curl modules is just as fine and you'd just use the
proper library options to do the same.
Using libcurl is of course just as fine and you'd just use the proper
library options to do the same.
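As a purely illustrative example (the host name is made up), a WebDAV request could be composed from the command line like this:

    curl -X PROPFIND -H "Depth: 1" http://dav.example.com/folder/

The same idea applies to SOAP or XML-RPC: POST the XML payload with -d and replace the internally generated Content-Type header with -H.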
3.11 How do I POST with a different Content-Type?
@@ -492,8 +500,7 @@ FAQ
curl '{curl,www}.haxx.se'
To be able to use those letters as actual parts of the URL (without using
them for the curl URL "globbing" system), use the -g/--globoff option (curl
7.6 and later):
them for the curl URL "globbing" system), use the -g/--globoff option:
curl -g 'www.site.com/weirdname[].html'
@@ -586,9 +593,11 @@ FAQ
4.9. Curl can't authenticate to the server that requires NTLM?
NTLM is a Microsoft proprietary protocol. Unfortunately, curl does not
currently support that. Proprietary formats are evil. You should not use
such ones.
This is supported in curl 7.10.6 or later. No earlier curl version knows
of this magic.
NTLM is a Microsoft proprietary protocol. Proprietary formats are evil. You
should not use such ones.
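With curl 7.10.6 or later, an attempt could look like this (user name, password and host are of course made up):

    curl --ntlm -u username:password http://intranet.example.com/

Earlier versions do not know the --ntlm option at all.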
4.10 My HTTP request using HEAD, PUT or DELETE doesn't work!
@@ -601,6 +610,34 @@ FAQ
software you're trying to interact with. This is not anything curl can do
anything about.
4.11 Why do my HTTP range requests return the full document?
Because the range may not be supported by the server, or the server may
choose to ignore it and return the full document anyway.
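A quick way to see what the server actually does is to ask for a small range and watch the response code; a 206 means the range was honored, a 200 means the full document came back (the URL is just an example):

    curl -v -r 0-99 http://www.example.com/bigfile.html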
4.12 Why do I get "certificate verify failed" ?
You invoke curl 7.10 or later to communicate on a https:// URL and get an
error back looking something similar to this:
curl: (35) SSL: error:14090086:SSL routines:
SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
Then it means that curl couldn't verify that the server's certificate was
good. Curl verifies the certificate using the CA cert bundle that comes with
the curl installation.
To disable the verification (which makes it act like curl did before 7.10),
use -k. This does however enable man-in-the-middle attacks.
If you get this failure but are having a CA cert bundle installed and used,
the server's certificate is not signed by one of the CA's in the bundle. It
might for example be self-signed. You then correct this problem by obtaining
a valid CA cert for the server. Or again, decrease the security by disabling
this check.
Details are also in the SSLCERTS file in the release archives, found online
here: http://curl.haxx.se/docs/sslcerts.html
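For illustration only (the path and host are assumptions), the two ways of dealing with the failure look like this on the command line:

    curl -k https://secure.example.com/
    curl --cacert /path/to/ca-bundle.crt https://secure.example.com/

The first one skips the verification entirely, the second one points curl at a CA cert that can vouch for the server.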
5. libcurl Issues
@@ -652,20 +689,15 @@ FAQ
5.3 How do I fetch multiple files with libcurl?
Starting with version 7.7, curl and libcurl will have excellent support for
transferring multiple files. You should just repeatedly set new URLs with
curl_easy_setopt() and then transfer it with curl_easy_perform(). The handle
you get from curl_easy_init() is not only reusable starting with libcurl
7.7, but also you're encouraged to reuse it if you can, as that will enable
libcurl to use persistent connections.
For libcurl prior to 7.7, there was no multiple file support. The only
available way to do multiple requests was to init/perform/cleanup for each
transfer.
libcurl has excellent support for transferring multiple files. You should
just repeatedly set new URLs with curl_easy_setopt() and then transfer it
with curl_easy_perform(). The handle you get from curl_easy_init() is not
only reusable, but you're even encouraged to reuse it if you can, as that
will enable libcurl to use persistent connections.
5.4 Does libcurl do Winsock initialization on win32 systems?
Yes (since 7.8.1) if told to in the curl_global_init() call.
Yes, if told to in the curl_global_init() call.
5.5 Does CURLOPT_FILE and CURLOPT_INFILE work on win32 ?
@@ -679,13 +711,11 @@ FAQ
5.6 What about Keep-Alive or persistent connections?
Starting with version 7.7, curl and libcurl will have excellent support for
persistent connections when transferring several files from the same server.
Curl will attempt to reuse connections for all URLs specified on the same
command line/config file, and libcurl will reuse connections for all
transfers that are made using the same libcurl handle.
Previous versions had no persistent connection support.
curl and libcurl have excellent support for persistent connections when
transferring several files from the same server. Curl will attempt to reuse
connections for all URLs specified on the same command line/config file, and
libcurl will reuse connections for all transfers that are made using the
same libcurl handle.
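For example (host name invented), giving several URLs on one command line lets curl reuse the connection for all of them:

    curl http://www.example.com/one.html http://www.example.com/two.html

With libcurl the same effect comes from performing several transfers on one easy handle, as described in 5.3 above.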
5.7 Link errors when building libcurl on Windows!
@@ -748,6 +778,5 @@ FAQ
discussions and a large amount of people have contributed with source code
knowing that this is the license we use. This license puts the restrictions
we want on curl/libcurl and it does not spread to other programs or
libraries that use it. The recent dual license modification should make it
possible for everyone to use libcurl or curl in their projects, no matter
what license they already have in use.
libraries that use it. It should be possible for everyone to use libcurl or
curl in their projects, no matter what license they already have in use.


@@ -17,27 +17,30 @@ Misc
- progress bar/time specs while downloading
- "standard" proxy environment variables support
- config file support
- compiles on win32 (reported built on 29 operating systems)
- compiles on win32 (reported builds on 40+ operating systems)
- redirectable stderr
- use selected network interface for outgoing traffic
- selectable network interface for outgoing traffic
- IPv6 support
- persistent connections
- socks5 support
- supports user name + password in proxy environment variables
- operations through proxy "tunnel" (using CONNECT)
HTTP
- HTTP/1.1 compliant
- HTTP/1.1 compliant (optionally uses 1.0)
- GET
- PUT
- HEAD
- POST
- multipart POST
- authentication
- multipart formpost (RFC1867-style)
- authentication (Basic, Digest, NTLM(*1), GSS-Negotiate(*3))
- resume (both GET and PUT)
- follow redirects
- maximum amount of redirects to follow
- custom HTTP request
- cookie get/send fully parsed
- understands the netscape cookie file format
- custom headers (that can replace/remove internally generated headers)
- reads/writes the netscape cookie file format
- custom headers (replace/remove internally generated headers)
- custom user-agent string
- custom referer string
- range
@@ -45,12 +48,16 @@ HTTP
- time conditions
- via http-proxy
- retrieve file modification date
- Content-Encoding support for deflate and gzip
- "Transfer-Encoding: chunked" support for "uploads"
HTTPS (*1)
- (all the HTTP features)
- using certificates
- verify server certificate
- via http-proxy
- select desired encryption
- force usage of a specific SSL version (SSLv2, SSLv3 or TLSv1)
FTP
- download
@@ -90,5 +97,6 @@ GOPHER
FILE
- URL support
*1 = requires OpenSSL
*2 = requires OpenLDAP
*1 = requires OpenSSL
*2 = requires OpenLDAP
*3 = requires a GSSAPI-compliant library, such as Heimdal or similar.


@@ -4,7 +4,7 @@
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
How cURL Become Like This
How cURL Became Like This
In the second half of 1997, Daniel Stenberg came up with the idea to make
@@ -54,12 +54,12 @@ OpenSSL took over where SSLeay was abandoned.
May 1999, first Debian package.
August 1999, LDAP:// and FILE:// support added. The curl web site gets 1300
visits daily.
visits weekly.
Released curl 6.0 in September. 15000 lines of code.
December 28 1999, added project to Sourceforge and started using its services
for managing the project.
December 28 1999, added the project on Sourceforge and started using its
services for managing the project.
Spring 2000, major internal overhaul to provide a suitable library interface.
The first non-beta release was named 7.1 and arrived in August. This offered
@@ -67,7 +67,7 @@ the easy interface and turned out to be the beginning of actually getting
other software and programs to get based on and powered by libcurl. Almost
20000 lines of code.
August 2000, the curl web site gets 4000 visits daily.
August 2000, the curl web site gets 4000 visits weekly.
The PHP guys adopted libcurl already the same month, when the first ever third
party libcurl binding showed up. CURL has been a supported module in PHP since
@@ -92,7 +92,7 @@ code.
August 2001. curl is bundled in Mac OS X, 10.1. It was already becoming more
and more of a standard utility of Linux distributions and a regular in the BSD
ports collections. The curl web site gets 8000 visits daily. Curl Corporation
ports collections. The curl web site gets 8000 visits weekly. Curl Corporation
contacted Daniel to discuss "the name issue". After Daniel's reply, they have
never since got in touch again.
@@ -100,7 +100,7 @@ September 2001, libcurl 7.9 introduces cookie jar and curl_formadd(). During
the forthcoming 7.9.x releases, we introduced the multi interface slowly and
without much whistles.
June 2002, the curl web site gets 13000 visits daily. curl and libcurl is
June 2002, the curl web site gets 13000 visits weekly. curl and libcurl is
35000 lines of code. Reported successful compiles on more than 40 combinations
of CPUs and operating systems.
@@ -111,3 +111,6 @@ distributions and otherwise retrieved as part of other software.
September 2002, with the release of curl 7.10 it is released under the MIT
license only.
February 2003, the curl site averages at 20000 visits weekly. At any given
moment, there's an average of 3 people browsing the curl.haxx.se site.

docs/HOWTO-RELEASE Normal file

@@ -0,0 +1,25 @@
Steps To Perform When Building a Public Release
* "make distcheck"
* ./maketgz
then upload the 3 curl packages maketgz created
* update these files:
www/_download.html
www/_changes.html
www/_newslog.html
www/Makefile
* commit the web changes
* 'cvs commit'
* 'cvs tag'
* write the release announcement, including:
- changes / bugfixes
- other curl-related news
- contributors
* mail release-announcement to curl-announce and curl-users


@@ -28,11 +28,26 @@ UNIX
You probably need to be root when doing the last command.
If you have checked out the sources from the CVS repository, read the
CVS-INFO on how to proceed.
Get a full listing of all available configure options by invoking it like:
./configure --help
If you want to install curl in a different file hierarchy than /usr/local,
you need to specify that already when running configure:
./configure --prefix=/path/to/curl/tree
If you happen to have write permission in that directory, you can do 'make
install' without being root. An example of this would be to make a local
install in your own home directory:
./configure --prefix=$HOME
make
make install
The configure script always tries to find a working SSL library unless
explicitly told not to. If you have OpenSSL installed in the default search
path for your compiler/linker, you don't need to do anything special. If
@@ -71,33 +86,6 @@ UNIX
LIBS=-lRSAglue -lrsaref
(as suggested by Doug Kaufman)
KNOWN PROBLEMS (these ones should not happen anymore)
If you happen to have autoconf installed, but a version older than 2.12
you will get into trouble. Then you can still build curl by issuing these
commands (note that this requires curl to be built staticly): (from Ralph
Beckmann)
./configure [...]
cd lib; make; cd ..
cd src; make; cd ..
cp src/curl elsewhere/bin/
As suggested by David West, you can make a faked version of autoconf and
autoheader:
----start of autoconf----
#!/bin/bash
#fake autoconf for building curl
if [ "$1" = "--version" ] then
echo "Autoconf version 2.13"
fi
----end of autoconf----
Then make autoheader a symbolic link to the same script and make sure
they're executable and set to appear in the path *BEFORE* the actual (but
obsolete) autoconf and autoheader scripts.
MORE OPTIONS
To force configure to use the standard cc compiler if both cc and gcc are
@@ -216,7 +204,14 @@ Win32
Before running nmake define the OPENSSL_PATH environment variable with
the root/base directory of OpenSSL, for example:
set OPENSSL_PATH=c:\openssl-0.9.6b
set OPENSSL_PATH=c:\openssl-0.9.7a
lib/Makefile.vc6 depends on zlib (http://www.gzip.org/zlib/) as well.
Please read the zlib documentation on how to compile zlib. Define the
ZLIB_PATH environment variable to the location of zlib.h and zlib.lib,
for example:
set ZLIB_PATH=c:\zlib-1.1.4
Then run 'nmake vc-ssl' or 'nmake vc-ssl-dll' in curl's root
directory. 'nmake vc-ssl' will create a libcurl static and dynamic
@@ -416,6 +411,17 @@ CROSS COMPILE
The '--prefix' parameter specifies where cURL will be installed. If
'configure' completes successfully, do 'make' and 'make install' as usual.
RISC OS
=======
The library can be cross-compiled using gccsdk as follows:
CC=riscos-gcc AR=riscos-ar RANLIB='riscos-ar -s' ./configure \
--host=arm-riscos-aof --without-random --disable-shared
make
where riscos-gcc and riscos-ar are links to the gccsdk tools.
You can then link your program with curl/lib/.libs/libcurl.a
PORTS
=====
This is a probably incomplete list of known hardware and operating systems
@@ -435,7 +441,7 @@ PORTS
- MIPS IRIX 6.2, 6.5
- MIPS Linux
- Pocket PC/Win CE 3.0
- Power AIX 4.2, 4.3.1, 4.3.2, 5.1
- Power AIX 3.2.5, 4.2, 4.3.1, 4.3.2, 5.1
- PowerPC Darwin 1.0
- PowerPC Linux
- PowerPC Mac OS 9
@@ -449,6 +455,7 @@ PORTS
- StrongARM NetBSD 1.4.1
- Ultrix 4.3a
- i386 BeOS
- i386 DOS
- i386 FreeBSD
- i386 HURD
- i386 Linux 1.3, 2.0, 2.2, 2.3, 2.4
@@ -459,11 +466,14 @@ PORTS
- i386 Solaris 2.7
- i386 Windows 95, 98, ME, NT, 2000
- i386 QNX 6
- i486 ncr-sysv4.3.03 (NCR MP-RAS)
- ia64 Linux 2.3.99
- m68k AmigaOS 3
- m68k Linux
- m68k OpenBSD
- m88k dg-dgux5.4R3.00
- s390 Linux
- XScale/PXA250 Linux 2.4
OpenSSL
=======


@@ -3,6 +3,27 @@ join in and help us correct one or more of these! Also be sure to check the
changelog of the current development status, as one or more of these problems
may have been fixed since this was written!
* libcurl doesn't treat the content-length of compressed data properly, as
it seems HTTP servers send the *uncompressed* length in that header and
libcurl thinks of it as the *compressed* length. Some explanations are here:
http://curl.haxx.se/mail/lib-2003-06/0146.html
* Downloading 0 (zero) bytes files over FTP will not create a zero byte file
locally, which is because libcurl doesn't call the write callback with zero
bytes. Explained here: http://curl.haxx.se/mail/archive-2003-04/0143.html
* Using CURLOPT_FAILONERROR (-f/--fail) will make authentication to stop
working if you use anything but plain Basic auth.
* LDAP output is garbled. Hardly anyone seems to care about LDAP functionality
in curl/libcurl, which is why this report has been closed and set to be solved later.
If you feel this is something you want fixed, get in touch and we'll start
working.
http://sourceforge.net/tracker/index.php?func=detail&aid=735752&group_id=976&atid=100976
* IPv6 support on AIX 4.3.3 doesn't work due to a missing sockaddr_storage
struct. It has been reported to work on AIX 5.1 though.
* Running 'make test' on Mac OS X gives 4 errors. This seems to be related
to some kind of libtool problem:
http://curl.haxx.se/mail/archive-2002-03/0029.html and
@@ -15,6 +36,8 @@ may have been fixed since this was written!
* configure --disable-http is not fully supported. All other protocols seem
to work to disable.
* The -m parameter does not work when using telnet with curl on Windows.
* If a HTTP server responds to a HEAD request and includes a body (thus
violating the RFC2616), curl won't wait to read the response but just stop
reading and return back. If a second request (let's assume a GET) is then


@@ -11,7 +11,7 @@ SIMPLE USAGE
curl http://www.netscape.com/
Get the root README file from funet's ftp-server:
Get the README file from the user's home directory at funet's ftp-server:
curl ftp://ftp.funet.fi/README
@@ -19,7 +19,7 @@ SIMPLE USAGE
curl http://www.weirdserver.com:8000/
Get a list of the root directory of an FTP site:
Get a list of a directory of an FTP site:
curl ftp://cool.haxx.se/
@@ -166,13 +166,21 @@ UPLOADING
VERBOSE / DEBUG
If curl fails where it isn't supposed to, if the servers don't let you
in, if you can't understand the responses: use the -v flag to get VERBOSE
fetching. Curl will output lots of info and all data it sends and
receives in order to let the user see all client-server interaction.
If curl fails where it isn't supposed to, if the servers don't let you in,
if you can't understand the responses: use the -v flag to get verbose
fetching. Curl will output lots of info and what it sends and receives in
order to let the user see all client-server interaction (but it won't show
you the actual data).
curl -v ftp://ftp.upload.com/
To get even more details and information on what curl does, try using the
--trace or --trace-ascii options with a given file name to log to, like
this:
curl --trace trace.txt www.haxx.se
DETAILED INFORMATION
Different protocols provide different ways of getting detailed information
@@ -235,7 +243,7 @@ POST (HTTP)
To post to this, you enter a curl command line like:
curl -d "user=foobar&pass=12345&id=blablabla&dig=submit" (continues)
curl -d "user=foobar&pass=12345&id=blablabla&ding=submit" (continues)
http://www.formpost.com/getthis/post.cgi
@@ -350,6 +358,13 @@ COOKIES
curl -b headers www.example.com
While saving headers to a file is a working way to store cookies, it is
however error-prone and not the preferred way to do this. Instead, make curl
save the incoming cookies using the well-known netscape cookie format like
this:
curl -c cookies.txt www.example.com
Note that by specifying -b you enable the "cookie awareness" and with -L
you can make curl follow a location: (which often is used in combination
with cookies). So that if a site sends cookies and a location, you can
@@ -363,7 +378,11 @@ COOKIES
the cookies received from www.example.com. curl will send to the server the
stored cookies which match the request as it follows the location. The
file "empty.txt" may be a non-existant file.
Also, to both read and write cookies from a netscape cookie file, you can
set both -b and -c to use the same file:
curl -b cookies.txt -c cookies.txt www.example.com
PROGRESS METER
@@ -404,12 +423,34 @@ SPEED LIMIT
To have curl abort the download if the speed is slower than 3000 bytes per
second for 1 minute, run:
curl -y 3000 -Y 60 www.far-away-site.com
curl -Y 3000 -y 60 www.far-away-site.com
This can very well be used in combination with the overall time limit, so
that the above operation must be completed in whole within 30 minutes:
curl -m 1800 -y 3000 -Y 60 www.far-away-site.com
curl -m 1800 -Y 3000 -y 60 www.far-away-site.com
Forcing curl not to transfer data faster than a given rate is also possible,
which might be useful if you're using a limited bandwidth connection and you
don't want your transfer to use all of it (sometimes referred to as
"bandwith throttle").
Make curl transfer data no faster than 10 kilobytes per second:
curl --limit-rate 10K www.far-away-site.com
or
curl --limit-rate 10240 www.far-away-site.com
Or prevent curl from uploading data faster than 1 megabyte per second:
curl -T upload --limit-rate 1M ftp://uploadshereplease.com
When using the --limit-rate option, the transfer rate is regulated on a
per-second basis, which will cause the total transfer speed to become lower
than the given number. Sometimes of course substantially lower, if your
transfer stalls during periods.
CONFIG FILE
@@ -548,7 +589,7 @@ HTTPS
from sites that require valid certificates. The only drawback is that the
certificate needs to be in PEM-format. PEM is a standard and open format to
store certificates with, but it is not used by the most commonly used
browsers (Netscape and MSEI both use the so called PKCS#12 format). If you
browsers (Netscape and MSIE both use the so called PKCS#12 format). If you
want curl to use the certificates you use with your (favourite) browser, you
may need to download/compile a converter that can convert your browser's
formatted certificates to PEM formatted ones. This kind of converter is
@@ -567,8 +608,8 @@ HTTPS
Many older SSL-servers have problems with SSLv3 or TLS, that newer versions
of OpenSSL etc is using, therefore it is sometimes useful to specify what
SSL-version curl should use. Use -3 or -2 to specify that exact SSL version
to use:
SSL-version curl should use. Use -3, -2 or -1 to specify that exact SSL
version to use (for SSLv3, SSLv2 or TLSv1 respectively):
curl -2 https://secure.site.com/
@@ -826,13 +867,13 @@ MAILING LISTS
Receives notifications on all CVS commits done to the curl source module.
This can become quite a large amount of mails during intense development,
be aware. This is for us who liks email...
be aware. This is for us who like email...
curl-www-commits
Receives notifications on all CVS commits done to the curl www module
(basicly the web site). This can become quite a large amount of mails
during intense changing, be aware. This is for us who liks email...
during intense changing, be aware. This is for us who like email...
Please direct curl questions, feature requests and trouble reports to one of
these mailing lists instead of mailing any individual.


@@ -10,18 +10,23 @@ man_MANS = \
HTMLPAGES = \
curl.html \
curl-config.html
curl-config.html \
index.html
PDFPAGES = \
curl.pdf \
curl-config.pdf
SUBDIRS = examples libcurl
EXTRA_DIST = MANUAL BUGS CONTRIBUTE FAQ FEATURES INTERNALS \
EXTRA_DIST = MANUAL BUGS CONTRIBUTE FAQ FEATURES INTERNALS SSLCERTS \
README.win32 RESOURCES TODO TheArtOfHttpScripting THANKS \
VERSIONS KNOWN_BUGS BINDINGS $(man_MANS) $(HTMLPAGES) \
HISTORY INSTALL libcurl-the-guide
HISTORY INSTALL libcurl-the-guide $(PDFPAGES)
MAN2HTML= gnroff -man $< | man2html >$@
SUFFIXES = .1 .3 .html
SUFFIXES = .1 .3 .html .pdf
html: $(HTMLPAGES)
cd libcurl; make html
@@ -31,3 +36,13 @@ html: $(HTMLPAGES)
.1.html:
$(MAN2HTML)
MAN2PDF = groff -Tps -man curl.1 $< >$@
pdf:
for file in $(man_MANS); do \
foo=`echo $$file | sed -e 's/\.[0-9]$$//g'`; \
groff -Tps -man $$file >$$foo.ps; \
ps2pdf $$foo.ps $$foo.pdf; \
done
cd libcurl; make pdf


@@ -13,8 +13,8 @@ README.win32
are win32-based.
The unix-style man pages are tricky to read on windows, so therefore are all
those pages also converted to HTML and those are also included in the
release archives.
those pages converted to HTML as well as pdf, and included in the release
archives.
The main curl.1 man page is also "built-in" in the command line tool. Use a
command line similar to this in order to extract a separate text file:

docs/SSLCERTS Normal file

@@ -0,0 +1,39 @@
Peer SSL Certificate Verification
=================================
Starting in 7.10, libcurl performs peer SSL certificate verification by
default. This is done by installing a default CA cert bundle on 'make install'
(or similar), that CA bundle package is used by default on operations against
SSL servers.
Thus, if you communicate with HTTPS servers using certificates that are signed
by CAs present in the bundle, you will not notice any changed behavior and you
will seamlessly get a higher security level on your SSL connections since you
can be sure that the remote server really is the one it claims to be.
If the remote server uses a self-signed certificate, or if you don't install
curl's CA cert bundle or if it uses a certificate signed by a CA that isn't
included in the bundle, then you need to do one of the following:
1. Tell libcurl to *not* verify the peer. With libcurl you disable this with
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);
With the curl command tool, you disable this with -k/--insecure.
2. Get a CA certificate that can verify the remote server and use the proper
option to point out this CA cert for verification when connecting. For
libcurl hackers: curl_easy_setopt(curl, CURLOPT_CAPATH, capath);
With the curl command tool: --cacert [file]
Neglecting to use one of the above methods when dealing with a server using a
certificate that isn't signed by one of the certificates in the installed CA
cert bundle, will cause SSL to report an error ("certificate verify failed")
during the handshake and SSL will then refuse further communication with that
server.
This procedure has been deemed The Right Thing even though it adds this extra
trouble for some users, since it adds security to a majority of the SSL
connections that previously weren't really secure. It turned out many people
were using previous versions of curl/libcurl without realizing the need for
the CA cert options to get truly secure SSL connections.


@@ -2,83 +2,93 @@ This project has been alive for several years. Countless people have provided
feedback that have improved curl. Here follows a (incomplete) list of people
that have contributed with non-trivial parts:
- Daniel Stenberg <daniel@haxx.se>
- Rafael Sagula <sagula@inf.ufrgs.br>
- Sampo Kellomaki <sampo@iki.fi>
- Linas Vepstas <linas@linas.org>
- Bjorn Reese <breese@mail1.stofanet.dk>
- Johan Anderson <johan@homemail.com>
- Kjell Ericson <Kjell.Ericson@haxx.se>
- Troy Engel <tengel@sonic.net>
- Ryan Nelson <ryan@inch.com>
- Björn Stenberg <bjorn@haxx.se>
- Angus Mackay <amackay@gus.ml.org>
- Eric Young <eay@cryptsoft.com>
- Simon Dick <simond@totally.irrelevant.org>
- Oren Tirosh <oren@monty.hishome.net>
- Steven G. Johnson <stevenj@alum.mit.edu>
- Gilbert Ramirez Jr. <gram@verdict.uthscsa.edu>
- Andrés García <ornalux@redestb.es>
- Douglas E. Wegscheid <wegscd@whirlpool.com>
- Mark Butler <butlerm@xmission.com>
- Eric Thelin <eric@generation-i.com>
- Marc Boucher <marc@mbsi.ca>
- Greg Onufer <Greg.Onufer@Eng.Sun.COM>
- Doug Kaufman <dkaufman@rahul.net>
- David Eriksson <david@2good.com>
- Ralph Beckmann <rabe@uni-paderborn.de>
- T. Yamada <tai@imasy.or.jp>
- Lars J. Aas <larsa@sim.no>
- Jörn Hartroth <Joern.Hartroth@computer.org>
- Matthew Clarke <clamat@van.maves.ca>
- Linus Nielsen Feltzing <linus@haxx.se>
- Felix von Leitner <felix@convergence.de>
- Dan Zitter <dzitter@zitter.net>
- Jongki Suwandi <Jongki.Suwandi@eng.sun.com>
- Chris Maltby <chris@aurema.com>
- Ron Zapp <rzapper@yahoo.com>
- Paul Marquis <pmarquis@iname.com>
- Ellis Pritchard <ellis@citria.com>
- Damien Adant <dams@usa.net>
- Chris <cbayliss@csc.come>
- Marco G. Salvagno <mgs@whiz.cjb.net>
- Paul Marquis <pmarquis@iname.com>
- David LeBlanc <dleblanc@qnx.com>
- Rich Gray at Plus Technologies
- Luong Dinh Dung <u8luong@lhsystems.hu>
- Torsten Foertsch <torsten.foertsch@gmx.net>
- Kristian Köhntopp <kris@koehntopp.de>
- Fred Noz <FNoz@siac.com>
- Caolan McNamara <caolan@csn.ul.ie>
- Albert Chin-A-Young <china@thewrittenword.com>
- Stephen Kick <skick@epicrealm.com>
- Martin Hedenfalk <mhe@stacken.kth.se>
- Richard Prescott <rip at step.polymtl.ca>
- Jason S. Priebe <priebe@wral-tv.com>
- T. Bharath <TBharath@responsenetworks.com>
- Alexander Kourakos <awk@users.sourceforge.net>
- James Griffiths <griffiths_james@yahoo.com>
- Loic Dachary <loic@senga.org>
- Robert Weaver <robert.weaver@sabre.com>
- Ingo Ralf Blum <ingoralfblum@ingoralfblum.com>
- Jun-ichiro itojun Hagino <itojun@iijlab.net>
- Frederic Lepied <flepied@mandrakesoft.com>
- Georg Horn <horn@koblenz-net.de>
- Cris Bailiff <c.bailiff@awayweb.com>
- Sterling Hughes <sterling@designmultimedia.com>
- S. Moonesamy
- Ingo Wilken <iw@WWW.Ecce-Terram.DE>
- Pawel A. Gajda <mis@k2.net.pl>
- Patrick Bihan-Faou
- Nico Baggus <Nico.Baggus@mail.ing.nl>
- Sergio Ballestrero
- Andrew Francis <locust@familyhealth.com.au>
- Tomasz Lacki <Tomasz.Lacki@primark.pl>
- Georg Huettenegger <georg@ist.org>
- John Lask <johnlask@hotmail.com>
- Eric Lavigne <erlavigne@wanadoo.fr>
- Marcus Webster <marcus.webster@phocis.com>
- Götz Babin-Ebell <babin<69>ebell@trustcenter.de>
- Andreas Damm <andreas-sourceforge@radab.org>
- Jacky Lam <sylam@emsoftltd.com>
- James Gallagher <jgallagher@gso.uri.edu>
Daniel Stenberg <daniel@haxx.se>
Rafael Sagula <sagula@inf.ufrgs.br>
Sampo Kellomaki <sampo@iki.fi>
Linas Vepstas <linas@linas.org>
Bjorn Reese <breese@mail1.stofanet.dk>
Johan Anderson <johan@homemail.com>
Kjell Ericson <Kjell.Ericson@haxx.se>
Troy Engel <tengel@sonic.net>
Ryan Nelson <ryan@inch.com>
Björn Stenberg <bjorn@haxx.se>
Angus Mackay <amackay@gus.ml.org>
Eric Young <eay@cryptsoft.com>
Simon Dick <simond@totally.irrelevant.org>
Oren Tirosh <oren@monty.hishome.net>
Steven G. Johnson <stevenj@alum.mit.edu>
Gilbert Ramirez Jr. <gram@verdict.uthscsa.edu>
Andrés García <ornalux@redestb.es>
Douglas E. Wegscheid <wegscd@whirlpool.com>
Mark Butler <butlerm@xmission.com>
Eric Thelin <eric@generation-i.com>
Marc Boucher <marc@mbsi.ca>
Greg Onufer <Greg.Onufer@Eng.Sun.COM>
Doug Kaufman <dkaufman@rahul.net>
David Eriksson <david@2good.com>
Ralph Beckmann <rabe@uni-paderborn.de>
T. Yamada <tai@imasy.or.jp>
Lars J. Aas <larsa@sim.no>
Jörn Hartroth <Joern.Hartroth@computer.org>
Matthew Clarke <clamat@van.maves.ca>
Linus Nielsen Feltzing <linus@haxx.se>
Felix von Leitner <felix@convergence.de>
Dan Zitter <dzitter@zitter.net>
Jongki Suwandi <Jongki.Suwandi@eng.sun.com>
Chris Maltby <chris@aurema.com>
Ron Zapp <rzapper@yahoo.com>
Paul Marquis <pmarquis@iname.com>
Ellis Pritchard <ellis@citria.com>
Damien Adant <dams@usa.net>
Chris <cbayliss@csc.come>
Marco G. Salvagno <mgs@whiz.cjb.net>
Paul Marquis <pmarquis@iname.com>
David LeBlanc <dleblanc@qnx.com>
Rich Gray at Plus Technologies
Luong Dinh Dung <u8luong@lhsystems.hu>
Torsten Foertsch <torsten.foertsch@gmx.net>
Kristian Köhntopp <kris@koehntopp.de>
Fred Noz <FNoz@siac.com>
Caolan McNamara <caolan@csn.ul.ie>
Albert Chin-A-Young <china@thewrittenword.com>
Stephen Kick <skick@epicrealm.com>
Martin Hedenfalk <mhe@stacken.kth.se>
Richard Prescott <rip at step.polymtl.ca>
Jason S. Priebe <priebe@wral-tv.com>
T. Bharath <TBharath@responsenetworks.com>
Alexander Kourakos <awk@users.sourceforge.net>
James Griffiths <griffiths_james@yahoo.com>
Loic Dachary <loic@senga.org>
Robert Weaver <robert.weaver@sabre.com>
Ingo Ralf Blum <ingoralfblum@ingoralfblum.com>
Jun-ichiro itojun Hagino <itojun@iijlab.net>
Frederic Lepied <flepied@mandrakesoft.com>
Georg Horn <horn@koblenz-net.de>
Cris Bailiff <c.bailiff@awayweb.com>
Sterling Hughes <sterling@designmultimedia.com>
S. Moonesamy
Ingo Wilken <iw@WWW.Ecce-Terram.DE>
Pawel A. Gajda <mis@k2.net.pl>
Patrick Bihan-Faou
Nico Baggus <Nico.Baggus@mail.ing.nl>
Sergio Ballestrero
Andrew Francis <locust@familyhealth.com.au>
Tomasz Lacki <Tomasz.Lacki@primark.pl>
Georg Huettenegger <georg@ist.org>
John Lask <johnlask@hotmail.com>
Eric Lavigne <erlavigne@wanadoo.fr>
Marcus Webster <marcus.webster@phocis.com>
Götz Babin-Ebell <babin<69>ebell@trustcenter.de>
Andreas Damm <andreas-sourceforge@radab.org>
Jacky Lam <sylam@emsoftltd.com>
James Gallagher <jgallagher@gso.uri.edu>
Kjetil Jacobsen <kjetilja@cs.uit.no>
Markus F.X.J. Oberhumer <markus@oberhumer.com>
Miklos Nemeth <mnemeth@kfkisystems.com>
Kevin Roth <kproth@users.sourceforge.net>
Ralph Mitchell <rmitchell@eds.com>
Dan Fandrich <dan@coneharvesters.com>
Jean-Philippe Barrette-LaPierre <jpb@rrette.com>
Richard Bramante <RBramante@on.com>
Daniel Kouril <kouril@ics.muni.cz>
Dirk Manske <dm@nettraffic.de>

133
docs/TODO
View File

@@ -10,69 +10,49 @@ TODO
send us patches that improve things! Also check the http://curl.haxx.se/dev
web section for various technical development notes.
All bugs documented in the KNOWN_BUGS document are subject for fixing!
LIBCURL
* Introduce an interface to libcurl that allows applications to more easily
learn which cookies are received. Pushing interface that calls a
callback on each received cookie? Querying interface that asks about
existing cookies? We probably need both.
* Make content encoding/decoding internally be made using a filter system.
existing cookies? We probably need both. Enable applications to modify
existing cookies as well. http://curl.haxx.se/dev/COOKIES
* Introduce another callback interface for upload/download that makes one
less copy of data and thus a faster operation.
[http://curl.haxx.se/dev/no_copy_callbacks.txt]
* Run-time querying about library characteristics. Which protocols does the
running libcurl support? What is the version number of the running libcurl
(returning the well-defined version #define)? This could possibly be made
by allowing curl_easy_getinfo() to work with a NULL pointer for global info,
but perhaps better would be to introduce a new curl_getinfo() (or similar)
function for global info reading.
* Add asynchronous name resolving (http://daniel.haxx.se/resolver/). This
should be made to work on most of the supported platforms, or otherwise it
isn't really interesting.
* Data sharing. Tell which easy handles within a multi handle that should
share cookies, connection cache, dns cache, ssl session cache. Full
suggestion found here: http://curl.haxx.se/dev/sharing.txt
* Mutexes. By adding mutex callback support, the 'data sharing' mentioned
above can be made between several easy handles running in different threads
too. The actual mutex implementations will be left for the application to
implement, libcurl will merely call 'getmutex' and 'leavemutex' callbacks.
Part of the sharing suggestion at: http://curl.haxx.se/dev/sharing.txt
* More data sharing. curl_share_* functions already exist and work, and they
can be extended to share more.
* Set the SO_KEEPALIVE socket option to make libcurl notice and disconnect
very long time idle connections.
* Go through the code and verify that libcurl deals with big files >2GB and
>4GB all over. Bug reports (and source reviews) indicate that it doesn't
currently work properly.
* Make the built-in progress meter use its own dedicated output stream, and
make it possible to set it. Use stderr by default.
>4GB all over. Bug reports (and source reviews) show that it doesn't
currently work.
* CURLOPT_MAXFILESIZE. Prevent downloads that are larger than the specified
size. CURLE_FILESIZE_EXCEEDED would then be returned. Gautam Mani
requested. That is, the download should even begin but be aborted
requested. That is, the download should not even begin but be aborted
immediately.
* Allow the http_proxy (and other) environment variables to contain user and
password as well in the style: http://proxyuser:proxypasswd@proxy:port
Berend Reitsma suggested.
LIBCURL - multi interface
* Make sure we don't ever loop because of non-blocking sockets return
EWOULDBLOCK or similar. This concerns the HTTP request sending (and
especially regular HTTP POST), the FTP command sending etc.
* Add curl_multi_timeout() to make libcurl's ares-functionality better.
* Make uploads treated better. We need a way to tell libcurl we have data to
write, as the current system expects us to upload data each time the socket
is writable and there is no way to say that we want to upload data soon
just not right now, without that aborting the upload.
* Make sure we don't ever loop because of non-blocking sockets return
EWOULDBLOCK or similar. This FTP command sending, the SSL connection etc.
* Make transfers treated more carefully. We need a way to tell libcurl we
have data to write, as the current system expects us to upload data each
time the socket is writable and there is no way to say that we want to
upload data soon just not right now, without that aborting the upload. The
opposite situation should be possible as well, that we tell libcurl we're
ready to accept read data. Today libcurl feeds the data as soon as it is
available for reading, no matter what.
DOCUMENTATION
@@ -80,59 +60,28 @@ TODO
FTP
* FTP ASCII upload does not follow RFC959 section 3.1.1.1: "The sender
converts the data from an internal character representation to the standard
8-bit NVT-ASCII representation (see the Telnet specification). The
receiver will convert the data from the standard form to his own internal
form."
* Support the most common FTP proxies, Philip Newton provided a list
allegedly from ncftp:
http://curl.haxx.se/mail/archive-2003-04/0126.html
* An option to only download remote FTP files if they're newer than the local
one is a good idea, and it would fit right into the same syntax as the
already working HTTP equivalent. It of course requires that 'MDTM' works,
and it isn't a standard FTP command.
* Make CURLOPT_FTPPORT support an additional port number on the IP/if/name,
like "blabla:[port]" or possibly even "blabla:[portfirst]-[portsecond]".
* FTP ASCII transfers do not follow RFC959. They don't convert the data
accordingly.
* Since USERPWD always override the user and password specified in URLs, we
might need another way to specify user+password for anonymous ftp logins.
* Add FTPS support with SSL for the data connection too. This should be made
according to the specs written in draft-murray-auth-ftp-ssl-08.txt,
"Securing FTP with TLS"
according to the specs written in draft-murray-auth-ftp-ssl-11.txt,
"Securing FTP with TLS", valid until September 27th 2003.
http://curl.haxx.se/rfc/draft-murray-auth-ftp-ssl-11.txt
HTTP
* HTTP PUT for files passed on stdin *OR* when the --crlf option is
used. Requires libcurl to send the file with chunked content
encoding. [http://curl.haxx.se/dev/HTTP-PUT-stdin.txt] When the filter
system mentioned above gets real, it'll be a piece of cake to add.
* Pass a list of host names to libcurl to which we allow the user name and
password to be sent. Currently, it only gets sent to the host name that
the first URL uses (to prevent others from being able to read it), but this
also prevents the authentication info from getting sent when following
locations to legitimate other host names.
* "Content-Encoding: compress/gzip/zlib" HTTP 1.1 clearly defines how to get
and decode compressed documents. There is the zlib that is pretty good at
decompressing stuff. This work was started in October 1999 but halted again
since it proved more work than we thought. It is still a good idea to
implement though. This requires the filter system mentioned above.
* Authentication: NTLM. Support for that MS crap called NTLM
authentication. MS proxies and servers sometime require that. Since that
protocol is a proprietary one, it involves reverse engineering and network
sniffing. This should however be a library-based functionality. There are a
few different efforts "out there" to make open source HTTP clients support
this and it should be possible to take advantage of other people's hard
work. http://modntlm.sourceforge.net/ is one. There's a web page at
http://www.innovation.ch/java/ntlm.html that contains detailed reverse-
engineered info.
* RFC2617 compliance, "Digest Access Authentication". A valid test page seems
to exist at: http://hopf.math.nwu.edu/testpage/digest/ And some friendly
person's server source code is available at
http://hopf.math.nwu.edu/digestauth/index.html Then there's the Apache
mod_digest source code too of course. It seems as if Netscape doesn't
support this, and not many servers do. Although this is a lot better
authentication method than the more common "Basic". Basic sends the
password in cleartext over the network, this "Digest" method uses a
challenge-response protocol which increases security quite a lot.
* Digest, NTLM and GSS-Negotiate support for HTTP proxies. They all work
on direct-connections to the server.
* Pipelining. Sending multiple requests before the previous one(s) are done.
This could possibly be implemented using the multi interface to queue
@@ -217,6 +166,9 @@ TODO
TEST SUITE
* If perl wasn't found by the configure script, don't attempt to run the
tests, but print a nice explanation of why not.
* Extend the test suite to include more protocols. The telnet could just do
ftp or http operations (for which we have test servers).
@@ -224,3 +176,10 @@ TODO
fork()s and it should become even more portable.
* Introduce a test suite that tests libcurl better and more explicitly.
NEXT MAJOR RELEASE
* curl_easy_cleanup() returns void, but curl_multi_cleanup() returns a
CURLMcode. These should be changed to be the same.
* curl_formparse() should be removed

View File

@@ -2,7 +2,7 @@
.\" nroff -man curl.1
.\" Written by Daniel Stenberg
.\"
.TH curl 1 "11 Sep 2002" "Curl 7.10" "Curl Manual"
.TH curl 1 "8 Aug 2003" "Curl 7.10.7" "Curl Manual"
.SH NAME
curl \- transfer a URL
.SH SYNOPSIS
@@ -10,14 +10,18 @@ curl \- transfer a URL
.I [URL...]
.SH DESCRIPTION
.B curl
is a client to get documents/files from or send documents to a server, using
any of the supported protocols (HTTP, HTTPS, FTP, GOPHER, DICT, TELNET, LDAP
or FILE). The command is designed to work without user interaction or any kind
of interactivity.
is a tool to transfer data from or to a server, using one of the supported
protocols (HTTP, HTTPS, FTP, FTPS, GOPHER, DICT, TELNET, LDAP or FILE). The
command is designed to work without user interaction.
curl offers a busload of useful tricks like proxy support, user
authentication, ftp upload, HTTP post, SSL (https:) connections, cookies, file
transfer resume and more.
transfer resume and more. As you will see below, the amount of features will
make your head spin!
curl is powered by libcurl for all transfer-related features. See
.BR libcurl (3)
for details.
.SH URL
The URL syntax is protocol dependent. You'll find a detailed description in
RFC 2396.
@@ -48,10 +52,8 @@ specified on a single command line and cannot be used between separate curl
invokes.
.SH OPTIONS
.IP "-a/--append"
(FTP)
When used in a ftp upload, this will tell curl to append to the target
file instead of overwriting it. If the file doesn't exist, it will
be created.
(FTP) When used in an FTP upload, this will tell curl to append to the target
file instead of overwriting it. If the file doesn't exist, it will be created.
If this option is used twice, the second one will disable append mode again.
.IP "-A/--user-agent <agent string>"
@@ -63,6 +65,16 @@ surround the string with single quote marks. This can also be set with the
If this option is set more than once, the last one will be the one that's
used.
.IP "--anyauth"
(HTTP) Tells curl to figure out authentication method by itself, and use the
most secure one the remote site claims it supports. This is done by first
doing a request and checking the response-headers, thus inducing an extra
network round-trip. This is used instead of setting a specific authentication
method, which you can do with \fI--digest\fP, \fI--ntlm\fP, and
\fI--negotiate\fP. (Added in 7.10.6)
If this option is used several times, the following occurrences make no
difference.
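For libcurl-based code, the rough equivalent of --anyauth is CURLOPT_HTTPAUTH
with the CURLAUTH_ANY bitmask (available since libcurl 7.10.6); a minimal
sketch, with made-up credentials:

  curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_ANY);
  curl_easy_setopt(curl, CURLOPT_USERPWD, "myself:secret");
  /* libcurl picks the most secure method the server offers, at the cost
     of the extra request/response round-trip described above */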
.IP "-b/--cookie <name=data>"
(HTTP)
Pass the data to the HTTP server as a cookie. It is supposedly the
@@ -90,12 +102,26 @@ also be enforced by using an URL that ends with ";type=A". This option causes
data sent to stdout to be in text mode for win32 systems.
If this option is used twice, the second one will disable ASCII usage.
.IP "--basic"
(HTTP) Tells curl to use HTTP Basic authentication. This is the default and
this option is usually pointless, unless you use it to override a previously
set option that sets a different authentication method (such as \fI--ntlm\fP,
\fI--digest\fP and \fI--negotiate\fP). (Added in 7.10.6)
If this option is used several times, the following occurrences make no
difference.
.IP "--ciphers <list of ciphers>"
(SSL) Specifies which ciphers to use in the connection. The list of ciphers
must be using valid ciphers. Read up on SSL cipher list details on this URL:
.I http://www.openssl.org/docs/apps/ciphers.html (Option added in curl 7.9)
.I http://www.openssl.org/docs/apps/ciphers.html
If this option is used several times, the last one will override the others.
.IP "--compressed"
(HTTP) Request a compressed response using one of the algorithms libcurl
supports, and return the uncompressed document. If this option is used and
the server sends an unsupported encoding, Curl will report an error.
If this option is used several times, each occurrence will toggle it on/off.
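The corresponding libcurl option is CURLOPT_ENCODING (assuming libcurl was
built with zlib); an empty string asks for all encodings libcurl supports:

  /* ask for a compressed response and have libcurl decompress it */
  curl_easy_setopt(curl, CURLOPT_ENCODING, "");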
.IP "--connect-timeout <seconds>"
Maximum time in seconds that you allow the connection to the server to take.
This only limits the connection phase, once curl has connected this option is
@@ -108,7 +134,13 @@ operation. Curl writes all cookies previously read from a specified file as
well as all cookies received from remote server(s). If no cookies are known,
no file will be written. The file will be written using the Netscape cookie
file format. If you set the file name to a single dash, "-", the cookies will
be written to stdout. (Option added in curl 7.9)
be written to stdout.
.B NOTE
If the cookie jar can't be created or written to, the whole curl operation
won't fail or even report an error clearly. Using -v will get a warning
displayed, but that is the only visible feedback you get about this possibly
lethal situation.
If this option is used several times, the last specified file name will be
used.
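In libcurl terms the same behaviour comes from CURLOPT_COOKIEFILE and
CURLOPT_COOKIEJAR; the file name below is only an example:

  /* read cookies from this file (it may even be a non-existing file,
     which still enables the cookie engine) */
  curl_easy_setopt(curl, CURLOPT_COOKIEFILE, "cookies.txt");
  /* write all known cookies to this file when the handle is cleaned up */
  curl_easy_setopt(curl, CURLOPT_COOKIEJAR, "cookies.txt");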
@@ -122,6 +154,9 @@ Use "-C -" to tell curl to automatically find out where/how to resume the
transfer. It then uses the given output/input files to figure that out.
If this option is used several times, the last one will be used.
.IP "--create-dirs"
When used in conjunction with the -o option, curl will create the necessary
local directory hierarchy as needed.
.IP "--crlf"
(FTP) Convert LF to CRLF in upload. Useful for MVS (OS/390).
@@ -131,7 +166,7 @@ If this option is used twice, the second will again disable crlf converting.
that can emulate as if a user has filled in a HTML form and pressed the submit
button. Note that the data is sent exactly as specified with no extra
processing (with all newlines cut off). The data is expected to be
"url-encoded". This will cause curl to pass the data to the server using the
\&"url-encoded". This will cause curl to pass the data to the server using the
content-type application/x-www-form-urlencoded. Compare to -F. If more than
one -d/--data option is used on the same command line, the data pieces
specified will be merged together with a separating &-letter. Thus, using '-d
@@ -142,7 +177,7 @@ If you start the data with the letter @, the rest should be a file name to
read the data from, or - if you want curl to read the data from stdin. The
contents of the file must already be url-encoded. Multiple files can also be
specified. Posting data from a file named 'foobar' would thus be done with
"--data @foobar".
\&"--data @foobar".
To post data purely binary, you should instead use the --data-binary option.
@@ -163,9 +198,27 @@ want to post a binary file without the strip-newlines feature of the
If this option is used several times, the ones following the first will
append data.
.IP "--digest"
(HTTP) Enables HTTP Digest authentication. This is an authentication method that
prevents the password from being sent over the wire in clear text. Use this in
combination with the normal -u/--user option to set user name and
password. See also \fI--ntlm\fP, \fI--negotiate\fP and \fI--anyauth\fP for
related options. (Added in curl 7.10.6)
If this option is used several times, the following occurrences make no
difference.
.IP "--disable-eprt"
(FTP) Tell curl to disable the use of the EPRT and LPRT commands when doing
active FTP transfers. Curl will normally always first attempt to use EPRT,
then LPRT before using PORT, but with this option, it will use PORT right
away. EPRT and LPRT are extensions to the original FTP protocol, may not work
on all servers but enable more functionality in a better way than the
traditional PORT command. (Added in 7.10.5)
If this option is used several times, each occurrence will toggle this on/off.
.IP "--disable-epsv"
(FTP) Tell curl to disable the use of the EPSV command when doing passive FTP
downloads. Curl will normally always first attempt to use EPSV before PASV,
transfers. Curl will normally always first attempt to use EPSV before PASV,
but with this option, it will not try using EPSV.
If this option is used several times, each occurrence will toggle this on/off.
@@ -214,14 +267,21 @@ If this option is used several times, the last one will be used.
peer. The file may contain multiple CA certificates. The certificate(s) must
be in PEM format.
curl recognizes the environment variable named 'CURL_CA_BUNDLE' if that is
set, and uses the given path as a path to a CA cert bundle. This option
overrides that variable.
The windows version of curl will automatically look for a CA certs file named
\'curl-ca-bundle.crt\', either in the same directory as curl.exe, or in the
Current Working Directory, or in any folder along your PATH.
If this option is used several times, the last one will be used.
.IP "--capath <CA certificate directory>"
(HTTPS) Tells curl to use the specified certificate directory to verify the
peer. The certificates must be in PEM format, and the directory must have been
processed using the c_rehash utility supplied with openssl. Certificate directories
are not supported under Windows (because c_rehash uses symbolic links to
create them). Using --capath can allow curl to make https connections much
more efficiently than using --cacert if the --cacert file contains many CA certificates.
peer. The certificates must be in PEM format, and the directory must have been
processed using the c_rehash utility supplied with openssl. Using --capath can
allow curl to make https connections much more efficiently than using --cacert
if the --cacert file contains many CA certificates.
If this option is used several times, the last one will be used.
.IP "-f/--fail"
@@ -231,6 +291,12 @@ normal cases when a HTTP server fails to deliver a document, it returns a HTML
document stating so (which often also describes why and more). This flag will
prevent curl from outputting that and fail silently instead.
If this option is used twice, the second will again disable silent failure.
.IP "--ftp-create-dirs"
(FTP) When an FTP URL/operation uses a path that doesn't currently exist on
the server, the standard behaviour of curl is to fail. Using this option, curl
will instead attempt to create missing directories. (Added in 7.10.7)
If this option is used twice, the second will again disable silent failure.
.IP "-F/--form <name=content>"
(HTTP) This lets curl emulate a filled in form in which a user has pressed the
@@ -246,12 +312,18 @@ Example, to send your password file to the server, where
\&'password' is the name of the form-field to which /etc/passwd will be the
input:
.B curl
-F password=@/etc/passwd www.mypasswords.com
\fBcurl\fP -F password=@/etc/passwd www.mypasswords.com
To read the file's content from stdin instead of a file, use - where the file
name should've been. This goes for both @ and < constructs.
You can also tell curl what Content-Type to use for the file upload part, by
using 'type=', in a manner similar to:
\fBcurl\fP -F "web=@index.html;type=text/html" url.com
See further examples and details in the MANUAL.
This option can be used multiple times.
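The libcurl counterpart is the curl_formadd()/CURLOPT_HTTPPOST API. A sketch
of the "web=@index.html;type=text/html" example above, done from C:

  struct curl_httppost *post = NULL;
  struct curl_httppost *last = NULL;

  curl_formadd(&post, &last,
               CURLFORM_COPYNAME, "web",
               CURLFORM_FILE, "index.html",
               CURLFORM_CONTENTTYPE, "text/html",
               CURLFORM_END);

  curl_easy_setopt(curl, CURLOPT_HTTPPOST, post);
  curl_easy_perform(curl);
  curl_formfree(post); /* free the form list when the post is done */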
.IP "-g/--globoff"
This option switches off the "URL globbing parser". When you set this option,
@@ -262,7 +334,7 @@ contents but they should be encoded according to the URI standard.
When used, this option will make all data specified with -d/--data or
--data-binary to be used in a HTTP GET request instead of the POST request
that otherwise would be used. The data will be appended to the URL with a '?'
separator. (Option added in curl 7.9)
separator.
If used in combination with -I, the POST data will instead be appended to the
URL with a HEAD request.
@@ -315,6 +387,8 @@ to be made secure by using the CA certificate bundle installed by
default. This makes all connections considered "insecure" to fail unless
-k/--insecure is used.
This option is ignored if --cacert or --capath is used!
If this option is used twice, the second time will again disable it.
.IP "--krb4 <level>"
(FTP) Enable kerberos4 authentication and use. The level must be entered and
@@ -342,8 +416,8 @@ url = "http://curl.haxx.se/docs/"
This option can be used multiple times.
.IP "--limit-rate <speed>"
Specify the maximum transfer rate you want curl to use. This feature is useful
if you have a limited pipe and you'd prefer you have your transfer not use
your entire bandwidth.
if you have a limited pipe and you'd like your transfer not use your entire
bandwidth.
The given speed is measured in bytes/second, unless a suffix is
appended. Appending 'k' or 'K' will count the number as kilobytes, 'm' or M'
@@ -369,9 +443,18 @@ If this option is used twice, the second will again disable list only.
(HTTP/HTTPS) If the server reports that the requested page has a different
location (indicated with the header line Location:) this flag will let curl
attempt to reattempt the get on the new place. If used together with -i or -I,
headers from all requested pages will be shown. If this flag is used when
making a HTTP POST, curl will automatically switch to GET after the initial
POST has been done.
headers from all requested pages will be shown. If authentication is used,
curl will only send its credentials to the initial host, so if a redirect
takes curl to a different host, it won't intercept the user+password. See also
\fI--location-trusted\fP on how to change this.
If this option is used twice, the second will again disable location following.
.IP "--location-trusted"
(HTTP/HTTPS) Like \fI--location\fP, but will allow sending the name + password
to all hosts that the site may redirect to. This may or may not introduce a
security breach if the site redirects you to a site to which you'll send your
authentication info (which is plaintext in the case of HTTP Basic
authentication).
If this option is used twice, the second will again disable location following.
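In libcurl these two options map to CURLOPT_FOLLOWLOCATION and (for the
trusted variant) CURLOPT_UNRESTRICTED_AUTH, the latter added around 7.10.4;
a brief sketch:

  /* follow Location: redirects */
  curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, TRUE);
  /* only if you really trust the site: also send user+password to hosts
     the server redirects to (the --location-trusted behaviour) */
  curl_easy_setopt(curl, CURLOPT_UNRESTRICTED_AUTH, TRUE);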
.IP "-m/--max-time <seconds>"
@@ -405,6 +488,19 @@ to allow curl to ftp to the machine host.domain.com with user name
.B "machine host.domain.com login myself password secret"
If this option is used twice, the second will again disable netrc usage.
.IP "--negotiate"
(HTTP) Enables GSS-Negotiate authentication. The GSS-Negotiate method was
designed by Microsoft and is used in their web applications. It is primarily
meant as a support for Kerberos5 authentication but may also be used along
with other authentication methods. For more information see IETF draft
draft-brezak-spnego-http-04.txt. (Added in 7.10.6)
\fBNOTE\fP that this option requires that the library was built with GSSAPI
support. This is not very common. Use \fIcurl --version\fP to see if your
version supports GSS-Negotiate.
If this option is used several times, the following occurrences make no
difference.
.IP "-N/--no-buffer"
Disables the buffering of the output stream. In normal work situations, curl
will use a standard buffered output stream that will have the effect that it
@@ -412,6 +508,19 @@ will output the data in chunks, not necessarily exactly when the data arrives.
Using this option will disable that buffering.
If this option is used twice, the second will again switch on buffering.
.IP "--ntlm"
(HTTP) Enables NTLM authentication. The NTLM authentication method was
designed by Microsoft and is used by IIS web servers. It is a proprietary
protocol, reverse-engineered by clever people and implemented in curl based
on their efforts. This kind of behavior should not be endorsed, you should
encourage everyone who uses NTLM to switch to a public and documented
authentication method instead. Such as Digest. (Added in 7.10.6)
\fBNOTE\fP that this option requires that the library was built with SSL
support. Use \fIcurl --version\fP to see if your version supports NTLM.
If this option is used several times, the following occurrences make no
difference.
.IP "-o/--output <file>"
Write output to <file> instead of stdout. If you are using {} or [] to fetch
multiple documents, you can use '#' followed by a number in the <file>
@@ -425,9 +534,11 @@ or use several variables like:
curl http://{site,host}.host[1-5].com -o "#1_#2"
You may use this option as many times as you have number of URLs.
See also the --create-dirs option to create the local directories dynamically.
.IP "-O/--remote-name"
Write output to a local file named like the remote file we get. (Only
the file part of the remote file is used, the path is cut off.)
Write output to a local file named like the remote file we get. (Only the file
part of the remote file is used, the path is cut off.)
You may use this option as many times as you have number of URLs.
.IP "-p/--proxytunnel"
@@ -566,7 +677,7 @@ descriptive information, to the given output file. Use "-" as filename to have
the output sent to stdout.
If this option is used several times, the last one will be used. (Added in
curl 7.9.7)
7.9.7)
.IP "--trace-ascii <file>"
Enables a full trace dump of all incoming and outgoing data, including
descriptive information, to the given output file. Use "-" as filename to have
@@ -577,11 +688,14 @@ the ASCII part of the dump. It makes smaller output that might be easier to
read for untrained humans.
If this option is used several times, the last one will be used. (Added in
curl 7.9.7)
7.9.7)
.IP "-u/--user <user:password>"
Specify user and password to use when fetching. See README.curl for detailed
examples of how to use this. If no password is specified, curl will
ask for it interactively.
Specify user and password to use when fetching. Read the MANUAL for detailed
examples of how to use this. If no password is specified, curl will ask for it
interactively.
You can also use the --digest option to enable Digest authentication when
communicating with HTTP 1.1 servers.
If this option is used several times, the last one will be used.
.IP "-U/--proxy-user <user:password>"
@@ -607,10 +721,22 @@ info provided by curl.
Note that if you want to see HTTP headers in the output, \fI-i/--include\fP
might be the option you're looking for.
If you think this option still doesn't give you enough details, consider using
\fI--trace\fP or \fI--trace-ascii\fP instead.
If this option is used twice, the second will again disable verbose.
.IP "-V/--version"
Displays the full version of curl, libcurl and other 3rd party libraries
linked with the executable.
Displays information about curl and the libcurl version it uses.
The first line includes the full version of curl, libcurl and other 3rd party
libraries linked with the executable.
The second line (starts with "Protocols:") shows all protocols that libcurl
reports to support.
The third line (starts with "Features:") shows specific features libcurl
reports to offer.
.IP "-w/--write-out <format>"
Defines what to display after a completed and successful operation. The format
is a string that may contain plain text mixed with any number of variables. The
@@ -689,7 +815,7 @@ at port 1080.
This option overrides existing environment variables that set the proxy to
use. If there's an environment variable setting a proxy, you can set proxy to
"" to override it.
\&"" to override it.
\fBNote\fP that all operations that are performed over a HTTP proxy will
transparently be converted to HTTP. It means that certain protocol specific
@@ -820,13 +946,15 @@ FTP couldn't set binary. Couldn't change transfer method to binary.
.IP 18
Partial file. Only a part of the file was transferred.
.IP 19
FTP couldn't RETR file. The RETR command failed.
FTP couldn't download/access the given file, the RETR (or similar) command
failed.
.IP 20
FTP write error. The transfer was reported bad by the server.
.IP 21
FTP quote error. A quote command returned error from the server.
.IP 22
HTTP not found. The requested page was not found. This return code only
HTTP page not retrieved. The requested url was not found or returned another
error with the HTTP error code being 400 or above. This return code only
appears if --fail is used.
.IP 23
Write error. Curl couldn't write data to a local filesystem or similar.
@@ -909,8 +1037,6 @@ Unrecognized transfer encoding
.IP XX
There will appear more error codes here in future releases. The existing ones
are meant to never change.
.SH BUGS
If you do find bugs, mail them to curl-bug@haxx.se.
.SH AUTHORS / CONTRIBUTORS
Daniel Stenberg is the main author, but the whole list of contributors is
found in the separate THANKS file.

View File

@@ -9,7 +9,7 @@ EXTRA_DIST = README curlgtk.c sepheaders.c simple.c postit2.c \
multithread.c getinmemory.c ftpupload.c httpput.c \
simplessl.c ftpgetresp.c http-post.c post-callback.c \
multi-app.c multi-double.c multi-single.c multi-post.c \
fopen.c simplepost.c
fopen.c simplepost.c makefile.dj
all:
@echo "done"

View File

@@ -1,222 +1,565 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* $Id$
* This example source code introduces a c library buffered I/O interface to
* URL reads it supports fopen(), fread(), fgets(), feof(), fclose(),
* rewind(). Supported functions have identical prototypes to their normal c
* lib namesakes and are preceded by url_ .
*
* This example source code introduces an fopen()/fread()/fclose() emulation
* for URL reads. Using an approach similar to this, you could replace your
* program's fopen() with this url_fopen() and fread() with url_fread() and
* it should be possible to read remote streams instead of (only) local files.
* Using this code you can replace your program's fopen() with url_fopen()
* and fread() with url_fread() and it become possible to read remote streams
* instead of (only) local files. Local files (ie those that can be directly
* fopened) will drop back to using the underlying clib implementations
*
* See the main() function at the bottom that shows a tiny app in action.
* See the main() function at the bottom that shows an app that retrieves from a
* specified url using fgets() and fread() and saves as two output files.
*
* This source code is a proof of concept. It will need further attention to
* become production-use useful and solid.
* Copyright (c)2003 Simtec Electronics
*
* Re-implemented by Vincent Sanders <vince@kyllikki.org> with extensive
* reference to original curl example code
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This example requires libcurl 7.9.7 or later.
*/
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <stdlib.h>
#include <errno.h>
#include <curl/curl.h>
#include <curl/types.h>
#include <curl/easy.h>
struct data {
int type;
union {
CURL *curl;
FILE *file;
} handle;
#if (LIBCURL_VERSION_NUM < 0x070907)
#error "too old libcurl version, get the latest!"
#endif
/* TODO: We should perhaps document the biggest possible buffer chunk we can
get from libcurl in one single callback... */
char buffer[CURL_MAX_WRITE_SIZE];
char *readptr; /* read from here */
int bytes; /* bytes available from read pointer */
enum fcurl_type_e { CFTYPE_NONE=0, CFTYPE_FILE=1, CFTYPE_CURL=2 };
CURLMcode m; /* stored from a previous url_fread() */
struct fcurl_data
{
enum fcurl_type_e type; /* type of handle */
union {
CURL *curl;
FILE *file;
} handle; /* handle */
char *buffer; /* buffer to store cached data*/
int buffer_len; /* currently allocated buffers length */
int buffer_pos; /* end of data in buffer*/
int still_running; /* Is background url fetch still in progress */
};
typedef struct data URL_FILE;
typedef struct fcurl_data URL_FILE;
/* exported functions */
URL_FILE *url_fopen(char *url,const char *operation);
int url_fclose(URL_FILE *file);
int url_feof(URL_FILE *file);
size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file);
char * url_fgets(char *ptr, int size, URL_FILE *file);
void url_rewind(URL_FILE *file);
/* we use a global one for convenience */
CURLM *multi_handle;
static
size_t write_callback(char *buffer,
size_t size,
size_t nitems,
void *userp)
/* curl calls this routine to get more data */
static size_t
write_callback(char *buffer,
size_t size,
size_t nitems,
void *userp)
{
URL_FILE *url = (URL_FILE *)userp;
size *= nitems;
char *newbuff;
int rembuff;
memcpy(url->readptr, buffer, size);
url->readptr += size;
url->bytes += size;
URL_FILE *url = (URL_FILE *)userp;
size *= nitems;
return size;
}
rembuff=url->buffer_len - url->buffer_pos;//remaining space in buffer
URL_FILE *url_fopen(char *url, char *operation)
{
/* this code could check for URLs or types in the 'url' and
basicly use the real fopen() for standard files */
if(size > rembuff)
{
//not enuf space in buffer
newbuff=realloc(url->buffer,url->buffer_len + (size - rembuff));
if(newbuff==NULL)
{
fprintf(stderr,"callback buffer grow failed\n");
size=rembuff;
}
else
{
/* realloc succeeded, increase buffer size */
url->buffer_len+=size - rembuff;
url->buffer=newbuff;
URL_FILE *file;
int still_running;
file = (URL_FILE *)malloc(sizeof(URL_FILE));
if(!file)
return NULL;
memset(file, 0, sizeof(URL_FILE));
file->type = 1; /* marked as URL, use 0 for plain file */
file->handle.curl = curl_easy_init();
curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
curl_easy_setopt(file->handle.curl, CURLOPT_FILE, file);
curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, FALSE);
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
if(!multi_handle)
multi_handle = curl_multi_init();
curl_multi_add_handle(multi_handle, file->handle.curl);
while(CURLM_CALL_MULTI_PERFORM ==
curl_multi_perform(multi_handle, &still_running));
/* if still_running would be 0 now, we should return NULL */
return file;
}
void url_fclose(URL_FILE *file)
{
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
}
size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
{
fd_set fdread;
fd_set fdwrite;
fd_set fdexcep;
int maxfd;
struct timeval timeout;
int rc;
int still_running = 0;
if(!file->bytes) { /* no data available at this point */
file->readptr = file->buffer; /* reset read pointer */
if(CURLM_CALL_MULTI_PERFORM == file->m) {
while(CURLM_CALL_MULTI_PERFORM ==
curl_multi_perform(multi_handle, &still_running)) {
if(file->bytes) {
printf("(fread) WOAH! THis happened!\n");
break;
}
}
if(!still_running) {
printf("NO MORE RUNNING AROUND!\n");
return 0;
}
/*printf("Callback buffer grown to %d bytes\n",url->buffer_len);*/
}
}
FD_ZERO(&fdread);
FD_ZERO(&fdwrite);
FD_ZERO(&fdexcep);
/* set a suitable timeout to fail on */
timeout.tv_sec = 500; /* 5 minutes */
timeout.tv_usec = 0;
memcpy(&url->buffer[url->buffer_pos], buffer, size);
url->buffer_pos += size;
/* get file descriptors from the transfers */
curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
/*fprintf(stderr, "callback %d size bytes\n", size);*/
rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
return size;
}
switch(rc) {
case -1:
/* select error */
break;
case 0:
break;
default:
/* timeout or readable/writable sockets */
do {
file->m = curl_multi_perform(multi_handle, &still_running);
/* use to attempt to fill the read buffer up to requested number of bytes */
static int
curl_fill_buffer(URL_FILE *file,int want,int waittime)
{
fd_set fdread;
fd_set fdwrite;
fd_set fdexcep;
int maxfd;
struct timeval timeout;
int rc;
if(file->bytes)
/* we have received data, return that now */
break;
/* only attempt to fill buffer if transactions still running and buffer
* doesnt exceed required size already
*/
if((!file->still_running) || (file->buffer_pos > want))
return 0;
} while(CURLM_CALL_MULTI_PERFORM == file->m);
/* attempt to fill buffer */
do
{
FD_ZERO(&fdread);
FD_ZERO(&fdwrite);
FD_ZERO(&fdexcep);
/* set a suitable timeout to fail on */
timeout.tv_sec = 60; /* 1 minute */
timeout.tv_usec = 0;
if(!still_running)
printf("NO MORE RUNNING AROUND!\n");
/* get file descriptors from the transfers */
curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
break;
rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
switch(rc) {
case -1:
/* select error */
break;
case 0:
break;
default:
/* timeout or readable/writable sockets */
/* note we *could* be more efficient and not wait for
* CURLM_CALL_MULTI_PERFORM to clear here and check it on re-entry
* but that gets messy */
while(curl_multi_perform(multi_handle, &file->still_running) ==
CURLM_CALL_MULTI_PERFORM);
break;
}
} while(file->still_running && (file->buffer_pos < want));
return 1;
}
/* use to remove want bytes from the front of a files buffer */
static int
curl_use_buffer(URL_FILE *file,int want)
{
/* sort out buffer */
if((file->buffer_pos - want) <=0)
{
/* ditch buffer - write will recreate */
if(file->buffer)
free(file->buffer);
file->buffer=NULL;
file->buffer_pos=0;
file->buffer_len=0;
}
}
else
printf("(fread) Skip network read\n");
else
{
/* move rest down make it available for later */
memmove(file->buffer,
&file->buffer[want],
(file->buffer_pos - want));
if(file->bytes) {
/* data already available, return that */
int want = size * nmemb;
file->buffer_pos -= want;
}
return 0;
}
if(file->bytes < want)
want = file->bytes;
memcpy(ptr, file->readptr, want);
file->readptr += want;
file->bytes -= want;
printf("(fread) return %d bytes\n", want);
URL_FILE *
url_fopen(char *url,const char *operation)
{
/* this code could check for URLs or types in the 'url' and
basicly use the real fopen() for standard files */
URL_FILE *file;
(void)operation;
file = (URL_FILE *)malloc(sizeof(URL_FILE));
if(!file)
return NULL;
memset(file, 0, sizeof(URL_FILE));
if((file->handle.file=fopen(url,operation)))
{
file->type = CFTYPE_FILE; /* marked as URL */
}
else
{
file->type = CFTYPE_CURL; /* marked as URL */
file->handle.curl = curl_easy_init();
curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
curl_easy_setopt(file->handle.curl, CURLOPT_FILE, file);
curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, FALSE);
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
if(!multi_handle)
multi_handle = curl_multi_init();
curl_multi_add_handle(multi_handle, file->handle.curl);
/* lets start the fetch */
while(curl_multi_perform(multi_handle, &file->still_running) ==
CURLM_CALL_MULTI_PERFORM );
if((file->buffer_pos == 0) && (!file->still_running))
{
/* if still_running is 0 now, we should return NULL */
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
free(file);
file = NULL;
}
}
return file;
}
int
url_fclose(URL_FILE *file)
{
int ret=0;/* default is good return */
switch(file->type)
{
case CFTYPE_FILE:
ret=fclose(file->handle.file); /* passthrough */
break;
case CFTYPE_CURL:
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
break;
default: /* unknown or unsupported type - oh dear */
ret=EOF;
errno=EBADF;
break;
}
if(file->buffer)
free(file->buffer);/* free any allocated buffer space */
free(file);
return ret;
}
int
url_feof(URL_FILE *file)
{
int ret=0;
switch(file->type)
{
case CFTYPE_FILE:
ret=feof(file->handle.file);
break;
case CFTYPE_CURL:
if((file->buffer_pos == 0) && (!file->still_running))
ret = 1;
break;
default: /* unknown or unsupported type - oh dear */
ret=-1;
errno=EBADF;
break;
}
return ret;
}
size_t
url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
{
size_t want;
switch(file->type)
{
case CFTYPE_FILE:
want=fread(ptr,size,nmemb,file->handle.file);
break;
case CFTYPE_CURL:
want = nmemb * size;
curl_fill_buffer(file,want,1);
/* check if theres data in the buffer - if not curl_fill_buffer()
* either errored or EOF */
if(!file->buffer_pos)
return 0;
/* ensure only available data is considered */
if(file->buffer_pos < want)
want = file->buffer_pos;
/* xfer data to caller */
memcpy(ptr, file->buffer, want);
curl_use_buffer(file,want);
want = want / size; /* number of items - nb correct op - checked
* with glibc code*/
/*printf("(fread) return %d bytes %d left\n", want,file->buffer_pos);*/
break;
default: /* unknown or unsupported type - oh dear */
want=0;
errno=EBADF;
break;
}
return want;
}
return 0; /* no data available to return */
}
int main(int argc, char *argv[])
char *
url_fgets(char *ptr, int size, URL_FILE *file)
{
URL_FILE *handle;
int nread;
char buffer[256];
int want = size - 1;/* always need to leave room for zero termination */
int loop;
handle = url_fopen("http://www.haxx.se", "r");
switch(file->type)
{
case CFTYPE_FILE:
ptr = fgets(ptr,size,file->handle.file);
break;
if(!handle) {
printf("couldn't url_fopen()\n");
}
case CFTYPE_CURL:
curl_fill_buffer(file,want,1);
do {
nread = url_fread(buffer, sizeof(buffer), 1, handle);
/* check if theres data in the buffer - if not fill either errored or
* EOF */
if(!file->buffer_pos)
return NULL;
printf("We got: %d bytes\n", nread);
} while(nread);
/* ensure only available data is considered */
if(file->buffer_pos < want)
want = file->buffer_pos;
url_fclose(handle);
/*buffer contains data */
/* look for newline or eof */
for(loop=0;loop < want;loop++)
{
if(file->buffer[loop] == '\n')
{
want=loop+1;/* include newline */
break;
}
}
return 0;
/* xfer data to caller */
memcpy(ptr, file->buffer, want);
ptr[want]=0;/* always null terminate */
curl_use_buffer(file,want);
/*printf("(fgets) return %d bytes %d left\n", want,file->buffer_pos);*/
break;
default: /* unknown or unsupported type - oh dear */
ptr=NULL;
errno=EBADF;
break;
}
return ptr;/*success */
}
void
url_rewind(URL_FILE *file)
{
switch(file->type)
{
case CFTYPE_FILE:
rewind(file->handle.file); /* passthrough */
break;
case CFTYPE_CURL:
/* halt transaction */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* restart */
curl_multi_add_handle(multi_handle, file->handle.curl);
/* ditch buffer - write will recreate - resets stream pos*/
if(file->buffer)
free(file->buffer);
file->buffer=NULL;
file->buffer_pos=0;
file->buffer_len=0;
break;
default: /* unknown or unsupported type - oh dear */
break;
}
}
/* Small main program to retrieve from a url using fgets and fread saving the
* output to two test files (note the fgets method will corrupt binary files if
* they contain 0 chars) */
int
main(int argc, char *argv[])
{
URL_FILE *handle;
FILE *outf;
int nread;
char buffer[256];
char *url;
if(argc < 2)
{
url="http://192.168.7.3/testfile";/* default to testurl */
}
else
{
url=argv[1];/* use passed url */
}
/* copy from url line by line with fgets */
outf=fopen("fgets.test","w+");
if(!outf)
{
perror("couldnt open fgets output file\n");
return 1;
}
handle = url_fopen(url, "r");
if(!handle)
{
printf("couldn't url_fopen()\n");
fclose(outf);
return 2;
}
while(!url_feof(handle))
{
url_fgets(buffer,sizeof(buffer),handle);
fwrite(buffer,1,strlen(buffer),outf);
}
url_fclose(handle);
fclose(outf);
/* Copy from url with fread */
outf=fopen("fread.test","w+");
if(!outf)
{
perror("couldnt open fread output file\n");
return 1;
}
handle = url_fopen("testfile", "r");
if(!handle) {
printf("couldn't url_fopen()\n");
fclose(outf);
return 2;
}
do {
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
} while(nread);
url_fclose(handle);
fclose(outf);
/* Test rewind */
outf=fopen("rewind.test","w+");
if(!outf)
{
perror("couldnt open fread output file\n");
return 1;
}
handle = url_fopen("testfile", "r");
if(!handle) {
printf("couldn't url_fopen()\n");
fclose(outf);
return 2;
}
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
url_rewind(handle);
buffer[0]='\n';
fwrite(buffer,1,1,outf);
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
url_fclose(handle);
fclose(outf);
return 0;/* all done */
}

View File

@@ -11,6 +11,9 @@
#include <stdio.h>
#include <curl/curl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
/*
* This example shows an FTP upload, with a rename of the file just after

View File

@@ -74,6 +74,10 @@ int main(int argc, char **argv)
* bytes big and contains the remote file.
*
* Do something nice with it!
*
* You should be aware of the fact that at this point we might have an
* allocated data block, and nothing has yet deallocated that data. So when
* you're done with it, you should free() it as a nice application.
*/
return 0;

31
docs/examples/makefile.dj Normal file
View File

@@ -0,0 +1,31 @@
#
# Adapted for djgpp / Watt-32 / DOS by
# Gisle Vanem <giva@bgnett.no>
#
include ../../packages/DOS/common.dj
CFLAGS += -I../../include
LIBS = ../../lib/libcurl.a
ifeq ($(USE_SSL),1)
LIBS += $(OPENSSL_ROOT)/lib/libssl.a $(OPENSSL_ROOT)/lib/libcrypt.a
endif
LIBS += $(WATT32_ROOT)/lib/libwatt.a $(ZLIB_ROOT)/libz.a
PROGRAMS = fopen.exe ftpget.exe ftpgetre.exe ftpuploa.exe getinmem.exe \
http-pos.exe httpput.exe multi-ap.exe multi-do.exe \
multi-po.exe multi-si.exe persista.exe post-cal.exe \
postit2.exe sepheade.exe simple.exe simpless.exe
all: $(PROGRAMS)
.c.exe:
$(CC) $(CFLAGS) -o $@ $^ $(LIBS)
@echo
clean:
rm -f $(PROGRAMS)

View File

@@ -83,7 +83,8 @@ int main(int argc, char **argv)
default:
/* one or more of curl's file descriptors say there's data to read
or write */
curl_multi_perform(multi_handle, &still_running);
while(CURLM_CALL_MULTI_PERFORM ==
curl_multi_perform(multi_handle, &still_running));
break;
}
}

View File

@@ -80,7 +80,8 @@ int main(int argc, char **argv)
case 0:
default:
/* timeout or readable/writable sockets */
curl_multi_perform(multi_handle, &still_running);
while(CURLM_CALL_MULTI_PERFORM ==
curl_multi_perform(multi_handle, &still_running));
break;
}
}

View File

@@ -74,7 +74,8 @@ int main(int argc, char **argv)
case 0:
default:
/* timeout or readable/writable sockets */
curl_multi_perform(multi_handle, &still_running);
while(CURLM_CALL_MULTI_PERFORM ==
curl_multi_perform(multi_handle, &still_running));
break;
}
}

View File

@@ -21,7 +21,7 @@
size_t write_data(void *ptr, size_t size, size_t nmemb, void *stream)
{
written = fwrite(ptr, size, nmemb, (FILE *)stream);
int written = fwrite(ptr, size, nmemb, (FILE *)stream);
return written;
}

View File

@@ -40,7 +40,7 @@ int main(int argc, char **argv)
FILE *headerfile;
const char *pCertFile = "testcert.pem";
const char *pCACertFile="cacert.pem"
const char *pCACertFile="cacert.pem";
const char *pKeyName;
const char *pKeyType;
@@ -66,7 +66,7 @@ int main(int argc, char **argv)
curl = curl_easy_init();
if(curl) {
/* what call to write: */
curl_easy_setopt(curl, CURLOPT_URL, "HTTPS://curl.haxx.se");
curl_easy_setopt(curl, CURLOPT_URL, "HTTPS://your.favourite.ssl.site");
curl_easy_setopt(curl, CURLOPT_WRITEHEADER, headerfile);
while(1) /* do some ugly short cut... */

View File

@@ -232,6 +232,7 @@ Multi-threading issues
For SIGPIPE info see the UNIX Socket FAQ at
http://www.unixguide.net/network/socketfaq/2.22.shtml
Also, note that CURLOPT_DNS_USE_GLOBAL_CACHE is not thread-safe.
When It Doesn't Work
@@ -255,6 +256,9 @@ When It Doesn't Work
possible of your code that uses libcurl, operating system name and version,
compiler name and version etc.
If CURLOPT_VERBOSE is not enough, you can increase the level of debug data your
application receives by using CURLOPT_DEBUGFUNCTION.
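A minimal sketch of such a debug callback (CURLOPT_VERBOSE must be enabled
for libcurl to call it; CURLOPT_DEBUGDATA can pass a custom pointer):

  int my_trace(CURL *handle, curl_infotype type, char *data, size_t size,
               void *userp)
  {
    (void)handle;
    (void)userp;
    if(type == CURLINFO_TEXT)
      fwrite(data, 1, size, stderr); /* informational text from libcurl */
    return 0;
  }

  curl_easy_setopt(easyhandle, CURLOPT_DEBUGFUNCTION, my_trace);
  curl_easy_setopt(easyhandle, CURLOPT_VERBOSE, TRUE);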
Getting some in-depth knowledge about the protocols involved is never wrong,
and if you're trying to do funny things, you might very well understand
libcurl and how to use it better if you study the appropriate RFC documents
@@ -293,8 +297,8 @@ Upload Data to a Remote Site
curl_easy_setopt(easyhandle, CURLOPT_UPLOAD, TRUE);
A few protocols won't behave properly when uploads are done without any prior
knowledge of the expected file size. HTTP PUT is one example [1]. So, set the
upload file size using the CURLOPT_INFILESIZE like this:
knowledge of the expected file size. So, set the upload file size using the
CURLOPT_INFILESIZE for all known file sizes like this[1]:
curl_easy_setopt(easyhandle, CURLOPT_INFILESIZE, file_size);
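A hedged sketch of a full upload setup around that option, using a plain
FILE pointer and a read callback (the file name and the file_size variable
are assumptions for the example; older libcurl versions call CURLOPT_READDATA
CURLOPT_INFILE):

  size_t read_callback(void *ptr, size_t size, size_t nmemb, void *stream)
  {
    /* copy at most size*nmemb bytes into 'ptr' and return the byte count */
    return fread(ptr, 1, size*nmemb, (FILE *)stream);
  }

  FILE *upload_file = fopen("file_to_send", "rb");

  curl_easy_setopt(easyhandle, CURLOPT_READFUNCTION, read_callback);
  curl_easy_setopt(easyhandle, CURLOPT_READDATA, upload_file);
  curl_easy_setopt(easyhandle, CURLOPT_UPLOAD, TRUE);
  curl_easy_setopt(easyhandle, CURLOPT_INFILESIZE, file_size);
  curl_easy_perform(easyhandle);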
@@ -404,7 +408,7 @@ HTTP POSTing
headers = curl_slist_append(headers, "Content-Type: text/xml");
/* post binary data */
curl_easy_setopt(easyhandle, CURLOPT_POSTFIELD, binaryptr);
curl_easy_setopt(easyhandle, CURLOPT_POSTFIELDS, binaryptr);
/* set the size of the postfields data */
curl_easy_setopt(easyhandle, CURLOPT_POSTFIELDSIZE, 23);
@@ -726,6 +730,35 @@ Persistancy Is The Way to Happiness
CURLOPT_FORBID_REUSE to TRUE.
HTTP Headers Used by libcurl
When you use libcurl to do HTTP requests, it'll pass along a series of headers
automatically. It might be good for you to know and understand these.
Host
This header is required by HTTP 1.1 and even many 1.0 servers and should
be the name of the server we want to talk to. This includes the port
number if anything but default.
Pragma
"no-cache". Tells a possible proxy to not grap a copy from the cache but
to fetch a fresh one.
Accept:
"image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*". Cloned from a
browser once a hundred years ago.
Expect:
When doing multi-part formposts, libcurl will set this header to
"100-continue" to ask the server for an "OK" message before it proceeds
with sending the data part of the post.
Customizing Operations
There is an ongoing development today where more and more protocols are built
@@ -738,101 +771,129 @@ Customizing Operations
libcurl is your friend here too.
If just changing the actual HTTP request keyword is what you want, like when
GET, HEAD or POST is not good enough for you, CURLOPT_CUSTOMREQUEST is there
for you. It is very simple to use:
CUSTOMREQUEST
curl_easy_setopt(easyhandle, CURLOPT_CUSTOMREQUEST, "MYOWNREQUEST");
If just changing the actual HTTP request keyword is what you want, like
when GET, HEAD or POST is not good enough for you, CURLOPT_CUSTOMREQUEST
is there for you. It is very simple to use:
When using the custom request, you change the request keyword of the actual
request you are performing. Thus, by default you make GET request but you can
also make a POST operation (as described before) and then replace the POST
keyword if you want to. You're the boss.
curl_easy_setopt(easyhandle, CURLOPT_CUSTOMREQUEST, "MYOWNREQUEST");
HTTP-like protocols pass a series of headers to the server when doing the
request, and you're free to pass any amount of extra headers that you think
fit. Adding headers are this easy:
When using the custom request, you change the request keyword of the
actual request you are performing. Thus, by default you make GET request
but you can also make a POST operation (as described before) and then
replace the POST keyword if you want to. You're the boss.
struct curl_slist *headers=NULL; /* init to NULL is important */
Modify Headers
headers = curl_slist_append(headers, "Hey-server-hey: how are you?");
headers = curl_slist_append(headers, "X-silly-content: yes");
HTTP-like protocols pass a series of headers to the server when doing the
request, and you're free to pass any amount of extra headers that you
think fit. Adding headers is this easy:
/* pass our list of custom made headers */
curl_easy_setopt(easyhandle, CURLOPT_HTTPHEADER, headers);
struct curl_slist *headers=NULL; /* init to NULL is important */
curl_easy_perform(easyhandle); /* transfer http */
headers = curl_slist_append(headers, "Hey-server-hey: how are you?");
headers = curl_slist_append(headers, "X-silly-content: yes");
curl_slist_free_all(headers); /* free the header list */
/* pass our list of custom made headers */
curl_easy_setopt(easyhandle, CURLOPT_HTTPHEADER, headers);
... and if you think some of the internally generated headers, such as
User-Agent:, Accept: or Host: don't contain the data you want them to
contain, you can replace them by simply setting them too:
curl_easy_perform(easyhandle); /* transfer http */
headers = curl_slist_append(headers, "User-Agent: 007");
headers = curl_slist_append(headers, "Host: munged.host.line");
curl_slist_free_all(headers); /* free the header list */
If you replace an existing header with one with no contents, you will prevent
the header from being sent. Like if you want to completely prevent the
"Accept:" header to be sent, you can disable it with code similar to this:
... and if you think some of the internally generated headers, such as
Accept: or Host: don't contain the data you want them to contain, you can
replace them by simply setting them too:
headers = curl_slist_append(headers, "Accept:");
headers = curl_slist_append(headers, "Accept: Agent-007");
headers = curl_slist_append(headers, "Host: munged.host.line");
Both replacing and cancelling internal headers should be done with careful
consideration and you should be aware that you may violate the HTTP protocol
when doing so.
Delete Headers
There's only one aspect left in the HTTP requests that we haven't yet
mentioned how to modify: the version field. All HTTP requests include the
version number to tell the server which version we support. libcurl speaks
HTTP 1.1 by default. Some very old servers don't like getting 1.1-requests
and when dealing with stubborn old things like that, you can tell libcurl to
use 1.0 instead by doing something like this:
If you replace an existing header with one with no contents, you will
prevent the header from being sent. Like if you want to completely prevent
the "Accept:" header to be sent, you can disable it with code similar to
this:
curl_easy_setopt(easyhandle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0);
headers = curl_slist_append(headers, "Accept:");
Not all protocols are HTTP-like, and thus the above may not help you when you
want to make, for example, your FTP transfers behave differently.
Both replacing and cancelling internal headers should be done with careful
consideration and you should be aware that you may violate the HTTP
protocol when doing so.
Sending custom commands to an FTP server means that you need to send the
commands exactly as the FTP server expects them (RFC959 is a good guide here),
and you can only use commands that work on the control-connection alone. All
kinds of commands that require data interchange and thus need a
data-connection must be left to libcurl's own judgement. Also be aware that
libcurl will do its very best to change directory to the target directory
before doing any transfer, so if you change directory (with CWD or similar)
you might confuse libcurl and then it might not attempt to transfer the file
in the correct remote directory.
Enforcing chunked transfer-encoding
A little example that deletes a given file before an operation:
By making sure a request uses the custom header "Transfer-Encoding:
chunked" when doing a non-GET HTTP operation, libcurl will switch over to
"chunked" upload, even though the size of the data to upload might be
known. By default, libcurl usually switches over to chunked upload
automatically if the upload data size is unknown.
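A brief sketch of what forcing chunked uploads could look like (assuming an
easyhandle that is otherwise set up for the upload):

struct curl_slist *chunk = NULL;

chunk = curl_slist_append(chunk, "Transfer-Encoding: chunked");
curl_easy_setopt(easyhandle, CURLOPT_HTTPHEADER, chunk);
curl_easy_setopt(easyhandle, CURLOPT_UPLOAD, TRUE); /* a PUT-style upload */

curl_easy_perform(easyhandle);

curl_slist_free_all(chunk);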
headers = curl_slist_append(headers, "DELE file-to-remove");
HTTP Version
/* pass the list of custom commands to the handle */
curl_easy_setopt(easyhandle, CURLOPT_QUOTE, headers);
There's only one aspect left in the HTTP requests that we haven't yet
mentioned how to modify: the version field. All HTTP requests include the
version number to tell the server which version we support. libcurl speaks
HTTP 1.1 by default. Some very old servers don't like getting 1.1-requests
and when dealing with stubborn old things like that, you can tell libcurl
to use 1.0 instead by doing something like this:
curl_easy_perform(easyhandle); /* transfer ftp data! */
curl_easy_setopt(easyhandle, CURLOPT_HTTP_VERSION,
CURL_HTTP_VERSION_1_0);
curl_slist_free_all(headers); /* free the header list */
FTP Custom Commands
If you would instead want this operation (or chain of operations) to happen
_after_ the data transfer took place, the option to curl_easy_setopt() would
instead be called CURLOPT_POSTQUOTE and used the exact same way.
Not all protocols are HTTP-like, and thus the above may not help you when
you want to make, for example, your FTP transfers behave differently.
The custom FTP commands will be issued to the server in the same order they
are added to the list, and if a command gets an error code returned back from
the server, no more commands will be issued and libcurl will bail out with an
error code (CURLE_FTP_QUOTE_ERROR). Note that if you use CURLOPT_QUOTE to
send commands before a transfer, no transfer will actually take place when a
quote command has failed.
Sending custom commands to an FTP server means that you need to send the
commands exactly as the FTP server expects them (RFC959 is a good guide
here), and you can only use commands that work on the control-connection
alone. All kinds of commands that require data interchange and thus need
a data-connection must be left to libcurl's own judgement. Also be aware
that libcurl will do its very best to change directory to the target
directory before doing any transfer, so if you change directory (with CWD
or similar) you might confuse libcurl and then it might not attempt to
transfer the file in the correct remote directory.
If you set the CURLOPT_HEADER to true, you will tell libcurl to get
information about the target file and output "headers" about it. The headers
will be in "HTTP-style", looking like they do in HTTP.
A little example that deletes a given file before an operation:
The option to enable headers or to run custom FTP commands may be useful to
combine with CURLOPT_NOBODY. If this option is set, no actual file content
transfer will be performed.
headers = curl_slist_append(headers, "DELE file-to-remove");
/* pass the list of custom commands to the handle */
curl_easy_setopt(easyhandle, CURLOPT_QUOTE, headers);
curl_easy_perform(easyhandle); /* transfer ftp data! */
curl_slist_free_all(headers); /* free the header list */
If you would instead want this operation (or chain of operations) to
happen _after_ the data transfer took place, the option to
curl_easy_setopt() would instead be called CURLOPT_POSTQUOTE and used the
exact same way.
The custom FTP commands will be issued to the server in the same order they
are added to the list, and if a command gets an error code returned back
from the server, no more commands will be issued and libcurl will bail out
with an error code (CURLE_FTP_QUOTE_ERROR). Note that if you use
CURLOPT_QUOTE to send commands before a transfer, no transfer will
actually take place when a quote command has failed.
If you set the CURLOPT_HEADER to true, you will tell libcurl to get
information about the target file and output "headers" about it. The
headers will be in "HTTP-style", looking like they do in HTTP.
The option to enable headers or to run custom FTP commands may be useful
to combine with CURLOPT_NOBODY. If this option is set, no actual file
content transfer will be performed.
FTP Custom CUSTOMREQUEST
If you do want to list the contents of an FTP directory using your own defined
FTP command, CURLOPT_CUSTOMREQUEST will do just that. "NLST" is the
default one for listing directories but you're free to pass in your idea
of a good alternative.
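For example (the URL is a placeholder and an existing easyhandle is assumed),
asking the server for a full "LIST" output instead of the name-only "NLST"
could be done like this:

curl_easy_setopt(easyhandle, CURLOPT_URL, "ftp://ftp.example.com/dir/");
curl_easy_setopt(easyhandle, CURLOPT_CUSTOMREQUEST, "LIST");

curl_easy_perform(easyhandle);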
Cookies Without Chocolate Chips
@@ -1007,19 +1068,30 @@ SSL, Certificates and Other Tricks
[ seeding, passwords, keys, certificates, ENGINE, ca certs ]
Multiple Transfers Using the multi Interface
The easy interface as described in detail in this document is a synchronous
interface that transfers one file at a time and doesn't return until it's
done.
The multi interface on the other hand, allows your program to transfer
multiple files in both directions at the same time, without forcing you to
use multiple threads.
[fill in lots of more multi stuff here]
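In the meantime, a minimal sketch of the multi interface flow (the URL is a
placeholder; error checking and the select() wait on the descriptors from
curl_multi_fdset() are left out for brevity) might look like this:

CURL *easyhandle = curl_easy_init();
CURLM *multihandle = curl_multi_init();
int still_running;

curl_easy_setopt(easyhandle, CURLOPT_URL, "http://www.example.com/");
curl_multi_add_handle(multihandle, easyhandle);

while(CURLM_CALL_MULTI_PERFORM == curl_multi_perform(multihandle, &still_running))
  ; /* call again right away */

while(still_running) {
  /* a real application waits for action with select() here, using the
     fd_sets filled in by curl_multi_fdset() */
  while(CURLM_CALL_MULTI_PERFORM == curl_multi_perform(multihandle, &still_running))
    ;
}

curl_multi_remove_handle(multihandle, easyhandle);
curl_easy_cleanup(easyhandle);
curl_multi_cleanup(multihandle);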
Future
[ multi interface, sharing between handles, mutexes, pipelining ]
[ sharing between handles, mutexes, pipelining ]
-----
Footnotes:
[1] = HTTP PUT without knowing the size prior to transfer is indeed possible,
but libcurl does not support the chunked transfers on uploading that is
necessary for this feature to work. We'd gratefully appreciate patches
that bring this functionality...
[1] = libcurl 7.10.3 and later have the ability to switch over to chunked
Transfer-Encoding in cases where HTTP uploads are done with data of an
unknown size.
[2] = This happens on Windows machines when libcurl is built and used as a
DLL. However, you can still do this on Windows if you link with a static

View File

@@ -1,3 +1,5 @@
Makefile
Makefile.in
*html
*ps
*pdf

View File

@@ -28,7 +28,6 @@ man_MANS = \
curl_mprintf.3 \
curl_global_init.3 \
curl_global_cleanup.3 \
libcurl.3 \
curl_multi_add_handle.3 \
curl_multi_cleanup.3 \
curl_multi_fdset.3 \
@@ -36,7 +35,11 @@ man_MANS = \
curl_multi_init.3 \
curl_multi_perform.3 \
curl_multi_remove_handle.3 \
curl_share_cleanup.3 curl_share_init.3 curl_share_setopt.3 \
libcurl.3 \
libcurl-easy.3 \
libcurl-multi.3 \
libcurl-share.3 \
libcurl-errors.3
HTMLPAGES = \
@@ -63,7 +66,6 @@ HTMLPAGES = \
curl_mprintf.html \
curl_global_init.html \
curl_global_cleanup.html \
libcurl.html \
curl_multi_add_handle.html \
curl_multi_cleanup.html \
curl_multi_fdset.html \
@@ -71,20 +73,69 @@ HTMLPAGES = \
curl_multi_init.html \
curl_multi_perform.html \
curl_multi_remove_handle.html \
curl_share_cleanup.html curl_share_init.html curl_share_setopt.html \
libcurl.html \
libcurl-multi.html \
libcurl-errors.html \
index.html
libcurl-easy.html \
libcurl-share.html \
libcurl-errors.html
EXTRA_DIST = $(man_MANS) $(HTMLPAGES)
PDFPAGES = \
curl_easy_cleanup.pdf \
curl_easy_getinfo.pdf \
curl_easy_init.pdf \
curl_easy_perform.pdf \
curl_easy_setopt.pdf \
curl_easy_duphandle.pdf \
curl_formadd.pdf \
curl_formparse.pdf \
curl_formfree.pdf \
curl_getdate.pdf \
curl_getenv.pdf \
curl_slist_append.pdf \
curl_slist_free_all.pdf \
curl_version.pdf \
curl_version_info.pdf \
curl_escape.pdf \
curl_unescape.pdf \
curl_free.pdf \
curl_strequal.pdf \
curl_strnequal.pdf \
curl_mprintf.pdf \
curl_global_init.pdf \
curl_global_cleanup.pdf \
curl_multi_add_handle.pdf \
curl_multi_cleanup.pdf \
curl_multi_fdset.pdf \
curl_multi_info_read.pdf \
curl_multi_init.pdf \
curl_multi_perform.pdf \
curl_multi_remove_handle.pdf \
curl_share_cleanup.pdf curl_share_init.pdf curl_share_setopt.pdf \
libcurl.pdf \
libcurl-multi.pdf \
libcurl-easy.pdf \
libcurl-share.pdf \
libcurl-errors.pdf
CLEANFILES = $(HTMLPAGES) $(PDFPAGES)
EXTRA_DIST = $(man_MANS) $(HTMLPAGES) index.html $(PDFPAGES)
MAN2HTML= gnroff -man $< | man2html >$@
SUFFIXES = .1 .3 .html
SUFFIXES = .3 .html
html: $(HTMLPAGES)
.3.html:
$(MAN2HTML)
.1.html:
$(MAN2HTML)
pdf: $(PDFPAGES)
.3.pdf:
@(foo=`echo $@ | sed -e 's/\.[0-9]$$//g'`; \
groff -Tps -man $< >$$foo.ps; \
ps2pdf $$foo.ps $@; \
rm $$foo.ps; \
echo "converted $< to $@")

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_easy_cleanup 3 "4 March 2002" "libcurl 7.7" "libcurl Manual"
.TH curl_easy_cleanup 3 "13 Nov 2002" "libcurl 7.7" "libcurl Manual"
.SH NAME
curl_easy_cleanup - End a libcurl easy session
.SH SYNOPSIS
@@ -18,6 +18,9 @@ opposite of the \fIcurl_easy_init\fP function and must be called with the same
This will effectively close all connections this handle has used and possibly
has kept open until now. Don't call this function if you intend to transfer
more files.
When you've called this, you can safely remove all the strings you've
previously told libcurl to use, as it won't use them anymore now.
.SH RETURN VALUE
None
.SH "SEE ALSO"

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_easy_init 3 "25 Apr 2002" "libcurl 7.9.7" "libcurl Manual"
.TH curl_easy_getinfo 3 "25 Apr 2002" "libcurl 7.9.7" "libcurl Manual"
.SH NAME
curl_easy_getinfo - Extract information from a curl session (added in 7.4)
.SH SYNOPSIS
@@ -78,7 +78,8 @@ uploaded.
.TP
.B CURLINFO_SIZE_DOWNLOAD
Pass a pointer to a double to receive the total amount of bytes that were
downloaded.
downloaded. The amount is only for the latest transfer and will be reset again
for each new transfer.
.TP
.B CURLINFO_SPEED_DOWNLOAD
Pass a pointer to a double to receive the average download speed that curl
@@ -115,8 +116,12 @@ Pass a pointer to a 'char *' to receive the content-type of the downloaded
object. This is the value read from the Content-Type: field. If you get NULL,
it means that the server didn't send a valid Content-Type header or that the
protocol used doesn't support this. (Added in 7.9.4)
.TP
.B CURLINFO_PRIVATE
Pass a pointer to a 'char *' to receive the pointer to the private data
associated with the curl handle (set with the CURLOPT_PRIVATE option to curl_easy_setopt).
(Added in 7.10.3)
.PP
.SH RETURN VALUE
If the operation was successful, CURLE_OK is returned. Otherwise an
appropriate error code will be returned.

View File

@@ -1,8 +1,7 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_easy_setopt 3 "18 Sep 2002" "libcurl 7.10" "libcurl Manual"
.TH curl_easy_setopt 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_easy_setopt - set options for a curl easy handle
.SH SYNOPSIS
@@ -23,7 +22,8 @@ curl_easy_setopt() calls in the setup phase.
\fBNOTE:\fP strings passed to libcurl as 'char *' arguments, will not be
copied by the library. Instead you should keep them available until libcurl no
longer needs them. Failing to do so will cause very odd behavior or even
crashes.
crashes. libcurl will need them until you call curl_easy_cleanup() or you set
the same option again to use a different pointer.
\fBNOTE2:\fP options set with this function call are valid for the forthcoming
data transfers that are performed when you invoke \fIcurl_easy_perform\fP.
@@ -70,12 +70,12 @@ Function pointer that should match the following prototype: \fBsize_t
function( void *ptr, size_t size, size_t nmemb, void *stream);\fP This
function gets called by libcurl as soon as there is data received that needs
to be saved. The size of the data pointed to by \fIptr\fP is \fIsize\fP
multiplied with \fInmemb\fP. Return the number of bytes actually taken care
of. If that amount differs from the amount passed to your function, it'll
signal an error to the library and it will abort the transfer and return
\fICURLE_WRITE_ERROR\fP.
multiplied with \fInmemb\fP, it will not be zero terminated. Return the number
of bytes actually taken care of. If that amount differs from the amount passed
to your function, it'll signal an error to the library and it will abort the
transfer and return \fICURLE_WRITE_ERROR\fP.
Set the \fIstream\fP argument with the \fBCURLOPT_FILE\fP option.
Set the \fIstream\fP argument with the \fBCURLOPT_WRITEDATA\fP option.
\fBNOTE:\fP you will be passed as much data as possible in all invokes, but
you cannot possibly make any assumptions. It may be one byte, it may be
@@ -143,12 +143,11 @@ operation and an error (CURLE_BAD_PASSWORD_ENTERED) will be returned.
is a zero-terminated string that is text that prefixes the input request.
\fIbuffer\fP is a pointer to data where the entered password should be stored
and \fIbuflen\fP is the maximum number of bytes that may be written in the
buffer. (Added in 7.4.2)
buffer.
.TP
.B CURLOPT_PASSWDDATA
Pass a void * to whatever data you want. The passed pointer will be the first
argument sent to the specified \fICURLOPT_PASSWDFUNCTION\fP function. (Added in
7.4.2)
argument sent to the specified \fICURLOPT_PASSWDFUNCTION\fP function.
.TP
.B CURLOPT_HEADERFUNCTION
Function pointer that should match the following prototype: \fIsize_t
@@ -161,12 +160,12 @@ multiplied with \fInmemb\fP. The pointer named \fIstream\fP will be the one
you passed to libcurl with the \fICURLOPT_WRITEHEADER\fP option. Return the
number of bytes actually written or return -1 to signal error to the library
(it will cause it to abort the transfer with a \fICURLE_WRITE_ERROR\fP return
code). (Added in 7.7.2)
code).
.TP
.B CURLOPT_WRITEHEADER
Pass a pointer to be used to write the header part of the received data to. If
you don't use your own callback to take care of the writing, this must be a
valid FILE *. See also the \fICURLOPT_HEADERFUNCTION\fP option below on how to
valid FILE *. See also the \fICURLOPT_HEADERFUNCTION\fP option above on how to
set a custom get-all-headers callback.
.TP
.B CURLOPT_DEBUGFUNCTION
@@ -175,6 +174,10 @@ curl_debug_callback (CURL *, curl_infotype, char *, size_t, void *);\fP
This function will receive debug information if CURLOPT_VERBOSE is
enabled. The curl_infotype argument specifies what kind of information it
is. This function must return 0.
NOTE: the data pointed to by the char * passed to this function WILL NOT be
zero terminated, but will be exactly of the size as told by the size_t
argument.
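A hedged sketch of such a callback (the names my_trace and curl are made up for
illustration) that simply writes all debug data to stderr could look like:

int my_trace(CURL *handle, curl_infotype type, char *data, size_t size, void *userp)
{
  (void)handle; (void)type; (void)userp;
  fwrite(data, size, 1, stderr); /* not zero terminated, so use the size */
  return 0;
}

curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, my_trace);
curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);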
.TP
.B CURLOPT_DEBUGDATA
Pass a pointer to whatever you want passed in to your CURLOPT_DEBUGFUNCTION in
@@ -235,7 +238,7 @@ you tunnel through the HTTP proxy. Such tunneling is activated with
Pass a long with this option to set the proxy port to connect to unless it is
specified in the proxy string \fICURLOPT_PROXY\fP.
.TP
.B CURLOPT_PROXTYPE
.B CURLOPT_PROXYTYPE
Pass a long with this option to set type of the proxy. Available options for
this are CURLPROXY_HTTP and CURLPROXY_SOCKS5, with the HTTP one being
default. (Added in 7.10)
@@ -244,23 +247,23 @@ default. (Added in 7.10)
Set the parameter to non-zero to get the library to tunnel all operations
through a given HTTP proxy. Note that there is a big difference between using
a proxy and to tunnel through it. If you don't know what this means, you
probably don't want this tunneling option. (Added in 7.3)
probably don't want this tunneling option.
.TP
.B CURLOPT_INTERFACE
Pass a char * as parameter. This set the interface name to use as outgoing
network interface. The name can be an interface name, an IP address or a host
name. (Added in 7.3)
name.
.TP
.B CURLOPT_DNS_CACHE_TIMEOUT
Pass a long, this sets the timeout in seconds. Name resolves will be kept in
memory for this number of seconds. Set to zero (0) to completely disable
caching, or set to -1 to make the cached entries remain forever. By default,
libcurl caches info for 60 seconds. (Added in 7.9.3)
libcurl caches info for 60 seconds.
.TP
.B CURLOPT_DNS_USE_GLOBAL_CACHE
Pass a long. If the value is non-zero, it tells curl to use a global DNS cache
that will survive between easy handle creations and deletions. This is not
thread-safe and this will use a global variable. (Added in 7.9.3)
thread-safe and this will use a global variable.
.TP
.B CURLOPT_BUFFERSIZE
Pass a long specifying your preferred size for the receive buffer in libcurl.
@@ -268,7 +271,7 @@ The main point of this would be that the write callback gets called more often
and with smaller chunks. This is just treated as a request, not an order. You
cannot be guaranteed to actually get the given size. (Added in 7.10)
.PP
.SH NAMES and PASSWORDS OPTIONS
.SH NAMES and PASSWORDS OPTIONS (Authentication)
.TP 0.4i
.B CURLOPT_NETRC
This parameter controls the preference of libcurl between using user names and
@@ -311,17 +314,95 @@ user.
.TP
.B CURLOPT_USERPWD
Pass a char * as parameter, which should be [user name]:[password] to use for
the connection. If the password is left out, you will be prompted for it.
\fICURLOPT_PASSWDFUNCTION\fP can be used to set your own prompt function.
the connection. If both the colon and password are left out, you will be
prompted for it while using a colon with no password will make libcurl use an
empty password. \fICURLOPT_PASSWDFUNCTION\fP can be used to set your own
prompt function.
When using HTTP and CURLOPT_FOLLOWLOCATION, libcurl might perform several
requests to possibly different hosts. libcurl will only send this user and
password information to hosts using the initial host name (unless
CURLOPT_UNRESTRICTED_AUTH is set), so if libcurl follows locations to other
hosts it will not send the user and password to those. This is enforced to
prevent accidental information leakage.
.TP
.B CURLOPT_PROXYUSERPWD
Pass a char * as parameter, which should be [user name]:[password] to use for
the connection to the HTTP proxy. If the password is left out, you will be
prompted for it. \fICURLOPT_PASSWDFUNCTION\fP can be used to set your own
prompt function.
.TP
.B CURLOPT_HTTPAUTH
Pass a long as parameter, which is set to a bitmask, to tell libcurl what
authentication method(s) you want it to use. The available bits are listed
below. If more than one bit is set, libcurl will first query the site to see
what authentication methods it supports and then pick the best one you allow
it to use. Note that for some methods, this will induce an extra network
round-trip. Set the actual name and password with the \fICURLOPT_USERPWD\fP
option. (Added in 7.10.6)
.RS
.TP 5
.B CURLAUTH_BASIC
HTTP Basic authentication. This is the default choice, and the only method
that is in wide-spread use and supported virtually everywhere. This is sending
the user name and password over the network in plain text, easily captured by
others.
.TP
.B CURLAUTH_DIGEST
HTTP Digest authentication. Digest authentication is defined in RFC2617 and
is a more secure way to do authentication over public networks than the
regular old-fashioned Basic method.
.TP
.B CURLAUTH_GSSNEGOTIATE
HTTP GSS-Negotiate authentication. The GSS-Negotiate method was designed by
Microsoft and is used in their web applications. It is primarily meant as
support for Kerberos5 authentication but may also be used along with other
authentication methods. For more information see IETF draft
draft-brezak-spnego-http-04.txt.
.TP
.B CURLAUTH_NTLM
HTTP NTLM authentication. A proprietary protocol invented and used by
Microsoft. It uses a challenge-response and hash concept similar to Digest to
prevent the password from being eavesdropped.
.TP
.B CURLAUTH_ANY
This is a convenience macro that sets all bits and thus makes libcurl pick any
it finds suitable. libcurl will automatically select the one it finds most
secure.
.TP
.B CURLAUTH_ANYSAFE
This is a convenience macro that sets all bits except Basic and thus makes
libcurl pick any it finds suitable. libcurl will automatically select the one it
finds most secure.
.RE
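A brief sketch (the easy handle name, user name and password are placeholders)
that lets libcurl pick between Basic and Digest could look like:

curl_easy_setopt(curl, CURLOPT_HTTPAUTH, (long)(CURLAUTH_BASIC|CURLAUTH_DIGEST));
curl_easy_setopt(curl, CURLOPT_USERPWD, "user:secret");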
.TP
.B CURLOPT_PROXYAUTH
Pass a long as parameter, which is set to a bitmask, to tell libcurl what
authentication method(s) you want it to use for your proxy authentication. If
more than one bit is set, libcurl will first query the site to see what
authentication methods it supports and then pick the best one you allow it to
use. Note that for some methods, this will induce an extra network
round-trip. Set the actual name and password with the
\fICURLOPT_PROXYUSERPWD\fP option. The bitmask can be constructed by or'ing
together the bits listed above for the \fICURLOPT_HTTPAUTH\fP option. As of
this writing, only Basic and NTLM work. (Added in 7.10.7)
.PP
.SH HTTP OPTIONS
.TP 0.4i
.B CURLOPT_ENCODING
Sets the contents of the Accept-Encoding: header sent in an HTTP
request, and enables decoding of a response when a Content-Encoding:
header is received. Three encodings are supported: \fIidentity\fP,
which does nothing, \fIdeflate\fP which requests the server to
compress its response using the zlib algorithm, and \fIgzip\fP which
requests the gzip algorithm. If a zero-length string is set, then an
Accept-Encoding: header containing all supported encodings is sent.
This is a request, not an order; the server may or may not do it. This
option must be set (to any non-NULL value) or else any unsolicited
encoding done by the server is ignored. See the special file
lib/README.encoding for details.
.TP
.B CURLOPT_FOLLOWLOCATION
A non-zero parameter tells the library to follow any Location: header that the
server sends as part of a HTTP header.
@@ -331,11 +412,16 @@ new location and follow new Location: headers all the way until no more such
headers are returned. \fICURLOPT_MAXREDIRS\fP can be used to limit the number
of redirects libcurl will follow.
.TP
.B CURLOPT_UNRESTRICTED_AUTH
A non-zero parameter tells the library it can continue to send authentication
(user+password) when following locations, even when the hostname changes. Note
that this is meaningful only when setting \fICURLOPT_FOLLOWLOCATION\fP.
.TP
.B CURLOPT_MAXREDIRS
Pass a long. The set number will be the redirection limit. If that many
redirections have been followed, the next redirect will cause an error
(\fICURLE_TOO_MANY_REDIRECTS\fP). This option only makes sense if the
\fICURLOPT_FOLLOWLOCATION\fP is used at the same time. (Added in 7.5)
\fICURLOPT_FOLLOWLOCATION\fP is used at the same time.
.TP
.B CURLOPT_PUT
A non-zero parameter tells the library to use HTTP PUT to transfer data. The
@@ -351,9 +437,14 @@ will imply this option.
.TP
.B CURLOPT_POSTFIELDS
Pass a char * as parameter, which should be the full data to post in a HTTP
post operation. This is a normal application/x-www-form-urlencoded kind, which
is the most commonly used one by HTML forms. See also the CURLOPT_POST. Since
7.8, using CURLOPT_POSTFIELDS implies CURLOPT_POST.
post operation. You need to make sure that the data is formatted the way you
want the server to receive it. libcurl will not convert or encode it for
you. Most web servers will assume this data to be url-encoded. Take note.
This POST is a normal application/x-www-form-urlencoded kind (and libcurl will
set that Content-Type by default when this option is used), which is the most
commonly used one by HTML forms. See also the CURLOPT_POST. Using
CURLOPT_POSTFIELDS implies CURLOPT_POST.
\fBNote:\fP to make multipart/formdata posts (aka rfc1867-posts), check out
the \fICURLOPT_HTTPPOST\fP option.
@@ -362,8 +453,7 @@ the \fICURLOPT_HTTPPOST\fP option.
If you want to post data to the server without letting libcurl do a strlen()
to measure the data size, this option must be used. When this option is used
you can post fully binary data, which otherwise is likely to fail. If this
size is set to zero, the library will use strlen() to get the size. (Added in
libcurl 7.2)
size is set to zero, the library will use strlen() to get the size.
.TP
.B CURLOPT_HTTPPOST
Tells libcurl you want a multipart/formdata HTTP POST to be made and you
@@ -395,11 +485,35 @@ list. If you add a header that is otherwise generated and used by libcurl
internally, your added one will be used instead. If you add a header with no
contents as in 'Accept:' (no data on the right side of the colon), the
internally used header will get disabled. Thus, using this option you can add
new headers, replace internal headers and remove internal headers.
new headers, replace internal headers and remove internal headers. The
headers included in the linked list must not be CRLF-terminated, because
curl adds CRLF after each header item. Failure to comply with this will
result in strange bugs because the server will most likely ignore part
of the headers you specified.
The first line in a request (usually containing a GET or POST) is not a header
and cannot be replaced using this option. Only the lines following the
request-line are headers.
\fBNOTE:\fP The most commonly replaced headers have "shortcuts" in the options
CURLOPT_COOKIE, CURLOPT_USERAGENT and CURLOPT_REFERER.
.TP
.B CURLOPT_HTTP200ALIASES
Pass a pointer to a linked list of aliases to be treated as valid HTTP 200
responses. Some servers respond with a custom header response line. For
example, IceCast servers respond with "ICY 200 OK". By including this string
in your list of aliases, the response will be treated as a valid HTTP header
line such as "HTTP/1.0 200 OK". (Added in 7.10.3)
The linked list should be a fully valid list of struct curl_slist structs, and
be properly filled in. Use \fIcurl_slist_append(3)\fP to create the list and
\fIcurl_slist_free_all(3)\fP to clean up an entire list.
\fBNOTE:\fP The alias itself is not parsed for any version strings. So if your
alias is "MYHTTP/9.9", libcurl will not treat the server as responding with
HTTP version 9.9. Instead libcurl will use the value set by option
\fICURLOPT_HTTP_VERSION\fP.
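A short sketch (assuming an easy handle named curl) adding the IceCast response
line as an alias:

struct curl_slist *aliases = NULL;

aliases = curl_slist_append(aliases, "ICY 200 OK");
curl_easy_setopt(curl, CURLOPT_HTTP200ALIASES, aliases);

/* ... perform the transfer(s) ... */

curl_slist_free_all(aliases);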
.TP
.B CURLOPT_COOKIE
Pass a pointer to a zero terminated string as parameter. It will be used to
set a cookie in the http request. The format of the string should be
@@ -429,7 +543,14 @@ internally known cookies to the specified file when \fIcurl_easy_cleanup(3)\fP
is called. If no cookies are known, no file will be created. Specify "-" to
instead have the cookies written to stdout. Using this option also enables
cookies for this session, so if you for example follow a location it will make
matching cookies get sent accordingly. (Added in 7.9)
matching cookies get sent accordingly.
.B NOTE
If the cookie jar file can't be created or written to (when the
curl_easy_cleanup() is called), libcurl will not and cannot report an error
for this. Using CURLOPT_VERBOSE or CURLOPT_DEBUGFUNCTION will get a warning to
display, but that is the only visible feedback you get about this possibly
lethal situation.
.TP
.B CURLOPT_TIMECONDITION
Pass a long as parameter. This defines how the CURLOPT_TIMEVALUE time value is
@@ -444,7 +565,7 @@ CURLOPT_TIMECONDITION.
.B CURLOPT_HTTPGET
Pass a long. If the long is non-zero, this forces the HTTP request to get back
to GET. Only really usable if POST, PUT or a custom request have been used
previously using the same curl handle. (Added in 7.8.1)
previously using the same curl handle.
.TP
.B CURLOPT_HTTP_VERSION
Pass a long, set to one of the values described below. They force libcurl to
@@ -472,6 +593,9 @@ tells the remote server to connect to our specified IP address. The string may
be a plain IP address, a host name, a network interface name (under Unix) or
just a '-' letter to let the library use your system's default IP
address. Default FTP operations are passive, and thus won't use PORT.
You disable PORT again and go back to using the passive version by setting
this option to NULL.
.TP
.B CURLOPT_QUOTE
Pass a pointer to a linked list of FTP commands to pass to the server prior to
@@ -509,11 +633,23 @@ and symbolic links.
A non-zero parameter tells the library to append to the remote file instead of
overwrite it. This is only useful when uploading to a ftp site.
.TP
.B CURLOPT_FTP_USE_EPRT
Pass a long. If the value is non-zero, it tells curl to use the EPRT (and
LPRT) command when doing active FTP downloads (which is enabled by
CURLOPT_FTPPORT). Using EPRT means that it will first attempt to use EPRT and
then LPRT before using PORT, but if you pass FALSE (zero) to this option, it
will not try using EPRT or LPRT, only plain PORT. (Added in 7.10.5)
.TP
.B CURLOPT_FTP_USE_EPSV
Pass a long. If the value is non-zero, it tells curl to use the EPSV command
when doing passive FTP downloads (which it always does by default). Using EPSV
means that it will first attempt to use EPSV before using PASV, but if you
pass FALSE (zero) to this option, it will not try using EPSV, only plain PASV.
.TP
.B CURLOPT_FTP_CREATE_MISSING_DIRS
Pass a long. If the value is non-zero, curl will attempt to create any remote
directory that it fails to CWD into. CWD is the command that changes working
directory. (Added in 7.10.7)
.PP
.SH PROTOCOL OPTIONS
.TP 0.4i
@@ -542,17 +678,25 @@ want the transfer to start from.
.TP
.B CURLOPT_CUSTOMREQUEST
Pass a pointer to a zero terminated string as parameter. It will be used
instead of GET or HEAD when doing the HTTP request. This is useful for doing
DELETE or other more or less obscure HTTP requests. Don't do this at will,
make sure your server supports the command first.
instead of GET or HEAD when doing a HTTP request, or instead of LIST or NLST
when doing an ftp directory listing. This is useful for doing DELETE or other
more or less obscure HTTP requests. Don't do this at will, make sure your
server supports the command first.
NOTE: many people have wrongly used this option to replace the entire request
with their own, including multiple headers and POST contents. While that might
work in many cases, it will cause libcurl to send invalid requests and it
could possibly confuse the remote server badly. Use \fICURLOPT_POST\fP and
\fICURLOPT_POSTFIELDS\fP to set POST data. Use \fICURLOPT_HTTPHEADER\fP to
replace or extend the set of headers sent by libcurl. Use
\fICURLOPT_HTTP_VERSION\fP to change HTTP version.
.TP
.B CURLOPT_FILETIME
Pass a long. If it is a non-zero value, libcurl will attempt to get the
modification date of the remote document in this operation. This requires that
the remote server sends the time or replies to a time querying command. The
\fIcurl_easy_getinfo(3)\fP function with the \fICURLINFO_FILETIME\fP argument
can be used after a transfer to extract the received time (if any). (Added in
7.5)
can be used after a transfer to extract the received time (if any).
.TP
.B CURLOPT_NOBODY
A non-zero parameter tells the library to not include the body-part in the
@@ -577,7 +721,7 @@ aborting perfectly normal operations. This option will cause curl to use the
SIGALRM to enable time-outing system calls.
\fBNOTE:\fP this is not recommended to use in unix multi-threaded programs, as
it uses signals unless CURLOPT_NOSIGNAL (see below) is set.
it uses signals unless CURLOPT_NOSIGNAL (see above) is set.
.TP
.B CURLOPT_LOW_SPEED_LIMIT
Pass a long as parameter. It contains the transfer speed in bytes per second
@@ -603,7 +747,7 @@ open connections to increase.
\fBNOTE:\fP if you already have performed transfers with this curl handle,
setting a smaller MAXCONNECTS than before may cause open connections to get
closed unnecessarily. (Added in 7.7)
closed unnecessarily.
.TP
.B CURLOPT_CLOSEPOLICY
Pass a long. This option sets what policy libcurl should use when the
@@ -614,7 +758,7 @@ the connection that was least recently used, that connection is also least
likely to be capable of re-use. Use \fICURLCLOSEPOLICY_OLDEST\fP to make
libcurl close the oldest connection, the one that was created first among the
ones in the connection cache. The other close policies are not supported
yet. (Added in 7.7)
yet.
.TP
.B CURLOPT_FRESH_CONNECT
Pass a long. Set to non-zero to make the next transfer use a new (fresh)
@@ -622,7 +766,7 @@ connection by force. If the connection cache is full before this connection,
one of the existing connections will be closed as according to the selected or
default policy. This option should be used with caution and only if you
understand what it does. Set this to 0 to have libcurl attempt re-using an
existing connection (default behavior). (Added in 7.7)
existing connection (default behavior).
.TP
.B CURLOPT_FORBID_REUSE
Pass a long. Set to non-zero to make the next transfer explicitly close the
@@ -630,7 +774,7 @@ connection when done. Normally, libcurl keeps all connections alive when done
with one transfer in case there comes a succeeding one that can re-use them.
This option should be used with caution and only if you understand what it
does. Set to 0 to have libcurl keep the connection open for possibly later
re-use (default behavior). (Added in 7.7)
re-use (default behavior).
.TP
.B CURLOPT_CONNECTTIMEOUT
Pass a long. It should contain the maximum time in seconds that you allow the
@@ -640,7 +784,7 @@ connection timeout (it will then only timeout on the system's internal
timeouts). See also the \fICURLOPT_TIMEOUT\fP option.
\fBNOTE:\fP this is not recommended to use in unix multi-threaded programs, as
it uses signals unless CURLOPT_NOSIGNAL (see below) is set.
it uses signals unless CURLOPT_NOSIGNAL (see above) is set.
.PP
.SH SSL and SECURITY OPTIONS
.TP 0.4i
@@ -667,12 +811,11 @@ a certificate but you need one to load your private key.
.B CURLOPT_SSLKEY
Pass a pointer to a zero terminated string as parameter. The string should be
the file name of your private key. The default format is "PEM" and can be
changed with \fICURLOPT_SSLKEYTYPE\fP. (Added in 7.9.3)
changed with \fICURLOPT_SSLKEYTYPE\fP.
.TP
.B CURLOPT_SSLKEYTYPE
Pass a pointer to a zero terminated string as parameter. The string should be
the format of your private key. Supported formats are "PEM", "DER" and "ENG".
(Added in 7.9.3)
\fBNOTE:\fP The format "ENG" enables you to load the private key from a crypto
engine. in this case \fICURLOPT_SSLKEY\fP is used as an identifier passed to
@@ -683,19 +826,18 @@ Pass a pointer to a zero terminated string as parameter. It will be used as
the password required to use the \fICURLOPT_SSLKEY\fP private key. If the
password is not supplied, you will be prompted for
it. \fICURLOPT_PASSWDFUNCTION\fP can be used to set your own prompt function.
(Added in 7.9.3)
.TP
.B CURLOPT_SSL_ENGINE
Pass a pointer to a zero terminated string as parameter. It will be used as
the identifier for the crypto engine you want to use for your private
key. (Added in 7.9.3)
key.
\fBNOTE:\fP If the crypto device cannot be loaded,
\fICURLE_SSL_ENGINE_NOTFOUND\fP is returned.
.TP
.B CURLOPT_SSL_ENGINEDEFAULT
Sets the actual crypto engine as the default for (asymetric) crypto
operations. (Added in 7.9.3)
operations.
\fBNOTE:\fP If the crypto device cannot be set,
\fICURLE_SSL_ENGINE_SETFAILED\fP is returned.
@@ -706,15 +848,18 @@ Pass a long as parameter. Set what version of SSL to attempt to use, 2 or
servers make this difficult, which is why you at times may have to use this option.
.TP
.B CURLOPT_SSL_VERIFYPEER
Pass a long that is set to a non-zero value to make curl verify the peer's
certificate. The certificate to verify against must be specified with the
CURLOPT_CAINFO option (Added in 7.4.2) or a certificate directory must be specified
with the CURLOPT_CAPATH option (Added in 7.9.8).
Pass a long that is set to a zero value to stop curl from verifying the peer's
certificate (starting with 7.10, this option is set to TRUE by default). Alternate
certificates to verify against can be specified with the CURLOPT_CAINFO option
or a certificate directory can be specified with the CURLOPT_CAPATH option
(Added in 7.9.8). As of 7.10, curl installs a default bundle.
CURLOPT_SSL_VERIFYHOST may also need to be set to 1 or 0 if
CURLOPT_SSL_VERIFYPEER is disabled (it defaults to 2).
.TP
.B CURLOPT_CAINFO
Pass a char * to a zero terminated string naming a file holding one or more
certificates to verify the peer with. This only makes sense when used in
combination with the CURLOPT_SSL_VERIFYPEER option. (Added in 7.4.2)
combination with the CURLOPT_SSL_VERIFYPEER option.
.TP
.B CURLOPT_CAPATH
Pass a char * to a zero terminated string naming a directory holding multiple
@@ -736,7 +881,7 @@ socket. It will be used to seed the random engine for SSL.
.B CURLOPT_SSL_VERIFYHOST
Pass a long. Set if we should verify the Common name from the peer certificate
in the SSL handshake, set 1 to check existence, 2 to ensure that it matches
the provided hostname. (Added in 7.8.1)
the provided hostname. This is by default set to 2. (default changed in 7.10)
.TP
.B CURLOPT_SSL_CIPHER_LIST
Pass a char *, pointing to a zero terminated string holding the list of
@@ -755,7 +900,14 @@ Pass a char * as parameter. Set the krb4 security level, this also enables
krb4 awareness. This is a string, 'clear', 'safe', 'confidential' or
\&'private'. If the string is set but doesn't match one of these, 'private'
will be used. Set the string to NULL to disable kerberos4. The kerberos
support only works for FTP. (Added in 7.3)
support only works for FTP.
.PP
.SH OTHER OPTIONS
.TP 0.4i
.B CURLOPT_PRIVATE
Pass a char * as parameter, pointing to data that should be associated with
the curl handle. The pointer can be subsequently retrieved using the
CURLINFO_PRIVATE options to curl_easy_getinfo. (Added in 7.10.3)
.PP
.SH RETURN VALUE
CURLE_OK (zero) means that the option was set properly, non-zero means an
@@ -763,7 +915,3 @@ error occurred as \fI<curl/curl.h>\fP defines. See the \fIlibcurl-errors.3\fP
man page for the full list with descriptions.
.SH "SEE ALSO"
.BR curl_easy_init "(3), " curl_easy_cleanup "(3), "
.SH BUGS
If you find any bugs, or just have questions, subscribe to one of the mailing
lists and post. We won't bite.

View File

@@ -19,78 +19,104 @@ the \fIfirstitem\fP pointer as parameter to \fBCURLOPT_HTTPPOST\fP.
\fIlastitem\fP is set after each call and on repeated invokes it should be
left as set to allow repeated invokes to find the end of the list faster.
After the \fIlastitem\fP pointer follow the real arguments. (If the following
description confuses you, jump directly to the examples):
\fBCURLFORM_COPYNAME\fP or \fBCURLFORM_PTRNAME\fP followed by a string is used
for the name of the section. Optionally one may use \fBCURLFORM_NAMELENGTH\fP
to specify the length of the name (allowing null characters within the
name). All options that use the word COPY in their names copy the given
contents, while the ones with PTR in their names simply point to the (static)
data that you must make sure remains until curl no longer needs it.
The options for providing values are: \fBCURLFORM_COPYCONTENTS\fP,
\fBCURLFORM_PTRCONTENTS\fP, \fBCURLFORM_FILE\fP, \fBCURLFORM_BUFFER\fP,
or \fBCURLFORM_FILECONTENT\fP followed by a char or void pointer
(allowed for PTRCONTENTS).
\fBCURLFORM_FILECONTENT\fP does a normal post like \fBCURLFORM_COPYCONTENTS\fP
but the actual value is read from the filename given as a string.
Other arguments may be \fBCURLFORM_CONTENTTYPE\fP if the user wishes to
specify one (for FILE if no type is given the library tries to provide the
correct one; for CONTENTS no Content-Type is sent in this case).
For \fBCURLFORM_PTRCONTENTS\fP or \fBCURLFORM_COPYNAME\fP the user may also
add \fBCURLFORM_CONTENTSLENGTH\fP followed by the length as a long (if not
given the library will use strlen to determine the length).
For \fBCURLFORM_FILE\fP the user may send multiple files in one section by
providing multiple \fBCURLFORM_FILE\fP arguments each followed by the filename
(and each FILE is allowed to have a CONTENTTYPE).
\fBCURLFORM_BUFFER\fP
tells libcurl that a buffer is to be used to upload data instead of using a
file. The value of the next parameter is used as the value of the "filename"
parameter in the content header.
\fBCURLFORM_BUFFERPTR\fP
tells libcurl that the address of the next parameter is a pointer to the buffer
containing data to upload. The buffer containing this data must not be freed
until after curl_easy_cleanup is called.
\fBCURLFORM_BUFFERLENGTH\fP
tells libcurl that the length of the buffer to upload is the value of the
next parameter.
Another possibility to send options to curl_formadd() is the
\fBCURLFORM_ARRAY\fP option, that passes a struct curl_forms array pointer as
its value. Each curl_forms structure element has a CURLformoption and a char
pointer. The final element in the array must be a CURLFORM_END. All available
options can be used in an array, except the CURLFORM_ARRAY option itself!
Should you need to specify extra headers for the form POST section, use
\fBCURLFORM_CONTENTHEADER\fP. This takes a curl_slist prepared in the usual way
using \fBcurl_slist_append\fP and appends the list of headers to those Curl
automatically generates for \fBCURLFORM_CONTENTTYPE\fP and the content
disposition. The list must exist while the POST occurs, if you free it before
the post completes you may experience problems.
The last argument in such an array must always be \fBCURLFORM_END\fP.
After the \fIlastitem\fP pointer follow the real arguments.
The pointers \fI*firstitem\fP and \fI*lastitem\fP should both be pointing to
NULL in the first call to this function. All list-data will be allocated by
the function itself. You must call \fIcurl_formfree\fP after the form post has
been done to free the resources again.
This function will copy all input data except the data pointed to by the
arguments after \fBCURLFORM_PTRNAME\fP and \fBCURLFORM_PTRCONTENTS\fP and keep
its own version of it allocated until you call \fIcurl_formfree\fP. When
you've passed the pointer to \fIcurl_easy_setopt\fP, you must not free the
list until after you've called \fIcurl_easy_cleanup\fP for the curl handle. If
you provide a pointer as an arguments after \fBCURLFORM_PTRNAME\fP or
\fBCURLFORM_PTRCONTENTS\fP you must ensure that the pointer stays valid until
you call \fIcurl_form_free\fP and \fIcurl_easy_cleanup\fP.
First, there are some basics you need to understand about multipart/formdata
posts. Each part consists of at least a NAME and a CONTENTS part. If the part
is made for file upload, there are also a stored CONTENT-TYPE and a
FILENAME. Below, we'll discuss what options you use to set these
properties in the parts you want to add to your post.
.SH OPTIONS
.B CURLFORM_COPYNAME
followed by string is used to set the name of this part. libcurl copies the
given data, so your application doesn't need to keep it around after this
function call. If the name isn't zero terminated properly, or if you'd like it
to contain zero bytes, you need to set the length of the name with
\fBCURLFORM_NAMELENGTH\fP.
.B CURLFORM_PTRNAME
followed by a string is used for the name of this part. libcurl will use the
pointer and refer to the data in your application, you must make sure it
remains until curl no longer needs it. If the name isn't zero terminated
properly, or if you'd like it to contain zero bytes, you need to set the
length of the name with \fBCURLFORM_NAMELENGTH\fP.
.B CURLFORM_COPYCONTENTS
followed by a string is used for the contents of this part, the actual data to
send away. libcurl copies the given data, so your application doesn't need to
keep it around after this function call. If the data isn't zero terminated
properly, or if you'd like it to contain zero bytes, you need to set the
length of the contents with \fBCURLFORM_CONTENTSLENGTH\fP.
.B CURLFORM_PTRCONTENTS
followed by a string is used for the contents of this part, the actual data to
send away. libcurl will use the pointer and refer to the data in your
application, you must make sure it remains until curl no longer needs it. If
the data isn't zero terminated properly, or if you'd like it to contain zero
bytes, you need to set the length of the contents with
\fBCURLFORM_CONTENTSLENGTH\fP.
.B CURLFORM_FILECONTENT
followed by a file name, makes libcurl read that file and use its contents
as data in this part.
.B CURLFORM_FILE
followed by a file name, makes this part a file upload part. It sets the file
name field to the actual file name used here, it gets the contents of the file
and passes them as data and sets the content-type if the given file matches one
of the internally known file extensions. For \fBCURLFORM_FILE\fP the user may
send one or more files in one part by providing multiple \fBCURLFORM_FILE\fP
arguments each followed by the filename (and each CURLFORM_FILE is allowed to
have a CURLFORM_CONTENTTYPE).
.B CURLFORM_CONTENTTYPE
followed by a pointer to a string with a content-type will make curl use this
given content-type for this file upload part, possibly instead of an
internally chosen one.
.B CURLFORM_FILENAME
followed by a pointer to a string to a name, will make libcurl use the given
name in the file upload part, instead of the actual file name given to
\fICURLFORM_FILE\fP.
.B CURLFORM_BUFFER
followed by a string, tells libcurl that a buffer is to be used to upload data
instead of using a file. The given string is used as the value of the file
name field in the content header.
.B CURLFORM_BUFFERPTR
followed by a pointer to a data area, tells libcurl the address of the buffer
containing data to upload (as indicated with \fICURLFORM_BUFFER\fP). The
buffer containing this data must not be freed until after curl_easy_cleanup is
called.
.B CURLFORM_BUFFERLENGTH
followed by a long with the size of the \fICURLFORM_BUFFERPTR\fP data area,
tells libcurl the length of the buffer to upload.
.B CURLFORM_ARRAY
Another possibility to send options to curl_formadd() is the
\fBCURLFORM_ARRAY\fP option, that passes a struct curl_forms array pointer as
its value. Each curl_forms structure element has a CURLformoption and a char
pointer. The final element in the array must be a CURLFORM_END. All available
options can be used in an array, except the CURLFORM_ARRAY option itself! The
last argument in such an array must always be \fBCURLFORM_END\fP.
.B CURLFORM_CONTENTHEADER
specifies extra headers for the form POST section. This takes a curl_slist
prepared in the usual way using \fBcurl_slist_append\fP and appends the list
of headers to those libcurl automatically generates. The list must exist while
the POST occurs, if you free it before the post completes you may experience
problems.
When you've passed the HttpPost pointer to \fIcurl_easy_setopt\fP (using the
\fICURLOPT_HTTPPOST\fP option), you must not free the list until after you've
called \fIcurl_easy_cleanup\fP for the curl handle.
See example below.
.SH RETURN VALUE

View File

@@ -2,16 +2,16 @@
.\" nroff -man [file]
.\" $Id:
.\"
.TH curl_free 3 "24 Sept 2002" "libcurl 7.10" "libcurl Manual"
.TH curl_free 3 "12 Aug 2003" "libcurl 7.10" "libcurl Manual"
.SH NAME
curl_free - reclaim memory that has been obtained through a libcurl call
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "void *curl_free( char *" ptr " );"
.BI "void curl_free( char *" ptr " );"
.ad
.SH DESCRIPTION
curl_free reclaims memory that has been obtained through a libcurl call.
Use curl_free() instead of free() to avoid anomalies that can result from differences in memory management between your application and libcurl.
.SH "SEE ALSO"
.I curl_unescape(), curl_free()
.I curl_unescape()

View File

@@ -2,7 +2,7 @@
.\"
.TH curl_multi_fdset 3 "3 May 2002" "libcurl 7.9.5" "libcurl Manual"
.SH NAME
curl_multi_fdset - add an easy handle to a multi session
curl_multi_fdset - extracts file descriptor information from a multi handle
.SH SYNOPSIS
#include <curl/curl.h>

View File

@@ -1,6 +1,6 @@
.\" $Id$
.\"
.TH curl_multi_info_read 3 "1 March 2002" "libcurl 7.9.5" "libcurl Manual"
.TH curl_multi_info_read 3 "27 Feb 2002" "libcurl 7.10.3" "libcurl Manual"
.SH NAME
curl_multi_info_read - read multi stack informationals
.SH SYNOPSIS
@@ -10,15 +10,15 @@ CURLMsg *curl_multi_info_read( CURLM *multi_handle,
int *msgs_in_queue);
.ad
.SH DESCRIPTION
Ask the multi handle if there's any messages/informationals from the
individual transfers. Messages include informationals such as an error code
from the transfer or just the fact that a transfer is completed. More details
on these should be written down as well.
Ask the multi handle if there are any messages/informationals from the
individual transfers. Messages may include informationals such as an error
code from the transfer or just the fact that a transfer is completed. More
details on these should be written down as well.
Repeated calls to this function will return a new struct each time, until a
special "end of msgs" struct is returned as a signal that there is no more to
get at this point. The integer pointed to with \fImsgs_in_queue\fP will
contain the number of remaining messages after this function was called.
NULL is returned as a signal that there is no more to get at this point. The
integer pointed to with \fImsgs_in_queue\fP will contain the number of
remaining messages after this function was called.
The data the returned pointer points to will not survive calling
curl_multi_cleanup().
@@ -26,10 +26,19 @@ curl_multi_cleanup().
The 'CURLMsg' struct is very simple and only contains very basic information.
If more involved information is wanted, the particular "easy handle" is
present in that struct and can thus be used in subsequent regular
curl_easy_getinfo() calls (or similar).
curl_easy_getinfo() calls (or similar):
struct CURLMsg {
CURLMSG msg; /* what this message means */
CURL *easy_handle; /* the handle it concerns */
union {
void *whatever; /* message-specific data */
CURLcode result; /* return code for transfer */
} data;
};
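A hedged usage sketch (assuming a multi handle named multi_handle) that drains
all pending messages after a curl_multi_perform() call:

CURLMsg *msg;
int msgs_left;

while((msg = curl_multi_info_read(multi_handle, &msgs_left))) {
  if(msg->msg == CURLMSG_DONE)
    fprintf(stderr, "transfer done, result %d\n", (int)msg->data.result);
}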
.SH "RETURN VALUE"
A pointer to a filled-in struct, or NULL if it failed or ran out of
structs. It also writes the number of messages left in the queue (after this
read) in the integer the second argument points to.
.SH "SEE ALSO"
.BR curl_multi_cleanup "(3)," curl_multi_init "(3)," curl_multi_perform "(3)"
.BR curl_multi_cleanup "(3), " curl_multi_init "(3), " curl_multi_perform "(3)"

View File

@@ -2,7 +2,7 @@
.\"
.TH curl_multi_perform 3 "1 March 2002" "libcurl 7.9.5" "libcurl Manual"
.SH NAME
curl_multi_perform - add an easy handle to a multi session
curl_multi_perform - reads/writes available data from each easy handle
.SH SYNOPSIS
#include <curl/curl.h>
@@ -19,6 +19,12 @@ integer-pointer.
.SH "RETURN VALUE"
CURLMcode type, general libcurl multi interface error code.
If you receive \fICURLM_CALL_MULTI_PERFORM\fP, this basically means that you
should call \fIcurl_multi_perform\fP again, before you select() on more
actions. You don't have to do it immediately, but the return code means that
libcurl may have more data available to return or that there may be more data
to send off before it is "satisfied".
NOTE that this only returns errors etc regarding the whole multi stack. Problems
might still have occurred on individual transfers even when this
function returns OK.

View File

@@ -2,7 +2,7 @@
.\"
.TH curl_multi_remove_handle 3 "6 March 2002" "libcurl 7.9.5" "libcurl Manual"
.SH NAME
curl_multi_remove_handle - add an easy handle to a multi session
curl_multi_remove_handle - remove an easy handle from a multi session
.SH SYNOPSIS
#include <curl/curl.h>
@@ -14,6 +14,9 @@ specified easy handle be removed from this multi handle's control.
When the easy handle has been removed from a multi stack, it is again
perfectly legal to invoke \fIcurl_easy_perform()\fP on this easy handle.
Removing a handle while it is in use will effectively halt all transfers in
progress.
.SH RETURN VALUE
CURLMcode type, general libcurl multi interface error code.
.SH "SEE ALSO"

View File

@@ -0,0 +1,19 @@
.\" $Id$
.\"
.TH curl_share_cleanup 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_cleanup - Clean up a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "CURLSHcode curl_share_cleanup( );"
.ad
.SH DESCRIPTION
This function deletes a shared object. The share handle cannot be used anymore
when this function has been called.
.SH RETURN VALUE
If this function returns non-zero, the object was not properly deleted and it
still remains!
.SH "SEE ALSO"
.BR curl_share_init "(3), " curl_share_setopt "(3)"

View File

@@ -0,0 +1,21 @@
.\" $Id$
.\"
.TH curl_share_init 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_init - Create a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "CURLSH *curl_share_init( );"
.ad
.SH DESCRIPTION
This function returns a CURLSH handle to be used as input to all the other
share-functions, sometimes referred to as a share handle in some places in the
documentation. This init call MUST have a corresponding call to
\fIcurl_share_cleanup\fP when all operations using the share are complete.
.SH RETURN VALUE
If this function returns NULL, something went wrong and you got no share
object to use.
.SH "SEE ALSO"
.BR curl_share_cleanup "(3), " curl_share_setopt "(3)"

View File

@@ -0,0 +1,46 @@
.\" $Id$
.\"
.TH curl_share_setopt 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_setopt - Set options for a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
CURLSHcode curl_share_setopt(CURLSH *share, CURLSHoption option, parameter);
.ad
.SH DESCRIPTION
Set the \fIoption\fP to \fIparameter\fP for the given \fIshare\fP.
.SH OPTIONS
.TP 0.4i
.B CURLSHOPT_LOCKFUNC
The \fIparameter\fP must be a pointer to a function matching the following
prototype:
void lock_function(CURL *handle, curl_lock_data data, curl_lock_access access,
void *userptr);
\fIdata\fP defines what data libcurl wants to lock, and you must make sure that
only one lock is given at any time for each kind of data.
\fIaccess\fP defines what access type libcurl wants, shared or single.
\fIuserptr\fP is the pointer you set with \fICURLSHOPT_USERDATA\fP.
.TP
.B CURLSHOPT_UNLOCKFUNC
The \fIparameter\fP must be a pointer to a function matching the following
prototype:
void unlock_function(CURL *handle, curl_lock_data data, void *userptr);
\fIdata\fP defines what data libcurl wants to unlock, and \fIuserptr\fP is the
pointer you set with \fICURLSHOPT_USERDATA\fP.
.TP
.B CURLSHOPT_SHARE
The \fIparameter\fP specifies a type of data that should be shared, for
example \fICURL_LOCK_DATA_COOKIE\fP or \fICURL_LOCK_DATA_DNS\fP.
.TP
.B CURLSHOPT_UNSHARE
The \fIparameter\fP specifies a type of data that should no longer be shared.
.TP
.B CURLSHOPT_USERDATA
The \fIparameter\fP allows you to specify a pointer to data that will be
passed to the lock function and the unlock function.
.PP
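A hedged sketch of lock/unlock callbacks matching the prototypes above; the
single pthread mutex and the names my_lock/my_unlock are assumptions used only
for illustration:

#include <pthread.h>
#include <curl/curl.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

static void my_lock(CURL *handle, curl_lock_data data,
                    curl_lock_access access, void *userptr)
{
  (void)handle; (void)data; (void)access; (void)userptr;
  pthread_mutex_lock(&mutex);   /* one coarse lock for every kind of data */
}

static void my_unlock(CURL *handle, curl_lock_data data, void *userptr)
{
  (void)handle; (void)data; (void)userptr;
  pthread_mutex_unlock(&mutex);
}

/* later, on an already created share handle: */
curl_share_setopt(share, CURLSHOPT_LOCKFUNC, my_lock);
curl_share_setopt(share, CURLSHOPT_UNLOCKFUNC, my_unlock);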
.SH RETURN VALUE
If this function returns non-zero, something was wrong!
.SH "SEE ALSO"
.BR curl_share_cleanup "(3), " curl_share_init "(3)"

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_slist_append 3 "5 March 2001" "libcurl 7.0" "libcurl Manual"
.TH curl_slist_append 3 "19 Jun 2003" "libcurl 7.10.4" "libcurl Manual"
.SH NAME
curl_slist_append - add a string to an slist
.SH SYNOPSIS
@@ -13,17 +13,27 @@ curl_slist_append - add a string to an slist
.ad
.SH DESCRIPTION
curl_slist_append() appends a specified string to a linked list of
strings. The existing
.I list
should be passed as the first argument while the new list is returned from
this function. The specified
.I string
has been appended when this function returns.
strings. The existing \fIlist\fP should be passed as the first argument while
the new list is returned from this function. The specified \fIstring\fP has
been appended when this function returns. curl_slist_append() copies the
string.
The list should be freed again (after usage) with \fBcurl_slist_free_all()\fP.
.SH RETURN VALUE
A null pointer is returned if anything went wrong, otherwise the new list
pointer is returned.
.SH EXAMPLE
CURL *handle;
struct curl_slist *slist=NULL;
slist = curl_slist_append(slist, "pragma:");
curl_easy_setopt(handle, CURLOPT_HTTPHEADER, slist);
curl_easy_perform(handle);
curl_slist_free_all(slist); /* free the list again */
.SH "SEE ALSO"
.BR curl_slist_free_all "(3), "
.SH BUGS
Surely there are some, you tell me!
None.

View File

@@ -13,15 +13,7 @@ curl_version - returns the libcurl version string
.SH DESCRIPTION
Returns a human readable string with the version number of libcurl and some of
its important components (like OpenSSL version).
Note: this returns the actual running lib's version, you might have installed
a newer lib's include files in your system which may make your LIBCURL_VERSION
#define value differ from this result.
.SH RETURN VALUE
A pointer to a zero terminated string.
.SH "SEE ALSO"
The
.I LIBCURL_VERSION
#define in <curl/curl.h>
.SH BUGS
Surely there are some, you tell me!
.BR curl_version_info "(3)"

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_version_info 3 "30 Sep 2002" "libcurl 7.10" "libcurl Manual"
.TH curl_version_info 3 "12 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_version_info - returns run-time libcurl version info
.SH SYNOPSIS
@@ -69,6 +69,21 @@ supports SSL (HTTPS/FTPS)
.TP
.B CURL_VERSION_LIBZ
supports HTTP deflate using libz
.TP
.B CURL_VERSION_NTLM
supports HTTP NTLM (added in 7.10.6)
.TP
.B CURL_VERSION_GSSNEGOTIATE
supports HTTP GSS-Negotiate (added in 7.10.6)
.TP
.B CURL_VERSION_DEBUG
libcurl was built with extra debug capabilities built-in. This is mainly of
interest for libcurl hackers. (added in 7.10.6)
.TP
.B CURL_VERSION_ASYNCHDNS
libcurl was built with support for asynchronous name lookups, which allows
more exact timeouts (even on Windows) and less blocking when using the multi
interface. (added in 7.10.7)
.PP
\fIssl_version\fP is an ascii string for the OpenSSL version used. If libcurl
has no SSL support, this is NULL.
@@ -83,11 +98,8 @@ libcurl has no libz support, this is NULL.
names protocols that libcurl supports (using lowercase letters). The protocol
names are the same as would be used in URLs. The array is terminated by a NULL
entry.
.SH RETURN VALUE
A pointer to a curl_version_info_data struct.
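An illustrative check of the feature bits and struct described above (a
sketch, assuming <stdio.h> is included):

curl_version_info_data *d = curl_version_info(CURLVERSION_NOW);
if(d->features & CURL_VERSION_ASYNCHDNS)
  printf("asynchronous name lookups are available\n");
if(d->ssl_version)
  printf("SSL: %s\n", d->ssl_version);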
.SH "SEE ALSO"
\fIcurl_version(3)\fP
.SH BUGS
No known bugs.

View File

@@ -12,7 +12,9 @@
<h2>Overviews</h2>
<A HREF="libcurl.html">libcurl</A>
<br><a href="libcurl-easy.html">libcurl-easy</a>
<br><a href="libcurl-multi.html">libcurl-multi</a>
<br><a href="libcurl-share.html">libcurl-share</a>
<br><a href="libcurl-errors.html">libcurl-errors</a>
<br><a href="../libcurl-the-guide">libcurl-the-guide</a> (plain text)
@@ -40,6 +42,9 @@
<br><a href="curl_multi_init.html">curl_multi_init</a>
<br><a href="curl_multi_perform.html">curl_multi_perform</a>
<br><a href="curl_multi_remove_handle.html">curl_multi_remove_handle</a>
<br><a href="curl_share_cleanup.html">curl_share_cleanup</A>
<br><a href="curl_share_init.html">curl_share_init</A>
<br><a href="curl_share_setopt.html">curl_share_setopt</A>
<br><a href="curl_slist_append.html">curl_slist_append</A>
<br><a href="curl_slist_free_all.html">curl_slist_free_all</A>
<br><a href="curl_strequal.html">curl_strequal</A>

View File

@@ -0,0 +1,29 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" $Id$
.\"
.TH libcurl 3 "12 Aug 2003" "libcurl 7.10.7" "libcurl easy interface"
.SH NAME
libcurl-easy \- easy interface overview
.SH DESCRIPTION
When using libcurl's "easy" interface you init your session and get a handle
(often referred to as an "easy handle" in various docs and sources), which you
use as input to the easy interface functions you use. Use
\fIcurl_easy_init()\fP to get the handle.
You continue by setting all the options you want in the upcoming transfer, the
most important among them being the URL itself (you can't transfer anything
without a specified URL, as you may have figured out yourself). You might want
to set some callbacks as well that will be called from the library when data
is available etc. \fIcurl_easy_setopt()\fP is used for all this.
When all is set up, you tell libcurl to perform the transfer using
\fIcurl_easy_perform()\fP. It will then do the entire operation and won't
return until it is done (successfully or not).
After the transfer has been made, you can set new options and make another
transfer, or if you're done, cleanup the session by calling
\fIcurl_easy_cleanup()\fP. If you want persistent connections, you don't
cleanup immediately, but instead run ahead and perform other transfers using
the same easy handle.
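A minimal sketch of the whole flow described above; the URL is just a
placeholder:

#include <curl/curl.h>

int main(void)
{
  CURL *handle;
  CURLcode res;

  curl_global_init(CURL_GLOBAL_ALL);
  handle = curl_easy_init();
  if(handle) {
    curl_easy_setopt(handle, CURLOPT_URL, "http://curl.haxx.se/");
    res = curl_easy_perform(handle);  /* returns when the transfer is done */
    (void)res; /* inspect the result, set new options, perform again, ... */
    curl_easy_cleanup(handle);
  }
  curl_global_cleanup();
  return 0;
}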

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH libcurl-errors 3 "10 April 2002" "libcurl 7.9.6" "libcurl errors"
.TH libcurl-errors 3 "18 Dec 2002" "libcurl 7.10.3" "libcurl errors"
.SH NAME
error codes in libcurl
.SH DESCRIPTION
@@ -104,7 +104,7 @@ After a completed file transfer, the FTP server did not respond a proper
When sending custom "QUOTE" commands to the remote server, one of the commands
returned an error code that was 400 or higher.
.TP
.B CURLE_HTTP_NOT_FOUND (22)
.B CURLE_HTTP_RETURNED_ERROR (22)
This is returned if CURLOPT_FAILONERROR is set TRUE and the HTTP server
returns an error code that is >= 400.
.TP

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH libcurl-multi 5 "20 March 2001" "libcurl 7.9.5" "libcurl multi interface"
.TH libcurl-multi 3 "13 Oct 2001" "libcurl 7.10.1" "libcurl multi interface"
.SH NAME
libcurl-multi \- how to use the multi interface
.SH DESCRIPTION
@@ -37,7 +37,7 @@ curl_multi_* functions.
Each single transfer is built up with an easy handle. You must create them,
and setup the appropriate options for each easy handle, as outlined in the
\fIlibcurl(3)\fP man page.
\fIlibcurl(3)\fP man page, using \fIcurl_easy_setopt(3)\fP.
When the easy handle is setup for a transfer, then instead of using
\fIcurl_easy_perform\fP (as when using the easy interface for transfers), you
@@ -49,11 +49,11 @@ handles.
Should you change your mind, the easy handle is again removed from the multi
stack using \fIcurl_multi_remove_handle\fP. Once removed from the multi
handle, you can again use other easy interface functions like
curl_easy_perform or whatever you think is necessary.
\fIcurl_easy_perform\fP on the handle or whatever you think is necessary.
Adding the easy handles to the multi handle does not start any
transfer. Remember that one of the main ideas with this interface is to let
your application drive. You drive the transfers by invoking
Adding the easy handle to the multi handle does not start the transfer.
Remember that one of the main ideas with this interface is to let your
application drive. You drive the transfers by invoking
\fIcurl_multi_perform\fP. libcurl will then transfer data if there is anything
available to transfer. It'll use the callbacks and everything else you have
setup in the individual easy handles. It'll transfer data on all current
@@ -62,24 +62,39 @@ all, it may be none.
Your application can acquire knowledge from libcurl when it would like to get
invoked to transfer data, so that you don't have to busy-loop and call that
\fIcurl_multi_perform\fP like a mad man! \fIcurl_multi_fdset\fP offers an
\fIcurl_multi_perform\fP like crazy. \fIcurl_multi_fdset\fP offers an
interface using which you can extract fd_sets from libcurl to use in select()
or poll() calls in order to get to know when the transfers in the multi stack
might need attention. This also makes it very easy for your program to wait
for input on your own private file descriptors at the same time or perhaps
timeout every now and then, should you want that.
A little note here about the return codes from the multi functions, and
especially the \fIcurl_multi_perform\fP: if you receive
\fICURLM_CALL_MULTI_PERFORM\fP, this basically means that you should call
\fIcurl_multi_perform\fP again, before you select() on more actions. You don't
have to do it immediately, but the return code means that libcurl may have
more data available to return or that there may be more data to send off
before it is "satisfied".
\fIcurl_multi_perform\fP stores the number of still running transfers in one
of its input arguments, and by reading that you can figure out when all the
transfers in the multi handles are done. 'done' does not mean successful. One
or more of the transfers may have failed.
or more of the transfers may have failed. By tracking when this number
changes, you know when one or more transfers have completed.
To get information about completed transfers, to figure out success or not and
similar, \fIcurl_multi_info_read\fP should be called. It can return a message
about a current or previous transfer. Repeated invokes of the function get
more messages until the message queue is empty.
more messages until the message queue is empty. The information you receive
there includes an easy handle pointer which you may use to identify which easy
handle the information regards.
When all transfers in the multi stack are done, cleanup the multi handle with
\fIcurl_multi_cleanup\fP. Be careful and please note that you \fBMUST\fP
invoke separate \fIcurl_easy_cleanup\fP calls on every single easy handle to
clean them up properly.
If you want to re-use an easy handle that was added to the multi handle for
transfer, you must first remove it from the multi stack and then re-add it
(possibly after having altered some options of your own choice).
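A condensed sketch of this flow; includes and error handling are omitted and
the URL is a placeholder:

CURLM *multi = curl_multi_init();
CURL *easy = curl_easy_init();
int still_running;
fd_set fdread, fdwrite, fdexcep;
int maxfd;
struct timeval timeout;

curl_easy_setopt(easy, CURLOPT_URL, "http://curl.haxx.se/");
curl_multi_add_handle(multi, easy);

while(curl_multi_perform(multi, &still_running) == CURLM_CALL_MULTI_PERFORM)
  ;
while(still_running) {
  FD_ZERO(&fdread); FD_ZERO(&fdwrite); FD_ZERO(&fdexcep);
  curl_multi_fdset(multi, &fdread, &fdwrite, &fdexcep, &maxfd);
  timeout.tv_sec = 1;
  timeout.tv_usec = 0;
  select(maxfd + 1, &fdread, &fdwrite, &fdexcep, &timeout);
  while(curl_multi_perform(multi, &still_running) == CURLM_CALL_MULTI_PERFORM)
    ;
}
curl_multi_remove_handle(multi, easy);
curl_easy_cleanup(easy);
curl_multi_cleanup(multi);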

View File

@@ -0,0 +1,46 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" $Id$
.\"
.TH libcurl-share 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl share interface"
.SH NAME
libcurl-share \- how to use the share interface
.SH DESCRIPTION
This is an overview on how to use the libcurl share interface in your C
programs. There are specific man pages for each function mentioned in
here.
All functions in the share interface are prefixed with curl_share.
.SH "OBJECTIVES"
The share interface was added to enable sharing of data between curl
\&"handles".
.SH "ONE SET OF DATA - MANY TRANSFERS"
You can have multiple easy handles share data between them. Have them update
and use the \fBsame\fP cookie database or DNS cache! This way, each single
transfer will take advantage of data updates made by the other transfer(s).
.SH "SHARE OBJECT"
You create a shared object with \fIcurl_share_init()\fP. It returns a handle
for a newly created one.
You tell the shared object what data you want it to share by using
\fIcurl_share_setopt()\fP. Currently you can only share DNS and/or COOKIE
data.
Since you can use this share from multiple threads, and libcurl has no
internal thread synchronization, you must provide mutex callbacks if you're
using this multi-threaded. You set lock and unlock functions with
\fIcurl_share_setopt()\fP too.
Then, to make an easy handle use this share, you set the CURLOPT_SHARE option
with \fIcurl_easy_setopt\fP and pass in the share handle. You can make any
number of easy handles share the same share handle.
To make an easy handle stop using that particular share, you set CURLOPT_SHARE
to NULL for that easy handle. To make a handle stop sharing a particular kind
of data, use the CURLSHOPT_UNSHARE option.
When you're done using the share, make sure that no easy handle is still using
it, and call \fIcurl_share_cleanup()\fP on the handle.
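A short sketch of these steps for a single-threaded case, where the lock
callbacks can be left out; 'easy1' and 'easy2' are assumed easy handles:

CURLSH *share = curl_share_init();
curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);
curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);

curl_easy_setopt(easy1, CURLOPT_SHARE, share);
curl_easy_setopt(easy2, CURLOPT_SHARE, share);
/* ... perform the transfers ... */
curl_easy_setopt(easy1, CURLOPT_SHARE, NULL);  /* stop using the share */
curl_easy_setopt(easy2, CURLOPT_SHARE, NULL);
curl_share_cleanup(share);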
.SH "SEE ALSO"
.BR curl_share_init "(3), " curl_share_setopt "(3), " curl_share_cleanup "(3)"

View File

@@ -7,38 +7,37 @@
libcurl \- client-side URL transfers
.SH DESCRIPTION
This is an overview on how to use libcurl in your C programs. There are
specific man pages for each function mentioned in here. There's also the
libcurl-the-guide document for a complete tutorial to programming with
libcurl.
specific man pages for each function mentioned in here. There are also the
\fIlibcurl-easy\fP man page, the \fIlibcurl-multi\fP man page, the
\fIlibcurl-share\fP man page and the \fIlibcurl-the-guide\fP document for
further reading on how to do programming with libcurl.
There are a dozen custom bindings that bring libcurl access to your favourite
language. Look elsewhere for documentation on those.
There exist more than a dozen custom bindings that bring libcurl access to
your favourite language. Look elsewhere for documentation on those.
All applications that use libcurl should call \fIcurl_global_init()\fP exactly
once before any libcurl function can be used. After all usage of libcurl is
complete, it \fBmust\fP call \fIcurl_global_cleanup()\fP. In between those two
calls, you can use libcurl as described below.
When using libcurl's "easy" interface you init your session and get a handle,
which you use as input to the easy interface functions you use. Use
\fIcurl_easy_init()\fP to get the handle. There is also the so called "multi"
interface, try the \fIlibcurl-multi(3)\fP man page for an overview of that.
To transfer files, you always set up an "easy handle" using
\fIcurl_easy_init()\fP, but when you want the file(s) transferred you have the
option of using the "easy" interface, or the "multi" interface.
You continue by setting all the options you want in the upcoming transfer,
most important among them is the URL itself (you can't transfer anything
without a specified URL as you may have figured out yourself). You might want
to set some callbacks as well that will be called from the library when data
is available etc. \fIcurl_easy_setopt()\fP is there for this.
The easy interface is a synchronous interface with which you call
\fIcurl_easy_perform\fP and let it perform the transfer. When it is completed,
the function returns and you can continue. More details are found in the
\fIlibcurl-easy\fP man page.
When all is setup, you tell libcurl to perform the transfer using
\fIcurl_easy_perform()\fP. It will then do the entire operation and won't
return until it is done (successfully or not).
The multi interface on the other hand is an asynchronous interface, that you
call and that performs only a little piece of the transfer on each invoke. It
is perfect if you want to do things while the transfer is in progress, or
similar. The multi interface allows you to select() on libcurl action, and
even to easily download multiple files simultaneously using a single thread.
After the transfer has been made, you can set new options and make another
transfer, or if you're done, cleanup the session by calling
\fIcurl_easy_cleanup()\fP. If you want persistent connections, you don't
cleanup immediately, but instead run ahead and perform other transfers using
the same handle. See the chapter below for Persistent Connections.
You can have multiple easy handles share certain data, even if they are used
in different threads. This magic is setup using the share interface, as
described in the \fIlibcurl-share\fP man page.
There is also a series of other helpful functions to use. They are:
@@ -107,14 +106,15 @@ Persistent connections means that libcurl can re-use the same connection for
several transfers, if the conditions are right.
libcurl will *always* attempt to use persistent connections. Whenever you use
curl_easy_perform(), libcurl will attempt to use an existing connection to do
the transfer, and if none exists it'll open a new one that will be subject for
re-use on a possible following call to curl_easy_perform().
\fIcurl_easy_perform()\fP or \fIcurl_multi_perform()\fP, libcurl will attempt
to use an existing connection to do the transfer, and if none exists it'll
open a new one that will be subject for re-use on a possible following call to
\fIcurl_easy_perform()\fP or \fIcurl_multi_perform()\fP.
To allow libcurl to take full advantage of persistent connections, you should
do as many of your file transfers as possible using the same curl handle. When
you call curl_easy_cleanup(), all the possibly open connections held by
you call \fIcurl_easy_cleanup()\fP, all the possibly open connections held by
libcurl will be closed and forgotten.
Note that the options set with curl_easy_setopt() will be used in on every
repeat curl_easy_perform() call
Note that the options set with \fIcurl_easy_setopt()\fP will be used on
every repeated \fIcurl_easy_perform()\fP call.
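A small sketch of re-using one handle, as described above; the URLs are
placeholders:

CURL *handle = curl_easy_init();
curl_easy_setopt(handle, CURLOPT_URL, "http://example.com/one.html");
curl_easy_perform(handle);          /* opens a connection */
curl_easy_setopt(handle, CURLOPT_URL, "http://example.com/two.html");
curl_easy_perform(handle);          /* may re-use the same connection */
curl_easy_cleanup(handle);          /* closes all connections held by libcurl */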

View File

@@ -13,3 +13,26 @@ of environment. You should include files from here using...
... style and point the compiler's include path to the directory holding the
curl subdirectory. It makes it more likely to survive future modifications.
NOTE FOR LIBCURL HACKERS
All the include files in this tree are written and intended to be installed on
a system that may serve multiple platforms and multiple applications, all
using libcurl (possibly even different libcurl installations using different
versions). Therefore, all header files in here must obey these rules:
* They cannot depend on or use configure-generated results from libcurl's or
curl's directories. Other applications may not run configure as (lib)curl
does, and using platform dependent info here may break other platforms.
* We cannot assume anything else but very basic compiler features being
present. While libcurl requires an ANSI C compiler to build, some of the
earlier ANSI compilers clearly can't deal with some preprocessor operators.
* Newlines must remain unix-style for older compilers' sake.
* Comments must be written in the old-style /* unnested C-fashion */
To figure out how to do good and portable checks for features, operating
systems or specific hardware, a very good resource is Bjorn Reese's
collection at http://predef.sf.net/

View File

@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -23,23 +23,39 @@
* $Id$
***************************************************************************/
/* If you have problems, all libcurl docs and details are found here:
http://curl.haxx.se/libcurl/
*/
/* This is the version number of the libcurl package from which this header
file originates: */
#define LIBCURL_VERSION "7.10.7"
/* This is the numeric version of the libcurl version number, meant for easier
parsing and comparisons by programs. The LIBCURL_VERSION_NUM define will
always follow this syntax:
0xXXYYZZ
Where XX, YY and ZZ are the main version, release and patch numbers in
hexadecimal. All three numbers are always represented using two digits. 1.2
would appear as "0x010200" while version 9.11.7 appears as "0x090b07".
This 6-digit hexadecimal number does not show pre-release number, and it is
always a greater number in a more recent release. It makes comparisons with
greater than and less than work.
*/
#define LIBCURL_VERSION_NUM 0x070a07
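/* For illustration: because this number always increases, an application can
   make compile-time checks such as:

     #if LIBCURL_VERSION_NUM >= 0x070a07
       ... code that relies on libcurl 7.10.7 or later ...
     #endif
*/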
#include <stdio.h>
/* The include stuff here is mainly for time_t! */
/* The include stuff here below is mainly for time_t! */
#ifdef vms
# include <types.h>
# include <time.h>
#else
# include <sys/types.h>
# ifdef TIME_WITH_SYS_TIME
# include <sys/time.h>
# include <time.h>
# else
# ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
# else
# include <time.h>
# endif
# endif
# include <time.h>
#endif /* defined (vms) */
#ifndef TRUE
@@ -55,39 +71,36 @@
extern "C" {
#endif
/* stupid #define trick to preserve functionality with older code, but
making it use our name space for the future */
/* silly trick to preserve functionality with older code, but making it use
our name space for the future */
#define HttpPost curl_httppost
struct curl_httppost {
struct curl_httppost *next; /* next entry in the list */
char *name; /* pointer to allocated name */
long namelength; /* length of name length */
char *contents; /* pointer to allocated data contents */
long contentslength; /* length of contents field */
/* CMC: Added support for buffer uploads */
char *buffer; /* pointer to allocated buffer contents */
long bufferlength; /* length of buffer field */
char *contenttype; /* Content-Type */
struct curl_httppost *next; /* next entry in the list */
char *name; /* pointer to allocated name */
long namelength; /* length of name length */
char *contents; /* pointer to allocated data contents */
long contentslength; /* length of contents field */
char *buffer; /* pointer to allocated buffer contents */
long bufferlength; /* length of buffer field */
char *contenttype; /* Content-Type */
struct curl_slist* contentheader; /* list of extra headers for this form */
struct curl_httppost *more; /* if one field name has more than one file, this
link should link to following files */
long flags; /* as defined below */
#define HTTPPOST_FILENAME (1<<0) /* specified content is a file name */
#define HTTPPOST_READFILE (1<<1) /* specified content is a file name */
#define HTTPPOST_PTRNAME (1<<2) /* name is only stored pointer
do not free in formfree */
struct curl_httppost *more; /* if one field name has more than one
file, this link should link to following
files */
long flags; /* as defined below */
#define HTTPPOST_FILENAME (1<<0) /* specified content is a file name */
#define HTTPPOST_READFILE (1<<1) /* specified content is a file name */
#define HTTPPOST_PTRNAME (1<<2) /* name is only stored pointer
do not free in formfree */
#define HTTPPOST_PTRCONTENTS (1<<3) /* contents is only stored pointer
do not free in formfree */
#define HTTPPOST_BUFFER (1<<4) /* upload file from buffer */
#define HTTPPOST_PTRBUFFER (1<<5) /* upload file from pointer contents */
/* CMC: Added support for buffer uploads */
#define HTTPPOST_BUFFER (1<<4) /* upload file from buffer */
#define HTTPPOST_PTRBUFFER (1<<5) /* upload file from pointer contents */
char *showfilename; /* The file name to show. If not set, the actual
file name will be used (if this is a file part) */
char *showfilename; /* The file name to show. If not set, the
actual file name will be used (if this
is a file part) */
};
typedef int (*curl_progress_callback)(void *clientp,
@@ -96,7 +109,9 @@ typedef int (*curl_progress_callback)(void *clientp,
double ultotal,
double ulnow);
#define CURL_MAX_WRITE_SIZE 20480
/* Tests have proven that 20K is a very bad buffer size for uploads on
Windows, while 16K for some odd reason performed a lot better. */
#define CURL_MAX_WRITE_SIZE 16384
typedef size_t (*curl_write_callback)(char *buffer,
size_t size,
@@ -128,7 +143,7 @@ typedef int (*curl_debug_callback)
curl_infotype type, /* what kind of data */
char *data, /* points to the data */
size_t size, /* size of the data pointed to */
void *userp); /* whatever the user please */
void *userptr); /* whatever the user please */
/* All possible error codes from all sorts of curl functions. Future versions
may return other values, stay prepared.
@@ -160,7 +175,7 @@ typedef enum {
CURLE_FTP_COULDNT_RETR_FILE, /* 19 */
CURLE_FTP_WRITE_ERROR, /* 20 */
CURLE_FTP_QUOTE_ERROR, /* 21 */
CURLE_HTTP_NOT_FOUND, /* 22 */
CURLE_HTTP_RETURNED_ERROR, /* 22 */
CURLE_WRITE_ERROR, /* 23 */
CURLE_MALFORMAT_USER, /* 24 - user name is illegally specified */
CURLE_FTP_COULDNT_STOR_FILE, /* 25 - failed FTP upload */
@@ -205,12 +220,29 @@ typedef enum {
CURL_LAST /* never use! */
} CURLcode;
typedef CURLcode (*curl_ssl_ctx_callback)(CURL *curl, /* easy handle */
void *ssl_ctx, /* actually an
OpenSSL SSL_CTX */
void *userptr);
/* Make a spelling correction for the operation timed-out define */
#define CURLE_OPERATION_TIMEDOUT CURLE_OPERATION_TIMEOUTED
#define CURLE_HTTP_NOT_FOUND CURLE_HTTP_RETURNED_ERROR
typedef enum {
CURLPROXY_HTTP = 0,
CURLPROXY_SOCKS4 = 4,
CURLPROXY_SOCKS5 = 5
} curl_proxytype;
#define CURLAUTH_NONE 0 /* nothing */
#define CURLAUTH_BASIC (1<<0) /* Basic (default) */
#define CURLAUTH_DIGEST (1<<1) /* Digest */
#define CURLAUTH_GSSNEGOTIATE (1<<2) /* GSS-Negotiate */
#define CURLAUTH_NTLM (1<<3) /* NTLM */
#define CURLAUTH_ANY ~0 /* all types set */
#define CURLAUTH_ANYSAFE (~CURLAUTH_BASIC)
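/* For illustration (a sketch, assuming an easy handle 'handle'): these bits
   are meant to be OR'ed together and passed to CURLOPT_HTTPAUTH or
   CURLOPT_PROXYAUTH, for example:

     curl_easy_setopt(handle, CURLOPT_HTTPAUTH,
                      CURLAUTH_DIGEST | CURLAUTH_BASIC);
     curl_easy_setopt(handle, CURLOPT_USERPWD, "name:password");
*/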
/* this was the error code 50 in 7.7.3 and a few earlier versions, this
is no longer used by libcurl but is instead #defined here only to not
make programs break */
@@ -242,7 +274,15 @@ typedef enum {
* platforms.
*/
#if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) || \
defined(__HP_aCC)
defined(__HP_aCC) || defined(__BORLANDC__)
/* This compiler is believed to have an ISO compatible preprocessor */
#define CURL_ISOCPP
#else
/* This compiler is believed NOT to have an ISO compatible preprocessor */
#undef CURL_ISOCPP
#endif
#ifdef CURL_ISOCPP
#define CINIT(name,type,number) CURLOPT_ ## name = CURLOPTTYPE_ ## type + number
#else
/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. */
@@ -252,6 +292,12 @@ typedef enum {
#define CINIT(name,type,number) CURLOPT_/**/name = type + number
#endif
/*
* This macro-mania below sets up the CURLOPT_[what] enum, to be used with
* curl_easy_setopt(). The first argument in the CINIT() macro is the [what]
* word.
*/
typedef enum {
CINIT(NOTHING, LONG, 0), /********* the first one is unused ************/
@@ -261,24 +307,19 @@ typedef enum {
/* The full URL to get/put */
CINIT(URL, OBJECTPOINT, 2),
/* Port number to connect to, if other than default. Specify the CONF_PORT
flag in the CURLOPT_FLAGS to activate this */
/* Port number to connect to, if other than default. */
CINIT(PORT, LONG, 3),
/* Name of proxy to use. Specify the CONF_PROXY flag in the CURLOPT_FLAGS to
activate this */
/* Name of proxy to use. */
CINIT(PROXY, OBJECTPOINT, 4),
/* Name and password to use when fetching. Specify the CONF_USERPWD flag in
the CURLOPT_FLAGS to activate this */
/* "name:password" to use when fetching. */
CINIT(USERPWD, OBJECTPOINT, 5),
/* Name and password to use with Proxy. Specify the CONF_PROXYUSERPWD
flag in the CURLOPT_FLAGS to activate this */
/* "name:password" to use with proxy. */
CINIT(PROXYUSERPWD, OBJECTPOINT, 6),
/* Range to get, specified as an ASCII string. Specify the CONF_RANGE flag
in the CURLOPT_FLAGS to activate this */
/* Range to get, specified as an ASCII string. */
CINIT(RANGE, OBJECTPOINT, 7),
/* not used */
@@ -399,7 +440,6 @@ typedef enum {
as described elsewhere. */
CINIT(WRITEINFO, OBJECTPOINT, 40),
/* Previous FLAG bits */
CINIT(VERBOSE, LONG, 41), /* talk a lot */
CINIT(HEADER, LONG, 42), /* throw the header out too */
CINIT(NOPROGRESS, LONG, 43), /* shut off the progress meter */
@@ -599,6 +639,43 @@ typedef enum {
the response to be compressed. */
CINIT(ENCODING, OBJECTPOINT, 102),
/* Set pointer to private data */
CINIT(PRIVATE, OBJECTPOINT, 103),
/* Set aliases for HTTP 200 in the HTTP Response header */
CINIT(HTTP200ALIASES, OBJECTPOINT, 104),
/* Continue to send authentication (user+password) when following locations,
even when the hostname changes. This can potentially send off the name
and password to whatever host the server decides. */
CINIT(UNRESTRICTED_AUTH, LONG, 105),
/* Specifically switch on or off the FTP engine's use of the EPRT command (it
also disables the LPRT attempt). By default, these will always be
attempted before the good old traditional PORT command. */
CINIT(FTP_USE_EPRT, LONG, 106),
/* Set this to a bitmask value to enable the particular authentication
methods you like. Use this in combination with CURLOPT_USERPWD.
Note that setting multiple bits may cause extra network round-trips. */
CINIT(HTTPAUTH, LONG, 107),
/* Set the ssl context callback function, currently only for OpenSSL ssl_ctx
in the second argument. The function must match the
curl_ssl_ctx_callback prototype. */
CINIT(SSL_CTX_FUNCTION, FUNCTIONPOINT, 108),
/* Set the userdata for the ssl context callback function's third
argument */
CINIT(SSL_CTX_DATA, OBJECTPOINT, 109),
/* FTP Option that causes missing dirs to be created on the remote server */
CINIT(FTP_CREATE_MISSING_DIRS, LONG, 110),
/* Set this to a bitmask value to enable the particular authentication
methods you like. Use this in combination with CURLOPT_PROXYUSERPWD.
Note that setting multiple bits may cause extra network round-trips. */
CINIT(PROXYAUTH, LONG, 111),
CURLOPT_LASTENTRY /* the last unused */
} CURLoption;
@@ -668,29 +745,29 @@ typedef enum {
#endif
/* These functions are in the libcurl, they're here for portable reasons and
they are used by the 'curl' client. They really should be moved to some kind
of "portability library" since it has nothing to do with file transfers and
/* These functions are in libcurl, they're here for portable reasons and they
are used by the 'curl' client. They really should be moved to some kind of
"portability library" since it has nothing to do with file transfers and
might be usable to other programs...
NOTE: they return TRUE if the strings match *case insensitively*.
*/
extern int (curl_strequal)(const char *s1, const char *s2);
extern int (curl_strnequal)(const char *s1, const char *s2, size_t n);
#define strequal(a,b) curl_strequal(a,b)
#define strnequal(a,b,c) curl_strnequal(a,b,c)
/* DEPRECATED function to build formdata */
#ifdef CURL_OLDSTYLE
/* DEPRECATED function to build formdata. Stop using this, it will cease
to exist. */
int curl_formparse(char *, struct curl_httppost **,
struct curl_httppost **_post);
#endif
/* name is uppercase CURLFORM_<name> */
#ifdef CFINIT
#undef CFINIT
#endif
#if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) || \
defined(__HP_aCC)
#ifdef CURL_ISOCPP
#define CFINIT(name) CURLFORM_ ## name
#else
/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. */
@@ -762,47 +839,122 @@ typedef enum {
CURL_FORMADD_LAST /* last */
} CURLFORMcode;
/*
* NAME curl_formadd()
*
* DESCRIPTION
*
* Pretty advanced function for building multi-part formposts. Each invoke
* adds one part that together construct a full post. Then use
* CURLOPT_HTTPPOST to send it off to libcurl.
*/
CURLFORMcode curl_formadd(struct curl_httppost **httppost,
struct curl_httppost **last_post,
...);
struct curl_httppost **last_post,
...);
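/* For illustration (a sketch, assuming an easy handle 'handle' and the
   CURLFORM_* symbols defined elsewhere in this header):

     struct curl_httppost *post = NULL;
     struct curl_httppost *last = NULL;
     curl_formadd(&post, &last,
                  CURLFORM_COPYNAME, "name",
                  CURLFORM_COPYCONTENTS, "content",
                  CURLFORM_END);
     curl_easy_setopt(handle, CURLOPT_HTTPPOST, post);
     ... perform the transfer, then ...
     curl_formfree(post);
*/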
/* cleanup a form: */
/*
* NAME curl_formfree()
*
* DESCRIPTION
*
* Free a multipart formpost previously built with curl_formadd().
*/
void curl_formfree(struct curl_httppost *form);
/* Unix and Win32 getenv function call, this returns a malloc()'ed string that
MUST be free()ed after usage is complete. */
/*
* NAME curl_getenv()
*
* DESCRIPTION
*
* Returns a malloc()'ed string that MUST be curl_free()ed after usage is
* complete.
*/
char *curl_getenv(const char *variable);
/* Returns a static ascii string of the libcurl version. */
/*
* NAME curl_version()
*
* DESCRIPTION
*
* Returns a static ascii string of the libcurl version.
*/
char *curl_version(void);
/* Escape and unescape URL encoding in strings. The functions return a new
* allocated string or NULL if an error occurred. */
/*
* NAME curl_escape()
*
* DESCRIPTION
*
* Escapes URL strings (converts all letters considered illegal in URLs to their
* %XX versions). This function returns a new allocated string or NULL if an
* error occurred.
*/
char *curl_escape(const char *string, int length);
/*
* NAME curl_unescape()
*
* DESCRIPTION
*
* Unescapes URL encoding in strings (converts all %XX codes to their 8bit
* versions). This function returns a new allocated string or NULL if an error
* occurred.
*/
char *curl_unescape(const char *string, int length);
/* 20020912 WJM. Provide for a de-allocation in the same translation unit
that did the allocation. Added in libcurl 7.10 */
/*
* NAME curl_free()
*
* DESCRIPTION
*
* Provided for de-allocation in the same translation unit that did the
* allocation. Added in libcurl 7.10
*/
void curl_free(void *p);
/* curl_global_init() should be invoked exactly once for each application that
uses libcurl */
/*
* NAME curl_global_init()
*
* DESCRIPTION
*
* curl_global_init() should be invoked exactly once for each application that
* uses libcurl
*/
CURLcode curl_global_init(long flags);
/* curl_global_cleanup() should be invoked exactly once for each application
that uses libcurl */
/*
* NAME curl_global_cleanup()
*
* DESCRIPTION
*
* curl_global_cleanup() should be invoked exactly once for each application
* that uses libcurl
*/
void curl_global_cleanup(void);
/* This is the version number */
#define LIBCURL_VERSION "7.10.1"
#define LIBCURL_VERSION_NUM 0x070a01
/* linked-list structure for the CURLOPT_QUOTE option (and other) */
struct curl_slist {
char *data;
struct curl_slist *next;
char *data;
struct curl_slist *next;
};
/*
* NAME curl_slist_append()
*
* DESCRIPTION
*
* Appends a string to a linked list. If no list exists, it will be created
* first. Returns the new list, after appending.
*/
struct curl_slist *curl_slist_append(struct curl_slist *, const char *);
/*
* NAME curl_slist_free_all()
*
* DESCRIPTION
*
* free a previously built curl_slist.
*/
void curl_slist_free_all(struct curl_slist *);
/*
@@ -840,27 +992,19 @@ typedef enum {
CURLINFO_REQUEST_SIZE = CURLINFO_LONG + 12,
CURLINFO_SSL_VERIFYRESULT = CURLINFO_LONG + 13,
CURLINFO_FILETIME = CURLINFO_LONG + 14,
CURLINFO_CONTENT_LENGTH_DOWNLOAD = CURLINFO_DOUBLE + 15,
CURLINFO_CONTENT_LENGTH_UPLOAD = CURLINFO_DOUBLE + 16,
CURLINFO_STARTTRANSFER_TIME = CURLINFO_DOUBLE + 17,
CURLINFO_CONTENT_TYPE = CURLINFO_STRING + 18,
CURLINFO_REDIRECT_TIME = CURLINFO_DOUBLE + 19,
CURLINFO_REDIRECT_COUNT = CURLINFO_LONG + 20,
CURLINFO_PRIVATE = CURLINFO_STRING + 21,
CURLINFO_HTTP_CONNECTCODE = CURLINFO_LONG + 22,
/* Fill in new entries below here! */
CURLINFO_CONTENT_TYPE = CURLINFO_STRING + 18,
CURLINFO_REDIRECT_TIME = CURLINFO_DOUBLE + 19,
CURLINFO_REDIRECT_COUNT = CURLINFO_LONG + 20,
/* Fill in new entries here! */
CURLINFO_LASTONE = 21
CURLINFO_LASTONE = 23
} CURLINFO;
/* unfortunately, the easy.h and multi.h include files need options and info
stuff before they can be included! */
#include "easy.h" /* nothing in curl is fun without the easy stuff */
#include "multi.h"
typedef enum {
CURLCLOSEPOLICY_NONE, /* first, never use this */
@@ -884,35 +1028,61 @@ typedef enum {
* Setup defines, protos etc for the sharing stuff.
*/
/* Different types of locks that a share can acquire */
/* Different data locks for a single share */
typedef enum {
CURL_LOCK_TYPE_NONE = 0,
CURL_LOCK_TYPE_COOKIE = 1<<0,
CURL_LOCK_TYPE_DNS = 1<<1,
CURL_LOCK_TYPE_SSL_SESSION = 2<<1,
CURL_LOCK_TYPE_CONNECT = 2<<2,
CURL_LOCK_TYPE_LAST
} curl_lock_type;
CURL_LOCK_DATA_NONE = 0,
/* CURL_LOCK_DATA_SHARE is used internally to say that
* the locking is just made to change the internal state of the share
* itself.
*/
CURL_LOCK_DATA_SHARE,
CURL_LOCK_DATA_COOKIE,
CURL_LOCK_DATA_DNS,
CURL_LOCK_DATA_SSL_SESSION,
CURL_LOCK_DATA_CONNECT,
CURL_LOCK_DATA_LAST
} curl_lock_data;
typedef void (*curl_lock_function)(CURL *, curl_lock_type, void *);
typedef void (*curl_unlock_function)(CURL *, curl_lock_type, void *);
/* Different lock access types */
typedef enum {
CURL_LOCK_ACCESS_NONE = 0, /* unspecified action */
CURL_LOCK_ACCESS_SHARED = 1, /* for read perhaps */
CURL_LOCK_ACCESS_SINGLE = 2, /* for write perhaps */
CURL_LOCK_ACCESS_LAST /* never use */
} curl_lock_access;
typedef struct {
unsigned int specifier;
unsigned int locked;
unsigned int dirty;
curl_lock_function lockfunc;
curl_unlock_function unlockfunc;
void *clientdata;
} curl_share;
typedef void (*curl_lock_function)(CURL *handle,
curl_lock_data data,
curl_lock_access access,
void *userptr);
typedef void (*curl_unlock_function)(CURL *handle,
curl_lock_data data,
void *userptr);
curl_share *curl_share_init (void);
CURLcode curl_share_setopt (curl_share *, curl_lock_type, int);
CURLcode curl_share_set_lock_function (curl_share *, curl_lock_function);
CURLcode curl_share_set_unlock_function (curl_share *, curl_unlock_function);
CURLcode curl_share_set_lock_data (curl_share *, void *);
CURLcode curl_share_destroy (curl_share *);
typedef void CURLSH;
typedef enum {
CURLSHE_OK, /* all is fine */
CURLSHE_BAD_OPTION, /* 1 */
CURLSHE_IN_USE, /* 2 */
CURLSHE_INVALID, /* 3 */
CURLSHE_LAST /* never use */
} CURLSHcode;
typedef enum {
CURLSHOPT_NONE, /* don't use */
CURLSHOPT_SHARE, /* specify a data type to share */
CURLSHOPT_UNSHARE, /* specify which data type to stop sharing */
CURLSHOPT_LOCKFUNC, /* pass in a 'curl_lock_function' pointer */
CURLSHOPT_UNLOCKFUNC, /* pass in a 'curl_unlock_function' pointer */
CURLSHOPT_USERDATA, /* pass in a user data pointer used in the lock/unlock
callback functions */
CURLSHOPT_LAST /* never use */
} CURLSHoption;
CURLSH *curl_share_init(void);
CURLSHcode curl_share_setopt(CURLSH *, CURLSHoption option, ...);
CURLSHcode curl_share_cleanup(CURLSH *);
/****************************************************************************
* Structures for querying information about the curl library at runtime.
@@ -947,12 +1117,28 @@ typedef struct {
#define CURL_VERSION_KERBEROS4 (1<<1)
#define CURL_VERSION_SSL (1<<2)
#define CURL_VERSION_LIBZ (1<<3)
#define CURL_VERSION_NTLM (1<<4)
#define CURL_VERSION_GSSNEGOTIATE (1<<5)
#define CURL_VERSION_DEBUG (1<<6) /* built with debug capabilities */
#define CURL_VERSION_ASYNCHDNS (1<<7)
/* returns a pointer to a static copy of the version info struct */
/*
* NAME curl_version_info()
*
* DESCRIPTION
*
* This function returns a pointer to a static copy of the version info
* struct. See above.
*/
curl_version_info_data *curl_version_info(CURLversion);
#ifdef __cplusplus
}
#endif
/* unfortunately, the easy.h and multi.h include files need options and info
stuff before they can be included! */
#include "easy.h" /* nothing in curl is fun without the easy stuff */
#include "multi.h"
#endif /* __CURL_CURL_H */

View File

@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms

View File

@@ -1,54 +1,25 @@
/*************************************************************************
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
* MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE AUTHORS AND
* CONTRIBUTORS ACCEPT NO RESPONSIBILITY IN ANY CONCEIVABLE MANNER.
*
*************************************************************************
*
* Preliminary documentation
*
* printf conversions:
*
* conversion ::= '%%' | '%' [position] ( number | float | string )
* position ::= digits '$'
* number ::= [number-flags] ( 'd' | 'i' | 'o' | 'x' | 'X' | 'u')
* number-flags ::= 'h' | 'l' | 'L' ...
* float ::= [float-flags] ( 'f' | 'e' | 'E' | 'g' | 'G' )
* string ::= [string-flags] 's'
* string-flags ::= padding | '#'
* digits ::= (digit)+
* digit ::= 0-9
*
* c
* p
* n
*
* qualifiers
*
* - : left adjustment
* + : show sign
* SPACE : padding
* # : alterative
* . : precision
* * : width
* 0 : padding / size
* 1-9 : size
* h : short
* l : long
* ll : longlong
* L : long double
* Z : long / longlong
* q : longlong
*
************************************************************************/
***************************************************************************/
#ifndef H_MPRINTF
#define H_MPRINTF

View File

@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -44,19 +44,35 @@
o Enable the application to select() on its own file descriptors and curl's
file descriptors simultaneous easily.
Example sources using this interface is here: ../multi/
*/
#if defined(_WIN32) && !defined(WIN32)
/* Chris Lewis mentioned that he doesn't get WIN32 defined, only _WIN32 so we
make this adjustment to catch this. */
#define WIN32 1
#endif
#if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
#include <winsock.h>
#else
#ifdef _AIX
/* HP-UX systems version 9, 10 and 11 lack sys/select.h and so does oldish
libc5-based Linux systems. Only include it on system that are known to
require it! */
#include <sys/select.h>
#endif
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#endif
#include "curl.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void CURLM;
typedef enum {
@@ -71,7 +87,7 @@ typedef enum {
typedef enum {
CURLMSG_NONE, /* first, not used */
CURLMSG_DONE, /* This easy handle has completed. 'whatever' points to
CURLMSG_DONE, /* This easy handle has completed. 'result' contains
the CURLcode of the transfer */
CURLMSG_LAST /* last, not used */
} CURLMSG;
@@ -187,4 +203,8 @@ CURLMcode curl_multi_cleanup(CURLM *multi_handle);
CURLMsg *curl_multi_info_read(CURLM *multi_handle,
int *msgs_in_queue);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif

View File

@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -23,9 +23,7 @@
* $Id$
***************************************************************************/
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
size_t fread (void *, size_t, size_t, FILE *);
size_t fwrite (const void *, size_t, size_t, FILE *);

View File

@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms

View File

@@ -5,4 +5,6 @@ Makefile
.deps
.libs
config.h
stamp-h1
stamp-*
ca-bundle.h
getdate.c

View File

@@ -5,22 +5,25 @@
AUTOMAKE_OPTIONS = foreign nostdinc
EXTRA_DIST = getdate.y Makefile.b32 Makefile.b32.resp Makefile.m32 \
Makefile.vc6 Makefile.riscos libcurl.def dllinit.c curllib.dsp \
Makefile.vc6 Makefile.riscos libcurl.def curllib.dsp \
curllib.dsw config-vms.h config-win32.h config-riscos.h config-mac.h \
config.h.in ca-bundle.crt README.encoding
config.h.in ca-bundle.crt README.encoding README.memoryleak \
README.ares makefile.dj config.dj
lib_LTLIBRARIES = libcurl.la
if ARES
ARESINC = -I$(top_srcdir)/ares
endif
# we use srcdir/include for the static global include files
# we use builddir/lib for the generated lib/config.h file to get found
# we use srcdir/lib for the lib-private header files
INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/lib -I$(top_srcdir)/lib
INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/lib -I$(top_srcdir)/lib $(ARESINC)
# The -no-undefined flag is CRUCIAL for this to build fine on Cygwin. If we
# find a case in which we need to remove this flag, we should most likely
# write a configure check that detects when this flag is needed and when its
# not.
libcurl_la_LDFLAGS = -no-undefined -version-info 2:2:0
LDFLAGS += -L$(top_srcdir)/lib
VERSION=-version-info 2:2:0
# This flag accepts an argument of the form current[:revision[:age]]. So,
# passing -version-info 3:12:1 sets current to 3, revision to 12, and age to
@@ -50,6 +53,19 @@ libcurl_la_LDFLAGS = -no-undefined -version-info 2:2:0
# set age to 0.
#
if NO_UNDEFINED
# The -no-undefined flag is CRUCIAL for this to build fine on Cygwin.
UNDEF = -no-undefined
else
UNDEF =
endif
if ARES
ARESLIB = -lares -L$(top_builddir)/ares
endif
libcurl_la_LDFLAGS = $(UNDEF) $(VERSION) $(ARESLIB)
libcurl_la_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c \
base64.c file.h hostip.c progress.c timeval.h base64.h formdata.c \
hostip.h progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c \
@@ -60,16 +76,29 @@ getpass.c netrc.c telnet.h getinfo.c getinfo.h transfer.c strequal.c \
strequal.h easy.c security.h security.c krb4.c krb4.h memdebug.c \
memdebug.h inet_ntoa_r.h http_chunks.c http_chunks.h strtok.c strtok.h \
connect.c connect.h llist.c llist.h hash.c hash.h multi.c \
content_encoding.c content_encoding.h
content_encoding.c content_encoding.h share.c share.h http_digest.c \
md5.c md5.h http_digest.h http_negotiate.c http_negotiate.h \
http_ntlm.c http_ntlm.h ca-bundle.h
noinst_HEADERS = setup.h transfer.h
BUILT_SOURCES = $(srcdir)/getdate.c $(srcdir)/ca-bundle.h
# Say $(srcdir), so GNU make does not report an ambiguity with the .y.c rule.
$(srcdir)/getdate.c: getdate.y
cd $(srcdir) && \
$(YACC) $(YFLAGS) getdate.y; \
mv -f y.tab.c getdate.c
$(srcdir)/ca-bundle.h: Makefile.in Makefile
chmod 0644 $@
echo "/* The file is generated automaticly */" > $@
if CABUNDLE
echo '#define CURL_CA_BUNDLE @CURL_CA_BUNDLE@' >> $@
else
echo '#undef CURL_CA_BUNDLE /* unknown */' >> $@
endif
install-data-hook:
@if test -n "@CURL_CA_BUNDLE@"; then \
$(mkinstalldirs) `dirname $(DESTDIR)@CURL_CA_BUNDLE@`; \
@@ -79,4 +108,5 @@ install-data-hook:
# this hook is mainly for non-unix systems to build even if configure
# isn't run
dist-hook:
cp $(srcdir)/ca-bundle.h.in $(distdir)/ca-bundle.h
chmod 0644 $(distdir)/ca-bundle.h
echo "/* ca bundle path set in here*/" > $(distdir)/ca-bundle.h

View File

@@ -59,7 +59,12 @@ SOURCES = \
easy.c \
strequal.c \
strtok.c \
connect.c
connect.c \
hash.c \
share.c \
llist.c \
multi.c \
content_encoding.c
OBJECTS = $(SOURCES:.c=.obj)

View File

@@ -28,4 +28,9 @@
+easy.obj &
+strequal.obj &
+strtok.obj &
+connect.obj
+connect.obj &
+hash.obj &
+share.obj &
+llist.obj &
+multi.obj &
+content_encoding.obj

View File

@@ -1,6 +1,6 @@
#############################################################
#
## Makefile for building libcurl.a with MingW32 (GCC-2.95) and
## Makefile for building libcurl.a with MingW32 (GCC-3.2) and
## optionally OpenSSL (0.9.6)
## Use: make -f Makefile.m32
##
@@ -9,10 +9,11 @@
CC = gcc
AR = ar
RM = rm -f
RANLIB = ranlib
STRIP = strip -g
OPENSSL_PATH = ../../openssl-0.9.6d
ZLIB_PATH = ../../zlib-1.1.3
OPENSSL_PATH = ../../openssl-0.9.7a
ZLIB_PATH = ../../zlib-1.1.4
########################################################
## Nothing more to do below this line!
@@ -21,36 +22,40 @@ INCLUDES = -I. -I.. -I../include -I../src
CFLAGS = -g -O2 -DMINGW32
ifdef SSL
INCLUDES += -I"$(OPENSSL_PATH)/outinc" -I"$(OPENSSL_PATH)/outinc/openssl"
CFLAGS += -DUSE_SSLEAY
DLL_LIBS = -L$(OPENSSL_PATH)/out -leay32 -lssl32 -lRSAglue
CFLAGS += -DUSE_SSLEAY -DHAVE_OPENSSL_ENGINE_H
DLL_LIBS = -L$(OPENSSL_PATH)/out -leay32 -lssl32
endif
ifdef ZLIB
INCLUDES += -I"$(ZLIB_PATH)"
CFLAGS += -DHAVE_ZLIB
DLL_LIBS += -L$(ZLIB_PATH) -lz
INCLUDES += -I"$(ZLIB_PATH)"
CFLAGS += -DHAVE_LIBZ -DHAVE_ZLIB_H
DLL_LIBS += -L$(ZLIB_PATH) -lz
endif
COMPILE = $(CC) $(INCLUDES) $(CFLAGS)
libcurl_a_LIBRARIES = libcurl.a
libcurl_a_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c base64.c \
file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h progress.h \
cookie.c formdata.h http.c sendf.c cookie.h ftp.c http.h sendf.h url.c dict.c \
ftp.h if2ip.c speedcheck.c url.h dict.h getdate.c if2ip.h speedcheck.h \
urldata.h transfer.c getdate.h ldap.c ssluse.c version.c transfer.h getenv.c \
ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h getpass.c netrc.c \
telnet.h getinfo.c strequal.c strequal.h easy.c security.h \
security.c krb4.h krb4.c memdebug.h memdebug.c inet_ntoa_r.h http_chunks.h http_chunks.c \
strtok.c connect.c hash.c llist.c multi.c \
content_encoding.h content_encoding.c
file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h \
progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c \
http.h sendf.h url.c dict.c ftp.h if2ip.c speedcheck.c url.h \
dict.h getdate.c if2ip.h speedcheck.h urldata.h transfer.c getdate.h \
ldap.c ssluse.c version.c transfer.h getenv.c \
ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h \
getpass.c netrc.c telnet.h getinfo.c strequal.c strequal.h easy.c \
security.h security.c krb4.h krb4.c memdebug.h memdebug.c \
inet_ntoa_r.h http_chunks.h http_chunks.c \
strtok.c connect.c hash.c llist.c multi.c share.c share.h \
content_encoding.h content_encoding.c http_digest.h http_digest.c \
http_negotiate.c http_negotiate.h http_ntlm.c http_ntlm.h md5.h \
md5.c
libcurl_a_OBJECTS = file.o timeval.o base64.o hostip.o progress.o \
formdata.o cookie.o http.o sendf.o ftp.o url.o dict.o if2ip.o \
speedcheck.o getdate.o transfer.o ldap.o ssluse.o version.o \
getenv.o escape.o mprintf.o telnet.o getpass.o netrc.o getinfo.o \
strequal.o easy.o security.o krb4.o memdebug.o http_chunks.o \
strtok.o connect.o hash.o llist.o multi.o \
content_encoding.o
strtok.o connect.o hash.o llist.o multi.o share.o \
content_encoding.o http_digest.o http_negotiate.o http_ntlm.o md5.o
LIBRARIES = $(libcurl_a_LIBRARIES)
SOURCES = $(libcurl_a_SOURCES)
@@ -60,16 +65,18 @@ OBJECTS = $(libcurl_a_OBJECTS)
all: libcurl.a libcurl.dll libcurldll.a
libcurl.a: $(libcurl_a_OBJECTS) $(libcurl_a_DEPENDENCIES)
-@erase libcurl.a
$(RM) libcurl.a
$(AR) cru libcurl.a $(libcurl_a_OBJECTS)
$(RANLIB) libcurl.a
$(STRIP) $@
DLLINITOBJ =
# remove the last line above to keep debug info
libcurl.dll libcurldll.a: libcurl.a libcurl.def dllinit.o
-@erase $@
dllwrap --dllname $@ --output-lib libcurldll.a --export-all --def libcurl.def $(libcurl_a_LIBRARIES) dllinit.o $(DLL_LIBS) -lwsock32 -lws2_32 -lwinmm
libcurl.dll libcurldll.a: libcurl.a libcurl.def $(DLLINITOBJ)
$(RM) $@
dllwrap --dllname $@ --output-lib libcurldll.a --export-all --def libcurl.def $(libcurl_a_LIBRARIES) $(DLLINITOBJ) $(DLL_LIBS) -lwsock32 -lws2_32 -lwinmm
$(STRIP) $@
# remove the last line above to keep debug info
@@ -84,9 +91,9 @@ libcurl.dll libcurldll.a: libcurl.a libcurl.def dllinit.o
$(COMPILE) -c $<
clean:
-@erase $(libcurl_a_OBJECTS)
$(RM) $(libcurl_a_OBJECTS)
distrib: clean
-@erase $(libcurl_a_LIBRARIES)
$(RM) $(libcurl_a_LIBRARIES)

View File

@@ -33,7 +33,7 @@
LIB_NAME = libcurl
LIB_NAME_DEBUG = libcurld
!IFNDEF OPENSSL_PATH
OPENSSL_PATH = ../../openssl-0.9.6
OPENSSL_PATH = ../../openssl-0.9.7a
!ENDIF
#############################################################
@@ -48,7 +48,8 @@ LNKDLL = link.exe /DLL /def:libcurl.def
LNKLIB = link.exe -lib
LFLAGS = /nologo
LINKLIBS = ws2_32.lib winmm.lib
SSLLIBS = libeay32.lib ssleay32.lib RSAglue.lib
SSLLIBS = libeay32.lib ssleay32.lib
# RSAglue.lib was formerly needed in the SSLLIBS
CFGSET = FALSE
######################
@@ -116,7 +117,7 @@ CFGSET = TRUE
!IF "$(CFG)" == "debug-dll"
TARGET =$(LIB_NAME_DEBUG).dll
DIROBJ =.\$(CFG)
LNK = $(LNKDLL) /out:$(TARGET) /IMPLIB:"$(LIB_NAME_DEBUG).lib"
LNK = $(LNKDLL) /DEBUG /out:$(TARGET) /IMPLIB:"$(LIB_NAME_DEBUG).lib" /PDB:"$(LIB_NAME_DEBUG).pdb"
CC = $(CCDEBUG)
CFGSET = TRUE
!ENDIF
@@ -139,7 +140,8 @@ CFGSET = TRUE
!IF "$(CFG)" == "debug-ssl-dll"
TARGET =$(LIB_NAME_DEBUG).dll
DIROBJ =.\$(CFG)
LNK = $(LNKDLL) $(LFLAGSSSL) /out:$(TARGET) /IMPLIB:"$(LIB_NAME_DEBUG).lib"
LFLAGSSSL = /LIBPATH:$(OPENSSL_PATH)/out32dll
LNK = $(LNKDLL) $(LFLAGSSSL) /DEBUG /out:$(TARGET) /IMPLIB:"$(LIB_NAME_DEBUG).lib" /PDB:"$(LIB_NAME_DEBUG).pdb"
LINKLIBS = $(LINKLIBS) $(SSLLIBS)
CC = $(CCDEBUG) $(CFLAGSSSL)
CFGSET = TRUE
@@ -199,7 +201,12 @@ X_OBJS= \
$(DIROBJ)\connect.obj \
$(DIROBJ)\hash.obj \
$(DIROBJ)\llist.obj \
$(DIROBJ)\multi.obj
$(DIROBJ)\share.obj \
$(DIROBJ)\multi.obj \
$(DIROBJ)\http_digest.obj \
$(DIROBJ)\http_negotiate.obj \
$(DIROBJ)\http_ntlm.obj \
$(DIROBJ)\md5.obj
all : $(TARGET)

44
lib/README.ares Normal file
View File

@@ -0,0 +1,44 @@
$Id$
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
How To Build libcurl to use ares for asynch name resolves
=========================================================
ares:
ftp://athena-dist.mit.edu/pub/ATHENA/ares/ares-1.1.1.tar.gz
http://curl.haxx.se/dev/ares-1.1.1.tar.gz
ares patch:
http://curl.haxx.se/dev/ares2.diff
Mac OS X quirk:
ares 1.1.1 contains too old versions of config.guess and config.sub. Copy
the ones from the curl source tree in to the ares source tree before you
run configure.
Build ares
==========
1. unpack the ares-1.1.1 archive
2. apply patch (if you're on Mac OS X or windows)
3. ./configure
4. make
Build libcurl to use ares
=========================
1. Move the ares source/build tree to subdirectory in the curl root named
'ares'.
2. ./buildconf
3. ./configure --enable-ares
4. make
If the configure script detects IPv6 support, you need to explicitly disable
that (--disable-ipv6) since ares isn't IPv6 compatible (yet).
Please let me know how it builds, runs, works or whatever. I had to do some
fairly big changes in some code parts to get this to work.

View File

@@ -5,15 +5,15 @@
HTTP/1.1 [RFC 2616] specifies that a client may request that a server encode
its response. This is usually used to compress a response using one of a set
of commonly available compression techniques. These schemes are `deflate'
(the zlib algorithm), `gzip' and `compress' [sec 3.5, RFC 2616]. A client
requests that the sever perform an encoding by including an Accept-Encoding
header in the request document. The value of the header should be one of the
recognized tokens `deflate', ... (there's a way to register new
schemes/tokens, see sec 3.5 of the spec). A server MAY honor the client's
encoding request. When a response is encoded, the server includes a
Content-Encoding header in the response. The value of the Content-Encoding
header indicates which scheme was used to encode the data.
of commonly available compression techniques. These schemes are `deflate' (the
zlib algorithm), `gzip' and `compress' [sec 3.5, RFC 2616]. A client requests
that the server perform an encoding by including an Accept-Encoding header in
the request document. The value of the header should be one of the recognized
tokens `deflate', ... (there's a way to register new schemes/tokens, see sec
3.5 of the spec). A server MAY honor the client's encoding request. When a
response is encoded, the server includes a Content-Encoding header in the
response. The value of the Content-Encoding header indicates which scheme was
used to encode the data.
A client may tell a server that it can understand several different encoding
schemes. In this case the server may choose any one of those and use it to
@@ -24,11 +24,10 @@ information on the Accept-Encoding header.
* Current support for content encoding:
I added support for the 'deflate' content encoding to both libcurl and curl.
Both regular and chunked transfers should work although I've tested only the
former. The library zlib is required for this feature. Places where I
modified the source code are commented and typically include my initials and
the date (e.g., 08/29/02 jhrg).
Support for the 'deflate' and 'gzip' content encoding are supported by
libcurl. Both regular and chunked transfers should work fine. The library
zlib is required for this feature. 'deflate' support was added by James
Gallagher, and support for the 'gzip' encoding was added by Dan Fandrich.
* The libcurl interface:
@@ -39,15 +38,23 @@ To cause libcurl to request a content encoding use:
where <string> is the intended value of the Accept-Encoding header.
Currently, libcurl only understands how to process responses that use the
`deflate' Content-Encoding, so the only value for CURLOPT_ENCODING that will
work (besides "identity," which does nothing) is "deflate." If a response is
encoded using either the `gzip' or `compress' methods, libcurl will return an
error indicating that the response could not be decoded. If <string> is null
or empty no Accept-Encoding header is generated.
"deflate" or "gzip" Content-Encoding, so the only values for CURLOPT_ENCODING
that will work (besides "identity," which does nothing) are "deflate" and
"gzip" If a response is encoded using the "compress" or methods, libcurl will
return an error indicating that the response could not be decoded. If
<string> is NULL no Accept-Encoding header is generated. If <string> is a
zero-length string, then an Accept-Encoding header containing all supported
encodings will be generated.
The CURLOPT_ENCODING must be set to any non-NULL value for content to be
automatically decoded. If it is not set and the server still sends encoded
content (despite not having been asked), the data is returned in its raw form
and the Content-Encoding type is not checked.
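As a rough sketch of the interface described above (the URL is a placeholder
and error checking is omitted), an application asking libcurl to request and
decode gzip-compressed responses could look like:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");

      /* ask the server for gzip; libcurl decodes the response body before
         handing it to the write callback */
      curl_easy_setopt(curl, CURLOPT_ENCODING, "gzip");

      /* a zero-length string instead would request all supported
         encodings: curl_easy_setopt(curl, CURLOPT_ENCODING, ""); */

      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    return 0;
  }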
* The curl interface:
Use the --compressed option with curl to cause it to ask servers to compress
responses using deflate.
James Gallagher <jgallagher@gso.uri.edu>
Dan Fandrich <dan@coneharvesters.com>

lib/README.memoryleak

@@ -0,0 +1,56 @@
$Id$
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
How To Track Down Suspected Memory Leaks in libcurl
===================================================
Single-threaded
Please note that this memory leak system is not adjusted to work in more
than one thread. If you want/need to use it in a multi-threaded app, please
adjust accordingly.
Build
Rebuild libcurl with -DCURLDEBUG (usually, rerunning configure with
--enable-debug fixes this). 'make clean' first, then 'make' so that all
files actually are rebuilt properly. It will also make sense to build
libcurl with the debug option (usually -g to the compiler) so that debugging
it will be easier if you actually do find a leak in the library.
This will create a library that has memory debugging enabled.
Modify Your Application
Add a line in your application code:
curl_memdebug("filename");
This will make the malloc debug system output a full trace of all
resource-using functions to the given file name. Make sure you rebuild your program
and that you link with the same libcurl you built for this purpose as
described above.
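As a minimal sketch (the URL and trace file name are placeholders, and the
prototype is declared by hand here since curl_memdebug() is not part of the
public API; it only exists in a libcurl built with -DCURLDEBUG as described
above):

  #include <curl/curl.h>

  void curl_memdebug(const char *logname); /* from the CURLDEBUG build only */

  int main(void)
  {
    CURL *curl;

    curl_memdebug("memdump");      /* start the trace before any allocations */
    curl_global_init(CURL_GLOBAL_ALL);

    curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);     /* clean up so non-leaks are freed */
    }
    curl_global_cleanup();
    return 0;
  }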
Run Your Application
Run your program as usual. Watch the specified memory trace file grow.
Make your program exit and use the proper libcurl cleanup functions etc, so
that all non-leaks are returned/freed properly.
Analyze the Flow
Use the tests/memanalyze.pl perl script to analyze the memdump file:
tests/memanalyze.pl < memdump
This now outputs a report on what resources were allocated but never
freed, etc. This report is very suitable for posting to the list!
If this doesn't produce any output, no leak was detected in libcurl; the
leak is then most likely to be in your code.


@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -26,15 +26,15 @@
/*
* Telnet option defines. Add more here if in need.
*/
#define TELOPT_BINARY 0 /* binary 8bit data */
#define TELOPT_SGA 3 /* Supress Go Ahead */
#define TELOPT_EXOPL 255 /* EXtended OPtions List */
#define TELOPT_TTYPE 24 /* Terminal TYPE */
#define TELOPT_XDISPLOC 35 /* X DISPlay LOCation */
#define CURL_TELOPT_BINARY 0 /* binary 8bit data */
#define CURL_TELOPT_SGA 3 /* Supress Go Ahead */
#define CURL_TELOPT_EXOPL 255 /* EXtended OPtions List */
#define CURL_TELOPT_TTYPE 24 /* Terminal TYPE */
#define CURL_TELOPT_XDISPLOC 35 /* X DISPlay LOCation */
#define TELOPT_NEW_ENVIRON 39 /* NEW ENVIRONment variables */
#define NEW_ENV_VAR 0
#define NEW_ENV_VALUE 1
#define CURL_TELOPT_NEW_ENVIRON 39 /* NEW ENVIRONment variables */
#define CURL_NEW_ENV_VAR 0
#define CURL_NEW_ENV_VALUE 1
/*
* The telnet options represented as strings
@@ -53,27 +53,27 @@ static const char *telnetoptions[]=
"OLD-ENVIRON", "AUTHENTICATION", "ENCRYPT", "NEW-ENVIRON"
};
#define TELOPT_MAXIMUM TELOPT_NEW_ENVIRON
#define CURL_TELOPT_MAXIMUM CURL_TELOPT_NEW_ENVIRON
#define TELOPT_OK(x) ((x) <= TELOPT_MAXIMUM)
#define TELOPT(x) telnetoptions[x]
#define CURL_TELOPT_OK(x) ((x) <= CURL_TELOPT_MAXIMUM)
#define CURL_TELOPT(x) telnetoptions[x]
#define NTELOPTS 40
#define CURL_NTELOPTS 40
/*
* First some defines
*/
#define xEOF 236 /* End Of File */
#define SE 240 /* Sub negotiation End */
#define NOP 241 /* No OPeration */
#define DM 242 /* Data Mark */
#define GA 249 /* Go Ahead, reverse the line */
#define SB 250 /* SuBnegotiation */
#define WILL 251 /* Our side WILL use this option */
#define WONT 252 /* Our side WON'T use this option */
#define DO 253 /* DO use this option! */
#define DONT 254 /* DON'T use this option! */
#define IAC 255 /* Interpret As Command */
#define CURL_xEOF 236 /* End Of File */
#define CURL_SE 240 /* Sub negotiation End */
#define CURL_NOP 241 /* No OPeration */
#define CURL_DM 242 /* Data Mark */
#define CURL_GA 249 /* Go Ahead, reverse the line */
#define CURL_SB 250 /* SuBnegotiation */
#define CURL_WILL 251 /* Our side WILL use this option */
#define CURL_WONT 252 /* Our side WON'T use this option */
#define CURL_DO 253 /* DO use this option! */
#define CURL_DONT 254 /* DON'T use this option! */
#define CURL_IAC 255 /* Interpret As Command */
/*
* Then those numbers represented as strings:
@@ -86,16 +86,16 @@ static const char *telnetcmds[]=
"WILL", "WONT", "DO", "DONT", "IAC"
};
#define TELCMD_MINIMUM xEOF /* the first one */
#define TELCMD_MAXIMUM IAC /* surprise, 255 is the last one! ;-) */
#define CURL_TELCMD_MINIMUM CURL_xEOF /* the first one */
#define CURL_TELCMD_MAXIMUM CURL_IAC /* surprise, 255 is the last one! ;-) */
#define TELQUAL_IS 0
#define TELQUAL_SEND 1
#define TELQUAL_INFO 2
#define TELQUAL_NAME 3
#define CURL_TELQUAL_IS 0
#define CURL_TELQUAL_SEND 1
#define CURL_TELQUAL_INFO 2
#define CURL_TELQUAL_NAME 3
#define TELCMD_OK(x) ( ((unsigned int)(x) >= TELCMD_MINIMUM) && \
((unsigned int)(x) <= TELCMD_MAXIMUM) )
#define TELCMD(x) telnetcmds[(x)-TELCMD_MINIMUM]
#define CURL_TELCMD_OK(x) ( ((unsigned int)(x) >= CURL_TELCMD_MINIMUM) && \
((unsigned int)(x) <= CURL_TELCMD_MAXIMUM) )
#define CURL_TELCMD(x) telnetcmds[(x)-CURL_TELCMD_MINIMUM]
#endif
#endif


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -42,7 +42,7 @@
#include "base64.h"
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -61,6 +61,8 @@ static void decodeQuantum(unsigned char *dest, char *src)
x = (x << 6) + 62;
else if(src[i] == '/')
x = (x << 6) + 63;
else if(src[i] == '=')
x = (x << 6);
}
dest[2] = (unsigned char)(x & 255); x >>= 8;
@@ -78,6 +80,7 @@ static void base64Decode(unsigned char *dest, char *src, int *rawLength)
int length = 0;
int equalsTerm = 0;
int i;
int numQuantums;
unsigned char lastQuantum[3];
while((src[length] != '=') && src[length])
@@ -85,16 +88,18 @@ static void base64Decode(unsigned char *dest, char *src, int *rawLength)
while(src[length+equalsTerm] == '=')
equalsTerm++;
numQuantums = (length + equalsTerm) / 4;
if(rawLength)
*rawLength = (length * 3 / 4) - equalsTerm;
*rawLength = (numQuantums * 3) - equalsTerm;
for(i = 0; i < length/4 - 1; i++) {
for(i = 0; i < numQuantums - 1; i++) {
decodeQuantum(dest, src);
dest += 3; src += 4;
}
decodeQuantum(lastQuantum, src);
for(i = 0; i < 3 - equalsTerm; i++) dest[i] = lastQuantum[i];
for(i = 0; i < 3 - equalsTerm; i++)
dest[i] = lastQuantum[i];
}
@@ -130,7 +135,7 @@ int Curl_base64_encode(const void *inp, int insize, char **outptr)
while(insize > 0) {
for (i = inputparts = 0; i < 3; i++) {
if(*indata) {
if(insize > 0) {
inputparts++;
ibuf[i] = *indata;
indata++;
@@ -194,77 +199,90 @@ int Curl_base64_decode(const char *str, void *data)
#define TEST_NEED_SUCK
void *suck(int *);
int main(int argc, char **argv, char **envp) {
char *base64;
int base64Len;
unsigned char *data;
int dataLen;
int main(int argc, char **argv, char **envp)
{
char *base64;
int base64Len;
unsigned char *data;
int dataLen;
data = (unsigned char *)suck(&dataLen);
base64Len = Curl_base64_encode(data, dataLen, &base64);
data = (unsigned char *)suck(&dataLen);
base64Len = Curl_base64_encode(data, dataLen, &base64);
fprintf(stderr, "%d\n", base64Len);
fprintf(stdout, "%s", base64);
free(base64); free(data);
return 0;
fprintf(stderr, "%d\n", base64Len);
fprintf(stdout, "%s", base64);
free(base64); free(data);
return 0;
}
#endif
#ifdef TEST_DECODE
/* decoding test harness. Read in a base64 string from stdin and write out the
* length returned by Curl_base64_decode, followed by the decoded data itself
*
* gcc -DTEST_DECODE base64.c -o base64 mprintf.o memdebug.o
*/
#include <stdio.h>
#define TEST_NEED_SUCK
void *suck(int *);
int main(int argc, char **argv, char **envp) {
char *base64;
int base64Len;
unsigned char *data;
int dataLen;
int main(int argc, char **argv, char **envp)
{
char *base64;
int base64Len;
unsigned char *data;
int dataLen;
int i, j;
base64 = (char *)suck(&base64Len);
data = (unsigned char *)malloc(base64Len * 3/4 + 8);
dataLen = Curl_base64_decode(base64, data);
base64 = (char *)suck(&base64Len);
data = (unsigned char *)malloc(base64Len * 3/4 + 8);
dataLen = Curl_base64_decode(base64, data);
fprintf(stderr, "%d\n", dataLen);
fprintf(stderr, "%d\n", dataLen);
fwrite(data,1,dataLen,stdout);
for(i=0; i < dataLen; i+=0x10) {
printf("0x%02x: ", i);
for(j=0; j < 0x10; j++)
if((j+i) < dataLen)
printf("%02x ", data[i+j]);
else
printf(" ");
printf(" | ");
free(base64); free(data);
return 0;
for(j=0; j < 0x10; j++)
if((j+i) < dataLen)
printf("%c", isgraph(data[i+j])?data[i+j]:'.');
else
break;
puts("");
}
free(base64); free(data);
return 0;
}
#endif
#ifdef TEST_NEED_SUCK
/* this function 'sucks' in as much as possible from stdin */
void *suck(int *lenptr) {
int cursize = 8192;
unsigned char *buf = NULL;
int lastread;
int len = 0;
void *suck(int *lenptr)
{
int cursize = 8192;
unsigned char *buf = NULL;
int lastread;
int len = 0;
do {
cursize *= 2;
buf = (unsigned char *)realloc(buf, cursize);
memset(buf + len, 0, cursize - len);
lastread = fread(buf + len, 1, cursize - len, stdin);
len += lastread;
} while(!feof(stdin));
lenptr[0] = len;
return (void *)buf;
do {
cursize *= 2;
buf = (unsigned char *)realloc(buf, cursize);
memset(buf + len, 0, cursize - len);
lastread = fread(buf + len, 1, cursize - len, stdin);
len += lastread;
} while(!feof(stdin));
lenptr[0] = len;
return (void *)buf;
}
#endif
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/


@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms


@@ -2287,82 +2287,6 @@ Certificate Ingredients:
ec:b9:94:6a:aa:12:4f:1a:dd:f5:77:b5:25:8c:f2:8a:0a:f1:
fc:52:5b:58
TC TrustCenter, Germany, Class 0 CA
===================================
MD5 Fingerprint: 35:85:49:8E:6E:57:FE:BD:97:F1:C9:46:23:3A:B6:7D
PEM Data:
-----BEGIN CERTIFICATE-----
MIIENTCCA56gAwIBAgIBATANBgkqhkiG9w0BAQQFADCBvDELMAkGA1UEBhMCREUx
EDAOBgNVBAgTB0hhbWJ1cmcxEDAOBgNVBAcTB0hhbWJ1cmcxOjA4BgNVBAoTMVRD
IFRydXN0Q2VudGVyIGZvciBTZWN1cml0eSBpbiBEYXRhIE5ldHdvcmtzIEdtYkgx
IjAgBgNVBAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDAgQ0ExKTAnBgkqhkiG9w0B
CQEWGmNlcnRpZmljYXRlQHRydXN0Y2VudGVyLmRlMB4XDTk4MDMwOTEzNTQ0OFoX
DTA1MTIzMTEzNTQ0OFowgbwxCzAJBgNVBAYTAkRFMRAwDgYDVQQIEwdIYW1idXJn
MRAwDgYDVQQHEwdIYW1idXJnMTowOAYDVQQKEzFUQyBUcnVzdENlbnRlciBmb3Ig
U2VjdXJpdHkgaW4gRGF0YSBOZXR3b3JrcyBHbWJIMSIwIAYDVQQLExlUQyBUcnVz
dENlbnRlciBDbGFzcyAwIENBMSkwJwYJKoZIhvcNAQkBFhpjZXJ0aWZpY2F0ZUB0
cnVzdGNlbnRlci5kZTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA333mvr/V
8C9tTg7R4I0LfztU6IrisJ8oxYrGubMzJ/UnyhpMVBJrtLJGsx1Ls/QhC0sCLqHC
NJyFoMR4EdvbaycrCSoYTkDMn3EZZ5l0onw/wdiLI8hjO4ohq1zeHvSN3LQYwwVz
9Gq0ofoBCCsBD203W6o4hmc51+Vf+uR+zKMCAwEAAaOCAUMwggE/MEAGCWCGSAGG
+EIBAwQzFjFodHRwczovL3d3dy50cnVzdGNlbnRlci5kZS9jZ2ktYmluL2NoZWNr
LXJldi5jZ2k/MEAGCWCGSAGG+EIBBAQzFjFodHRwczovL3d3dy50cnVzdGNlbnRl
ci5kZS9jZ2ktYmluL2NoZWNrLXJldi5jZ2k/MDwGCWCGSAGG+EIBBwQvFi1odHRw
czovL3d3dy50cnVzdGNlbnRlci5kZS9jZ2ktYmluL1JlbmV3LmNnaT8wPgYJYIZI
AYb4QgEIBDEWL2h0dHA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvZ3VpZGVsaW5lcy9p
bmRleC5odG1sMCgGCWCGSAGG+EIBDQQbFhlUQyBUcnVzdENlbnRlciBDbGFzcyAw
IENBMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQQFAAOBgQBNB39fCTAZ
kqoFR3qUdVQqrs/82AxC4UU4KySVssqHynnEw5eQXmIYxsk4YUxoNdNMFBHrxM2h
qdjFnmgnMgc1RQT4XyGgYB4cAEgEWNLFy65tMm49d5WMhcflrlCddUp7/wsneepN
pFn/7FrqJqU5g6TReM6nqX683SvKEpMDSg==
-----END CERTIFICATE-----
Certificate Ingredients:
Data:
Version: 3 (0x2)
Serial Number: 1 (0x1)
Signature Algorithm: md5WithRSAEncryption
Issuer: C=DE, ST=Hamburg, L=Hamburg, O=TC TrustCenter for Security in Data Networks GmbH, OU=TC TrustCenter Class 0 CA/Email=certificate@trustcenter.de
Validity
Not Before: Mar 9 13:54:48 1998 GMT
Not After : Dec 31 13:54:48 2005 GMT
Subject: C=DE, ST=Hamburg, L=Hamburg, O=TC TrustCenter for Security in Data Networks GmbH, OU=TC TrustCenter Class 0 CA/Email=certificate@trustcenter.de
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
RSA Public Key: (1024 bit)
Modulus (1024 bit):
00:df:7d:e6:be:bf:d5:f0:2f:6d:4e:0e:d1:e0:8d:
0b:7f:3b:54:e8:8a:e2:b0:9f:28:c5:8a:c6:b9:b3:
33:27:f5:27:ca:1a:4c:54:12:6b:b4:b2:46:b3:1d:
4b:b3:f4:21:0b:4b:02:2e:a1:c2:34:9c:85:a0:c4:
78:11:db:db:6b:27:2b:09:2a:18:4e:40:cc:9f:71:
19:67:99:74:a2:7c:3f:c1:d8:8b:23:c8:63:3b:8a:
21:ab:5c:de:1e:f4:8d:dc:b4:18:c3:05:73:f4:6a:
b4:a1:fa:01:08:2b:01:0f:6d:37:5b:aa:38:86:67:
39:d7:e5:5f:fa:e4:7e:cc:a3
Exponent: 65537 (0x10001)
X509v3 extensions:
Netscape Revocation Url:
https://www.trustcenter.de/cgi-bin/check-rev.cgi?
Netscape CA Revocation Url:
https://www.trustcenter.de/cgi-bin/check-rev.cgi?
Netscape Renewal Url:
https://www.trustcenter.de/cgi-bin/Renew.cgi?
Netscape CA Policy Url:
http://www.trustcenter.de/guidelines/index.html
Netscape Comment:
TC TrustCenter Class 0 CA
Netscape Cert Type:
SSL CA, S/MIME CA, Object Signing CA
Signature Algorithm: md5WithRSAEncryption
4d:07:7f:5f:09:30:19:92:aa:05:47:7a:94:75:54:2a:ae:cf:
fc:d8:0c:42:e1:45:38:2b:24:95:b2:ca:87:ca:79:c4:c3:97:
90:5e:62:18:c6:c9:38:61:4c:68:35:d3:4c:14:11:eb:c4:cd:
a1:a9:d8:c5:9e:68:27:32:07:35:45:04:f8:5f:21:a0:60:1e:
1c:00:48:04:58:d2:c5:cb:ae:6d:32:6e:3d:77:95:8c:85:c7:
e5:ae:50:9d:75:4a:7b:ff:0b:27:79:ea:4d:a4:59:ff:ec:5a:
ea:26:a5:39:83:a4:d1:78:ce:a7:a9:7e:bc:dd:2b:ca:12:93:
03:4a
TC TrustCenter, Germany, Class 1 CA
===================================
MD5 Fingerprint: 64:3F:F8:3E:52:14:4A:59:BA:93:56:04:0B:23:02:D1


@@ -369,3 +369,13 @@
/* Define if you have the `sigsetjmp' function. */
#define HAVE_SIGSETJMP 1
/* Define to 1 if you have the <setjmp.h> header file. */
#define HAVE_SETJMP_H 1
/*
* This needs to be defined for OpenSSL 0.9.7 and other versions that have the
* ENGINE stuff supported. If an include of "openssl/engine.h" fails, then
* undefine the define below.
*/
#define HAVE_OPENSSL_ENGINE_H 1

lib/config.dj

@@ -0,0 +1,92 @@
#ifndef _CURL_CONFIG_DJGPP_H
#define _CURL_CONFIG_DJGPP_H
#define OS "djgpp"
#define PACKAGE "curl"
#define CURL_CA_BUNDLE "/dev/env/CURL_CA_BUNDLE"
#if (DJGPP_MINOR >= 4)
/* #define HAVE_DLOPEN 1 maybe not (DXE3) */
#endif
#if 1 /* use ioctlsocket() via fsext'ed fcntl() */
#define HAVE_O_NONBLOCK 1
#else
#define HAVE_IOCTLSOCKET 1
#endif
#define HAVE_ALARM 1
#define HAVE_ARPA_INET_H 1
#define HAVE_CLOSESOCKET 1
#define HAVE_FCNTL_H 1
#define HAVE_GETHOSTBYADDR 1
#define HAVE_GETHOSTNAME 1
#define HAVE_GETPASS 1
#define HAVE_GETSERVBYNAME 1
#define HAVE_GETTIMEOFDAY 1
#define HAVE_INET_ADDR 1
#define HAVE_INET_NTOA 1
#define HAVE_IO_H 1
#define HAVE_MALLOC_H 1
#define HAVE_MEMORY_H 1
#define HAVE_NETDB_H 1
#define HAVE_NETINET_IN_H 1
#define HAVE_NET_IF_H 1
#define HAVE_PERROR 1
#define HAVE_SELECT 1
#define HAVE_SETJMP_H 1
#define HAVE_SETVBUF 1
#define HAVE_SIGNAL 1
#define HAVE_SIGACTION 1
#define HAVE_SIGSETJMP 1
#define HAVE_SOCKET 1
#define HAVE_STRCASECMP 1
#define HAVE_STRDUP 1
#define HAVE_STRFTIME 1
#define HAVE_STRICMP 1
#define HAVE_STRSTR 1
#define HAVE_SYS_SOCKET_H 1
#define HAVE_SYS_STAT_H 1
#define HAVE_SYS_TYPES_H 1
#define HAVE_TERMIOS_H 1
#define HAVE_TIME_H 1
#define HAVE_UNAME 1
#define HAVE_UNISTD_H 1
#define HAVE_VPRINTF 1
#define RETSIGTYPE void
#define SIZEOF_LONG_DOUBLE 16
#define SIZEOF_LONG_LONG 8
#define STDC_HEADERS 1
#define TIME_WITH_SYS_TIME 1
#define BSD
#define USE_ZLIB
/* #define MALLOCDEBUG */
#ifdef HAVE_OPENSSL_ENGINE_H /* on cmd-line */
#define HAVE_OPENSSL_X509_H 1
#define HAVE_OPENSSL_SSL_H 1
#define HAVE_OPENSSL_RSA_H 1
#define HAVE_OPENSSL_PEM_H 1
#define HAVE_OPENSSL_ERR_H 1
#define HAVE_OPENSSL_CRYPTO_H 1
#define HAVE_LIBSSL 1
#define HAVE_LIBCRYPTO 1
#define OPENSSL_NO_KRB5 1
#endif
#define in_addr_t u_long
#define socklen_t int
#define ssize_t int
#include <stdlib.h>
#include <string.h>
#include <tcp.h> /* Watt-32 API */
#undef word
#endif /* _CURL_CONFIG_DJGPP_H */


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -70,6 +70,7 @@
#define EINPROGRESS WSAEINPROGRESS
#define EWOULDBLOCK WSAEWOULDBLOCK
#define EISCONN WSAEISCONN
#define ENOTSOCK WSAENOTSOCK
#endif
#include "urldata.h"
@@ -77,12 +78,11 @@
#include "if2ip.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
static
int geterrno(void)
int Curl_ourerrno(void)
{
#ifdef WIN32
return (int)GetLastError();
@@ -144,7 +144,11 @@ int Curl_nonblock(int socket, /* operate on this */
}
/*
* Return 0 on fine connect, -1 on error and 1 on timeout.
* waitconnect() returns:
* 0 fine connect
* -1 select() error
* 1 select() timeout
* 2 select() returned with an error condition
*/
static
int waitconnect(int sockfd, /* socket */
@@ -176,10 +180,9 @@ int waitconnect(int sockfd, /* socket */
/* timeout, no connect today */
return 1;
if(FD_ISSET(sockfd, &errfd)) {
if(FD_ISSET(sockfd, &errfd))
/* error condition caught */
return 2;
}
/* we have a connect! */
return 0;
@@ -188,17 +191,8 @@ int waitconnect(int sockfd, /* socket */
static CURLcode bindlocal(struct connectdata *conn,
int sockfd)
{
#if !defined(WIN32)||defined(__CYGWIN32__)
/* We don't generally like checking for OS-versions, we should make this
HAVE_XXXX based, although at the moment I don't have a decent test for
this! */
#ifdef HAVE_INET_NTOA
#ifndef INADDR_NONE
#define INADDR_NONE (in_addr_t) ~0
#endif
struct SessionHandle *data = conn->data;
/*************************************************************
@@ -206,16 +200,24 @@ static CURLcode bindlocal(struct connectdata *conn,
*************************************************************/
if (strlen(data->set.device)<255) {
struct sockaddr_in sa;
Curl_addrinfo *h=NULL;
struct Curl_dns_entry *h=NULL;
size_t size;
char myhost[256] = "";
in_addr_t in;
int rc;
if(Curl_if2ip(data->set.device, myhost, sizeof(myhost))) {
/* First check if the given name is an IP address */
in=inet_addr(data->set.device);
if((in == CURL_INADDR_NONE) &&
Curl_if2ip(data->set.device, myhost, sizeof(myhost))) {
/*
* We now have the numerical IPv4-style x.y.z.w in the 'myhost' buffer
*/
h = Curl_resolv(data, myhost, 0);
rc = Curl_resolv(conn, myhost, 0, &h);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &h);
}
else {
if(strlen(data->set.device)>1) {
@@ -223,11 +225,14 @@ static CURLcode bindlocal(struct connectdata *conn,
* This was not an interface, resolve the name as a host name
* or IP number
*/
h = Curl_resolv(data, data->set.device, 0);
if(h) {
rc = Curl_resolv(conn, data->set.device, 0, &h);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &h);
if(h)
/* we know data->set.device is shorter than the myhost array */
strcpy(myhost, data->set.device);
}
}
}
@@ -244,17 +249,31 @@ static CURLcode bindlocal(struct connectdata *conn,
infof(data, "We bind local end to %s\n", myhost);
in=inet_addr(myhost);
if (INADDR_NONE != in) {
if (CURL_INADDR_NONE != in) {
if ( h ) {
memset((char *)&sa, 0, sizeof(sa));
Curl_addrinfo *addr = h->addr;
Curl_resolv_unlock(data, h);
/* we don't need it anymore after this function has returned */
#ifdef ENABLE_IPV6
memcpy((char *)&sa.sin_addr, h->ai_addr, h->ai_addrlen);
sa.sin_family = h->ai_family;
(void)sa; /* prevent compiler warning */
if( bind(sockfd, addr->ai_addr, addr->ai_addrlen) >= 0) {
/* we succeeded to bind */
struct sockaddr_in6 add;
size = sizeof(add);
if(getsockname(sockfd, (struct sockaddr *) &add,
(socklen_t *)&size)<0) {
failf(data, "getsockname() failed");
return CURLE_HTTP_PORT_FAILED;
}
}
#else
memcpy((char *)&sa.sin_addr, h->h_addr, h->h_length);
memset((char *)&sa, 0, sizeof(sa));
memcpy((char *)&sa.sin_addr, addr->h_addr, addr->h_length);
sa.sin_family = AF_INET;
#endif
sa.sin_addr.s_addr = in;
sa.sin_port = 0; /* get any port */
@@ -269,6 +288,7 @@ static CURLcode bindlocal(struct connectdata *conn,
return CURLE_HTTP_PORT_FAILED;
}
}
#endif
else {
switch(errno) {
case EBADF:
@@ -318,7 +338,6 @@ static CURLcode bindlocal(struct connectdata *conn,
} /* end of device selection support */
#endif /* end of HAVE_INET_NTOA */
#endif /* end of not WIN32 */
return CURLE_HTTP_PORT_FAILED;
}
@@ -332,7 +351,7 @@ int socketerror(int sockfd)
if( -1 == getsockopt(sockfd, SOL_SOCKET, SO_ERROR,
(void *)&err, &errSize))
err = geterrno();
err = Curl_ourerrno();
return err;
}
@@ -375,6 +394,11 @@ CURLcode Curl_is_connected(struct connectdata *conn,
return CURLE_OPERATION_TIMEOUTED;
}
}
if(conn->bits.tcpconnect) {
/* we are connected already! */
*connected = TRUE;
return CURLE_OK;
}
/* check for connect without timeout as we want to return immediately */
rc = waitconnect(sockfd, 0);
@@ -387,8 +411,15 @@ CURLcode Curl_is_connected(struct connectdata *conn,
return CURLE_OK;
}
/* nope, not connected for real */
failf(data, "Connection failed, socket error: %d", err);
return CURLE_COULDNT_CONNECT;
}
else if(1 != rc) {
int error = Curl_ourerrno();
failf(data, "Failed connect to %s:%d, errno: %d",
conn->hostname, conn->port, error);
return CURLE_COULDNT_CONNECT;
}
/*
* If the connection phase is "done" here, we should attempt to connect
* to the "next address" in the Curl_hostaddr structure that we resolved
@@ -408,7 +439,7 @@ CURLcode Curl_is_connected(struct connectdata *conn,
*/
CURLcode Curl_connecthost(struct connectdata *conn, /* context */
Curl_addrinfo *remotehost, /* use one in here */
struct Curl_dns_entry *remotehost, /* use this one */
int port, /* connect to this */
int *sockconn, /* the connected socket */
Curl_ipconnect **addr, /* the one we used */
@@ -477,7 +508,7 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
struct addrinfo *ai;
port =0; /* prevent compiler warning */
for (ai = remotehost; ai; ai = ai->ai_next, aliasindex++) {
for (ai = remotehost->addr; ai; ai = ai->ai_next, aliasindex++) {
sockfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
if (sockfd < 0)
continue;
@@ -496,7 +527,7 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
rc = connect(sockfd, ai->ai_addr, ai->ai_addrlen);
if(-1 == rc) {
int error=geterrno();
int error=Curl_ourerrno();
switch (error) {
case EINPROGRESS:
@@ -537,6 +568,9 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
failf(data, "socket error: %d", err);
/* we are _not_ connected, it was a false alert, continue please */
}
else if(2 == rc)
/* waitconnect() returned error */
;
else if(data->state.used_interface == Curl_if_multi) {
/* When running the multi interface, we bail out here */
rc = 0;
@@ -569,7 +603,7 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
/*
* Connecting with IPv4-only support
*/
if(!remotehost->h_addr_list[0]) {
if(!remotehost->addr->h_addr_list[0]) {
/* If there is no addresses in the address list, then we return
error right away */
failf(data, "no address available");
@@ -596,23 +630,23 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
/* This is the loop that attempts to connect to all IP-addresses we
know for the given host. One by one. */
for(rc=-1, aliasindex=0;
rc && (struct in_addr *)remotehost->h_addr_list[aliasindex];
rc && (struct in_addr *)remotehost->addr->h_addr_list[aliasindex];
aliasindex++) {
struct sockaddr_in serv_addr;
/* do this nasty work to do the connect */
memset((char *) &serv_addr, '\0', sizeof(serv_addr));
memcpy((char *)&(serv_addr.sin_addr),
(struct in_addr *)remotehost->h_addr_list[aliasindex],
(struct in_addr *)remotehost->addr->h_addr_list[aliasindex],
sizeof(struct in_addr));
serv_addr.sin_family = remotehost->h_addrtype;
serv_addr.sin_family = remotehost->addr->h_addrtype;
serv_addr.sin_port = htons((unsigned short)port);
rc = connect(sockfd, (struct sockaddr *)&serv_addr,
sizeof(serv_addr));
if(-1 == rc) {
int error=geterrno();
int error=Curl_ourerrno();
switch (error) {
case EINPROGRESS:
@@ -639,6 +673,15 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
}
}
/* The '1 == rc' comes from the waitconnect(), and not from connect().
We can be sure of this since connect() cannot return 1. */
if((1 == rc) && (data->state.used_interface == Curl_if_multi)) {
/* Timeout when running the multi interface, we return here with a
CURLE_OK return code. */
rc = 0;
break;
}
if(0 == rc) {
int err = socketerror(sockfd);
if ((0 == err) || (EISCONN == err)) {
@@ -651,12 +694,6 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
}
if(0 != rc) {
if(data->state.used_interface == Curl_if_multi) {
/* When running the multi interface, we bail out here */
rc = 0;
break;
}
/* get a new timeout for next attempt */
after = Curl_tvnow();
timeout_ms -= Curl_tvdiff(after, before);
@@ -681,7 +718,7 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
if(addr)
/* this is the address we've connected to */
*addr = (struct in_addr *)remotehost->h_addr_list[aliasindex];
*addr = (struct in_addr *)remotehost->addr->h_addr_list[aliasindex];
#endif
/* allow NULL-pointers to get passed in */


@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -31,10 +31,12 @@ CURLcode Curl_is_connected(struct connectdata *conn,
bool *connected);
CURLcode Curl_connecthost(struct connectdata *conn,
Curl_addrinfo *host, /* connect to this */
struct Curl_dns_entry *host, /* connect to this */
int port, /* connect to this port number */
int *sockconn, /* not set if error is returned */
Curl_ipconnect **addr, /* the one we used */
bool *connected /* truly connected? */
);
int Curl_ourerrno(void);
#endif


@@ -1,16 +1,16 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
@@ -25,13 +25,26 @@
#ifdef HAVE_LIBZ
#include <stdlib.h>
#include <string.h>
#include "urldata.h"
#include <curl/curl.h>
#include <curl/types.h>
#include "sendf.h"
#define DSIZ 4096 /* buffer size for decompressed data */
#define DSIZ 0x10000 /* buffer size for decompressed data */
#define GZIP_MAGIC_0 0x1f
#define GZIP_MAGIC_1 0x8b
/* gzip flag byte */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
#define COMMENT 0x10 /* bit 4 set: file comment present */
#define RESERVED 0xE0 /* bits 5..7: reserved */
static CURLcode
process_zlib_error(struct SessionHandle *data, z_stream *z)
@@ -55,7 +68,7 @@ exit_zlib(z_stream *z, bool *zlib_init, CURLcode result)
}
CURLcode
Curl_unencode_deflate_write(struct SessionHandle *data,
Curl_unencode_deflate_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread)
{
@@ -63,7 +76,7 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
int result; /* Curl_client_write status */
char decomp[DSIZ]; /* Put the decompressed data here. */
z_stream *z = &k->z; /* zlib state structure */
/* Initialize zlib? */
if (!k->zlib_init) {
z->zalloc = (alloc_func)Z_NULL;
@@ -74,7 +87,7 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
k->zlib_init = 1;
}
/* Set the compressed input when this fucntion is called */
/* Set the compressed input when this function is called */
z->next_in = (Bytef *)k->str;
z->avail_in = nread;
@@ -87,11 +100,12 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
status = inflate(z, Z_SYNC_FLUSH);
if (status == Z_OK || status == Z_STREAM_END) {
result = Curl_client_write(data, CLIENTWRITE_BODY, decomp,
DSIZ - z->avail_out);
/* if !CURLE_OK, clean up, return */
if (result) {
return exit_zlib(z, &k->zlib_init, result);
if (DSIZ - z->avail_out) {
result = Curl_client_write(data, CLIENTWRITE_BODY, decomp,
DSIZ - z->avail_out);
/* if !CURLE_OK, clean up, return */
if (result)
return exit_zlib(z, &k->zlib_init, result);
}
/* Done?; clean up, return */
@@ -103,7 +117,233 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
}
/* Done with these bytes, exit */
if (status == Z_OK && z->avail_in == 0 && z->avail_out > 0)
if (status == Z_OK && z->avail_in == 0 && z->avail_out > 0)
return result;
}
else { /* Error; exit loop, handle below */
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
}
}
/* Skip over the gzip header */
static enum {
GZIP_OK,
GZIP_BAD,
GZIP_UNDERFLOW
} check_gzip_header(unsigned char const *data, ssize_t len, ssize_t *headerlen)
{
int method, flags;
const ssize_t totallen = len;
/* The shortest header is 10 bytes */
if (len < 10)
return GZIP_UNDERFLOW;
if ((data[0] != GZIP_MAGIC_0) || (data[1] != GZIP_MAGIC_1))
return GZIP_BAD;
method = data[2];
flags = data[3];
if (method != Z_DEFLATED || (flags & RESERVED) != 0) {
/* Can't handle this compression method or unknown flag */
return GZIP_BAD;
}
/* Skip over time, xflags, OS code and all previous bytes */
len -= 10;
data += 10;
if (flags & EXTRA_FIELD) {
ssize_t extra_len;
if (len < 2)
return GZIP_UNDERFLOW;
extra_len = (data[1] << 8) | data[0];
if (len < (extra_len+2))
return GZIP_UNDERFLOW;
len -= (extra_len + 2);
}
if (flags & ORIG_NAME) {
/* Skip over NUL-terminated file name */
while (len && *data) {
--len;
++data;
}
if (!len || *data)
return GZIP_UNDERFLOW;
/* Skip over the NUL */
--len;
++data;
}
if (flags & COMMENT) {
/* Skip over NUL-terminated comment */
while (len && *data) {
--len;
++data;
}
if (!len || *data)
return GZIP_UNDERFLOW;
/* Skip over the NUL */
--len;
++data;
}
if (flags & HEAD_CRC) {
if (len < 2)
return GZIP_UNDERFLOW;
len -= 2;
data += 2;
}
*headerlen = totallen - len;
return GZIP_OK;
}
CURLcode
Curl_unencode_gzip_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread)
{
int status; /* zlib status */
int result; /* Curl_client_write status */
char decomp[DSIZ]; /* Put the decompressed data here. */
z_stream *z = &k->z; /* zlib state structure */
/* Initialize zlib? */
if (!k->zlib_init) {
z->zalloc = (alloc_func)Z_NULL;
z->zfree = (free_func)Z_NULL;
z->opaque = 0; /* of dubious use 08/27/02 jhrg */
if (inflateInit2(z, -MAX_WBITS) != Z_OK)
return process_zlib_error(data, z);
k->zlib_init = 1; /* Initial call state */
}
/* This next mess is to get around the potential case where there isn't
enough data passed in to skip over the gzip header. If that happens,
we malloc a block and copy what we have then wait for the next call. If
there still isn't enough (this is definitely a worst-case scenario), we
make the block bigger, copy the next part in and keep waiting. */
/* Skip over gzip header? */
if (k->zlib_init == 1) {
/* Initial call state */
ssize_t hlen;
switch (check_gzip_header((unsigned char *)k->str, nread, &hlen)) {
case GZIP_OK:
z->next_in = (Bytef *)k->str + hlen;
z->avail_in = nread - hlen;
k->zlib_init = 3; /* Inflating stream state */
break;
case GZIP_UNDERFLOW:
/* We need more data so we can find the end of the gzip header.
It's possible that the memory block we malloc here will never be
freed if the transfer abruptly aborts after this point. Since it's
unlikely that circumstances will be right for this code path to be
followed in the first place, and it's even more unlikely for a transfer
to fail immediately afterwards, it should seldom be a problem. */
z->avail_in = nread;
z->next_in = malloc(z->avail_in);
if (z->next_in == NULL) {
return exit_zlib(z, &k->zlib_init, CURLE_OUT_OF_MEMORY);
}
memcpy(z->next_in, k->str, z->avail_in);
k->zlib_init = 2; /* Need more gzip header data state */
/* We don't have any data to inflate yet */
return CURLE_OK;
case GZIP_BAD:
default:
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
}
else if (k->zlib_init == 2) {
/* Need more gzip header data state */
ssize_t hlen;
unsigned char *oldblock = z->next_in;
z->avail_in += nread;
z->next_in = realloc(z->next_in, z->avail_in);
if (z->next_in == NULL) {
free(oldblock);
return exit_zlib(z, &k->zlib_init, CURLE_OUT_OF_MEMORY);
}
/* Append the new block of data to the previous one */
memcpy(z->next_in + z->avail_in - nread, k->str, nread);
switch (check_gzip_header(z->next_in, z->avail_in, &hlen)) {
case GZIP_OK:
/* This is the zlib stream data */
free(z->next_in);
/* Don't point into the malloced block since we just freed it */
z->next_in = (Bytef *)k->str + hlen + nread - z->avail_in;
z->avail_in = z->avail_in - hlen;
k->zlib_init = 3; /* Inflating stream state */
break;
case GZIP_UNDERFLOW:
/* We still don't have any data to inflate! */
return CURLE_OK;
case GZIP_BAD:
default:
free(z->next_in);
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
}
else {
/* Inflating stream state */
z->next_in = (Bytef *)k->str;
z->avail_in = nread;
}
if (z->avail_in == 0) {
/* We don't have any data to inflate; wait until next time */
return CURLE_OK;
}
/* because the buffer size is fixed, iteratively decompress
and transfer to the client via client_write. */
for (;;) {
/* (re)set buffer for decompressed output for every iteration */
z->next_out = (Bytef *)&decomp[0];
z->avail_out = DSIZ;
status = inflate(z, Z_SYNC_FLUSH);
if (status == Z_OK || status == Z_STREAM_END) {
if(DSIZ - z->avail_out) {
result = Curl_client_write(data, CLIENTWRITE_BODY, decomp,
DSIZ - z->avail_out);
/* if !CURLE_OK, clean up, return */
if (result)
return exit_zlib(z, &k->zlib_init, result);
}
/* Done?; clean up, return */
/* We should really check the gzip CRC here */
if (status == Z_STREAM_END) {
if (inflateEnd(z) == Z_OK)
return exit_zlib(z, &k->zlib_init, result);
else
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
/* Done with these bytes, exit */
if (status == Z_OK && z->avail_in == 0 && z->avail_out > 0)
return result;
}
else { /* Error; exit loop, handle below */
@@ -112,11 +352,3 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
}
}
#endif /* HAVE_LIBZ */
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -20,15 +20,22 @@
*
* $Id$
***************************************************************************/
#include "setup.h"
/*
* Comma-separated list all supported Content-Encodings ('identity' is implied)
*/
#ifdef HAVE_LIBZ
#define ALL_CONTENT_ENCODINGS "deflate, gzip"
#else
#define ALL_CONTENT_ENCODINGS "identity"
#endif
CURLcode Curl_unencode_deflate_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread);
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/
CURLcode
Curl_unencode_gzip_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread);


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -77,6 +77,7 @@ Example set of cookies:
13-Jun-1988 03:04:55 GMT; domain=.fidelity.com; path=/; secure
****/
#include "setup.h"
#ifndef CURL_DISABLE_HTTP
@@ -85,13 +86,15 @@ Example set of cookies:
#include <string.h>
#include <ctype.h>
#include "urldata.h"
#include "cookie.h"
#include "getdate.h"
#include "strequal.h"
#include "strtok.h"
#include "sendf.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -110,6 +113,17 @@ free_cookiemess(struct Cookie *co)
free(co);
}
static bool tailmatch(const char *little, const char *bigone)
{
unsigned int littlelen = strlen(little);
unsigned int biglen = strlen(bigone);
if(littlelen > biglen)
return FALSE;
return strequal(little, bigone+biglen-littlelen);
}
/****************************************************************************
*
* Curl_cookie_add()
@@ -119,10 +133,18 @@ free_cookiemess(struct Cookie *co)
***************************************************************************/
struct Cookie *
Curl_cookie_add(struct CookieInfo *c,
Curl_cookie_add(struct SessionHandle *data,
/* The 'data' pointer here may be NULL at times, and thus
must only be used very carefully for things that can deal
with data being NULL. Such as infof() and similar */
struct CookieInfo *c,
bool httpheader, /* TRUE if HTTP header-style line */
char *lineptr, /* first character of the line */
char *domain) /* default domain */
char *domain, /* default domain */
char *path) /* full path used when this cookie is set,
used to get default path for the cookie
unless set */
{
struct Cookie *clist;
char what[MAX_COOKIE_LINE];
@@ -133,6 +155,7 @@ Curl_cookie_add(struct CookieInfo *c,
struct Cookie *lastc=NULL;
time_t now = time(NULL);
bool replace_old = FALSE;
bool badcookie = FALSE; /* cookies are good by default. mmmmm yummy */
/* First, alloc and init a new struct for it */
co = (struct Cookie *)malloc(sizeof(struct Cookie));
@@ -185,8 +208,73 @@ Curl_cookie_add(struct CookieInfo *c,
co->path=strdup(whatptr);
}
else if(strequal("domain", name)) {
co->domain=strdup(whatptr);
co->field1= (whatptr[0]=='.')?2:1;
/* note that this name may or may not have a preceeding dot, but
we don't care about that, we treat the names the same anyway */
char *ptr=whatptr;
int dotcount=1;
unsigned int i;
static const char *seventhree[]= {
"com", "edu", "net", "org", "gov", "mil", "int"
};
/* Count the dots, we need to make sure that there are THREE dots
in the normal domains, or TWO in the seventhree-domains. */
if('.' == whatptr[0])
/* don't count the initial dot, assume it */
ptr++;
do {
ptr = strchr(ptr, '.');
if(ptr) {
ptr++;
dotcount++;
}
} while(ptr);
for(i=0;
i<sizeof(seventhree)/sizeof(seventhree[0]); i++) {
if(tailmatch(seventhree[i], whatptr)) {
dotcount++; /* we allow one dot less for these */
break;
}
}
/* The original Netscape cookie spec defined that this domain name
MUST have three dots (or two if one of the seven holy TLDs),
but it seems that these kinds of cookies are in use "out there"
so we cannot be that strict. I've therefore lowered the check
to not allow less than two dots. */
if(dotcount < 2) {
/* Received and skipped a cookie with a domain using too few
dots. */
badcookie=TRUE; /* mark this as a bad cookie */
infof(data, "skipped cookie with illegal dotcount domain: %s",
whatptr);
}
else {
/* Now, we make sure that our host is within the given domain,
or the given domain is not valid and thus cannot be set. */
if(!domain || tailmatch(whatptr, domain)) {
char *ptr=whatptr;
if(ptr[0] == '.')
ptr++;
co->domain=strdup(ptr); /* dont prefix with dots internally */
co->tailmatch=TRUE; /* we always do that if the domain name was
given */
}
else {
/* we did not get a tailmatch and then the attempted set domain
is not a domain to which the current host belongs. Mark as
bad. */
badcookie=TRUE;
infof(data, "skipped cookie with bad tailmatch domain: %s",
whatptr);
}
}
}
else if(strequal("version", name)) {
co->version=strdup(whatptr);
@@ -248,8 +336,11 @@ Curl_cookie_add(struct CookieInfo *c,
semiptr=strchr(ptr, '\0');
} while(semiptr);
if(NULL == co->name) {
/* we didn't get a cookie name, this is an illegal line, bail out */
if(badcookie || (NULL == co->name)) {
/* we didn't get a cookie name or a bad one,
this is an illegal line, bail out */
if(co->expirestr)
free(co->expirestr);
if(co->domain)
free(co->domain);
if(co->path)
@@ -263,8 +354,20 @@ Curl_cookie_add(struct CookieInfo *c,
}
if(NULL == co->domain)
/* no domain given in the header line, set the default now */
/* no domain was given in the header line, set the default now */
co->domain=domain?strdup(domain):NULL;
if((NULL == co->path) && path) {
/* no path was given in the header line, set the default now */
char *endslash = strrchr(path, '/');
if(endslash) {
int pathlen = endslash-path+1; /* include the ending slash */
co->path=malloc(pathlen+1); /* one extra for the zero byte */
if(co->path) {
memcpy(co->path, path, pathlen);
co->path[pathlen]=0; /* zero terminate */
}
}
}
}
else {
/* This line is NOT a HTTP header style line, we do offer support for
@@ -296,9 +399,12 @@ Curl_cookie_add(struct CookieInfo *c,
/* Now loop through the fields and init the struct we already have
allocated */
for(ptr=firstptr, fields=0; ptr; ptr=strtok_r(NULL, "\t", &tok_buf), fields++) {
for(ptr=firstptr, fields=0; ptr;
ptr=strtok_r(NULL, "\t", &tok_buf), fields++) {
switch(fields) {
case 0:
if(ptr[0]=='.') /* skip preceeding dots */
ptr++;
co->domain = strdup(ptr);
break;
case 1:
@@ -311,10 +417,8 @@ Curl_cookie_add(struct CookieInfo *c,
As far as I can see, it is set to true when the cookie says
.domain.com and to false when the domain is complete www.domain.com
We don't currently take advantage of this knowledge.
*/
co->field1=strequal(ptr, "TRUE")+1; /* store information */
co->tailmatch=strequal(ptr, "TRUE"); /* store information */
break;
case 2:
/* It turns out, that sometimes the file format allows the path
@@ -344,13 +448,16 @@ Curl_cookie_add(struct CookieInfo *c,
}
}
if(7 != fields) {
if(6 == fields) {
/* we got a cookie with blank contents, fix it */
co->value = strdup("");
}
else if(7 != fields) {
/* we did not find the sufficient number of fields to recognize this
as a valid line, abort and go home */
free_cookiemess(co);
return NULL;
}
}
if(!c->running && /* read from a file */
@@ -373,13 +480,8 @@ Curl_cookie_add(struct CookieInfo *c,
/* the names are identical */
if(clist->domain && co->domain) {
if(strequal(clist->domain, co->domain) ||
(clist->domain[0]=='.' &&
strequal(&(clist->domain[1]), co->domain)) ||
(co->domain[0]=='.' &&
strequal(clist->domain, &(co->domain[1]))) )
/* The domains are identical, or at least identical if you skip the
preceeding dot */
if(strequal(clist->domain, co->domain))
/* The domains are identical */
replace_old=TRUE;
}
else if(!clist->domain && !co->domain)
@@ -460,6 +562,12 @@ Curl_cookie_add(struct CookieInfo *c,
clist = clist->next;
}
if(c->running)
/* Only show this when NOT reading the cookies from a file */
infof(data, "%s cookie %s=\"%s\" for domain %s, path %s, expire %d\n",
replace_old?"Replaced":"Added", co->name, co->value,
co->domain, co->path, co->expires);
if(!replace_old) {
/* then make the last item point on this new one */
if(lastc)
@@ -469,7 +577,6 @@ Curl_cookie_add(struct CookieInfo *c,
}
c->numcookies++; /* one more cookie in the jar */
return co;
}
@@ -483,7 +590,8 @@ Curl_cookie_add(struct CookieInfo *c,
* If 'newsession' is TRUE, discard all "session cookies" on read from file.
*
****************************************************************************/
struct CookieInfo *Curl_cookie_init(char *file,
struct CookieInfo *Curl_cookie_init(struct SessionHandle *data,
char *file,
struct CookieInfo *inc,
bool newsession)
{
@@ -519,7 +627,7 @@ struct CookieInfo *Curl_cookie_init(char *file,
char *lineptr;
bool headerline;
while(fgets(line, MAX_COOKIE_LINE, fp)) {
if(strnequal("Set-Cookie:", line, 11)) {
if(checkprefix("Set-Cookie:", line)) {
/* This is a cookie line, get it! */
lineptr=&line[11];
headerline=TRUE;
@@ -531,7 +639,7 @@ struct CookieInfo *Curl_cookie_init(char *file,
while(*lineptr && isspace((int)*lineptr))
lineptr++;
Curl_cookie_add(c, headerline, lineptr, NULL);
Curl_cookie_add(data, c, headerline, lineptr, NULL, NULL);
}
if(fromfile)
fclose(fp);
@@ -560,9 +668,6 @@ struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
struct Cookie *newco;
struct Cookie *co;
time_t now = time(NULL);
int hostlen=strlen(host);
int domlen;
struct Cookie *mainco=NULL;
if(!c || !c->cookies)
@@ -571,43 +676,42 @@ struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
co = c->cookies;
while(co) {
/* only process this cookie if it is not expired or had no expire
date AND that if the cookie requires we're secure we must only
continue if we are! */
/* only process this cookie if it is not expired or had no expire
date AND that if the cookie requires we're secure we must only
continue if we are! */
if( (co->expires<=0 || (co->expires> now)) &&
(co->secure?secure:TRUE) ) {
/* now check if the domain is correct */
if(!co->domain ||
(co->tailmatch && tailmatch(co->domain, host)) ||
(!co->tailmatch && strequal(host, co->domain)) ) {
/* the right part of the host matches the domain stuff in the
cookie data */
/* now check the left part of the path with the cookies path
requirement */
if(!co->path ||
checkprefix(co->path, path) ) {
/* now check if the domain is correct */
domlen=co->domain?strlen(co->domain):0;
if(!co->domain ||
((domlen<=hostlen) &&
strequal(host+(hostlen-domlen), co->domain)) ) {
/* the right part of the host matches the domain stuff in the
cookie data */
/* and now, we know this is a match and we should create an
entry for the return-linked-list */
newco = (struct Cookie *)malloc(sizeof(struct Cookie));
if(newco) {
/* first, copy the whole source cookie: */
memcpy(newco, co, sizeof(struct Cookie));
/* now check the left part of the path with the cookies path
requirement */
if(!co->path ||
strnequal(path, co->path, strlen(co->path))) {
/* and now, we know this is a match and we should create an
entry for the return-linked-list */
newco = (struct Cookie *)malloc(sizeof(struct Cookie));
if(newco) {
/* first, copy the whole source cookie: */
memcpy(newco, co, sizeof(struct Cookie));
/* then modify our next */
newco->next = mainco;
/* point the main to us */
mainco = newco;
}
}
}
}
co = co->next;
/* then modify our next */
newco->next = mainco;
/* point the main to us */
mainco = newco;
}
}
}
}
co = co->next;
}
return mainco; /* return the new list */
@@ -715,15 +819,19 @@ int Curl_cookie_output(struct CookieInfo *c, char *dumphere)
while(co) {
fprintf(out,
"%s\t" /* domain */
"%s\t" /* field1 */
"%s%s\t" /* domain */
"%s\t" /* tailmatch */
"%s\t" /* path */
"%s\t" /* secure */
"%u\t" /* expires */
"%s\t" /* name */
"%s\n", /* value */
/* Make sure all domains are prefixed with a dot if they allow
tailmatching. This is Mozilla-style. */
(co->tailmatch && co->domain && co->domain[0] != '.')? ".":"",
co->domain?co->domain:"unknown",
co->field1==2?"TRUE":"FALSE",
co->tailmatch?"TRUE":"FALSE",
co->path?co->path:"/",
co->secure?"TRUE":"FALSE",
(unsigned int)co->expires,
@@ -740,39 +848,4 @@ int Curl_cookie_output(struct CookieInfo *c, char *dumphere)
return 0;
}
#ifdef CURL_COOKIE_DEBUG
/*
* On my Solaris box, this command line builds this test program:
*
* gcc -g -o cooktest -DCURL_COOKIE_DEBUG -DHAVE_CONFIG_H -I.. -I../include cookie.c strequal.o getdate.o memdebug.o mprintf.o strtok.o -lnsl -lsocket
*
*/
int main(int argc, char **argv)
{
struct CookieInfo *c=NULL;
if(argc>1) {
c = Curl_cookie_init(argv[1], c);
Curl_cookie_add(c, TRUE, "PERSONALIZE=none;expires=Monday, 13-Jun-1988 03:04:55 GMT; domain=.fidelity.com; path=/ftgw; secure");
Curl_cookie_add(c, TRUE, "foobar=yes; domain=.haxx.se; path=/looser;");
c = Curl_cookie_init(argv[1], c);
Curl_cookie_output(c);
Curl_cookie_cleanup(c);
return 0;
}
return 1;
}
#endif
#endif /* CURL_DISABLE_HTTP */
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/


@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -40,8 +40,7 @@ struct Cookie {
char *domain; /* domain = <this> */
long expires; /* expires = <this> */
char *expirestr; /* the plain text version */
char field1; /* read from a cookie file, 1 => FALSE, 2=> TRUE */
bool tailmatch; /* whether we do tail-matching of the domain name */
/* RFC 2109 keywords. Version=1 means 2109-compliant cookie sending */
char *version; /* Version = <value> */
@@ -69,14 +68,18 @@ struct CookieInfo {
#define MAX_NAME 256
#define MAX_NAME_TXT "255"
struct SessionHandle;
/*
* Add a cookie to the internal list of cookies. The domain argument is only
* used if the header boolean is TRUE.
* Add a cookie to the internal list of cookies. The domain and path arguments
* are only used if the header boolean is TRUE.
*/
struct Cookie *Curl_cookie_add(struct CookieInfo *, bool header, char *line,
char *domain);
struct CookieInfo *Curl_cookie_init(char *, struct CookieInfo *, bool);
struct Cookie *Curl_cookie_add(struct SessionHandle *data,
struct CookieInfo *, bool header, char *line,
char *domain, char *path);
struct CookieInfo *Curl_cookie_init(struct SessionHandle *data,
char *, struct CookieInfo *, bool);
struct Cookie *Curl_cookie_getlist(struct CookieInfo *, char *, char *, bool);
void Curl_cookie_freelist(struct Cookie *);
void Curl_cookie_cleanup(struct CookieInfo *);

Some files were not shown because too many files have changed in this diff.