Compare commits


695 Commits

Author SHA1 Message Date
Daniel Stenberg
5c2df3e1a4 7.10.7 2003-08-15 07:08:02 +00:00
Daniel Stenberg
6fc55467f4 removed lots of "added in [version]" where [version] is reasonably old 2003-08-15 06:35:41 +00:00
Daniel Stenberg
a147a07956 check for long long
changed the use of AC_CHECK_TYPE as the previous approach is deprecated
require 2.57 properly
2003-08-14 22:44:06 +00:00
Daniel Stenberg
a10581d459 Possible code for large file support, added within #if 0 so far. 2003-08-14 22:42:18 +00:00
Daniel Stenberg
cc2d6942bb new Russian mirror both web and download 2003-08-14 22:38:35 +00:00
Daniel Stenberg
3974c02bb1 support for the new memlimit stuff 2003-08-14 22:38:03 +00:00
Daniel Stenberg
09b5ddaea5 added one "added in blabla" and removed a few 2003-08-14 22:00:56 +00:00
Daniel Stenberg
acbcd68d89 Curl_SSL_InitSessions can return error, so check the return code and bail
out if necessary
2003-08-14 15:06:36 +00:00
Daniel Stenberg
4281470fca Curl_llist_destroy() checks the input for non-NULL 2003-08-14 15:06:08 +00:00
Daniel Stenberg
68a4aa6773 new proto for Curl_hash_init 2003-08-14 15:05:25 +00:00
Daniel Stenberg
905b160097 1. check allocs
2. don't leave allocated memory behind when returning error
2003-08-14 15:05:13 +00:00
Daniel Stenberg
52596c339b return failure when the host cache creation fails 2003-08-14 15:02:25 +00:00
Daniel Stenberg
73500267ee activate the new memory limit tests if requested
only set cookiejar if selected
2003-08-14 15:01:52 +00:00
Daniel Stenberg
e6011e33a6 return failure when an alloc function fails 2003-08-14 15:01:20 +00:00
Daniel Stenberg
3454319c17 prevent memory leak when going out of memory 2003-08-14 14:20:03 +00:00
Daniel Stenberg
02c78ecf81 allow out-of-memory testing by setting a limit. That number of memory
allocation calls will succeed; the following ones will return NULL!
2003-08-14 14:19:36 +00:00
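As a rough sketch of the allocation-limit idea described in the commit above — not curl's actual memdebug code, and the names here are made up — a counting malloc wrapper could look like this:

    #include <stdlib.h>

    /* Illustrative only: allow the first 'alloc_limit' allocations to
       succeed, then return NULL so out-of-memory code paths can be tested. */
    static long alloc_limit = -1;   /* -1 means no limit */
    static long alloc_count = 0;

    void set_alloc_limit(long limit)
    {
      alloc_limit = limit;
      alloc_count = 0;
    }

    void *counting_malloc(size_t size)
    {
      if(alloc_limit >= 0 && alloc_count >= alloc_limit)
        return NULL;                /* simulate memory exhaustion */
      alloc_count++;
      return malloc(size);
    }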
Daniel Stenberg
caca034302 better freeing when bailing out due to bad output glob 2003-08-14 13:38:19 +00:00
Daniel Stenberg
fb366ed35f free data on failure 2003-08-14 13:37:55 +00:00
Daniel Stenberg
b352ffca15 test87 verifies the new and better check for bad -o #[num] stuff 2003-08-14 13:37:32 +00:00
Daniel Stenberg
2d94856efd ignore the .pid files 2003-08-14 13:01:07 +00:00
Daniel Stenberg
ae66bd1284 ignore lib506 too 2003-08-14 13:00:34 +00:00
Daniel Stenberg
89d9d4e6c8 better report on why tests are skipped, and also show a count of the amount
of test cases that were "considered".
2003-08-14 12:59:54 +00:00
Daniel Stenberg
fe60fc4730 In case the output urlglob file name returned is NULL, then there was
badness in the string and we help our user by returning an error.
2003-08-14 11:53:53 +00:00
Daniel Stenberg
46690d5e1c modified the #[num] code to be more robust, to return NULL on errors and
to support numbers larger than 9
2003-08-14 11:53:09 +00:00
Daniel Stenberg
beaea8cb25 corrected this test case 2003-08-14 11:51:22 +00:00
Daniel Stenberg
409ec90c85 test urlglobbing range requests 2003-08-14 11:50:58 +00:00
Daniel Stenberg
4d423eeabe test86 added 2003-08-14 11:50:40 +00:00
Daniel Stenberg
019e612225 David Byron's fix that makes this script use 'cygpath' instead of 'pwd' if
this runs on windows, to find out the current working directory.
2003-08-12 21:18:39 +00:00
Daniel Stenberg
6550d271f0 7.10.7-pre4 commit 2003-08-12 12:48:40 +00:00
Daniel Stenberg
c46da65263 nicer make target for the pdf conversion 2003-08-12 09:08:05 +00:00
Daniel Stenberg
b46745759b don't treat index.html as the generated HTML pages 2003-08-12 08:58:46 +00:00
Daniel Stenberg
9687571a84 added the new man pages 2003-08-12 08:58:23 +00:00
Daniel Stenberg
c13236de25 corrected return type 2003-08-12 08:58:15 +00:00
Daniel Stenberg
8ffbb6acd4 added the new curl_share_* man pages, the libcurl-easy, the libcurl-share,
made the generated pdf and html files get removed on 'make clean'. Made
the pdf conversion remove the temporary .ps files.
2003-08-12 08:51:23 +00:00
Daniel Stenberg
a3e5d81765 separated the easy-specific stuff into a new libcurl-easy.3 man page and
made the libcurl.3 one a more generic overview
2003-08-12 08:46:02 +00:00
Daniel Stenberg
e2aecfe80f added the asynchdns bit 2003-08-12 08:26:38 +00:00
Daniel Stenberg
a3c1248214 Bugfix from Serge Semashko that fixes a bug introduced when we applied his
NTLM patch. Test case 84 and 85 verify this.
2003-08-12 08:20:16 +00:00
Daniel Stenberg
b933639222 more auth tests 2003-08-12 08:19:23 +00:00
Daniel Stenberg
27619fc450 Added support for CURLINFO_HTTP_CONNECTCODE 2003-08-11 23:15:41 +00:00
Daniel Stenberg
96fecba190 bindlocal works for Windows! 2003-08-11 23:15:13 +00:00
Daniel Stenberg
50257d4f50 Check CURL_VERSION_ASYNCHDNS for feature output 2003-08-11 23:13:41 +00:00
Daniel Stenberg
3eb4ae031c Set the CURL_VERSION_ASYNCHDNS bit if USE_ARES is defined. 2003-08-11 23:13:09 +00:00
Daniel Stenberg
6a4ec3be81 Added CURLINFO_HTTP_CONNECTCODE
Added CURL_VERSION_ASYNCHDNS
2003-08-11 23:12:46 +00:00
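A small usage sketch of the two additions above (the helper function is hypothetical; the libcurl symbols are the ones named in the commits):

    #include <stdio.h>
    #include <curl/curl.h>

    /* Print the proxy CONNECT response code of the last transfer and report
       whether this libcurl build resolves names asynchronously (c-ares). */
    static void report(CURL *curl)
    {
      long connectcode = 0;
      curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);

      curl_easy_getinfo(curl, CURLINFO_HTTP_CONNECTCODE, &connectcode);
      printf("CONNECT response code: %ld\n", connectcode);

      if(info->features & CURL_VERSION_ASYNCHDNS)
        printf("built with asynchronous DNS support\n");
    }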
Daniel Stenberg
cc9ac6ad14 mention curl_version_info 2003-08-11 23:07:38 +00:00
Daniel Stenberg
644990a835 mention the pre3 release 2003-08-11 22:48:26 +00:00
Daniel Stenberg
d3b81ea3f7 Vincent Sanders's massive update of this example code. One could argue
whether this is still an "example" or a whole new API layer! ;-)
2003-08-11 21:34:52 +00:00
Daniel Stenberg
3660f67534 edits 2003-08-11 16:17:51 +00:00
Daniel Stenberg
203cc4a5c3 two more known bugs 2003-08-11 15:15:25 +00:00
Daniel Stenberg
c7be232fee added include "http.h" to prevent a warning 2003-08-11 14:55:30 +00:00
Daniel Stenberg
2617b379be define USE_ARES nicer if enabled 2003-08-11 13:18:06 +00:00
Daniel Stenberg
84ed5e755a use safefree instead 2003-08-11 12:30:21 +00:00
Daniel Stenberg
2f17615790 forgot the backslash 2003-08-11 12:26:18 +00:00
Daniel Stenberg
acfa131c8c memory leak fixed when re-using connections with proxy user+passwd 2003-08-11 12:25:30 +00:00
Daniel Stenberg
793d0e27e1 --proxy-ntlm added 2003-08-11 12:23:55 +00:00
Daniel Stenberg
fdf0c443c3 81 + 82 test NTLM proxy stuff 2003-08-11 12:23:33 +00:00
Daniel Stenberg
1b39b53321 remodeled the help text to avoid those annoying puts() problems when a
string reaches > 512 bytes...
2003-08-11 12:04:46 +00:00
Daniel Stenberg
1679993e3b CURLOPT_PROXYAUTH explained 2003-08-11 11:54:14 +00:00
Daniel Stenberg
4c831f8b68 CURLOPT_PROXYAUTH added by Serge Semashko 2003-08-11 11:48:01 +00:00
Daniel Stenberg
7a19923afa Serge Semashko added CURLOPT_PROXYAUTH support, and now NTLM for proxies
work.
2003-08-11 11:47:45 +00:00
Daniel Stenberg
3e122a765d Christian Beutenmueller corrected the CURLOPT_FILE reference, as we nowadays
call it CURLOPT_WRITEDATA.
2003-08-11 11:29:33 +00:00
Daniel Stenberg
d873ba8c9f added test80 2003-08-11 11:09:26 +00:00
Daniel Stenberg
8093338f39 tunnel through proxy, with both proxy and regular authentication 2003-08-11 11:09:03 +00:00
Daniel Stenberg
07660eea1e -Z and -@ no longer work; they are now officially available for other,
more frequently used options in a future release
2003-08-11 10:34:25 +00:00
Daniel Stenberg
a2b2d4cd5c added test 79, a basic test that fetches an FTP URL over an HTTP proxy 2003-08-11 10:12:35 +00:00
Daniel Stenberg
96e217b496 the new cookie functions that require 'data' passed in 2003-08-11 09:56:06 +00:00
Daniel Stenberg
2dd1518d63 support sending off cookies without contents 2003-08-11 09:55:48 +00:00
Daniel Stenberg
168703b7bf Added some infof() calls that require the data pointer, so now several
cookie functions need that.

I also fixed the cookie loader to properly load and deal with cookies without
contents (or rather with a blank content).
2003-08-11 09:55:11 +00:00
Daniel Stenberg
0f2d680f1f added Dirk Manske 2003-08-11 07:30:24 +00:00
Daniel Stenberg
b7930b6ebd removed the dashes 2003-08-11 07:28:42 +00:00
Daniel Stenberg
8fa43b469a documenting the share interface 2003-08-11 07:25:02 +00:00
Daniel Stenberg
894e52f61a removed the BUGS section 2003-08-11 07:24:21 +00:00
Daniel Stenberg
3c294691aa remove the BUGS section 2003-08-11 07:23:49 +00:00
Daniel Stenberg
acbf932861 fix lines that start with " 2003-08-11 07:23:19 +00:00
Daniel Stenberg
26f5c53be8 test case 506 added, written by Dirk Manske 2003-08-11 06:44:46 +00:00
Daniel Stenberg
8dd069604c Dirk Manske's bugfix for the share stuff 2003-08-11 06:30:02 +00:00
Daniel Stenberg
5dadbd094e don't claim the PASV connect is connected unless it *really* is! 2003-08-10 17:11:41 +00:00
Daniel Stenberg
514a8739b6 make sure the string is long enough 2003-08-08 17:56:47 +00:00
Daniel Stenberg
12e78a082e Gisle Vanem fixed a single-byte overflow 2003-08-08 17:18:21 +00:00
Daniel Stenberg
9273096a8a David Byron's fix for file:// URLs with drive letters included. 2003-08-08 17:12:04 +00:00
Daniel Stenberg
686c6133f8 chmod the cabundle file before we attempt to write to it, to make
'make distcheck' run fine
2003-08-08 11:13:18 +00:00
Daniel Stenberg
1d1276cc3a ftp create dirs work done 2003-08-08 11:05:18 +00:00
Daniel Stenberg
d987676ef0 added CURLOPT_FTP_CREATE_MISSING_DIRS 2003-08-08 11:04:35 +00:00
Daniel Stenberg
6e4658c89d ftp-create-dirs test when MKD fails 2003-08-08 10:32:08 +00:00
Daniel Stenberg
b7cbcf7434 --ftp-create-dirs 2003-08-08 10:26:08 +00:00
Daniel Stenberg
e347d06a49 introducing --ftp-create_dirs 2003-08-08 10:24:13 +00:00
Daniel Stenberg
2077e9365a --ftp-create-dirs test 2003-08-08 10:23:46 +00:00
Daniel Stenberg
6e3adc9b14 Support COUNT in the control file, to set the number of times the custom
REPLY is to be sent back before getting blanked and reverted to the built-in
action. Now, we can make CWD fail once and then succeed when retried.
2003-08-08 10:21:47 +00:00
Daniel Stenberg
7954eee639 re-arranged the cwd/mkd stuff a bit 2003-08-08 09:55:16 +00:00
Daniel Stenberg
f9f1f0e316 Early Ehlinger's CURLOPT_FTP_CREATE_MISSING_DIRS patch was applied 2003-08-08 09:13:19 +00:00
Daniel Stenberg
a9afe6aa84 new -z tests 2003-08-08 08:13:11 +00:00
Daniel Stenberg
6d36796135 corrected main meta data title 2003-08-08 07:07:15 +00:00
Daniel Stenberg
9e81fd5703 added CLEANFILES to make distcheck run fine 2003-08-07 14:14:54 +00:00
Daniel Stenberg
609059b6ec infilesize must be a long to work on 64bit archs 2003-08-07 13:20:58 +00:00
Daniel Stenberg
6af73f417a use 644 for the chmod 2003-08-07 06:43:11 +00:00
Daniel Stenberg
32468a0072 argh, it wasn't *that* easy to generate the ca-bundle header in the build
dir instead of the source dir, reverting that change
2003-08-06 23:59:15 +00:00
Daniel Stenberg
6800c45104 fixed syntax error 2003-08-06 23:56:24 +00:00
Daniel Stenberg
0d8c754ffd better cleaning up of memory in case of failures in the get-loop (it was
taken care of by the exit-free anyway but caused test case 75 and 76 to
report memory leaks).

Also re-indented a small section.
2003-08-06 23:48:08 +00:00
Daniel Stenberg
1b80276496 better cleaning up allocated memory in case of failures 2003-08-06 23:47:01 +00:00
Daniel Stenberg
bf9a138276 more tests 2003-08-06 23:45:59 +00:00
Daniel Stenberg
b3f9c636b9 new urlglob test 2003-08-06 23:10:58 +00:00
Daniel Stenberg
18975d44a6 minor cleanup 2003-08-06 23:10:36 +00:00
Daniel Stenberg
b201db5cec explain more how the test case number awareness is sent to the test server(s) 2003-08-06 22:47:55 +00:00
Daniel Stenberg
bbe23945e4 fix the treatment of the variable width specifier '*', which caused a bug
in the urlglobbing just now, fixed in the debian bug tracker as Bug#203827
2003-08-06 22:32:47 +00:00
Daniel Stenberg
bbdc0394ff make an uninstall hook in the same manner we already did an install hook,
as otherwise 'make distcheck' will fail
2003-08-06 22:15:12 +00:00
Daniel Stenberg
38a9b14965 chmod the hugehelp.c in the dist hook to make distcheck run fine 2003-08-06 22:14:39 +00:00
Daniel Stenberg
77ba0d3686 generate the ca-bundle.h in the build dir, and also make sure to chmod
the file in the dist-hook to make distcheck run fine
2003-08-06 22:14:16 +00:00
Daniel Stenberg
065c8d7a95 Domenico Andreoli fixed the section number in the main meta data 2003-08-06 21:23:42 +00:00
Daniel Stenberg
c704d1545c include "share.h" for the cookie sharing 2003-08-06 15:26:24 +00:00
Daniel Stenberg
62b65a5f20 make it build without ares support
make sure it set async false even when using ipv6 (made test case 20 fail
before)
2003-08-06 15:26:02 +00:00
Daniel Stenberg
665a7a3848 505 was missing 2003-08-06 13:49:20 +00:00
Daniel Stenberg
256b9f31e1 more fix 2003-08-06 13:22:29 +00:00
Daniel Stenberg
a3037e1173 updated the ares instruction 2003-08-06 13:21:19 +00:00
Daniel Stenberg
f3e7a5d755 LDFLAGS fix to make the GSSAPI build again 2003-08-05 15:22:15 +00:00
Daniel Stenberg
5f0cba7775 added README.ares 2003-08-05 14:54:15 +00:00
Daniel Stenberg
673759fe7e how to build with ares 2003-08-05 14:52:31 +00:00
Daniel Stenberg
b73612392d ares awareness/usage/support added. If configure --enable-ares is used, we
build libcurl to use ares for asynch name resolves.
2003-08-05 14:40:59 +00:00
Daniel Stenberg
f85935f0f9 Add --enable-ares support, which will make us build curl with ares for
asynch name resolves. Still very experimental, beware!
2003-08-05 13:37:29 +00:00
Daniel Stenberg
1e7e53c87e clean up the dir tree hierarchy in *_done() to make persistent connection
FTP use the correct directories!

Reported in bug report #783116
2003-08-05 13:04:10 +00:00
Daniel Stenberg
b9fdf3cc3b added test 146 for an ftp persistency test, as reported on the list 2003-08-05 13:00:00 +00:00
Daniel Stenberg
c462601362 persistent connection test 2003-08-05 12:59:23 +00:00
Daniel Stenberg
859877dcfc auth problems 2003-08-05 12:32:02 +00:00
Daniel Stenberg
c04ce95106 cleaned up after David Byron's comment on the libcurl list, aug 5 2003 2003-08-04 23:13:39 +00:00
Daniel Stenberg
98ee12bc35 Jan Sundin reported a case where curl ignored a cookie that browsers don't,
which turned out to be due to the number of dots in the 'domain'. I've now
made curl follow the original netscape cookie spec less strictly on that
part.
2003-08-04 23:05:57 +00:00
Daniel Stenberg
fdda786fa2 added test 73 2003-08-04 22:58:06 +00:00
Daniel Stenberg
831be4f4dd Verifies Jan Sundin's cookie bug, dated aug 4 2003. 2003-08-04 22:57:58 +00:00
Daniel Stenberg
41ae97e710 Dirk Manske's patch that introduces cookie support to the share interface. 2003-08-04 15:02:42 +00:00
Daniel Stenberg
f72ba7f79d Mark Fletcher provided an excellent bug report that identified a problem
with FOLLOWLOCATION and chunked transfer-encoding, as libcurl would not
properly ignore the body contents of a 3XX response that included the
Location: header.
2003-08-03 22:18:14 +00:00
Daniel Stenberg
296046510b serious info leakage! 2003-08-03 21:33:25 +00:00
Daniel Stenberg
db9f87f697 When proxy authentication is used in a CONNECT request (as used for all SSL
connects and otherwise enforced tunnel-thru-proxy requests), the same
authentication header is also wrongly sent to the remote host.

The name and password can then be captured by an evil host and possibly get
used for malicious purposes.
2003-08-02 23:36:35 +00:00
Daniel Stenberg
3270ea55dd updated as the second proxy-auth header was proof of a serious info leak
bug!!
2003-08-02 23:35:59 +00:00
Daniel Stenberg
a358ac24f4 Joerg Mueller-Tolk fixed a minor mistake 2003-08-01 14:20:48 +00:00
Daniel Stenberg
8bedd43b28 recent action 2003-08-01 12:33:19 +00:00
Daniel Stenberg
9ea2087ede David Byron's makefile fix to allow 7.10.6 to build fine using VC 2003-08-01 07:53:27 +00:00
Daniel Stenberg
9f7c634133 add a check for 'ar' since the lack of it bit Jared Ingersoll.
We might need to check for some other tools too that on Solaris are put
in those weird dirs...
2003-07-30 15:10:26 +00:00
Daniel Stenberg
da20d68a12 removed silly target that only works when building from CVS 2003-07-30 14:26:36 +00:00
Daniel Stenberg
d3e512c738 Jörg Müller-Tolk updated this to build fine with 7.10.6 2003-07-30 14:19:44 +00:00
Daniel Stenberg
339f84fe1f ftp proxy support would be nice 2003-07-30 13:41:59 +00:00
Daniel Stenberg
2d41b735ec updated to match the recent ftp patch that makes it check for resumability 2003-07-30 07:52:02 +00:00
Daniel Stenberg
e3b4dd08ff Daniel Noguerol made the ftp code output "Accept-Ranges: bytes" in similar
style like other faked HTTP headers when NOBODY and HEADER are used.
2003-07-30 07:51:33 +00:00
Daniel Stenberg
6809a906bb Make sure to generate an uncompressed hugehelp.c file for inclusion in
the distribution archive, as it isn't sure zlib is present everywhere. Those
who care much for compressed help should regenerate the file.
2003-07-30 07:33:41 +00:00
Daniel Stenberg
1c35cbcc07 Reverted the 'filetime' struct field back to a 'long' as time_t is sometimes
unsigned and we want this to be able to hold -1 for illegal/unset values.
2003-07-30 07:22:28 +00:00
Daniel Stenberg
5f8989a436 CURLDEBUG not MALLOCDEBUG 2003-07-29 11:07:38 +00:00
Daniel Stenberg
aa7b0648ff Fixes based on Gisle Vanem's input since this script failed due to
possibly crlf newlines.
2003-07-28 23:00:56 +00:00
Daniel Stenberg
2fbe61960f Digest *OR* Basic authorization test 2003-07-28 22:17:37 +00:00
Daniel Stenberg
bdb5e5a250 7.10.6 2003-07-28 12:13:48 +00:00
Daniel Stenberg
48a580e609 clear http->send_buffer when we have freed the memory it pointed to 2003-07-28 10:21:57 +00:00
Daniel Stenberg
1361fc69b9 updated to the new ftp dir parsing code that allows a preceding double
slash
2003-07-28 09:02:15 +00:00
Daniel Stenberg
93352e56d8 As noticed by Kevin Roth, we shall not speak of root dir when it isn't
necessarily the root...
2003-07-28 08:53:12 +00:00
Daniel Stenberg
d9246ff24d François Pons brought a patch that once again made curl deal with ftp and
"double slash" as indicating the root directory. In the RFC1738-fix of April
30, that ability was removed (since it is not the "right" way).
2003-07-28 08:50:02 +00:00
Daniel Stenberg
9301bc3444 use the correct 'test71' file name for the temp file 2003-07-28 08:23:46 +00:00
Daniel Stenberg
76352c4e2d got a bug report on -F in config files, so I wrote up this test to verify
that it works... and it did! ;-)
2003-07-28 08:21:07 +00:00
Daniel Stenberg
428f41bd12 having it in CVS causes us problems *grrr* 2003-07-25 09:46:07 +00:00
Daniel Stenberg
99c32e460f Andrés García updated with the added files etc 2003-07-25 08:59:55 +00:00
Daniel Stenberg
83f249cf65 With an unknown CA path, we undef the variable, to build properly without
SSL/CA.
2003-07-25 08:47:34 +00:00
Daniel Stenberg
2c2baa93ea only check for CA bundle path if built with SSL support
set a conditional for the makefile if we know the CA path or not
2003-07-25 08:47:10 +00:00
Daniel Stenberg
f0278ca114 Removed #include <sys/resource.h>, as pointed out by Henry Bland we don't
need it.
2003-07-25 08:30:58 +00:00
Daniel Stenberg
297b1b5013 the test compared numerically even though it could contain a string, and I
lowered the number of retries to 10
2003-07-23 17:28:36 +00:00
Daniel Stenberg
e9f63bf4e8 When we re-use an existing connection we must make sure that we don't
accidentally re-use the connect_addr field, as that might no longer be
around. Fix verified by Tracy Boehrer who basically debugged and tracked down
this problem.
2003-07-23 17:06:21 +00:00
Daniel Stenberg
556ce1c6a1 minor code style fix 2003-07-23 12:55:24 +00:00
Daniel Stenberg
cc4ff62681 Split out the changes from the year 2002 into a separate file, named
CHANGES.2002.
2003-07-23 11:59:20 +00:00
Daniel Stenberg
0423fd9b55 SSLCERTS was moved into the docs/ directory 2003-07-23 11:39:05 +00:00
Daniel Stenberg
789ab20bf7 moved SSLCERTS into the docs/ directory 2003-07-23 11:38:19 +00:00
Daniel Stenberg
b47462bd68 Daniel Kouril's fix to make the GSS-Negotiate work fine. 2003-07-23 11:28:59 +00:00
Daniel Stenberg
1a94fee42d Juan F. Codagnone's fixes to build properly on Windows again 2003-07-23 08:21:21 +00:00
Daniel Stenberg
a91ce6a5d6 Plain default version of this file, to allow users to build easier from
CVS. This will be updated by the configure script, and a default is placed
here by the maketgz script.
2003-07-23 08:11:28 +00:00
Daniel Stenberg
981ffd9fce reversed the check for GSSAPI when requesting that auth 2003-07-22 11:15:46 +00:00
Daniel Stenberg
e76c960624 CURLDEBUG, not MALLOCDEBUG 2003-07-22 10:00:37 +00:00
Daniel Stenberg
416c92cc6f More support for NTLM on proxies, now proxy state and nonce is stored in
a separate struct properly.
2003-07-22 09:59:36 +00:00
Daniel Stenberg
fb731eb3e7 The NTLM functions now take a 'proxy' argument as well. 2003-07-22 09:58:57 +00:00
Daniel Stenberg
6f2a4d290f Added a separate struct for the proxyntlm data, as it will/can be different
than the remote server's. That is, both the server and the proxy can in
fact require NTLM auth.
2003-07-22 09:58:18 +00:00
Daniel Stenberg
cefc8ba938 CURLDEBUG is the symbol now 2003-07-22 09:57:09 +00:00
Daniel Stenberg
d0bd644eef Don't depend on the TIME_WITH_SYS_TIME define. win32 doesn't have sys/time.h
and I don't think we need it.
2003-07-22 08:23:16 +00:00
Daniel Stenberg
071c95128e moved the proxyuser and proxypasswd fields from the sessionhandle to the
connectdata to work as expected
2003-07-21 13:16:30 +00:00
Daniel Stenberg
1a192c489b adjusted to support NTLM for proxies 2003-07-21 13:16:01 +00:00
Daniel Stenberg
56014e74a0 krb4-fixes for the moved user+password fields within the structs 2003-07-21 09:19:48 +00:00
Daniel Stenberg
172271498d pre4-commit 2003-07-21 08:25:31 +00:00
Daniel Stenberg
f2882cb88c pre4 2003-07-21 08:25:21 +00:00
Daniel Stenberg
152f1fee40 the CWD-null bug fix 2003-07-21 07:54:20 +00:00
Daniel Stenberg
968234e6ae the fixed skip-blanks in the FTP CWD code called for this adjustment 2003-07-20 00:19:44 +00:00
Daniel Stenberg
5e133e2dff David Gardner pointed out in bug report 770755 that using the FTP command CWD
with a blank argument is a bad idea. Now skip blanks.
2003-07-20 00:18:11 +00:00
Daniel Stenberg
0049c09fc3 If NTLM is requested, only re-use connections that have the exact same
credentials.
2003-07-20 00:02:47 +00:00
Daniel Stenberg
a2a63c27f4 explains my fixes just committed 2003-07-19 23:58:21 +00:00
Daniel Stenberg
c50a601f1a modified to work fine with the new persistent-connection-capable test suite
HTTP server
2003-07-19 23:57:08 +00:00
Daniel Stenberg
bc0fd6db71 swsclose added 2003-07-19 23:56:44 +00:00
Daniel Stenberg
52b631fade Access the user and passwd fields from the connectdata struct now instead
of the sessionhandle struct, as that was not good.
2003-07-19 23:56:33 +00:00
Daniel Stenberg
2f0bc9d1f7 No longer stores user+password in the sessionhandle, now doing that in the
connectdata struct instead. Each being an allocated pointer.

The passwdgiven field was turned into a local variable in the only
function where it was being used.
2003-07-19 23:55:15 +00:00
Daniel Stenberg
5ef6520d4e fixed the CONNECT thing again 2003-07-19 23:54:15 +00:00
Daniel Stenberg
2c1925161e If the data contents contain the word 'swsclose', then this server will
disconnect the client after the response has been sent. This also happens
if the response is zero bytes long.

In all other cases (unless an error happens), it will now maintain the
connection to allow proper persistent connection testing. This was required
for the NTLM testing to work, so I finally had to fix this. Of course most of
the existing HTTP tests will be adjusted to work with this new rule of test
file syntax for HTTP tests.

Also fixed the log function to deal with varargs for better logging.
2003-07-19 23:44:22 +00:00
Daniel Stenberg
0529b349d5 recent changes 2003-07-16 00:04:45 +00:00
Daniel Stenberg
b4620364a2 more fixes from Doug Kaufman for DJGPP builds for DOS 2003-07-15 23:47:25 +00:00
Daniel Stenberg
634aef3895 updated to work with Dan Winship's NTLM domain stuff fix 2003-07-15 23:38:06 +00:00
Daniel Stenberg
06c86d1a8c Moved the NTLM credentials to the connectdata struct instead, as NTLM
authenticates connections and not single requests. This should make it work
better when we mix requests from multiple hosts. Problem pointed out by
Cris Bailiff.
2003-07-15 23:36:50 +00:00
Daniel Stenberg
79749f8eb4 Fix to the endless loop of bad Basic authentication as reported in Cris
Bailiff's bug report 768275.
2003-07-15 23:06:02 +00:00
Daniel Stenberg
b036986b3e Dan Winship's patch added that makes use of DOMAIN\USER or DOMAIN/USER
for the user field. I changed it slightly to stay with strchr() only instead
of strpbrk() for portability reasons.
2003-07-15 22:58:36 +00:00
Daniel Stenberg
938f1d1da7 Dan Winship's fix to make the new auth stuff such as NTLM to work with
the multi interface
2003-07-15 22:46:01 +00:00
Daniel Stenberg
58b6b3df06 Dan Winship pointed out this flaw 2003-07-15 22:44:48 +00:00
Daniel Stenberg
f9c3347f7c re-use existing variable instead of declaring a new local one 2003-07-05 13:27:02 +00:00
Daniel Stenberg
5b72eb0b03 Some of Doug Kaufman's changes for the DOS port 2003-07-05 13:13:49 +00:00
Daniel Stenberg
6dd4c13bc0 the latest changes 2003-07-04 18:18:17 +00:00
Daniel Stenberg
e4e7db551f HAVE_SETVBUF removed, no longer used 2003-07-04 18:17:58 +00:00
Daniel Stenberg
ebfde8da56 removed 2003-07-04 18:15:53 +00:00
Daniel Stenberg
756bc0f4b7 Dan Grayson pointed out that we set the CURL_CA_BUNDLE variable wrongly in
the configure script. We set it differently now and generate the
lib/ca-bundle.h file entirely.
2003-07-04 18:15:25 +00:00
Daniel Stenberg
269d491b6a remove the usage of setvbuf() and use fflush() instead if no buffering should
be done on the output
2003-07-04 17:16:34 +00:00
Daniel Stenberg
449e5bc2ad CURLDEBUG not MALLOCDEBUG anymore 2003-07-04 16:37:16 +00:00
Daniel Stenberg
8736c11d84 adjusted to the NTLM updates 2003-07-04 16:36:31 +00:00
Daniel Stenberg
45fc760985 Peter Sylvester's patch was applied that introduces the following:
CURLOPT_SSL_CTX_FUNCTION to set a callback that gets called with OpenSSL's
   ssl_ctx pointer passed in, allowing the callback to act on it. If
   anything but CURLE_OK is returned, that will also be returned by libcurl
   all the way back. If this function changes the CURLOPT_URL, libcurl will
   detect this and use the new URL instead.

   CURLOPT_SSL_CTX_DATA is a pointer you set to get passed to the callback set
   with CURLOPT_SSL_CTX_FUNCTION.
2003-07-04 16:29:23 +00:00
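A minimal sketch of how an application might use these two options, assuming the OpenSSL backend; the verify-depth tweak is just a placeholder for whatever the callback wants to do with the context:

    #include <curl/curl.h>
    #include <openssl/ssl.h>

    /* Called by libcurl right before the SSL handshake. 'sslctx' is OpenSSL's
       SSL_CTX for this connection, 'userptr' is whatever CURLOPT_SSL_CTX_DATA
       was set to. Returning anything but CURLE_OK fails the transfer. */
    static CURLcode sslctx_callback(CURL *handle, void *sslctx, void *userptr)
    {
      SSL_CTX *ctx = (SSL_CTX *)sslctx;
      (void)handle;
      (void)userptr;
      SSL_CTX_set_verify_depth(ctx, 4); /* placeholder adjustment */
      return CURLE_OK;
    }

    static CURLcode fetch(CURL *curl, const char *url)
    {
      curl_easy_setopt(curl, CURLOPT_URL, url);
      curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, sslctx_callback);
      curl_easy_setopt(curl, CURLOPT_SSL_CTX_DATA, NULL);
      return curl_easy_perform(curl);
    }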
Daniel Stenberg
7968e3c2de David Byron's patch that allows a client to make the server quit with a
magic url.
2003-07-01 15:21:42 +00:00
Daniel Stenberg
964a41c75c new CVS info 2003-07-01 12:12:10 +00:00
Daniel Stenberg
5931d53637 Gisle Vanem found a lib handle leak in the ldap code 2003-07-01 10:12:52 +00:00
Daniel Stenberg
3ed3ae5bcf When I introduced the DIST_SUBDIRS usage, I broke 'make install' for
include files and docs, so now I've added a custom install hook to run
make install for docs and install when data is installed at the top-level.
2003-06-27 14:37:38 +00:00
Sterling Hughes
6519cc70c5 revert out my bogus commit. ;-) 2003-06-26 21:30:48 +00:00
Sterling Hughes
505a4f27fa test commit 2003-06-26 21:17:29 +00:00
Daniel Stenberg
79144eba99 new tests 2003-06-26 11:45:04 +00:00
Daniel Stenberg
26e17d89c9 produce a skip-report at the end of all tests, and thus record and count
them properly
2003-06-26 11:44:01 +00:00
Daniel Stenberg
4322c1106f beautified and added comments all over 2003-06-26 11:42:54 +00:00
Daniel Stenberg
73071dfd4f mention the new flag bits we support 2003-06-26 11:41:24 +00:00
Daniel Stenberg
b7c14b3c27 mention that it copies the string you add 2003-06-26 11:41:08 +00:00
Daniel Stenberg
3130b44535 added lots, mostly the new auth-related option(s) 2003-06-26 11:40:44 +00:00
Daniel Stenberg
a2bd73334f added lots of auth stuff and updated other things too 2003-06-26 11:40:04 +00:00
Daniel Stenberg
1a393f5625 mention COOKIES, removed added entries, corrected the FPL-SSL link/reference 2003-06-26 11:38:53 +00:00
Daniel Stenberg
d4951e837e mention the other formats the docs come in 2003-06-26 11:37:13 +00:00
Daniel Stenberg
26f6365e93 adjusted to recent changes 2003-06-26 11:36:32 +00:00
Daniel Stenberg
3a552b1e63 we do support NTLM now... 2003-06-26 11:35:48 +00:00
Daniel Stenberg
69eb1790da CURLDEBUG is the symbol to use, no longer MALLOCDEBUG 2003-06-26 11:34:36 +00:00
Daniel Stenberg
a1af6f3614 adjusted the compressed generation to be more helpful in comments etc 2003-06-26 11:34:07 +00:00
Daniel Stenberg
3aced61465 support for the new auth stuff
more output on --version/-V
mention --manual on the help output text
2003-06-26 11:33:29 +00:00
Daniel Stenberg
6f02ddfce8 new httpauth support, changed filetime variable kind 2003-06-26 11:31:50 +00:00
Daniel Stenberg
c2faa39b62 added CURLOPT_HTTPAUTH support 2003-06-26 11:30:59 +00:00
Daniel Stenberg
2d3734b8b5 Adjusted to work properly with the new authentication stuff
Added code to deal with white spaces in relocation headers.
2003-06-26 11:30:26 +00:00
Daniel Stenberg
ed908b7f89 use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 11:28:26 +00:00
Daniel Stenberg
f7d795a364 use CURLDEBUG 2003-06-26 11:27:38 +00:00
Daniel Stenberg
8919b39d54 adjusted to use the same API as the OpenSSL version of the MD5 functions 2003-06-26 11:27:22 +00:00
Daniel Stenberg
84cedc094e added ntlm flag bits 2003-06-26 11:26:50 +00:00
Daniel Stenberg
3b2b2496d7 Many fixes, most of them based on comments by Eric Glass 2003-06-26 11:26:26 +00:00
Daniel Stenberg
445684c409 new proto for Curl_input_negotiate 2003-06-26 11:25:42 +00:00
Daniel Stenberg
898e067ccc kill warnings 2003-06-26 11:25:23 +00:00
Daniel Stenberg
12859e345f major adjustments to the new authentication support 2003-06-26 11:24:55 +00:00
Daniel Stenberg
89f4af695e include GSS in the debug string if available, support a few new flag
booleans
2003-06-26 11:22:48 +00:00
Daniel Stenberg
308bc9d919 use CURLDEBUG instead of MALLOCDEBUG for preprocessor conditions 2003-06-26 11:22:12 +00:00
Daniel Stenberg
db566c54ae use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 11:16:37 +00:00
Daniel Stenberg
81d403e207 one typecast less for the localtime(), use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:52:48 +00:00
Daniel Stenberg
2bd71d70ff use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:50:32 +00:00
Daniel Stenberg
1eef6f44ba CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:47:20 +00:00
Daniel Stenberg
204f03912f We now use CURLDEBUG instead of MALLOCDEBUG 2003-06-26 06:45:15 +00:00
Daniel Stenberg
f8c3b3aa18 moved from former CVS 2003-06-26 06:21:29 +00:00
Daniel Stenberg
d4df981463 Added time_t 2003-06-26 06:19:37 +00:00
Daniel Stenberg
497c6d516d up to date with the actual situation 2003-06-25 23:40:48 +00:00
Daniel Stenberg
8288862b7e Cris Bailiff's patch that should make us do NTLM correctly. When we've
authenticated our connection, we can continue without any Authorization:
headers as long as our connection is maintained.
2003-06-13 10:15:55 +00:00
Daniel Stenberg
9aae16c236 stdout is good enough 2003-06-13 09:09:04 +00:00
Daniel Stenberg
80c194a70a work more on pids, less on pidfiles to be able to do better kills at the
end of the test where the pidfiles aren't found, but "our" server is running
2003-06-13 09:04:08 +00:00
Daniel Stenberg
c832b2db5b fixed NTLM test 67, added test 68 for bad NTLM name/password 2003-06-13 08:03:45 +00:00
Daniel Stenberg
27018882ec Cris Bailiff's bugfix 2003-06-13 07:56:38 +00:00
Daniel Stenberg
caf6e9c540 use more curlish strings, these should be able to change... 2003-06-13 07:14:46 +00:00
Daniel Stenberg
e727fb82f2 Marty Kuhrt's #include fixes for VMS 2003-06-13 06:48:04 +00:00
Daniel Stenberg
c78df56801 get and use only the first line of the curl --version output 2003-06-12 23:05:12 +00:00
Daniel Stenberg
d13202f43b modified 2003-06-12 23:03:08 +00:00
Daniel Stenberg
9d139a6b35 Make the HTTP auth stuff work, Dan Fandrich made --version output a list
of all supported protocols.
2003-06-12 23:02:36 +00:00
Daniel Stenberg
d2abe44e6f remove the dumpit file after use 2003-06-12 19:17:08 +00:00
Daniel Stenberg
bc67228576 corrected a comment 2003-06-12 17:40:56 +00:00
Daniel Stenberg
ecf32c964a CURLHTTP* renamed to CURLAUTH* and NEGOTIATE is now GSSNEGOTIATE as there's
a "plain" Negotiate as well.
2003-06-12 17:34:27 +00:00
Daniel Stenberg
e58f30b82a NTLM test case 2003-06-12 16:39:35 +00:00
Daniel Stenberg
654e3f1101 require the netrc_debug feature the same way we now can require SSL
present client-side
2003-06-12 16:38:14 +00:00
Daniel Stenberg
86689dc524 now test cases can be set to be dependent on the presence of "SSL" in the
client/library
2003-06-12 16:22:52 +00:00
Daniel Stenberg
5f62a0c1ca make it build with older OpenSSL 2003-06-12 13:55:40 +00:00
Daniel Stenberg
ad1bf0f389 attempt to make older OpenSSL versions work with the DES stuff 2003-06-12 13:18:10 +00:00
Daniel Stenberg
9c7703ace1 Based on Dan Fandrich's patch and gzip unpack function, we now compress
the 'hugehelp' text if libz and gzip are available at build time.
2003-06-12 12:54:34 +00:00
Daniel Stenberg
4a8155b53c store HAVE_LIBZ as an automake conditional 2003-06-12 12:53:38 +00:00
Daniel Stenberg
80d6d5c5c4 fixing details for NTLM 2003-06-11 16:14:45 +00:00
Daniel Stenberg
c624be8388 more how I envision it _should_ work, but it still doesn't... 2003-06-11 15:33:09 +00:00
Daniel Stenberg
09df1cd41e to support "redirects" after the full body is transferred 2003-06-11 15:31:40 +00:00
Daniel Stenberg
52c5b57200 made a nicer output for the decode test, as it served as a nice tool for me ;-) 2003-06-11 15:31:06 +00:00
Daniel Stenberg
5ea04a852e when we get the auth headers, we still need to read out the full body response
so that we can re-send requests on the same connection nicely
2003-06-11 15:30:30 +00:00
Daniel Stenberg
a2eef05198 correct mistakes 2003-06-11 14:05:13 +00:00
Daniel Stenberg
55f75af353 describe the NTLM mechanism too 2003-06-11 13:44:58 +00:00
Daniel Stenberg
fb6a51b8fd basic NTLM support 2003-06-11 13:44:31 +00:00
Daniel Stenberg
252cc2213e ntlm added 2003-06-11 13:42:53 +00:00
Daniel Stenberg
73c5f24fa4 Initial take at NTLM authentication. It doesn't really work at this point
but the infrastructure is there.
2003-06-11 13:38:55 +00:00
Daniel Stenberg
4c80e103a0 clarify the CUSTOMREQUEST and HTTPHEADER options slightly 2003-06-10 13:06:38 +00:00
Daniel Stenberg
39ea557360 CURLOPT_HTTPAUTH docu 2003-06-10 12:58:40 +00:00
Daniel Stenberg
d0cc92a01a Set auth type differently; we use one CURLOPT_HTTPAUTH instead, as we plan
to add more methods in the future.
2003-06-10 12:49:16 +00:00
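For illustration, selecting an auth method through the single CURLOPT_HTTPAUTH option looks roughly like this (CURLAUTH_DIGEST is one of the bitmask values; CURLAUTH_BASIC, CURLAUTH_NTLM and friends work the same way, and several can be OR'ed together; the helper function is made up):

    #include <curl/curl.h>

    /* Ask libcurl to use HTTP Digest authentication for this handle.
       CURLOPT_HTTPAUTH takes a bitmask, so multiple methods may be combined
       and libcurl picks the strongest one the server offers. */
    static void setup_auth(CURL *curl, const char *userpwd)
    {
      curl_easy_setopt(curl, CURLOPT_HTTPAUTH, (long)CURLAUTH_DIGEST);
      curl_easy_setopt(curl, CURLOPT_USERPWD, userpwd); /* "user:password" */
    }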
Daniel Stenberg
d7980c1a45 Daniel Kouril for the HTTP negotiate stuff 2003-06-10 12:25:01 +00:00
Daniel Stenberg
e56ae1426c Daniel Kouril's patch that adds HTTP negotiation support to libcurl was
added.
2003-06-10 12:22:19 +00:00
Daniel Stenberg
696843c020 we fix more 2003-06-10 12:07:10 +00:00
Daniel Stenberg
6ff5621dd7 more generic 2003-06-10 12:05:12 +00:00
Daniel Stenberg
e7fb72a732 Pass the error stream pointer to the URL globber, so that it can report
errors correctly to the user, if need be.

Also fixed so that a missing ] in the globbing process no longer leads
to a core dump.
2003-06-10 09:42:22 +00:00
Daniel Stenberg
8d30d34e0c When doing very big GET requests over HTTPS, we need to add some extra
funky logic in order to make re-tries work fine with OpenSSL. This corrects
the problem David Orrell noticed.
2003-06-06 14:58:26 +00:00
Daniel Stenberg
bc7fe85f8a Just moved around some logic in Curl_write() to make it easier to debug. 2003-06-06 14:56:50 +00:00
Daniel Stenberg
89352d92c5 spellfix 2003-06-06 06:44:05 +00:00
Daniel Stenberg
c32390d84c Reversed the logic to only include the <sys/select.h> header on systems
known to really NEED it as another system that doesn't have it came up:
very old Linux libc5-based systems (in addition to all HPUX versions).

The only known system at this point is AIX.
2003-06-05 14:04:44 +00:00
Daniel Stenberg
45ca866a2d LDAP problem added as mention in bug report #735752 2003-06-03 08:10:53 +00:00
Daniel Stenberg
ceef206c21 include the time headers just like we used to do in the curl/curl.h header
once upon a time
2003-06-03 08:07:06 +00:00
Daniel Stenberg
7c6424f0a9 we want the time defines too 2003-06-03 08:06:23 +00:00
Daniel Stenberg
bc942de6f1 Content-Length: now overrides other means of knowing when the stream has
ended.
2003-06-03 07:53:18 +00:00
Daniel Stenberg
06984df5cb Make the Content-Length info override the Connection: close header, so that
libcurl will stop reading when the number of bytes have arrived and not wait
for a closed socket.
2003-06-02 14:57:08 +00:00
Daniel Stenberg
4f136a3a76 the 500-599 test case range 2003-06-02 14:48:27 +00:00
Daniel Stenberg
363bf3ba30 ignore more 2003-06-02 13:55:40 +00:00
Daniel Stenberg
acb895956a ignore 2003-06-02 13:53:13 +00:00
Daniel Stenberg
21e87b9bb3 David Byron's fix to get the progress-bar to use the local size too when
doing a resumed download.
2003-06-02 13:42:42 +00:00
Daniel Stenberg
c896ebcf12 makefile fiddle
changed how http requests are sent - now in one chunk more often
HPUX include fix in the external headers
better SSL work-arounds for bad SSL servers
modified error message when CURLE_HTTP_RETURNED_ERROR is returned
2003-06-02 13:31:25 +00:00
Daniel Stenberg
d288222e80 work-around SSL implementation flaws better, pointed out in bug report
#745122.
2003-06-02 13:27:03 +00:00
Daniel Stenberg
4eb2a6c9a3 make a more descriptive error message when CURLE_HTTP_RETURNED_ERROR is
returned
2003-06-02 13:14:57 +00:00
Daniel Stenberg
2563731c4d haven't updates this in a loooong time 2003-05-28 10:24:20 +00:00
Daniel Stenberg
4e410111db Posting static data using POST and chunked encoding now also appends the
data to the initial request buffer, if the total post data is less than
100K.
2003-05-28 07:54:33 +00:00
Daniel Stenberg
5670563a26 include sys/time.h, we didn't have a time() proto anymore. Did one of the
changes in curl/curl.h make this occur?
2003-05-27 22:56:01 +00:00
Daniel Stenberg
6caa656d01 Documented which rules the public headers must follow when we write
preprocessor checks for conditions.
2003-05-27 12:51:46 +00:00
Daniel Stenberg
c12af7aed1 oops, removed a # too many 2003-05-27 12:51:15 +00:00
Daniel Stenberg
dcb6d1c01d remove usage of HAVE_* defines, we cannot and shall not depend on any such
defines in the public external header files
2003-05-27 12:45:51 +00:00
Daniel Stenberg
18234cbdac sys/select.h is not present on HPUX, avoid including it 2003-05-27 12:34:48 +00:00
Daniel Stenberg
06bf988dc1 made it work ;-) 2003-05-27 12:18:00 +00:00
Daniel Stenberg
55ff4c3f08 if cvs update fails, attempt again after 5 seconds and retry 50 times
before giving up
2003-05-27 12:03:24 +00:00
Daniel Stenberg
4915002168 Only build in lib and src by default, make the others dist-subdirs.
Make the test stuff get built when we run 'make test' instead.
2003-05-27 08:51:09 +00:00
Daniel Stenberg
5bd8d60e41 Rudy Koento experienced problems with curl's recent habit of POSTing data in
two separate send() calls, first the headers and then the data. I've now made
a fix that for static and known content that isn't to be chunked-encoded,
everything is now sent in one single system call again. This is also better
for network performance reasons.
2003-05-27 08:33:08 +00:00
Daniel Stenberg
fc872808c5 runs on DOS now 2003-05-27 07:37:34 +00:00
Daniel Stenberg
0f4feda382 include file flaw and yet another socks5-fix 2003-05-27 06:41:06 +00:00
Daniel Stenberg
90b0f38316 Another SOCKS5 fix. Make sure that when we use a socks-proxy, it is not the
same as using an http proxy, so we must make sure to better check for http
proxies before we do HTTP proxy stuff. This includes authorization and
URI usage in the request etc.
2003-05-27 06:28:25 +00:00
Daniel Stenberg
18f630ab21 CURLOPT_HTTPDIGEST is added 2003-05-27 06:25:56 +00:00
Daniel Stenberg
e97fd44151 language 2003-05-26 12:32:22 +00:00
Daniel Stenberg
b75679778f ftp ASCII transfers in general need fixing 2003-05-26 08:19:06 +00:00
Daniel Stenberg
35a84ad576 Chris Lewis mentioned that he doesn't get WIN32 defined, only _WIN32 so we
make an adjustment to catch this.
2003-05-26 07:57:53 +00:00
Daniel Stenberg
4ed28be75a even more 2003-05-23 11:24:39 +00:00
Daniel Stenberg
e2f4656a86 Ricardo Cadime found a socket leak when listing directories without
contents. Test cases 144 and 145 were added to verify the fix.

Now we deal with return code 450 properly and other codes also do proper
cleanup.
2003-05-23 11:14:09 +00:00
Daniel Stenberg
1e14da5c60 more ftp testing using NLST and no contents and bad return code 2003-05-23 11:10:35 +00:00
Daniel Stenberg
b2ef79ef3d Rudy Koento's problem fixed, test case 66 verifies this. 2003-05-23 09:47:57 +00:00
Daniel Stenberg
f488874ff5 test 66 returns one line of data with no header (HTTP) 2003-05-23 09:46:19 +00:00
Daniel Stenberg
23258648da --digest added, --compressed rephrased 2003-05-23 08:06:31 +00:00
Daniel Stenberg
6b84ebe501 include digest.h for proto 2003-05-23 06:44:24 +00:00
Daniel Stenberg
07dd067f73 DJGPP fix by Gisle Vanem 2003-05-23 06:43:14 +00:00
Daniel Stenberg
420744d048 more more more 2003-05-22 22:47:48 +00:00
Daniel Stenberg
01108e3a63 warning-free is better 2003-05-22 22:45:38 +00:00
Daniel Stenberg
8026b1e194 Introducing --digest 2003-05-22 22:40:01 +00:00
Daniel Stenberg
a39d77227f Better Digest stuff 2003-05-22 22:39:38 +00:00
Daniel Stenberg
9f69deec7d Added CURLOPT_HTTPDIGEST support
SOCKS5 fix as suggested by Jis in bug report #741841.
2003-05-22 22:38:46 +00:00
Daniel Stenberg
e912f772e0 Document the <dataNUM> thing we use. 2003-05-22 22:37:00 +00:00
Daniel Stenberg
0102726aeb Digest support added 2003-05-22 22:36:39 +00:00
Daniel Stenberg
1e7aa04040 Digest testing added 2003-05-22 22:36:22 +00:00
Daniel Stenberg
00a7c6fe6b proper header added 2003-05-22 16:23:27 +00:00
Daniel Stenberg
87f8c0d471 hush the compiler 2003-05-22 16:12:30 +00:00
Daniel Stenberg
334d78cd18 Initial Digest support. At least partly working. 2003-05-22 16:09:54 +00:00
Daniel Stenberg
2356325592 David Balazic pointed out the lack of checks for a valid %XX code when
we unescape a string. We now check and decode only valid %XX strings.
2003-05-21 15:53:59 +00:00
Daniel Stenberg
d78ec593fa fix the makefile in packages/DOS too 2003-05-21 08:12:52 +00:00
Daniel Stenberg
d5043133e6 Gisle Vanem made curl build with djgpp on DOS. 2003-05-21 08:08:48 +00:00
Daniel Stenberg
509f69a457 Gisle Vanem's fix to make the 'curl -M' output nicer 2003-05-21 07:21:44 +00:00
Daniel Stenberg
662c659220 missing semicolon, by Gisle Vanem 2003-05-20 12:44:55 +00:00
Daniel Stenberg
9a6566e774 Gisle Vanem's code for not trusting h_aliases to always be non-NULL 2003-05-20 09:41:39 +00:00
Daniel Stenberg
4da0428d9e Remind about the gpg command lines 2003-05-20 06:33:13 +00:00
Daniel Stenberg
8ee1177206 support user name and password in proxy environment variables 2003-05-19 13:14:26 +00:00
Daniel Stenberg
e9154b2549 the proxy environment variables now may contain user name and password 2003-05-19 13:09:41 +00:00
Daniel Stenberg
d398a0dd58 remove debug output 2003-05-19 13:08:48 +00:00
Daniel Stenberg
7723a24297 setenv support added to allow test cases to require a set of environment
variables
2003-05-19 13:06:10 +00:00
Daniel Stenberg
95a4b8db68 7.10.5 commit 2003-05-19 11:45:10 +00:00
Daniel Stenberg
663c1898a3 known AIX ipv6 problems 2003-05-16 10:57:53 +00:00
Daniel Stenberg
465de793e8 Skip any preceding dots from the domain name of cookies when we keep them
in memory, and only add them when we save the cookie. This makes all tailmatching
and domain string matching internally a lot easier.

This was also the reason for a remaining bug I introduced in my overhaul.
2003-05-15 22:28:19 +00:00
Daniel Stenberg
de9b76cef0 change the order of the in_addr_t tests, so that 'unsigned long' is tested
for first, as it seems to be what many systems use
2003-05-15 21:13:36 +00:00
Daniel Stenberg
1747a8d3d9 1. George Comninos' progress meter fix
2. I also added the pre-releases and dates to the log
2003-05-15 08:13:19 +00:00
Daniel Stenberg
1094e79749 documented CURLOPT_FTP_USE_EPRT 2003-05-14 09:03:51 +00:00
Daniel Stenberg
22569681bc George Comninos provided a fix that calls the progress meter when waiting
for FTP command responses takes >1 second.
2003-05-14 06:31:00 +00:00
Daniel Stenberg
e615d117a0 Setup and use CURL_INADDR_NONE all over instead of INADDR_NONE. We setup
the define accordingly in the hostip.h header to work nicely all over.
2003-05-13 12:12:17 +00:00
Daniel Stenberg
a51258b6bb before using if2ip(), check if the address is an ip address and skip it if
it is.
2003-05-13 12:11:31 +00:00
Daniel Stenberg
8894bd07b6 libtool 1.4.2 is enough 2003-05-13 09:38:09 +00:00
Daniel Stenberg
ec45a9e825 fix comment 2003-05-13 09:37:45 +00:00
Daniel Stenberg
871358a6e5 before checking for network interfaces using if2ip(), check that the given
name isn't an ip address
2003-05-12 13:06:48 +00:00
Daniel Stenberg
2e2e0fba60 no more complaining when I have 1.5 and it tests for 1.4.2 2003-05-12 13:05:11 +00:00
Daniel Stenberg
4a5139e3f4 fixes from the last week+ 2003-05-12 12:49:22 +00:00
Daniel Stenberg
8f85933d7c Dan F clarified the CURLOPT_ENCODING description after his changes to
allow "" to enable all support formats.
2003-05-12 12:47:35 +00:00
Daniel Stenberg
246f3a63f6 Dan Fandrich added --compressed docu 2003-05-12 12:46:45 +00:00
Daniel Stenberg
e99eff4eb0 setting ENCODING to "" means enable-all-you-support 2003-05-12 12:45:57 +00:00
Daniel Stenberg
c0197f19cf Dan Fandrich changed CURLOPT_ENCODING to select all supported encodings if
set to "".  This frees the application from having to know which encodings
 the library supports.
2003-05-12 12:45:14 +00:00
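As a usage sketch of the behavior described above: an empty string asks libcurl to offer every encoding it was built with, while a specific string restricts the offer to that one (the wrapper function is hypothetical):

    #include <curl/curl.h>

    /* "" makes libcurl send an Accept-Encoding header listing all encodings
       it supports (e.g. deflate, gzip) and decode the response automatically;
       a specific string such as "gzip" limits the offer to that encoding. */
    static void enable_compression(CURL *curl)
    {
      curl_easy_setopt(curl, CURLOPT_ENCODING, "");
    }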
Daniel Stenberg
3994d67eea Dan Fandrich lowered the libtool requirement 2003-05-12 12:38:52 +00:00
Daniel Stenberg
9ead79c9d4 when we have accepted the server's connection in a PORT sequence, we set
the new socket to non-blocking
2003-05-12 12:37:35 +00:00
Daniel Stenberg
9371aed46c avoid the write loop 2003-05-12 12:37:05 +00:00
Daniel Stenberg
940707ad66 incoming proxy headers shall be sent to the debug function as HEADERs not
DATA
2003-05-12 12:29:00 +00:00
Daniel Stenberg
e6c267fb4c oops, run libtoolize as the first tool 2003-05-09 08:17:41 +00:00
Daniel Stenberg
93538fccd6 run libtoolize too 2003-05-09 08:13:02 +00:00
Daniel Stenberg
83a7fad308 run libtoolize to generate these files 2003-05-09 08:12:46 +00:00
Daniel Stenberg
3c7e33388e CURLOPT_FTP_USE_EPRT added 2003-05-09 07:42:47 +00:00
Daniel Stenberg
7b0f35edb6 --disable-eprt added 2003-05-09 07:39:50 +00:00
Daniel Stenberg
94a157d0b0 support for CURLOPT_FTP_USE_EPRT added 2003-05-09 07:39:29 +00:00
Daniel Stenberg
ca04620253 AIX wants sys/select.h 2003-05-09 07:37:27 +00:00
Daniel Stenberg
073ef0b36a clarify on the curl name issue and that there may be other libcurl-based
tools that provide a GUI
2003-05-09 07:07:13 +00:00
Daniel Stenberg
c41c05d4f4 Kevin Delafield reported another case where we didn't correctly check for
EAGAIN but only EWOULDBLOCK, which caused badness on HPUX. We now also check
for and act the same on EINTR errors.
2003-05-06 08:19:36 +00:00
Daniel Stenberg
f1ea54e07a fixed the required tools' version numbers 2003-05-05 14:19:54 +00:00
Daniel Stenberg
a139ce901a the writable argv check now should not exit when building a cross-compiled
curl
2003-05-04 16:07:19 +00:00
Daniel Stenberg
7431957113 put back the libtool test, now for 1.5
require autoconf 2.57
require automake 1.7
2003-05-03 16:25:49 +00:00
Daniel Stenberg
1752d80915 If there is a custom Host: header specified, we use that host name to
extract the correct set of cookies to send. This functionality is verified
by test case 62.
2003-05-02 09:13:19 +00:00
Daniel Stenberg
aa7420e109 send correct cookies when using a custom Host: 2003-05-02 09:12:26 +00:00
Daniel Stenberg
a290d4b9db fixed the format slightly 2003-05-02 09:11:53 +00:00
Daniel Stenberg
19a4314e7f corrected a comment about gzip not being supported 2003-05-01 17:49:47 +00:00
Daniel Stenberg
d166e85e0a FTP URL with type=a 2003-05-01 17:48:59 +00:00
Daniel Stenberg
f213e857ab Andy Cedilnik fixed some compiler warnings 2003-05-01 13:37:36 +00:00
Daniel Stenberg
eb6130baa7 ourerrno became Curl_ourerrno() and is now available to all libcurl 2003-05-01 13:37:05 +00:00
Daniel Stenberg
f69ea2c68a Use the proper Curl_ourerrno() function instead of plain errno, for better
portability. Also use Andy Cedilnik's compiler warning fixes.
2003-05-01 13:36:28 +00:00
Daniel Stenberg
078441d477 the test numbers are now only for human readability; the numbers no longer
enforce protocol/server
2003-04-30 20:29:31 +00:00
Daniel Stenberg
95f6b15a67 no longer assume that the test number implies servers to run 2003-04-30 20:28:49 +00:00
Daniel Stenberg
ee29dbdb8f Each test case now specifies which server(s) it needs, without relying on the
test number.
2003-04-30 20:25:39 +00:00
Daniel Stenberg
15f3f4c93f we say welcome to test 142 2003-04-30 20:08:01 +00:00
Daniel Stenberg
6932e94e0e verify that curl fails fine when an FTP URL with a too deep dir hierarchy
is used
2003-04-30 20:07:37 +00:00
Daniel Stenberg
3ef06d7efe when making up the list of path parts, save the last entry pointing to NULL
as otherwise we'll go nuts
2003-04-30 20:04:17 +00:00
Daniel Stenberg
fb012b48e9 recent action 2003-04-30 20:01:22 +00:00
Daniel Stenberg
bc77bf217f if there's a cookiehost allocated, free that too 2003-04-30 19:58:36 +00:00
Daniel Stenberg
37d1e9351e ok, make the test run ok too 2003-04-30 19:56:53 +00:00
Daniel Stenberg
4494c0dee0 various new cookie tests with a custom Host: header set 2003-04-30 19:49:51 +00:00
Daniel Stenberg
26afc604ac modified to work with modified code 2003-04-30 17:16:25 +00:00
Daniel Stenberg
9aefcada19 modified to produce nicer output when a single test fails 2003-04-30 17:15:38 +00:00
Daniel Stenberg
69fc363760 make the diffs with 'diff -u' to make them nicer and easier to read 2003-04-30 17:15:00 +00:00
Daniel Stenberg
bea02ddebe stop parsing Host: host names at colons too 2003-04-30 17:12:29 +00:00
Daniel Stenberg
3fb257c39c modified to the new cookie function proto 2003-04-30 17:05:19 +00:00
Daniel Stenberg
7c96c5a39b extract host name from custom Host: headers to use for cookies 2003-04-30 17:04:53 +00:00
Daniel Stenberg
efd836d971 Many cookie fixes:
  o Save domains in jars like Mozilla does. It means all domains set in
    Set-Cookie: headers are dot-prefixed.
  o Save and use the 'tailmatch' field in the Mozilla/Netscape cookie jars (the
    second column).
  o Reject cookies using illegal domains in the Set-Cookie: line. Concerns
    both domains with too few dots or domains that are outside the currently
    operating server host's domain.
  o Set the path part by default to the one used in the request, if none was
    set in the Set-Cookie line.
2003-04-30 17:03:43 +00:00
Daniel Stenberg
836aaa1647 changes need for the new ftp path treatment and the new cookie code 2003-04-30 17:01:00 +00:00
Daniel Stenberg
bf2b3dbf3e David Balazic's patch to make the FTP operations "do right" according to
RFC1738, which means it'll use one CWD for each pathpart.
2003-04-30 16:59:42 +00:00
Daniel Stenberg
b4fa2ff995 two more platforms Rich Gray built curl on 2003-04-30 07:32:43 +00:00
Daniel Stenberg
2f9cabc30b Peter Kovacs provided a patch that makes the CURLINFO_CONNECT_TIME work fine
when using the multi interface (too).
2003-04-29 18:03:30 +00:00
Daniel Stenberg
63593f5597 mention configure --help 2003-04-29 16:55:17 +00:00
Daniel Stenberg
c0acaa5d2c CURLOPT_FTPPORT could support port number too 2003-04-28 17:29:32 +00:00
Daniel Stenberg
2e46f8d0a6 corrected the comment which wasn't correct 2003-04-28 13:48:16 +00:00
Daniel Stenberg
51da6aaa07 RSAglue.lib is no longer needed with recent OpenSSL versions 2003-04-25 15:08:46 +00:00
Daniel Stenberg
c8b79e36db Dan Fandrich added support for the gzip Content-Encoding for --compressed 2003-04-24 06:34:31 +00:00
Daniel Stenberg
208374bcc9 Bryan Kemp's reported problems with curl and PUT from stdin and a faked
content-length made me add test case 60, that does exactly this, but it
seems to run fine...
2003-04-23 12:09:58 +00:00
Daniel Stenberg
7f0a6e7203 last 10 days or so 2003-04-22 23:30:04 +00:00
Daniel Stenberg
54ebb9cfd4 libtool 1.5 stuff 2003-04-22 23:29:27 +00:00
Daniel Stenberg
49e9c1495b stop checking for libtool, we don't run that in this script 2003-04-22 23:26:00 +00:00
Daniel Stenberg
a84b0fbd52 Dan Fandrich corrected the error messages on "bad encoding". 2003-04-22 22:33:39 +00:00
Daniel Stenberg
c95814c04d Dan Fandrich's gzip bugfix 2003-04-22 22:32:02 +00:00
Daniel Stenberg
9f8123f1b8 Dan Fandrich's fix 2003-04-22 22:31:02 +00:00
Daniel Stenberg
8b23db4f4d Peter Sylvester pointed out that curl_easy_setopt() will always (wrongly)
return CURLE_OK no matter what happens.
2003-04-22 21:42:39 +00:00
Daniel Stenberg
d77cc13374 two dashes is enough 2003-04-16 12:46:20 +00:00
Daniel Stenberg
9a12db1aa2 typecast the setting of the size, as it might be an off_t which is bigger
than long and libcurl expects a long...
2003-04-15 14:18:37 +00:00
Daniel Stenberg
eb54d34bec If MALLOCDEBUG, include the lib's setup.h here so that the proper defines
are set before all system headers, as otherwise we get compiler warnings
on my Solaris at least.
2003-04-15 14:01:57 +00:00
Daniel Stenberg
4b1203d4c9 include config.h before all system headers, so that _FILE_OFFSET_BITS and
similar is set properly by us first
2003-04-15 13:32:26 +00:00
Daniel Stenberg
183a9c6244 extended the -F section 2003-04-15 09:58:27 +00:00
Daniel Stenberg
1f2294d585 treat uploaded .html files as text/html by default 2003-04-15 09:29:39 +00:00
Daniel Stenberg
0b839c4f77 return the same error for the sslv2 "certificate verify failed" code 2003-04-14 22:00:36 +00:00
Daniel Stenberg
1d4fd1fcae new wording by Kevin Roth 2003-04-14 14:54:18 +00:00
Daniel Stenberg
b1d8d72c16 ignore all stamp-h* 2003-04-14 13:09:44 +00:00
Daniel Stenberg
bafb68b844 With the recent fix of libcurl, it shall now return CURLE_SSL_CACERT when
it had problems with the CA cert and thus we offer a huge blurb of verbose
help to explain to the poor user why this happens.
2003-04-14 13:09:09 +00:00
Daniel Stenberg
21873b52e9 Restored the SSL error codes since they were broken in the 7.10.4 release,
also now attempt to detect and return the specific CACERT error code.
2003-04-14 12:53:29 +00:00
Daniel Stenberg
0aa8b82871 FTP CWD response fixed
gzip content-encoding added
chunked content-encoding fixed
2003-04-14 07:13:08 +00:00
Daniel Stenberg
f9781afafd clarified the CURLINFO_SIZE_DOWNLOAD somewhat on Juan F. Codagnone's
suggestion
2003-04-11 16:52:30 +00:00
Daniel Stenberg
fece361a55 Nic fixed so that Curl_client_write() must not be called with 0 length data.
I edited somewhat and removed trailing whitespaces.
2003-04-11 16:31:18 +00:00
Daniel Stenberg
7b51b2f128 Nic Hines fixed this bug when deflate or gzip contents were downloaded using
chunked encoding.
2003-04-11 16:23:43 +00:00
Daniel Stenberg
22d88fb28e ah, move the zero byte too or havoc will occur 2003-04-11 16:23:06 +00:00
Daniel Stenberg
f7c5b28e76 verify the new url parser fix 2003-04-11 16:22:27 +00:00
Daniel Stenberg
5760f2a307 support ? as separator instead of / even if no protocol was given 2003-04-11 16:08:41 +00:00
Daniel Stenberg
ee46efb5a5 these guys deserve a mentioning here as well 2003-04-11 08:57:19 +00:00
Daniel Stenberg
eb6ffebfc7 Dan the man on the list 2003-04-11 08:55:08 +00:00
Daniel Stenberg
c06c44f286 Dan Fandrich's added gzip support documented. 2003-04-11 08:51:24 +00:00
Daniel Stenberg
019c4088cf Dan Fandrich's gzip patch applied 2003-04-11 08:49:20 +00:00
Daniel Stenberg
0b0a88b78d when saving a cookie jar fails, you don't get an error code or anything,
just a warning in the verbose output stream
2003-04-11 08:19:06 +00:00
Daniel Stenberg
028e9cc56f According to RFC959, CWD is supposed to return 250 on success, but
there seem to be non-compliant FTP servers out there that return 200,
 so we accept any '2xy' response now.
2003-04-11 08:10:54 +00:00
Daniel Stenberg
e0d8615ece show a verbose warning message in case cookie-saving fails, after
Ralph Mitchell's notification.
2003-04-11 07:39:16 +00:00
Daniel Stenberg
c8ecbda40b new ftp tests 2003-04-10 11:43:47 +00:00
Daniel Stenberg
2324c10d43 another week has passed 2003-04-10 11:36:56 +00:00
Daniel Stenberg
89cfa76291 Vlad Krupin's URL parsing patch to fix the URL parsing when the URL has no
slash after the host name, but still a ? and following "parameters".
2003-04-10 09:44:39 +00:00
Daniel Stenberg
072070a22c oops, committed test code not meant to be here 2003-04-09 12:02:06 +00:00
Daniel Stenberg
3c3ad134ea the default debugfunction shows incoming headers as well 2003-04-09 11:57:06 +00:00
Daniel Stenberg
a4ffcfd4d5 timecond support added
made the Last-Modified (faked) header look correct using GMT always
2003-04-09 11:56:31 +00:00
Daniel Stenberg
136670c58a three new ftp tests 2003-04-09 11:55:24 +00:00
Daniel Stenberg
28169725fa <mdtm> added 2003-04-09 11:53:09 +00:00
Daniel Stenberg
5b13106f54 MDTM support added 2003-04-09 11:52:24 +00:00
Daniel Stenberg
1a2db0dfb1 James Bursa fixed a flaw in the content-type extracting code that could
miss the first letter
2003-04-08 14:48:38 +00:00
Daniel Stenberg
696f95bb0a share.c added 2003-04-08 10:35:35 +00:00
Daniel Stenberg
acec588fe3 --disable-eprt perhaps? 2003-04-07 06:41:24 +00:00
Daniel Stenberg
6ed0da8e98 Ryan Weaver's fix to prevent the CA bundle from getting installed even when
building curl without SSL support!
2003-04-06 12:29:45 +00:00
Daniel Stenberg
7fd91d70bd adjusted the formpost testcases to the new boundary string construction 2003-04-04 12:30:35 +00:00
Daniel Stenberg
61788a0389 Changed how boundary strings are generated. This new way uses 28 dashes
and 12 following hexadecimal letters, which seems to be what IE uses.
This makes curl work smoother with more stupidly written server apps.

Worked this out together with Martijn Broenland.
2003-04-04 12:24:01 +00:00
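
A rough sketch of the layout described above, 28 dashes followed by 12 hexadecimal digits; the helper name and the use of rand() are illustrative, not the libcurl code.

    #include <stdlib.h>

    /* fills buf with "----------------------------a1b2c3d4e5f6" style text;
       buf must hold at least 41 bytes */
    static void make_boundary(char *buf)
    {
      const char hex[] = "0123456789abcdef";
      int i;
      for(i = 0; i < 28; i++)
        buf[i] = '-';
      for(; i < 40; i++)
        buf[i] = hex[rand() % 16];
      buf[40] = '\0';
    }
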
Daniel Stenberg
0821447b5b spell fix 2003-04-03 16:11:47 +00:00
Daniel Stenberg
3cba274ba6 kill a compiler warning on cygwin 2003-04-03 14:16:15 +00:00
Daniel Stenberg
df7bbcfd21 Added log output for when the writing of the input HTTP request is successful
or unsuccessful. Used to track down the recent cygwin test suite problems.
2003-04-03 13:43:15 +00:00
Daniel Stenberg
021d406f0c Modified how we log data to server.input, as we can't keep the file open
for very long since that makes things troublesome on certain operating systems.
2003-04-03 13:42:06 +00:00
Daniel Stenberg
294569c502 new 2003-04-03 13:39:36 +00:00
Daniel Stenberg
bfd00ac2ed 7.10.4 commit 2003-04-02 07:48:56 +00:00
Daniel Stenberg
735a4714f4 Version 7.10.4 2003-04-02 07:42:39 +00:00
Daniel Stenberg
827fd47198 documented the new killserver tag 2003-04-01 08:43:09 +00:00
Daniel Stenberg
e26b917661 kill the ftp server afterwards, it is just so messed up 2003-04-01 08:42:37 +00:00
Daniel Stenberg
92872a2a3c log when we've returned verification that we are the test server 2003-04-01 08:42:14 +00:00
Daniel Stenberg
16ddb09cb4 support the new <killserver> tag 2003-04-01 08:41:49 +00:00
Daniel Stenberg
d37031f14e ignore lib505 too 2003-04-01 07:13:28 +00:00
Daniel Stenberg
b4e84ca7d2 lib505.c is a new test case for ftp uploading with rename 2003-04-01 07:13:04 +00:00
Daniel Stenberg
47970b9e6f Added support for the RNFR/RNTO commands 2003-04-01 07:10:08 +00:00
Daniel Stenberg
f65f120d50 updated 2003-03-31 22:16:12 +00:00
Daniel Stenberg
df00ec3c82 move the ssl config clone call to before the connectionexists call and then
also subsequently free the ssl struct if the connection struct is to be
deleted
2003-03-31 21:43:05 +00:00
Sterling Hughes
ad6fca28f9 testing, ignore this commit 2003-03-31 15:59:17 +00:00
Daniel Stenberg
fd33923496 7.10.4-pre6 commit 2003-03-31 14:02:43 +00:00
Daniel Stenberg
a55649dc82 added dist-hook that clears the tests/log dir properly as otherwise
'make distcheck' doesn't pass
2003-03-31 11:37:47 +00:00
Daniel Stenberg
9558f229db Fixup after talks with Richard Bramante. We should now make better
comparisons before re-using SSL connections and re-using SSL connection IDs.
2003-03-31 05:13:26 +00:00
Daniel Stenberg
7917bfb1c9 --location-trusted added, which does a normal location plus the new
CURLOPT_UNRESTRICTED_AUTH option set TRUE.

Patch by Guillaume Cottenceau.
2003-03-31 04:42:20 +00:00
Daniel Stenberg
25f611ca42 Guillaume Cottenceau's patch that adds CURLOPT_UNRESTRICTED_AUTH that
disables the host name check in the FOLLOWLOCATION code. With that option
set, libcurl will send user+password to all hosts.
2003-03-31 04:41:05 +00:00
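
A hedged usage sketch with placeholder credentials and URL: follow redirects and explicitly allow the user+password to travel along to whatever host the redirects lead to.

    #include <curl/curl.h>

    void fetch_trusting_redirects(CURL *curl)
    {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/protected");
      curl_easy_setopt(curl, CURLOPT_USERPWD, "user:secret");
      curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
      curl_easy_setopt(curl, CURLOPT_UNRESTRICTED_AUTH, 1L); /* what --location-trusted sets */
      curl_easy_perform(curl);
    }
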
Daniel Stenberg
e6eb49e7e0 10 days of fixes 2003-03-31 04:05:03 +00:00
Daniel Stenberg
9a075f53dc clarify USERPWD somewhat more 2003-03-31 04:04:40 +00:00
Daniel Stenberg
4b3f800c03 Frankie Fong filed bug report #708708 which identified a problem with
ConnectionExists() when first doing a proxy connect to an HTTPS site and then
switching over to an HTTP connection to the same host.

This fix corrects the problem.
2003-03-31 03:42:01 +00:00
Daniel Stenberg
82bc76b243 Dan Shearer's fix from bug report #618892, which makes 'curl -O' output
an error message about a missing URL.
2003-03-29 11:03:30 +00:00
Daniel Stenberg
18b9b04907 send as much as possible of the POST at once 2003-03-28 12:56:07 +00:00
Daniel Stenberg
87f1f08b36 added section titles and a CONTACT paragraph asking people to use the mailing
lists
2003-03-27 15:09:33 +00:00
Daniel Stenberg
7d7ebbe9f7 fixed the pkg-config stuff for rh9 2003-03-26 19:05:24 +00:00
Daniel Stenberg
5111ce782f add URLs to patch and diff 2003-03-26 11:48:03 +00:00
Daniel Stenberg
20b0e563ce mention the URL to the mailing lists 2003-03-26 11:44:04 +00:00
Daniel Stenberg
8b6cf239a3 attempt to extract openssl information using pkg-config 2003-03-25 22:40:43 +00:00
Daniel Stenberg
bcc285cffd Renamed configure.in to configure.ac, as this is the supposedly new preferred
name for it.
2003-03-25 15:56:33 +00:00
Daniel Stenberg
d5ba030942 use init and copyright to get a better header in the generated script 2003-03-25 15:54:06 +00:00
Daniel Stenberg
803f43592a white space and indent fix 2003-03-25 14:23:12 +00:00
Daniel Stenberg
904b9ccaa3 ignore getdate.c 2003-03-24 23:11:16 +00:00
Daniel Stenberg
89721ff04a Richard Bramante's provided a fix for a handle re-use problem seen when you
change options on an SSL-enabled connection between requests.
2003-03-24 23:10:38 +00:00
Daniel Stenberg
6164823921 Removed the "TC TrustCenter, Germany, Class 0 CA." certificate:
"It is a DEMO certificate and was never intended to be in any list of trusted
CA certificates."

(quote by Götz Babin-Ebell, trustcenter.de)
2003-03-24 11:06:57 +00:00
Daniel Stenberg
f8b3c47f99 all those changes 2003-03-24 10:47:06 +00:00
Daniel Stenberg
56dd2da962 Hopefully this change addresses these two bug reports: 707003 and 706624.
When we init a 'connectdata' struct and then afterwards check for and re-use
another one, we must be careful so that the newly set values are carried over
and used in the surviving connectdata struct.
2003-03-21 08:09:48 +00:00
Daniel Stenberg
68bb74d172 lib/getdate.c.cvs may need a rename if you don't have yacc or bison 2003-03-20 15:12:43 +00:00
Daniel Stenberg
bf5e12c8e0 if the cvs update fails, don't continue further 2003-03-20 14:38:42 +00:00
Daniel Stenberg
de11f5e53a make the ENGINE depend on the USE_SSLEAY define too 2003-03-19 21:28:39 +00:00
Daniel Stenberg
b125e8e23a set binary mode for some file handling and it might work better on some
cygwin installations (using DOS-style files somehow?)
2003-03-19 09:26:29 +00:00
Daniel Stenberg
b28b616eb2 typecast the conversion from const char * to char * 2003-03-19 09:16:59 +00:00
Daniel Stenberg
4534ca238d Rename getdate.c to getdate.c.cvs, since the "normal" build procedure does
imply that yacc/bison exists and can generate this file. Those without one
of those tools can then check out and rename the getdate.c.cvs file.
2003-03-19 09:09:40 +00:00
Daniel Stenberg
cee0e94294 clarify that 22 can be returned on --fail for all HTTP errors being 400
or above
2003-03-18 10:01:51 +00:00
Daniel Stenberg
8319ea7078 more defensive checking as platforms differ... 2003-03-17 17:20:26 +00:00
Daniel Stenberg
5334a58f9b Andy Cedilnik's corrections 2003-03-17 12:38:08 +00:00
Daniel Stenberg
2616bdc4cd it just never ends 2003-03-16 18:41:00 +00:00
Daniel Stenberg
c149b3f797 regenerated from getdate.y 2003-03-16 16:20:56 +00:00
Daniel Stenberg
d349eb3d43 Juan F. Codagnone pointed out a missing thing from the march 2 fix 2003-03-16 16:15:24 +00:00
Daniel Stenberg
9b43ade1c0 typecast getpid() to int to prevent a compiler warning 2003-03-16 10:46:52 +00:00
Daniel Stenberg
ad05d0a8d9 figure out select()'s argument types 2003-03-15 21:04:01 +00:00
Daniel Stenberg
e6bfbe9683 Gisle Vanem's fix to get this working nicely on windows 2003-03-15 21:02:20 +00:00
Daniel Stenberg
c871efca4b Gisle Vanem fixed a name collision with the structure "CONTEXT" in <winnt.h> 2003-03-15 21:00:47 +00:00
Daniel Stenberg
12c72b419c missing newline added 2003-03-15 17:26:59 +00:00
Daniel Stenberg
a15b7691ca no server needed 2003-03-15 17:12:46 +00:00
Daniel Stenberg
01618d323b allow 'none' as server 2003-03-15 17:11:13 +00:00
Daniel Stenberg
bb6d0e37e3 Sort out the ENGINE problems people seem to be having. Now we put all ENGINE
related stuff within HAVE_OPENSSL_ENGINE_H and we don't make any private
typedef or similar if the header is missing...
2003-03-15 16:51:43 +00:00
Daniel Stenberg
24a6100897 * use the pid returned back from test-servers and kill them before starting
them the first time
* verify that the server we start really comes up fine and works as
  expected before continuing
* count test cases where the server can't be run (for whatever reason)
* prefix lots of messages with RUN: to make it easier to realize which script
  is saying what when running tests verbose
* remove the generic sleep(1) from each test, makes the suite fly! ;-)

I hope these changes will make the tests run somewhat more reliably on more
platforms.
2003-03-15 16:43:58 +00:00
Daniel Stenberg
0251563c98 report pid back in the WE ROOLZ message 2003-03-15 16:39:15 +00:00
Daniel Stenberg
72673a351b removed the "banner" when the server is starting 2003-03-15 16:05:47 +00:00
Daniel Stenberg
96b7131844 detect lack of perl before running tests 2003-03-15 15:08:55 +00:00
Daniel Stenberg
dcc2f16416 Rick Jones' minor thing to build better on HPUX 11 2003-03-15 14:47:09 +00:00
Daniel Stenberg
c2b8a04000 Nico Baggus little adjustment to build with OpenSSL 0.9.7 (the ENGINE thing) 2003-03-14 17:21:06 +00:00
Daniel Stenberg
d65587b06c improved "deeper" check 2003-03-14 12:44:56 +00:00
Daniel Stenberg
1ab0134522 No longer halts operation if select or socket are missing, since in most
cases this is wrong... and if they're truly missing, we won't succeed in
linking later on anyway.
2003-03-13 23:02:33 +00:00
Daniel Stenberg
afffce80f0 Philippe Raoult needed this to build on FreeBSD 2003-03-13 21:41:02 +00:00
Daniel Stenberg
70b80b0160 Extra function-find magic for platforms that don't like the way the
default AC_CHECK_FUNCS() work. HPUX 11 is one of them.
2003-03-13 17:06:55 +00:00
Daniel Stenberg
64067a04b5 output what cvs returned, see if we can make the script bail out when
cvs update fails
2003-03-13 15:56:31 +00:00
Daniel Stenberg
38cf0268c0 use include path from BUILD dir since we want the config.h 2003-03-13 15:54:46 +00:00
Daniel Stenberg
2d2034703f Things are moving along... 2003-03-12 14:29:47 +00:00
Daniel Stenberg
571ceeff90 When we append stuff to the URL, we must make sure the text is properly
URL encoded beforehand. Test case 58 added to verify this.
2003-03-12 14:20:16 +00:00
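
A small sketch of the principle using the public curl_escape()/curl_free() pair; the helper and buffer handling are illustrative only.

    #include <string.h>
    #include <curl/curl.h>

    /* append "?<data>" to url with the data URL encoded first */
    void append_query(char *url, size_t urlsize, const char *data)
    {
      char *enc = curl_escape(data, 0);       /* 0 means use strlen(data) */
      if(enc) {
        strncat(url, "?", urlsize - strlen(url) - 1);
        strncat(url, enc, urlsize - strlen(url) - 1);
        curl_free(enc);
      }
    }
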
Daniel Stenberg
34c4ba4321 -m on curl on windows with telnet doesn't work 2003-03-12 14:14:52 +00:00
Daniel Stenberg
babb372eb9 This verifies that my fix for bug report #700275 works. 2003-03-12 14:04:23 +00:00
Daniel Stenberg
5b9b82c1cd test58 added 2003-03-12 14:03:53 +00:00
Daniel Stenberg
075c534270 improved the header checks
--enable-libgcc
check for a sed before using it
2003-03-12 13:42:07 +00:00
Daniel Stenberg
f317f8b149 Add include files to prevent warnings on some (HPUX) systems. 2003-03-12 08:54:45 +00:00
Daniel Stenberg
f8d552dde5 include "config.h" from the lib's private dir 2003-03-12 08:54:11 +00:00
Daniel Stenberg
606f72bb13 Setup include path to the lib dir to enable inclusion of "config.h" 2003-03-12 08:53:44 +00:00
Daniel Stenberg
327e6a2b4f Made set_local_option() properly static as reported by Rick Jones 2003-03-12 08:44:00 +00:00
Daniel Stenberg
be8f6c7f5c Prefix defines and symbols with CURL_ to reduce the risk of colliding with
various system's other defines.
2003-03-12 08:40:45 +00:00
Daniel Stenberg
073448e0ea support a few more common typedefs 2003-03-12 08:11:24 +00:00
Daniel Stenberg
f136f435b5 Massive use of AC_HELP_STRING() all over makes the --help output so much
nicer!
2003-03-12 08:07:30 +00:00
Daniel Stenberg
ebea6b487b another week of changes, especially libtool gave us an adventure to remember 2003-03-11 19:22:10 +00:00
Daniel Stenberg
e5b7dc56e6 syntax error 2003-03-11 19:12:07 +00:00
Daniel Stenberg
c2d8025a0a Christophe Demory fixed the check to work better for non-blocking on HP-UX
systems. Bug report #701749.
2003-03-11 19:07:41 +00:00
Daniel Stenberg
853e240e1d Use ssize_t instead of 'int' to make the 64 bit sparc compiler happier.
Fix by Richard Gorton.
2003-03-11 18:58:21 +00:00
Daniel Stenberg
8755a6d1ac Richard Gorton improved the random_the_seed() function for systems where
we don't find/know of a good random source. This way, we get better
randomness, which in turn should make SSL connections more secure.
2003-03-11 18:55:34 +00:00
Daniel Stenberg
9f723061cb don't check for netinet/if_ether.h, we don't include it and it causes
configure warnings on many systems
2003-03-11 17:16:02 +00:00
Daniel Stenberg
652683fc04 Martin C. Martin's fix to produce an error message in case of failure
in the Curl_is_connected() function.
2003-03-11 16:28:23 +00:00
Daniel Stenberg
83a463891d added things to fix at the next major release/change 2003-03-10 20:46:54 +00:00
Daniel Stenberg
957b618fdc clarify 2003-03-10 20:43:59 +00:00
Daniel Stenberg
ebe5191b63 no the data is not freed, this is left for the app to do when needed 2003-03-10 17:01:11 +00:00
Daniel Stenberg
c426234df7 AAAARG
libtool 1.4.3 is scary as hell and caused just about every build on all sorts
of platforms to stop working, thanks to the fact that it requires a SED
variable somehow set by the configure script. It works fine on my Linux
running autoconf 2.57 and automake 1.7 but others seem not to fare as well.

Reverting back to the ltmain.sh we had previously, which I believe is 1.4.2
including handmade patches for FreeBSD.

ALERT ALERT ALERT before we try 1.4.3 or similar versions again, check the
${SED} stuff and similar carefully.
2003-03-10 14:52:33 +00:00
Daniel Stenberg
8c3a10392e Include sys/types.h as well. Ray DeGennaro reports successful compiling on
AIX when this fix is applied and I cannot see how this will break any
systems.
2003-03-10 12:25:32 +00:00
Daniel Stenberg
d0e44946e9 figure out the path to a 'sed' as otherwise libtool gets crazy 2003-03-07 13:36:32 +00:00
Daniel Stenberg
46a593d968 libtoolize 1.4.3 brought these 2003-03-07 09:03:09 +00:00
Daniel Stenberg
eb0cc34951 Removed define, risc os build, POST-GET bug fixed, AIX 4.3 problems solved
and two makefiles fixed.
2003-03-04 06:41:50 +00:00
Daniel Stenberg
1c7dfda4bc output the md5sum as the last step 2003-03-03 23:26:39 +00:00
Daniel Stenberg
3c0e4a2fa1 Added share.obj 2003-03-03 22:39:34 +00:00
Daniel Stenberg
c753072ae1 moved the disable-thread warning to the switch code so that the AIX 4.3
automatic disable won't cause a warning
2003-03-03 22:31:58 +00:00
Daniel Stenberg
243942a7aa Detect AIX 4.3 or later, and if found disable the check for the thread-safe
*_r() functions as they're not needed (and in fact mess things up for us).
Brought to our attention by the friendly Troels Walsted Hansen in bug report
#696217.
2003-03-03 22:30:25 +00:00
Daniel Stenberg
8d5ac8b43c AIX 4.3 or later should use gethostbyname() and not the *_r() version. 2003-03-03 22:23:48 +00:00
Daniel Stenberg
17962b3d2e Added typecast to please the MSVC compiler. 2003-03-03 06:45:27 +00:00
Daniel Stenberg
f70acd5979 another typecast added to please the borland compiler 2003-03-03 06:42:52 +00:00
Daniel Stenberg
ffe5c46224 Add (void) on our uses of the swrite() macro when we don't read the return
code as this makes compiler warnings. We *should* fix the code to deal with
the return codes instead...
2003-03-03 06:40:36 +00:00
Daniel Stenberg
3242ea5f66 Init postdata properly before issuing a request, so that there isn't any
lingering POST-stuff that confuses GET requests. Juan F. Codagnone reported
this problem in bug report #653859.
2003-03-02 17:43:42 +00:00
Daniel Stenberg
39a282bffc moved a variable declaration to remove a compiler warnings with the MSVC
compiler, mentioned by Andi Jahja
2003-03-02 17:20:59 +00:00
Daniel Stenberg
29583004ce include the engine stuff 2003-02-28 15:50:05 +00:00
Daniel Stenberg
0a1a185874 Andres Garcia Garcia updated to build with the most recent OpenSSL and
the recent libcurl changes.
2003-02-28 15:49:32 +00:00
Daniel Stenberg
8f809e2a93 James Bursa made it compile on RISC OS as well. 2003-02-28 13:11:10 +00:00
Daniel Stenberg
f216059b49 James Bursa wrote a section about cross-compiling for RISC OS 2003-02-28 13:10:54 +00:00
Daniel Stenberg
9121b1f41d the strequal and strnequal should now be called with the proper curl_ prefix 2003-02-28 12:20:08 +00:00
Daniel Stenberg
60e015d0c1 Removed the defines for strequal() and strnequal(). 2003-02-28 12:17:56 +00:00
Daniel Stenberg
7e049fca61 recent stuff 2003-02-28 08:40:57 +00:00
Daniel Stenberg
0f0e4de6a4 mention what happens if size is set to -1 2003-02-28 07:55:01 +00:00
Daniel Stenberg
bc1102922b spell out that POSTFIELDS should be url-encoded in most cases 2003-02-28 07:53:31 +00:00
Daniel Stenberg
a3d3642a30 spell better 2003-02-27 23:10:38 +00:00
Daniel Stenberg
248eb47329 Updated to better reflect reality. Also shows what the CURLMsg struct
looks like.
2003-02-27 14:25:54 +00:00
Daniel Stenberg
52ebf50607 It appears that there are FTP servers that return size 0 for files
when SIZE is used on the file while in BINARY mode. To work
around that (stupid) behavior, we attempt to parse the RETR response
even if SIZE returned zero.

Debugging help from Salvatore Sorrentino on February 26, 2003.
2003-02-27 12:50:54 +00:00
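
For illustration only, a sketch of pulling the byte count out of a typical RETR 150 reply such as "150 Opening BINARY mode data connection for foo (1024 bytes)"; real servers word this differently, which is exactly why this is a last resort.

    #include <stdio.h>
    #include <string.h>

    /* returns -1 when no "(NNN bytes)" part could be found */
    long size_from_150_reply(const char *reply)
    {
      const char *paren = strrchr(reply, '(');
      long size = -1;
      if(paren && strstr(paren, "bytes"))
        sscanf(paren, "(%ld", &size);
      return size;
    }
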
Daniel Stenberg
d34a4b126e test138 is for RETR without size and without a working SIZE 2003-02-26 17:09:47 +00:00
Daniel Stenberg
de96719a45 support <size>-1</size> to completely disable the SIZE command 2003-02-26 17:05:36 +00:00
Daniel Stenberg
9876ed09fe added support for RETRNOSIZE in the control file to tell RETR to not
include size in the 150-reply
2003-02-26 16:57:00 +00:00
Daniel Stenberg
17cbbe3dc5 added a test case for RETR that doesn't get the size in the 150-reply 2003-02-26 16:56:00 +00:00
Daniel Stenberg
b995af17eb added index.html 2003-02-26 13:46:38 +00:00
Daniel Stenberg
6589579850 random updates 2003-02-26 13:01:29 +00:00
Daniel Stenberg
5ddc260fc2 No longer loop to read multiple times before returning back from the transfer
function, as this could easily end up looping for a very long time (more or
less until the whole transfer was done) and no library-using app would want
that.

Found thanks to a report by Kyle Sallee.
2003-02-26 12:42:25 +00:00
Daniel Stenberg
db5c9cd4c0 updated, now features less mentions about older versions 2003-02-25 08:52:32 +00:00
Daniel Stenberg
9b6d010aef better sslcerts link 2003-02-25 08:36:29 +00:00
Daniel Stenberg
065b87e949 7.10.4-pre2 commit 2003-02-24 18:14:48 +00:00
Daniel Stenberg
a6206a3aef Fixes to bring back the "Expect: 100-continue" functionality. If the
header is used, we must wait for a 100 code (or a timeout) before we send the
data. The timeout is merely 1000 ms at this point. We may have reason to set
a longer timeout in the future.
2003-02-24 16:53:53 +00:00
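
A rough sketch of the waiting step, assuming a plain socket and select(); the real libcurl state machine is more involved than this.

    #include <sys/select.h>
    #include <sys/time.h>

    /* >0: the server answered (hopefully "HTTP/1.1 100 Continue"),
        0: timed out after ~1000 ms, send the request body anyway */
    int wait_for_100_continue(int sockfd)
    {
      fd_set readfds;
      struct timeval timeout;

      FD_ZERO(&readfds);
      FD_SET(sockfd, &readfds);
      timeout.tv_sec = 1;
      timeout.tv_usec = 0;
      return select(sockfd + 1, &readfds, NULL, NULL, &timeout);
    }
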
Daniel Stenberg
30639ed72b Kjetil Jacobsen found out that setting CURLOPT_MAXCONNECTS to a value higher
than 5 could cause a segfault.
2003-02-24 14:50:20 +00:00
Daniel Stenberg
9d02a39e13 fixed language for limit-rate 2003-02-24 13:28:32 +00:00
Daniel Stenberg
fc0af0d4d1 daily was weekly, added a little thing about feb 2003 2003-02-24 08:18:17 +00:00
Daniel Stenberg
a683416081 added an EXAMPLE section 2003-02-21 15:19:50 +00:00
Daniel Stenberg
9faf57ee8d how to disable FTP PORT 2003-02-17 23:23:11 +00:00
Daniel Stenberg
44b9ccb7e2 This script clearly fails to remove the build dir at times when it exits,
so we now remove everything matching "build-*" when the script starts.
2003-02-17 09:15:26 +00:00
Daniel Stenberg
de003d9cf8 mention --trace and --trace-ascii in the -v/--verbose section to remind
people how to get even more details shown
2003-02-17 09:02:51 +00:00
Daniel Stenberg
8a2a523c70 mention more cacert magic 2003-02-14 22:28:12 +00:00
Daniel Stenberg
c3dfe50aaf Fix Curl_is_connected() even more to deal with waitconnect() return codes
even better (also based on input from Martin).
2003-02-14 09:11:51 +00:00
Daniel Stenberg
9659d862c7 Matthew Clarke built curl on AIX 3.2.5 2003-02-14 09:06:07 +00:00
Daniel Stenberg
69ab4cd391 include <sys/socket.h> to compile the fd_set stuff properly on all systems 2003-02-14 09:03:03 +00:00
Daniel Stenberg
498f3985b3 geterrno() renamed to ourerrno() to prevent the name clash that occurred in
AIX 3.2.5 and possibly other OSF-like system headers.
2003-02-14 09:01:01 +00:00
Daniel Stenberg
977175d4fd Martin C. Martin's fix for multi-interface connects to non-listening ports. 2003-02-14 08:02:55 +00:00
Daniel Stenberg
3ddc7b9390 Christopher R. Palmer fixed Curl_base64_encode() to deal with zeroes in the
data to encode.
2003-02-13 18:30:10 +00:00
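
The point of the fix is to honor an explicit input length instead of relying on strlen(), so embedded zero bytes survive. A generic stand-in encoder, not the internal Curl_base64_encode():

    #include <stdlib.h>

    static const char b64tab[] =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

    /* encodes 'len' bytes even if some of them are '\0'; caller frees */
    char *base64_encode(const unsigned char *in, size_t len)
    {
      size_t olen = 4 * ((len + 2) / 3), i, o = 0;
      char *out = malloc(olen + 1);

      if(!out)
        return NULL;
      for(i = 0; i < len; i += 3) {
        unsigned long n = (unsigned long)in[i] << 16;
        if(i + 1 < len) n |= (unsigned long)in[i + 1] << 8;
        if(i + 2 < len) n |= in[i + 2];
        out[o++] = b64tab[(n >> 18) & 63];
        out[o++] = b64tab[(n >> 12) & 63];
        out[o++] = (i + 1 < len) ? b64tab[(n >> 6) & 63] : '=';
        out[o++] = (i + 2 < len) ? b64tab[n & 63] : '=';
      }
      out[o] = '\0';
      return out;
    }
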
Daniel Stenberg
10e9bf623f language 2003-02-08 14:36:18 +00:00
Daniel Stenberg
48a5c64e94 include stdarg.h since we use va_* stuff 2003-02-06 19:28:17 +00:00
Daniel Stenberg
140606ccd5 I made curl run fine on a XScale/PXA250 2003-02-05 08:09:33 +00:00
Daniel Stenberg
f56d006f93 Re-arranged the SSL connection code (again). The recent fix was not a very
good one. This should work fine again.
2003-02-05 07:43:05 +00:00
Jean-Philippe Barette-LaPierre
beb13a1d3e added the sharing of DNS cache 2003-02-04 23:48:46 +00:00
Daniel Stenberg
fa47138327 VMS has setjmp.h 2003-02-04 22:28:36 +00:00
Daniel Stenberg
9421d4510a Nico Baggus updated build script for VMS 2003-02-04 22:28:19 +00:00
Daniel Stenberg
ff8abfca85 assume zlib 1.1.4 - pointed out by Kevin Roth 2003-02-04 18:24:55 +00:00
Daniel Stenberg
5c858965b8 HAVE_LIBZ is the actual name of the define we use 2003-02-04 18:23:41 +00:00
Daniel Stenberg
e3f83cb17a make it more obvious what this is by not even trying to show a manual 2003-02-04 18:22:31 +00:00
Daniel Stenberg
de6008e01a James Bursa corrected a bad comment 2003-02-04 18:12:41 +00:00
Daniel Stenberg
6417e696df fixes during the last couple of days 2003-02-04 12:33:13 +00:00
Daniel Stenberg
5d28f3781b Improved error reporting in case of bad SSL_connect()s, and we also no
longer use the SSL functions that store the error message in a static buffer
since that is not very multi-thread friendly.
2003-02-04 12:29:57 +00:00
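
The thread-friendlier OpenSSL call writes the error text into a caller-provided buffer instead of a static one; a minimal sketch:

    #include <stddef.h>
    #include <openssl/err.h>

    /* copy the text for the most recent OpenSSL error into errbuf */
    void report_ssl_error(char *errbuf, size_t errbufsize)
    {
      ERR_error_string_n(ERR_get_error(), errbuf, errbufsize);
    }
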
Daniel Stenberg
10026bb62e scan through the PATH as well, to find stunnel 2003-02-03 22:15:33 +00:00
Daniel Stenberg
21c16f923c Julian Noble pointed out that capath is indeed working fine on Windows
these days since the c_rehash tool is written (fixed) to do the proper
action even on file systems that don't support symlinks.
2003-02-03 21:36:52 +00:00
Daniel Stenberg
32cef52f0d Kevin Roth corrected the zlib stuff to work better. 2003-01-31 07:07:28 +00:00
Daniel Stenberg
e7dd7c54ff don't check for the CA cert bundle if --insecure is used 2003-01-30 14:48:07 +00:00
Daniel Stenberg
b0b50bd12a typecast the argument to isspace() to an int to prevent warnings on some
compilers
2003-01-30 06:06:24 +00:00
Daniel Stenberg
f2c6057490 curl now uses stricter VERIFYHOST by default and only uses a lesser check
if --insecure is used. Reported by Hamish Mackenzie.
2003-01-30 05:15:57 +00:00
Daniel Stenberg
169b2eeb94 Fixes bug #669059. We now extract the Content-Type better and more accurate. 2003-01-30 05:04:02 +00:00
Daniel Stenberg
f81d027f60 test case 57 - verifies that the Content-Type extraction does not stop on
the first space anymore but cuts off the trailing spaces only.

Bug report #669059.
2003-01-30 05:03:19 +00:00
Daniel Stenberg
872eeb7339 changes from the last week or so 2003-01-29 13:56:04 +00:00
Daniel Stenberg
7f67a28c2a HAVE_WRITABLE_ARGV is set if argv[] is writable on the system, and then
we attempt to hide some of the more sensitive command line arguments
2003-01-29 13:16:03 +00:00
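
A hedged sketch of what the hiding can look like when argv[] is writable: blank out the sensitive argument so it no longer shows up in ps output. Purely illustrative, with an assumed helper name.

    #include <string.h>

    void hide_argument(char *arg)
    {
    #ifdef HAVE_WRITABLE_ARGV
      memset(arg, ' ', strlen(arg));  /* e.g. the "user:password" given to -u */
    #else
      (void)arg;                      /* nothing we can safely do here */
    #endif
    }
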
Daniel Stenberg
30a46e1135 John McGowan found a problem where the DEBUGFUNCTION was called with bad
data on uploads.
2003-01-29 12:52:45 +00:00
Daniel Stenberg
3a01478ce8 add the new emacs file and removed the former one 2003-01-29 12:15:16 +00:00
Daniel Stenberg
53d71fab60 example showing what a .emacs using curl-style.el could look like, thanks
to Mats Lidell for awesome elisp hacking!
2003-01-29 12:14:37 +00:00
Daniel Stenberg
be891f112c this is the former emacs file we no longer use, go with curl-style.el and
be happy!
2003-01-29 11:55:16 +00:00
Daniel Stenberg
89934239d7 reset conn->size to -1 in the ftp-do function so that it doesn't go on to
ftp_done() with the previous transfer's value, as Dave Halbakken found out.
He also verified this fix corrected the problem.
2003-01-29 10:54:39 +00:00
Daniel Stenberg
8986037fdd previous changes 2003-01-29 10:17:10 +00:00
Daniel Stenberg
a7c72b7abf removed the local variables for emacs and vim, use the new sample.emacs
way for emacs, and vim users should provide a similar non-polluting style
2003-01-29 10:14:20 +00:00
Daniel Stenberg
409ac80710 removed weirdo {{{ and }}} comments
removed emacs local-variables stuff
2003-01-29 10:12:06 +00:00
Daniel Stenberg
fc7bebdf55 the README.curl is named MANUAL these days 2003-01-28 16:33:05 +00:00
Daniel Stenberg
ca52549557 revised and better 2003-01-28 08:03:13 +00:00
Daniel Stenberg
8948a65654 removed -Wcast-align from --enable-debug with gcc, it just gives too many
warnings that I can't be concerned about at this point.
2003-01-27 14:26:06 +00:00
Daniel Stenberg
b4e33cfcc7 Removed the long-living compiler warnings on the des_pcbc_encrypt() function
calls!
2003-01-27 14:19:22 +00:00
Daniel Stenberg
86742e8334 tests that were not run due to restraints (the netrc-tests) were counted
as skipped twice, and thus the total number of tests appeared wrong
2003-01-27 13:51:35 +00:00
Daniel Stenberg
173b35eaf8 made it work
made it cause fewer compiler warnings
made it require 7.9.7 to build
2003-01-27 10:25:20 +00:00
Daniel Stenberg
2b054e5309 Bertrand Demiddelaer found and fixed this memory leak. 2003-01-24 11:13:59 +00:00
Daniel Stenberg
a302ff1605 string.h keeps the proto for memset() on some platforms, used for FD_ZERO 2003-01-23 19:41:30 +00:00
Daniel Stenberg
f7bb4e6138 added a default to the switch() in order to prevent a compiler warning 2003-01-23 12:00:15 +00:00
Daniel Stenberg
5c5489916b fix the configure option query 2003-01-23 07:37:21 +00:00
Daniel Stenberg
5627cf7167 mention what kind of error you may get if this is not followed 2003-01-23 06:15:26 +00:00
Daniel Stenberg
c05dae4a68 spell 2003-01-23 06:09:35 +00:00
Daniel Stenberg
57e61e3743 This is the new Emacs style for curl hacking, based on work written by
Mats Lidell in project Rockbox.
2003-01-23 06:00:02 +00:00
Daniel Stenberg
a6c395c156 Duncan Wilcox reported a crash with --interface on FreeBSD when ipv6-enabled
and this has been verified to correct the problem.
2003-01-23 05:38:20 +00:00
Daniel Stenberg
543e0b1e0f oops, broken comment fixed 2003-01-22 18:50:51 +00:00
Daniel Stenberg
64b0ff875f extern C this to work in C++ conditions 2003-01-22 18:30:58 +00:00
Daniel Stenberg
a034208a00 reversed the actions on the cmp check for detecting if we're re-running
a test on the same CVS setup as previous, as they seemed to be wrong.

We're not actually using the result for anything at this point though.
2003-01-22 12:29:19 +00:00
Daniel Stenberg
5f1251586b use LANG set to C to prevent localized dates etc 2003-01-22 09:46:33 +00:00
Daniel Stenberg
6f6cffdc32 pass the options to configure properly 2003-01-22 07:57:52 +00:00
Daniel Stenberg
21a98ef264 check for empty confopts before asking for it 2003-01-22 07:41:35 +00:00
Daniel Stenberg
aa90436435 put the configure options in the setup file as well
make -i
show lib/config.h
and some initial checks to prevent this running multiple times without the
CVS having changed
2003-01-22 06:59:52 +00:00
Daniel Stenberg
61225052f9 updated copyright years 2003-01-21 17:25:58 +00:00
Daniel Stenberg
dee3163d95 when a chunked error is noticed, store the error number in the error string
to enable better error-tracking
2003-01-21 16:03:38 +00:00
Daniel Stenberg
8b0668b99e skip the chmod 2003-01-21 15:09:20 +00:00
Daniel Stenberg
8471a82c85 run 'make test-full' instead of 'make test' to get more details in case of
errors
2003-01-21 10:36:35 +00:00
Daniel Stenberg
ed4dff63b9 make test-full in the root dir should run verbose tests but not stop on
single failures
2003-01-21 10:35:34 +00:00
Daniel Stenberg
838e776542 use 'make test-full' instead of only 'make test' as it gives a lot of more
info in case of failures
2003-01-21 10:33:29 +00:00
Daniel Stenberg
39c12790bc Added a 'test-full' target to run the tests in verbose mode. 2003-01-21 10:32:40 +00:00
Daniel Stenberg
126e6d6645 pass srcdir to the ftps-server as well 2003-01-21 10:29:06 +00:00
Daniel Stenberg
5796a1b282 runtests.pl now passes the sourcedir path to the httpsserver.pl script 2003-01-21 10:14:25 +00:00
Daniel Stenberg
90982529fc automake 1.5 should be enough 2003-01-21 09:36:15 +00:00
Daniel Stenberg
aba51d6b60 use process id in build directory name to do better 2003-01-20 20:20:51 +00:00
Daniel Stenberg
49bc4567bb first attempt at script for distributed testing on various unix hosts 2003-01-20 20:07:49 +00:00
Daniel Stenberg
2ac52705c6 output summary with easily identifiable string prefixes 2003-01-20 15:43:50 +00:00
Daniel Stenberg
d0eb56dd97 made this script detect proper versions of the tools we need to build a full
curl on a unix host from CVS
2003-01-20 15:24:54 +00:00
Daniel Stenberg
b9c60df04b added description in all AC_DEFINE() calls 2003-01-20 15:16:56 +00:00
Daniel Stenberg
8c236e4dfa not used anymore 2003-01-20 15:16:17 +00:00
Daniel Stenberg
154a59f21f Five more names we owe a big THANKS for their donations to the project. 2003-01-20 14:49:57 +00:00
Daniel Stenberg
9689e1c548 today's patches and Markus' correction 2003-01-20 14:40:06 +00:00
Daniel Stenberg
5a83976c99 Markus F.X.J. Oberhumer's patch that reduces memory usage quite a bit by
only allocating the scratch memory buffer once it is needed and not always
in the handle.
2003-01-20 12:52:34 +00:00
Daniel Stenberg
b5276a9a69 given passwords in netrc must be respected accordingly 2003-01-20 12:00:46 +00:00
Daniel Stenberg
30377baa5e steps I *MUST* perform when I release a package 2003-01-20 11:29:40 +00:00
Daniel Stenberg
aa8b7dd336 reverted bad header replacement 2003-01-16 21:10:10 +00:00
Daniel Stenberg
f26a338a54 copyright year update in the source header 2003-01-16 21:08:12 +00:00
Daniel Stenberg
c4383f1d99 fixes Marcus brought 2003-01-16 21:07:50 +00:00
Daniel Stenberg
4527995e66 Allow CURLINFO_PRIVATE to be NULL, patch by Markus Oberhumer 2003-01-16 10:59:53 +00:00
Daniel Stenberg
b0fbb98f41 Markus Oberhumer fixed the -cflags option 2003-01-16 10:58:49 +00:00
Daniel Stenberg
63667dfd96 no TABs in source code 2003-01-15 11:44:33 +00:00
Daniel Stenberg
77c388c928 removed a TAB 2003-01-15 11:43:03 +00:00
Daniel Stenberg
a69b814ded Kevin fixed the bad list address 2003-01-15 08:04:09 +00:00
Daniel Stenberg
c51ada766d previous legal file, no longer accurate nor used 2003-01-14 12:55:08 +00:00
Daniel Stenberg
ef2709f97c COPYING is the name of the file 2003-01-14 12:54:11 +00:00
367 changed files with 16246 additions and 15083 deletions

View File

@@ -9,3 +9,6 @@ config.status
 curl-config
 autom4te.cache
 depcomp
+config.guess
+config.sub
+ltmain.sh

CHANGES (2425 lines changed)

File diff suppressed because it is too large

CHANGES.2002 (1504 lines, normal file)

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
 COPYRIGHT AND PERMISSION NOTICE
-Copyright (c) 1996 - 2002, Daniel Stenberg, <daniel@haxx.se>.
+Copyright (c) 1996 - 2003, Daniel Stenberg, <daniel@haxx.se>.
 All rights reserved.

View File

@@ -15,8 +15,9 @@ Compile and build instructions follow below.
CHANGES.0 contains ancient changes.
CHANGES.$year contains changes for the particular year.
memanalyze.pl is for analyzing the output generated by curl if -DMALLOCDEBUG
is used when compiling
tests/memanalyze.pl
is for analyzing the output generated by curl if -DCURLDEBUG
is used when compiling (run configure with --enable-debug)
buildconf builds the makefiles and configure stuff
@@ -39,17 +40,24 @@ REQUIREMENTS
You need the following software installed:
o autoconf 2.50 (or later)
o automake 1.5 (or later)
o libtool 1.4 (or later)
o autoconf 2.57 (or later)
o automake 1.7 (or later)
o libtool 1.4.2 (or later)
o GNU m4 (required by autoconf)
o nroff + perl (if you don't have nroff and perl and you for some reason
don't want to install them, you can rename the source file
src/hugehelp.c.cvs to src/hugehelp.c and avoid having to generate this
file. This will of course give you an older version of the file that isn't
up-to-date. That file was checked in once and won't be updated very
regularly.)
o nroff + perl
If you don't have nroff and perl and you for some reason don't want to
install them, you can rename the source file src/hugehelp.c.cvs to
src/hugehelp.c and avoid having to generate this file. This will of course
give you an older version of the file that isn't up-to-date. That file was
checked in once and won't be updated very regularly.
o yacc/bison
If you don't have yacc or bison, you must rename the lib/getdate.c.cvs file
to lib/getdate.c to be able to build libcurl. yacc/bison is normally used
to generate the lib/getdate.c file from the lib/getdate.y source file.
MAC OS X

LEGAL (25 lines changed)
View File

@@ -1,25 +0,0 @@
Copyright (C) 1998-2002, Daniel Stenberg, <daniel@haxx.se>, et al.
Everyone is permitted to copy and distribute verbatim copies of this license
document, but changing it is not allowed.
In order to be useful for every potential user, the curl and libcurl are
dual-licensed under the MPL and the MIT/X-derivate licenses.
You may opt to use, copy, modify, merge, publish, distribute and/or sell
copies of the Software, and permit persons to whom the Software is furnished
to do so, under the terms of the MPL or the MIT/X-derivate licenses. You may
pick one of these licenses. The files MITX.txt and MPL-1.1.txt contain the
license texts.
As a courtesy to the open-source and free software community, we ask you to
dual-license any modifications that you make as well, under the terms of this
document.
Please remember to always keep the licensing information included in
individual source files up-to-date, so as to avoid misleading anyone as to
the status of these files.
I will use a submission policy according to which I will only enter
contributions into the CVS tree if the contributor agrees to both licenses
and this dual-license approach.

View File

@@ -4,15 +4,17 @@
AUTOMAKE_OPTIONS = foreign
EXTRA_DIST = CHANGES COPYING maketgz SSLCERTS reconf Makefile.dist \
curl-config.in build_vms.com curl-mode.el
EXTRA_DIST = CHANGES COPYING maketgz reconf Makefile.dist \
curl-config.in build_vms.com curl-style.el sample.emacs testcurl.sh
bin_SCRIPTS = curl-config
SUBDIRS = docs lib src include tests packages
SUBDIRS = lib src
DIST_SUBDIRS = $(SUBDIRS) tests include packages docs
# create a root makefile in the distribution:
dist-hook:
rm -rf $(top_builddir)/tests/log
cp $(srcdir)/Makefile.dist $(distdir)/Makefile
html:
@@ -24,7 +26,10 @@ pdf:
check: test
test:
@(cd tests; $(MAKE) quiet-test)
@(cd tests; $(MAKE) all quiet-test)
test-full:
@(cd tests; $(MAKE) all full-test)
#
# Build source and binary rpms. For rpm-3.0 and above, the ~/.rpmmacros
@@ -73,3 +78,13 @@ pkgadd:
# resulting .tar.bz2 file will end up at packages/Win32/cygwin
cygwinbin:
$(MAKE) -C packages/Win32/cygwin cygwinbin
# We extend the standard install with a custom hook:
install-data-hook:
cd include && $(MAKE) install
cd docs && $(MAKE) install
# We extend the standard uninstall with a custom hook:
uninstall-hook:
cd include && $(MAKE) uninstall
cd docs && $(MAKE) uninstall

View File

@@ -59,6 +59,10 @@ vc-ssl-dll:
 cd ..\src
 nmake -f Makefile.vc6
+djgpp:
+make -C lib -f Makefile.dj
+make -C src -f Makefile.dj
 cygwin:
 ./configure
 make

README (37 lines changed)
View File

@@ -11,19 +11,31 @@ README
MANUAL document. Find out how to install Curl by reading the INSTALL
document.
libcurl is a library that Curl is using to do its job. It is readily
libcurl is the library curl is using to do its job. It is readily
available to be used by your software. Read the libcurl.3 man page to
find out how!
learn how!
You find answers to the most frequent questions we get in the FAQ document.
Study the LEGAL file for distribution terms and similar.
Study the COPYING file for distribution terms and similar.
CONTACT
If you have problems, questions, ideas or suggestions, please contact us
by posting to a suitable mailing list. See http://curl.haxx.se/mail/
Many major contributors to the project are listed in the THANKS document.
WEB SITE
Visit the curl web site or mirrors for the latest news:
http://curl.haxx.se/
http://curl.sf.net/
http://curl.planetmirror.com/
Sweden -- http://curl.haxx.se/
Russia -- http://curl.tsuren.net/
US -- http://curl.sf.net/
Australia -- http://curl.planetmirror.com/
DOWNLOAD
The official download mirror sites are:
@@ -33,21 +45,26 @@ README
Australia -- http://curl.planetmirror.com/download/
US -- http://curl.sourceforge.net/download/
Hongkong -- http://www.execve.net/curl/
Russia -- http://curl.tsuren.net/download/
CVS
To download the very latest source off the CVS server do this:
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl login
cvs -d :pserver:cvsread@cvs.php.net:/repository login
(just press enter when asked for password)
(enter "phpfi" when asked for password)
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl co curl
cvs -d :pserver:cvsread@cvs.php.net:/repository co curl
(you'll get a directory named curl created, filled with the source code)
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl logout
cvs -d :pserver:cvsread@cvs.php.net:/repository logout
(you're off the hook!)
NOTICE
Curl contains pieces of source code that is Copyright (c) 1998, 1999
Kungliga Tekniska Högskolan. This notice is included here to comply with the
distribution terms.

View File

@@ -1,90 +0,0 @@
/* Name of this package! */
#undef PACKAGE
/* Version number of this archive. */
#undef VERSION
/* Define if you have the getpass function. */
#undef HAVE_GETPASS
/* Define cpu-machine-OS */
#undef OS
/* Define if you have the gethostbyaddr_r() function with 5 arguments */
#undef HAVE_GETHOSTBYADDR_R_5
/* Define if you have the gethostbyaddr_r() function with 7 arguments */
#undef HAVE_GETHOSTBYADDR_R_7
/* Define if you have the gethostbyaddr_r() function with 8 arguments */
#undef HAVE_GETHOSTBYADDR_R_8
/* Define if you have the gethostbyname_r() function with 3 arguments */
#undef HAVE_GETHOSTBYNAME_R_3
/* Define if you have the gethostbyname_r() function with 5 arguments */
#undef HAVE_GETHOSTBYNAME_R_5
/* Define if you have the gethostbyname_r() function with 6 arguments */
#undef HAVE_GETHOSTBYNAME_R_6
/* Define if you have the inet_ntoa_r function declared. */
#undef HAVE_INET_NTOA_R_DECL
/* Define if you need the _REENTRANT define for some functions */
#undef NEED_REENTRANT
/* Define if you have the Kerberos4 libraries (including -ldes) */
#undef KRB4
/* Define if you want to enable IPv6 support */
#undef ENABLE_IPV6
/* Define this to 'int' if ssize_t is not an available typedefed type */
#undef ssize_t
/* Define this to 'int' if socklen_t is not an available typedefed type */
#undef socklen_t
/* Define this as a suitable file to read random data from */
#undef RANDOM_FILE
/* Define this to your Entropy Gathering Daemon socket pathname */
#undef EGD_SOCKET
/* Define if you have a working OpenSSL installation */
#undef OPENSSL_ENABLED
/* Define the one correct non-blocking socket method below */
#undef HAVE_FIONBIO
#undef HAVE_IOCTLSOCKET
#undef HAVE_IOCTLSOCKET_CASE
#undef HAVE_O_NONBLOCK
#undef HAVE_DISABLED_NONBLOCKING
/* Define this to 'int' if in_addr_t is not an available typedefed type */
#undef in_addr_t
/* Define to disable DICT */
#undef CURL_DISABLE_DICT
/* Define to disable FILE */
#undef CURL_DISABLE_FILE
/* Define to disable FTP */
#undef CURL_DISABLE_FTP
/* Define to disable GOPHER */
#undef CURL_DISABLE_GOPHER
/* Define to disable HTTP */
#undef CURL_DISABLE_HTTP
/* Define to disable LDAP */
#undef CURL_DISABLE_LDAP
/* Define to disable TELNET */
#undef CURL_DISABLE_TELNET
/* Define if you have zlib present */
#undef HAVE_LIBZ

View File

@@ -38,7 +38,7 @@ AC_DEFUN(CURL_CHECK_NONBLOCKING_SOCKET,
],[
dnl the O_NONBLOCK test was fine
nonblock="O_NONBLOCK"
AC_DEFINE(HAVE_O_NONBLOCK)
AC_DEFINE(HAVE_O_NONBLOCK, 1, [use O_NONBLOCK for non-blocking sockets])
],[
dnl the code was bad, try a different program now, test 2
@@ -52,7 +52,7 @@ dnl the code was bad, try a different program now, test 2
],[
dnl FIONBIO test was good
nonblock="FIONBIO"
AC_DEFINE(HAVE_FIONBIO)
AC_DEFINE(HAVE_FIONBIO, 1, [use FIONBIO for non-blocking sockets])
],[
dnl FIONBIO test was also bad
dnl the code was bad, try a different program now, test 3
@@ -66,7 +66,7 @@ dnl the code was bad, try a different program now, test 3
],[
dnl ioctlsocket test was good
nonblock="ioctlsocket"
AC_DEFINE(HAVE_IOCTLSOCKET)
AC_DEFINE(HAVE_IOCTLSOCKET, 1, [use ioctlsocket() for non-blocking sockets])
],[
dnl ioctlsocket didnt compile!
@@ -79,11 +79,11 @@ dnl ioctlsocket didnt compile!
],[
dnl ioctlsocket test was good
nonblock="IoctlSocket"
AC_DEFINE(HAVE_IOCTLSOCKET_CASE)
AC_DEFINE(HAVE_IOCTLSOCKET_CASE, 1, [use Ioctlsocket() for non-blocking sockets])
],[
dnl ioctlsocket didnt compile!
nonblock="nada"
AC_DEFINE(HAVE_DISABLED_NONBLOCKING)
AC_DEFINE(HAVE_DISABLED_NONBLOCKING, 1, [disabled non-blocking sockets])
])
dnl end of forth test
@@ -152,10 +152,8 @@ AC_DEFUN([TYPE_IN_ADDR_T],
AC_MSG_CHECKING([for in_addr_t equivalent])
AC_CACHE_VAL([curl_cv_in_addr_t_equiv],
[
# Systems have either "struct sockaddr *" or
# "void *" as the second argument to getpeername
curl_cv_in_addr_t_equiv=
for t in int size_t unsigned long "unsigned long"; do
for t in "unsigned long" int size_t unsigned long; do
AC_TRY_COMPILE([
#include <sys/types.h>
#include <sys/socket.h>
@@ -272,15 +270,15 @@ AC_DEFUN(CURL_CHECK_INET_NTOA_R,
AC_MSG_CHECKING(whether inet_ntoa_r is declared)
AC_EGREP_CPP(inet_ntoa_r,[
#include <arpa/inet.h>],[
AC_DEFINE(HAVE_INET_NTOA_R_DECL)
AC_DEFINE(HAVE_INET_NTOA_R_DECL, 1, [inet_ntoa_r() is declared])
AC_MSG_RESULT(yes)],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING(whether inet_ntoa_r with -D_REENTRANT is declared)
AC_EGREP_CPP(inet_ntoa_r,[
#define _REENTRANT
#include <arpa/inet.h>],[
AC_DEFINE(HAVE_INET_NTOA_R_DECL)
AC_DEFINE(NEED_REENTRANT)
AC_DEFINE(HAVE_INET_NTOA_R_DECL, 1, [inet_ntoa_r() is declared])
AC_DEFINE(NEED_REENTRANT, 1, [need REENTRANT defined])
AC_MSG_RESULT(yes)],
AC_MSG_RESULT(no))])])
])
@@ -302,7 +300,7 @@ struct hostent_data hdata;
int rc;
rc = gethostbyaddr_r(address, length, type, &h, &hdata);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_5)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_5, 1, [gethostbyaddr_r() takes 5 args])
ac_cv_gethostbyaddr_args=5],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING(if gethostbyaddr_r with -D_REENTRANT takes 5 arguments)
@@ -318,8 +316,8 @@ struct hostent_data hdata;
int rc;
rc = gethostbyaddr_r(address, length, type, &h, &hdata);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_5)
AC_DEFINE(NEED_REENTRANT)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_5, 1, [gethostbyaddr_r() takes 5 args])
AC_DEFINE(NEED_REENTRANT, 1, [need REENTRANT])
ac_cv_gethostbyaddr_args=5],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING(if gethostbyaddr_r takes 7 arguments)
@@ -337,7 +335,7 @@ struct hostent * hp;
hp = gethostbyaddr_r(address, length, type, &h,
buffer, 8192, &h_errnop);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_7)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_7, 1, [gethostbyaddr_r() takes 7 args] )
ac_cv_gethostbyaddr_args=7],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING(if gethostbyaddr_r takes 8 arguments)
@@ -356,7 +354,7 @@ int rc;
rc = gethostbyaddr_r(address, length, type, &h,
buffer, 8192, &hp, &h_errnop);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_8)
AC_DEFINE(HAVE_GETHOSTBYADDR_R_8, 1, [gethostbyaddr_r() takes 8 args])
ac_cv_gethostbyaddr_args=8],[
AC_MSG_RESULT(no)
have_missing_r_funcs="$have_missing_r_funcs gethostbyaddr_r"])])])])])
@@ -380,7 +378,7 @@ gethostbyname_r(const char *, struct hostent *, struct hostent_data *);],[
struct hostent_data data;
gethostbyname_r(NULL, NULL, NULL);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_3)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_3, 1, [gethostbyname_r() takes 3 args])
ac_cv_gethostbyname_args=3],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([if gethostbyname_r with -D_REENTRANT takes 3 arguments])
@@ -398,8 +396,8 @@ gethostbyname_r(const char *,struct hostent *, struct hostent_data *);],[
struct hostent_data data;
gethostbyname_r(NULL, NULL, NULL);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_3)
AC_DEFINE(NEED_REENTRANT)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_3, 1, [gethostbyname_r() takes 3 args])
AC_DEFINE(NEED_REENTRANT, 1, [needs REENTRANT])
ac_cv_gethostbyname_args=3],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([if gethostbyname_r takes 5 arguments])
@@ -413,7 +411,7 @@ struct hostent *
gethostbyname_r(const char *, struct hostent *, char *, int, int *);],[
gethostbyname_r(NULL, NULL, NULL, 0, NULL);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_5)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_5, 1, [gethostbyname_r() takes 5 args])
ac_cv_gethostbyname_args=5],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([if gethostbyname_r takes 6 arguments])
@@ -428,7 +426,7 @@ gethostbyname_r(const char *, struct hostent *, char *, size_t,
struct hostent **, int *);],[
gethostbyname_r(NULL, NULL, NULL, 0, NULL, NULL);],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_6)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_6, 1, [gethostbyname_r() takes 6 args])
ac_cv_gethostbyname_args=6],[
AC_MSG_RESULT(no)
have_missing_r_funcs="$have_missing_r_funcs gethostbyname_r"],

View File

@@ -1,69 +1,69 @@
$!
$
$ on control_y then goto Common_Exit!
$ orig = f$environment("DEFAULT")
$ loc = f$environment("PROCEDURE")
$ def = f$parse("X.X;1",loc) - "X.X;1"
$
$ set def 'def'
$ cc_qual = "/define=HAVE_CONFIG_H=1/include=(""../include/"",""../"",""../../openssl-0_9_6c/include/"")"
$ if p1 .eqs. "LISTING" then cc_qual = cc_qual + "/LIST/MACHINE"
$ if p1 .eqs. "DEBUG" then cc_qual = cc_qual + "/LIST/MACHINE/DEBUG"
$ msg_qual = ""
$ call build "[.lib]" "*.c"
$ call build "[.src]" "*.c"
$ call build "[.src]" "*.msg"
$!
$
$ on control_y then goto Common_Exit!
$ orig = f$environment("DEFAULT")
$ loc = f$environment("PROCEDURE")
$ def = f$parse("X.X;1",loc) - "X.X;1"
$
$ set def 'def'
$ cc_qual = "/define=HAVE_CONFIG_H=1/include=(""../include/"",""../"",""../../openssl-0_9_7/include/"")"
$ if p1 .eqs. "LISTING" then cc_qual = cc_qual + "/LIST/MACHINE"
$ if p1 .eqs. "DEBUG" then cc_qual = cc_qual + "/LIST/MACHINE/DEBUG"
$ msg_qual = ""
$ call build "[.lib]" "*.c"
$ call build "[.src]" "*.c"
$ call build "[.src]" "*.msg"
$ link /exe=curl.exe [.src]curl/lib/include=main,[.lib]curl/lib, -
[-.openssl-0_9_6c.axp.exe.ssl]libssl/lib, -
[-.openssl-0_9_6c.axp.exe.crypto]libcrypto/lib
$
$
$ goto Common_Exit
$build: subroutine
$ set noon
$ set default 'p1'
$ search = p2
$ reset = f$search("reset")
$ if f$search("CURL.OLB") .eqs. ""
$ then
$ LIB/CREATE/OBJECT CURL.OLB
$ endif
$ reset = f$search("reset",1)
$Loop:
$ file = f$search(search,1)
$ if file .eqs. "" then goto EndLoop
$ obj = f$search(f$parse(".OBJ;",file),2)
$ if (obj .nes. "")
$ then
$ if (f$cvtime(f$file(file,"rdt")) .gts. f$cvtime(f$file(obj,"rdt")))
$ then
$ call compile 'file'
$ lib/object curl.OLB 'f$parse(".obj;",file)'
$ else
$! write sys$output "File: ''file' is up to date"
$ endif
$ else
$! write sys$output "Object for file: ''file' does not exist"
$ call compile 'file'
$ lib/object curl.OLB 'f$parse(".obj;",file)'
$ endif
$ goto Loop
$EndLoop:
$ purge
$ set def 'def'
$ endsubroutine ! Build
$
$compile: subroutine
$ set noon
$ file = p1
$ qual = p2+p3+p4+p5+p6+p7+p8
$ typ = f$parse(file,,,"TYPE") - "."
$ cmd_c = "CC "+cc_qual
$ cmd_msg = "MESSAGE "+msg_qual
$ x = cmd_'typ'
$ 'x' 'file'
$ ENDSUBROUTINE ! Compile
$
$Common_Exit:
$ set default 'orig'
$ exit
[-.openssl-0_9_7.axp.exe.ssl]libssl/lib, -
[-.openssl-0_9_7.axp.exe.crypto]libcrypto/lib
$
$
$ goto Common_Exit
$build: subroutine
$ set noon
$ set default 'p1'
$ search = p2
$ reset = f$search("reset")
$ if f$search("CURL.OLB") .eqs. ""
$ then
$ LIB/CREATE/OBJECT CURL.OLB
$ endif
$ reset = f$search("reset",1)
$Loop:
$ file = f$search(search,1)
$ if file .eqs. "" then goto EndLoop
$ obj = f$search(f$parse(".OBJ;",file),2)
$ if (obj .nes. "")
$ then
$ if (f$cvtime(f$file(file,"rdt")) .gts. f$cvtime(f$file(obj,"rdt")))
$ then
$ call compile 'file'
$ lib/object curl.OLB 'f$parse(".obj;",file)'
$ else
$! write sys$output "File: ''file' is up to date"
$ endif
$ else
$! write sys$output "Object for file: ''file' does not exist"
$ call compile 'file'
$ lib/object curl.OLB 'f$parse(".obj;",file)'
$ endif
$ goto Loop
$EndLoop:
$ purge
$ set def 'def'
$ endsubroutine ! Build
$
$compile: subroutine
$ set noon
$ file = p1
$ qual = p2+p3+p4+p5+p6+p7+p8
$ typ = f$parse(file,,,"TYPE") - "."
$ cmd_c = "CC "+cc_qual
$ cmd_msg = "MESSAGE "+msg_qual
$ x = cmd_'typ'
$ 'x' 'file'
$ ENDSUBROUTINE ! Compile
$
$Common_Exit:
$ set default 'orig'
$ exit

buildconf (126 lines changed)
View File

@@ -5,7 +5,125 @@ die(){
exit
}
aclocal || die "The command 'aclocal' failed"
autoheader || die "The command 'autoheader' failed"
autoconf || die "The command 'autoconf' failed"
automake -a || die "The command 'automake $MAKEFILES' failed"
#--------------------------------------------------------------------------
# autoconf 2.57 or newer
#
need_autoconf="2.57"
ac_version=`${AUTOCONF:-autoconf} --version 2>/dev/null|head -1| sed -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'`
if test -z "$ac_version"; then
echo "buildconf: autoconf not found."
echo " You need autoconf version $need_autoconf or newer installed."
exit 1
fi
IFS=.; set $ac_version; IFS=' '
if test "$1" = "2" -a "$2" -lt "57" || test "$1" -lt "2"; then
echo "buildconf: autoconf version $ac_version found."
echo " You need autoconf version $need_autoconf or newer installed."
echo " If you have a sufficient autoconf installed, but it"
echo " is not named 'autoconf', then try setting the"
echo " AUTOCONF environment variable."
exit 1
fi
echo "buildconf: autoconf version $ac_version (ok)"
#--------------------------------------------------------------------------
# autoheader 2.50 or newer
#
ah_version=`${AUTOHEADER:-autoheader} --version 2>/dev/null|head -1| sed -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'`
if test -z "$ah_version"; then
echo "buildconf: autoheader not found."
echo " You need autoheader version 2.50 or newer installed."
exit 1
fi
IFS=.; set $ah_version; IFS=' '
if test "$1" = "2" -a "$2" -lt "50" || test "$1" -lt "2"; then
echo "buildconf: autoheader version $ah_version found."
echo " You need autoheader version 2.50 or newer installed."
echo " If you have a sufficient autoheader installed, but it"
echo " is not named 'autoheader', then try setting the"
echo " AUTOHEADER environment variable."
exit 1
fi
echo "buildconf: autoheader version $ah_version (ok)"
#--------------------------------------------------------------------------
# automake 1.7 or newer
#
need_automake="1.7"
am_version=`${AUTOMAKE:-automake} --version 2>/dev/null|head -1| sed -e 's/^.* \([0-9]\)/\1/' -e 's/[a-z]* *$//'`
if test -z "$am_version"; then
echo "buildconf: automake not found."
echo " You need automake version $need_automake or newer installed."
exit 1
fi
IFS=.; set $am_version; IFS=' '
if test "$1" = "1" -a "$2" -lt "7" || test "$1" -lt "1"; then
echo "buildconf: automake version $am_version found."
echo " You need automake version $need_automake or newer installed."
echo " If you have a sufficient automake installed, but it"
echo " is not named 'autommake', then try setting the"
echo " AUTOMAKE environment variable."
exit 1
fi
echo "buildconf: automake version $am_version (ok)"
#--------------------------------------------------------------------------
# libtool check
#
LIBTOOL_WANTED_MAJOR=1
LIBTOOL_WANTED_MINOR=4
LIBTOOL_WANTED_PATCH=2
LIBTOOL_WANTED_VERSION=1.4.2
libtool=`which glibtool 2>/dev/null`
if test ! -x "$libtool"; then
libtool=`which libtool`
fi
#lt_pversion=`${LIBTOOL:-$libtool} --version 2>/dev/null|head -1| sed -e 's/^.* \([0-9]\)/\1/' -e 's/[a-z]* *$//'`
lt_pversion=`$libtool --version 2>/dev/null|head -1|sed -e 's/^[^0-9]*//g' -e 's/[- ].*//'`
if test -z "$lt_pversion"; then
echo "buildconf: libtool not found."
echo " You need libtool version $LIBTOOL_WANTED_VERSION or newer installed"
exit 1
fi
lt_version=`echo $lt_pversion` #|sed -e 's/\([a-z]*\)$/.\1/'`
IFS=.; set $lt_version; IFS=' '
lt_status="good"
if test "$1" = "$LIBTOOL_WANTED_MAJOR"; then
if test "$2" -lt "$LIBTOOL_WANTED_MINOR"; then
lt_status="bad"
elif test ! -z "$LIBTOOL_WANTED_PATCH"; then
if test -n "$3"; then
if test "$3" -lt "$LIBTOOL_WANTED_PATCH"; then
lt_status="bad"
fi
fi
fi
fi
if test $lt_status != "good"; then
echo "buildconf: libtool version $lt_pversion found."
echo " You need libtool version $LIBTOOL_WANTED_VERSION or newer installed"
exit 1
fi
echo "buildconf: libtool version $lt_version (ok)"
# ------------------------------------------------------------
# run the correct scripts now
echo "buildconf: running libtoolize"
${LIBTOOLIZE:-libtoolize} --copy --automake || die "The command '${LIBTOOLIZE:-libtoolize} --copy --automake' failed"
echo "buildconf: running aclocal"
${ACLOCAL:-aclocal} || die "The command '${AUTOHEADER:-aclocal}' failed"
echo "buildconf: running autoheader"
${AUTOHEADER:-autoheader} || die "The command '${AUTOHEADER:-autoheader}' failed"
echo "buildconf: running autoconf"
${AUTOCONF:-autoconf} || die "The command '${AUTOCONF:-autoconf}' failed"
echo "buildconf: running automake"
${AUTOMAKE:-automake} -a || die "The command '${AUTOMAKE:-automake} -a' failed"
exit 0

config.guess (vendored, 1317 lines changed)

File diff suppressed because it is too large

config.sub (vendored, 1411 lines changed)

File diff suppressed because it is too large

View File

@@ -1,23 +1,37 @@
dnl $Id$
dnl Process this file with autoconf to produce a configure script.
dnl Ensure that this file is processed with autoconf 2.50 or newer
dnl Don't even think about removing this check!
AC_PREREQ(2.50)
AC_PREREQ(2.57)
dnl We don't know the version number "staticly" so we use a dash here
AC_INIT(curl, [-], [curl-bug@haxx.se])
dnl configure script copyright
AC_COPYRIGHT([Copyright (c) 1998 - 2003 Daniel Stenberg, <daniel@haxx.se>
This configure script may be copied, distributed and modified under the
terms of the curl license; see COPYING for more details])
dnl First some basic init macros
AC_INIT
AC_CONFIG_SRCDIR([lib/urldata.h])
AM_CONFIG_HEADER(lib/config.h src/config.h tests/server/config.h lib/ca-bundle.h)
AM_CONFIG_HEADER(lib/config.h src/config.h tests/server/config.h )
AM_MAINTAINER_MODE
dnl SED is needed by some of the tools
AC_PATH_PROG( SED, sed, , $PATH:/usr/bin:/usr/local/bin)
AC_SUBST(SED)
dnl AR is used by libtool, and try the odd Solaris path too
AC_PATH_PROG( AR, ar, , $PATH:/usr/bin:/usr/local/bin:/usr/ccs/bin)
AC_SUBST(AR)
dnl figure out the libcurl version
VERSION=`sed -ne 's/^#define LIBCURL_VERSION "\(.*\)"/\1/p' ${srcdir}/include/curl/curl.h`
VERSION=`$SED -ne 's/^#define LIBCURL_VERSION "\(.*\)"/\1/p' ${srcdir}/include/curl/curl.h`
AM_INIT_AUTOMAKE(curl,$VERSION)
AC_MSG_CHECKING([curl version])
AC_MSG_RESULT($VERSION)
dnl
dnl we extract the numerical version for curl-config only
VERSIONNUM=`sed -ne 's/^#define LIBCURL_VERSION_NUM 0x\(.*\)/\1/p' ${srcdir}/include/curl/curl.h`
VERSIONNUM=`$SED -ne 's/^#define LIBCURL_VERSION_NUM 0x\(.*\)/\1/p' ${srcdir}/include/curl/curl.h`
AC_SUBST(VERSIONNUM)
dnl Solaris pkgadd support definitions
@@ -72,8 +86,8 @@ dnl switch off particular protocols
dnl
AC_MSG_CHECKING([whether to support http])
AC_ARG_ENABLE(http,
[ --enable-http Enable HTTP support
--disable-http Disable HTTP support],
AC_HELP_STRING([--enable-http],[Enable HTTP support])
AC_HELP_STRING([--disable-http],[Disable HTTP support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
@@ -90,8 +104,8 @@ AC_ARG_ENABLE(http,
)
AC_MSG_CHECKING([whether to support ftp])
AC_ARG_ENABLE(ftp,
[ --enable-ftp Enable FTP support
--disable-ftp Disable FTP support],
AC_HELP_STRING([--enable-ftp],[Enable FTP support])
AC_HELP_STRING([--disable-ftp],[Disable FTP support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
@@ -105,8 +119,8 @@ AC_ARG_ENABLE(ftp,
)
AC_MSG_CHECKING([whether to support gopher])
AC_ARG_ENABLE(gopher,
[ --enable-gopher Enable GOPHER support
--disable-gopher Disable GOPHER support],
AC_HELP_STRING([--enable-gopher],[Enable GOPHER support])
AC_HELP_STRING([--disable-gopher],[Disable GOPHER support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
@@ -120,8 +134,8 @@ AC_ARG_ENABLE(gopher,
)
AC_MSG_CHECKING([whether to support file])
AC_ARG_ENABLE(file,
[ --enable-file Enable FILE support
--disable-file Disable FILE support],
AC_HELP_STRING([--enable-file],[Enable FILE support])
AC_HELP_STRING([--disable-file],[Disable FILE support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
@@ -135,8 +149,8 @@ AC_ARG_ENABLE(file,
)
AC_MSG_CHECKING([whether to support ldap])
AC_ARG_ENABLE(ldap,
[ --enable-ldap Enable LDAP support
--disable-ldap Disable LDAP support],
AC_HELP_STRING([--enable-ldap],[Enable LDAP support])
AC_HELP_STRING([--disable-ldap],[Disable LDAP support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
@@ -150,12 +164,12 @@ AC_ARG_ENABLE(ldap,
)
AC_MSG_CHECKING([whether to support dict])
AC_ARG_ENABLE(dict,
[ --enable-dict Enable DICT support
--disable-dict Disable DICT support],
AC_HELP_STRING([--enable-dict],[Enable DICT support])
AC_HELP_STRING([--disable-dict],[Disable DICT support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
AC_DEFINE(CURL_DISABLE_DICT, 1 [to disable DICT])
AC_DEFINE(CURL_DISABLE_DICT, 1, [to disable DICT])
AC_SUBST(CURL_DISABLE_DICT)
;;
*) AC_MSG_RESULT(yes)
@@ -165,8 +179,8 @@ AC_ARG_ENABLE(dict,
)
AC_MSG_CHECKING([whether to support telnet])
AC_ARG_ENABLE(telnet,
[ --enable-telnet Enable TELNET support
--disable-telnet Disable TELNET support],
AC_HELP_STRING([--enable-telnet],[Enable TELNET support])
AC_HELP_STRING([--disable-telnet],[Disable TELNET support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
@@ -186,8 +200,8 @@ dnl **********************************************************************
AC_MSG_CHECKING([whether to enable ipv6])
AC_ARG_ENABLE(ipv6,
[ --enable-ipv6 Enable ipv6 (with ipv4) support
--disable-ipv6 Disable ipv6 support],
AC_HELP_STRING([--enable-ipv6],[Enable ipv6 (with ipv4) support])
AC_HELP_STRING([--disable-ipv6],[Disable ipv6 support]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
@@ -271,12 +285,26 @@ AC_CHECK_FUNC(connect, , [ AC_CHECK_LIB(socket, connect) ])
dnl dl lib?
AC_CHECK_FUNC(dlclose, , [ AC_CHECK_LIB(dl, dlopen) ])
AC_MSG_CHECKING([whether to use libgcc])
AC_ARG_ENABLE(libgcc,
AC_HELP_STRING([--enable-libgcc],[use libgcc when linking]),
[ case "$enableval" in
yes)
LIBS="$LIBS -lgcc"
AC_MSG_RESULT(yes)
;;
*) AC_MSG_RESULT(no)
;;
esac ],
AC_MSG_RESULT(no)
)
dnl **********************************************************************
dnl Check how non-blocking sockets are set
dnl **********************************************************************
AC_ARG_ENABLE(nonblocking,
[ --enable-nonblocking Makes the script detect how to do it
--disable-nonblocking Makes the script disable non-blocking sockets],
AC_HELP_STRING([--enable-nonblocking],[Enable detecting how to do it])
AC_HELP_STRING([--disable-nonblocking],[Disable non-blocking socket detection]),
[
if test "$enableval" = "no" ; then
AC_MSG_WARN([non-blocking sockets disabled])
@@ -295,7 +323,8 @@ dnl Check for the random seed preferences
dnl **********************************************************************
AC_ARG_WITH(egd-socket,
[ --with-egd-socket=FILE Entropy Gathering Daemon socket pathname],
AC_HELP_STRING([--with-egd-socket=FILE],
[Entropy Gathering Daemon socket pathname]),
[ EGD_SOCKET="$withval" ]
)
if test -n "$EGD_SOCKET" ; then
@@ -305,7 +334,7 @@ fi
dnl Check for user-specified random device
AC_ARG_WITH(random,
[ --with-random=FILE read randomness from FILE (default=/dev/urandom)],
AC_HELP_STRING([--with-random=FILE],[read randomness from FILE (default=/dev/urandom)]),
[ RANDOM_FILE="$withval" ],
[
dnl Check for random device
@@ -318,19 +347,38 @@ if test -n "$RANDOM_FILE" ; then
[a suitable file to read random data from])
fi
dnl **********************************************************************
dnl Check if the operating system allows programs to write to their own argv[]
dnl **********************************************************************
AC_MSG_CHECKING([if argv can be written to])
AC_RUN_IFELSE([[
int main(int argc, char ** argv) {
argv[0][0] = ' ';
return (argv[0][0] == ' ')?0:1;
}
]],
AC_DEFINE(HAVE_WRITABLE_ARGV, 1, [Define this symbol if your OS supports changing the contents of argv])
AC_MSG_RESULT(yes),
AC_MSG_RESULT(no),
AC_MSG_RESULT(no)
AC_MSG_WARN([the previous check could not be made, default was used])
)
dnl **********************************************************************
dnl Check for the presence of Kerberos4 libraries and headers
dnl **********************************************************************
AC_ARG_WITH(krb4-includes,
[ --with-krb4-includes[=DIR] Specify location of kerberos4 headers],[
AC_HELP_STRING([--with-krb4-includes=DIR],
[Specify location of kerberos4 headers]),[
CPPFLAGS="$CPPFLAGS -I$withval"
KRB4INC="$withval"
want_krb4=yes
])
AC_ARG_WITH(krb4-libs,
[ --with-krb4-libs[=DIR] Specify location of kerberos4 libs],[
AC_HELP_STRING([--with-krb4-libs=DIR],[Specify location of kerberos4 libs]),[
LDFLAGS="$LDFLAGS -L$withval"
KRB4LIB="$withval"
want_krb4=yes
@@ -339,7 +387,7 @@ AC_ARG_WITH(krb4-libs,
OPT_KRB4=off
AC_ARG_WITH(krb4,dnl
[ --with-krb4[=DIR] where to look for Kerberos4],[
AC_HELP_STRING([--with-krb4=DIR],[where to look for Kerberos4]),[
OPT_KRB4="$withval"
if test X"$OPT_KRB4" != Xyes
then
@@ -409,6 +457,88 @@ else
AC_MSG_RESULT(no)
fi
dnl **********************************************************************
dnl Check for GSS-API libraries
dnl **********************************************************************
AC_ARG_WITH(gssapi-includes,
AC_HELP_STRING([--with-gssapi-includes=DIR],
[Specify location of GSSAPI header]),
[ GSSAPI_INCS="-I$withval"
want_gss="yes" ]
)
AC_ARG_WITH(gssapi-libs,
AC_HELP_STRING([--with-gssapi-libs=DIR],
[Specify location of GSSAPI libs]),
[ GSSAPI_LIBS="-L$withval -lgssapi"
want_gss="yes" ]
)
AC_ARG_WITH(gssapi,
AC_HELP_STRING([--with-gssapi=DIR],
[Where to look for GSSAPI]),
[ GSSAPI_ROOT="$withval"
want_gss="yes" ]
)
AC_MSG_CHECKING([if GSSAPI support is requested])
if test x"$want_gss" = xyes; then
if test -z "$GSSAPI_INCS"; then
if test -f "$GSSAPI_ROOT/bin/krb5-config"; then
gss_cppflags=`$GSSAPI_ROOT/bin/krb5-config --cflags gssapi`
CPPFLAGS="$CPPFLAGS $gss_cppflags"
else
CPPFLAGS="$GSSAPI_ROOT/include"
fi
else
CPPFLAGS="$CPPFLAGS $GSSAPI_INCS"
fi
if test -z "$GSSAPI_LIB_DIR"; then
if test -f "$GSSAPI_ROOT/bin/krb5-config"; then
gss_ldflags=`$GSSAPI_ROOT/bin/krb5-config --libs gssapi`
LDFLAGS="$LDFLAGS $gss_ldflags"
else
LDFLAGS="$LDFLAGS $GSSAPI_ROOT/lib -lgssapi"
fi
else
LDFLAGS="$LDFLAGS $GSSAPI_LIB_DIR"
fi
AC_MSG_RESULT(yes)
AC_DEFINE(GSSAPI, 1, [if you have the gssapi libraries])
else
AC_MSG_RESULT(no)
fi
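A minimal way to exercise this check is to hand configure a GSSAPI installation root; the path below is only an assumed example, and the krb5-config tool under that root is preferred when it exists:

  ./configure --with-gssapi=/usr/heimdal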
dnl Detect the pkg-config tool, as it may have extra info about the
dnl openssl installation we can use. I *believe* this is what we are
dnl expected to do on really recent Redhat Linux hosts.
AC_PATH_PROG( PKGCONFIG, pkg-config, no, $PATH:/usr/bin:/usr/local/bin)
if test "$PKGCONFIG" != "no" ; then
AC_MSG_CHECKING([for OpenSSL options using pkg-config])
$PKGCONFIG --exists openssl
SSL_EXISTS=$?
if test "$SSL_EXISTS" -eq "0"; then
SSL_LIBS=`$PKGCONFIG --libs-only-l openssl 2>/dev/null`
SSL_LDFLAGS=`$PKGCONFIG --libs-only-L openssl 2>/dev/null`
SSL_CPPFLAGS=`$PKGCONFIG --cflags-only-I openssl 2>/dev/null`
LIBS="$LIBS $SSL_LIBS"
CPPFLAGS="$CPPFLAGS $SSL_CPPFLAGS"
LDFLAGS="$LDFLAGS $SSL_LDFLAGS"
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no])
fi
fi
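The same probe can be reproduced by hand to see what configure would pick up, assuming pkg-config and an installed openssl.pc:

  pkg-config --exists openssl && echo openssl found
  pkg-config --libs-only-l openssl
  pkg-config --libs-only-L openssl
  pkg-config --cflags-only-I openssl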
dnl **********************************************************************
dnl Check for the presence of SSL libraries and headers
@@ -416,8 +546,10 @@ dnl **********************************************************************
dnl Default to compiler & linker defaults for SSL files & libraries.
OPT_SSL=off
dnl Default to no CA bundle
ca="no"
AC_ARG_WITH(ssl,dnl
AC_HELP_STRING([--with-ssl=PATH], [where to look for SSL, PATH points to the SSL installation (default: /usr/local/ssl)])
AC_HELP_STRING([--with-ssl=PATH],[where to look for SSL, PATH points to the SSL installation (default: /usr/local/ssl)])
AC_HELP_STRING([--without-ssl], [disable SSL]),
OPT_SSL=$withval)
@@ -499,6 +631,36 @@ else
AC_SUBST(OPENSSL_ENABLED)
AC_MSG_CHECKING([CA cert bundle install path])
AC_ARG_WITH(ca-bundle,
AC_HELP_STRING([--with-ca-bundle=FILE], [File name to install the CA bundle as])
AC_HELP_STRING([--without-ca-bundle], [Don't install the CA bundle]),
[ ca="$withval" ],
[
if test "x$prefix" != xNONE; then
ca="\${prefix}/share/curl/curl-ca-bundle.crt"
else
ca="$ac_default_prefix/share/curl/curl-ca-bundle.crt"
fi
] )
if test X"$OPT_SSL" = Xno; then
ca="no"
fi
if test "x$ca" != "xno"; then
CURL_CA_BUNDLE='"'$ca'"'
AC_SUBST(CURL_CA_BUNDLE)
fi
AC_MSG_RESULT([$ca])
dnl these can only exist if openssl exists
AC_CHECK_FUNCS( RAND_status \
RAND_screen \
RAND_egd )
fi
if test X"$OPT_SSL" != Xoff &&
@@ -506,15 +668,10 @@ else
AC_MSG_ERROR([OpenSSL libs and/or directories were not found where specified!])
fi
dnl these can only exist if openssl exists
AC_CHECK_FUNCS( RAND_status \
RAND_screen \
RAND_egd )
fi
AM_CONDITIONAL(CABUNDLE, test x$ca != xno)
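The install location of the bundle can be overridden, or the bundle left out entirely, when running configure; the file name below is only an assumed example:

  ./configure --with-ca-bundle=/etc/ssl/certs/curl-ca-bundle.crt
  ./configure --without-ca-bundle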
dnl **********************************************************************
dnl Check for the presence of ZLIB libraries and headers
dnl **********************************************************************
@@ -525,9 +682,9 @@ _cppflags=$CPPFLAGS
_ldflags=$LDFLAGS
OPT_ZLIB="/usr/local"
AC_ARG_WITH(zlib,
AC_HELP_STRING([--with-zlib=PATH], [search for zlib in PATH])
AC_HELP_STRING([--without-zlib], [disable use of zlib]),
[OPT_ZLIB="$withval"])
AC_HELP_STRING([--with-zlib=PATH],[search for zlib in PATH])
AC_HELP_STRING([--without-zlib],[disable use of zlib]),
[OPT_ZLIB="$withval"])
case "$OPT_ZLIB" in
no)
@@ -557,16 +714,35 @@ case "$OPT_ZLIB" in
;;
esac
dnl set variable for use in automakefile(s)
AM_CONDITIONAL(HAVE_LIBZ, test x"$HAVE_LIBZ" = x1)
dnl Default is to try the thread-safe versions of a few functions
OPT_THREAD=on
dnl detect AIX 4.3 or later
dnl see full docs on this reasoning in the lib/hostip.c source file
AC_MSG_CHECKING([AIX 4.3 or later])
AC_PREPROC_IFELSE([
#if defined(_AIX) && defined(_AIX43)
printf("just fine");
#else
#error "this is not AIX 4.3 or later"
#endif
],
[ AC_MSG_RESULT([yes])
OPT_THREAD=off ],
[ AC_MSG_RESULT([no]) ]
)
AC_ARG_ENABLE(thread,dnl
[ --disable-thread tell configure to not look for thread-safe functions],
AC_HELP_STRING([--disable-thread],[don't look for thread-safe functions]),
OPT_THREAD=off
AC_MSG_WARN(libcurl will not get built using thread-safe functions)
)
if test X"$OPT_THREAD" = Xoff
then
AC_MSG_WARN(libcurl will not get built using thread-safe functions)
AC_DEFINE(DISABLED_THREADSAFE, 1, \
Set to explicitly specify we don't want to use thread-safe functions)
else
@@ -593,21 +769,23 @@ dnl **********************************************************************
dnl Checks for header files.
AC_HEADER_STDC
AC_CHECK_HEADERS( \
dnl First check for the very most basic headers. Then we can use these
dnl ones as default-headers when checking for the rest!
AC_CHECK_HEADERS(
sys/types.h \
sys/time.h \
sys/select.h \
sys/socket.h \
unistd.h \
malloc.h \
stdlib.h \
arpa/inet.h \
net/if.h \
netinet/in.h \
netinet/if_ether.h \
netdb.h \
sys/select.h \
sys/socket.h \
sys/sockio.h \
sys/stat.h \
sys/types.h \
sys/time.h \
sys/param.h \
termios.h \
termio.h \
@@ -622,7 +800,26 @@ AC_CHECK_HEADERS( \
utime.h \
sys/utime.h \
sys/poll.h \
setjmp.h
setjmp.h,
dnl to do if not found
[],
dnl to do if found
[],
dnl default includes
[
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
]
)
dnl Checks for typedefs, structures, and compiler characteristics.
@@ -630,19 +827,20 @@ AC_C_CONST
AC_TYPE_SIZE_T
AC_HEADER_TIME
# mprintf() checks:
AC_CHECK_SIZEOF(off_t)
# check for 'long double'
# AC_CHECK_SIZEOF(long double, 8)
# check for 'long long'
# AC_CHECK_SIZEOF(long long, 4)
AC_CHECK_TYPE(long long,
[AC_DEFINE(HAVE_LONGLONG, 1, [if your compiler supports 'long long'])])
# check for ssize_t
AC_CHECK_TYPE(ssize_t, int)
AC_CHECK_TYPE(ssize_t, ,
AC_DEFINE(ssize_t, int, [the signed version of size_t]))
TYPE_SOCKLEN_T
TYPE_IN_ADDR_T
AC_FUNC_SELECT_ARGTYPES
dnl Checks for library functions.
dnl AC_PROG_GCC_TRADITIONAL
AC_TYPE_SIGNAL
@@ -665,7 +863,6 @@ AC_CHECK_FUNCS( socket \
tcgetattr \
perror \
closesocket \
setvbuf \
sigaction \
signal \
getpass_r \
@@ -675,7 +872,21 @@ AC_CHECK_FUNCS( socket \
dlopen \
utime \
sigsetjmp \
poll
poll,
dnl if found
[],
dnl if not found, $ac_func is the name we check for
func="$ac_func"
AC_MSG_CHECKING([deeper for $func])
AC_TRY_LINK( [],
[ $func ();],
AC_MSG_RESULT(yes!)
eval "ac_cv_func_$func=yes"
def=`echo "HAVE_$func" | tr 'a-z' 'A-Z'`
AC_DEFINE_UNQUOTED($def, 1, [If you have $func]),
AC_MSG_RESULT(but still no)
)
)
dnl sigsetjmp() might be a macro and no function so if it isn't found already
@@ -691,15 +902,6 @@ if test "$ac_cv_func_sigsetjmp" != "yes"; then
)
fi
dnl removed 'getpass' check on October 26, 2000
if test "$ac_cv_func_select" != "yes"; then
AC_MSG_ERROR(Can't work without an existing select() function)
fi
if test "$ac_cv_func_socket" != "yes"; then
AC_MSG_ERROR(Can't work without an existing socket() function)
fi
AC_PATH_PROG( PERL, perl, ,
$PATH:/usr/local/bin/perl:/usr/bin/:/usr/local/bin )
AC_SUBST(PERL)
@@ -708,32 +910,6 @@ AC_PATH_PROGS( NROFF, gnroff nroff, ,
$PATH:/usr/bin/:/usr/local/bin )
AC_SUBST(NROFF)
AC_MSG_CHECKING([CA cert bundle install path])
AC_ARG_WITH(ca-bundle,
AC_HELP_STRING([--with-ca-bundle=FILE], [File name to install the CA bundle as])
AC_HELP_STRING([--without-ca-bundle], [Don't install the CA bundle]),
[ ca="$withval" ],
[
if test "x$prefix" != xNONE; then
ca="$prefix/share/curl/curl-ca-bundle.crt"
else
ca="$ac_default_prefix/share/curl/curl-ca-bundle.crt"
fi
] )
if test "x$ca" = "xno"; then
dnl let's not keep "no" as path name, blank it instead
ca=""
else
AC_DEFINE_UNQUOTED(CURL_CA_BUNDLE, "$ca", [CA bundle full path name])
fi
CURL_CA_BUNDLE="$ca"
AC_SUBST(CURL_CA_BUNDLE)
AC_MSG_RESULT([$ca])
AC_PROG_YACC
dnl AC_PATH_PROG( RANLIB, ranlib, /usr/bin/ranlib,
@@ -745,18 +921,18 @@ dnl lame option to switch on debug options
dnl
AC_MSG_CHECKING([whether to enable debug options])
AC_ARG_ENABLE(debug,
[ --enable-debug Enable pedantic debug options
--disable-debug Disable debug options],
AC_HELP_STRING([--enable-debug],[Enable pedantic debug options])
AC_HELP_STRING([--disable-debug],[Disable debug options]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
;;
*) AC_MSG_RESULT(yes)
CPPFLAGS="$CPPFLAGS -DMALLOCDEBUG"
CPPFLAGS="$CPPFLAGS -DCURLDEBUG"
CFLAGS="$CFLAGS -g"
if test "$GCC" = "yes"; then
CFLAGS="$CFLAGS -W -Wall -Wwrite-strings -pedantic -Wundef -Wpointer-arith -Wcast-align -Wnested-externs"
CFLAGS="$CFLAGS -W -Wall -Wwrite-strings -pedantic -Wundef -Wpointer-arith -Wnested-externs"
fi
dnl strip off optimizer flags
NEWFLAGS=""
@@ -776,6 +952,31 @@ AC_ARG_ENABLE(debug,
AC_MSG_RESULT(no)
)
ares="no"
AC_MSG_CHECKING([whether to enable ares])
AC_ARG_ENABLE(ares,
AC_HELP_STRING([--enable-ares],[Enable using ares for name lookups])
AC_HELP_STRING([--disable-ares],[Disable using ares for name lookups]),
[ case "$enableval" in
no)
AC_MSG_RESULT(no)
;;
*) AC_MSG_RESULT(yes)
if test "x$IPV6_ENABLED" = "x1"; then
AC_MSG_ERROR([ares doesn't work with ipv6, disable ipv6 to use ares])
fi
AC_DEFINE(USE_ARES, 1, [Define if you want to enable ares support])
ares="yes"
;;
esac ],
AC_MSG_RESULT(no)
)
AM_CONDITIONAL(ARES, test x$ares = xyes)
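A sketch of a configure invocation that turns this on; note that, per the check above, combining it with --enable-ipv6 makes configure abort:

  ./configure --enable-ares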
AC_CONFIG_FILES([Makefile \
docs/Makefile \
docs/examples/Makefile \
@@ -796,6 +997,7 @@ AC_CONFIG_FILES([Makefile \
packages/Linux/RPM/curl.spec \
packages/Linux/RPM/curl-ssl.spec \
packages/Solaris/Makefile \
packages/DOS/Makefile \
packages/EPM/curl.list \
packages/EPM/Makefile \
curl-config


@@ -107,8 +107,11 @@ while test $# -gt 0; do
;;
--cflags)
#echo -I@includedir@
echo ""
if test "X@includedir@" = "X/usr/include"; then
echo ""
else
echo "-I@includedir@"
fi
;;
--libs)

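A program's compile and link flags can thus be picked up from curl-config in a makefile or build script, for instance (a sketch, assuming curl-config is on the PATH):

  CFLAGS=`curl-config --cflags`
  LIBS=`curl-config --libs`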

@@ -1,21 +0,0 @@
;;;; Emacs Lisp help for writing curl code. ;;;;
;;; In C files, put something like this to load this file automatically:
;;
;; /* -----------------------------------------------------------------
;; * local variables:
;; * eval: (load-file "../curl-mode.el")
;; * end:
;; */
;;
;; (note: make sure to get the path right in the argument to load-file).
;;; The curl hacker's C conventions
;;; we use intent-level 2
(setq c-basic-offset 2)
;;; never ever use tabs to indent!
(setq indent-tabs-mode nil)
;;; I like this, stolen from Subversion! ;-)
(setq angry-mob-with-torches-and-pitchforks t)

curl-style.el (new file)

@@ -0,0 +1,50 @@
;;;; Emacs Lisp help for writing curl code. ;;;;
;;;; $Id$
;;; The curl hacker's C conventions.
;;; After loading this file and adding the mode-hook you can, in C
;;; files, put something like this to use the curl style
;;; automatically:
;;
;; /* -----------------------------------------------------------------
;; * local variables:
;; * eval: (set c-file-style "curl")
;; * end:
;; */
;;
(defconst curl-c-style
'((c-basic-offset . 2)
(c-comment-only-line-offset . 0)
(c-hanging-braces-alist . ((substatement-open before after)))
(c-offsets-alist . ((topmost-intro . 0)
(topmost-intro-cont . 0)
(substatement . +)
(substatement-open . 0)
(statement-case-intro . +)
(statement-case-open . 0)
(case-label . 0)
))
)
"Curl C Programming Style")
;; Customizations for all of c-mode, c++-mode, and objc-mode
(defun curl-c-mode-common-hook ()
"Curl C mode hook"
;; add curl style and set it for the current buffer
(c-add-style "curl" curl-c-style t)
(setq tab-width 8
indent-tabs-mode nil ; Use spaces. Not tabs.
comment-column 40
c-font-lock-extra-types (append '("bool" "CURL" "CURLcode" "ssize_t" "size_t" "socklen_t" "fd_set" "time_t"))
)
;; keybindings for C, C++, and Objective-C. We can put these in
;; c-mode-base-map because of inheritance ...
(define-key c-mode-base-map "\M-q" 'c-fill-paragraph)
(setq c-recognize-knr-p nil)
)
;; Set this is in your .emacs if you want to use the c-mode-hook as
;; defined here right out of the box.
; (add-hook 'c-mode-common-hook 'curl-c-mode-common-hook)


@@ -8,7 +8,7 @@ $Id$
BUGS
Curl and libcurl have grown substantially since the beginning. At the time
of writing (end of April 2002), there are 32000 lines of source code, and by
of writing (end of March 2003), there are 35000 lines of source code, and by
the time you read this it has probably grown even more.
Of course there are lots of bugs left. And lots of misfeatures.
@@ -19,18 +19,21 @@ BUGS
WHERE TO REPORT
If you can't fix a bug yourself and submit a fix for it, try to report an as
detailed report as possible to the curl mailing list to allow one of us to
detailed report as possible to a curl mailing list to allow one of us to
have a go at a solution. You should also post your bug/problem at curl's bug
tracking system over at
http://sourceforge.net/bugs/?group_id=976
(but please read the section below first before doing that)
(but please read the sections below first before doing that)
If you feel you need to ask around first, find a suitable mailing list and
post there. The lists are available on http://curl.haxx.se/mail/
WHAT TO REPORT
When reporting a bug, you should include information that will help us
understand what's wrong what you expected to happen and how to repeat the
understand what's wrong, what you expected to happen and how to repeat the
bad behavior. You therefore need to tell us:
- your operating system's name and version number (uname -a under a unix


@@ -145,5 +145,15 @@ How To Make a Patch
diff -ur curl-original-dir curl-modified-sources-dir > my-fixes.diff
GNU diff exists for virtually all platforms, including all kinds of unixes
and Windows.
The GNU diff and GNU patch tools exist for virtually all platforms, including
all kinds of unixes and Windows:
For unix-like operating systems:
http://www.fsf.org/software/patch/patch.html
http://www.gnu.org/directory/diffutils.html
For Windows:
http://gnuwin32.sourceforge.net/packages/patch.htm
http://gnuwin32.sourceforge.net/packages/diffutils.htm

docs/FAQ

@@ -1,4 +1,4 @@
Updated: January 13, 2003 (http://curl.haxx.se/docs/faq.html)
Updated: June 17, 2003 (http://curl.haxx.se/docs/faq.html)
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
@@ -97,6 +97,12 @@ FAQ
We spell it cURL or just curl. We pronounce it with an initial k sound:
[kurl].
NOTE: there are numerous sub-projects and related projects that also use the
word curl in the project names in various combinations, but you should take
notice that this FAQ is directed at the command-line tool named curl (and
libcurl the library), and may therefore not be valid for other curl
projects.
1.2 What is libcurl?
libcurl is a reliable and portable library which provides you with an easy
@@ -132,11 +138,9 @@ FAQ
better. We do however believe in a few rules when it comes to the future of
curl:
* Curl is to remain a command line tool. If you want GUIs or fancy scripting
capabilities, you're free to write another tool that uses libcurl and that
offers this. There's no point in having a single tool that does every
imaginable thing. That's also one of the great advantages of having the
core of curl as a library.
* Curl -- the command line tool -- is to remain a non-graphical command line
tool. If you want GUIs or fancy scripting capabilities, you should look
for another tool that uses libcurl.
* We do not add things to curl that other small and available tools already
do very fine at the side. Curl's output is fine to pipe into another
@@ -175,20 +179,21 @@ FAQ
Project cURL is entirely free and open. No person gets paid for developing
curl. We do this voluntarily on our spare time.
We get some help from companies. Contactor Data hosts the curl web site and
the main mailing list, Haxx owns the curl web site's domain and
sourceforge.net hosts several project tools we take advantage from like the
bug tracker, mailing lists and more.
We get some help from companies. Contactor Data hosts the curl web site,
Haxx owns the curl web site's domain and sourceforge.net hosts several
project services we take advantage of, like the bug tracker, mailing lists
and more.
If you want to support our project with a donation or similar, one way of
doing that would be to buy "gift certificates" at useful online shopping
sites, such as amazon.com or thinkgeek.com. Another way would be to sponsor
us through a banner-program or even better: by helping us coding,
documenting, testing etc.
documenting, testing etc. You're welcome to send us a buck using paypal, as
described here: http://curl.haxx.se/donation.html
1.7 What about CURL from curl.com?
During the summer 2001, curl.com has been busy advertising their client-side
During the summer 2001, curl.com was busy advertising their client-side
programming language for the web, named CURL.
We are in no way associated with curl.com or their CURL programming
@@ -203,17 +208,17 @@ FAQ
1.8 I have a problem who do I mail?
Please do not attempt to mail any single individual unless you really need
to. Keep curl-related questions on a suitable mailing list. All available
mailing lists are listed in the MANUAL document and online at
Please do not mail any single individual unless you really need to. Keep
curl-related questions on a suitable mailing list. All available mailing
lists are listed in the MANUAL document and online at
http://curl.haxx.se/mail/
Keeping curl-related questions and discussions on mailing lists allows others
to join in and help, to share their ideas, contribute their suggestions and
spread their wisdom. Keeping discussions on public mailing lists also allows
for others to learn from this (both current and future users thanks to the
web based archives of the mailing lists), thus saving us from having to
repeat ourselves even more. Thanks for respecting this.
Keeping curl-related questions and discussions on mailing lists allows
others to join in and help, to share their ideas, contribute their
suggestions and spread their wisdom. Keeping discussions on public mailing
lists also allows for others to learn from this (both current and future
users thanks to the web based archives of the mailing lists), thus saving us
from having to repeat ourselves even more. Thanks for respecting this.
2. Install Related Problems
@@ -368,9 +373,10 @@ FAQ
http://curl.haxx.se/libcurl/
In December 2001, there are interfaces available for the following
languages: C/C++, Cocoa, Dylan, Java, Perl, PHP, Python, Rexx, Ruby, Scheme
and Tcl. By the time you read this, additional ones may have appeared!
In February 2003, there are interfaces available for the following
languages: Basic, C, C++, Cocoa, Dylan, Euphoria, Java, Lua, Object-Pascal,
Pascal, Perl, PHP, PostgreSQL, Python, Rexx, Ruby, Scheme and Tcl. By the
time you read this, additional ones may have appeared!
3.10 What about SOAP, WebDAV, XML-RPC or similar protocols over HTTP?
@@ -379,8 +385,8 @@ FAQ
XML-RPC are all such ones. You can use -X to set custom requests and -H to
set custom headers (or replace internally generated ones).
Using libcurl or PHP's curl modules is just as fine and you'd just use the
proper library options to do the same.
Using libcurl is of course just as fine and you'd just use the proper
library options to do the same.
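As a purely illustrative sketch (the host name is made up), a WebDAV PROPFIND request can be issued with exactly those two options:

  curl -X PROPFIND -H "Depth: 0" http://dav.example.com/folder/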
3.11 How do I POST with a different Content-Type?
@@ -494,8 +500,7 @@ FAQ
curl '{curl,www}.haxx.se'
To be able to use those letters as actual parts of the URL (without using
them for the curl URL "globbing" system), use the -g/--globoff option (curl
7.6 and later):
them for the curl URL "globbing" system), use the -g/--globoff option:
curl -g 'www.site.com/weirdname[].html'
@@ -588,9 +593,11 @@ FAQ
4.9. Curl can't authenticate to the server that requires NTLM?
NTLM is a Microsoft proprietary protocol. Unfortunately, curl does not
currently support that. Proprietary formats are evil. You should not use
such ones.
This is supported in curl 7.10.6 or later. No earlier curl version knows
of this magic.
NTLM is a Microsoft proprietary protocol. Proprietary formats are evil. You
should not use such ones.
4.10 My HTTP request using HEAD, PUT or DELETE doesn't work!
@@ -630,7 +637,7 @@ FAQ
this check.
Details are also in the SSLCERTS file in the release archives, found online
here: http://curl.haxx.se/lxr/source/SSLCERTS
here: http://curl.haxx.se/docs/sslcerts.html
5. libcurl Issues
@@ -682,20 +689,15 @@ FAQ
5.3 How do I fetch multiple files with libcurl?
Starting with version 7.7, curl and libcurl will have excellent support for
transferring multiple files. You should just repeatedly set new URLs with
curl_easy_setopt() and then transfer it with curl_easy_perform(). The handle
you get from curl_easy_init() is not only reusable starting with libcurl
7.7, but also you're encouraged to reuse it if you can, as that will enable
libcurl to use persistent connections.
For libcurl prior to 7.7, there was no multiple file support. The only
available way to do multiple requests was to init/perform/cleanup for each
transfer.
libcurl has excellent support for transferring multiple files. You should
just repeatedly set new URLs with curl_easy_setopt() and then transfer it
with curl_easy_perform(). The handle you get from curl_easy_init() is not
only reusable, but you're even encouraged to reuse it if you can, as that
will enable libcurl to use persistent connections.
5.4 Does libcurl do Winsock initialization on win32 systems?
Yes (since 7.8.1) if told to in the curl_global_init() call.
Yes, if told to in the curl_global_init() call.
5.5 Does CURLOPT_FILE and CURLOPT_INFILE work on win32 ?
@@ -709,13 +711,11 @@ FAQ
5.6 What about Keep-Alive or persistent connections?
Starting with version 7.7, curl and libcurl will have excellent support for
persistent connections when transferring several files from the same server.
Curl will attempt to reuse connections for all URLs specified on the same
command line/config file, and libcurl will reuse connections for all
transfers that are made using the same libcurl handle.
Previous versions had no persistent connection support.
curl and libcurl have excellent support for persistent connections when
transferring several files from the same server. Curl will attempt to reuse
connections for all URLs specified on the same command line/config file, and
libcurl will reuse connections for all transfers that are made using the
same libcurl handle.
5.7 Link errors when building libcurl on Windows!
@@ -778,6 +778,5 @@ FAQ
discussions and a large amount of people have contributed with source code
knowing that this is the license we use. This license puts the restrictions
we want on curl/libcurl and it does not spread to other programs or
libraries that use it. The recent dual license modification should make it
possible for everyone to use libcurl or curl in their projects, no matter
what license they already have in use.
libraries that use it. It should be possible for everyone to use libcurl or
curl in their projects, no matter what license they already have in use.


@@ -17,27 +17,30 @@ Misc
- progress bar/time specs while downloading
- "standard" proxy environment variables support
- config file support
- compiles on win32 (reported built on 29 operating systems)
- compiles on win32 (reported builds on 40+ operating systems)
- redirectable stderr
- use selected network interface for outgoing traffic
- selectable network interface for outgoing traffic
- IPv6 support
- persistent connections
- socks5 support
- supports user name + password in proxy environment variables
- operations through proxy "tunnel" (using CONNECT)
HTTP
- HTTP/1.1 compliant
- HTTP/1.1 compliant (optionally uses 1.0)
- GET
- PUT
- HEAD
- POST
- multipart POST
- authentication
- multipart formpost (RFC1867-style)
- authentication (Basic, Digest, NTLM(*1), GSS-Negotiate(*3))
- resume (both GET and PUT)
- follow redirects
- maximum amount of redirects to follow
- custom HTTP request
- cookie get/send fully parsed
- understands the netscape cookie file format
- custom headers (that can replace/remove internally generated headers)
- reads/writes the netscape cookie file format
- custom headers (replace/remove internally generated headers)
- custom user-agent string
- custom referer string
- range
@@ -45,12 +48,16 @@ HTTP
- time conditions
- via http-proxy
- retrieve file modification date
- Content-Encoding support for deflate and gzip
- "Transfer-Encoding: chunked" support for "uploads"
HTTPS (*1)
- (all the HTTP features)
- using certificates
- verify server certificate
- via http-proxy
- select desired encryption
- force usage of a specific SSL version (SSLv2, SSLv3 or TLSv1)
FTP
- download
@@ -90,5 +97,6 @@ GOPHER
FILE
- URL support
*1 = requires OpenSSL
*2 = requires OpenLDAP
*1 = requires OpenSSL
*2 = requires OpenLDAP
*3 = requires a GSSAPI-compliant library, such as Heimdal or similar.


@@ -54,7 +54,7 @@ OpenSSL took over where SSLeay was abandoned.
May 1999, first Debian package.
August 1999, LDAP:// and FILE:// support added. The curl web site gets 1300
visits daily.
visits weekly.
Released curl 6.0 in September. 15000 lines of code.
@@ -67,7 +67,7 @@ the easy interface and turned out to be the beginning of actually getting
other software and programs to get based on and powered by libcurl. Almost
20000 lines of code.
August 2000, the curl web site gets 4000 visits daily.
August 2000, the curl web site gets 4000 visits weekly.
The PHP guys adopted libcurl already the same month, when the first ever third
party libcurl binding showed up. CURL has been a supported module in PHP since
@@ -92,7 +92,7 @@ code.
August 2001. curl is bundled in Mac OS X, 10.1. It was already becoming more
and more of a standard utility of Linux distributions and a regular in the BSD
ports collections. The curl web site gets 8000 visits daily. Curl Corporation
ports collections. The curl web site gets 8000 visits weekly. Curl Corporation
contacted Daniel to discuss "the name issue". After Daniel's reply, they have
never since got in touch again.
@@ -100,7 +100,7 @@ September 2001, libcurl 7.9 introduces cookie jar and curl_formadd(). During
the forthcoming 7.9.x releases, we introduced the multi interface slowly and
without much whistles.
June 2002, the curl web site gets 13000 visits daily. curl and libcurl is
June 2002, the curl web site gets 13000 visits weekly. curl and libcurl is
35000 lines of code. Reported successful compiles on more than 40 combinations
of CPUs and operating systems.
@@ -111,3 +111,6 @@ distributions and otherwise retrieved as part of other software.
September 2002, with the release of curl 7.10 it is released under the MIT
license only.
February 2003, the curl site averages at 20000 visits weekly. At any given
moment, there's an average of 3 people browsing the curl.haxx.se site.

docs/HOWTO-RELEASE (new file)

@@ -0,0 +1,25 @@
Steps To Perform When Building a Public Release
* "make distcheck"
* ./maketgz
then upload the 3 curl packages maketgz created
* update these files:
www/_download.html
www/_changes.html
www/_newslog.html
www/Makefile
* commit the web changes
* 'cvs commit'
* 'cvs tag'
* write the release announcement, including:
- changes / bugfixes
- other curl-related news
- contributors
* mail release-announcement to curl-announce and curl-users


@@ -31,6 +31,10 @@ UNIX
If you have checked out the sources from the CVS repository, read the
CVS-INFO on how to proceed.
Get a full listing of all available configure options by invoking it like:
./configure --help
If you want to install curl in a different file hierarchy than /usr/local,
you need to specify that already when running configure:
@@ -200,7 +204,14 @@ Win32
Before running nmake define the OPENSSL_PATH environment variable with
the root/base directory of OpenSSL, for example:
set OPENSSL_PATH=c:\openssl-0.9.6b
set OPENSSL_PATH=c:\openssl-0.9.7a
lib/Makefile.vc6 depends on zlib (http://www.gzip.org/zlib/) as well.
Please read the zlib documentation on how to compile zlib. Define the
ZLIB_PATH environment variable to the location of zlib.h and zlib.lib,
for example:
set ZLIB_PATH=c:\zlib-1.1.4
Then run 'nmake vc-ssl' or 'nmake vc-ssl-dll' in curl's root
directory. 'nmake vc-ssl' will create a libcurl static and dynamic
@@ -400,6 +411,17 @@ CROSS COMPILE
The '--prefix' parameter specifies where cURL will be installed. If
'configure' completes successfully, do 'make' and 'make install' as usual.
RISC OS
=======
The library can be cross-compiled using gccsdk as follows:
CC=riscos-gcc AR=riscos-ar RANLIB='riscos-ar -s' ./configure \
--host=arm-riscos-aof --without-random --disable-shared
make
where riscos-gcc and riscos-ar are links to the gccsdk tools.
You can then link your program with curl/lib/.libs/libcurl.a
PORTS
=====
This is a probably incomplete list of known hardware and operating systems
@@ -419,7 +441,7 @@ PORTS
- MIPS IRIX 6.2, 6.5
- MIPS Linux
- Pocket PC/Win CE 3.0
- Power AIX 4.2, 4.3.1, 4.3.2, 5.1
- Power AIX 3.2.5, 4.2, 4.3.1, 4.3.2, 5.1
- PowerPC Darwin 1.0
- PowerPC Linux
- PowerPC Mac OS 9
@@ -433,6 +455,7 @@ PORTS
- StrongARM NetBSD 1.4.1
- Ultrix 4.3a
- i386 BeOS
- i386 DOS
- i386 FreeBSD
- i386 HURD
- i386 Linux 1.3, 2.0, 2.2, 2.3, 2.4
@@ -443,11 +466,14 @@ PORTS
- i386 Solaris 2.7
- i386 Windows 95, 98, ME, NT, 2000
- i386 QNX 6
- i486 ncr-sysv4.3.03 (NCR MP-RAS)
- ia64 Linux 2.3.99
- m68k AmigaOS 3
- m68k Linux
- m68k OpenBSD
- m88k dg-dgux5.4R3.00
- s390 Linux
- XScale/PXA250 Linux 2.4
OpenSSL
=======


@@ -3,6 +3,27 @@ join in and help us correct one or more of these! Also be sure to check the
changelog of the current development status, as one or more of these problems
may have been fixed since this was written!
* libcurl doesn't treat the content-length of compressed data properly, as
it seems HTTP servers send the *uncompressed* length in that header and
libcurl thinks of it as the *compressed* length. Some explanations are here:
http://curl.haxx.se/mail/lib-2003-06/0146.html
* Downloading 0 (zero) bytes files over FTP will not create a zero byte file
locally, which is because libcurl doesn't call the write callback with zero
bytes. Explained here: http://curl.haxx.se/mail/archive-2003-04/0143.html
* Using CURLOPT_FAILONERROR (-f/--fail) will make authentication to stop
working if you use anything but plain Basic auth.
* LDAP output is garbled. Hardly anyone seems to care about LDAP functionality
in curl/libcurl, which is why this report has been closed and set to be solved later.
If you feel this is something you want fixed, get in touch and we'll start
working.
http://sourceforge.net/tracker/index.php?func=detail&aid=735752&group_id=976&atid=100976
* IPv6 support on AIX 4.3.3 doesn't work due to a missing sockaddr_storage
struct. It has been reported to work on AIX 5.1 though.
* Running 'make test' on Mac OS X gives 4 errors. This seems to be related
to some kind of libtool problem:
http://curl.haxx.se/mail/archive-2002-03/0029.html and
@@ -15,6 +36,8 @@ may have been fixed since this was written!
* configure --disable-http is not fully supported. All other protocols seem
to work to disable.
* The -m parameter does not work when using telnet with curl on Windows.
* If a HTTP server responds to a HEAD request and includes a body (thus
violating the RFC2616), curl won't wait to read the response but just stop
reading and return back. If a second request (let's assume a GET) is then


@@ -11,7 +11,7 @@ SIMPLE USAGE
curl http://www.netscape.com/
Get the root README file from funet's ftp-server:
Get the README file from the user's home directory at funet's ftp-server:
curl ftp://ftp.funet.fi/README
@@ -19,7 +19,7 @@ SIMPLE USAGE
curl http://www.weirdserver.com:8000/
Get a list of the root directory of an FTP site:
Get a list of a directory of an FTP site:
curl ftp://cool.haxx.se/
@@ -166,13 +166,21 @@ UPLOADING
VERBOSE / DEBUG
If curl fails where it isn't supposed to, if the servers don't let you
in, if you can't understand the responses: use the -v flag to get VERBOSE
fetching. Curl will output lots of info and all data it sends and
receives in order to let the user see all client-server interaction.
If curl fails where it isn't supposed to, if the servers don't let you in,
if you can't understand the responses: use the -v flag to get verbose
fetching. Curl will output lots of info and what it sends and receives in
order to let the user see all client-server interaction (but it won't show
you the actual data).
curl -v ftp://ftp.upload.com/
To get even more details and information on what curl does, try using the
--trace or --trace-ascii options with a given file name to log to, like
this:
curl --trace trace.txt www.haxx.se
DETAILED INFORMATION
Different protocols provide different ways of getting detailed information
@@ -235,7 +243,7 @@ POST (HTTP)
To post to this, you enter a curl command line like:
curl -d "user=foobar&pass=12345&id=blablabla&dig=submit" (continues)
curl -d "user=foobar&pass=12345&id=blablabla&ding=submit" (continues)
http://www.formpost.com/getthis/post.cgi
@@ -350,6 +358,13 @@ COOKIES
curl -b headers www.example.com
While saving headers to a file is a working way to store cookies, it is
however error-prone and not the preferred way to do this. Instead, make curl
save the incoming cookies using the well-known netscape cookie format like
this:
curl -c cookies.txt www.example.com
Note that by specifying -b you enable the "cookie awareness" and with -L
you can make curl follow a location: (which often is used in combination
with cookies). So that if a site sends cookies and a location, you can
@@ -363,7 +378,11 @@ COOKIES
the cookies received from www.example.com. curl will send to the server the
stored cookies which match the request as it follows the location. The
file "empty.txt" may be a non-existent file.
To both read and write cookies from a netscape cookie file, you can
set both -b and -c to use the same file:
curl -b cookies.txt -c cookies.txt www.example.com
PROGRESS METER
@@ -413,7 +432,8 @@ SPEED LIMIT
Forcing curl not to transfer data faster than a given rate is also possible,
which might be useful if you're using a limited bandwidth connection and you
don't want your transfer to use all of it.
don't want your transfer to use all of it (sometimes referred to as
"bandwidth throttle").
Make curl transfer data no faster than 10 kilobytes per second:
@@ -427,6 +447,11 @@ SPEED LIMIT
curl -T upload --limit-rate 1M ftp://uploadshereplease.com
When using the --limit-rate option, the transfer rate is regulated on a
per-second basis, which will cause the total transfer speed to become lower
than the given number. Sometimes of course substantially lower, if your
transfer stalls during periods.
CONFIG FILE
Curl automatically tries to read the .curlrc file (or _curlrc file on win32


@@ -10,7 +10,8 @@ man_MANS = \
HTMLPAGES = \
curl.html \
curl-config.html
curl-config.html \
index.html
PDFPAGES = \
curl.pdf \
@@ -18,7 +19,7 @@ PDFPAGES = \
SUBDIRS = examples libcurl
EXTRA_DIST = MANUAL BUGS CONTRIBUTE FAQ FEATURES INTERNALS \
EXTRA_DIST = MANUAL BUGS CONTRIBUTE FAQ FEATURES INTERNALS SSLCERTS \
README.win32 RESOURCES TODO TheArtOfHttpScripting THANKS \
VERSIONS KNOWN_BUGS BINDINGS $(man_MANS) $(HTMLPAGES) \
HISTORY INSTALL libcurl-the-guide $(PDFPAGES)


@@ -13,8 +13,8 @@ README.win32
are win32-based.
The unix-style man pages are tricky to read on windows, so therefore are all
those pages also converted to HTML and those are also included in the
release archives.
those pages converted to HTML as well as pdf, and included in the release
archives.
The main curl.1 man page is also "built-in" in the command line tool. Use a
command line similar to this in order to extract a separate text file:


@@ -6,9 +6,9 @@ default. This is done by installing a default CA cert bundle on 'make install'
(or similar), that CA bundle package is used by default on operations against
SSL servers.
Alas, if you communicate with HTTPS servers using certifcates that are signed
Alas, if you communicate with HTTPS servers using certificates that are signed
by CAs present in the bundle, you will not notice any changed behavior and you
will seeminglessly get a higher security level on your SSL connections since
will seamlessly get a higher security level on your SSL connections since you
can be sure that the remote server really is the one it claims to be.
If the remote server uses a self-signed certificate, or if you don't install
@@ -26,10 +26,14 @@ included in the bundle, then you need to do one of the following:
With the curl command tool: --cacert [file]
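For instance (the host name and file name here are placeholders), pointing curl at an alternative CA bundle looks like:

  curl --cacert /path/to/ca-bundle.crt https://secure.example.com/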
This upgrade procedure has been deemed The Right Thing even though it adds
this extra trouble for some users, since it adds security to a majority of the
SSL connections that previously weren't really secure.
Neglecting to use one of the above methods when dealing with a server using a
certificate that isn't signed by one of the certificates in the installed CA
cert bundle, will cause SSL to report an error ("certificate verify failed")
during the handshake and SSL will then refuse further communication with that
server.
It turned out many people were using previous versions of curl/libcurl without
realizing the need for the CA cert options to get truly secure SSL
connections.
This procedure has been deemed The Right Thing even though it adds this extra
trouble for some users, since it adds security to a majority of the SSL
connections that previously weren't really secure. It turned out many people
were using previous versions of curl/libcurl without realizing the need for
the CA cert options to get truly secure SSL connections.


@@ -2,83 +2,93 @@ This project has been alive for several years. Countless people have provided
feedback that has improved curl. Here follows an (incomplete) list of people
that have contributed with non-trivial parts:
- Daniel Stenberg <daniel@haxx.se>
- Rafael Sagula <sagula@inf.ufrgs.br>
- Sampo Kellomaki <sampo@iki.fi>
- Linas Vepstas <linas@linas.org>
- Bjorn Reese <breese@mail1.stofanet.dk>
- Johan Anderson <johan@homemail.com>
- Kjell Ericson <Kjell.Ericson@haxx.se>
- Troy Engel <tengel@sonic.net>
- Ryan Nelson <ryan@inch.com>
- Björn Stenberg <bjorn@haxx.se>
- Angus Mackay <amackay@gus.ml.org>
- Eric Young <eay@cryptsoft.com>
- Simon Dick <simond@totally.irrelevant.org>
- Oren Tirosh <oren@monty.hishome.net>
- Steven G. Johnson <stevenj@alum.mit.edu>
- Gilbert Ramirez Jr. <gram@verdict.uthscsa.edu>
- Andrés García <ornalux@redestb.es>
- Douglas E. Wegscheid <wegscd@whirlpool.com>
- Mark Butler <butlerm@xmission.com>
- Eric Thelin <eric@generation-i.com>
- Marc Boucher <marc@mbsi.ca>
- Greg Onufer <Greg.Onufer@Eng.Sun.COM>
- Doug Kaufman <dkaufman@rahul.net>
- David Eriksson <david@2good.com>
- Ralph Beckmann <rabe@uni-paderborn.de>
- T. Yamada <tai@imasy.or.jp>
- Lars J. Aas <larsa@sim.no>
- Jörn Hartroth <Joern.Hartroth@computer.org>
- Matthew Clarke <clamat@van.maves.ca>
- Linus Nielsen Feltzing <linus@haxx.se>
- Felix von Leitner <felix@convergence.de>
- Dan Zitter <dzitter@zitter.net>
- Jongki Suwandi <Jongki.Suwandi@eng.sun.com>
- Chris Maltby <chris@aurema.com>
- Ron Zapp <rzapper@yahoo.com>
- Paul Marquis <pmarquis@iname.com>
- Ellis Pritchard <ellis@citria.com>
- Damien Adant <dams@usa.net>
- Chris <cbayliss@csc.come>
- Marco G. Salvagno <mgs@whiz.cjb.net>
- Paul Marquis <pmarquis@iname.com>
- David LeBlanc <dleblanc@qnx.com>
- Rich Gray at Plus Technologies
- Luong Dinh Dung <u8luong@lhsystems.hu>
- Torsten Foertsch <torsten.foertsch@gmx.net>
- Kristian Köhntopp <kris@koehntopp.de>
- Fred Noz <FNoz@siac.com>
- Caolan McNamara <caolan@csn.ul.ie>
- Albert Chin-A-Young <china@thewrittenword.com>
- Stephen Kick <skick@epicrealm.com>
- Martin Hedenfalk <mhe@stacken.kth.se>
- Richard Prescott <rip at step.polymtl.ca>
- Jason S. Priebe <priebe@wral-tv.com>
- T. Bharath <TBharath@responsenetworks.com>
- Alexander Kourakos <awk@users.sourceforge.net>
- James Griffiths <griffiths_james@yahoo.com>
- Loic Dachary <loic@senga.org>
- Robert Weaver <robert.weaver@sabre.com>
- Ingo Ralf Blum <ingoralfblum@ingoralfblum.com>
- Jun-ichiro itojun Hagino <itojun@iijlab.net>
- Frederic Lepied <flepied@mandrakesoft.com>
- Georg Horn <horn@koblenz-net.de>
- Cris Bailiff <c.bailiff@awayweb.com>
- Sterling Hughes <sterling@designmultimedia.com>
- S. Moonesamy
- Ingo Wilken <iw@WWW.Ecce-Terram.DE>
- Pawel A. Gajda <mis@k2.net.pl>
- Patrick Bihan-Faou
- Nico Baggus <Nico.Baggus@mail.ing.nl>
- Sergio Ballestrero
- Andrew Francis <locust@familyhealth.com.au>
- Tomasz Lacki <Tomasz.Lacki@primark.pl>
- Georg Huettenegger <georg@ist.org>
- John Lask <johnlask@hotmail.com>
- Eric Lavigne <erlavigne@wanadoo.fr>
- Marcus Webster <marcus.webster@phocis.com>
- Götz Babin-Ebell <babinebell@trustcenter.de>
- Andreas Damm <andreas-sourceforge@radab.org>
- Jacky Lam <sylam@emsoftltd.com>
- James Gallagher <jgallagher@gso.uri.edu>
Daniel Stenberg <daniel@haxx.se>
Rafael Sagula <sagula@inf.ufrgs.br>
Sampo Kellomaki <sampo@iki.fi>
Linas Vepstas <linas@linas.org>
Bjorn Reese <breese@mail1.stofanet.dk>
Johan Anderson <johan@homemail.com>
Kjell Ericson <Kjell.Ericson@haxx.se>
Troy Engel <tengel@sonic.net>
Ryan Nelson <ryan@inch.com>
Björn Stenberg <bjorn@haxx.se>
Angus Mackay <amackay@gus.ml.org>
Eric Young <eay@cryptsoft.com>
Simon Dick <simond@totally.irrelevant.org>
Oren Tirosh <oren@monty.hishome.net>
Steven G. Johnson <stevenj@alum.mit.edu>
Gilbert Ramirez Jr. <gram@verdict.uthscsa.edu>
Andrés García <ornalux@redestb.es>
Douglas E. Wegscheid <wegscd@whirlpool.com>
Mark Butler <butlerm@xmission.com>
Eric Thelin <eric@generation-i.com>
Marc Boucher <marc@mbsi.ca>
Greg Onufer <Greg.Onufer@Eng.Sun.COM>
Doug Kaufman <dkaufman@rahul.net>
David Eriksson <david@2good.com>
Ralph Beckmann <rabe@uni-paderborn.de>
T. Yamada <tai@imasy.or.jp>
Lars J. Aas <larsa@sim.no>
Jörn Hartroth <Joern.Hartroth@computer.org>
Matthew Clarke <clamat@van.maves.ca>
Linus Nielsen Feltzing <linus@haxx.se>
Felix von Leitner <felix@convergence.de>
Dan Zitter <dzitter@zitter.net>
Jongki Suwandi <Jongki.Suwandi@eng.sun.com>
Chris Maltby <chris@aurema.com>
Ron Zapp <rzapper@yahoo.com>
Paul Marquis <pmarquis@iname.com>
Ellis Pritchard <ellis@citria.com>
Damien Adant <dams@usa.net>
Chris <cbayliss@csc.come>
Marco G. Salvagno <mgs@whiz.cjb.net>
Paul Marquis <pmarquis@iname.com>
David LeBlanc <dleblanc@qnx.com>
Rich Gray at Plus Technologies
Luong Dinh Dung <u8luong@lhsystems.hu>
Torsten Foertsch <torsten.foertsch@gmx.net>
Kristian Köhntopp <kris@koehntopp.de>
Fred Noz <FNoz@siac.com>
Caolan McNamara <caolan@csn.ul.ie>
Albert Chin-A-Young <china@thewrittenword.com>
Stephen Kick <skick@epicrealm.com>
Martin Hedenfalk <mhe@stacken.kth.se>
Richard Prescott <rip at step.polymtl.ca>
Jason S. Priebe <priebe@wral-tv.com>
T. Bharath <TBharath@responsenetworks.com>
Alexander Kourakos <awk@users.sourceforge.net>
James Griffiths <griffiths_james@yahoo.com>
Loic Dachary <loic@senga.org>
Robert Weaver <robert.weaver@sabre.com>
Ingo Ralf Blum <ingoralfblum@ingoralfblum.com>
Jun-ichiro itojun Hagino <itojun@iijlab.net>
Frederic Lepied <flepied@mandrakesoft.com>
Georg Horn <horn@koblenz-net.de>
Cris Bailiff <c.bailiff@awayweb.com>
Sterling Hughes <sterling@designmultimedia.com>
S. Moonesamy
Ingo Wilken <iw@WWW.Ecce-Terram.DE>
Pawel A. Gajda <mis@k2.net.pl>
Patrick Bihan-Faou
Nico Baggus <Nico.Baggus@mail.ing.nl>
Sergio Ballestrero
Andrew Francis <locust@familyhealth.com.au>
Tomasz Lacki <Tomasz.Lacki@primark.pl>
Georg Huettenegger <georg@ist.org>
John Lask <johnlask@hotmail.com>
Eric Lavigne <erlavigne@wanadoo.fr>
Marcus Webster <marcus.webster@phocis.com>
Götz Babin-Ebell <babinebell@trustcenter.de>
Andreas Damm <andreas-sourceforge@radab.org>
Jacky Lam <sylam@emsoftltd.com>
James Gallagher <jgallagher@gso.uri.edu>
Kjetil Jacobsen <kjetilja@cs.uit.no>
Markus F.X.J. Oberhumer <markus@oberhumer.com>
Miklos Nemeth <mnemeth@kfkisystems.com>
Kevin Roth <kproth@users.sourceforge.net>
Ralph Mitchell <rmitchell@eds.com>
Dan Fandrich <dan@coneharvesters.com>
Jean-Philippe Barrette-LaPierre <jpb@rrette.com>
Richard Bramante <RBramante@on.com>
Daniel Kouril <kouril@ics.muni.cz>
Dirk Manske <dm@nettraffic.de>

docs/TODO

@@ -10,59 +10,49 @@ TODO
send us patches that improve things! Also check the http://curl.haxx.se/dev
web section for various technical development notes.
All bugs documented in the KNOWN_BUGS document are subject for fixing!
LIBCURL
* Introduce an interface to libcurl that allows applications to easier get to
know what cookies that are received. Pushing interface that calls a
callback on each received cookie? Querying interface that asks about
existing cookies? We probably need both. Enable applications to modify
existing cookies as well.
* Make content encoding/decoding internally be made using a filter system.
existing cookies as well. http://curl.haxx.se/dev/COOKIES
* Introduce another callback interface for upload/download that makes one
less copy of data and thus a faster operation.
[http://curl.haxx.se/dev/no_copy_callbacks.txt]
* Add asynchronous name resolving (http://daniel.haxx.se/resolver/). This
should be made to work on most of the supported platforms, or otherwise it
isn't really interesting.
* Data sharing. Tell which easy handles within a multi handle that should
share cookies, connection cache, dns cache, ssl session cache. Full
suggestion found here: http://curl.haxx.se/dev/sharing.txt
* Mutexes. By adding mutex callback support, the 'data sharing' mentioned
above can be made between several easy handles running in different threads
too. The actual mutex implementations will be left for the application to
implement, libcurl will merely call 'getmutex' and 'leavemutex' callbacks.
Part of the sharing suggestion at: http://curl.haxx.se/dev/sharing.txt
* More data sharing. curl_share_* functions already exist and work, and they
can be extended to share more.
* Set the SO_KEEPALIVE socket option to make libcurl notice and disconnect
very long time idle connections.
* Go through the code and verify that libcurl deals with big files >2GB and
>4GB all over. Bug reports (and source reviews) indicate that it doesn't
currently work properly.
>4GB all over. Bug reports (and source reviews) show that it doesn't
currently work.
* CURLOPT_MAXFILESIZE. Prevent downloads that are larger than the specified
size. CURLE_FILESIZE_EXCEEDED would then be returned. Gautam Mani
requested. That is, the download should not even begin but be aborted
immediately.
* Allow the http_proxy (and other) environment variables to contain user and
password as well in the style: http://proxyuser:proxypasswd@proxy:port
Berend Reitsma suggested.
LIBCURL - multi interface
* Make sure we don't ever loop because of non-blocking sockets return
EWOULDBLOCK or similar. This FTP command sending etc.
* Add curl_multi_timeout() to make libcurl's ares-functionality better.
* Make uploads treated better. We need a way to tell libcurl we have data to
write, as the current system expects us to upload data each time the socket
is writable and there is no way to say that we want to upload data soon
just not right now, without that aborting the upload.
* Make sure we don't ever loop because of non-blocking sockets return
EWOULDBLOCK or similar. This FTP command sending, the SSL connection etc.
* Make transfers treated more carefully. We need a way to tell libcurl we
have data to write, as the current system expects us to upload data each
time the socket is writable and there is no way to say that we want to
upload data soon just not right now, without that aborting the upload. The
opposite situation should be possible as well, that we tell libcurl we're
ready to accept read data. Today libcurl feeds the data as soon as it is
available for reading, no matter what.
DOCUMENTATION
@@ -70,51 +60,28 @@ TODO
FTP
* FTP ASCII upload does not follow RFC959 section 3.1.1.1: "The sender
converts the data from an internal character representation to the standard
8-bit NVT-ASCII representation (see the Telnet specification). The
receiver will convert the data from the standard form to his own internal
form."
* Support the most common FTP proxies, Philip Newton provided a list
allegedly from ncftp:
http://curl.haxx.se/mail/archive-2003-04/0126.html
* Make CURLOPT_FTPPORT support an additional port number on the IP/if/name,
like "blabla:[port]" or possibly even "blabla:[portfirst]-[portsecond]".
* FTP ASCII transfers do not follow RFC959. They don't convert the data
accordingly.
* Since USERPWD always override the user and password specified in URLs, we
might need another way to specify user+password for anonymous ftp logins.
* An option to only download remote FTP files if they're newer than the local
one is a good idea, and it would fit right into the same syntax as the
already working http dito works. It of course requires that 'MDTM' works,
and it isn't a standard FTP command.
* Add FTPS support with SSL for the data connection too. This should be made
according to the specs written in draft-murray-auth-ftp-ssl-08.txt,
"Securing FTP with TLS"
according to the specs written in draft-murray-auth-ftp-ssl-11.txt,
"Securing FTP with TLS", valid until September 27th 2003.
http://curl.haxx.se/rfc/draft-murray-auth-ftp-ssl-11.txt
HTTP
* Pass a list of host name to libcurl to which we allow the user name and
password to get sent to. Currently, it only get sent to the host name that
the first URL uses (to prevent others from being able to read it), but this
also prevents the authentication info from getting sent when following
locations to legitimate other host names.
* Authentication: NTLM. Support for that MS crap called NTLM
authentication. MS proxies and servers sometime require that. Since that
protocol is a proprietary one, it involves reverse engineering and network
sniffing. This should however be a library-based functionality. There are a
few different efforts "out there" to make open source HTTP clients support
this and it should be possible to take advantage of other people's hard
work. http://modntlm.sourceforge.net/ is one. There's a web page at
http://www.innovation.ch/java/ntlm.html that contains detailed reverse-
engineered info.
* RFC2617 compliance, "Digest Access Authentication". A valid test page seems
to exist at: http://hopf.math.nwu.edu/testpage/digest/ And some friendly
person's server source code is available at
http://hopf.math.nwu.edu/digestauth/index.html Then there's the Apache
mod_digest source code too, of course. It seems as if Netscape doesn't
support this, and not many servers do, although this is a much better
authentication method than the more common "Basic". Basic sends the
password in cleartext over the network, while this "Digest" method uses a
challenge-response protocol which increases security quite a lot.
* Digest, NTLM and GSS-Negotiate support for HTTP proxies. They all work
on direct-connections to the server.
* Pipelining. Sending multiple requests before the previous one(s) are done.
This could possibly be implemented using the multi interface to queue
@@ -199,6 +166,9 @@ TODO
TEST SUITE
* If perl wasn't found by the configure script, don't attempt to run the
tests but explain nicely why they don't run.
* Extend the test suite to include more protocols. The telnet tests could just
do ftp or http operations (for which we have test servers).
@@ -206,3 +176,10 @@ TODO
fork()s and it should become even more portable.
* Introduce a test suite that tests libcurl better and more explicitly.
NEXT MAJOR RELEASE
* curl_easy_cleanup() returns void, but curl_multi_cleanup() returns a
CURLMcode. These should be changed to be the same.
* curl_formparse() should be removed

View File

@@ -2,7 +2,7 @@
.\" nroff -man curl.1
.\" Written by Daniel Stenberg
.\"
.TH curl 1 "11 Sep 2002" "Curl 7.10" "Curl Manual"
.TH curl 1 "8 Aug 2003" "Curl 7.10.7" "Curl Manual"
.SH NAME
curl \- transfer a URL
.SH SYNOPSIS
@@ -10,14 +10,18 @@ curl \- transfer a URL
.I [URL...]
.SH DESCRIPTION
.B curl
is a client to get documents/files from or send documents to a server, using
any of the supported protocols (HTTP, HTTPS, FTP, GOPHER, DICT, TELNET, LDAP
or FILE). The command is designed to work without user interaction or any kind
of interactivity.
is a tool to transfer data from or to a server, using one of the supported
protocols (HTTP, HTTPS, FTP, FTPS, GOPHER, DICT, TELNET, LDAP or FILE). The
command is designed to work without user interaction.
curl offers a busload of useful tricks like proxy support, user
authentication, ftp upload, HTTP post, SSL (https:) connections, cookies, file
transfer resume and more.
transfer resume and more. As you will see below, the amount of features will
make your head spin!
curl is powered by libcurl for all transfer-related features. See
.BR libcurl (3)
for details.
.SH URL
The URL syntax is protocol dependent. You'll find a detailed description in
RFC 2396.
@@ -48,10 +52,8 @@ specified on a single command line and cannot be used between separate curl
invokes.
.SH OPTIONS
.IP "-a/--append"
(FTP)
When used in a ftp upload, this will tell curl to append to the target
file instead of overwriting it. If the file doesn't exist, it will
be created.
(FTP) When used in an FTP upload, this will tell curl to append to the target
file instead of overwriting it. If the file doesn't exist, it will be created.
If this option is used twice, the second one will disable append mode again.
.IP "-A/--user-agent <agent string>"
@@ -63,6 +65,16 @@ surround the string with single quote marks. This can also be set with the
If this option is set more than once, the last one will be the one that's
used.
.IP "--anyauth"
(HTTP) Tells curl to figure out the authentication method by itself, and use the
most secure one the remote site claims it supports. This is done by first
doing a request and checking the response-headers, thus inducing an extra
network round-trip. This is used instead of setting a specific authentication
method, which you can do with \fI--digest\fP, \fI--ntlm\fP, and
\fI--negotiate\fP. (Added in 7.10.6)
If this option is used several times, the following occurrences make no
difference.
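For example, an illustrative sketch (the URL and credentials are invented) that
lets curl pick whatever method the server advertises as most secure:

\fBcurl\fP --anyauth -u myname:mypassword http://example.com/protected/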
.IP "-b/--cookie <name=data>"
(HTTP)
Pass the data to the HTTP server as a cookie. It is supposedly the
@@ -90,12 +102,26 @@ also be enforced by using an URL that ends with ";type=A". This option causes
data sent to stdout to be in text mode for win32 systems.
If this option is used twice, the second one will disable ASCII usage.
.IP "--basic"
(HTTP) Tells curl to use HTTP Basic authentication. This is the default and
this option is usually pointless, unless you use it to override a previously
set option that sets a different authentication method (such as \fI--ntlm\fP,
\fI--digest\fP and \fI--negotiate\fP). (Added in 7.10.6)
If this option is used several times, the following occurrences make no
difference.
.IP "--ciphers <list of ciphers>"
(SSL) Specifies which ciphers to use in the connection. The list of ciphers
must be using valid ciphers. Read up on SSL cipher list details on this URL:
.I http://www.openssl.org/docs/apps/ciphers.html (Option added in curl 7.9)
.I http://www.openssl.org/docs/apps/ciphers.html
If this option is used several times, the last one will override the others.
.IP "--compressed"
(HTTP) Request a compressed response using one of the algorithms libcurl
supports, and return the uncompressed document. If this option is used and
the server sends an unsupported encoding, Curl will report an error.
If this option is used several times, each occurrence will toggle it on/off.
.IP "--connect-timeout <seconds>"
Maximum time in seconds that you allow the connection to the server to take.
This only limits the connection phase, once curl has connected this option is
@@ -108,7 +134,13 @@ operation. Curl writes all cookies previously read from a specified file as
well as all cookies received from remote server(s). If no cookies are known,
no file will be written. The file will be written using the Netscape cookie
file format. If you set the file name to a single dash, "-", the cookies will
be written to stdout. (Option added in curl 7.9)
be written to stdout.
.B NOTE
If the cookie jar can't be created or written to, the whole curl operation
won't fail or even report an error clearly. Using -v will get a warning
displayed, but that is the only visible feedback you get about this possibly
lethal situation.
If this option is used several times, the last specified file name will be
used.
@@ -122,7 +154,7 @@ Use "-C -" to tell curl to automatically find out where/how to resume the
transfer. It then uses the given output/input files to figure that out.
If this option is used several times, the last one will be used.
.IP "---create-dirs"
.IP "--create-dirs"
When used in conjunction with the -o option, curl will create the necessary
local directory hierarchy as needed.
.IP "--crlf"
@@ -134,7 +166,7 @@ If this option is used twice, the second will again disable crlf converting.
that can emulate as if a user has filled in a HTML form and pressed the submit
button. Note that the data is sent exactly as specified with no extra
processing (with all newlines cut off). The data is expected to be
"url-encoded". This will cause curl to pass the data to the server using the
\&"url-encoded". This will cause curl to pass the data to the server using the
content-type application/x-www-form-urlencoded. Compare to -F. If more than
one -d/--data option is used on the same command line, the data pieces
specified will be merged together with a separating &-letter. Thus, using '-d
@@ -145,7 +177,7 @@ If you start the data with the letter @, the rest should be a file name to
read the data from, or - if you want curl to read the data from stdin. The
contents of the file must already be url-encoded. Multiple files can also be
specified. Posting data from a file named 'foobar' would thus be done with
"--data @foobar".
\&"--data @foobar".
To post data purely binary, you should instead use the --data-binary option.
@@ -166,9 +198,27 @@ want to post a binary file without the strip-newlines feature of the
If this option is used several times, the ones following the first will
append data.
.IP "--digest"
(HTTP) Enables HTTP Digest authentication. This is an authentication method
that prevents the password from being sent over the wire in clear text. Use
this in combination with the normal -u/--user option to set user name and
password. See also \fI--ntlm\fP, \fI--negotiate\fP and \fI--anyauth\fP for
related options. (Added in curl 7.10.6)
If this option is used several times, the following occurrences make no
difference.
.IP "--disable-eprt"
(FTP) Tell curl to disable the use of the EPRT and LPRT commands when doing
active FTP transfers. Curl will normally always first attempt to use EPRT,
then LPRT before using PORT, but with this option, it will use PORT right
away. EPRT and LPRT are extensions to the original FTP protocol, may not work
on all servers but enable more functionality in a better way than the
traditional PORT command. (Added in 7.10.5)
If this option is used several times, each occurrence will toggle this on/off.
.IP "--disable-epsv"
(FTP) Tell curl to disable the use of the EPSV command when doing passive FTP
downloads. Curl will normally always first attempt to use EPSV before PASV,
transfers. Curl will normally always first attempt to use EPSV before PASV,
but with this option, it will not try using EPSV.
If this option is used several times, each occurrence will toggle this on/off.
@@ -217,14 +267,21 @@ If this option is used several times, the last one will be used.
peer. The file may contain multiple CA certificates. The certificate(s) must
be in PEM format.
curl recognizes the environment variable named 'CURL_CA_BUNDLE' if that is
set, and uses the given path as a path to a CA cert bundle. This option
overrides that variable.
The windows version of curl will automatically look for a CA certs file named
\'curl-ca-bundle.crt\', either in the same directory as curl.exe, or in the
Current Working Directory, or in any folder along your PATH.
If this option is used several times, the last one will be used.
.IP "--capath <CA certificate directory>"
(HTTPS) Tells curl to use the specified certificate directory to verify the
peer. The certificates must be in PEM format, and the directory must have been
processed using the c_rehash utility supplied with openssl. Certificate directories
are not supported under Windows (because c_rehash uses symbolic links to
create them). Using --capath can allow curl to make https connections much
more efficiently than using --cacert if the --cacert file contains many CA certificates.
peer. The certificates must be in PEM format, and the directory must have been
processed using the c_rehash utility supplied with openssl. Using --capath can
allow curl to make https connections much more efficiently than using --cacert
if the --cacert file contains many CA certificates.
If this option is used several times, the last one will be used.
.IP "-f/--fail"
@@ -234,6 +291,12 @@ normal cases when a HTTP server fails to deliver a document, it returns a HTML
document stating so (which often also describes why and more). This flag will
prevent curl from outputting that and fail silently instead.
If this option is used twice, the second will again disable silent failure.
.IP "--ftp-create-dirs"
(FTP) When an FTP URL/operation uses a path that doesn't currently exist on
the server, the standard behaviour of curl is to fail. Using this option, curl
will instead attempt to create missing directories. (Added in 7.10.7)
If this option is used twice, the second will again disable directory creation.
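A hedged example (server name and paths invented), uploading into a remote
directory that may not exist yet:

\fBcurl\fP --ftp-create-dirs -T local.txt ftp://ftp.example.com/new/dir/local.txt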
.IP "-F/--form <name=content>"
(HTTP) This lets curl emulate a filled in form in which a user has pressed the
@@ -249,12 +312,18 @@ Example, to send your password file to the server, where
\&'password' is the name of the form-field to which /etc/passwd will be the
input:
.B curl
-F password=@/etc/passwd www.mypasswords.com
\fBcurl\fP -F password=@/etc/passwd www.mypasswords.com
To read the file's content from stdin instead of a file, use - where the file
name should've been. This goes for both @ and < constructs.
You can also tell curl what Content-Type to use for the file upload part, by
using 'type=', in a manner similar to:
\fBcurl\fP -F "web=@index.html;type=text/html" url.com
See further examples and details in the MANUAL.
This option can be used multiple times.
.IP "-g/--globoff"
This option switches off the "URL globbing parser". When you set this option,
@@ -265,7 +334,7 @@ contents but they should be encoded according to the URI standard.
When used, this option will make all data specified with -d/--data or
--data-binary to be used in a HTTP GET request instead of the POST request
that otherwise would be used. The data will be appended to the URL with a '?'
separator. (Option added in curl 7.9)
separator.
If used in combination with -I, the POST data will instead be appended to the
URL with a HEAD request.
@@ -347,8 +416,8 @@ url = "http://curl.haxx.se/docs/"
This option can be used multiple times.
.IP "--limit-rate <speed>"
Specify the maximum transfer rate you want curl to use. This feature is useful
if you have a limited pipe and you'd prefer you have your transfer not use
your entire bandwidth.
if you have a limited pipe and you'd like your transfer not to use your entire
bandwidth.
The given speed is measured in bytes/second, unless a suffix is
appended. Appending 'k' or 'K' will count the number as kilobytes, 'm' or 'M'
@@ -374,9 +443,18 @@ If this option is used twice, the second will again disable list only.
(HTTP/HTTPS) If the server reports that the requested page has a different
location (indicated with the header line Location:) this flag will let curl
attempt the request again on the new location. If used together with -i or -I,
headers from all requested pages will be shown. If this flag is used when
making a HTTP POST, curl will automatically switch to GET after the initial
POST has been done.
headers from all requested pages will be shown. If authentication is used,
curl will only send its credentials to the initial host, so if a redirect
takes curl to a different host, that host will not receive the user+password.
See also \fI--location-trusted\fP on how to change this.
If this option is used twice, the second will again disable location following.
.IP "--location-trusted"
(HTTP/HTTPS) Like \fI--location\fP, but will allow sending the name + password
to all hosts that the site may redirect to. This may or may not introduce a
security breach if the site redirects you to a site to which you'll send your
authentication info (which is plaintext in the case of HTTP Basic
authentication).
If this option is used twice, the second will again disable location following.
.IP "-m/--max-time <seconds>"
@@ -410,6 +488,19 @@ to allow curl to ftp to the machine host.domain.com with user name
.B "machine host.domain.com login myself password secret"
If this option is used twice, the second will again disable netrc usage.
.IP "--negotiate"
(HTTP) Enables GSS-Negotiate authentication. The GSS-Negotiate method was
designed by Microsoft and is used in their web applications. It is primarily
meant as a support for Kerberos5 authentication but may also be used along
with other authentication methods. For more information see IETF draft
draft-brezak-spnego-http-04.txt. (Added in 7.10.6)
\fBNOTE\fP that this option requires that the library was built with GSSAPI
support. This is not very common. Use \fIcurl --version\fP to see if your
version supports GSS-Negotiate.
If this option is used several times, the following occurrences make no
difference.
.IP "-N/--no-buffer"
Disables the buffering of the output stream. In normal work situations, curl
will use a standard buffered output stream that will have the effect that it
@@ -417,6 +508,19 @@ will output the data in chunks, not necessarily exactly when the data arrives.
Using this option will disable that buffering.
If this option is used twice, the second will again switch on buffering.
.IP "--ntlm"
(HTTP) Enables NTLM authentication. The NTLM authentication method was
designed by Microsoft and is used by IIS web servers. It is a proprietary
protocol, reverse-engineered by clever people and implemented in curl based
on their efforts. This kind of behavior should not be endorsed, you should
encourage everyone who uses NTLM to switch to a public and documented
authentication method instead, such as Digest. (Added in 7.10.6)
\fBNOTE\fP that this option requires that the library was built with SSL
support. Use \fIcurl --version\fP to see if your version supports NTLM.
If this option is used several times, the following occurrences make no
difference.
.IP "-o/--output <file>"
Write output to <file> instead of stdout. If you are using {} or [] to fetch
multiple documents, you can use '#' followed by a number in the <file>
@@ -433,8 +537,8 @@ You may use this option as many times as you have number of URLs.
See also the --create-dirs option to create the local directories dynamically.
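As a small illustration (the URL is made up), each globbed page below is saved
to its own file, with #1 replaced by the current glob value:

\fBcurl\fP "http://example.com/{one,two}.html" -o "page_#1.html"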
.IP "-O/--remote-name"
Write output to a local file named like the remote file we get. (Only
the file part of the remote file is used, the path is cut off.)
Write output to a local file named like the remote file we get. (Only the file
part of the remote file is used, the path is cut off.)
You may use this option as many times as you have number of URLs.
.IP "-p/--proxytunnel"
@@ -573,7 +677,7 @@ descriptive information, to the given output file. Use "-" as filename to have
the output sent to stdout.
If this option is used several times, the last one will be used. (Added in
curl 7.9.7)
7.9.7)
.IP "--trace-ascii <file>"
Enables a full trace dump of all incoming and outgoing data, including
descriptive information, to the given output file. Use "-" as filename to have
@@ -584,11 +688,14 @@ the ASCII part of the dump. It makes smaller output that might be easier to
read for untrained humans.
If this option is used several times, the last one will be used. (Added in
curl 7.9.7)
7.9.7)
.IP "-u/--user <user:password>"
Specify user and password to use when fetching. See README.curl for detailed
examples of how to use this. If no password is specified, curl will
ask for it interactively.
Specify user and password to use when fetching. Read the MANUAL for detailed
examples of how to use this. If no password is specified, curl will ask for it
interactively.
You can also use the --digest option to enable Digest authentication when
communicating with HTTP 1.1 servers.
If this option is used several times, the last one will be used.
.IP "-U/--proxy-user <user:password>"
@@ -614,10 +721,22 @@ info provided by curl.
Note that if you want to see HTTP headers in the output, \fI-i/--include\fP
might be the option you're looking for.
If you think this option still doesn't give you enough details, consider using
\fI--trace\fP or \fI--trace-ascii\fP instead.
If this option is used twice, the second will again disable verbose.
.IP "-V/--version"
Displays the full version of curl, libcurl and other 3rd party libraries
linked with the executable.
Displays information about curl and the libcurl version it uses.
The first line includes the full version of curl, libcurl and other 3rd party
libraries linked with the executable.
The second line (starts with "Protocols:") shows all protocols that libcurl
reports to support.
The third line (starts with "Features:") shows specific features libcurl
reports to offer.
.IP "-w/--write-out <format>"
Defines what to display after a completed and successful operation. The format
is a string that may contain plain text mixed with any number of variables. The
@@ -696,7 +815,7 @@ at port 1080.
This option overrides existing environment variables that set the proxy to
use. If there's an environment variable setting a proxy, you can set proxy to
"" to override it.
\&"" to override it.
\fBNote\fP that all operations that are performed over an HTTP proxy will
transparently be converted to HTTP. It means that certain protocol specific
@@ -834,7 +953,8 @@ FTP write error. The transfer was reported bad by the server.
.IP 21
FTP quote error. A quote command returned error from the server.
.IP 22
HTTP not found. The requested page was not found. This return code only
HTTP page not retrieved. The requested url was not found or returned another
error with the HTTP error code being 400 or above. This return code only
appears if --fail is used.
.IP 23
Write error. Curl couldn't write data to a local filesystem or similar.
@@ -917,8 +1037,6 @@ Unrecognized transfer encoding
.IP XX
There will appear more error codes here in future releases. The existing ones
are meant to never change.
.SH BUGS
If you do find bugs, mail them to curl-bug@haxx.se.
.SH AUTHORS / CONTRIBUTORS
Daniel Stenberg is the main author, but the whole list of contributors is
found in the separate THANKS file.

View File

@@ -9,7 +9,7 @@ EXTRA_DIST = README curlgtk.c sepheaders.c simple.c postit2.c \
multithread.c getinmemory.c ftpupload.c httpput.c \
simplessl.c ftpgetresp.c http-post.c post-callback.c \
multi-app.c multi-double.c multi-single.c multi-post.c \
fopen.c simplepost.c
fopen.c simplepost.c makefile.dj
all:
@echo "done"

View File

@@ -1,222 +1,565 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* $Id$
* This example source code introduces a c library buffered I/O interface to
* URL reads. It supports fopen(), fread(), fgets(), feof(), fclose(),
* rewind(). Supported functions have identical prototypes to their normal c
* lib namesakes and are preceded by url_ .
*
* This example source code introduces an fopen()/fread()/fclose() emulation
* for URL reads. Using an approach similar to this, you could replace your
* program's fopen() with this url_fopen() and fread() with url_fread() and
* it should be possible to read remote streams instead of (only) local files.
* Using this code you can replace your program's fopen() with url_fopen()
* and fread() with url_fread() and it becomes possible to read remote streams
* instead of (only) local files. Local files (i.e. those that can be directly
* fopened) will drop back to using the underlying clib implementations
*
* See the main() function at the bottom that shows a tiny app in action.
* See the main() function at the bottom that shows an app that retrieves from a
* specified url using fgets() and fread() and saves as two output files.
*
* This source code is a proof of concept. It will need further attention to
* become production-use useful and solid.
* Copyright (c) 2003 Simtec Electronics
*
* Re-implemented by Vincent Sanders <vince@kyllikki.org> with extensive
* reference to original curl example code
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This example requires libcurl 7.9.7 or later.
*/
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <stdlib.h>
#include <errno.h>
#include <curl/curl.h>
#include <curl/types.h>
#include <curl/easy.h>
struct data {
int type;
union {
CURL *curl;
FILE *file;
} handle;
#if (LIBCURL_VERSION_NUM < 0x070907)
#error "too old libcurl version, get the latest!"
#endif
/* TODO: We should perhaps document the biggest possible buffer chunk we can
get from libcurl in one single callback... */
char buffer[CURL_MAX_WRITE_SIZE];
char *readptr; /* read from here */
int bytes; /* bytes available from read pointer */
enum fcurl_type_e { CFTYPE_NONE=0, CFTYPE_FILE=1, CFTYPE_CURL=2 };
CURLMcode m; /* stored from a previous url_fread() */
struct fcurl_data
{
enum fcurl_type_e type; /* type of handle */
union {
CURL *curl;
FILE *file;
} handle; /* handle */
char *buffer; /* buffer to store cached data*/
int buffer_len; /* currently allocated buffers length */
int buffer_pos; /* end of data in buffer*/
int still_running; /* Is background url fetch still in progress */
};
typedef struct data URL_FILE;
typedef struct fcurl_data URL_FILE;
/* exported functions */
URL_FILE *url_fopen(char *url,const char *operation);
int url_fclose(URL_FILE *file);
int url_feof(URL_FILE *file);
size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file);
char * url_fgets(char *ptr, int size, URL_FILE *file);
void url_rewind(URL_FILE *file);
/* we use a global one for convenience */
CURLM *multi_handle;
static
size_t write_callback(char *buffer,
size_t size,
size_t nitems,
void *userp)
/* curl calls this routine to get more data */
static size_t
write_callback(char *buffer,
size_t size,
size_t nitems,
void *userp)
{
URL_FILE *url = (URL_FILE *)userp;
size *= nitems;
char *newbuff;
int rembuff;
memcpy(url->readptr, buffer, size);
url->readptr += size;
url->bytes += size;
URL_FILE *url = (URL_FILE *)userp;
size *= nitems;
return size;
}
rembuff=url->buffer_len - url->buffer_pos;//remaining space in buffer
URL_FILE *url_fopen(char *url, char *operation)
{
/* this code could check for URLs or types in the 'url' and
basically use the real fopen() for standard files */
if(size > rembuff)
{
//not enough space in buffer
newbuff=realloc(url->buffer,url->buffer_len + (size - rembuff));
if(newbuff==NULL)
{
fprintf(stderr,"callback buffer grow failed\n");
size=rembuff;
}
else
{
/* realloc succeeded, increase buffer size */
url->buffer_len+=size - rembuff;
url->buffer=newbuff;
URL_FILE *file;
int still_running;
file = (URL_FILE *)malloc(sizeof(URL_FILE));
if(!file)
return NULL;
memset(file, 0, sizeof(URL_FILE));
file->type = 1; /* marked as URL, use 0 for plain file */
file->handle.curl = curl_easy_init();
curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
curl_easy_setopt(file->handle.curl, CURLOPT_FILE, file);
curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, FALSE);
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
if(!multi_handle)
multi_handle = curl_multi_init();
curl_multi_add_handle(multi_handle, file->handle.curl);
while(CURLM_CALL_MULTI_PERFORM ==
curl_multi_perform(multi_handle, &still_running));
/* if still_running would be 0 now, we should return NULL */
return file;
}
void url_fclose(URL_FILE *file)
{
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
}
size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
{
fd_set fdread;
fd_set fdwrite;
fd_set fdexcep;
int maxfd;
struct timeval timeout;
int rc;
int still_running = 0;
if(!file->bytes) { /* no data available at this point */
file->readptr = file->buffer; /* reset read pointer */
if(CURLM_CALL_MULTI_PERFORM == file->m) {
while(CURLM_CALL_MULTI_PERFORM ==
curl_multi_perform(multi_handle, &still_running)) {
if(file->bytes) {
printf("(fread) WOAH! THis happened!\n");
break;
}
}
if(!still_running) {
printf("NO MORE RUNNING AROUND!\n");
return 0;
}
/*printf("Callback buffer grown to %d bytes\n",url->buffer_len);*/
}
}
FD_ZERO(&fdread);
FD_ZERO(&fdwrite);
FD_ZERO(&fdexcep);
/* set a suitable timeout to fail on */
timeout.tv_sec = 500; /* 5 minutes */
timeout.tv_usec = 0;
memcpy(&url->buffer[url->buffer_pos], buffer, size);
url->buffer_pos += size;
/* get file descriptors from the transfers */
curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
/*fprintf(stderr, "callback %d size bytes\n", size);*/
rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
return size;
}
switch(rc) {
case -1:
/* select error */
break;
case 0:
break;
default:
/* timeout or readable/writable sockets */
do {
file->m = curl_multi_perform(multi_handle, &still_running);
/* use to attempt to fill the read buffer up to requested number of bytes */
static int
curl_fill_buffer(URL_FILE *file,int want,int waittime)
{
fd_set fdread;
fd_set fdwrite;
fd_set fdexcep;
int maxfd;
struct timeval timeout;
int rc;
if(file->bytes)
/* we have received data, return that now */
break;
/* only attempt to fill buffer if transactions still running and buffer
* doesn't exceed required size already
*/
if((!file->still_running) || (file->buffer_pos > want))
return 0;
} while(CURLM_CALL_MULTI_PERFORM == file->m);
/* attempt to fill buffer */
do
{
FD_ZERO(&fdread);
FD_ZERO(&fdwrite);
FD_ZERO(&fdexcep);
/* set a suitable timeout to fail on */
timeout.tv_sec = 60; /* 1 minute */
timeout.tv_usec = 0;
if(!still_running)
printf("NO MORE RUNNING AROUND!\n");
/* get file descriptors from the transfers */
curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
break;
rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
switch(rc) {
case -1:
/* select error */
break;
case 0:
break;
default:
/* timeout or readable/writable sockets */
/* note we *could* be more efficient and not wait for
* CURLM_CALL_MULTI_PERFORM to clear here and check it on re-entry
* but that gets messy */
while(curl_multi_perform(multi_handle, &file->still_running) ==
CURLM_CALL_MULTI_PERFORM);
break;
}
} while(file->still_running && (file->buffer_pos < want));
return 1;
}
/* use to remove want bytes from the front of a files buffer */
static int
curl_use_buffer(URL_FILE *file,int want)
{
/* sort out buffer */
if((file->buffer_pos - want) <=0)
{
/* ditch buffer - write will recreate */
if(file->buffer)
free(file->buffer);
file->buffer=NULL;
file->buffer_pos=0;
file->buffer_len=0;
}
}
else
printf("(fread) Skip network read\n");
else
{
/* move rest down make it available for later */
memmove(file->buffer,
&file->buffer[want],
(file->buffer_pos - want));
if(file->bytes) {
/* data already available, return that */
int want = size * nmemb;
file->buffer_pos -= want;
}
return 0;
}
if(file->bytes < want)
want = file->bytes;
memcpy(ptr, file->readptr, want);
file->readptr += want;
file->bytes -= want;
printf("(fread) return %d bytes\n", want);
URL_FILE *
url_fopen(char *url,const char *operation)
{
/* this code could check for URLs or types in the 'url' and
basically use the real fopen() for standard files */
URL_FILE *file;
(void)operation;
file = (URL_FILE *)malloc(sizeof(URL_FILE));
if(!file)
return NULL;
memset(file, 0, sizeof(URL_FILE));
if((file->handle.file=fopen(url,operation)))
{
file->type = CFTYPE_FILE; /* marked as URL */
}
else
{
file->type = CFTYPE_CURL; /* marked as URL */
file->handle.curl = curl_easy_init();
curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
curl_easy_setopt(file->handle.curl, CURLOPT_FILE, file);
curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, FALSE);
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
if(!multi_handle)
multi_handle = curl_multi_init();
curl_multi_add_handle(multi_handle, file->handle.curl);
/* lets start the fetch */
while(curl_multi_perform(multi_handle, &file->still_running) ==
CURLM_CALL_MULTI_PERFORM );
if((file->buffer_pos == 0) && (!file->still_running))
{
/* if still_running is 0 now, we should return NULL */
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
free(file);
file = NULL;
}
}
return file;
}
int
url_fclose(URL_FILE *file)
{
int ret=0;/* default is good return */
switch(file->type)
{
case CFTYPE_FILE:
ret=fclose(file->handle.file); /* passthrough */
break;
case CFTYPE_CURL:
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
break;
default: /* unknown or unsupported type - oh dear */
ret=EOF;
errno=EBADF;
break;
}
if(file->buffer)
free(file->buffer);/* free any allocated buffer space */
free(file);
return ret;
}
int
url_feof(URL_FILE *file)
{
int ret=0;
switch(file->type)
{
case CFTYPE_FILE:
ret=feof(file->handle.file);
break;
case CFTYPE_CURL:
if((file->buffer_pos == 0) && (!file->still_running))
ret = 1;
break;
default: /* unknown or unsupported type - oh dear */
ret=-1;
errno=EBADF;
break;
}
return ret;
}
size_t
url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
{
size_t want;
switch(file->type)
{
case CFTYPE_FILE:
want=fread(ptr,size,nmemb,file->handle.file);
break;
case CFTYPE_CURL:
want = nmemb * size;
curl_fill_buffer(file,want,1);
/* check if there's data in the buffer - if not curl_fill_buffer()
* either errored or EOF */
if(!file->buffer_pos)
return 0;
/* ensure only available data is considered */
if(file->buffer_pos < want)
want = file->buffer_pos;
/* xfer data to caller */
memcpy(ptr, file->buffer, want);
curl_use_buffer(file,want);
want = want / size; /* number of items - nb correct op - checked
* with glibc code*/
/*printf("(fread) return %d bytes %d left\n", want,file->buffer_pos);*/
break;
default: /* unknown or unsupported type - oh dear */
want=0;
errno=EBADF;
break;
}
return want;
}
return 0; /* no data available to return */
}
int main(int argc, char *argv[])
char *
url_fgets(char *ptr, int size, URL_FILE *file)
{
URL_FILE *handle;
int nread;
char buffer[256];
int want = size - 1;/* always need to leave room for zero termination */
int loop;
handle = url_fopen("http://www.haxx.se", "r");
switch(file->type)
{
case CFTYPE_FILE:
ptr = fgets(ptr,size,file->handle.file);
break;
if(!handle) {
printf("couldn't url_fopen()\n");
}
case CFTYPE_CURL:
curl_fill_buffer(file,want,1);
do {
nread = url_fread(buffer, sizeof(buffer), 1, handle);
/* check if there's data in the buffer - if not, the fill either errored or
* EOF */
if(!file->buffer_pos)
return NULL;
printf("We got: %d bytes\n", nread);
} while(nread);
/* ensure only available data is considered */
if(file->buffer_pos < want)
want = file->buffer_pos;
url_fclose(handle);
/*buffer contains data */
/* look for newline or eof */
for(loop=0;loop < want;loop++)
{
if(file->buffer[loop] == '\n')
{
want=loop+1;/* include newline */
break;
}
}
return 0;
/* xfer data to caller */
memcpy(ptr, file->buffer, want);
ptr[want]=0;/* always null terminate */
curl_use_buffer(file,want);
/*printf("(fgets) return %d bytes %d left\n", want,file->buffer_pos);*/
break;
default: /* unknown or unsupported type - oh dear */
ptr=NULL;
errno=EBADF;
break;
}
return ptr;/*success */
}
void
url_rewind(URL_FILE *file)
{
switch(file->type)
{
case CFTYPE_FILE:
rewind(file->handle.file); /* passthrough */
break;
case CFTYPE_CURL:
/* halt transaction */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* restart */
curl_multi_add_handle(multi_handle, file->handle.curl);
/* ditch buffer - write will recreate - resets stream pos*/
if(file->buffer)
free(file->buffer);
file->buffer=NULL;
file->buffer_pos=0;
file->buffer_len=0;
break;
default: /* unknown or unsupported type - oh dear */
break;
}
}
/* Small main program to retrieve from a url using fgets and fread saving the
* output to two test files (note the fgets method will corrupt binary files if
* they contain 0 chars) */
int
main(int argc, char *argv[])
{
URL_FILE *handle;
FILE *outf;
int nread;
char buffer[256];
char *url;
if(argc < 2)
{
url="http://192.168.7.3/testfile";/* default to testurl */
}
else
{
url=argv[1];/* use passed url */
}
/* copy from url line by line with fgets */
outf=fopen("fgets.test","w+");
if(!outf)
{
perror("couldnt open fgets output file\n");
return 1;
}
handle = url_fopen(url, "r");
if(!handle)
{
printf("couldn't url_fopen()\n");
fclose(outf);
return 2;
}
while(!url_feof(handle))
{
url_fgets(buffer,sizeof(buffer),handle);
fwrite(buffer,1,strlen(buffer),outf);
}
url_fclose(handle);
fclose(outf);
/* Copy from url with fread */
outf=fopen("fread.test","w+");
if(!outf)
{
perror("couldnt open fread output file\n");
return 1;
}
handle = url_fopen("testfile", "r");
if(!handle) {
printf("couldn't url_fopen()\n");
fclose(outf);
return 2;
}
do {
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
} while(nread);
url_fclose(handle);
fclose(outf);
/* Test rewind */
outf=fopen("rewind.test","w+");
if(!outf)
{
perror("couldnt open fread output file\n");
return 1;
}
handle = url_fopen("testfile", "r");
if(!handle) {
printf("couldn't url_fopen()\n");
fclose(outf);
return 2;
}
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
url_rewind(handle);
buffer[0]='\n';
fwrite(buffer,1,1,outf);
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
url_fclose(handle);
fclose(outf);
return 0;/* all done */
}

View File

@@ -11,6 +11,9 @@
#include <stdio.h>
#include <curl/curl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
/*
* This example shows an FTP upload, with a rename of the file just after

View File

@@ -74,6 +74,10 @@ int main(int argc, char **argv)
* bytes big and contains the remote file.
*
* Do something nice with it!
*
* You should be aware of the fact that at this point we might have an
* allocated data block, and nothing has yet deallocated that data. So when
* you're done with it, you should free() it as a nice application.
*/
return 0;

31
docs/examples/makefile.dj Normal file
View File

@@ -0,0 +1,31 @@
#
# Adapted for djgpp / Watt-32 / DOS by
# Gisle Vanem <giva@bgnett.no>
#
include ../../packages/DOS/common.dj
CFLAGS += -I../../include
LIBS = ../../lib/libcurl.a
ifeq ($(USE_SSL),1)
LIBS += $(OPENSSL_ROOT)/lib/libssl.a $(OPENSSL_ROOT)/lib/libcrypt.a
endif
LIBS += $(WATT32_ROOT)/lib/libwatt.a $(ZLIB_ROOT)/libz.a
PROGRAMS = fopen.exe ftpget.exe ftpgetre.exe ftpuploa.exe getinmem.exe \
http-pos.exe httpput.exe multi-ap.exe multi-do.exe \
multi-po.exe multi-si.exe persista.exe post-cal.exe \
postit2.exe sepheade.exe simple.exe simpless.exe
all: $(PROGRAMS)
.c.exe:
$(CC) $(CFLAGS) -o $@ $^ $(LIBS)
@echo
clean:
rm -f $(PROGRAMS)

View File

@@ -21,7 +21,7 @@
size_t write_data(void *ptr, size_t size, size_t nmemb, void *stream)
{
written = fwrite(ptr, size, nmemb, (FILE *)stream);
int written = fwrite(ptr, size, nmemb, (FILE *)stream);
return written;
}

View File

@@ -40,7 +40,7 @@ int main(int argc, char **argv)
FILE *headerfile;
const char *pCertFile = "testcert.pem";
const char *pCACertFile="cacert.pem"
const char *pCACertFile="cacert.pem";
const char *pKeyName;
const char *pKeyType;

View File

@@ -28,7 +28,6 @@ man_MANS = \
curl_mprintf.3 \
curl_global_init.3 \
curl_global_cleanup.3 \
libcurl.3 \
curl_multi_add_handle.3 \
curl_multi_cleanup.3 \
curl_multi_fdset.3 \
@@ -36,7 +35,11 @@ man_MANS = \
curl_multi_init.3 \
curl_multi_perform.3 \
curl_multi_remove_handle.3 \
curl_share_cleanup.3 curl_share_init.3 curl_share_setopt.3 \
libcurl.3 \
libcurl-easy.3 \
libcurl-multi.3 \
libcurl-share.3 \
libcurl-errors.3
HTMLPAGES = \
@@ -63,7 +66,6 @@ HTMLPAGES = \
curl_mprintf.html \
curl_global_init.html \
curl_global_cleanup.html \
libcurl.html \
curl_multi_add_handle.html \
curl_multi_cleanup.html \
curl_multi_fdset.html \
@@ -71,9 +73,12 @@ HTMLPAGES = \
curl_multi_init.html \
curl_multi_perform.html \
curl_multi_remove_handle.html \
curl_share_cleanup.html curl_share_init.html curl_share_setopt.html \
libcurl.html \
libcurl-multi.html \
libcurl-errors.html \
index.html
libcurl-easy.html \
libcurl-share.html \
libcurl-errors.html
PDFPAGES = \
curl_easy_cleanup.pdf \
@@ -99,7 +104,6 @@ PDFPAGES = \
curl_mprintf.pdf \
curl_global_init.pdf \
curl_global_cleanup.pdf \
libcurl.pdf \
curl_multi_add_handle.pdf \
curl_multi_cleanup.pdf \
curl_multi_fdset.pdf \
@@ -107,26 +111,31 @@ PDFPAGES = \
curl_multi_init.pdf \
curl_multi_perform.pdf \
curl_multi_remove_handle.pdf \
curl_share_cleanup.pdf curl_share_init.pdf curl_share_setopt.pdf \
libcurl.pdf \
libcurl-multi.pdf \
libcurl-easy.pdf \
libcurl-share.pdf \
libcurl-errors.pdf
EXTRA_DIST = $(man_MANS) $(HTMLPAGES) $(PDFPAGES)
CLEANFILES = $(HTMLPAGES) $(PDFPAGES)
EXTRA_DIST = $(man_MANS) $(HTMLPAGES) index.html $(PDFPAGES)
MAN2HTML= gnroff -man $< | man2html >$@
SUFFIXES = .1 .3 .html
SUFFIXES = .3 .html
html: $(HTMLPAGES)
.3.html:
$(MAN2HTML)
.1.html:
$(MAN2HTML)
pdf: $(PDFPAGES)
pdf:
for file in $(man_MANS); do \
foo=`echo $$file | sed -e 's/\.[0-9]$$//g'`; \
groff -Tps -man $$file >$$foo.ps; \
ps2pdf $$foo.ps $$foo.pdf; \
done
.3.pdf:
@(foo=`echo $@ | sed -e 's/\.[0-9]$$//g'`; \
groff -Tps -man $< >$$foo.ps; \
ps2pdf $$foo.ps $@; \
rm $$foo.ps; \
echo "converted $< to $@")

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_easy_init 3 "25 Apr 2002" "libcurl 7.9.7" "libcurl Manual"
.TH curl_easy_getinfo 3 "25 Apr 2002" "libcurl 7.9.7" "libcurl Manual"
.SH NAME
curl_easy_getinfo - Extract information from a curl session (added in 7.4)
.SH SYNOPSIS
@@ -78,7 +78,8 @@ uploaded.
.TP
.B CURLINFO_SIZE_DOWNLOAD
Pass a pointer to a double to receive the total amount of bytes that were
downloaded.
downloaded. The amount is only for the latest transfer and will be reset again
for each new transfer.
.TP
.B CURLINFO_SPEED_DOWNLOAD
Pass a pointer to a double to receive the average download speed that curl

View File

@@ -1,7 +1,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_easy_setopt 3 "3 Dec 2002" "libcurl 7.10.3" "libcurl Manual"
.TH curl_easy_setopt 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_easy_setopt - set options for a curl easy handle
.SH SYNOPSIS
@@ -75,7 +75,7 @@ of bytes actually taken care of. If that amount differs from the amount passed
to your function, it'll signal an error to the library and it will abort the
transfer and return \fICURLE_WRITE_ERROR\fP.
Set the \fIstream\fP argument with the \fBCURLOPT_FILE\fP option.
Set the \fIstream\fP argument with the \fBCURLOPT_WRITEDATA\fP option.
\fBNOTE:\fP you will be passed as much data as possible in all invokes, but
you cannot possibly make any assumptions. It may be one byte, it may be
@@ -143,12 +143,11 @@ operation and an error (CURLE_BAD_PASSWORD_ENTERED) will be returned.
is a zero-terminated string that is text that prefixes the input request.
\fIbuffer\fP is a pointer to data where the entered password should be stored
and \fIbuflen\fP is the maximum number of bytes that may be written in the
buffer. (Added in 7.4.2)
buffer.
.TP
.B CURLOPT_PASSWDDATA
Pass a void * to whatever data you want. The passed pointer will be the first
argument sent to the specified \fICURLOPT_PASSWDFUNCTION\fP function. (Added in
7.4.2)
argument sent to the specified \fICURLOPT_PASSWDFUNCTION\fP function.
.TP
.B CURLOPT_HEADERFUNCTION
Function pointer that should match the following prototype: \fIsize_t
@@ -161,7 +160,7 @@ multiplied with \fInmemb\fP. The pointer named \fIstream\fP will be the one
you passed to libcurl with the \fICURLOPT_WRITEHEADER\fP option. Return the
number of bytes actually written or return -1 to signal error to the library
(it will cause it to abort the transfer with a \fICURLE_WRITE_ERROR\fP return
code). (Added in 7.7.2)
code).
.TP
.B CURLOPT_WRITEHEADER
Pass a pointer to be used to write the header part of the received data to. If
@@ -248,23 +247,23 @@ default. (Added in 7.10)
Set the parameter to non-zero to get the library to tunnel all operations
through a given HTTP proxy. Note that there is a big difference between using
a proxy and to tunnel through it. If you don't know what this means, you
probably don't want this tunneling option. (Added in 7.3)
probably don't want this tunneling option.
.TP
.B CURLOPT_INTERFACE
Pass a char * as parameter. This sets the interface name to use as outgoing
network interface. The name can be an interface name, an IP address or a host
name. (Added in 7.3)
name.
.TP
.B CURLOPT_DNS_CACHE_TIMEOUT
Pass a long, this sets the timeout in seconds. Name resolves will be kept in
memory for this number of seconds. Set to zero (0) to completely disable
caching, or set to -1 to make the cached entries remain forever. By default,
libcurl caches info for 60 seconds. (Added in 7.9.3)
libcurl caches info for 60 seconds.
.TP
.B CURLOPT_DNS_USE_GLOBAL_CACHE
Pass a long. If the value is non-zero, it tells curl to use a global DNS cache
that will survive between easy handle creations and deletions. This is not
thread-safe and this will use a global variable. (Added in 7.9.3)
thread-safe and this will use a global variable.
.TP
.B CURLOPT_BUFFERSIZE
Pass a long specifying your preferred size for the receive buffer in libcurl.
@@ -272,7 +271,7 @@ The main point of this would be that the write callback gets called more often
and with smaller chunks. This is just treated as a request, not an order. You
cannot be guaranteed to actually get the given size. (Added in 7.10)
.PP
.SH NAMES and PASSWORDS OPTIONS
.SH NAMES and PASSWORDS OPTIONS (Authentication)
.TP 0.4i
.B CURLOPT_NETRC
This parameter controls the preference of libcurl between using user names and
@@ -315,22 +314,94 @@ user.
.TP
.B CURLOPT_USERPWD
Pass a char * as parameter, which should be [user name]:[password] to use for
the connection. If the password is left out, you will be prompted for it.
\fICURLOPT_PASSWDFUNCTION\fP can be used to set your own prompt function.
the connection. If both the colon and password are left out, you will be
prompted for it, while using a colon with no password will make libcurl use an
empty password. \fICURLOPT_PASSWDFUNCTION\fP can be used to set your own
prompt function.
When using HTTP and CURLOPT_FOLLOWLOCATION, libcurl might perform several
requests to possibly different hosts. libcurl will only send this user and
password information to hosts using the initial host name (unless
CURLOPT_UNRESTRICTED_AUTH is set), so if libcurl follows locations to other
hosts it will not send the user and password to those. This is enforced to
prevent accidental information leakage.
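A minimal sketch of how this might look (the \fICURL *curl\fP easy handle is
assumed to be initialized elsewhere and the credentials are invented):

  /* user name and password, colon-separated */
  curl_easy_setopt(curl, CURLOPT_USERPWD, "myname:mypassword");
  /* a trailing colon with nothing after it means an empty password, no prompt */
  curl_easy_setopt(curl, CURLOPT_USERPWD, "anonymous:");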
.TP
.B CURLOPT_PROXYUSERPWD
Pass a char * as parameter, which should be [user name]:[password] to use for
the connection to the HTTP proxy. If the password is left out, you will be
prompted for it. \fICURLOPT_PASSWDFUNCTION\fP can be used to set your own
prompt function.
.TP
.B CURLOPT_HTTPAUTH
Pass a long as parameter, which is set to a bitmask, to tell libcurl what
authentication method(s) you want it to use. The available bits are listed
below. If more than one bit is set, libcurl will first query the site to see
what authentication methods it supports and then pick the best one you allow
it to use. Note that for some methods, this will induce an extra network
round-trip. Set the actual name and password with the \fICURLOPT_USERPWD\fP
option. (Added in 7.10.6)
.RS
.TP 5
.B CURLAUTH_BASIC
HTTP Basic authentication. This is the default choice, and the only method
that is in wide-spread use and supported virtually everywhere. This is sending
the user name and password over the network in plain text, easily captured by
others.
.TP
.B CURLAUTH_DIGEST
HTTP Digest authentication. Digest authentication is defined in RFC2617 and
is a more secure way to do authentication over public networks than the
regular old-fashioned Basic method.
.TP
.B CURLAUTH_GSSNEGOTIATE
HTTP GSS-Negotiate authentication. The GSS-Negotiate method was designed by
Microsoft and is used in their web applications. It is primarily meant as a
support for Kerberos5 authentication but may also be used along with other
authentication methods. For more information see IETF draft
draft-brezak-spnego-http-04.txt.
.TP
.B CURLAUTH_NTLM
HTTP NTLM authentication. A proprietary protocol invented and used by
Microsoft. It uses a challenge-response and hash concept similar to Digest to
prevent the password from being eavesdropped.
.TP
.B CURLAUTH_ANY
This is a convenience macro that sets all bits and thus makes libcurl pick any
it finds suitable. libcurl will automatically select the one it finds most
secure.
.TP
.B CURLAUTH_ANYSAFE
This is a convenience macro that sets all bits except Basic and thus makes
libcurl pick any it finds suitable. libcurl will automatically select the one it
finds most secure.
.RE
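As a hedged illustration of combining these bits (the URL and credentials are
invented; error checking is omitted):

  CURL *curl = curl_easy_init();
  curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/protected/");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "myname:mypassword");
  /* allow Digest or Basic; libcurl probes and uses the most secure one allowed */
  curl_easy_setopt(curl, CURLOPT_HTTPAUTH, (long)(CURLAUTH_DIGEST | CURLAUTH_BASIC));
  curl_easy_perform(curl);
  curl_easy_cleanup(curl);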
.TP
.B CURLOPT_PROXYAUTH
Pass a long as parameter, which is set to a bitmask, to tell libcurl what
authentication method(s) you want it to use for your proxy authentication. If
more than one bit is set, libcurl will first query the site to see what
authentication methods it supports and then pick the best one you allow it to
use. Note that for some methods, this will induce an extra network
round-trip. Set the actual name and password with the
\fICURLOPT_PROXYUSERPWD\fP option. The bitmask can be constructed by or'ing
together the bits listed above for the \fICURLOPT_HTTPAUTH\fP option. As of
this writing, only Basic and NTLM work. (Added in 7.10.7)
.PP
.SH HTTP OPTIONS
.TP 0.4i
.B CURLOPT_ENCODING
Two encodings are supported: \fIidentity\fP, which does nothing, and
\fIdeflate\fP to request the server to compress its response using the
zlib algorithm. This is not an order, the server may or may not do it.
See the special file lib/README.encoding for details.
Sets the contents of the Accept-Encoding: header sent in an HTTP
request, and enables decoding of a response when a Content-Encoding:
header is received. Three encodings are supported: \fIidentity\fP,
which does nothing, \fIdeflate\fP which requests the server to
compress its response using the zlib algorithm, and \fIgzip\fP which
requests the gzip algorithm. If a zero-length string is set, then an
Accept-Encoding: header containing all supported encodings is sent.
This is a request, not an order; the server may or may not do it. This
option must be set (to any non-NULL value) or else any unsolicited
encoding done by the server is ignored. See the special file
lib/README.encoding for details.
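A brief sketch (assuming an initialized \fICURL *curl\fP handle):

  /* empty string: offer every encoding libcurl supports and decode the reply */
  curl_easy_setopt(curl, CURLOPT_ENCODING, "");
  /* or ask for one specific algorithm */
  curl_easy_setopt(curl, CURLOPT_ENCODING, "gzip");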
.TP
.B CURLOPT_FOLLOWLOCATION
A non-zero parameter tells the library to follow any Location: header that the
@@ -341,11 +412,16 @@ new location and follow new Location: headers all the way until no more such
headers are returned. \fICURLOPT_MAXREDIRS\fP can be used to limit the number
of redirects libcurl will follow.
.TP
.B CURLOPT_UNRESTRICTED_AUTH
A non-zero parameter tells the library it can continue to send authentication
(user+password) when following locations, even when hostname changed. Note
that this is meaningful only when setting \fICURLOPT_FOLLOWLOCATION\fP.
.TP
.B CURLOPT_MAXREDIRS
Pass a long. The set number will be the redirection limit. If that many
redirections have been followed, the next redirect will cause an error
(\fICURLE_TOO_MANY_REDIRECTS\fP). This option only makes sense if the
\fICURLOPT_FOLLOWLOCATION\fP is used at the same time. (Added in 7.5)
\fICURLOPT_FOLLOWLOCATION\fP is used at the same time.
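One possible combination, as a sketch (initialized \fICURL *curl\fP handle assumed):

  curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); /* follow Location: headers */
  curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 5L);      /* but give up after 5 redirects */
  /* only set this if you trust every host the site may redirect to */
  curl_easy_setopt(curl, CURLOPT_UNRESTRICTED_AUTH, 1L);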
.TP
.B CURLOPT_PUT
A non-zero parameter tells the library to use HTTP PUT to transfer data. The
@@ -361,9 +437,14 @@ will imply this option.
.TP
.B CURLOPT_POSTFIELDS
Pass a char * as parameter, which should be the full data to post in a HTTP
post operation. This is a normal application/x-www-form-urlencoded kind, which
is the most commonly used one by HTML forms. See also the CURLOPT_POST. Since
7.8, using CURLOPT_POSTFIELDS implies CURLOPT_POST.
post operation. You need to make sure that the data is formatted the way you
want the server to receive it. libcurl will not convert or encode it for
you. Most web servers will assume this data to be url-encoded. Take note.
This POST is a normal application/x-www-form-urlencoded kind (and libcurl will
set that Content-Type by default when this option is used), which is the most
commonly used one by HTML forms. See also the CURLOPT_POST. Using
CURLOPT_POSTFIELDS implies CURLOPT_POST.
\fBNote:\fP to make multipart/formdata posts (aka rfc1867-posts), check out
the \fICURLOPT_HTTPPOST\fP option.
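A minimal sketch (initialized \fICURL *curl\fP handle assumed; the URL and
field names are invented):

  curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/form.cgi");
  /* the data must already be url-encoded; libcurl sends it verbatim */
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "name=daniel&project=curl");
  curl_easy_perform(curl);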
@@ -372,8 +453,7 @@ the \fICURLOPT_HTTPPOST\fP option.
If you want to post data to the server without letting libcurl do a strlen()
to measure the data size, this option must be used. When this option is used
you can post fully binary data, which otherwise is likely to fail. If this
size is set to zero, the library will use strlen() to get the size. (Added in
libcurl 7.2)
size is set to zero, the library will use strlen() to get the size.
.TP
.B CURLOPT_HTTPPOST
Tells libcurl you want a multipart/formdata HTTP POST to be made and you
@@ -411,6 +491,10 @@ curl adds CRLF after each header item. Failure to comply with this will
result in strange bugs because the server will most likely ignore part
of the headers you specified.
The first line in a request (usually containing a GET or POST) is not a header
and cannot be replaced using this option. Only the lines following the
request-line are headers.
\fBNOTE:\fP The most commonly replaced headers have "shortcuts" in the options
CURLOPT_COOKIE, CURLOPT_USERAGENT and CURLOPT_REFERER.
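As an illustrative sketch (initialized \fICURL *curl\fP handle assumed),
replacing one internal header and adding a custom one:

  struct curl_slist *headers = NULL;
  headers = curl_slist_append(headers, "Accept: text/plain");   /* replace a default header */
  headers = curl_slist_append(headers, "X-Custom-Header: yes"); /* add a new one */
  curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
  /* ... perform the transfer(s), then free the list ... */
  curl_slist_free_all(headers);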
.TP
@@ -459,7 +543,14 @@ internally known cookies to the specified file when \fIcurl_easy_cleanup(3)\fP
is called. If no cookies are known, no file will be created. Specify "-" to
instead have the cookies written to stdout. Using this option also enables
cookies for this session, so if you for example follow a location it will make
matching cookies get sent accordingly. (Added in 7.9)
matching cookies get sent accordingly.
.B NOTE
If the cookie jar file can't be created or written to (when the
curl_easy_cleanup() is called), libcurl will not and cannot report an error
for this. Using CURLOPT_VERBOSE or CURLOPT_DEBUGFUNCTION will get a warning to
display, but that is the only visible feedback you get about this possibly
lethal situation.
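A small sketch (initialized \fICURL *curl\fP handle assumed; the file name is
invented):

  /* enable the cookie engine and read any existing cookies from this file */
  curl_easy_setopt(curl, CURLOPT_COOKIEFILE, "cookies.txt");
  /* write all known cookies to the same file when curl_easy_cleanup() is called */
  curl_easy_setopt(curl, CURLOPT_COOKIEJAR, "cookies.txt");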
.TP
.B CURLOPT_TIMECONDITION
Pass a long as parameter. This defines how the CURLOPT_TIMEVALUE time value is
@@ -474,7 +565,7 @@ CURLOPT_TIMECONDITION.
.B CURLOPT_HTTPGET
Pass a long. If the long is non-zero, this forces the HTTP request to get back
to GET. Only really usable if POST, PUT or a custom request have been used
previously using the same curl handle. (Added in 7.8.1)
previously using the same curl handle.
.TP
.B CURLOPT_HTTP_VERSION
Pass a long, set to one of the values described below. They force libcurl to
@@ -502,6 +593,9 @@ tells the remote server to connect to our specified IP address. The string may
be a plain IP address, a host name, a network interface name (under Unix) or
just a '-' letter to let the library use your systems default IP
address. Default FTP operations are passive, and thus won't use PORT.
You disable PORT again and go back to using the passive version by setting
this option to NULL.
.TP
.B CURLOPT_QUOTE
Pass a pointer to a linked list of FTP commands to pass to the server prior to
@@ -539,11 +633,23 @@ and symbolic links.
A non-zero parameter tells the library to append to the remote file instead of
overwriting it. This is only useful when uploading to an ftp site.
.TP
.B CURLOPT_FTP_USE_EPRT
Pass a long. If the value is non-zero, it tells curl to use the EPRT (and
LPRT) command when doing active FTP downloads (which is enabled by
CURLOPT_FTPPORT). Using EPRT means that it will first attempt to use EPRT and
then LPRT before using PORT, but if you pass FALSE (zero) to this option, it
will not try using EPRT or LPRT, only plain PORT. (Added in 7.10.5)
.TP
.B CURLOPT_FTP_USE_EPSV
Pass a long. If the value is non-zero, it tells curl to use the EPSV command
when doing passive FTP downloads (which it always does by default). Using EPSV
means that it will first attempt to use EPSV before using PASV, but if you
pass FALSE (zero) to this option, it will not try using EPSV, only plain PASV.
.TP
.B CURLOPT_FTP_CREATE_MISSING_DIRS
Pass a long. If the value is non-zero, curl will attempt to create any remote
directory that it fails to CWD into. CWD is the command that changes working
directory. (Added in 7.10.7)
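A hedged sketch of an FTP upload using these options (initialized
\fICURL *curl\fP handle assumed, the read callback or input file setup is
omitted, and the URL is invented):

  curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/new/dir/file.txt");
  curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
  /* create /new/dir on the server if CWD into it fails */
  curl_easy_setopt(curl, CURLOPT_FTP_CREATE_MISSING_DIRS, 1L);
  /* stick to plain PASV for servers that mishandle EPSV */
  curl_easy_setopt(curl, CURLOPT_FTP_USE_EPSV, 0L);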
.PP
.SH PROTOCOL OPTIONS
.TP 0.4i
@@ -572,17 +678,25 @@ want the transfer to start from.
.TP
.B CURLOPT_CUSTOMREQUEST
Pass a pointer to a zero terminated string as parameter. It will be used
instead of GET or HEAD when doing the HTTP request. This is useful for doing
DELETE or other more or less obscure HTTP requests. Don't do this at will,
make sure your server supports the command first.
instead of GET or HEAD when doing a HTTP request, or instead of LIST or NLST
when doing an ftp directory listing. This is useful for doing DELETE or other
more or less obscure HTTP requests. Don't do this at will, make sure your
server supports the command first.
NOTE: many people have wrongly used this option to replace the entire request
with their own, including multiple headers and POST contents. While that might
work in many cases, it will cause libcurl to send invalid requests and it
could possibly confuse the remote server badly. Use \fICURLOPT_POST\fP and
\fICURLOPT_POSTFIELDS\fP to set POST data. Use \fICURLOPT_HTTPHEADER\fP to
replace or extend the set of headers sent by libcurl. Use
\fICURLOPT_HTTP_VERSION\fP to change HTTP version.
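For instance, a minimal sketch (assuming an already created easy handle) that
issues a DELETE request:
curl_easy_setopt(handle, CURLOPT_CUSTOMREQUEST, "DELETE");
curl_easy_perform(handle);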
.TP
.B CURLOPT_FILETIME
Pass a long. If it is a non-zero value, libcurl will attempt to get the
modification date of the remote document in this operation. This requires that
the remote server sends the time or replies to a time querying command. The
\fIcurl_easy_getinfo(3)\fP function with the \fICURLINFO_FILETIME\fP argument
can be used after a transfer to extract the received time (if any). (Added in
7.5)
can be used after a transfer to extract the received time (if any).
.TP
.B CURLOPT_NOBODY
A non-zero parameter tells the library to not include the body-part in the
@@ -633,7 +747,7 @@ open connections to increase.
\fBNOTE:\fP if you already have performed transfers with this curl handle,
setting a smaller MAXCONNECTS than before may cause open connections to get
closed unnecessarily. (Added in 7.7)
closed unnecessarily.
.TP
.B CURLOPT_CLOSEPOLICY
Pass a long. This option sets what policy libcurl should use when the
@@ -644,7 +758,7 @@ the connection that was least recently used, that connection is also least
likely to be capable of re-use. Use \fICURLCLOSEPOLICY_OLDEST\fP to make
libcurl close the oldest connection, the one that was created first among the
ones in the connection cache. The other close policies are not supported
yet. (Added in 7.7)
yet.
.TP
.B CURLOPT_FRESH_CONNECT
Pass a long. Set to non-zero to make the next transfer use a new (fresh)
@@ -652,7 +766,7 @@ connection by force. If the connection cache is full before this connection,
one of the existing connections will be closed according to the selected or
default policy. This option should be used with caution and only if you
understand what it does. Set this to 0 to have libcurl attempt re-using an
existing connection (default behavior). (Added in 7.7)
existing connection (default behavior).
.TP
.B CURLOPT_FORBID_REUSE
Pass a long. Set to non-zero to make the next transfer explicitly close the
@@ -660,7 +774,7 @@ connection when done. Normally, libcurl keeps all connections alive when done
with one transfer in case there comes a succeeding one that can re-use them.
This option should be used with caution and only if you understand what it
does. Set to 0 to have libcurl keep the connection open for possibly later
re-use (default behavior). (Added in 7.7)
re-use (default behavior).
.TP
.B CURLOPT_CONNECTTIMEOUT
Pass a long. It should contain the maximum time in seconds that you allow the
@@ -697,12 +811,11 @@ a certificate but you need one to load your private key.
.B CURLOPT_SSLKEY
Pass a pointer to a zero terminated string as parameter. The string should be
the file name of your private key. The default format is "PEM" and can be
changed with \fICURLOPT_SSLKEYTYPE\fP. (Added in 7.9.3)
changed with \fICURLOPT_SSLKEYTYPE\fP.
.TP
.B CURLOPT_SSLKEYTYPE
Pass a pointer to a zero terminated string as parameter. The string should be
the format of your private key. Supported formats are "PEM", "DER" and "ENG".
(Added in 7.9.3)
\fBNOTE:\fP The format "ENG" enables you to load the private key from a crypto
engine. In this case \fICURLOPT_SSLKEY\fP is used as an identifier passed to
@@ -713,19 +826,18 @@ Pass a pointer to a zero terminated string as parameter. It will be used as
the password required to use the \fICURLOPT_SSLKEY\fP private key. If the
password is not supplied, you will be prompted for
it. \fICURLOPT_PASSWDFUNCTION\fP can be used to set your own prompt function.
(Added in 7.9.3)
.TP
.B CURLOPT_SSL_ENGINE
Pass a pointer to a zero terminated string as parameter. It will be used as
the identifier for the crypto engine you want to use for your private
key. (Added in 7.9.3)
key.
\fBNOTE:\fP If the crypto device cannot be loaded,
\fICURLE_SSL_ENGINE_NOTFOUND\fP is returned.
.TP
.B CURLOPT_SSL_ENGINEDEFAULT
Sets the actual crypto engine as the default for (asymmetric) crypto
operations. (Added in 7.9.3)
operations.
\fBNOTE:\fP If the crypto device cannot be set,
\fICURLE_SSL_ENGINE_SETFAILED\fP is returned.
@@ -739,15 +851,15 @@ servers make this difficult why you at times may have to use this option.
Pass a long that is set to a zero value to stop curl from verifying the peer's
certificate (starting with 7.10, this option is set to TRUE by default). Alternate
certificates to verify against can be specified with the CURLOPT_CAINFO option
(Added in 7.4.2) or a certificate directory can be specified with the
CURLOPT_CAPATH option (Added in 7.9.8). As of 7.10, curl installs a default
bundle. CURLOPT_SSL_VERIFYHOST may also need to be set to 1 or 0 if
or a certificate directory can be specified with the CURLOPT_CAPATH option
(Added in 7.9.8). As of 7.10, curl installs a default bundle.
CURLOPT_SSL_VERIFYHOST may also need to be set to 1 or 0 if
CURLOPT_SSL_VERIFYPEER is disabled (it defaults to 2).
.TP
.B CURLOPT_CAINFO
Pass a char * to a zero terminated string naming a file holding one or more
certificates to verify the peer with. This only makes sense when used in
combination with the CURLOPT_SSL_VERIFYPEER option. (Added in 7.4.2)
combination with the CURLOPT_SSL_VERIFYPEER option.
.TP
.B CURLOPT_CAPATH
Pass a char * to a zero terminated string naming a directory holding multiple
@@ -769,8 +881,7 @@ socket. It will be used to seed the random engine for SSL.
.B CURLOPT_SSL_VERIFYHOST
Pass a long. Set if we should verify the Common name from the peer certificate
in the SSL handshake, set 1 to check existence, 2 to ensure that it matches
the provided hostname. This is by default set to 2. (Added in 7.8.1, default
changed in 7.10)
the provided hostname. This is by default set to 2. (default changed in 7.10)
.TP
.B CURLOPT_SSL_CIPHER_LIST
Pass a char *, pointing to a zero terminated string holding the list of
@@ -789,7 +900,7 @@ Pass a char * as parameter. Set the krb4 security level, this also enables
krb4 awareness. This is a string, 'clear', 'safe', 'confidential' or
\&'private'. If the string is set but doesn't match one of these, 'private'
will be used. Set the string to NULL to disable kerberos4. The kerberos
support only works for FTP. (Added in 7.3)
support only works for FTP.
.PP
.SH OTHER OPTIONS
.TP 0.4i
@@ -804,7 +915,3 @@ error occurred as \fI<curl/curl.h>\fP defines. See the \fIlibcurl-errors.3\fP
man page for the full list with descriptions.
.SH "SEE ALSO"
.BR curl_easy_init "(3), " curl_easy_cleanup "(3), "
.SH BUGS
If you find any bugs, or just have questions, subscribe to one of the mailing
lists and post. We won't bite.

View File

@@ -2,16 +2,16 @@
.\" nroff -man [file]
.\" $Id:
.\"
.TH curl_free 3 "24 Sept 2002" "libcurl 7.10" "libcurl Manual"
.TH curl_free 3 "12 Aug 2003" "libcurl 7.10" "libcurl Manual"
.SH NAME
curl_free - reclaim memory that has been obtained through a libcurl call
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "void *curl_free( char *" ptr " );"
.BI "void curl_free( char *" ptr " );"
.ad
.SH DESCRIPTION
curl_free reclaims memory that has been obtained through a libcurl call.
Use curl_free() instead of free() to avoid anomalies that can result from differences in memory management between your application and libcurl.
.SH "SEE ALSO"
.I curl_unescape(), curl_free()
.I curl_unescape()

View File

@@ -1,6 +1,6 @@
.\" $Id$
.\"
.TH curl_multi_info_read 3 "1 March 2002" "libcurl 7.9.5" "libcurl Manual"
.TH curl_multi_info_read 3 "27 Feb 2002" "libcurl 7.10.3" "libcurl Manual"
.SH NAME
curl_multi_info_read - read multi stack informationals
.SH SYNOPSIS
@@ -10,15 +10,15 @@ CURLMsg *curl_multi_info_read( CURLM *multi_handle,
int *msgs_in_queue);
.ad
.SH DESCRIPTION
Ask the multi handle if there's any messages/informationals from the
individual transfers. Messages include informationals such as an error code
from the transfer or just the fact that a transfer is completed. More details
on these should be written down as well.
Ask the multi handle if there are any messages/informationals from the
individual transfers. Messages may include informationals such as an error
code from the transfer or just the fact that a transfer is completed. More
details on these should be written down as well.
Repeated calls to this function will return a new struct each time, until a
special "end of msgs" struct is returned as a signal that there is no more to
get at this point. The integer pointed to with \fImsgs_in_queue\fP will
contain the number of remaining messages after this function was called.
NULL is returned as a signal that there is no more to get at this point. The
integer pointed to with \fImsgs_in_queue\fP will contain the number of
remaining messages after this function was called.
The data the returned pointer points to will not survive calling
curl_multi_cleanup().
@@ -26,10 +26,19 @@ curl_multi_cleanup().
The 'CURLMsg' struct is very simple and only contains very basic information.
If more involved information is wanted, the particular "easy handle" is
present in that struct and can thus be used in subsequent regular
curl_easy_getinfo() calls (or similar).
curl_easy_getinfo() calls (or similar):
struct CURLMsg {
CURLMSG msg; /* what this message means */
CURL *easy_handle; /* the handle it concerns */
union {
void *whatever; /* message-specific data */
CURLcode result; /* return code for transfer */
} data;
};
.SH "RETURN VALUE"
A pointer to a filled-in struct, or NULL if it failed or ran out of
structs. It also writes the number of messages left in the queue (after this
read) in the integer the second argument points to.
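A minimal usage sketch, assuming a multi handle (here called multi_handle)
that has been driven with curl_multi_perform():
CURLMsg *msg;
int msgs_left;
while((msg = curl_multi_info_read(multi_handle, &msgs_left))) {
  if(msg->msg == CURLMSG_DONE)
    fprintf(stderr, "a transfer finished, result code %d\n",
            (int)msg->data.result);
}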
.SH "SEE ALSO"
.BR curl_multi_cleanup "(3)," curl_multi_init "(3)," curl_multi_perform "(3)"
.BR curl_multi_cleanup "(3), " curl_multi_init "(3), " curl_multi_perform "(3)"

View File

@@ -14,6 +14,9 @@ specified easy handle be removed from this multi handle's control.
When the easy handle has been removed from a multi stack, it is again
perfectly legal to invoke \fIcurl_easy_perform()\fP on this easy handle.
Removing a handle while it is being used will effectively halt all transfers
in progress.
.SH RETURN VALUE
CURLMcode type, general libcurl multi interface error code.
.SH "SEE ALSO"

View File

@@ -0,0 +1,19 @@
.\" $Id$
.\"
.TH curl_share_cleanup 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_cleanup - Clean up a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "CURLSHcode curl_share_cleanup( );"
.ad
.SH DESCRIPTION
This function deletes a shared object. The share handle cannot be used anymore
when this function has been called.
.SH RETURN VALUE
If this function returns non-zero, the object was not properly deleted and it
still remains!
.SH "SEE ALSO"
.BR curl_share_init "(3), " curl_share_setopt "(3)"

View File

@@ -0,0 +1,21 @@
.\" $Id$
.\"
.TH curl_share_init 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_init - Create a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "CURLSH *curl_share_init( );"
.ad
.SH DESCRIPTION
This function returns a CURLSH handle to be used as input to all the other
share functions, sometimes referred to as a share handle in some places in
the documentation. This init call MUST have a corresponding call to
\fIcurl_share_cleanup\fP when all operations using the share are complete.
.SH RETURN VALUE
If this function returns NULL, something went wrong and you got no share
object to use.
.SH "SEE ALSO"
.BR curl_share_cleanup "(3), " curl_share_setopt "(3)"

View File

@@ -0,0 +1,46 @@
.\" $Id$
.\"
.TH curl_share_setopt 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_setopt - Set options for a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
CURLSHcode curl_share_setopt(CURLSH *share, CURLSHoption option, parameter);
.ad
.SH DESCRIPTION
Set the \fIoption\fP to \fIparameter\fP for the given \fIshare\fP.
.SH OPTIONS
.TP 0.4i
.B CURLSHOPT_LOCKFUNC
The \fIparameter\fP must be a pointer to a function matching the following
prototype:
void lock_function(CURL *handle, curl_lock_data data, curl_lock_access access,
void *userptr);
\fIdata\fP defines what data libcurl wants to lock, and you must make sure that
only one lock is given at any time for each kind of data.
\fIaccess\fP defines what access type libcurl wants, shared or single.
\fIuserptr\fP is the pointer you set with \fICURLSHOPT_USERDATA\fP. A small
callback sketch follows after this option list.
.TP
.B CURLSHOPT_UNLOCKFUNC
The \fIparameter\fP must be a pointer to a function matching the following
prototype:
void unlock_function(CURL *handle, curl_lock_data data, void *userptr);
\fIdata\fP defines what data libcurl wants to unlock, and \fIuserptr\fP is the
pointer you set with \fICURLSHOPT_USERDATA\fP.
.TP
.B CURLSHOPT_SHARE
The \fIparameter\fP specifies a type of data that should be shared, as a
curl_lock_data value. Currently cookie data (CURL_LOCK_DATA_COOKIE) and DNS
cache data (CURL_LOCK_DATA_DNS) can be shared.
.TP
.B CURLSHOPT_UNSHARE
The \fIparameter\fP specifies a type of data, as a curl_lock_data value, that
should no longer be shared by this object.
.TP
.B CURLSHOPT_USERDATA
The \fIparameter\fP is a pointer that will be passed to the lock and unlock
functions as their \fIuserptr\fP argument.
.PP
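A minimal callback sketch, assuming POSIX mutexes (the names lock_cb and
unlock_cb are just examples):
#include <pthread.h>
#include <curl/curl.h>
static pthread_mutex_t share_lock = PTHREAD_MUTEX_INITIALIZER;
static void lock_cb(CURL *handle, curl_lock_data data,
                    curl_lock_access access, void *userptr)
{
  (void)handle; (void)data; (void)access; (void)userptr;
  pthread_mutex_lock(&share_lock); /* one coarse mutex guards all shared data */
}
static void unlock_cb(CURL *handle, curl_lock_data data, void *userptr)
{
  (void)handle; (void)data; (void)userptr;
  pthread_mutex_unlock(&share_lock);
}
Install them with curl_share_setopt(share, CURLSHOPT_LOCKFUNC, lock_cb) and
curl_share_setopt(share, CURLSHOPT_UNLOCKFUNC, unlock_cb).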
.SH RETURN VALUE
CURLSHE_OK (zero) means the option was set; any other return code indicates an
error.
.SH "SEE ALSO"
.BR curl_share_cleanup "(3), " curl_share_init "(3)"

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_slist_append 3 "5 March 2001" "libcurl 7.0" "libcurl Manual"
.TH curl_slist_append 3 "19 Jun 2003" "libcurl 7.10.4" "libcurl Manual"
.SH NAME
curl_slist_append - add a string to an slist
.SH SYNOPSIS
@@ -13,17 +13,27 @@ curl_slist_append - add a string to an slist
.ad
.SH DESCRIPTION
curl_slist_append() appends a specified string to a linked list of
strings. The existing
.I list
should be passed as the first argument while the new list is returned from
this function. The specified
.I string
has been appended when this function returns.
strings. The existing \fIlist\fP should be passed as the first argument while
the new list is returned from this function. The specified \fIstring\fP has
been appended when this function returns. curl_slist_append() copies the
string.
The list should be freed again (after usage) with \fBcurl_slist_free_all()\fP.
.SH RETURN VALUE
A null pointer is returned if anything went wrong, otherwise the new list
pointer is returned.
.SH EXAMPLE
CURL *handle = curl_easy_init();
struct curl_slist *slist = NULL;
slist = curl_slist_append(slist, "pragma:");
curl_easy_setopt(handle, CURLOPT_HTTPHEADER, slist);
curl_easy_perform(handle);
curl_slist_free_all(slist); /* free the list again */
.SH "SEE ALSO"
.BR curl_slist_free_all "(3), "
.SH BUGS
Surely there are some, you tell me!
None.

View File

@@ -13,15 +13,7 @@ curl_version - returns the libcurl version string
.SH DESCRIPTION
Returns a human readable string with the version number of libcurl and some of
its important components (like OpenSSL version).
Note: this returns the actual running lib's version; you might have installed
a newer lib's include files in your system, which may make your
LIBCURL_VERSION #define value differ from this result.
.SH RETURN VALUE
A pointer to a zero terminated string.
.SH "SEE ALSO"
The
.I LIBCURL_VERSION
#define in <curl/curl.h>
.SH BUGS
Surely there are some, you tell me!
.BR curl_version_info "(3)"

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH curl_version_info 3 "30 Sep 2002" "libcurl 7.10" "libcurl Manual"
.TH curl_version_info 3 "12 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_version_info - returns run-time libcurl version info
.SH SYNOPSIS
@@ -69,6 +69,21 @@ supports SSL (HTTPS/FTPS)
.TP
.B CURL_VERSION_LIBZ
supports HTTP deflate using libz
.TP
.B CURL_VERSION_NTLM
supports HTTP NTLM (added in 7.10.6)
.TP
.B CURL_VERSION_GSSNEGOTIATE
supports HTTP GSS-Negotiate (added in 7.10.6)
.TP
.B CURL_VERSION_DEBUG
libcurl was built with extra debug capabilities. This is mainly of
interest for libcurl hackers. (added in 7.10.6)
.TP
.B CURL_VERSION_ASYNCHDNS
libcurl was built with support for asynchronous name lookups, which allows
more exact timeouts (even on Windows) and less blocking when using the multi
interface. (added in 7.10.7)
.PP
\fIssl_version\fP is an ascii string for the OpenSSL version used. If libcurl
has no SSL support, this is NULL.
@@ -83,11 +98,8 @@ libcurl has no libz support, this is NULL.
names protocols that libcurl supports (using lowercase letters). The protocol
names are the same as would be used in URLs. The array is terminated by a NULL
entry.
.SH RETURN VALUE
A pointer to a curl_version_info_data struct.
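A short run-time feature check could look like this (illustrative sketch
only):
curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
if(info->features & CURL_VERSION_ASYNCHDNS)
  printf("asynchronous name resolves are available\n");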
.SH "SEE ALSO"
\fIcurl_version(3)\fP
.SH BUGS
No known bugs.

View File

@@ -12,7 +12,9 @@
<h2>Overviews</h2>
<A HREF="libcurl.html">libcurl</A>
<br><a href="libcurl-easy.html">libcurl-easy</a>
<br><a href="libcurl-multi.html">libcurl-multi</a>
<br><a href="libcurl-share.html">libcurl-share</a>
<br><a href="libcurl-errors.html">libcurl-errors</a>
<br><a href="../libcurl-the-guide">libcurl-the-guide</a> (plain text)
@@ -40,6 +42,9 @@
<br><a href="curl_multi_init.html">curl_multi_init</a>
<br><a href="curl_multi_perform.html">curl_multi_perform</a>
<br><a href="curl_multi_remove_handle.html">curl_multi_remove_handle</a>
<br><a href="curl_share_cleanup.html">curl_share_cleanup</A>
<br><a href="curl_share_init.html">curl_share_init</A>
<br><a href="curl_share_setopt.html">curl_share_setopt</A>
<br><a href="curl_slist_append.html">curl_slist_append</A>
<br><a href="curl_slist_free_all.html">curl_slist_free_all</A>
<br><a href="curl_strequal.html">curl_strequal</A>

View File

@@ -0,0 +1,29 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" $Id$
.\"
.TH libcurl 3 "12 Aug 2003" "libcurl 7.10.7" "libcurl easy interface"
.SH NAME
libcurl-easy \- easy interface overview
.SH DESCRIPTION
When using libcurl's "easy" interface you init your session and get a handle
(often referred to as an "easy handle" in various docs and sources), which you
use as input to the easy interface functions you use. Use
\fIcurl_easy_init()\fP to get the handle.
You continue by setting all the options you want for the upcoming transfer;
the most important among them is the URL itself (you can't transfer anything
without a specified URL as you may have figured out yourself). You might want
to set some callbacks as well that will be called from the library when data
is available etc. \fIcurl_easy_setopt()\fP is used for all this.
When everything is set up, you tell libcurl to perform the transfer using
\fIcurl_easy_perform()\fP. It will then do the entire operation and won't
return until it is done (successfully or not).
After the transfer has been made, you can set new options and make another
transfer, or if you're done, clean up the session by calling
\fIcurl_easy_cleanup()\fP. If you want persistent connections, you don't
clean up immediately, but instead run ahead and perform other transfers using
the same easy handle.
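A minimal sketch of that whole sequence (error handling kept short, the URL is
only an example):
CURL *handle = curl_easy_init();
CURLcode res;
if(handle) {
  curl_easy_setopt(handle, CURLOPT_URL, "http://curl.haxx.se/");
  res = curl_easy_perform(handle);
  if(res != CURLE_OK)
    fprintf(stderr, "transfer failed: %d\n", (int)res);
  curl_easy_cleanup(handle);
}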

View File

@@ -2,7 +2,7 @@
.\" nroff -man [file]
.\" $Id$
.\"
.TH libcurl-multi 5 "13 Oct 2001" "libcurl 7.10.1" "libcurl multi interface"
.TH libcurl-multi 3 "13 Oct 2001" "libcurl 7.10.1" "libcurl multi interface"
.SH NAME
libcurl-multi \- how to use the multi interface
.SH DESCRIPTION

View File

@@ -0,0 +1,46 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" $Id$
.\"
.TH libcurl-share 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl share interface"
.SH NAME
libcurl-share \- how to use the share interface
.SH DESCRIPTION
This is an overview on how to use the libcurl share interface in your C
programs. There are specific man pages for each function mentioned in
here.
All functions in the share interface are prefixed with curl_share.
.SH "OBJECTIVES"
The share interface was added to enable sharing of data between curl
\&"handles".
.SH "ONE SET OF DATA - MANY TRANSFERS"
You can have multiple easy handles share data between them. Have them update
and use the \fBsame\fP cookie database or DNS cache! This way, each single
transfer will take advantage of data updates made by the other transfer(s).
.SH "SHARE OBJECT"
You create a shared object with \fIcurl_share_init()\fP. It returns a handle
for a newly created one.
You tell the shared object what data you want it to share by using
\fIcurl_share_setopt()\fP. Currently you can only share DNS and/or COOKIE
data.
Since you can use this share from multiple threads, and libcurl has no
internal thread synchronization, you must provide mutex callbacks if you
intend to use it multi-threaded. You set lock and unlock functions with
\fIcurl_share_setopt()\fP too.
Then, to make an easy handle use this share, you set the CURLOPT_SHARE option
with \fIcurl_easy_setopt\fP and pass in the share handle. You can make any
number of easy handles share the same share handle.
To make an easy handle stop using that particular share, you set CURLOPT_SHARE
to NULL for that easy handle. To make a handle stop sharing a particular type
of data, you can CURLSHOPT_UNSHARE it.
When you're done using the share, make sure that no easy handle is still using
it, and call \fIcurl_share_cleanup()\fP on the handle.
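A minimal sketch of the sequence above, assuming an existing easy handle named
easy_handle and single-threaded use (so no lock callbacks are shown):
CURLSH *share = curl_share_init();
curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);
curl_easy_setopt(easy_handle, CURLOPT_SHARE, share);
/* ... perform transfers ... */
curl_easy_setopt(easy_handle, CURLOPT_SHARE, NULL);
curl_share_cleanup(share);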
.SH "SEE ALSO"
.BR curl_share_init "(3), " curl_share_setopt "(3), " curl_share_cleanup "(3)"

View File

@@ -7,38 +7,37 @@
libcurl \- client-side URL transfers
.SH DESCRIPTION
This is an overview on how to use libcurl in your C programs. There are
specific man pages for each function mentioned in here. There's also the
libcurl-the-guide document for a complete tutorial to programming with
libcurl.
specific man pages for each function mentioned in here. There are also the
\fIlibcurl-easy\fP man page, the \fIlibcurl-multi\fP man page, the
\fIlibcurl-share\fP man page and the \fIlibcurl-the-guide\fP document for
further reading on how to program with libcurl.
There are a dozen custom bindings that bring libcurl access to your favourite
language. Look elsewhere for documentation on those.
There exist more than a dozen custom bindings that bring libcurl access to
your favourite language. Look elsewhere for documentation on those.
All applications that use libcurl should call \fIcurl_global_init()\fP exactly
once before any libcurl function can be used. After all usage of libcurl is
complete, it \fBmust\fP call \fIcurl_global_cleanup()\fP. In between those two
calls, you can use libcurl as described below.
When using libcurl's "easy" interface you init your session and get a handle,
which you use as input to the easy interface functions you use. Use
\fIcurl_easy_init()\fP to get the handle. There is also the so called "multi"
interface, try the \fIlibcurl-multi(3)\fP man page for an overview of that.
To transfer files, you always set up an "easy handle" using
\fIcurl_easy_init()\fP, but when you want the file(s) transferred you have the
option of using the "easy" interface, or the "multi" interface.
You continue by setting all the options you want in the upcoming transfer,
most important among them is the URL itself (you can't transfer anything
without a specified URL as you may have figured out yourself). You might want
to set some callbacks as well that will be called from the library when data
is available etc. \fIcurl_easy_setopt()\fP is there for this.
The easy interface is a synchronous interface with which you call
\fIcurl_easy_perform\fP and let it perform the transfer. When it is completed,
the function returns and you can continue. More details are found in the
\fIlibcurl-easy\fP man page.
When all is setup, you tell libcurl to perform the transfer using
\fIcurl_easy_perform()\fP. It will then do the entire operation and won't
return until it is done (successfully or not).
The multi interface, on the other hand, is an asynchronous interface that you
call repeatedly and that performs only a little piece of the transfer on each
invocation. It is perfect if you want to do things while the transfer is in
progress, or
similar. The multi interface allows you to select() on libcurl action, and
even to easily download multiple files simultaneously using a single thread.
After the transfer has been made, you can set new options and make another
transfer, or if you're done, cleanup the session by calling
\fIcurl_easy_cleanup()\fP. If you want persistant connections, you don't
cleanup immediately, but instead run ahead and perform other transfers using
the same handle. See the chapter below for Persistant Connections.
You can have multiple easy handles share certain data, even if they are used
in different threads. This magic is set up using the share interface, as
described in the \fIlibcurl-share\fP man page.
There is also a series of other helpful functions to use. They are:
@@ -107,14 +106,15 @@ Persistent connections means that libcurl can re-use the same connection for
several transfers, if the conditions are right.
libcurl will *always* attempt to use persistent connections. Whenever you use
curl_easy_perform(), libcurl will attempt to use an existing connection to do
the transfer, and if none exists it'll open a new one that will be subject for
re-use on a possible following call to curl_easy_perform().
\fIcurl_easy_perform()\fP or \fIcurl_multi_perform()\fP, libcurl will attempt
to use an existing connection to do the transfer, and if none exists it'll
open a new one that will be subject for re-use on a possible following call to
\fIcurl_easy_perform()\fP or \fIcurl_multi_perform()\fP.
To allow libcurl to take full advantage of persistent connections, you should
do as many of your file transfers as possible using the same curl handle. When
you call curl_easy_cleanup(), all the possibly open connections held by
you call \fIcurl_easy_cleanup()\fP, all the possibly open connections held by
libcurl will be closed and forgotten.
Note that the options set with curl_easy_setopt() will be used in on every
repeat curl_easy_perform() call
Note that the options set with \fIcurl_easy_setopt()\fP will be used on
every repeated \fIcurl_easy_perform()\fP call.

View File

@@ -13,3 +13,26 @@ of environment. You should include files from here using...
... style and point the compiler's include path to the directory holding the
curl subdirectory. It makes it more likely to survive future modifications.
NOTE FOR LIBCURL HACKERS
All the include files in this tree are written and intended to be installed on
a system that may serve multiple platforms and multiple applications, all
using libcurl (possibly even different libcurl installations using different
versions). Therefore, all header files in here must obey these rules:
* They cannot depend on or use configure-generated results from libcurl's or
curl's directories. Other applications may not run configure as (lib)curl
does, and using platform dependent info here may break other platforms.
* We cannot assume anything else but very basic compiler features being
present. While libcurl requires an ANSI C compiler to build, some of the
earlier ANSI compilers clearly can't deal with some preprocessor operators.
* Newlines must remain unix-style for older compilers' sake.
* Comments must be written in the old-style /* unnested C-fashion */
To figure out how to do good and portable checks for features, operating
systems or specific hardware, a very good resource is Bjorn Reese's
collection at http://predef.sf.net/

View File

@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -23,23 +23,39 @@
* $Id$
***************************************************************************/
/* If you have problems, all libcurl docs and details are found here:
http://curl.haxx.se/libcurl/
*/
/* This is the version number of the libcurl package from which this header
file origins: */
#define LIBCURL_VERSION "7.10.7"
/* This is the numeric version of the libcurl version number, meant for easier
parsing and comparisons by programs. The LIBCURL_VERSION_NUM define will
always follow this syntax:
0xXXYYZZ
Where XX, YY and ZZ are the main version, release and patch numbers in
hexadecimal. All three numbers are always represented using two digits. 1.2
would appear as "0x010200" while version 9.11.7 appears as "0x090b07".
This 6-digit hexadecimal number does not show pre-release number, and it is
always a greater number in a more recent release. It makes comparisons with
greater than and less than work.
*/
#define LIBCURL_VERSION_NUM 0x070a07
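/* Illustrative sketch (typical application usage, an assumption rather than a
   definition from this header): the number lends itself to compile-time
   checks such as
     #if LIBCURL_VERSION_NUM >= 0x070a07
       (code that relies on 7.10.7 or later)
     #endif
*/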
#include <stdio.h>
/* The include stuff here is mainly for time_t! */
/* The include stuff here below is mainly for time_t! */
#ifdef vms
# include <types.h>
# include <time.h>
#else
# include <sys/types.h>
# ifdef TIME_WITH_SYS_TIME
# include <sys/time.h>
# include <time.h>
# else
# ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
# else
# include <time.h>
# endif
# endif
# include <time.h>
#endif /* defined (vms) */
#ifndef TRUE
@@ -55,39 +71,36 @@
extern "C" {
#endif
/* stupid #define trick to preserve functionality with older code, but
making it use our name space for the future */
/* silly trick to preserve functionality with older code, but making it use
our name space for the future */
#define HttpPost curl_httppost
struct curl_httppost {
struct curl_httppost *next; /* next entry in the list */
char *name; /* pointer to allocated name */
long namelength; /* length of name length */
char *contents; /* pointer to allocated data contents */
long contentslength; /* length of contents field */
/* CMC: Added support for buffer uploads */
char *buffer; /* pointer to allocated buffer contents */
long bufferlength; /* length of buffer field */
char *contenttype; /* Content-Type */
struct curl_httppost *next; /* next entry in the list */
char *name; /* pointer to allocated name */
long namelength; /* length of name length */
char *contents; /* pointer to allocated data contents */
long contentslength; /* length of contents field */
char *buffer; /* pointer to allocated buffer contents */
long bufferlength; /* length of buffer field */
char *contenttype; /* Content-Type */
struct curl_slist* contentheader; /* list of extra headers for this form */
struct curl_httppost *more; /* if one field name has more than one file, this
link should link to following files */
long flags; /* as defined below */
#define HTTPPOST_FILENAME (1<<0) /* specified content is a file name */
#define HTTPPOST_READFILE (1<<1) /* specified content is a file name */
#define HTTPPOST_PTRNAME (1<<2) /* name is only stored pointer
do not free in formfree */
struct curl_httppost *more; /* if one field name has more than one
file, this link should link to following
files */
long flags; /* as defined below */
#define HTTPPOST_FILENAME (1<<0) /* specified content is a file name */
#define HTTPPOST_READFILE (1<<1) /* specified content is a file name */
#define HTTPPOST_PTRNAME (1<<2) /* name is only stored pointer
do not free in formfree */
#define HTTPPOST_PTRCONTENTS (1<<3) /* contents is only stored pointer
do not free in formfree */
#define HTTPPOST_BUFFER (1<<4) /* upload file from buffer */
#define HTTPPOST_PTRBUFFER (1<<5) /* upload file from pointer contents */
/* CMC: Added support for buffer uploads */
#define HTTPPOST_BUFFER (1<<4) /* upload file from buffer */
#define HTTPPOST_PTRBUFFER (1<<5) /* upload file from pointer contents */
char *showfilename; /* The file name to show. If not set, the actual
file name will be used (if this is a file part) */
char *showfilename; /* The file name to show. If not set, the
actual file name will be used (if this
is a file part) */
};
typedef int (*curl_progress_callback)(void *clientp,
@@ -130,7 +143,7 @@ typedef int (*curl_debug_callback)
curl_infotype type, /* what kind of data */
char *data, /* points to the data */
size_t size, /* size of the data pointed to */
void *userp); /* whatever the user please */
void *userptr); /* whatever the user please */
/* All possible error codes from all sorts of curl functions. Future versions
may return other values, stay prepared.
@@ -207,6 +220,11 @@ typedef enum {
CURL_LAST /* never use! */
} CURLcode;
typedef CURLcode (*curl_ssl_ctx_callback)(CURL *curl, /* easy handle */
void *ssl_ctx, /* actually an
OpenSSL SSL_CTX */
void *userptr);
/* Make a spelling correction for the operation timed-out define */
#define CURLE_OPERATION_TIMEDOUT CURLE_OPERATION_TIMEOUTED
#define CURLE_HTTP_NOT_FOUND CURLE_HTTP_RETURNED_ERROR
@@ -217,6 +235,14 @@ typedef enum {
CURLPROXY_SOCKS5 = 5
} curl_proxytype;
#define CURLAUTH_NONE 0 /* nothing */
#define CURLAUTH_BASIC (1<<0) /* Basic (default) */
#define CURLAUTH_DIGEST (1<<1) /* Digest */
#define CURLAUTH_GSSNEGOTIATE (1<<2) /* GSS-Negotiate */
#define CURLAUTH_NTLM (1<<3) /* NTLM */
#define CURLAUTH_ANY ~0 /* all types set */
#define CURLAUTH_ANYSAFE (~CURLAUTH_BASIC)
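/* Illustrative sketch (typical application usage, not part of the header):
   OR the bits together and hand them to CURLOPT_HTTPAUTH or CURLOPT_PROXYAUTH,
   for example
     curl_easy_setopt(handle, CURLOPT_HTTPAUTH,
                      CURLAUTH_DIGEST | CURLAUTH_NTLM);
   to allow Digest and NTLM but never Basic. */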
/* this was the error code 50 in 7.7.3 and a few earlier versions, this
is no longer used by libcurl but is instead #defined here only to not
make programs break */
@@ -266,6 +292,12 @@ typedef enum {
#define CINIT(name,type,number) CURLOPT_/**/name = type + number
#endif
/*
* This macro-mania below sets up the CURLOPT_[what] enum, to be used with
* curl_easy_setopt(). The first argument in the CINIT() macro is the [what]
* word.
*/
typedef enum {
CINIT(NOTHING, LONG, 0), /********* the first one is unused ************/
@@ -275,24 +307,19 @@ typedef enum {
/* The full URL to get/put */
CINIT(URL, OBJECTPOINT, 2),
/* Port number to connect to, if other than default. Specify the CONF_PORT
flag in the CURLOPT_FLAGS to activate this */
/* Port number to connect to, if other than default. */
CINIT(PORT, LONG, 3),
/* Name of proxy to use. Specify the CONF_PROXY flag in the CURLOPT_FLAGS to
activate this */
/* Name of proxy to use. */
CINIT(PROXY, OBJECTPOINT, 4),
/* Name and password to use when fetching. Specify the CONF_USERPWD flag in
the CURLOPT_FLAGS to activate this */
/* "name:password" to use when fetching. */
CINIT(USERPWD, OBJECTPOINT, 5),
/* Name and password to use with Proxy. Specify the CONF_PROXYUSERPWD
flag in the CURLOPT_FLAGS to activate this */
/* "name:password" to use with proxy. */
CINIT(PROXYUSERPWD, OBJECTPOINT, 6),
/* Range to get, specified as an ASCII string. Specify the CONF_RANGE flag
in the CURLOPT_FLAGS to activate this */
/* Range to get, specified as an ASCII string. */
CINIT(RANGE, OBJECTPOINT, 7),
/* not used */
@@ -413,7 +440,6 @@ typedef enum {
as described elsewhere. */
CINIT(WRITEINFO, OBJECTPOINT, 40),
/* Previous FLAG bits */
CINIT(VERBOSE, LONG, 41), /* talk a lot */
CINIT(HEADER, LONG, 42), /* throw the header out too */
CINIT(NOPROGRESS, LONG, 43), /* shut off the progress meter */
@@ -619,6 +645,38 @@ typedef enum {
/* Set aliases for HTTP 200 in the HTTP Response header */
CINIT(HTTP200ALIASES, OBJECTPOINT, 104),
/* Continue to send authentication (user+password) when following locations,
even when the hostname changed. This can potentially send off the name
and password to whatever host the server decides. */
CINIT(UNRESTRICTED_AUTH, LONG, 105),
/* Specifically switch on or off the FTP engine's use of the EPRT command (it
also disables the LPRT attempt). By default, those will always be
attempted before the good old traditional PORT command. */
CINIT(FTP_USE_EPRT, LONG, 106),
/* Set this to a bitmask value to enable the particular authentications
methods you like. Use this in combination with CURLOPT_USERPWD.
Note that setting multiple bits may cause extra network round-trips. */
CINIT(HTTPAUTH, LONG, 107),
/* Set the ssl context callback function, currently only for OpenSSL ssl_ctx
in second argument. The function must be matching the
curl_ssl_ctx_callback proto. */
CINIT(SSL_CTX_FUNCTION, FUNCTIONPOINT, 108),
/* Set the userdata for the ssl context callback function's third
argument */
CINIT(SSL_CTX_DATA, OBJECTPOINT, 109),
/* FTP Option that causes missing dirs to be created on the remote server */
CINIT(FTP_CREATE_MISSING_DIRS, LONG, 110),
/* Set this to a bitmask value to enable the particular authentications
methods you like. Use this in combination with CURLOPT_PROXYUSERPWD.
Note that setting multiple bits may cause extra network round-trips. */
CINIT(PROXYAUTH, LONG, 111),
CURLOPT_LASTENTRY /* the last unused */
} CURLoption;
@@ -687,21 +745,22 @@ typedef enum {
#endif
/* These functions are in the libcurl, they're here for portable reasons and
they are used by the 'curl' client. They really should be moved to some kind
of "portability library" since it has nothing to do with file transfers and
/* These functions are in libcurl; they're here for portability reasons and
they are used by the 'curl' client. They really should be moved to some kind
of "portability library" since they have nothing to do with file transfers and
might be usable to other programs...
NOTE: they return TRUE if the strings match *case insensitively*.
*/
extern int (curl_strequal)(const char *s1, const char *s2);
extern int (curl_strnequal)(const char *s1, const char *s2, size_t n);
#define strequal(a,b) curl_strequal(a,b)
#define strnequal(a,b,c) curl_strnequal(a,b,c)
/* DEPRECATED function to build formdata */
#ifdef CURL_OLDSTYLE
/* DEPRECATED function to build formdata. Stop using this, it will cease
to exist. */
int curl_formparse(char *, struct curl_httppost **,
struct curl_httppost **_post);
#endif
/* name is uppercase CURLFORM_<name> */
#ifdef CFINIT
@@ -780,47 +839,122 @@ typedef enum {
CURL_FORMADD_LAST /* last */
} CURLFORMcode;
/*
* NAME curl_formadd()
*
* DESCRIPTION
*
* Pretty advanced function for building multi-part formposts. Each invocation
* adds one part; together the parts construct a full post. Then use
* CURLOPT_HTTPPOST to send it off to libcurl.
*/
CURLFORMcode curl_formadd(struct curl_httppost **httppost,
struct curl_httppost **last_post,
...);
struct curl_httppost **last_post,
...);
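/* Illustrative use, with hypothetical variable names:
     struct curl_httppost *post = NULL;
     struct curl_httppost *last = NULL;
     curl_formadd(&post, &last,
                  CURLFORM_COPYNAME, "name",
                  CURLFORM_COPYCONTENTS, "daniel", CURLFORM_END);
     curl_easy_setopt(handle, CURLOPT_HTTPPOST, post);
     (perform the transfer, then free the post chain:)
     curl_formfree(post);
*/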
/* cleanup a form: */
/*
* NAME curl_formfree()
*
* DESCRIPTION
*
* Free a multipart formpost previously built with curl_formadd().
*/
void curl_formfree(struct curl_httppost *form);
/* Unix and Win32 getenv function call, this returns a malloc()'ed string that
MUST be free()ed after usage is complete. */
/*
* NAME curl_getenv()
*
* DESCRIPTION
*
* Returns a malloc()'ed string that MUST be curl_free()ed after usage is
* complete.
*/
char *curl_getenv(const char *variable);
/* Returns a static ascii string of the libcurl version. */
/*
* NAME curl_version()
*
* DESCRIPTION
*
* Returns a static ascii string of the libcurl version.
*/
char *curl_version(void);
/* Escape and unescape URL encoding in strings. The functions return a new
* allocated string or NULL if an error occurred. */
/*
* NAME curl_escape()
*
* DESCRIPTION
*
* Escapes URL strings (converts all letters considered illegal in URLs to
* their %XX versions). This function returns a newly allocated string or NULL
* if an error occurred.
*/
char *curl_escape(const char *string, int length);
/*
* NAME curl_unescape()
*
* DESCRIPTION
*
* Unescapes URL encoding in strings (converts all %XX codes to their 8bit
* versions). This function returns a newly allocated string or NULL if an error
* occurred.
*/
char *curl_unescape(const char *string, int length);
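/* Illustrative use: a length of 0 makes these functions use strlen() on the
   input, and the result must be released with curl_free():
     char *enc = curl_escape("this has spaces", 0);
     if(enc) {
       printf("escaped: %s\n", enc);
       curl_free(enc);
     }
*/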
/* 20020912 WJM. Provide for a de-allocation in the same translation unit
that did the allocation. Added in libcurl 7.10 */
/*
* NAME curl_free()
*
* DESCRIPTION
*
* Provided for de-allocation in the same translation unit that did the
* allocation. Added in libcurl 7.10
*/
void curl_free(void *p);
/* curl_global_init() should be invoked exactly once for each application that
uses libcurl */
/*
* NAME curl_global_init()
*
* DESCRIPTION
*
* curl_global_init() should be invoked exactly once for each application that
* uses libcurl
*/
CURLcode curl_global_init(long flags);
/* curl_global_cleanup() should be invoked exactly once for each application
that uses libcurl */
/*
* NAME curl_global_cleanup()
*
* DESCRIPTION
*
* curl_global_cleanup() should be invoked exactly once for each application
* that uses libcurl
*/
void curl_global_cleanup(void);
/* This is the version number */
#define LIBCURL_VERSION "7.10.3"
#define LIBCURL_VERSION_NUM 0x070a03
/* linked-list structure for the CURLOPT_QUOTE option (and other) */
struct curl_slist {
char *data;
struct curl_slist *next;
char *data;
struct curl_slist *next;
};
/*
* NAME curl_slist_append()
*
* DESCRIPTION
*
* Appends a string to a linked list. If no list exists, it will be created
* first. Returns the new list, after appending.
*/
struct curl_slist *curl_slist_append(struct curl_slist *, const char *);
/*
* NAME curl_slist_free_all()
*
* DESCRIPTION
*
* free a previously built curl_slist.
*/
void curl_slist_free_all(struct curl_slist *);
/*
@@ -858,22 +992,17 @@ typedef enum {
CURLINFO_REQUEST_SIZE = CURLINFO_LONG + 12,
CURLINFO_SSL_VERIFYRESULT = CURLINFO_LONG + 13,
CURLINFO_FILETIME = CURLINFO_LONG + 14,
CURLINFO_CONTENT_LENGTH_DOWNLOAD = CURLINFO_DOUBLE + 15,
CURLINFO_CONTENT_LENGTH_UPLOAD = CURLINFO_DOUBLE + 16,
CURLINFO_STARTTRANSFER_TIME = CURLINFO_DOUBLE + 17,
CURLINFO_CONTENT_TYPE = CURLINFO_STRING + 18,
CURLINFO_REDIRECT_TIME = CURLINFO_DOUBLE + 19,
CURLINFO_REDIRECT_COUNT = CURLINFO_LONG + 20,
CURLINFO_PRIVATE = CURLINFO_STRING + 21,
CURLINFO_HTTP_CONNECTCODE = CURLINFO_LONG + 22,
/* Fill in new entries below here! */
CURLINFO_CONTENT_TYPE = CURLINFO_STRING + 18,
CURLINFO_REDIRECT_TIME = CURLINFO_DOUBLE + 19,
CURLINFO_REDIRECT_COUNT = CURLINFO_LONG + 20,
CURLINFO_PRIVATE = CURLINFO_STRING + 21,
/* Fill in new entries here! */
CURLINFO_LASTONE = 22
CURLINFO_LASTONE = 23
} CURLINFO;
typedef enum {
@@ -902,10 +1031,15 @@ typedef enum {
/* Different data locks for a single share */
typedef enum {
CURL_LOCK_DATA_NONE = 0,
CURL_LOCK_DATA_COOKIE = 1,
CURL_LOCK_DATA_DNS = 2,
CURL_LOCK_DATA_SSL_SESSION = 3,
CURL_LOCK_DATA_CONNECT = 4,
/* CURL_LOCK_DATA_SHARE is used internally to say that
* the locking is just made to change the internal state of the share
* itself.
*/
CURL_LOCK_DATA_SHARE,
CURL_LOCK_DATA_COOKIE,
CURL_LOCK_DATA_DNS,
CURL_LOCK_DATA_SSL_SESSION,
CURL_LOCK_DATA_CONNECT,
CURL_LOCK_DATA_LAST
} curl_lock_data;
@@ -983,8 +1117,19 @@ typedef struct {
#define CURL_VERSION_KERBEROS4 (1<<1)
#define CURL_VERSION_SSL (1<<2)
#define CURL_VERSION_LIBZ (1<<3)
#define CURL_VERSION_NTLM (1<<4)
#define CURL_VERSION_GSSNEGOTIATE (1<<5)
#define CURL_VERSION_DEBUG (1<<6) /* built with debug capabilities */
#define CURL_VERSION_ASYNCHDNS (1<<7)
/* returns a pointer to a static copy of the version info struct */
/*
* NAME curl_version_info()
*
* DESCRIPTION
*
* This function returns a pointer to a static copy of the version info
* struct. See above.
*/
curl_version_info_data *curl_version_info(CURLversion);
#ifdef __cplusplus

View File

@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms

View File

@@ -1,54 +1,25 @@
/*************************************************************************
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id$
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
* MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE AUTHORS AND
* CONTRIBUTORS ACCEPT NO RESPONSIBILITY IN ANY CONCEIVABLE MANNER.
*
*************************************************************************
*
* Preliminary documentation
*
* printf conversions:
*
* conversion ::= '%%' | '%' [position] ( number | float | string )
* position ::= digits '$'
* number ::= [number-flags] ( 'd' | 'i' | 'o' | 'x' | 'X' | 'u')
* number-flags ::= 'h' | 'l' | 'L' ...
* float ::= [float-flags] ( 'f' | 'e' | 'E' | 'g' | 'G' )
* string ::= [string-flags] 's'
* string-flags ::= padding | '#'
* digits ::= (digit)+
* digit ::= 0-9
*
* c
* p
* n
*
* qualifiers
*
* - : left adjustment
* + : show sign
* SPACE : padding
* # : alterative
* . : precision
* * : width
* 0 : padding / size
* 1-9 : size
* h : short
* l : long
* ll : longlong
* L : long double
* Z : long / longlong
* q : longlong
*
************************************************************************/
***************************************************************************/
#ifndef H_MPRINTF
#define H_MPRINTF

View File

@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -44,19 +44,35 @@
o Enable the application to select() on its own file descriptors and curl's
file descriptors simultaneous easily.
Example sources using this interface is here: ../multi/
*/
#if defined(_WIN32) && !defined(WIN32)
/* Chris Lewis mentioned that he doesn't get WIN32 defined, only _WIN32 so we
make this adjustment to catch this. */
#define WIN32 1
#endif
#if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
#include <winsock.h>
#else
#ifdef _AIX
/* HP-UX systems version 9, 10 and 11 lack sys/select.h and so do oldish
libc5-based Linux systems. Only include it on systems that are known to
require it! */
#include <sys/select.h>
#endif
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#endif
#include "curl.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void CURLM;
typedef enum {
@@ -71,7 +87,7 @@ typedef enum {
typedef enum {
CURLMSG_NONE, /* first, not used */
CURLMSG_DONE, /* This easy handle has completed. 'whatever' points to
CURLMSG_DONE, /* This easy handle has completed. 'result' contains
the CURLcode of the transfer */
CURLMSG_LAST /* last, not used */
} CURLMSG;
@@ -187,4 +203,8 @@ CURLMcode curl_multi_cleanup(CURLM *multi_handle);
CURLMsg *curl_multi_info_read(CURLM *multi_handle,
int *msgs_in_queue);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif

View File

@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -23,9 +23,7 @@
* $Id$
***************************************************************************/
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
size_t fread (void *, size_t, size_t, FILE *);
size_t fwrite (const void *, size_t, size_t, FILE *);

View File

@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms

View File

@@ -7,3 +7,4 @@ Makefile
config.h
stamp-*
ca-bundle.h
getdate.c

View File

@@ -5,16 +5,23 @@
AUTOMAKE_OPTIONS = foreign nostdinc
EXTRA_DIST = getdate.y Makefile.b32 Makefile.b32.resp Makefile.m32 \
Makefile.vc6 Makefile.riscos libcurl.def curllib.dsp \
Makefile.vc6 Makefile.riscos libcurl.def curllib.dsp \
curllib.dsw config-vms.h config-win32.h config-riscos.h config-mac.h \
config.h.in ca-bundle.crt README.encoding README.memoryleak
config.h.in ca-bundle.crt README.encoding README.memoryleak \
README.ares makefile.dj config.dj
lib_LTLIBRARIES = libcurl.la
if ARES
ARESINC = -I$(top_srcdir)/ares
endif
# we use srcdir/include for the static global include files
# we use builddir/lib for the generated lib/config.h file to get found
# we use srcdir/lib for the lib-private header files
INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/lib -I$(top_srcdir)/lib
INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/lib -I$(top_srcdir)/lib $(ARESINC)
LDFLAGS += -L$(top_srcdir)/lib
VERSION=-version-info 2:2:0
@@ -47,15 +54,18 @@ VERSION=-version-info 2:2:0
#
if NO_UNDEFINED
# The -no-undefined flag is CRUCIAL for this to build fine on Cygwin. If we
# find a case in which we need to remove this flag, we should most likely
# write a configure check that detects when this flag is needed and when its
# not.
libcurl_la_LDFLAGS = -no-undefined $(VERSION)
# The -no-undefined flag is CRUCIAL for this to build fine on Cygwin.
UNDEF = -no-undefined
else
libcurl_la_LDFLAGS = $(VERSION)
UNDEF =
endif
if ARES
ARESLIB = -lares -L$(top_builddir)/ares
endif
libcurl_la_LDFLAGS = $(UNDEF) $(VERSION) $(ARESLIB)
libcurl_la_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c \
base64.c file.h hostip.c progress.c timeval.h base64.h formdata.c \
hostip.h progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c \
@@ -66,16 +76,29 @@ getpass.c netrc.c telnet.h getinfo.c getinfo.h transfer.c strequal.c \
strequal.h easy.c security.h security.c krb4.c krb4.h memdebug.c \
memdebug.h inet_ntoa_r.h http_chunks.c http_chunks.h strtok.c strtok.h \
connect.c connect.h llist.c llist.h hash.c hash.h multi.c \
content_encoding.c content_encoding.h share.h
content_encoding.c content_encoding.h share.c share.h http_digest.c \
md5.c md5.h http_digest.h http_negotiate.c http_negotiate.h \
http_ntlm.c http_ntlm.h ca-bundle.h
noinst_HEADERS = setup.h transfer.h
BUILT_SOURCES = $(srcdir)/getdate.c $(srcdir)/ca-bundle.h
# Say $(srcdir), so GNU make does not report an ambiguity with the .y.c rule.
$(srcdir)/getdate.c: getdate.y
cd $(srcdir) && \
$(YACC) $(YFLAGS) getdate.y; \
mv -f y.tab.c getdate.c
$(srcdir)/ca-bundle.h: Makefile.in Makefile
chmod 0644 $@
echo "/* The file is generated automaticly */" > $@
if CABUNDLE
echo '#define CURL_CA_BUNDLE @CURL_CA_BUNDLE@' >> $@
else
echo '#undef CURL_CA_BUNDLE /* unknown */' >> $@
endif
install-data-hook:
@if test -n "@CURL_CA_BUNDLE@"; then \
$(mkinstalldirs) `dirname $(DESTDIR)@CURL_CA_BUNDLE@`; \
@@ -85,4 +108,5 @@ install-data-hook:
# this hook is mainly for non-unix systems to build even if configure
# isn't run
dist-hook:
cp $(srcdir)/ca-bundle.h.in $(distdir)/ca-bundle.h
chmod 0644 $(distdir)/ca-bundle.h
echo "/* ca bundle path set in here*/" > $(distdir)/ca-bundle.h

View File

@@ -61,6 +61,7 @@ SOURCES = \
strtok.c \
connect.c \
hash.c \
share.c \
llist.c \
multi.c \
content_encoding.c

View File

@@ -30,6 +30,7 @@
+strtok.obj &
+connect.obj &
+hash.obj &
+share.obj &
+llist.obj &
+multi.obj &
+content_encoding.obj

View File

@@ -12,8 +12,8 @@ AR = ar
RM = rm -f
RANLIB = ranlib
STRIP = strip -g
OPENSSL_PATH = ../../openssl-0.9.6g
ZLIB_PATH = ../../zlib-1.1.3
OPENSSL_PATH = ../../openssl-0.9.7a
ZLIB_PATH = ../../zlib-1.1.4
########################################################
## Nothing more to do below this line!
@@ -22,36 +22,40 @@ INCLUDES = -I. -I.. -I../include -I../src
CFLAGS = -g -O2 -DMINGW32
ifdef SSL
INCLUDES += -I"$(OPENSSL_PATH)/outinc" -I"$(OPENSSL_PATH)/outinc/openssl"
CFLAGS += -DUSE_SSLEAY
DLL_LIBS = -L$(OPENSSL_PATH)/out -leay32 -lssl32 -lRSAglue
CFLAGS += -DUSE_SSLEAY -DHAVE_OPENSSL_ENGINE_H
DLL_LIBS = -L$(OPENSSL_PATH)/out -leay32 -lssl32
endif
ifdef ZLIB
INCLUDES += -I"$(ZLIB_PATH)"
CFLAGS += -DHAVE_ZLIB
DLL_LIBS += -L$(ZLIB_PATH) -lz
INCLUDES += -I"$(ZLIB_PATH)"
CFLAGS += -DHAVE_LIBZ -DHAVE_ZLIB_H
DLL_LIBS += -L$(ZLIB_PATH) -lz
endif
COMPILE = $(CC) $(INCLUDES) $(CFLAGS)
libcurl_a_LIBRARIES = libcurl.a
libcurl_a_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c base64.c \
file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h progress.h \
cookie.c formdata.h http.c sendf.c cookie.h ftp.c http.h sendf.h url.c dict.c \
ftp.h if2ip.c speedcheck.c url.h dict.h getdate.c if2ip.h speedcheck.h \
urldata.h transfer.c getdate.h ldap.c ssluse.c version.c transfer.h getenv.c \
ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h getpass.c netrc.c \
telnet.h getinfo.c strequal.c strequal.h easy.c security.h \
security.c krb4.h krb4.c memdebug.h memdebug.c inet_ntoa_r.h http_chunks.h http_chunks.c \
strtok.c connect.c hash.c llist.c multi.c \
content_encoding.h content_encoding.c
file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h \
progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c \
http.h sendf.h url.c dict.c ftp.h if2ip.c speedcheck.c url.h \
dict.h getdate.c if2ip.h speedcheck.h urldata.h transfer.c getdate.h \
ldap.c ssluse.c version.c transfer.h getenv.c \
ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h \
getpass.c netrc.c telnet.h getinfo.c strequal.c strequal.h easy.c \
security.h security.c krb4.h krb4.c memdebug.h memdebug.c \
inet_ntoa_r.h http_chunks.h http_chunks.c \
strtok.c connect.c hash.c llist.c multi.c share.c share.h \
content_encoding.h content_encoding.c http_digest.h http_digest.c \
http_negotiate.c http_negotiate.h http_ntlm.c http_ntlm.h md5.h \
md5.c
libcurl_a_OBJECTS = file.o timeval.o base64.o hostip.o progress.o \
formdata.o cookie.o http.o sendf.o ftp.o url.o dict.o if2ip.o \
speedcheck.o getdate.o transfer.o ldap.o ssluse.o version.o \
getenv.o escape.o mprintf.o telnet.o getpass.o netrc.o getinfo.o \
strequal.o easy.o security.o krb4.o memdebug.o http_chunks.o \
strtok.o connect.o hash.o llist.o multi.o \
content_encoding.o
strtok.o connect.o hash.o llist.o multi.o share.o \
content_encoding.o http_digest.o http_negotiate.o http_ntlm.o md5.o
LIBRARIES = $(libcurl_a_LIBRARIES)
SOURCES = $(libcurl_a_SOURCES)

View File

@@ -33,7 +33,7 @@
LIB_NAME = libcurl
LIB_NAME_DEBUG = libcurld
!IFNDEF OPENSSL_PATH
OPENSSL_PATH = ../../openssl-0.9.6
OPENSSL_PATH = ../../openssl-0.9.7a
!ENDIF
#############################################################
@@ -48,7 +48,8 @@ LNKDLL = link.exe /DLL /def:libcurl.def
LNKLIB = link.exe -lib
LFLAGS = /nologo
LINKLIBS = ws2_32.lib winmm.lib
SSLLIBS = libeay32.lib ssleay32.lib RSAglue.lib
SSLLIBS = libeay32.lib ssleay32.lib
# RSAglue.lib was formerly needed in the SSLLIBS
CFGSET = FALSE
######################
@@ -200,7 +201,12 @@ X_OBJS= \
$(DIROBJ)\connect.obj \
$(DIROBJ)\hash.obj \
$(DIROBJ)\llist.obj \
$(DIROBJ)\multi.obj
$(DIROBJ)\share.obj \
$(DIROBJ)\multi.obj \
$(DIROBJ)\http_digest.obj \
$(DIROBJ)\http_negotiate.obj \
$(DIROBJ)\http_ntlm.obj \
$(DIROBJ)\md5.obj
all : $(TARGET)

lib/README.ares Normal file
View File

@@ -0,0 +1,44 @@
$Id$
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
How To Build libcurl to use ares for asynch name resolves
=========================================================
ares:
ftp://athena-dist.mit.edu/pub/ATHENA/ares/ares-1.1.1.tar.gz
http://curl.haxx.se/dev/ares-1.1.1.tar.gz
ares patch:
http://curl.haxx.se/dev/ares2.diff
Mac OS X quirk:
ares 1.1.1 contains too old versions of config.guess and config.sub. Copy
the ones from the curl source tree in to the ares source tree before you
run configure.
Build ares
==========
1. unpack the ares-1.1.1 archive
2. apply patch (if you're on Mac OS X or windows)
3. ./configure
4. make
Build libcurl to use ares
=========================
1. Move the ares source/build tree to subdirectory in the curl root named
'ares'.
2. ./buildconf
3. ./configure --enable-ares
4. make
If the configure script detects IPv6 support, you need to explicitly disable
it (--disable-ipv6) since ares isn't IPv6 compatible (yet).
Please let me know how it builds, runs, works or whatever. I had to do some
fairly big changes in some code parts to get this to work.

View File

@@ -5,15 +5,15 @@
HTTP/1.1 [RFC 2616] specifies that a client may request that a server encode
its response. This is usually used to compress a response using one of a set
of commonly available compression techniques. These schemes are `deflate'
(the zlib algorithm), `gzip' and `compress' [sec 3.5, RFC 2616]. A client
requests that the sever perform an encoding by including an Accept-Encoding
header in the request document. The value of the header should be one of the
recognized tokens `deflate', ... (there's a way to register new
schemes/tokens, see sec 3.5 of the spec). A server MAY honor the client's
encoding request. When a response is encoded, the server includes a
Content-Encoding header in the response. The value of the Content-Encoding
header indicates which scheme was used to encode the data.
of commonly available compression techniques. These schemes are `deflate' (the
zlib algorithm), `gzip' and `compress' [sec 3.5, RFC 2616]. A client requests
that the sever perform an encoding by including an Accept-Encoding header in
the request document. The value of the header should be one of the recognized
tokens `deflate', ... (there's a way to register new schemes/tokens, see sec
3.5 of the spec). A server MAY honor the client's encoding request. When a
response is encoded, the server includes a Content-Encoding header in the
response. The value of the Content-Encoding header indicates which scheme was
used to encode the data.
A client may tell a server that it can understand several different encoding
schemes. In this case the server may choose any one of those and use it to
@@ -24,11 +24,10 @@ information on the Accept-Encoding header.
* Current support for content encoding:
I added support for the 'deflate' content encoding to both libcurl and curl.
Both regular and chunked transfers should work although I've tested only the
former. The library zlib is required for this feature. Places where I
modified the source code are commented and typically include my initials and
the date (e.g., 08/29/02 jhrg).
Both the 'deflate' and 'gzip' content encodings are supported by
libcurl. Both regular and chunked transfers should work fine. The library
zlib is required for this feature. 'deflate' support was added by James
Gallagher, and support for the 'gzip' encoding was added by Dan Fandrich.
* The libcurl interface:
@@ -39,15 +38,23 @@ To cause libcurl to request a content encoding use:
where <string> is the intended value of the Accept-Encoding header.
Currently, libcurl only understands how to process responses that use the
`deflate' Content-Encoding, so the only value for CURLOPT_ENCODING that will
work (besides "identity," which does nothing) is "deflate." If a response is
encoded using either the `gzip' or `compress' methods, libcurl will return an
error indicating that the response could not be decoded. If <string> is null
or empty no Accept-Encoding header is generated.
"deflate" or "gzip" Content-Encoding, so the only values for CURLOPT_ENCODING
that will work (besides "identity," which does nothing) are "deflate" and
"gzip" If a response is encoded using the "compress" or methods, libcurl will
return an error indicating that the response could not be decoded. If
<string> is NULL no Accept-Encoding header is generated. If <string> is a
zero-length string, then an Accept-Encoding header containing all supported
encodings will be generated.
CURLOPT_ENCODING must be set to a non-NULL value for content to be
automatically decoded. If it is not set and the server still sends encoded
content (despite not having been asked), the data is returned in its raw form
and the Content-Encoding type is not checked.
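As a concrete illustration of the interface described above (a minimal sketch, not part of this patch; the URL is a placeholder), an application would enable decoding like this:

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");

    /* A zero-length string asks for every encoding libcurl supports
       (deflate and gzip when built with zlib); a NULL pointer would
       suppress the Accept-Encoding header entirely. */
    curl_easy_setopt(curl, CURLOPT_ENCODING, "");

    /* The response body is decoded transparently before it reaches
       the write callback. */
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}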
* The curl interface:
Use the --compressed option with curl to cause it to ask servers to compress
responses using deflate.
responses using deflate.
James Gallagher <jgallagher@gso.uri.edu>
Dan Fandrich <dan@coneharvesters.com>


@@ -17,7 +17,7 @@ Single-threaded
Build
Rebuild libcurl with -DMALLOCDEBUG (usually, rerunning configure with
Rebuild libcurl with -DCURLDEBUG (usually, rerunning configure with
--enable-debug fixes this). 'make clean' first, then 'make' so that all
files actually are rebuilt properly. It will also make sense to build
libcurl with the debug option (usually -g to the compiler) so that debugging


@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -26,15 +26,15 @@
/*
* Telnet option defines. Add more here if in need.
*/
#define TELOPT_BINARY 0 /* binary 8bit data */
#define TELOPT_SGA 3 /* Suppress Go Ahead */
#define TELOPT_EXOPL 255 /* EXtended OPtions List */
#define TELOPT_TTYPE 24 /* Terminal TYPE */
#define TELOPT_XDISPLOC 35 /* X DISPlay LOCation */
#define CURL_TELOPT_BINARY 0 /* binary 8bit data */
#define CURL_TELOPT_SGA 3 /* Suppress Go Ahead */
#define CURL_TELOPT_EXOPL 255 /* EXtended OPtions List */
#define CURL_TELOPT_TTYPE 24 /* Terminal TYPE */
#define CURL_TELOPT_XDISPLOC 35 /* X DISPlay LOCation */
#define TELOPT_NEW_ENVIRON 39 /* NEW ENVIRONment variables */
#define NEW_ENV_VAR 0
#define NEW_ENV_VALUE 1
#define CURL_TELOPT_NEW_ENVIRON 39 /* NEW ENVIRONment variables */
#define CURL_NEW_ENV_VAR 0
#define CURL_NEW_ENV_VALUE 1
/*
* The telnet options represented as strings
@@ -53,27 +53,27 @@ static const char *telnetoptions[]=
"OLD-ENVIRON", "AUTHENTICATION", "ENCRYPT", "NEW-ENVIRON"
};
#define TELOPT_MAXIMUM TELOPT_NEW_ENVIRON
#define CURL_TELOPT_MAXIMUM CURL_TELOPT_NEW_ENVIRON
#define TELOPT_OK(x) ((x) <= TELOPT_MAXIMUM)
#define TELOPT(x) telnetoptions[x]
#define CURL_TELOPT_OK(x) ((x) <= CURL_TELOPT_MAXIMUM)
#define CURL_TELOPT(x) telnetoptions[x]
#define NTELOPTS 40
#define CURL_NTELOPTS 40
/*
* First some defines
*/
#define xEOF 236 /* End Of File */
#define SE 240 /* Sub negotiation End */
#define NOP 241 /* No OPeration */
#define DM 242 /* Data Mark */
#define GA 249 /* Go Ahead, reverse the line */
#define SB 250 /* SuBnegotiation */
#define WILL 251 /* Our side WILL use this option */
#define WONT 252 /* Our side WON'T use this option */
#define DO 253 /* DO use this option! */
#define DONT 254 /* DON'T use this option! */
#define IAC 255 /* Interpret As Command */
#define CURL_xEOF 236 /* End Of File */
#define CURL_SE 240 /* Sub negotiation End */
#define CURL_NOP 241 /* No OPeration */
#define CURL_DM 242 /* Data Mark */
#define CURL_GA 249 /* Go Ahead, reverse the line */
#define CURL_SB 250 /* SuBnegotiation */
#define CURL_WILL 251 /* Our side WILL use this option */
#define CURL_WONT 252 /* Our side WON'T use this option */
#define CURL_DO 253 /* DO use this option! */
#define CURL_DONT 254 /* DON'T use this option! */
#define CURL_IAC 255 /* Interpret As Command */
/*
* Then those numbers represented as strings:
@@ -86,16 +86,16 @@ static const char *telnetcmds[]=
"WILL", "WONT", "DO", "DONT", "IAC"
};
#define TELCMD_MINIMUM xEOF /* the first one */
#define TELCMD_MAXIMUM IAC /* surprise, 255 is the last one! ;-) */
#define CURL_TELCMD_MINIMUM CURL_xEOF /* the first one */
#define CURL_TELCMD_MAXIMUM CURL_IAC /* surprise, 255 is the last one! ;-) */
#define TELQUAL_IS 0
#define TELQUAL_SEND 1
#define TELQUAL_INFO 2
#define TELQUAL_NAME 3
#define CURL_TELQUAL_IS 0
#define CURL_TELQUAL_SEND 1
#define CURL_TELQUAL_INFO 2
#define CURL_TELQUAL_NAME 3
#define TELCMD_OK(x) ( ((unsigned int)(x) >= TELCMD_MINIMUM) && \
((unsigned int)(x) <= TELCMD_MAXIMUM) )
#define TELCMD(x) telnetcmds[(x)-TELCMD_MINIMUM]
#define CURL_TELCMD_OK(x) ( ((unsigned int)(x) >= CURL_TELCMD_MINIMUM) && \
((unsigned int)(x) <= CURL_TELCMD_MAXIMUM) )
#define CURL_TELCMD(x) telnetcmds[(x)-CURL_TELCMD_MINIMUM]
#endif
#endif


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -42,7 +42,7 @@
#include "base64.h"
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -135,7 +135,7 @@ int Curl_base64_encode(const void *inp, int insize, char **outptr)
while(insize > 0) {
for (i = inputparts = 0; i < 3; i++) {
if(*indata) {
if(insize > 0) {
inputparts++;
ibuf[i] = *indata;
indata++;
@@ -220,6 +220,8 @@ int main(int argc, char **argv, char **envp)
#ifdef TEST_DECODE
/* decoding test harness. Read in a base64 string from stdin and write out the
* length returned by Curl_base64_decode, followed by the decoded data itself
*
* gcc -DTEST_DECODE base64.c -o base64 mprintf.o memdebug.o
*/
#include <stdio.h>
@@ -232,13 +234,31 @@ int main(int argc, char **argv, char **envp)
int base64Len;
unsigned char *data;
int dataLen;
int i, j;
base64 = (char *)suck(&base64Len);
data = (unsigned char *)malloc(base64Len * 3/4 + 8);
dataLen = Curl_base64_decode(base64, data);
fprintf(stderr, "%d\n", dataLen);
fwrite(data,1,dataLen,stdout);
for(i=0; i < dataLen; i+=0x10) {
printf("0x%02x: ", i);
for(j=0; j < 0x10; j++)
if((j+i) < dataLen)
printf("%02x ", data[i+j]);
else
printf(" ");
printf(" | ");
for(j=0; j < 0x10; j++)
if((j+i) < dataLen)
printf("%c", isgraph(data[i+j])?data[i+j]:'.');
else
break;
puts("");
}
free(base64); free(data);
return 0;
@@ -266,11 +286,3 @@ void *suck(int *lenptr)
return (void *)buf;
}
#endif
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/


@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms


@@ -2287,82 +2287,6 @@ Certificate Ingredients:
ec:b9:94:6a:aa:12:4f:1a:dd:f5:77:b5:25:8c:f2:8a:0a:f1:
fc:52:5b:58
TC TrustCenter, Germany, Class 0 CA
===================================
MD5 Fingerprint: 35:85:49:8E:6E:57:FE:BD:97:F1:C9:46:23:3A:B6:7D
PEM Data:
-----BEGIN CERTIFICATE-----
MIIENTCCA56gAwIBAgIBATANBgkqhkiG9w0BAQQFADCBvDELMAkGA1UEBhMCREUx
EDAOBgNVBAgTB0hhbWJ1cmcxEDAOBgNVBAcTB0hhbWJ1cmcxOjA4BgNVBAoTMVRD
IFRydXN0Q2VudGVyIGZvciBTZWN1cml0eSBpbiBEYXRhIE5ldHdvcmtzIEdtYkgx
IjAgBgNVBAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDAgQ0ExKTAnBgkqhkiG9w0B
CQEWGmNlcnRpZmljYXRlQHRydXN0Y2VudGVyLmRlMB4XDTk4MDMwOTEzNTQ0OFoX
DTA1MTIzMTEzNTQ0OFowgbwxCzAJBgNVBAYTAkRFMRAwDgYDVQQIEwdIYW1idXJn
MRAwDgYDVQQHEwdIYW1idXJnMTowOAYDVQQKEzFUQyBUcnVzdENlbnRlciBmb3Ig
U2VjdXJpdHkgaW4gRGF0YSBOZXR3b3JrcyBHbWJIMSIwIAYDVQQLExlUQyBUcnVz
dENlbnRlciBDbGFzcyAwIENBMSkwJwYJKoZIhvcNAQkBFhpjZXJ0aWZpY2F0ZUB0
cnVzdGNlbnRlci5kZTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA333mvr/V
8C9tTg7R4I0LfztU6IrisJ8oxYrGubMzJ/UnyhpMVBJrtLJGsx1Ls/QhC0sCLqHC
NJyFoMR4EdvbaycrCSoYTkDMn3EZZ5l0onw/wdiLI8hjO4ohq1zeHvSN3LQYwwVz
9Gq0ofoBCCsBD203W6o4hmc51+Vf+uR+zKMCAwEAAaOCAUMwggE/MEAGCWCGSAGG
+EIBAwQzFjFodHRwczovL3d3dy50cnVzdGNlbnRlci5kZS9jZ2ktYmluL2NoZWNr
LXJldi5jZ2k/MEAGCWCGSAGG+EIBBAQzFjFodHRwczovL3d3dy50cnVzdGNlbnRl
ci5kZS9jZ2ktYmluL2NoZWNrLXJldi5jZ2k/MDwGCWCGSAGG+EIBBwQvFi1odHRw
czovL3d3dy50cnVzdGNlbnRlci5kZS9jZ2ktYmluL1JlbmV3LmNnaT8wPgYJYIZI
AYb4QgEIBDEWL2h0dHA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvZ3VpZGVsaW5lcy9p
bmRleC5odG1sMCgGCWCGSAGG+EIBDQQbFhlUQyBUcnVzdENlbnRlciBDbGFzcyAw
IENBMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQQFAAOBgQBNB39fCTAZ
kqoFR3qUdVQqrs/82AxC4UU4KySVssqHynnEw5eQXmIYxsk4YUxoNdNMFBHrxM2h
qdjFnmgnMgc1RQT4XyGgYB4cAEgEWNLFy65tMm49d5WMhcflrlCddUp7/wsneepN
pFn/7FrqJqU5g6TReM6nqX683SvKEpMDSg==
-----END CERTIFICATE-----
Certificate Ingredients:
Data:
Version: 3 (0x2)
Serial Number: 1 (0x1)
Signature Algorithm: md5WithRSAEncryption
Issuer: C=DE, ST=Hamburg, L=Hamburg, O=TC TrustCenter for Security in Data Networks GmbH, OU=TC TrustCenter Class 0 CA/Email=certificate@trustcenter.de
Validity
Not Before: Mar 9 13:54:48 1998 GMT
Not After : Dec 31 13:54:48 2005 GMT
Subject: C=DE, ST=Hamburg, L=Hamburg, O=TC TrustCenter for Security in Data Networks GmbH, OU=TC TrustCenter Class 0 CA/Email=certificate@trustcenter.de
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
RSA Public Key: (1024 bit)
Modulus (1024 bit):
00:df:7d:e6:be:bf:d5:f0:2f:6d:4e:0e:d1:e0:8d:
0b:7f:3b:54:e8:8a:e2:b0:9f:28:c5:8a:c6:b9:b3:
33:27:f5:27:ca:1a:4c:54:12:6b:b4:b2:46:b3:1d:
4b:b3:f4:21:0b:4b:02:2e:a1:c2:34:9c:85:a0:c4:
78:11:db:db:6b:27:2b:09:2a:18:4e:40:cc:9f:71:
19:67:99:74:a2:7c:3f:c1:d8:8b:23:c8:63:3b:8a:
21:ab:5c:de:1e:f4:8d:dc:b4:18:c3:05:73:f4:6a:
b4:a1:fa:01:08:2b:01:0f:6d:37:5b:aa:38:86:67:
39:d7:e5:5f:fa:e4:7e:cc:a3
Exponent: 65537 (0x10001)
X509v3 extensions:
Netscape Revocation Url:
https://www.trustcenter.de/cgi-bin/check-rev.cgi?
Netscape CA Revocation Url:
https://www.trustcenter.de/cgi-bin/check-rev.cgi?
Netscape Renewal Url:
https://www.trustcenter.de/cgi-bin/Renew.cgi?
Netscape CA Policy Url:
http://www.trustcenter.de/guidelines/index.html
Netscape Comment:
TC TrustCenter Class 0 CA
Netscape Cert Type:
SSL CA, S/MIME CA, Object Signing CA
Signature Algorithm: md5WithRSAEncryption
4d:07:7f:5f:09:30:19:92:aa:05:47:7a:94:75:54:2a:ae:cf:
fc:d8:0c:42:e1:45:38:2b:24:95:b2:ca:87:ca:79:c4:c3:97:
90:5e:62:18:c6:c9:38:61:4c:68:35:d3:4c:14:11:eb:c4:cd:
a1:a9:d8:c5:9e:68:27:32:07:35:45:04:f8:5f:21:a0:60:1e:
1c:00:48:04:58:d2:c5:cb:ae:6d:32:6e:3d:77:95:8c:85:c7:
e5:ae:50:9d:75:4a:7b:ff:0b:27:79:ea:4d:a4:59:ff:ec:5a:
ea:26:a5:39:83:a4:d1:78:ce:a7:a9:7e:bc:dd:2b:ca:12:93:
03:4a
TC TrustCenter, Germany, Class 1 CA
===================================
MD5 Fingerprint: 64:3F:F8:3E:52:14:4A:59:BA:93:56:04:0B:23:02:D1


@@ -369,3 +369,13 @@
/* Define if you have the `sigsetjmp' function. */
#define HAVE_SIGSETJMP 1
/* Define to 1 if you have the <setjmp.h> header file. */
#define HAVE_SETJMP_H 1
/*
* This needs to be defined for OpenSSL 0.9.7 and other versions that have the
* ENGINE stuff supported. If an include of "openssl/engine.h" fails, then
* undefine the define below.
*/
#define HAVE_OPENSSL_ENGINE_H 1

lib/config.dj (new file, 92 lines)

@@ -0,0 +1,92 @@
#ifndef _CURL_CONFIG_DJGPP_H
#define _CURL_CONFIG_DJGPP_H
#define OS "djgpp"
#define PACKAGE "curl"
#define CURL_CA_BUNDLE "/dev/env/CURL_CA_BUNDLE"
#if (DJGPP_MINOR >= 4)
/* #define HAVE_DLOPEN 1 maybe not (DXE3) */
#endif
#if 1 /* use ioctlsocket() via fsext'ed fcntl() */
#define HAVE_O_NONBLOCK 1
#else
#define HAVE_IOCTLSOCKET 1
#endif
#define HAVE_ALARM 1
#define HAVE_ARPA_INET_H 1
#define HAVE_CLOSESOCKET 1
#define HAVE_FCNTL_H 1
#define HAVE_GETHOSTBYADDR 1
#define HAVE_GETHOSTNAME 1
#define HAVE_GETPASS 1
#define HAVE_GETSERVBYNAME 1
#define HAVE_GETTIMEOFDAY 1
#define HAVE_INET_ADDR 1
#define HAVE_INET_NTOA 1
#define HAVE_IO_H 1
#define HAVE_MALLOC_H 1
#define HAVE_MEMORY_H 1
#define HAVE_NETDB_H 1
#define HAVE_NETINET_IN_H 1
#define HAVE_NET_IF_H 1
#define HAVE_PERROR 1
#define HAVE_SELECT 1
#define HAVE_SETJMP_H 1
#define HAVE_SETVBUF 1
#define HAVE_SIGNAL 1
#define HAVE_SIGACTION 1
#define HAVE_SIGSETJMP 1
#define HAVE_SOCKET 1
#define HAVE_STRCASECMP 1
#define HAVE_STRDUP 1
#define HAVE_STRFTIME 1
#define HAVE_STRICMP 1
#define HAVE_STRSTR 1
#define HAVE_SYS_SOCKET_H 1
#define HAVE_SYS_STAT_H 1
#define HAVE_SYS_TYPES_H 1
#define HAVE_TERMIOS_H 1
#define HAVE_TIME_H 1
#define HAVE_UNAME 1
#define HAVE_UNISTD_H 1
#define HAVE_VPRINTF 1
#define RETSIGTYPE void
#define SIZEOF_LONG_DOUBLE 16
#define SIZEOF_LONG_LONG 8
#define STDC_HEADERS 1
#define TIME_WITH_SYS_TIME 1
#define BSD
#define USE_ZLIB
/* #define MALLOCDEBUG */
#ifdef HAVE_OPENSSL_ENGINE_H /* on cmd-line */
#define HAVE_OPENSSL_X509_H 1
#define HAVE_OPENSSL_SSL_H 1
#define HAVE_OPENSSL_RSA_H 1
#define HAVE_OPENSSL_PEM_H 1
#define HAVE_OPENSSL_ERR_H 1
#define HAVE_OPENSSL_CRYPTO_H 1
#define HAVE_LIBSSL 1
#define HAVE_LIBCRYPTO 1
#define OPENSSL_NO_KRB5 1
#endif
#define in_addr_t u_long
#define socklen_t int
#define ssize_t int
#include <stdlib.h>
#include <string.h>
#include <tcp.h> /* Watt-32 API */
#undef word
#endif /* _CURL_CONFIG_DJGPP_H */


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -70,6 +70,7 @@
#define EINPROGRESS WSAEINPROGRESS
#define EWOULDBLOCK WSAEWOULDBLOCK
#define EISCONN WSAEISCONN
#define ENOTSOCK WSAENOTSOCK
#endif
#include "urldata.h"
@@ -77,12 +78,11 @@
#include "if2ip.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
static
int geterrno(void)
int Curl_ourerrno(void)
{
#ifdef WIN32
return (int)GetLastError();
@@ -144,7 +144,11 @@ int Curl_nonblock(int socket, /* operate on this */
}
/*
* Return 0 on fine connect, -1 on error and 1 on timeout.
* waitconnect() returns:
* 0 fine connect
* -1 select() error
* 1 select() timeout
* 2 select() returned with an error condition
*/
static
int waitconnect(int sockfd, /* socket */
@@ -187,17 +191,8 @@ int waitconnect(int sockfd, /* socket */
static CURLcode bindlocal(struct connectdata *conn,
int sockfd)
{
#if !defined(WIN32)||defined(__CYGWIN32__)
/* We don't generally like checking for OS-versions, we should make this
HAVE_XXXX based, although at the moment I don't have a decent test for
this! */
#ifdef HAVE_INET_NTOA
#ifndef INADDR_NONE
#define INADDR_NONE (in_addr_t) ~0
#endif
struct SessionHandle *data = conn->data;
/*************************************************************
@@ -209,12 +204,20 @@ static CURLcode bindlocal(struct connectdata *conn,
size_t size;
char myhost[256] = "";
in_addr_t in;
int rc;
if(Curl_if2ip(data->set.device, myhost, sizeof(myhost))) {
/* First check if the given name is an IP address */
in=inet_addr(data->set.device);
if((in == CURL_INADDR_NONE) &&
Curl_if2ip(data->set.device, myhost, sizeof(myhost))) {
/*
* We now have the numerical IPv4-style x.y.z.w in the 'myhost' buffer
*/
h = Curl_resolv(data, myhost, 0);
rc = Curl_resolv(conn, myhost, 0, &h);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &h);
}
else {
if(strlen(data->set.device)>1) {
@@ -222,11 +225,14 @@ static CURLcode bindlocal(struct connectdata *conn,
* This was not an interface, resolve the name as a host name
* or IP number
*/
h = Curl_resolv(data, data->set.device, 0);
if(h) {
rc = Curl_resolv(conn, data->set.device, 0, &h);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &h);
if(h)
/* we know data->set.device is shorter than the myhost array */
strcpy(myhost, data->set.device);
}
}
}
@@ -243,22 +249,31 @@ static CURLcode bindlocal(struct connectdata *conn,
infof(data, "We bind local end to %s\n", myhost);
in=inet_addr(myhost);
if (INADDR_NONE != in) {
if (CURL_INADDR_NONE != in) {
if ( h ) {
Curl_addrinfo *addr = h->addr;
Curl_resolv_unlock(h);
Curl_resolv_unlock(data, h);
/* we don't need it anymore after this function has returned */
memset((char *)&sa, 0, sizeof(sa));
#ifdef ENABLE_IPV6
memcpy((char *)&sa.sin_addr, addr->ai_addr, addr->ai_addrlen);
sa.sin_family = addr->ai_family;
(void)sa; /* prevent compiler warning */
if( bind(sockfd, addr->ai_addr, addr->ai_addrlen) >= 0) {
/* we succeeded to bind */
struct sockaddr_in6 add;
size = sizeof(add);
if(getsockname(sockfd, (struct sockaddr *) &add,
(socklen_t *)&size)<0) {
failf(data, "getsockname() failed");
return CURLE_HTTP_PORT_FAILED;
}
}
#else
memset((char *)&sa, 0, sizeof(sa));
memcpy((char *)&sa.sin_addr, addr->h_addr, addr->h_length);
sa.sin_family = AF_INET;
#endif
sa.sin_addr.s_addr = in;
sa.sin_port = 0; /* get any port */
@@ -273,6 +288,7 @@ static CURLcode bindlocal(struct connectdata *conn,
return CURLE_HTTP_PORT_FAILED;
}
}
#endif
else {
switch(errno) {
case EBADF:
@@ -322,7 +338,6 @@ static CURLcode bindlocal(struct connectdata *conn,
} /* end of device selection support */
#endif /* end of HAVE_INET_NTOA */
#endif /* end of not WIN32 */
return CURLE_HTTP_PORT_FAILED;
}
@@ -336,7 +351,7 @@ int socketerror(int sockfd)
if( -1 == getsockopt(sockfd, SOL_SOCKET, SO_ERROR,
(void *)&err, &errSize))
err = geterrno();
err = Curl_ourerrno();
return err;
}
@@ -396,10 +411,15 @@ CURLcode Curl_is_connected(struct connectdata *conn,
return CURLE_OK;
}
/* nope, not connected for real */
if(err)
return CURLE_COULDNT_CONNECT;
failf(data, "Connection failed, socket error: %d", err);
return CURLE_COULDNT_CONNECT;
}
else if(1 != rc) {
int error = Curl_ourerrno();
failf(data, "Failed connect to %s:%d, errno: %d",
conn->hostname, conn->port, error);
return CURLE_COULDNT_CONNECT;
}
/*
* If the connection phase is "done" here, we should attempt to connect
* to the "next address" in the Curl_hostaddr structure that we resolved
@@ -507,7 +527,7 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
rc = connect(sockfd, ai->ai_addr, ai->ai_addrlen);
if(-1 == rc) {
int error=geterrno();
int error=Curl_ourerrno();
switch (error) {
case EINPROGRESS:
@@ -548,6 +568,9 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
failf(data, "socket error: %d", err);
/* we are _not_ connected, it was a false alert, continue please */
}
else if(2 == rc)
/* waitconnect() returned error */
;
else if(data->state.used_interface == Curl_if_multi) {
/* When running the multi interface, we bail out here */
rc = 0;
@@ -623,7 +646,7 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
sizeof(serv_addr));
if(-1 == rc) {
int error=geterrno();
int error=Curl_ourerrno();
switch (error) {
case EINPROGRESS:


@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -37,4 +37,6 @@ CURLcode Curl_connecthost(struct connectdata *conn,
Curl_ipconnect **addr, /* the one we used */
bool *connected /* truly connected? */
);
int Curl_ourerrno(void);
#endif


@@ -1,16 +1,16 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
@@ -25,13 +25,26 @@
#ifdef HAVE_LIBZ
#include <stdlib.h>
#include <string.h>
#include "urldata.h"
#include <curl/curl.h>
#include <curl/types.h>
#include "sendf.h"
#define DSIZ 4096 /* buffer size for decompressed data */
#define DSIZ 0x10000 /* buffer size for decompressed data */
#define GZIP_MAGIC_0 0x1f
#define GZIP_MAGIC_1 0x8b
/* gzip flag byte */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
#define COMMENT 0x10 /* bit 4 set: file comment present */
#define RESERVED 0xE0 /* bits 5..7: reserved */
static CURLcode
process_zlib_error(struct SessionHandle *data, z_stream *z)
@@ -55,7 +68,7 @@ exit_zlib(z_stream *z, bool *zlib_init, CURLcode result)
}
CURLcode
Curl_unencode_deflate_write(struct SessionHandle *data,
Curl_unencode_deflate_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread)
{
@@ -63,7 +76,7 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
int result; /* Curl_client_write status */
char decomp[DSIZ]; /* Put the decompressed data here. */
z_stream *z = &k->z; /* zlib state structure */
/* Initialize zlib? */
if (!k->zlib_init) {
z->zalloc = (alloc_func)Z_NULL;
@@ -74,7 +87,7 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
k->zlib_init = 1;
}
/* Set the compressed input when this fucntion is called */
/* Set the compressed input when this function is called */
z->next_in = (Bytef *)k->str;
z->avail_in = nread;
@@ -87,11 +100,12 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
status = inflate(z, Z_SYNC_FLUSH);
if (status == Z_OK || status == Z_STREAM_END) {
result = Curl_client_write(data, CLIENTWRITE_BODY, decomp,
DSIZ - z->avail_out);
/* if !CURLE_OK, clean up, return */
if (result) {
return exit_zlib(z, &k->zlib_init, result);
if (DSIZ - z->avail_out) {
result = Curl_client_write(data, CLIENTWRITE_BODY, decomp,
DSIZ - z->avail_out);
/* if !CURLE_OK, clean up, return */
if (result)
return exit_zlib(z, &k->zlib_init, result);
}
/* Done?; clean up, return */
@@ -103,7 +117,233 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
}
/* Done with these bytes, exit */
if (status == Z_OK && z->avail_in == 0 && z->avail_out > 0)
if (status == Z_OK && z->avail_in == 0 && z->avail_out > 0)
return result;
}
else { /* Error; exit loop, handle below */
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
}
}
/* Skip over the gzip header */
static enum {
GZIP_OK,
GZIP_BAD,
GZIP_UNDERFLOW
} check_gzip_header(unsigned char const *data, ssize_t len, ssize_t *headerlen)
{
int method, flags;
const ssize_t totallen = len;
/* The shortest header is 10 bytes */
if (len < 10)
return GZIP_UNDERFLOW;
if ((data[0] != GZIP_MAGIC_0) || (data[1] != GZIP_MAGIC_1))
return GZIP_BAD;
method = data[2];
flags = data[3];
if (method != Z_DEFLATED || (flags & RESERVED) != 0) {
/* Can't handle this compression method or unknown flag */
return GZIP_BAD;
}
/* Skip over time, xflags, OS code and all previous bytes */
len -= 10;
data += 10;
if (flags & EXTRA_FIELD) {
ssize_t extra_len;
if (len < 2)
return GZIP_UNDERFLOW;
extra_len = (data[1] << 8) | data[0];
if (len < (extra_len+2))
return GZIP_UNDERFLOW;
len -= (extra_len + 2);
}
if (flags & ORIG_NAME) {
/* Skip over NUL-terminated file name */
while (len && *data) {
--len;
++data;
}
if (!len || *data)
return GZIP_UNDERFLOW;
/* Skip over the NUL */
--len;
++data;
}
if (flags & COMMENT) {
/* Skip over NUL-terminated comment */
while (len && *data) {
--len;
++data;
}
if (!len || *data)
return GZIP_UNDERFLOW;
/* Skip over the NUL */
--len;
++data;
}
if (flags & HEAD_CRC) {
if (len < 2)
return GZIP_UNDERFLOW;
len -= 2;
data += 2;
}
*headerlen = totallen - len;
return GZIP_OK;
}
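To make the header layout concrete, here is a hypothetical test snippet (not part of this patch; it assumes it is compiled inside content_encoding.c where check_gzip_header() is visible):

static void gzip_header_example(void)
{
  /* The shortest valid gzip member header: magic bytes, deflate method,
     no flag bits, zeroed mtime, xflags and OS byte - 10 bytes in total. */
  static const unsigned char minimal_hdr[10] = {
    0x1f, 0x8b,   /* GZIP_MAGIC_0, GZIP_MAGIC_1 */
    0x08,         /* compression method: Z_DEFLATED */
    0x00,         /* flags: no extra field, name, comment or header CRC */
    0, 0, 0, 0,   /* mtime */
    0, 0          /* xflags, OS code */
  };
  ssize_t headerlen;

  /* Returns GZIP_OK and sets headerlen to 10. Fewer than 10 input bytes
     would give GZIP_UNDERFLOW; wrong magic or method would give GZIP_BAD. */
  if(check_gzip_header(minimal_hdr, sizeof(minimal_hdr), &headerlen) == GZIP_OK) {
    /* the raw deflate stream would start at minimal_hdr + headerlen */
  }
}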
CURLcode
Curl_unencode_gzip_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread)
{
int status; /* zlib status */
int result; /* Curl_client_write status */
char decomp[DSIZ]; /* Put the decompressed data here. */
z_stream *z = &k->z; /* zlib state structure */
/* Initialize zlib? */
if (!k->zlib_init) {
z->zalloc = (alloc_func)Z_NULL;
z->zfree = (free_func)Z_NULL;
z->opaque = 0; /* of dubious use 08/27/02 jhrg */
if (inflateInit2(z, -MAX_WBITS) != Z_OK)
return process_zlib_error(data, z);
k->zlib_init = 1; /* Initial call state */
}
/* This next mess is to get around the potential case where there isn't
enough data passed in to skip over the gzip header. If that happens,
we malloc a block and copy what we have then wait for the next call. If
there still isn't enough (this is definitely a worst-case scenario), we
make the block bigger, copy the next part in and keep waiting. */
/* Skip over gzip header? */
if (k->zlib_init == 1) {
/* Initial call state */
ssize_t hlen;
switch (check_gzip_header((unsigned char *)k->str, nread, &hlen)) {
case GZIP_OK:
z->next_in = (Bytef *)k->str + hlen;
z->avail_in = nread - hlen;
k->zlib_init = 3; /* Inflating stream state */
break;
case GZIP_UNDERFLOW:
/* We need more data so we can find the end of the gzip header.
It's possible that the memory block we malloc here will never be
freed if the transfer abruptly aborts after this point. Since it's
unlikely that circumstances will be right for this code path to be
followed in the first place, and it's even more unlikely for a transfer
to fail immediately afterwards, it should seldom be a problem. */
z->avail_in = nread;
z->next_in = malloc(z->avail_in);
if (z->next_in == NULL) {
return exit_zlib(z, &k->zlib_init, CURLE_OUT_OF_MEMORY);
}
memcpy(z->next_in, k->str, z->avail_in);
k->zlib_init = 2; /* Need more gzip header data state */
/* We don't have any data to inflate yet */
return CURLE_OK;
case GZIP_BAD:
default:
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
}
else if (k->zlib_init == 2) {
/* Need more gzip header data state */
ssize_t hlen;
unsigned char *oldblock = z->next_in;
z->avail_in += nread;
z->next_in = realloc(z->next_in, z->avail_in);
if (z->next_in == NULL) {
free(oldblock);
return exit_zlib(z, &k->zlib_init, CURLE_OUT_OF_MEMORY);
}
/* Append the new block of data to the previous one */
memcpy(z->next_in + z->avail_in - nread, k->str, nread);
switch (check_gzip_header(z->next_in, z->avail_in, &hlen)) {
case GZIP_OK:
/* This is the zlib stream data */
free(z->next_in);
/* Don't point into the malloced block since we just freed it */
z->next_in = (Bytef *)k->str + hlen + nread - z->avail_in;
z->avail_in = z->avail_in - hlen;
k->zlib_init = 3; /* Inflating stream state */
break;
case GZIP_UNDERFLOW:
/* We still don't have any data to inflate! */
return CURLE_OK;
case GZIP_BAD:
default:
free(z->next_in);
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
}
else {
/* Inflating stream state */
z->next_in = (Bytef *)k->str;
z->avail_in = nread;
}
if (z->avail_in == 0) {
/* We don't have any data to inflate; wait until next time */
return CURLE_OK;
}
/* because the buffer size is fixed, iteratively decompress
and transfer to the client via client_write. */
for (;;) {
/* (re)set buffer for decompressed output for every iteration */
z->next_out = (Bytef *)&decomp[0];
z->avail_out = DSIZ;
status = inflate(z, Z_SYNC_FLUSH);
if (status == Z_OK || status == Z_STREAM_END) {
if(DSIZ - z->avail_out) {
result = Curl_client_write(data, CLIENTWRITE_BODY, decomp,
DSIZ - z->avail_out);
/* if !CURLE_OK, clean up, return */
if (result)
return exit_zlib(z, &k->zlib_init, result);
}
/* Done?; clean up, return */
/* We should really check the gzip CRC here */
if (status == Z_STREAM_END) {
if (inflateEnd(z) == Z_OK)
return exit_zlib(z, &k->zlib_init, result);
else
return exit_zlib(z, &k->zlib_init, process_zlib_error(data, z));
}
/* Done with these bytes, exit */
if (status == Z_OK && z->avail_in == 0 && z->avail_out > 0)
return result;
}
else { /* Error; exit loop, handle below */
@@ -112,11 +352,3 @@ Curl_unencode_deflate_write(struct SessionHandle *data,
}
}
#endif /* HAVE_LIBZ */
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -20,15 +20,22 @@
*
* $Id$
***************************************************************************/
#include "setup.h"
/*
* Comma-separated list of all supported Content-Encodings ('identity' is implied)
*/
#ifdef HAVE_LIBZ
#define ALL_CONTENT_ENCODINGS "deflate, gzip"
#else
#define ALL_CONTENT_ENCODINGS "identity"
#endif
CURLcode Curl_unencode_deflate_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread);
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/
CURLcode
Curl_unencode_gzip_write(struct SessionHandle *data,
struct Curl_transfer_keeper *k,
ssize_t nread);


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -77,6 +77,7 @@ Example set of cookies:
13-Jun-1988 03:04:55 GMT; domain=.fidelity.com; path=/; secure
****/
#include "setup.h"
#ifndef CURL_DISABLE_HTTP
@@ -85,13 +86,15 @@ Example set of cookies:
#include <string.h>
#include <ctype.h>
#include "urldata.h"
#include "cookie.h"
#include "getdate.h"
#include "strequal.h"
#include "strtok.h"
#include "sendf.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -110,6 +113,17 @@ free_cookiemess(struct Cookie *co)
free(co);
}
static bool tailmatch(const char *little, const char *bigone)
{
unsigned int littlelen = strlen(little);
unsigned int biglen = strlen(bigone);
if(littlelen > biglen)
return FALSE;
return strequal(little, bigone+biglen-littlelen);
}
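A quick sketch of how the new helper behaves (illustration only, not part of this patch; it assumes it lives inside cookie.c where tailmatch() is visible):

#include <assert.h>

static void tailmatch_example(void)
{
  assert( tailmatch("haxx.se", "www.haxx.se"));  /* host lies within the domain */
  assert( tailmatch("haxx.se", "haxx.se"));      /* an exact match tail-matches too */
  assert(!tailmatch("www.haxx.se", "haxx.se"));  /* 'little' is longer than 'bigone' */
}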
/****************************************************************************
*
* Curl_cookie_add()
@@ -119,10 +133,18 @@ free_cookiemess(struct Cookie *co)
***************************************************************************/
struct Cookie *
Curl_cookie_add(struct CookieInfo *c,
Curl_cookie_add(struct SessionHandle *data,
/* The 'data' pointer here may be NULL at times, and thus
must only be used very carefully for things that can deal
with data being NULL, such as infof() and similar */
struct CookieInfo *c,
bool httpheader, /* TRUE if HTTP header-style line */
char *lineptr, /* first character of the line */
char *domain) /* default domain */
char *domain, /* default domain */
char *path) /* full path used when this cookie is set,
used to get default path for the cookie
unless set */
{
struct Cookie *clist;
char what[MAX_COOKIE_LINE];
@@ -133,6 +155,7 @@ Curl_cookie_add(struct CookieInfo *c,
struct Cookie *lastc=NULL;
time_t now = time(NULL);
bool replace_old = FALSE;
bool badcookie = FALSE; /* cookies are good by default. mmmmm yummy */
/* First, alloc and init a new struct for it */
co = (struct Cookie *)malloc(sizeof(struct Cookie));
@@ -185,8 +208,73 @@ Curl_cookie_add(struct CookieInfo *c,
co->path=strdup(whatptr);
}
else if(strequal("domain", name)) {
co->domain=strdup(whatptr);
co->field1= (whatptr[0]=='.')?2:1;
/* note that this name may or may not have a preceding dot, but
we don't care about that, we treat the names the same anyway */
char *ptr=whatptr;
int dotcount=1;
unsigned int i;
static const char *seventhree[]= {
"com", "edu", "net", "org", "gov", "mil", "int"
};
/* Count the dots, we need to make sure that there are THREE dots
in the normal domains, or TWO in the seventhree-domains. */
if('.' == whatptr[0])
/* don't count the initial dot, assume it */
ptr++;
do {
ptr = strchr(ptr, '.');
if(ptr) {
ptr++;
dotcount++;
}
} while(ptr);
for(i=0;
i<sizeof(seventhree)/sizeof(seventhree[0]); i++) {
if(tailmatch(seventhree[i], whatptr)) {
dotcount++; /* we allow one dot less for these */
break;
}
}
/* The original Netscape cookie spec defined that this domain name
MUST have three dots (or two if one of the seven holy TLDs),
but it seems that these kinds of cookies are in use "out there"
so we cannot be that strict. I've therefore lowered the check
to not allow less than two dots. */
if(dotcount < 2) {
/* Received and skipped a cookie with a domain using too few
dots. */
badcookie=TRUE; /* mark this as a bad cookie */
infof(data, "skipped cookie with illegal dotcount domain: %s",
whatptr);
}
else {
/* Now, we make sure that our host is within the given domain,
or the given domain is not valid and thus cannot be set. */
if(!domain || tailmatch(whatptr, domain)) {
char *ptr=whatptr;
if(ptr[0] == '.')
ptr++;
co->domain=strdup(ptr); /* don't prefix with dots internally */
co->tailmatch=TRUE; /* we always do that if the domain name was
given */
}
else {
/* we did not get a tailmatch and then the attempted set domain
is not a domain to which the current host belongs. Mark as
bad. */
badcookie=TRUE;
infof(data, "skipped cookie with bad tailmatch domain: %s",
whatptr);
}
}
}
else if(strequal("version", name)) {
co->version=strdup(whatptr);
@@ -248,8 +336,11 @@ Curl_cookie_add(struct CookieInfo *c,
semiptr=strchr(ptr, '\0');
} while(semiptr);
if(NULL == co->name) {
/* we didn't get a cookie name, this is an illegal line, bail out */
if(badcookie || (NULL == co->name)) {
/* we didn't get a cookie name or a bad one,
this is an illegal line, bail out */
if(co->expirestr)
free(co->expirestr);
if(co->domain)
free(co->domain);
if(co->path)
@@ -263,8 +354,20 @@ Curl_cookie_add(struct CookieInfo *c,
}
if(NULL == co->domain)
/* no domain given in the header line, set the default now */
/* no domain was given in the header line, set the default now */
co->domain=domain?strdup(domain):NULL;
if((NULL == co->path) && path) {
/* no path was given in the header line, set the default now */
char *endslash = strrchr(path, '/');
if(endslash) {
int pathlen = endslash-path+1; /* include the ending slash */
co->path=malloc(pathlen+1); /* one extra for the zero byte */
if(co->path) {
memcpy(co->path, path, pathlen);
co->path[pathlen]=0; /* zero terminate */
}
}
}
}
else {
/* This line is NOT a HTTP header style line, we do offer support for
@@ -296,9 +399,12 @@ Curl_cookie_add(struct CookieInfo *c,
/* Now loop through the fields and init the struct we already have
allocated */
for(ptr=firstptr, fields=0; ptr; ptr=strtok_r(NULL, "\t", &tok_buf), fields++) {
for(ptr=firstptr, fields=0; ptr;
ptr=strtok_r(NULL, "\t", &tok_buf), fields++) {
switch(fields) {
case 0:
if(ptr[0]=='.') /* skip preceding dots */
ptr++;
co->domain = strdup(ptr);
break;
case 1:
@@ -311,10 +417,8 @@ Curl_cookie_add(struct CookieInfo *c,
As far as I can see, it is set to true when the cookie says
.domain.com and to false when the domain is complete www.domain.com
We don't currently take advantage of this knowledge.
*/
co->field1=strequal(ptr, "TRUE")+1; /* store information */
co->tailmatch=strequal(ptr, "TRUE"); /* store information */
break;
case 2:
/* It turns out, that sometimes the file format allows the path
@@ -344,13 +448,16 @@ Curl_cookie_add(struct CookieInfo *c,
}
}
if(7 != fields) {
if(6 == fields) {
/* we got a cookie with blank contents, fix it */
co->value = strdup("");
}
else if(7 != fields) {
/* we did not find the sufficient number of fields to recognize this
as a valid line, abort and go home */
free_cookiemess(co);
return NULL;
}
}
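For reference (illustration only, not part of this patch), a line in the Netscape cookie-file format carries seven tab-separated fields; with the change above, a six-field line is now accepted as a cookie with an empty value:

/* Example line (TAB-separated):
     .haxx.se  TRUE  /  FALSE  0  mycookie  myvalue
   fields: domain, tailmatch flag, path, secure, expires, name, value.
   The same line without the trailing "myvalue" now yields value == "". */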
if(!c->running && /* read from a file */
@@ -373,13 +480,8 @@ Curl_cookie_add(struct CookieInfo *c,
/* the names are identical */
if(clist->domain && co->domain) {
if(strequal(clist->domain, co->domain) ||
(clist->domain[0]=='.' &&
strequal(&(clist->domain[1]), co->domain)) ||
(co->domain[0]=='.' &&
strequal(clist->domain, &(co->domain[1]))) )
/* The domains are identical, or at least identical if you skip the
preceding dot */
if(strequal(clist->domain, co->domain))
/* The domains are identical */
replace_old=TRUE;
}
else if(!clist->domain && !co->domain)
@@ -460,6 +562,12 @@ Curl_cookie_add(struct CookieInfo *c,
clist = clist->next;
}
if(c->running)
/* Only show this when NOT reading the cookies from a file */
infof(data, "%s cookie %s=\"%s\" for domain %s, path %s, expire %d\n",
replace_old?"Replaced":"Added", co->name, co->value,
co->domain, co->path, co->expires);
if(!replace_old) {
/* then make the last item point on this new one */
if(lastc)
@@ -469,7 +577,6 @@ Curl_cookie_add(struct CookieInfo *c,
}
c->numcookies++; /* one more cookie in the jar */
return co;
}
@@ -483,7 +590,8 @@ Curl_cookie_add(struct CookieInfo *c,
* If 'newsession' is TRUE, discard all "session cookies" on read from file.
*
****************************************************************************/
struct CookieInfo *Curl_cookie_init(char *file,
struct CookieInfo *Curl_cookie_init(struct SessionHandle *data,
char *file,
struct CookieInfo *inc,
bool newsession)
{
@@ -531,7 +639,7 @@ struct CookieInfo *Curl_cookie_init(char *file,
while(*lineptr && isspace((int)*lineptr))
lineptr++;
Curl_cookie_add(c, headerline, lineptr, NULL);
Curl_cookie_add(data, c, headerline, lineptr, NULL, NULL);
}
if(fromfile)
fclose(fp);
@@ -560,9 +668,6 @@ struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
struct Cookie *newco;
struct Cookie *co;
time_t now = time(NULL);
int hostlen=strlen(host);
int domlen;
struct Cookie *mainco=NULL;
if(!c || !c->cookies)
@@ -571,43 +676,42 @@ struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
co = c->cookies;
while(co) {
/* only process this cookie if it is not expired or had no expire
date AND that if the cookie requires we're secure we must only
continue if we are! */
/* only process this cookie if it is not expired or had no expire
date AND that if the cookie requires we're secure we must only
continue if we are! */
if( (co->expires<=0 || (co->expires> now)) &&
(co->secure?secure:TRUE) ) {
/* now check if the domain is correct */
if(!co->domain ||
(co->tailmatch && tailmatch(co->domain, host)) ||
(!co->tailmatch && strequal(host, co->domain)) ) {
/* the right part of the host matches the domain stuff in the
cookie data */
/* now check the left part of the path with the cookies path
requirement */
if(!co->path ||
checkprefix(co->path, path) ) {
/* now check if the domain is correct */
domlen=co->domain?strlen(co->domain):0;
if(!co->domain ||
((domlen<=hostlen) &&
strequal(host+(hostlen-domlen), co->domain)) ) {
/* the right part of the host matches the domain stuff in the
cookie data */
/* and now, we know this is a match and we should create an
entry for the return-linked-list */
newco = (struct Cookie *)malloc(sizeof(struct Cookie));
if(newco) {
/* first, copy the whole source cookie: */
memcpy(newco, co, sizeof(struct Cookie));
/* now check the left part of the path with the cookies path
requirement */
if(!co->path ||
checkprefix(co->path, path) ) {
/* and now, we know this is a match and we should create an
entry for the return-linked-list */
newco = (struct Cookie *)malloc(sizeof(struct Cookie));
if(newco) {
/* first, copy the whole source cookie: */
memcpy(newco, co, sizeof(struct Cookie));
/* then modify our next */
newco->next = mainco;
/* point the main to us */
mainco = newco;
}
}
}
}
co = co->next;
/* then modify our next */
newco->next = mainco;
/* point the main to us */
mainco = newco;
}
}
}
}
co = co->next;
}
return mainco; /* return the new list */
@@ -715,15 +819,19 @@ int Curl_cookie_output(struct CookieInfo *c, char *dumphere)
while(co) {
fprintf(out,
"%s\t" /* domain */
"%s\t" /* field1 */
"%s%s\t" /* domain */
"%s\t" /* tailmatch */
"%s\t" /* path */
"%s\t" /* secure */
"%u\t" /* expires */
"%s\t" /* name */
"%s\n", /* value */
/* Make sure all domains are prefixed with a dot if they allow
tailmatching. This is Mozilla-style. */
(co->tailmatch && co->domain && co->domain[0] != '.')? ".":"",
co->domain?co->domain:"unknown",
co->field1==2?"TRUE":"FALSE",
co->tailmatch?"TRUE":"FALSE",
co->path?co->path:"/",
co->secure?"TRUE":"FALSE",
(unsigned int)co->expires,
@@ -740,39 +848,4 @@ int Curl_cookie_output(struct CookieInfo *c, char *dumphere)
return 0;
}
#ifdef CURL_COOKIE_DEBUG
/*
* On my Solaris box, this command line builds this test program:
*
* gcc -g -o cooktest -DCURL_COOKIE_DEBUG -DHAVE_CONFIG_H -I.. -I../include cookie.c strequal.o getdate.o memdebug.o mprintf.o strtok.o -lnsl -lsocket
*
*/
int main(int argc, char **argv)
{
struct CookieInfo *c=NULL;
if(argc>1) {
c = Curl_cookie_init(argv[1], c);
Curl_cookie_add(c, TRUE, "PERSONALIZE=none;expires=Monday, 13-Jun-1988 03:04:55 GMT; domain=.fidelity.com; path=/ftgw; secure");
Curl_cookie_add(c, TRUE, "foobar=yes; domain=.haxx.se; path=/looser;");
c = Curl_cookie_init(argv[1], c);
Curl_cookie_output(c);
Curl_cookie_cleanup(c);
return 0;
}
return 1;
}
#endif
#endif /* CURL_DISABLE_HTTP */
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/


@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -40,8 +40,7 @@ struct Cookie {
char *domain; /* domain = <this> */
long expires; /* expires = <this> */
char *expirestr; /* the plain text version */
char field1; /* read from a cookie file, 1 => FALSE, 2=> TRUE */
bool tailmatch; /* whether we do tail-matching of the domain name */
/* RFC 2109 keywords. Version=1 means 2109-compliant cookie sending */
char *version; /* Version = <value> */
@@ -69,14 +68,18 @@ struct CookieInfo {
#define MAX_NAME 256
#define MAX_NAME_TXT "255"
struct SessionHandle;
/*
* Add a cookie to the internal list of cookies. The domain argument is only
* used if the header boolean is TRUE.
* Add a cookie to the internal list of cookies. The domain and path arguments
* are only used if the header boolean is TRUE.
*/
struct Cookie *Curl_cookie_add(struct CookieInfo *, bool header, char *line,
char *domain);
struct CookieInfo *Curl_cookie_init(char *, struct CookieInfo *, bool);
struct Cookie *Curl_cookie_add(struct SessionHandle *data,
struct CookieInfo *, bool header, char *line,
char *domain, char *path);
struct CookieInfo *Curl_cookie_init(struct SessionHandle *data,
char *, struct CookieInfo *, bool);
struct Cookie *Curl_cookie_getlist(struct CookieInfo *, char *, char *, bool);
void Curl_cookie_freelist(struct Cookie *);
void Curl_cookie_cleanup(struct CookieInfo *);
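A hedged example of a call with the updated Curl_cookie_add() signature (names and values are made up; 'data' and 'cookies' stand for a valid SessionHandle and CookieInfo owned by the caller):

Curl_cookie_add(data, cookies, TRUE,  /* HTTP header-style line follows */
                "PERSONALIZE=none; domain=.fidelity.com; secure",
                "www.fidelity.com",   /* default domain for the cookie */
                "/ftgw/account");     /* request path; default cookie path becomes "/ftgw/" */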


@@ -243,6 +243,10 @@ SOURCE=.\url.c
# End Source File
# Begin Source File
SOURCE=.\share.c
# End Source File
# Begin Source File
SOURCE=.\version.c
# End Source File
# End Group
@@ -385,6 +389,22 @@ SOURCE=.\url.h
SOURCE=.\urldata.h
# End Source File
# Begin Source File
SOURCE=.\http_digest.c
# End Source File
# Begin Source File
SOURCE=.\md5.c
# End Source File
# Begin Source File
SOURCE=.\http_digest.h
# End Source File
# Begin Source File
SOURCE=.\md5.h
# End Source File
# End Group
# Begin Group "Resource Files"


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -44,7 +44,6 @@
#endif
#include <netinet/in.h>
#include <sys/time.h>
#include <sys/resource.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
@@ -231,11 +230,3 @@ CURLcode Curl_dict(struct connectdata *conn)
return CURLE_OK;
}
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/


@@ -8,7 +8,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -46,7 +46,6 @@
#endif
#include <netinet/in.h>
#include <sys/time.h>
#include <sys/resource.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
@@ -77,6 +76,7 @@
#include "url.h"
#include "getinfo.h"
#include "hostip.h"
#include "share.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
@@ -200,6 +200,7 @@ CURLcode curl_easy_setopt(CURL *curl, CURLoption tag, ...)
long param_long = 0;
void *param_obj = NULL;
struct SessionHandle *data = curl;
CURLcode ret=CURLE_FAILED_INIT;
va_start(arg, tag);
@@ -213,35 +214,45 @@ CURLcode curl_easy_setopt(CURL *curl, CURLoption tag, ...)
if(tag < CURLOPTTYPE_OBJECTPOINT) {
/* This is a LONG type */
param_long = va_arg(arg, long);
Curl_setopt(data, tag, param_long);
ret = Curl_setopt(data, tag, param_long);
}
else if(tag < CURLOPTTYPE_FUNCTIONPOINT) {
/* This is a object pointer type */
param_obj = va_arg(arg, void *);
Curl_setopt(data, tag, param_obj);
ret = Curl_setopt(data, tag, param_obj);
}
else {
param_func = va_arg(arg, func_T );
Curl_setopt(data, tag, param_func);
ret = Curl_setopt(data, tag, param_func);
}
va_end(arg);
return CURLE_OK;
return ret;
}
CURLcode curl_easy_perform(CURL *curl)
{
struct SessionHandle *data = (struct SessionHandle *)curl;
if (Curl_global_host_cache_use(data) && data->hostcache != Curl_global_host_cache_get()) {
if (data->hostcache) {
Curl_hash_destroy(data->hostcache);
}
data->hostcache = Curl_global_host_cache_get();
}
if ( ! (data->share && data->share->hostcache) ) {
if (!data->hostcache) {
data->hostcache = Curl_hash_alloc(7, Curl_freednsinfo);
if (Curl_global_host_cache_use(data) &&
data->hostcache != Curl_global_host_cache_get()) {
if (data->hostcache)
Curl_hash_destroy(data->hostcache);
data->hostcache = Curl_global_host_cache_get();
}
if (!data->hostcache) {
data->hostcache = Curl_hash_alloc(7, Curl_freednsinfo);
if(!data->hostcache)
/* While we possibly could survive and do good without a host cache,
the fact that creating it failed indicates that things are truly
screwed up and we should bail out! */
return CURLE_OUT_OF_MEMORY;
}
}
return Curl_perform(data);
@@ -250,8 +261,10 @@ CURLcode curl_easy_perform(CURL *curl)
void curl_easy_cleanup(CURL *curl)
{
struct SessionHandle *data = (struct SessionHandle *)curl;
if (!Curl_global_host_cache_use(data)) {
Curl_hash_destroy(data->hostcache);
if ( ! (data->share && data->share->hostcache) ) {
if ( !Curl_global_host_cache_use(data)) {
Curl_hash_destroy(data->hostcache);
}
}
Curl_close(data);
}
@@ -313,7 +326,8 @@ CURL *curl_easy_duphandle(CURL *incurl)
if(data->cookies)
/* If cookies are enabled in the parent handle, we enable them
in the clone as well! */
outcurl->cookies = Curl_cookie_init(data->cookies->filename,
outcurl->cookies = Curl_cookie_init(data,
data->cookies->filename,
outcurl->cookies,
data->set.cookiesession);
@@ -333,11 +347,3 @@ CURL *curl_easy_duphandle(CURL *incurl)
return outcurl;
}
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -33,7 +33,7 @@
#include <string.h>
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
@@ -79,6 +79,10 @@ char *curl_escape(const char *string, int length)
return ns;
}
#define ishex(in) ((in >= 'a' && in <= 'f') || \
(in >= 'A' && in <= 'F') || \
(in >= '0' && in <= '9'))
char *curl_unescape(const char *string, int length)
{
int alloc = (length?length:(int)strlen(string))+1;
@@ -93,13 +97,19 @@ char *curl_unescape(const char *string, int length)
while(--alloc > 0) {
in = *string;
if('%' == in) {
/* encoded part */
if(sscanf(string+1, "%02X", &hex)) {
in = hex;
string+=2;
alloc-=2;
}
if(('%' == in) && ishex(string[1]) && ishex(string[2])) {
/* this is two hexadecimal digits following a '%' */
char hexstr[3];
char *ptr;
hexstr[0] = string[1];
hexstr[1] = string[2];
hexstr[2] = 0;
hex = strtol(hexstr, &ptr, 16);
in = hex;
string+=2;
alloc-=2;
}
ns[index++] = in;
@@ -109,15 +119,10 @@ char *curl_unescape(const char *string, int length)
return ns;
}
/* For operating systems/environments that use different malloc/free
systems for the app and for this library, we provide a free that uses
the library's memory system */
void curl_free(void *p)
{
free(p);
}
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/
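Since curl_unescape() returns memory allocated inside libcurl, curl_free() is the matching way for applications to release it (important on platforms where the library and the application may use different C runtimes). A minimal usage sketch, not part of this patch:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  /* decodes "%48%65llo%20world" into "Hello world" */
  char *decoded = curl_unescape("%48%65llo%20world", 0);
  if(decoded) {
    printf("%s\n", decoded);
    curl_free(decoded);   /* release with libcurl's allocator, not plain free() */
  }
  return 0;
}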


@@ -8,7 +8,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -48,7 +48,6 @@
#include <netinet/in.h>
#endif
#include <sys/time.h>
#include <sys/resource.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
@@ -88,18 +87,19 @@
#include <curl/mprintf.h>
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
/* Emulate a connect-then-transfer protocol. We connect to the file here */
CURLcode Curl_file_connect(struct connectdata *conn)
{
char *actual_path = curl_unescape(conn->path, 0);
char *real_path = curl_unescape(conn->path, 0);
struct FILE *file;
int fd;
#if defined(WIN32) || defined(__EMX__)
int i;
char *actual_path;
#endif
file = (struct FILE *)malloc(sizeof(struct FILE));
@@ -110,6 +110,29 @@ CURLcode Curl_file_connect(struct connectdata *conn)
conn->proto.file = file;
#if defined(WIN32) || defined(__EMX__)
/* If the first character is a slash, and there's
something that looks like a drive at the beginning of
the path, skip the slash. If we remove the initial
slash in all cases, paths without drive letters end up
relative to the current directory which isn't how
browsers work.
Some browsers accept | instead of : as the drive letter
separator, so we do too.
On other platforms, we need the slash to indicate an
absolute pathname. On Windows, absolute paths start
with a drive letter.
*/
actual_path = real_path;
if ((actual_path[0] == '/') &&
actual_path[1] &&
(actual_path[2] == ':' || actual_path[2] == '|'))
{
actual_path[2] = ':';
actual_path++;
}
/* change path separators from '/' to '\\' for Windows and OS/2 */
for (i=0; actual_path[i] != '\0'; ++i)
if (actual_path[i] == '/')
@@ -117,9 +140,9 @@ CURLcode Curl_file_connect(struct connectdata *conn)
fd = open(actual_path, O_RDONLY | O_BINARY); /* no CR/LF translation! */
#else
fd = open(actual_path, O_RDONLY);
fd = open(real_path, O_RDONLY);
#endif
free(actual_path);
free(real_path);
if(fd == -1) {
failf(conn->data, "Couldn't open file %s", conn->path);
@@ -211,12 +234,4 @@ CURLcode Curl_file(struct connectdata *conn)
return res;
}
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/
#endif


@@ -8,7 +8,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms


@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -124,15 +124,12 @@ Content-Disposition: form-data; name="FILECONTENT"
#include "strequal.h"
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
/* Length of the random boundary string. The risk of this being used
in binary data is very close to zero, 64^32 makes
6277101735386680763835789423207666416102355444464034512896
combinations... */
#define BOUNDARY_LENGTH 32
/* Length of the random boundary string. */
#define BOUNDARY_LENGTH 40
/* What kind of Content-Type to use on un-specified files with unrecognized
extensions. */
@@ -520,7 +517,7 @@ static const char * ContentTypeForFilename (const char *filename,
{".jpg", "image/jpeg"},
{".jpeg", "image/jpeg"},
{".txt", "text/plain"},
{".html", "text/plain"}
{".html", "text/html"}
};
if(prevtype)
@@ -1049,22 +1046,23 @@ char *Curl_FormBoundary(void)
the same form won't be identical */
int i;
static char table62[]=
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
static char table16[]="abcdef0123456789";
retstring = (char *)malloc(BOUNDARY_LENGTH);
retstring = (char *)malloc(BOUNDARY_LENGTH+1);
if(!retstring)
return NULL; /* failed */
srand(time(NULL)+randomizer++); /* seed */
strcpy(retstring, "curl"); /* bonus commercials 8*) */
strcpy(retstring, "----------------------------");
for(i=4; i<(BOUNDARY_LENGTH-1); i++) {
retstring[i] = table62[rand()%62];
}
retstring[BOUNDARY_LENGTH-1]=0; /* zero terminate */
for(i=strlen(retstring); i<BOUNDARY_LENGTH; i++)
retstring[i] = table16[rand()%16];
/* 28 dashes and 12 hexadecimal digits makes 16^12 (281474976710656)
combinations */
retstring[BOUNDARY_LENGTH]=0; /* zero terminate */
return retstring;
}
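With this change the boundary is 40 characters: 28 dashes followed by 12 random hexadecimal digits. A self-contained sketch of the same construction (the function name, the plain srand() seeding and the use of memset are simplifications for illustration; the real code also mixes a per-call counter into the seed):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define BOUNDARY_LENGTH 40

/* Illustrative re-implementation (not the libcurl function itself) of the
   new boundary format: 28 dashes followed by 12 random hexadecimal
   characters, 40 bytes plus the terminating zero. */
static char *make_boundary(void)
{
  static const char table16[] = "abcdef0123456789";
  char *retstring = malloc(BOUNDARY_LENGTH + 1);
  int i;

  if(!retstring)
    return NULL;

  srand((unsigned int)time(NULL)); /* simplified seeding */
  memset(retstring, '-', 28);      /* 28 leading dashes */
  for(i = 28; i < BOUNDARY_LENGTH; i++)
    retstring[i] = table16[rand() % 16]; /* 12 random hex digits */
  retstring[BOUNDARY_LENGTH] = 0;  /* zero terminate */

  return retstring;
}

int main(void)
{
  char *b = make_boundary();
  if(b) {
    /* e.g. 28 dashes followed by something like a3f09c1d42be */
    printf("Content-Type: multipart/form-data; boundary=%s\n", b);
    free(b);
  }
  return 0;
}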
@@ -1596,11 +1594,3 @@ int main(int argc, char **argv)
#endif
#endif /* CURL_DISABLE_HTTP */
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
*/


@@ -8,7 +8,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms

lib/ftp.c

@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -94,17 +94,29 @@
#include <curl/mprintf.h>
/* The last #include file should be: */
#ifdef MALLOCDEBUG
#ifdef CURLDEBUG
#include "memdebug.h"
#endif
/* Local API functions */
static CURLcode ftp_sendquote(struct connectdata *conn, struct curl_slist *quote);
static CURLcode ftp_sendquote(struct connectdata *conn,
struct curl_slist *quote);
static CURLcode ftp_cwd(struct connectdata *conn, char *path);
static CURLcode ftp_mkd(struct connectdata *conn, char *path);
static CURLcode cwd_and_mkd(struct connectdata *conn, char *path);
/* easy-to-use macro: */
#define FTPSENDF(x,y,z) if((result = Curl_ftpsendf(x,y,z))) return result
static void freedirs(struct FTP *ftp)
{
int i;
for (i=0; ftp->dirs[i]; i++){
free(ftp->dirs[i]);
ftp->dirs[i]=NULL;
}
}
/***********************************************************************
*
* AllowServerConnect()
@@ -158,6 +170,7 @@ static CURLcode AllowServerConnect(struct SessionHandle *data,
infof(data, "Connection accepted from server\n");
conn->secondarysocket = s;
Curl_nonblock(s, TRUE); /* enable non-blocking */
}
break;
}
@@ -173,7 +186,7 @@ static CURLcode AllowServerConnect(struct SessionHandle *data,
* response and extract the relevant return code for the invoking function.
*/
CURLcode Curl_GetFTPResponse(int *nreadp, /* return number of bytes read */
CURLcode Curl_GetFTPResponse(ssize_t *nreadp, /* return number of bytes read */
struct connectdata *conn,
int *ftpcode) /* return the ftp-code */
{
@@ -237,7 +250,7 @@ CURLcode Curl_GetFTPResponse(int *nreadp, /* return number of bytes read */
if(!ftp->cache) {
readfd = rkeepfd; /* set every lap */
interval.tv_sec = timeout;
interval.tv_sec = 1; /* use 1 second timeout intervals */
interval.tv_usec = 0;
switch (select (sockfd+1, &readfd, NULL, NULL, &interval)) {
@@ -246,9 +259,10 @@ CURLcode Curl_GetFTPResponse(int *nreadp, /* return number of bytes read */
failf(data, "Transfer aborted due to select() error: %d", errno);
break;
case 0: /* timeout */
result = CURLE_OPERATION_TIMEDOUT;
failf(data, "Transfer aborted due to timeout");
break;
if(Curl_pgrsUpdate(conn))
return CURLE_ABORTED_BY_CALLBACK;
continue; /* just continue in our loop for the timeout duration */
default:
break;
}
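This hunk changes Curl_GetFTPResponse() to wake up from select() once per second instead of sleeping for the whole response timeout, so the progress meter keeps updating and the progress callback can abort a stalled control connection. A minimal sketch of that wait pattern, with a hypothetical progress_update() standing in for Curl_pgrsUpdate():

#include <stdio.h>
#include <sys/select.h>
#include <sys/time.h>
#include <unistd.h>

/* Hypothetical stand-in for Curl_pgrsUpdate(): return non-zero to abort. */
static int progress_update(void)
{
  fputs("still waiting...\n", stderr);
  return 0;
}

/* Wait for readability on 'sockfd' for at most 'timeout' seconds, waking
   up once per second to run the progress callback.
   Returns 1 when readable, 0 on overall timeout, -1 on error/abort. */
static int wait_readable(int sockfd, int timeout)
{
  while(timeout-- > 0) {
    fd_set readfd;
    struct timeval interval;

    FD_ZERO(&readfd);
    FD_SET(sockfd, &readfd);
    interval.tv_sec = 1;   /* 1 second intervals, as in the patch */
    interval.tv_usec = 0;

    switch(select(sockfd + 1, &readfd, NULL, NULL, &interval)) {
    case -1:
      return -1;           /* select() error */
    case 0:
      if(progress_update())
        return -1;         /* callback asked us to abort */
      continue;            /* keep looping until the total timeout */
    default:
      return 1;            /* data is ready to be read */
    }
  }
  return 0;
}

int main(void)
{
  /* demo: wait up to 5 seconds for something to be typed on stdin */
  int rc = wait_readable(STDIN_FILENO, 5);
  printf("wait_readable() returned %d\n", rc);
  return 0;
}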
@@ -387,7 +401,7 @@ CURLcode Curl_GetFTPResponse(int *nreadp, /* return number of bytes read */
CURLcode Curl_ftp_connect(struct connectdata *conn)
{
/* this is FTP and no proxy */
int nread;
ssize_t nread;
struct SessionHandle *data=conn->data;
char *buf = data->state.buffer; /* this is our buffer */
struct FTP *ftp;
@@ -407,9 +421,9 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
/* get some initial data into the ftp struct */
ftp->bytecountp = &conn->bytecount;
/* no need to duplicate them, the data struct won't change */
ftp->user = data->state.user;
ftp->passwd = data->state.passwd;
/* no need to duplicate them, this connectdata struct won't change */
ftp->user = conn->user;
ftp->passwd = conn->passwd;
ftp->response_time = 3600; /* set default response time-out */
if (data->set.tunnel_thru_httpproxy) {
@@ -510,7 +524,7 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
/* we may need to issue a KAUTH here to have access to the files
* do it if user supplied a password
*/
if(data->state.passwd && *data->state.passwd) {
if(conn->passwd && *conn->passwd) {
result = Curl_krb_kauth(conn);
if(result)
return result;
@@ -596,6 +610,14 @@ CURLcode Curl_ftp_done(struct connectdata *conn)
int ftpcode;
CURLcode result=CURLE_OK;
/* free the dir tree parts */
freedirs(ftp);
if(ftp->file) {
free(ftp->file);
ftp->file = NULL;
}
if(data->set.upload) {
if((-1 != data->set.infilesize) &&
(data->set.infilesize != *ftp->bytecountp) &&
@@ -707,33 +729,6 @@ CURLcode ftp_sendquote(struct connectdata *conn, struct curl_slist *quote)
return CURLE_OK;
}
/***********************************************************************
*
* ftp_cwd()
*
* Send 'CWD' to the remote server to Change Working Directory.
* It is the ftp version of the unix 'cd' command.
*/
static
CURLcode ftp_cwd(struct connectdata *conn, char *path)
{
ssize_t nread;
int ftpcode;
CURLcode result;
FTPSENDF(conn, "CWD %s", path);
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
if (result)
return result;
if (ftpcode != 250) {
failf(conn->data, "Couldn't cd to %s", path);
return CURLE_FTP_ACCESS_DENIED;
}
return CURLE_OK;
}
/***********************************************************************
*
* ftp_getfiletime()
@@ -933,6 +928,7 @@ ftp_pasv_verbose(struct connectdata *conn,
# endif
# else
(void)hostent_buf; /* avoid compiler warning */
answer = gethostbyaddr((char *) &address, sizeof(address), AF_INET);
# endif
#else
@@ -961,7 +957,7 @@ ftp_pasv_verbose(struct connectdata *conn,
#else
const int niflags = NI_NUMERICHOST | NI_NUMERICSERV;
#endif
port = 0; /* unused, prevent warning */
(void)port; /* prevent compiler warning */
if (getnameinfo(addr->ai_addr, addr->ai_addrlen,
nbuf, sizeof(nbuf), sbuf, sizeof(sbuf), niflags)) {
snprintf(nbuf, sizeof(nbuf), "?");
@@ -1076,7 +1072,8 @@ CURLcode ftp_use_port(struct connectdata *conn)
return CURLE_FTP_PORT_FAILED;
}
for (modep = (char **)mode; modep && *modep; modep++) {
for (modep = (char **)(data->set.ftp_use_eprt?&mode[0]:&mode[2]);
modep && *modep; modep++) {
int lprtaf, eprtaf;
int alen=0, plen=0;
@@ -1177,7 +1174,6 @@ CURLcode ftp_use_port(struct connectdata *conn)
return result;
if (ftpcode != 200) {
failf(data, "Server does not grok %s", *modep);
continue;
}
else
@@ -1186,6 +1182,7 @@ CURLcode ftp_use_port(struct connectdata *conn)
if (!*modep) {
sclose(portsock);
failf(data, "PORT command attempts failed");
return CURLE_FTP_PORT_FAILED;
}
/* we set the secondary socket variable to this for now, it
@@ -1206,13 +1203,25 @@ CURLcode ftp_use_port(struct connectdata *conn)
bool sa_filled_in = FALSE;
if(data->set.ftpport) {
if(Curl_if2ip(data->set.ftpport, myhost, sizeof(myhost))) {
h = Curl_resolv(data, myhost, 0);
in_addr_t in;
int rc;
/* First check if the given name is an IP address */
in=inet_addr(data->set.ftpport);
if((in == CURL_INADDR_NONE) &&
Curl_if2ip(data->set.ftpport, myhost, sizeof(myhost))) {
rc = Curl_resolv(conn, myhost, 0, &h);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &h);
}
else {
int len = strlen(data->set.ftpport);
if(len>1)
h = Curl_resolv(data, data->set.ftpport, 0);
if(len>1) {
rc = Curl_resolv(conn, data->set.ftpport, 0, &h);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &h);
}
if(h)
strcpy(myhost, data->set.ftpport); /* buffer overflow risk */
}
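The resolver hunks in this file all follow the same new calling convention: Curl_resolv() now takes the connectdata pointer and an output argument, and a return value of 1 apparently means the lookup is still pending, in which case Curl_wait_for_resolv() blocks until the entry is available. A schematic of that pattern with stand-in functions (the names below are illustrative stubs, not the real libcurl internals):

#include <stdio.h>

struct dns_entry { const char *name; };  /* stand-in for Curl_dns_entry */

static struct dns_entry cached = { "ftp.example.com" };

/* Illustrative stubs: start_resolve() returns 0 when the entry is already
   available, 1 when the lookup was started asynchronously, -1 on error. */
static int start_resolve(const char *name, struct dns_entry **out)
{
  (void)name;
  *out = NULL;
  return 1;                 /* pretend the lookup is still in progress */
}

static int wait_for_resolve(struct dns_entry **out)
{
  *out = &cached;           /* pretend the lookup finished now */
  return 0;
}

int main(void)
{
  struct dns_entry *h = NULL;
  int rc = start_resolve("ftp.example.com", &h);

  if(rc == 1)               /* lookup pending: block until it completes */
    rc = wait_for_resolve(&h);

  if(rc == 0 && h)
    printf("resolved: %s\n", h->name);
  else
    printf("resolution failed\n");
  return 0;
}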
@@ -1233,7 +1242,7 @@ CURLcode ftp_use_port(struct connectdata *conn)
if(h)
/* when we return from here, we can forget about this */
Curl_resolv_unlock(h);
Curl_resolv_unlock(data, h);
if ( h || sa_filled_in) {
if( (portsock = socket(AF_INET, SOCK_STREAM, 0)) >= 0 ) {
@@ -1351,6 +1360,7 @@ CURLcode ftp_use_pasv(struct connectdata *conn,
CURLcode result;
struct Curl_dns_entry *addr=NULL;
Curl_ipconnect *conninfo;
int rc;
/*
Here's the executive summary on what to do:
@@ -1475,14 +1485,20 @@ CURLcode ftp_use_pasv(struct connectdata *conn,
* We don't want to rely on a former host lookup that might've expired
* now, instead we remake the lookup here and now!
*/
addr = Curl_resolv(data, conn->proxyhost, conn->port);
rc = Curl_resolv(conn, conn->proxyhost, conn->port, &addr);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &addr);
connectport =
(unsigned short)conn->port; /* we connect to the proxy's port */
}
else {
/* normal, direct, ftp connection */
addr = Curl_resolv(data, newhostp, newport);
rc = Curl_resolv(conn, newhostp, newport, &addr);
if(rc == 1)
rc = Curl_wait_for_resolv(conn, &addr);
if(!addr) {
failf(data, "Can't resolve new host %s:%d", newhostp, newport);
return CURLE_FTP_CANT_GET_HOST;
@@ -1497,7 +1513,7 @@ CURLcode ftp_use_pasv(struct connectdata *conn,
&conninfo,
connected);
Curl_resolv_unlock(addr); /* we're done using this address */
Curl_resolv_unlock(data, addr); /* we're done using this address */
/*
* When this is used from the multi interface, this might've returned with
@@ -1741,7 +1757,7 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
if(result)
return result;
/* Send any PREQUOTE strings after transfer type is set? (Wesley Laxton)*/
/* Send any PREQUOTE strings after transfer type is set? */
if(data->set.prequote) {
if ((result = ftp_sendquote(conn, data->set.prequote)) != CURLE_OK)
return result;
@@ -1854,9 +1870,19 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
int size=-1; /* default unknown size */
/*
* It appears that there are FTP-servers that return size 0 for files
* when SIZE is used on the file while being in BINARY mode. To work
* around that (stupid) behavior, we attempt to parse the RETR response
* even if the SIZE returned size zero.
*
* Debugging help from Salvatore Sorrentino on February 26, 2003.
*/
if(!dirlist &&
!data->set.ftp_ascii &&
(-1 == downloadsize)) {
(downloadsize < 1)) {
/*
* It seems directory listings either don't show the size or very
* often uses size 0 anyway. ASCII transfers may very well turn out
@@ -1908,8 +1934,14 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
return result;
}
else {
failf(data, "%s", buf+4);
return CURLE_FTP_COULDNT_RETR_FILE;
if(dirlist && (ftpcode == 450)) {
/* simply no matching files */
ftp->no_transfer = TRUE; /* don't think we should download anything */
}
else {
failf(data, "%s", buf+4);
return CURLE_FTP_COULDNT_RETR_FILE;
}
}
}
@@ -1944,26 +1976,61 @@ CURLcode ftp_perform(struct connectdata *conn,
if ((result = ftp_sendquote(conn, data->set.quote)) != CURLE_OK)
return result;
}
/* This is a re-used connection. Since we change directory to where the
transfer is taking place, we must now get back to the original dir
where we ended up after login: */
if (conn->bits.reuse && ftp->entrypath) {
if ((result = ftp_cwd(conn, ftp->entrypath)) != CURLE_OK)
if ((result = cwd_and_mkd(conn, ftp->entrypath)) != CURLE_OK)
return result;
}
/* change directory first! */
if(ftp->dir && ftp->dir[0]) {
if ((result = ftp_cwd(conn, ftp->dir)) != CURLE_OK)
{
int i; /* counter for loop */
for (i=0; ftp->dirs[i]; i++) {
/* RFC 1738 says empty components should be respected too, but
that is plain stupid since CWD can't be used with an empty argument */
if ((result = cwd_and_mkd(conn, ftp->dirs[i])) != CURLE_OK)
return result;
}
}
/* Requested time of file? */
if(data->set.get_filetime && ftp->file) {
/* Requested time of file or time-depended transfer? */
if((data->set.get_filetime || data->set.timecondition) &&
ftp->file) {
result = ftp_getfiletime(conn, ftp->file);
if(result)
return result;
switch( result )
{
case CURLE_FTP_COULDNT_RETR_FILE:
case CURLE_OK:
if(data->set.timecondition) {
if((data->info.filetime > 0) && (data->set.timevalue > 0)) {
switch(data->set.timecondition) {
case TIMECOND_IFMODSINCE:
default:
if(data->info.filetime < data->set.timevalue) {
infof(data, "The requested document is not new enough\n");
ftp->no_transfer = TRUE; /* mark this to not transfer data */
return CURLE_OK;
}
break;
case TIMECOND_IFUNMODSINCE:
if(data->info.filetime > data->set.timevalue) {
infof(data, "The requested document is not old enough\n");
ftp->no_transfer = TRUE; /* mark this to not transfer data */
return CURLE_OK;
}
break;
} /* switch */
}
else {
infof(data, "Skipping time comparison\n");
}
}
break;
default:
return result;
} /* switch */
}
/* If we have selected NOBODY and HEADER, it means that we only want file
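The new time-condition branch skips the transfer when the remote file's timestamp does not satisfy TIMECOND_IFMODSINCE or TIMECOND_IFUNMODSINCE. A small standalone sketch of just that comparison (the function and the sample timestamps are made up for illustration):

#include <stdio.h>
#include <time.h>

typedef enum {
  TIMECOND_IFMODSINCE,
  TIMECOND_IFUNMODSINCE
} timecond;

/* Illustrative version of the comparison added in the hunk above:
   return 1 when the transfer should go ahead, 0 when it should be
   skipped because the time condition is not met. */
static int should_transfer(timecond cond, time_t filetime, time_t timevalue)
{
  if(filetime <= 0 || timevalue <= 0)
    return 1;  /* no usable timestamps: skip the comparison, transfer */

  if(cond == TIMECOND_IFUNMODSINCE)
    return filetime <= timevalue;  /* only if NOT modified since timevalue */
  return filetime >= timevalue;    /* TIMECOND_IFMODSINCE (the default) */
}

int main(void)
{
  time_t doc = 1050000000;   /* remote file's mtime (example) */
  time_t ref = 1060000000;   /* user-supplied reference time (example) */

  printf("if-modified-since: %s\n",
         should_transfer(TIMECOND_IFMODSINCE, doc, ref)
         ? "transfer" : "skip (not new enough)");
  printf("if-unmodified-since: %s\n",
         should_transfer(TIMECOND_IFUNMODSINCE, doc, ref)
         ? "transfer" : "skip (not old enough)");
  return 0;
}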
@@ -1974,6 +2041,8 @@ CURLcode ftp_perform(struct connectdata *conn,
may not support it! It is however the only way we have to get a file's
size! */
ssize_t filesize;
ssize_t nread;
int ftpcode;
ftp->no_transfer = TRUE; /* this means no actual transfer is made */
@@ -1993,6 +2062,18 @@ CURLcode ftp_perform(struct connectdata *conn,
return result;
}
/* Determine if server can respond to REST command and therefore
whether it can do a range */
FTPSENDF(conn, "REST 0", NULL);
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
if ((CURLE_OK == result) && (ftpcode == 350)) {
result = Curl_client_write(data, CLIENTWRITE_BOTH,
(char *)"Accept-ranges: bytes\r\n", 0);
if(result)
return result;
}
/* If we asked for a time of the file and we actually got one as
well, we "emulate" a HTTP-style header in our output. */
@@ -2001,12 +2082,12 @@ CURLcode ftp_perform(struct connectdata *conn,
struct tm *tm;
#ifdef HAVE_LOCALTIME_R
struct tm buffer;
tm = (struct tm *)localtime_r(&data->info.filetime, &buffer);
tm = (struct tm *)localtime_r((time_t *)&data->info.filetime, &buffer);
#else
tm = localtime((unsigned long *)&data->info.filetime);
tm = localtime((time_t *)&data->info.filetime);
#endif
/* format: "Tue, 15 Nov 1994 12:45:26 GMT" */
strftime(buf, BUFSIZE-1, "Last-Modified: %a, %d %b %Y %H:%M:%S %Z\r\n",
strftime(buf, BUFSIZE-1, "Last-Modified: %a, %d %b %Y %H:%M:%S GMT\r\n",
tm);
result = Curl_client_write(data, CLIENTWRITE_BOTH, buf, 0);
if(result)
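The hunk above hard-codes "GMT" into the emulated Last-Modified header instead of relying on %Z. As an aside, one generic way to produce such a header with the timestamp actually rendered in UTC is to format the result of gmtime(); this is a plain C sketch, not what the patch itself does:

#include <stdio.h>
#include <time.h>

int main(void)
{
  char buf[128];
  time_t filetime = 1060934400;          /* example timestamp */
  struct tm *tm = gmtime(&filetime);     /* break the time down in UTC */

  /* format: "Tue, 15 Nov 1994 12:45:26 GMT" */
  if(tm && strftime(buf, sizeof(buf),
                    "Last-Modified: %a, %d %b %Y %H:%M:%S GMT\r\n", tm))
    fputs(buf, stdout);
  return 0;
}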
@@ -2033,7 +2114,7 @@ CURLcode ftp_perform(struct connectdata *conn,
else {
/* We have chosen (this is default) to use the PASV command */
result = ftp_use_pasv(conn, connected);
if(connected)
if(!result && *connected)
infof(data, "Connected the data stream with PASV!\n");
}
@@ -2051,33 +2132,71 @@ CURLcode ftp_perform(struct connectdata *conn,
*/
CURLcode Curl_ftp(struct connectdata *conn)
{
CURLcode retcode;
bool connected;
CURLcode retcode=CURLE_OK;
bool connected=0;
struct SessionHandle *data = conn->data;
struct FTP *ftp;
int dirlength=0; /* 0 forces strlen() */
char *slash_pos; /* position of the first '/' char in curpos */
char *cur_pos=conn->ppath; /* current position in ppath. point at the begin
of next path component */
int path_part=0;/* current path component */
/* the ftp struct is already inited in ftp_connect() */
ftp = conn->proto.ftp;
conn->size = -1; /* make sure this is unknown at this point */
/* We split the path into dir and file parts *before* we URLdecode
it */
ftp->file = strrchr(conn->ppath, '/');
if(ftp->file) {
if(ftp->file != conn->ppath)
dirlength=ftp->file-conn->ppath; /* don't count the trailing slash */
Curl_pgrsSetUploadCounter(data, 0);
Curl_pgrsSetDownloadCounter(data, 0);
Curl_pgrsSetUploadSize(data, 0);
Curl_pgrsSetDownloadSize(data, 0);
ftp->file++; /* point to the first letter in the file name part or
remain NULL */
}
else {
ftp->file = conn->ppath; /* there's only a file part */
/* ftp->dirs[] is initialized to all-NULL in Curl_ftp_connect() */
/* parse the URL path into separate path components */
while((slash_pos=strchr(cur_pos, '/'))) {
/* 1 or 0 to indicate absolute directory */
bool absolute_dir = (cur_pos - conn->ppath > 0) && (path_part == 0);
/* seek out the next path component */
if (slash_pos-cur_pos) {
/* we skip empty path components, like "x//y" since the FTP command CWD
requires a parameter and a non-existent parameter a) doesn't work on
many servers and b) has no effect on the others. */
ftp->dirs[path_part] = curl_unescape(cur_pos - absolute_dir,
slash_pos - cur_pos + absolute_dir);
if (!ftp->dirs[path_part]) { /* run out of memory ... */
failf(data, "no memory");
freedirs(ftp);
return CURLE_OUT_OF_MEMORY;
}
}
else {
cur_pos = slash_pos + 1; /* jump to the rest of the string */
continue;
}
if(!retcode) {
cur_pos = slash_pos + 1; /* jump to the rest of the string */
if(++path_part >= (CURL_MAX_FTP_DIRDEPTH-1)) {
/* too deep, we need the last entry to be kept NULL at all
times to signal end of list */
failf(data, "too deep dir hierarchy");
freedirs(ftp);
return CURLE_URL_MALFORMAT;
}
}
}
ftp->file = cur_pos; /* the rest is the file name */
if(*ftp->file) {
ftp->file = curl_unescape(ftp->file, 0);
if(NULL == ftp->file) {
freedirs(ftp);
failf(data, "no memory");
return CURLE_OUT_OF_MEMORY;
}
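The loop above splits conn->ppath into one CWD target per path component, skipping empty components and unescaping each one. A simplified, standalone version of that splitting (no URL-unescaping, no absolute-path special case; names and the sample path are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_DIRDEPTH 100

/* Simplified sketch of the splitting done above: cut the path at every
   '/', skip empty components ("x//y"), and treat whatever follows the
   last slash as the file name. URL-unescaping and the absolute-path
   handling of the real code are left out. */
static int split_path(const char *path, char *dirs[], const char **file)
{
  const char *cur = path;
  const char *slash;
  int part = 0;

  while((slash = strchr(cur, '/'))) {
    if(slash != cur) {                  /* skip empty components */
      size_t len = (size_t)(slash - cur);
      char *comp = (part < MAX_DIRDEPTH - 1) ? malloc(len + 1) : NULL;
      if(!comp) {                       /* too deep, or out of memory */
        while(part > 0)
          free(dirs[--part]);
        return -1;
      }
      memcpy(comp, cur, len);
      comp[len] = 0;
      dirs[part++] = comp;
    }
    cur = slash + 1;                    /* continue after the slash */
  }
  dirs[part] = NULL;                    /* NULL-terminate, like ftp->dirs */
  *file = *cur ? cur : NULL;            /* the rest is the file name */
  return 0;
}

int main(void)
{
  char *dirs[MAX_DIRDEPTH];
  const char *file = NULL;
  int i;

  if(split_path("pub//curl/curl-7.10.7.tar.gz", dirs, &file) == 0) {
    for(i = 0; dirs[i]; i++) {
      printf("CWD %s\n", dirs[i]);      /* one CWD per path component */
      free(dirs[i]);
    }
    if(file)
      printf("RETR %s\n", file);
  }
  return 0;
}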
@@ -2085,26 +2204,23 @@ CURLcode Curl_ftp(struct connectdata *conn)
else
ftp->file=NULL; /* instead of point to a zero byte, we make it a NULL
pointer */
ftp->urlpath = conn->ppath;
if(dirlength) {
ftp->dir = curl_unescape(ftp->urlpath, dirlength);
if(NULL == ftp->dir) {
if(ftp->file)
free(ftp->file);
failf(data, "no memory");
return CURLE_OUT_OF_MEMORY; /* failure */
}
}
else
ftp->dir = NULL;
retcode = ftp_perform(conn, &connected);
if(CURLE_OK == retcode) {
if(connected)
retcode = Curl_ftp_nextconnect(conn);
else
if(retcode && (conn->secondarysocket >= 0)) {
/* Failure detected, close the second socket if it was created already */
sclose(conn->secondarysocket);
conn->secondarysocket = -1;
}
if(ftp->no_transfer)
/* no data to transfer */
retcode=Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
else if(!connected)
/* since we didn't connect now, we want do_more to get called */
conn->bits.do_more = TRUE;
}
@@ -2176,24 +2292,112 @@ CURLcode Curl_ftp_disconnect(struct connectdata *conn)
if(ftp) {
if(ftp->entrypath)
free(ftp->entrypath);
if(ftp->cache)
if(ftp->cache) {
free(ftp->cache);
if(ftp->file)
ftp->cache = NULL;
}
if(ftp->file) {
free(ftp->file);
if(ftp->dir)
free(ftp->dir);
ftp->file = ftp->dir = NULL; /* zero */
ftp->file = NULL; /* zero */
}
freedirs(ftp);
}
return CURLE_OK;
}
/*
* local variables:
* eval: (load-file "../curl-mode.el")
* end:
* vim600: fdm=marker
* vim: et sw=2 ts=2 sts=2 tw=78
/***********************************************************************
*
* ftp_mkd()
*
* Makes a directory on the FTP server.
*
* Calls failf()
*/
CURLcode ftp_mkd(struct connectdata *conn, char *path)
{
CURLcode result=CURLE_OK;
int ftpcode; /* for ftp status */
ssize_t nread;
/* Create a directory on the remote server */
FTPSENDF(conn, "MKD %s", path);
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
if(result)
return result;
switch(ftpcode) {
case 257:
/* success! */
infof( conn->data , "Created remote directory %s\n" , path );
break;
case 550:
failf(conn->data, "Permission denied to make directory %s", path);
result = CURLE_FTP_ACCESS_DENIED;
break;
default:
failf(conn->data, "unrecognized MKD response: %d", ftpcode );
result = CURLE_FTP_ACCESS_DENIED;
break;
}
return result;
}
/***********************************************************************
*
* ftp_cwd()
*
* Send 'CWD' to the remote server to Change Working Directory. It is the ftp
* version of the unix 'cd' command. This function is only called from the
* cwd_and_mkd() function these days.
*
* This function does NOT call failf().
*/
static
CURLcode ftp_cwd(struct connectdata *conn, char *path)
{
ssize_t nread;
int ftpcode;
CURLcode result;
FTPSENDF(conn, "CWD %s", path);
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
if (!result) {
/* According to RFC959, CWD is supposed to return 250 on success, but
there seem to be non-compliant FTP servers out there that return 200,
so we accept any '2xy' code here. */
if (ftpcode/100 != 2)
result = CURLE_FTP_ACCESS_DENIED;
}
return result;
}
/***********************************************************************
*
* cwd_and_mkd()
*
* Change to the given directory. If the directory is not present, and we
* have been told to allow it, then create the directory and cd to it.
*
*/
static CURLcode cwd_and_mkd(struct connectdata *conn, char *path)
{
CURLcode result;
result = ftp_cwd(conn, path);
if (result) {
if(conn->data->set.ftp_create_missing_dirs) {
result = ftp_mkd(conn, path);
if (result)
/* ftp_mkd() calls failf() itself */
return result;
result = ftp_cwd(conn, path);
}
if(result)
failf(conn->data, "Couldn't cd to %s", path);
}
return result;
}
#endif /* CURL_DISABLE_FTP */


@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
* Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -29,7 +29,7 @@ CURLcode Curl_ftp_done(struct connectdata *conn);
CURLcode Curl_ftp_connect(struct connectdata *conn);
CURLcode Curl_ftp_disconnect(struct connectdata *conn);
CURLcode Curl_ftpsendf(struct connectdata *, const char *fmt, ...);
CURLcode Curl_GetFTPResponse(int *nread, struct connectdata *conn,
CURLcode Curl_GetFTPResponse(ssize_t *nread, struct connectdata *conn,
int *ftpcode);
CURLcode Curl_ftp_nextconnect(struct connectdata *conn);
#endif


@@ -195,7 +195,7 @@ typedef enum _MERIDIAN {
} MERIDIAN;
/* parse results and input string */
typedef struct _CONTEXT {
typedef struct _CURL_CONTEXT {
const char *yyInput;
int yyDayOrdinal;
int yyDayNumber;
@@ -218,14 +218,14 @@ typedef struct _CONTEXT {
int yyRelMonth;
int yyRelSeconds;
int yyRelYear;
} CONTEXT;
} CURL_CONTEXT;
/* enable use of extra argument to yyparse and yylex which can be used to pass
** in a user defined value (CONTEXT struct in our case)
** in a user defined value (CURL_CONTEXT struct in our case)
*/
#define YYPARSE_PARAM cookie
#define YYLEX_PARAM cookie
#define context ((CONTEXT *) cookie)
#define context ((CURL_CONTEXT *) cookie)
#line 215 "getdate.y"
typedef union {
@@ -397,7 +397,7 @@ static const short yycheck[] = { 0,
#define YYPURE 1
/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
#line 3 "/usr/local/share/bison.simple"
#line 3 "/usr/lib/bison.simple"
/* This file comes from bison-1.28. */
/* Skeleton output parser for bison,
@@ -611,7 +611,7 @@ __yy_memcpy (char *to, char *from, unsigned int count)
#endif
#endif
#line 217 "/usr/local/share/bison.simple"
#line 217 "/usr/lib/bison.simple"
/* The user can define YYPARSE_PARAM as the name of an argument to be passed
into yyparse. The argument should have type void *.
@@ -1303,7 +1303,7 @@ case 50:
break;}
}
/* the action file gets copied in in place of this dollarsign */
#line 543 "/usr/local/share/bison.simple"
#line 543 "/usr/lib/bison.simple"
yyvsp -= yylen;
yyssp -= yylen;
@@ -1988,7 +1988,7 @@ curl_getdate (const char *p, const time_t *now)
{
struct tm tm, tm0, *tmp;
time_t Start;
CONTEXT cookie;
CURL_CONTEXT cookie;
#ifdef HAVE_LOCALTIME_R
struct tm keeptime;
#endif


@@ -171,7 +171,7 @@ typedef enum _MERIDIAN {
} MERIDIAN;
/* parse results and input string */
typedef struct _CONTEXT {
typedef struct _CURL_CONTEXT {
const char *yyInput;
int yyDayOrdinal;
int yyDayNumber;
@@ -194,14 +194,14 @@ typedef struct _CONTEXT {
int yyRelMonth;
int yyRelSeconds;
int yyRelYear;
} CONTEXT;
} CURL_CONTEXT;
/* enable use of extra argument to yyparse and yylex which can be used to pass
** in a user defined value (CONTEXT struct in our case)
** in a user defined value (CURL_CONTEXT struct in our case)
*/
#define YYPARSE_PARAM cookie
#define YYLEX_PARAM cookie
#define context ((CONTEXT *) cookie)
#define context ((CURL_CONTEXT *) cookie)
%}
/* This grammar has 13 shift/reduce conflicts. */
@@ -944,7 +944,7 @@ curl_getdate (const char *p, const time_t *now)
{
struct tm tm, tm0, *tmp;
time_t Start;
CONTEXT cookie;
CURL_CONTEXT cookie;
#ifdef HAVE_LOCALTIME_R
struct tm keeptime;
#endif

Some files were not shown because too many files have changed in this diff.