Compare commits

144 commits
curl-7_10_
...
curl-7_10_

SHA1
5c2df3e1a4
6fc55467f4
a147a07956
a10581d459
cc2d6942bb
3974c02bb1
09b5ddaea5
acbcd68d89
4281470fca
68a4aa6773
905b160097
52596c339b
73500267ee
e6011e33a6
3454319c17
02c78ecf81
caca034302
fb366ed35f
b352ffca15
2d94856efd
ae66bd1284
89d9d4e6c8
fe60fc4730
46690d5e1c
beaea8cb25
409ec90c85
4d423eeabe
019e612225
6550d271f0
c46da65263
b46745759b
9687571a84
c13236de25
8ffbb6acd4
a3e5d81765
e2aecfe80f
a3c1248214
b933639222
27619fc450
96fecba190
50257d4f50
3eb4ae031c
6a4ec3be81
cc9ac6ad14
644990a835
d3b81ea3f7
3660f67534
203cc4a5c3
c7be232fee
2617b379be
84ed5e755a
2f17615790
acfa131c8c
793d0e27e1
fdf0c443c3
1b39b53321
1679993e3b
4c831f8b68
7a19923afa
3e122a765d
d873ba8c9f
8093338f39
07660eea1e
a2b2d4cd5c
96e217b496
2dd1518d63
168703b7bf
0f2d680f1f
b7930b6ebd
8fa43b469a
894e52f61a
3c294691aa
acbf932861
26f5c53be8
8dd069604c
5dadbd094e
514a8739b6
12e78a082e
9273096a8a
686c6133f8
1d1276cc3a
d987676ef0
6e4658c89d
b7cbcf7434
e347d06a49
2077e9365a
6e3adc9b14
7954eee639
f9f1f0e316
a9afe6aa84
6d36796135
9e81fd5703
609059b6ec
6af73f417a
32468a0072
6800c45104
0d8c754ffd
1b80276496
bf9a138276
b3f9c636b9
18975d44a6
b201db5cec
bbe23945e4
bbdc0394ff
38a9b14965
77ba0d3686
065c8d7a95
c704d1545c
62b65a5f20
665a7a3848
256b9f31e1
a3037e1173
f3e7a5d755
5f0cba7775
673759fe7e
b73612392d
f85935f0f9
1e7e53c87e
b9fdf3cc3b
c462601362
859877dcfc
c04ce95106
98ee12bc35
fdda786fa2
831be4f4dd
41ae97e710
f72ba7f79d
296046510b
db9f87f697
3270ea55dd
a358ac24f4
8bedd43b28
9ea2087ede
9f7c634133
da20d68a12
d3e512c738
339f84fe1f
2d41b735ec
e3b4dd08ff
6809a906bb
1c35cbcc07
5f8989a436
aa7b0648ff
2fbe61960f
CHANGES (165)
@@ -6,6 +6,171 @@
Changelog

Version 7.10.7 (15 August 2003)

Daniel (14 August)
- I modified the memdebug system to return failure on memory allocation
  functions after a set amount of successful ones. This enables us to test
  out-of-memory situations in a controlled manner and we can make sure that
  curl/libcurl behaves well in those.

  This made me find and fix several spots where we did not clean up properly
  when bailing out due to errors (low memory).
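
The idea behind that memdebug change is easy to picture with a tiny allocation wrapper. The sketch below is only an illustration of "fail after N successful allocations"; the function and variable names are invented for the example and are not the real lib/memdebug.c code.

```c
#include <stdlib.h>

/* Hypothetical illustration, not curl's memdebug implementation. */
static long alloc_budget = -1;   /* -1 means "never fail on purpose" */
static long alloc_count = 0;

void test_set_alloc_budget(long n) { alloc_budget = n; alloc_count = 0; }

void *test_malloc(size_t size)
{
  if(alloc_budget >= 0 && alloc_count >= alloc_budget)
    return NULL;                 /* simulate an out-of-memory condition */
  alloc_count++;
  return malloc(size);
}
```
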
- Corrected test case 74. Made using -o with bad #[num] codes complain and
  bail out. Made #[num] support numbers larger than 9 as well. Added test
  case 86 for a proper range globbing test as well.

Version 7.10.7-pre4 (12 August 2003)

Daniel (12 August)
- curl_version_info() now returns a flag if libcurl was built with asynch DNS
  support, and this is now also displayed with 'curl -V'.
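
A minimal sketch of how an application can test for that flag at run time; CURL_VERSION_ASYNCHDNS is the feature bit libcurl uses for this.

```c
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);

  if(info->features & CURL_VERSION_ASYNCHDNS)
    printf("libcurl %s was built with asynchronous DNS support\n",
           info->version);
  else
    printf("libcurl %s resolves names synchronously\n", info->version);
  return 0;
}
```
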
- Added a few new man pages to the docs/libcurl dir: curl_share_init,
  curl_share_setopt, curl_share_cleanup, libcurl-easy and libcurl-share.
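
Since the share interface now has its own man pages, here is a small, hedged example of the documented curl_share_* calls, sharing cookie data between two easy handles. It is a generic illustration (the URLs are placeholders), not code from this release.

```c
#include <curl/curl.h>

int main(void)
{
  CURLSH *share = curl_share_init();
  CURL *first, *second;

  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);

  first = curl_easy_init();
  curl_easy_setopt(first, CURLOPT_URL, "http://example.com/login");
  curl_easy_setopt(first, CURLOPT_COOKIEFILE, "");  /* enable cookie engine */
  curl_easy_setopt(first, CURLOPT_SHARE, share);
  curl_easy_perform(first);      /* cookies received here ... */

  second = curl_easy_init();
  curl_easy_setopt(second, CURLOPT_URL, "http://example.com/account");
  curl_easy_setopt(second, CURLOPT_SHARE, share);
  curl_easy_perform(second);     /* ... are reused by this handle */

  curl_easy_cleanup(first);
  curl_easy_cleanup(second);
  curl_share_cleanup(share);
  return 0;
}
```
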
Daniel (11 August)
- Mike Cherepov made the local binding code work for Windows, which makes
  the option CURLOPT_INTERFACE work on Windows as well.

- Vincent Sanders updated the fopen.c example code a lot.

- --proxy-ntlm is now supported by the curl tool. It forces the proxy
  authentication to be made using NTLM. It does not yet work for HTTPS over
  proxies (or other proxy-tunneling options). Test cases 81 and 82 do some
  simple initial NTLM testing.

- Found and fixed a minor memory leak on re-used connections with
  proxy authentication.

- I removed -@ and -Z as valid short options. They were very rarely used (@
  wasn't even documented).

- Serge Semashko introduced CURLOPT_PROXYAUTH, and made it work when set to
  CURLAUTH_NTLM and/or CURLAUTH_BASIC. CURLOPT_PROXYAUTH is similar to
  CURLOPT_HTTPAUTH, but applies to the proxy connection only, while
  CURLOPT_HTTPAUTH is for the remote host.

- Fixed loading of cookies with blank contents from a cookie jar. Also made the
  cookie functions inform on added and skipped cookies (for cookie debugging).
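
A hedged sketch of the new proxy authentication option in use; the proxy host and credentials below are placeholders.

```c
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();

  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
    curl_easy_setopt(curl, CURLOPT_PROXY, "proxy.example.com:8080");  /* placeholder */
    curl_easy_setopt(curl, CURLOPT_PROXYUSERPWD, "user:secret");      /* placeholder */
    curl_easy_setopt(curl, CURLOPT_PROXYAUTH, CURLAUTH_NTLM | CURLAUTH_BASIC);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
```
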

Version 7.10.7-pre3 (8 August 2003)

Daniel (8 August)
- Applied David Byron's fix for file:// URLs with drive letters included.

- I added the --ftp-create-dirs option to the client code, which activates
  Early's CURLOPT_FTP_CREATE_MISSING_DIRS option, and wrote test case 147 to
  verify it. Added the option to the curl.1 and curl_easy_setopt.3 man pages
  as well.

Daniel (7 August)
- Test case 60 failed on ia64 and AMD Opteron. Fixed now.

- Fixed a printf problem that resulted in URL globbing bugs (bug #203827 in the
  Debian bug tracker). Added test case 74 to verify the fix and to discover if
  this breaks in the future.

- "make distcheck" works again.

Version 7.10.7-pre2 (6 August 2003)

Daniel (5 August)
- Duncan Wilcox helped me verify that the latest incarnation of my ares patch
  builds fine on Mac OS X (see the new lib/README.ares file for all details).

- Salvatore Sorrentino filed bug report #783116 and Early Ehlinger posted a
  bug report to the libcurl list, both identifying a problem with FTP
  persistent connections and how the dir hierarchy was not properly reset
  between files.

- David Byron's thoughts on a fixed Makefile in tests/ were applied.

- Jan Sundin reported a case where curl ignored a cookie that browsers don't,
  which turned out to be due to the number of dots in the 'domain'. I've now
  made curl follow the original Netscape cookie spec less strictly on that
  part.

Daniel (4 August)
- Dirk Manske added cookie support for the experimental, hidden and still
  undocumented share feature!

- Mark Fletcher provided an excellent bug report that identified a problem
  with FOLLOWLOCATION and chunked transfer-encoding, as libcurl would not
  properly ignore the body contents of a 3XX response that included the
  Location: header.

Early (6 August)
- Added option CURLOPT_FTP_CREATE_MISSING_DIRS.
  This option will force the target file's path to be created if it
  does not already exist on the remote system.

  Files affected:
  - include/curl/curl.h
    Added option CURLOPT_FTP_CREATE_MISSING_DIRS
  - lib/ftp.c
    Added function ftp_mkd, which issues a MKD command.
    Added function ftp_force_cwd, which attempts a CWD, and does a MKD and
    retries the CWD if the original CWD fails.
    Modified ftp_perform() to call its change-directory function through a
    pointer. The pointer points to ftp_cwd by default, and is modified to
    point to ftp_force_cwd if and only if data->set.ftp_create_missing_dirs
    is not 0.
  - lib/url.c
    Modified Curl_setopt to recognize CURLOPT_FTP_CREATE_MISSING_DIRS
  - lib/urldata.h
    Added ftp_create_missing_dirs to struct UserDefined
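
A hedged usage sketch for the new option during an FTP upload; the URL, credentials and local file name are placeholders, not code from the release.

```c
#include <stdio.h>
#include <curl/curl.h>

/* Upload into a directory that may not exist yet; with the option set,
   libcurl will attempt to create the missing directories first. */
int main(void)
{
  FILE *src = fopen("report.txt", "rb");               /* placeholder file */
  CURL *curl = curl_easy_init();

  if(curl && src) {
    curl_easy_setopt(curl, CURLOPT_URL,
                     "ftp://user:secret@ftp.example.com/new/dir/report.txt");
    curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
    curl_easy_setopt(curl, CURLOPT_FTP_CREATE_MISSING_DIRS, 1L);
    curl_easy_setopt(curl, CURLOPT_READDATA, src);
    curl_easy_perform(curl);
  }
  if(curl)
    curl_easy_cleanup(curl);
  if(src)
    fclose(src);
  return 0;
}
```
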

- Minor bugfix for CURLOPT_TIMECONDITION with FTP - if the file was not
  present to do the time comparison, it would fail.
  Files affected:
  - lib/ftp.c
    In ftp_perform(), the call to ftp_getfiletime() used to be followed by

      if (result)
        return result;

    and then by the code that actually did the time comparison. The code that
    did the comparison handled the case where the filetime was not available
    (as indicated by info.filetime < 0 or set.timevalue < 0), so I replaced
    the "if (result) return result" with a switch(result) that allows
    CURLE_FTP_COULDNT_RETR_FILE to fall through to the normal time comparison.
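
A paraphrased, self-contained illustration of that control-flow change; all identifiers are mocked, this is not the literal lib/ftp.c code.

```c
#include <stdio.h>

/* Mock of the described fix: treat "file not there" as non-fatal so the
   time comparison (which copes with a missing filetime) can still run. */
enum code { OK = 0, COULDNT_RETR_FILE = 19, OTHER_ERROR = 99 };

static enum code mock_getfiletime(void) { return COULDNT_RETR_FILE; }

int main(void)
{
  enum code result = mock_getfiletime();

  switch(result) {
  case OK:
  case COULDNT_RETR_FILE:
    /* fall through to the normal time comparison, which handles a
       missing filetime (filetime < 0) on its own */
    printf("continue to time comparison\n");
    break;
  default:
    printf("abort with error %d\n", result);
    return result;
  }
  return 0;
}
```
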

Daniel (3 August)
- When proxy authentication is used in a CONNECT request (as used for all SSL
  connects and otherwise enforced tunnel-through-proxy requests), the same
  authentication header is also wrongly sent to the remote host.

  This is a rather significant info leak. I've fixed it now and mailed a patch
  and warning to the mailing lists.

Daniel (1 August)
- David Byron provided a patch to make 7.10.6 build correctly with the
  compressed hugehelp.c source file.

Version 7.10.7-pre1 (31 July 2003)

Daniel (30 July)
- Jörg Müller-Tolk updated the VC makefile.

- Daniel Noguerol made the ftp code output "Accept-Ranges: bytes" in a
  similar style to the other faked HTTP headers when NOBODY and HEADER are
  used. I updated two corresponding test cases too.

- Marty Kuhrt pointed out a compilation problem on VMS due to my having
  changed a type from long to time_t, and I'm now changing it back to work
  more portably...

  He also indicated that distributing the src/hugehelp.c in a compressed state
  like I accidentally did may not be the smartest move... I've now fixed the
  distribution procedure to automatically generate an uncompressed version
  when I make release archives.

Daniel (29 July)
- Gisle Vanem brought changes to the mkhelp script for the generation of the
  compressed help text on some platforms.

Version 7.10.6 (28 July 2003)

Daniel (28 July)
@@ -83,3 +83,8 @@ cygwinbin:

install-data-hook:
	cd include && $(MAKE) install
	cd docs && $(MAKE) install

# We extend the standard uninstall with a custom hook:
uninstall-hook:
	cd include && $(MAKE) uninstall
	cd docs && $(MAKE) uninstall
README (2)

@@ -31,6 +31,7 @@ WEB SITE

Visit the curl web site or mirrors for the latest news:

  Sweden    -- http://curl.haxx.se/
  Russia    -- http://curl.tsuren.net/
  US        -- http://curl.sf.net/
  Australia -- http://curl.planetmirror.com/

@@ -44,6 +45,7 @@ DOWNLOAD

  Australia -- http://curl.planetmirror.com/download/
  US        -- http://curl.sourceforge.net/download/
  Hongkong  -- http://www.execve.net/curl/
  Russia    -- http://curl.tsuren.net/download/

CVS
configure.ac (45)
@@ -1,9 +1,7 @@
|
||||
dnl $Id$
|
||||
dnl Process this file with autoconf to produce a configure script.
|
||||
|
||||
dnl Ensure that this file is processed with autoconf 2.50 or newer
|
||||
dnl Don't even think about removing this check!
|
||||
AC_PREREQ(2.50)
|
||||
AC_PREREQ(2.57)
|
||||
|
||||
dnl We don't know the version number "statically" so we use a dash here
|
||||
AC_INIT(curl, [-], [curl-bug@haxx.se])
|
||||
@@ -17,9 +15,14 @@ AC_CONFIG_SRCDIR([lib/urldata.h])
|
||||
AM_CONFIG_HEADER(lib/config.h src/config.h tests/server/config.h )
|
||||
AM_MAINTAINER_MODE
|
||||
|
||||
dnl SED is needed by some of the tools
|
||||
AC_PATH_PROG( SED, sed, , $PATH:/usr/bin:/usr/local/bin)
|
||||
AC_SUBST(SED)
|
||||
|
||||
dnl AR is used by libtool, and try the odd Solaris path too
|
||||
AC_PATH_PROG( AR, ar, , $PATH:/usr/bin:/usr/local/bin:/usr/ccs/bin)
|
||||
AC_SUBST(AR)
|
||||
|
||||
dnl figure out the libcurl version
|
||||
VERSION=`$SED -ne 's/^#define LIBCURL_VERSION "\(.*\)"/\1/p' ${srcdir}/include/curl/curl.h`
|
||||
AM_INIT_AUTOMAKE(curl,$VERSION)
|
||||
@@ -824,15 +827,14 @@ AC_C_CONST
|
||||
AC_TYPE_SIZE_T
|
||||
AC_HEADER_TIME
|
||||
|
||||
# mprintf() checks:
|
||||
AC_CHECK_SIZEOF(off_t)
|
||||
|
||||
# check for 'long double'
|
||||
# AC_CHECK_SIZEOF(long double, 8)
|
||||
# check for 'long long'
|
||||
# AC_CHECK_SIZEOF(long long, 4)
|
||||
AC_CHECK_TYPE(long long,
|
||||
[AC_DEFINE(HAVE_LONGLONG, 1, [if your compiler supports 'long long'])])
|
||||
|
||||
# check for ssize_t
|
||||
AC_CHECK_TYPE(ssize_t, int)
|
||||
AC_CHECK_TYPE(ssize_t, ,
|
||||
AC_DEFINE(ssize_t, int, [the signed version of size_t]))
|
||||
|
||||
TYPE_SOCKLEN_T
|
||||
TYPE_IN_ADDR_T
|
||||
@@ -950,6 +952,31 @@ AC_HELP_STRING([--disable-debug],[Disable debug options]),
|
||||
AC_MSG_RESULT(no)
|
||||
)
|
||||
|
||||
ares="no"
|
||||
AC_MSG_CHECKING([whether to enable ares])
|
||||
AC_ARG_ENABLE(ares,
|
||||
AC_HELP_STRING([--enable-ares],[Enable using ares for name lookups])
|
||||
AC_HELP_STRING([--disable-ares],[Disable using ares for name lookups]),
|
||||
[ case "$enableval" in
|
||||
no)
|
||||
AC_MSG_RESULT(no)
|
||||
;;
|
||||
*) AC_MSG_RESULT(yes)
|
||||
|
||||
if test "x$IPV6_ENABLED" = "x1"; then
|
||||
AC_MSG_ERROR([ares doesn't work with ipv6, disable ipv6 to use ares])
|
||||
fi
|
||||
|
||||
AC_DEFINE(USE_ARES, 1, [Define if you want to enable ares support])
|
||||
|
||||
ares="yes"
|
||||
;;
|
||||
esac ],
|
||||
AC_MSG_RESULT(no)
|
||||
)
|
||||
|
||||
AM_CONDITIONAL(ARES, test x$ares = xyes)
|
||||
|
||||
AC_CONFIG_FILES([Makefile \
|
||||
docs/Makefile \
|
||||
docs/examples/Makefile \
|
||||
|
@@ -204,7 +204,14 @@ Win32
|
||||
Before running nmake define the OPENSSL_PATH environment variable with
|
||||
the root/base directory of OpenSSL, for example:
|
||||
|
||||
set OPENSSL_PATH=c:\openssl-0.9.6b
|
||||
set OPENSSL_PATH=c:\openssl-0.9.7a
|
||||
|
||||
lib/Makefile.vc6 depends on zlib (http://www.gzip.org/zlib/) as well.
|
||||
Please read the zlib documentation on how to compile zlib. Define the
|
||||
ZLIB_PATH environment variable to the location of zlib.h and zlib.lib,
|
||||
for example:
|
||||
|
||||
set ZLIB_PATH=c:\zlib-1.1.4
|
||||
|
||||
Then run 'nmake vc-ssl' or 'nmake vc-ssl-dll' in curl's root
|
||||
directory. 'nmake vc-ssl' will create a libcurl static and dynamic
|
||||
|
@@ -3,6 +3,18 @@ join in and help us correct one or more of these! Also be sure to check the
|
||||
changelog of the current development status, as one or more of these problems
|
||||
may have been fixed since this was written!
|
||||
|
||||
* libcurl doesn't treat the content-length of compressed data properly, as
|
||||
it seems HTTP servers send the *uncompressed* length in that header and
|
||||
libcurl thinks of it as the *compressed* length. Some explanations are here:
|
||||
http://curl.haxx.se/mail/lib-2003-06/0146.html
|
||||
|
||||
* Downloading 0 (zero) bytes files over FTP will not create a zero byte file
|
||||
locally, which is because libcurl doesn't call the write callback with zero
|
||||
bytes. Explained here: http://curl.haxx.se/mail/archive-2003-04/0143.html
|
||||
|
||||
* Using CURLOPT_FAILONERROR (-f/--fail) will make authentication to stop
|
||||
working if you use anything but plain Basic auth.
|
||||
|
||||
* LDAP output is garbled. Hardly anyone seems to care about LDAP functionality
|
||||
in curl/libcurl, which is why this report has been closed and set to be solved later.
|
||||
If you feel this is something you want fixed, get in touch and we'll start
|
||||
|
@@ -243,7 +243,7 @@ POST (HTTP)

To post to this, you enter a curl command line like:

curl -d "user=foobar&pass=12345&id=blablabla&dig=submit" (continues)
curl -d "user=foobar&pass=12345&id=blablabla&ding=submit" (continues)
http://www.formpost.com/getthis/post.cgi
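
For reference, a hedged libcurl equivalent of that command line, using CURLOPT_POSTFIELDS; the URL and field values are the ones from the example above.

```c
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();

  if(curl) {
    /* the data must already be url-encoded, just as with curl -d */
    curl_easy_setopt(curl, CURLOPT_URL,
                     "http://www.formpost.com/getthis/post.cgi");
    curl_easy_setopt(curl, CURLOPT_POSTFIELDS,
                     "user=foobar&pass=12345&id=blablabla&ding=submit");
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
```
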
|
docs/THANKS (179)
@@ -2,92 +2,93 @@ This project has been alive for several years. Countless people have provided
feedback that has improved curl. Here follows an (incomplete) list of people
that have contributed with non-trivial parts:
|
||||
|
||||
- Daniel Stenberg <daniel@haxx.se>
|
||||
- Rafael Sagula <sagula@inf.ufrgs.br>
|
||||
- Sampo Kellomaki <sampo@iki.fi>
|
||||
- Linas Vepstas <linas@linas.org>
|
||||
- Bjorn Reese <breese@mail1.stofanet.dk>
|
||||
- Johan Anderson <johan@homemail.com>
|
||||
- Kjell Ericson <Kjell.Ericson@haxx.se>
|
||||
- Troy Engel <tengel@sonic.net>
|
||||
- Ryan Nelson <ryan@inch.com>
|
||||
- Björn Stenberg <bjorn@haxx.se>
|
||||
- Angus Mackay <amackay@gus.ml.org>
|
||||
- Eric Young <eay@cryptsoft.com>
|
||||
- Simon Dick <simond@totally.irrelevant.org>
|
||||
- Oren Tirosh <oren@monty.hishome.net>
|
||||
- Steven G. Johnson <stevenj@alum.mit.edu>
|
||||
- Gilbert Ramirez Jr. <gram@verdict.uthscsa.edu>
|
||||
- Andrés García <ornalux@redestb.es>
|
||||
- Douglas E. Wegscheid <wegscd@whirlpool.com>
|
||||
- Mark Butler <butlerm@xmission.com>
|
||||
- Eric Thelin <eric@generation-i.com>
|
||||
- Marc Boucher <marc@mbsi.ca>
|
||||
- Greg Onufer <Greg.Onufer@Eng.Sun.COM>
|
||||
- Doug Kaufman <dkaufman@rahul.net>
|
||||
- David Eriksson <david@2good.com>
|
||||
- Ralph Beckmann <rabe@uni-paderborn.de>
|
||||
- T. Yamada <tai@imasy.or.jp>
|
||||
- Lars J. Aas <larsa@sim.no>
|
||||
- Jörn Hartroth <Joern.Hartroth@computer.org>
|
||||
- Matthew Clarke <clamat@van.maves.ca>
|
||||
- Linus Nielsen Feltzing <linus@haxx.se>
|
||||
- Felix von Leitner <felix@convergence.de>
|
||||
- Dan Zitter <dzitter@zitter.net>
|
||||
- Jongki Suwandi <Jongki.Suwandi@eng.sun.com>
|
||||
- Chris Maltby <chris@aurema.com>
|
||||
- Ron Zapp <rzapper@yahoo.com>
|
||||
- Paul Marquis <pmarquis@iname.com>
|
||||
- Ellis Pritchard <ellis@citria.com>
|
||||
- Damien Adant <dams@usa.net>
|
||||
- Chris <cbayliss@csc.come>
|
||||
- Marco G. Salvagno <mgs@whiz.cjb.net>
|
||||
- Paul Marquis <pmarquis@iname.com>
|
||||
- David LeBlanc <dleblanc@qnx.com>
|
||||
- Rich Gray at Plus Technologies
|
||||
- Luong Dinh Dung <u8luong@lhsystems.hu>
|
||||
- Torsten Foertsch <torsten.foertsch@gmx.net>
|
||||
- Kristian Köhntopp <kris@koehntopp.de>
|
||||
- Fred Noz <FNoz@siac.com>
|
||||
- Caolan McNamara <caolan@csn.ul.ie>
|
||||
- Albert Chin-A-Young <china@thewrittenword.com>
|
||||
- Stephen Kick <skick@epicrealm.com>
|
||||
- Martin Hedenfalk <mhe@stacken.kth.se>
|
||||
- Richard Prescott <rip at step.polymtl.ca>
|
||||
- Jason S. Priebe <priebe@wral-tv.com>
|
||||
- T. Bharath <TBharath@responsenetworks.com>
|
||||
- Alexander Kourakos <awk@users.sourceforge.net>
|
||||
- James Griffiths <griffiths_james@yahoo.com>
|
||||
- Loic Dachary <loic@senga.org>
|
||||
- Robert Weaver <robert.weaver@sabre.com>
|
||||
- Ingo Ralf Blum <ingoralfblum@ingoralfblum.com>
|
||||
- Jun-ichiro itojun Hagino <itojun@iijlab.net>
|
||||
- Frederic Lepied <flepied@mandrakesoft.com>
|
||||
- Georg Horn <horn@koblenz-net.de>
|
||||
- Cris Bailiff <c.bailiff@awayweb.com>
|
||||
- Sterling Hughes <sterling@designmultimedia.com>
|
||||
- S. Moonesamy
|
||||
- Ingo Wilken <iw@WWW.Ecce-Terram.DE>
|
||||
- Pawel A. Gajda <mis@k2.net.pl>
|
||||
- Patrick Bihan-Faou
|
||||
- Nico Baggus <Nico.Baggus@mail.ing.nl>
|
||||
- Sergio Ballestrero
|
||||
- Andrew Francis <locust@familyhealth.com.au>
|
||||
- Tomasz Lacki <Tomasz.Lacki@primark.pl>
|
||||
- Georg Huettenegger <georg@ist.org>
|
||||
- John Lask <johnlask@hotmail.com>
|
||||
- Eric Lavigne <erlavigne@wanadoo.fr>
|
||||
- Marcus Webster <marcus.webster@phocis.com>
|
||||
- Götz Babin-Ebell <babinebell@trustcenter.de>
|
||||
- Andreas Damm <andreas-sourceforge@radab.org>
|
||||
- Jacky Lam <sylam@emsoftltd.com>
|
||||
- James Gallagher <jgallagher@gso.uri.edu>
|
||||
- Kjetil Jacobsen <kjetilja@cs.uit.no>
|
||||
- Markus F.X.J. Oberhumer <markus@oberhumer.com>
|
||||
- Miklos Nemeth <mnemeth@kfkisystems.com>
|
||||
- Kevin Roth <kproth@users.sourceforge.net>
|
||||
- Ralph Mitchell <rmitchell@eds.com>
|
||||
- Dan Fandrich <dan@coneharvesters.com>
|
||||
- Jean-Philippe Barrette-LaPierre <jpb@rrette.com>
|
||||
- Richard Bramante <RBramante@on.com>
|
||||
- Daniel Kouril <kouril@ics.muni.cz>
|
||||
Daniel Stenberg <daniel@haxx.se>
|
||||
Rafael Sagula <sagula@inf.ufrgs.br>
|
||||
Sampo Kellomaki <sampo@iki.fi>
|
||||
Linas Vepstas <linas@linas.org>
|
||||
Bjorn Reese <breese@mail1.stofanet.dk>
|
||||
Johan Anderson <johan@homemail.com>
|
||||
Kjell Ericson <Kjell.Ericson@haxx.se>
|
||||
Troy Engel <tengel@sonic.net>
|
||||
Ryan Nelson <ryan@inch.com>
|
||||
Björn Stenberg <bjorn@haxx.se>
|
||||
Angus Mackay <amackay@gus.ml.org>
|
||||
Eric Young <eay@cryptsoft.com>
|
||||
Simon Dick <simond@totally.irrelevant.org>
|
||||
Oren Tirosh <oren@monty.hishome.net>
|
||||
Steven G. Johnson <stevenj@alum.mit.edu>
|
||||
Gilbert Ramirez Jr. <gram@verdict.uthscsa.edu>
|
||||
Andrés García <ornalux@redestb.es>
|
||||
Douglas E. Wegscheid <wegscd@whirlpool.com>
|
||||
Mark Butler <butlerm@xmission.com>
|
||||
Eric Thelin <eric@generation-i.com>
|
||||
Marc Boucher <marc@mbsi.ca>
|
||||
Greg Onufer <Greg.Onufer@Eng.Sun.COM>
|
||||
Doug Kaufman <dkaufman@rahul.net>
|
||||
David Eriksson <david@2good.com>
|
||||
Ralph Beckmann <rabe@uni-paderborn.de>
|
||||
T. Yamada <tai@imasy.or.jp>
|
||||
Lars J. Aas <larsa@sim.no>
|
||||
Jörn Hartroth <Joern.Hartroth@computer.org>
|
||||
Matthew Clarke <clamat@van.maves.ca>
|
||||
Linus Nielsen Feltzing <linus@haxx.se>
|
||||
Felix von Leitner <felix@convergence.de>
|
||||
Dan Zitter <dzitter@zitter.net>
|
||||
Jongki Suwandi <Jongki.Suwandi@eng.sun.com>
|
||||
Chris Maltby <chris@aurema.com>
|
||||
Ron Zapp <rzapper@yahoo.com>
|
||||
Paul Marquis <pmarquis@iname.com>
|
||||
Ellis Pritchard <ellis@citria.com>
|
||||
Damien Adant <dams@usa.net>
|
||||
Chris <cbayliss@csc.come>
|
||||
Marco G. Salvagno <mgs@whiz.cjb.net>
|
||||
Paul Marquis <pmarquis@iname.com>
|
||||
David LeBlanc <dleblanc@qnx.com>
|
||||
Rich Gray at Plus Technologies
|
||||
Luong Dinh Dung <u8luong@lhsystems.hu>
|
||||
Torsten Foertsch <torsten.foertsch@gmx.net>
|
||||
Kristian Köhntopp <kris@koehntopp.de>
|
||||
Fred Noz <FNoz@siac.com>
|
||||
Caolan McNamara <caolan@csn.ul.ie>
|
||||
Albert Chin-A-Young <china@thewrittenword.com>
|
||||
Stephen Kick <skick@epicrealm.com>
|
||||
Martin Hedenfalk <mhe@stacken.kth.se>
|
||||
Richard Prescott <rip at step.polymtl.ca>
|
||||
Jason S. Priebe <priebe@wral-tv.com>
|
||||
T. Bharath <TBharath@responsenetworks.com>
|
||||
Alexander Kourakos <awk@users.sourceforge.net>
|
||||
James Griffiths <griffiths_james@yahoo.com>
|
||||
Loic Dachary <loic@senga.org>
|
||||
Robert Weaver <robert.weaver@sabre.com>
|
||||
Ingo Ralf Blum <ingoralfblum@ingoralfblum.com>
|
||||
Jun-ichiro itojun Hagino <itojun@iijlab.net>
|
||||
Frederic Lepied <flepied@mandrakesoft.com>
|
||||
Georg Horn <horn@koblenz-net.de>
|
||||
Cris Bailiff <c.bailiff@awayweb.com>
|
||||
Sterling Hughes <sterling@designmultimedia.com>
|
||||
S. Moonesamy
|
||||
Ingo Wilken <iw@WWW.Ecce-Terram.DE>
|
||||
Pawel A. Gajda <mis@k2.net.pl>
|
||||
Patrick Bihan-Faou
|
||||
Nico Baggus <Nico.Baggus@mail.ing.nl>
|
||||
Sergio Ballestrero
|
||||
Andrew Francis <locust@familyhealth.com.au>
|
||||
Tomasz Lacki <Tomasz.Lacki@primark.pl>
|
||||
Georg Huettenegger <georg@ist.org>
|
||||
John Lask <johnlask@hotmail.com>
|
||||
Eric Lavigne <erlavigne@wanadoo.fr>
|
||||
Marcus Webster <marcus.webster@phocis.com>
|
||||
Götz Babin-Ebell <babinebell@trustcenter.de>
|
||||
Andreas Damm <andreas-sourceforge@radab.org>
|
||||
Jacky Lam <sylam@emsoftltd.com>
|
||||
James Gallagher <jgallagher@gso.uri.edu>
|
||||
Kjetil Jacobsen <kjetilja@cs.uit.no>
|
||||
Markus F.X.J. Oberhumer <markus@oberhumer.com>
|
||||
Miklos Nemeth <mnemeth@kfkisystems.com>
|
||||
Kevin Roth <kproth@users.sourceforge.net>
|
||||
Ralph Mitchell <rmitchell@eds.com>
|
||||
Dan Fandrich <dan@coneharvesters.com>
|
||||
Jean-Philippe Barrette-LaPierre <jpb@rrette.com>
|
||||
Richard Bramante <RBramante@on.com>
|
||||
Daniel Kouril <kouril@ics.muni.cz>
|
||||
Dirk Manske <dm@nettraffic.de>
|
||||
|
docs/TODO (29)
@@ -10,6 +10,8 @@ TODO
|
||||
send us patches that improve things! Also check the http://curl.haxx.se/dev
|
||||
web section for various technical development notes.
|
||||
|
||||
All bugs documented in the KNOWN_BUGS document are subject for fixing!
|
||||
|
||||
LIBCURL
|
||||
|
||||
* Introduce an interface to libcurl that allows applications to easier get to
|
||||
@@ -18,32 +20,19 @@ TODO
|
||||
existing cookies? We probably need both. Enable applications to modify
|
||||
existing cookies as well. http://curl.haxx.se/dev/COOKIES
|
||||
|
||||
* Make content encoding/decoding internally be made using a filter system.
|
||||
|
||||
* Introduce another callback interface for upload/download that makes one
|
||||
less copy of data and thus a faster operation.
|
||||
[http://curl.haxx.se/dev/no_copy_callbacks.txt]
|
||||
|
||||
* Add asynchronous name resolving (http://libdenise.sf.net/). This should be
|
||||
made to work on most of the supported platforms, or otherwise it isn't
|
||||
really interesting.
|
||||
|
||||
* Data sharing. Tell which easy handles within a multi handle that should
|
||||
share cookies, connection cache, dns cache, ssl session cache. Full
|
||||
suggestion found here: http://curl.haxx.se/dev/sharing.txt
|
||||
|
||||
* Mutexes. By adding mutex callback support, the 'data sharing' mentioned
|
||||
above can be made between several easy handles running in different threads
|
||||
too. The actual mutex implementations will be left for the application to
|
||||
implement, libcurl will merely call 'getmutex' and 'leavemutex' callbacks.
|
||||
Part of the sharing suggestion at: http://curl.haxx.se/dev/sharing.txt
|
||||
* More data sharing. curl_share_* functions already exist and work, and they
|
||||
can be extended to share more.
|
||||
|
||||
* Set the SO_KEEPALIVE socket option to make libcurl notice and disconnect
  connections that have been idle for a very long time. A sketch of the
  option itself follows below.
|
||||
|
||||
* Go through the code and verify that libcurl deals with big files >2GB and
|
||||
>4GB all over. Bug reports (and source reviews) indicate that it doesn't
|
||||
currently work properly.
|
||||
>4GB all over. Bug reports (and source reviews) show that it doesn't
|
||||
currently work.
|
||||
|
||||
* CURLOPT_MAXFILESIZE. Prevent downloads that are larger than the specified
|
||||
size. CURLE_FILESIZE_EXCEEDED would then be returned. Gautam Mani
|
||||
@@ -52,6 +41,8 @@ TODO
|
||||
|
||||
LIBCURL - multi interface
|
||||
|
||||
* Add curl_multi_timeout() to make libcurl's ares-functionality better.
|
||||
|
||||
* Make sure we don't ever loop because of non-blocking sockets return
|
||||
EWOULDBLOCK or similar. This FTP command sending, the SSL connection etc.
|
||||
|
||||
@@ -69,6 +60,10 @@ TODO
|
||||
|
||||
FTP
|
||||
|
||||
* Support the most common FTP proxies, Philip Newton provided a list
|
||||
allegedly from ncftp:
|
||||
http://curl.haxx.se/mail/archive-2003-04/0126.html
|
||||
|
||||
* Make CURLOPT_FTPPORT support an additional port number on the IP/if/name,
|
||||
like "blabla:[port]" or possibly even "blabla:[portfirst]-[portsecond]".
|
||||
|
||||
|
docs/curl.1 (22)
@@ -2,7 +2,7 @@
|
||||
.\" nroff -man curl.1
|
||||
.\" Written by Daniel Stenberg
|
||||
.\"
|
||||
.TH curl 1 "18 June 2003" "Curl 7.10.6" "Curl Manual"
|
||||
.TH curl 1 "8 Aug 2003" "Curl 7.10.7" "Curl Manual"
|
||||
.SH NAME
|
||||
curl \- transfer a URL
|
||||
.SH SYNOPSIS
|
||||
@@ -113,7 +113,7 @@ difference.
|
||||
.IP "--ciphers <list of ciphers>"
|
||||
(SSL) Specifies which ciphers to use in the connection. The list of ciphers
|
||||
must be using valid ciphers. Read up on SSL cipher list details on this URL:
|
||||
.I http://www.openssl.org/docs/apps/ciphers.html (Option added in curl 7.9)
|
||||
.I http://www.openssl.org/docs/apps/ciphers.html
|
||||
|
||||
If this option is used several times, the last one will override the others.
|
||||
.IP "--compressed"
|
||||
@@ -134,7 +134,7 @@ operation. Curl writes all cookies previously read from a specified file as
|
||||
well as all cookies received from remote server(s). If no cookies are known,
|
||||
no file will be written. The file will be written using the Netscape cookie
|
||||
file format. If you set the file name to a single dash, "-", the cookies will
|
||||
be written to stdout. (Option added in curl 7.9)
|
||||
be written to stdout.
|
||||
|
||||
.B NOTE
|
||||
If the cookie jar can't be created or written to, the whole curl operation
|
||||
@@ -166,7 +166,7 @@ If this option is used twice, the second will again disable crlf converting.
|
||||
that can emulate as if a user has filled in a HTML form and pressed the submit
|
||||
button. Note that the data is sent exactly as specified with no extra
|
||||
processing (with all newlines cut off). The data is expected to be
|
||||
"url-encoded". This will cause curl to pass the data to the server using the
|
||||
\&"url-encoded". This will cause curl to pass the data to the server using the
|
||||
content-type application/x-www-form-urlencoded. Compare to -F. If more than
|
||||
one -d/--data option is used on the same command line, the data pieces
|
||||
specified will be merged together with a separating &-letter. Thus, using '-d
|
||||
@@ -177,7 +177,7 @@ If you start the data with the letter @, the rest should be a file name to
|
||||
read the data from, or - if you want curl to read the data from stdin. The
|
||||
contents of the file must already be url-encoded. Multiple files can also be
|
||||
specified. Posting data from a file named 'foobar' would thus be done with
|
||||
"--data @foobar".
|
||||
\&"--data @foobar".
|
||||
|
||||
To post data purely binary, you should instead use the --data-binary option.
|
||||
|
||||
@@ -291,6 +291,12 @@ normal cases when a HTTP server fails to deliver a document, it returns a HTML
|
||||
document stating so (which often also describes why and more). This flag will
|
||||
prevent curl from outputting that and fail silently instead.
|
||||
|
||||
If this option is used twice, the second will again disable silent failure.
|
||||
.IP "--ftp-create-dirs"
|
||||
(FTP) When an FTP URL/operation uses a path that doesn't currently exist on
|
||||
the server, the standard behaviour of curl is to fail. Using this option, curl
|
||||
will instead attempt to create missing directories. (Added in 7.10.7)
|
||||
|
||||
If this option is used twice, the second will again disable directory creation.
|
||||
.IP "-F/--form <name=content>"
|
||||
(HTTP) This lets curl emulate a filled in form in which a user has pressed the
|
||||
@@ -328,7 +334,7 @@ contents but they should be encoded according to the URI standard.
|
||||
When used, this option will make all data specified with -d/--data or
|
||||
--data-binary to be used in a HTTP GET request instead of the POST request
|
||||
that otherwise would be used. The data will be appended to the URL with a '?'
|
||||
separator. (Option added in curl 7.9)
|
||||
separator.
|
||||
|
||||
If used in combination with -I, the POST data will instead be appended to the
|
||||
URL with a HEAD request.
|
||||
@@ -809,7 +815,7 @@ at port 1080.
|
||||
|
||||
This option overrides existing environment variables that sets proxy to
|
||||
use. If there's an environment variable setting a proxy, you can set proxy to
|
||||
"" to override it.
|
||||
\&"" to override it.
|
||||
|
||||
\fBNote\fP that all operations that are performed over a HTTP proxy will
|
||||
transparently be converted to HTTP. It means that certain protocol-specific
|
||||
@@ -1031,8 +1037,6 @@ Unrecognized transfer encoding
|
||||
.IP XX
|
||||
There will appear more error codes here in future releases. The existing ones
|
||||
are meant to never change.
|
||||
.SH BUGS
|
||||
If you do find bugs, mail them to curl-bug@haxx.se.
|
||||
.SH AUTHORS / CONTRIBUTORS
|
||||
Daniel Stenberg is the main author, but the whole list of contributors is
|
||||
found in the separate THANKS file.
|
||||
|
@@ -1,28 +1,53 @@
|
||||
/*****************************************************************************
|
||||
* _ _ ____ _
|
||||
* Project ___| | | | _ \| |
|
||||
* / __| | | | |_) | |
|
||||
* | (__| |_| | _ <| |___
|
||||
* \___|\___/|_| \_\_____|
|
||||
*
|
||||
* $Id$
|
||||
* This example source code introduces a c library buffered I/O interface to
* URL reads. It supports fopen(), fread(), fgets(), feof(), fclose() and
* rewind(). Supported functions have identical prototypes to their normal c
* lib namesakes and are preceded by url_ .
|
||||
*
|
||||
* This example source code introduces an fopen()/fread()/fclose() emulation
|
||||
* for URL reads. Using an approach similar to this, you could replace your
|
||||
* program's fopen() with this url_fopen() and fread() with url_fread() and
|
||||
* it should be possible to read remote streams instead of (only) local files.
|
||||
* Using this code you can replace your program's fopen() with url_fopen()
|
||||
* and fread() with url_fread() and it becomes possible to read remote streams
|
||||
* instead of (only) local files. Local files (ie those that can be directly
|
||||
* fopened) will drop back to using the underlying clib implementations
|
||||
*
|
||||
* See the main() function at the bottom that shows a tiny app in action.
|
||||
* See the main() function at the bottom that shows an app that retrieves from a
* specified url using fgets() and fread() and saves as two output files.
|
||||
*
|
||||
* This source code is a proof of concept. It will need further attention to
|
||||
* become production-use useful and solid.
|
||||
* Copyright (c) 2003 Simtec Electronics
|
||||
*
|
||||
* Re-implemented by Vincent Sanders <vince@kyllikki.org> with extensive
|
||||
* reference to original curl example code
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. The name of the author may not be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* This example requires libcurl 7.9.7 or later.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <sys/time.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <curl/curl.h>
|
||||
|
||||
@@ -30,206 +55,511 @@
|
||||
#error "too old libcurl version, get the latest!"
|
||||
#endif
|
||||
|
||||
struct data {
|
||||
int type;
|
||||
union {
|
||||
CURL *curl;
|
||||
FILE *file;
|
||||
} handle;
|
||||
|
||||
/* This is the documented biggest possible buffer chunk we can get from
|
||||
libcurl in one single callback! */
|
||||
char buffer[CURL_MAX_WRITE_SIZE];
|
||||
enum fcurl_type_e { CFTYPE_NONE=0, CFTYPE_FILE=1, CFTYPE_CURL=2 };
|
||||
|
||||
char *readptr; /* read from here */
|
||||
int bytes; /* bytes available from read pointer */
|
||||
struct fcurl_data
|
||||
{
|
||||
enum fcurl_type_e type; /* type of handle */
|
||||
union {
|
||||
CURL *curl;
|
||||
FILE *file;
|
||||
} handle; /* handle */
|
||||
|
||||
CURLMcode m; /* stored from a previous url_fread() */
|
||||
char *buffer; /* buffer to store cached data*/
|
||||
int buffer_len; /* currently allocated buffers length */
|
||||
int buffer_pos; /* end of data in buffer*/
|
||||
int still_running; /* Is background url fetch still in progress */
|
||||
};
|
||||
|
||||
typedef struct data URL_FILE;
|
||||
typedef struct fcurl_data URL_FILE;
|
||||
|
||||
/* exported functions */
|
||||
URL_FILE *url_fopen(char *url,const char *operation);
|
||||
int url_fclose(URL_FILE *file);
|
||||
int url_feof(URL_FILE *file);
|
||||
size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file);
|
||||
char * url_fgets(char *ptr, int size, URL_FILE *file);
|
||||
void url_rewind(URL_FILE *file);
|
||||
|
||||
/* we use a global one for convenience */
|
||||
CURLM *multi_handle;
|
||||
|
||||
static
|
||||
size_t write_callback(char *buffer,
|
||||
size_t size,
|
||||
size_t nitems,
|
||||
void *userp)
|
||||
/* curl calls this routine to get more data */
|
||||
static size_t
|
||||
write_callback(char *buffer,
|
||||
size_t size,
|
||||
size_t nitems,
|
||||
void *userp)
|
||||
{
|
||||
URL_FILE *url = (URL_FILE *)userp;
|
||||
size *= nitems;
|
||||
char *newbuff;
|
||||
int rembuff;
|
||||
|
||||
memcpy(url->readptr, buffer, size);
|
||||
url->readptr += size;
|
||||
url->bytes += size;
|
||||
URL_FILE *url = (URL_FILE *)userp;
|
||||
size *= nitems;
|
||||
|
||||
fprintf(stderr, "callback %d size bytes\n", size);
|
||||
rembuff=url->buffer_len - url->buffer_pos;//remaining space in buffer
|
||||
|
||||
return size;
|
||||
if(size > rembuff)
|
||||
{
|
||||
// not enough space in buffer
|
||||
newbuff=realloc(url->buffer,url->buffer_len + (size - rembuff));
|
||||
if(newbuff==NULL)
|
||||
{
|
||||
fprintf(stderr,"callback buffer grow failed\n");
|
||||
size=rembuff;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* realloc succeeded, increase buffer size */
|
||||
url->buffer_len+=size - rembuff;
|
||||
url->buffer=newbuff;
|
||||
|
||||
/*printf("Callback buffer grown to %d bytes\n",url->buffer_len);*/
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(&url->buffer[url->buffer_pos], buffer, size);
|
||||
url->buffer_pos += size;
|
||||
|
||||
/*fprintf(stderr, "callback %d size bytes\n", size);*/
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
URL_FILE *url_fopen(char *url, char *operation)
|
||||
/* use to attempt to fill the read buffer up to requested number of bytes */
|
||||
static int
|
||||
curl_fill_buffer(URL_FILE *file,int want,int waittime)
|
||||
{
|
||||
/* this code could check for URLs or types in the 'url' and
|
||||
basically use the real fopen() for standard files */
|
||||
fd_set fdread;
|
||||
fd_set fdwrite;
|
||||
fd_set fdexcep;
|
||||
int maxfd;
|
||||
struct timeval timeout;
|
||||
int rc;
|
||||
|
||||
URL_FILE *file;
|
||||
int still_running;
|
||||
(void)operation;
|
||||
/* only attempt to fill buffer if transactions still running and buffer
|
||||
* doesn't exceed required size already
|
||||
*/
|
||||
if((!file->still_running) || (file->buffer_pos > want))
|
||||
return 0;
|
||||
|
||||
file = (URL_FILE *)malloc(sizeof(URL_FILE));
|
||||
if(!file)
|
||||
return NULL;
|
||||
/* attempt to fill buffer */
|
||||
do
|
||||
{
|
||||
FD_ZERO(&fdread);
|
||||
FD_ZERO(&fdwrite);
|
||||
FD_ZERO(&fdexcep);
|
||||
|
||||
memset(file, 0, sizeof(URL_FILE));
|
||||
/* set a suitable timeout to fail on */
|
||||
timeout.tv_sec = 60; /* 1 minute */
|
||||
timeout.tv_usec = 0;
|
||||
|
||||
file->type = 1; /* marked as URL, use 0 for plain file */
|
||||
file->handle.curl = curl_easy_init();
|
||||
/* get file descriptors from the transfers */
|
||||
curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
|
||||
|
||||
curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
|
||||
curl_easy_setopt(file->handle.curl, CURLOPT_FILE, file);
|
||||
curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, FALSE);
|
||||
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
|
||||
rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
|
||||
|
||||
if(!multi_handle)
|
||||
multi_handle = curl_multi_init();
|
||||
switch(rc) {
|
||||
case -1:
|
||||
/* select error */
|
||||
break;
|
||||
|
||||
curl_multi_add_handle(multi_handle, file->handle.curl);
|
||||
case 0:
|
||||
break;
|
||||
|
||||
while(CURLM_CALL_MULTI_PERFORM ==
|
||||
curl_multi_perform(multi_handle, &still_running));
|
||||
default:
|
||||
/* timeout or readable/writable sockets */
|
||||
/* note we *could* be more efficient and not wait for
|
||||
* CURLM_CALL_MULTI_PERFORM to clear here and check it on re-entry
|
||||
* but that gets messy */
|
||||
while(curl_multi_perform(multi_handle, &file->still_running) ==
|
||||
CURLM_CALL_MULTI_PERFORM);
|
||||
|
||||
/* if still_running would be 0 now, we should return NULL */
|
||||
|
||||
return file;
|
||||
break;
|
||||
}
|
||||
} while(file->still_running && (file->buffer_pos < want));
|
||||
return 1;
|
||||
}
|
||||
|
||||
void url_fclose(URL_FILE *file)
|
||||
/* use to remove want bytes from the front of a files buffer */
|
||||
static int
|
||||
curl_use_buffer(URL_FILE *file,int want)
|
||||
{
|
||||
/* make sure the easy handle is not in the multi handle anymore */
|
||||
curl_multi_remove_handle(multi_handle, file->handle.curl);
|
||||
/* sort out buffer */
|
||||
if((file->buffer_pos - want) <=0)
|
||||
{
|
||||
/* ditch buffer - write will recreate */
|
||||
if(file->buffer)
|
||||
free(file->buffer);
|
||||
|
||||
/* cleanup */
|
||||
curl_easy_cleanup(file->handle.curl);
|
||||
file->buffer=NULL;
|
||||
file->buffer_pos=0;
|
||||
file->buffer_len=0;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* move rest down make it available for later */
|
||||
memmove(file->buffer,
|
||||
&file->buffer[want],
|
||||
(file->buffer_pos - want));
|
||||
|
||||
file->buffer_pos -= want;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
|
||||
URL_FILE *
|
||||
url_fopen(char *url,const char *operation)
|
||||
{
|
||||
fd_set fdread;
|
||||
fd_set fdwrite;
|
||||
fd_set fdexcep;
|
||||
int maxfd;
|
||||
struct timeval timeout;
|
||||
int rc;
|
||||
int still_running = 0;
|
||||
/* this code could check for URLs or types in the 'url' and
|
||||
basically use the real fopen() for standard files */
|
||||
|
||||
if(!file->bytes) { /* no data available at this point */
|
||||
URL_FILE *file;
|
||||
(void)operation;
|
||||
|
||||
file->readptr = file->buffer; /* reset read pointer */
|
||||
file = (URL_FILE *)malloc(sizeof(URL_FILE));
|
||||
if(!file)
|
||||
return NULL;
|
||||
|
||||
if(CURLM_CALL_MULTI_PERFORM == file->m) {
|
||||
while(CURLM_CALL_MULTI_PERFORM ==
|
||||
curl_multi_perform(multi_handle, &still_running)) {
|
||||
if(file->bytes) {
|
||||
printf("(fread) WOAH! THis happened!\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(!still_running) {
|
||||
printf("DONE RUNNING AROUND!\n");
|
||||
return 0;
|
||||
}
|
||||
memset(file, 0, sizeof(URL_FILE));
|
||||
|
||||
if((file->handle.file=fopen(url,operation)))
|
||||
{
|
||||
file->type = CFTYPE_FILE; /* marked as a plain file */
|
||||
}
|
||||
else
|
||||
{
|
||||
file->type = CFTYPE_CURL; /* marked as URL */
|
||||
file->handle.curl = curl_easy_init();
|
||||
|
||||
curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
|
||||
curl_easy_setopt(file->handle.curl, CURLOPT_FILE, file);
|
||||
curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, FALSE);
|
||||
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
|
||||
|
||||
if(!multi_handle)
|
||||
multi_handle = curl_multi_init();
|
||||
|
||||
curl_multi_add_handle(multi_handle, file->handle.curl);
|
||||
|
||||
/* lets start the fetch */
|
||||
while(curl_multi_perform(multi_handle, &file->still_running) ==
|
||||
CURLM_CALL_MULTI_PERFORM );
|
||||
|
||||
if((file->buffer_pos == 0) && (!file->still_running))
|
||||
{
|
||||
/* if still_running is 0 now, we should return NULL */
|
||||
|
||||
/* make sure the easy handle is not in the multi handle anymore */
|
||||
curl_multi_remove_handle(multi_handle, file->handle.curl);
|
||||
|
||||
/* cleanup */
|
||||
curl_easy_cleanup(file->handle.curl);
|
||||
|
||||
free(file);
|
||||
|
||||
file = NULL;
|
||||
}
|
||||
}
|
||||
return file;
|
||||
}
|
||||
|
||||
int
|
||||
url_fclose(URL_FILE *file)
|
||||
{
|
||||
int ret=0;/* default is good return */
|
||||
|
||||
switch(file->type)
|
||||
{
|
||||
case CFTYPE_FILE:
|
||||
ret=fclose(file->handle.file); /* passthrough */
|
||||
break;
|
||||
|
||||
case CFTYPE_CURL:
|
||||
/* make sure the easy handle is not in the multi handle anymore */
|
||||
curl_multi_remove_handle(multi_handle, file->handle.curl);
|
||||
|
||||
/* cleanup */
|
||||
curl_easy_cleanup(file->handle.curl);
|
||||
break;
|
||||
|
||||
default: /* unknown or unsupported type - oh dear */
|
||||
ret=EOF;
|
||||
errno=EBADF;
|
||||
break;
|
||||
|
||||
}
|
||||
|
||||
if(file->buffer)
|
||||
free(file->buffer);/* free any allocated buffer space */
|
||||
|
||||
free(file);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
url_feof(URL_FILE *file)
|
||||
{
|
||||
int ret=0;
|
||||
|
||||
switch(file->type)
|
||||
{
|
||||
case CFTYPE_FILE:
|
||||
ret=feof(file->handle.file);
|
||||
break;
|
||||
|
||||
case CFTYPE_CURL:
|
||||
if((file->buffer_pos == 0) && (!file->still_running))
|
||||
ret = 1;
|
||||
break;
|
||||
default: /* unknown or unsupported type - oh dear */
|
||||
ret=-1;
|
||||
errno=EBADF;
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
size_t
|
||||
url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
|
||||
{
|
||||
size_t want;
|
||||
|
||||
switch(file->type)
|
||||
{
|
||||
case CFTYPE_FILE:
|
||||
want=fread(ptr,size,nmemb,file->handle.file);
|
||||
break;
|
||||
|
||||
case CFTYPE_CURL:
|
||||
want = nmemb * size;
|
||||
|
||||
curl_fill_buffer(file,want,1);
|
||||
|
||||
/* check if there's data in the buffer - if not curl_fill_buffer()
|
||||
* either errored or EOF */
|
||||
if(!file->buffer_pos)
|
||||
return 0;
|
||||
|
||||
/* ensure only available data is considered */
|
||||
if(file->buffer_pos < want)
|
||||
want = file->buffer_pos;
|
||||
|
||||
/* xfer data to caller */
|
||||
memcpy(ptr, file->buffer, want);
|
||||
|
||||
curl_use_buffer(file,want);
|
||||
|
||||
want = want / size; /* number of items - nb correct op - checked
|
||||
* with glibc code*/
|
||||
|
||||
/*printf("(fread) return %d bytes %d left\n", want,file->buffer_pos);*/
|
||||
break;
|
||||
|
||||
default: /* unknown or unsupported type - oh dear */
|
||||
want=0;
|
||||
errno=EBADF;
|
||||
break;
|
||||
|
||||
}
|
||||
return want;
|
||||
}
|
||||
|
||||
char *
|
||||
url_fgets(char *ptr, int size, URL_FILE *file)
|
||||
{
|
||||
int want = size - 1;/* always need to leave room for zero termination */
|
||||
int loop;
|
||||
|
||||
switch(file->type)
|
||||
{
|
||||
case CFTYPE_FILE:
|
||||
ptr = fgets(ptr,size,file->handle.file);
|
||||
break;
|
||||
|
||||
case CFTYPE_CURL:
|
||||
curl_fill_buffer(file,want,1);
|
||||
|
||||
/* check if there's data in the buffer - if not fill either errored or
|
||||
* EOF */
|
||||
if(!file->buffer_pos)
|
||||
return NULL;
|
||||
|
||||
/* ensure only available data is considered */
|
||||
if(file->buffer_pos < want)
|
||||
want = file->buffer_pos;
|
||||
|
||||
/*buffer contains data */
|
||||
/* look for newline or eof */
|
||||
for(loop=0;loop < want;loop++)
|
||||
{
|
||||
if(file->buffer[loop] == '\n')
|
||||
{
|
||||
want=loop+1;/* include newline */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* xfer data to caller */
|
||||
memcpy(ptr, file->buffer, want);
|
||||
ptr[want]=0;/* always null terminate */
|
||||
|
||||
curl_use_buffer(file,want);
|
||||
|
||||
/*printf("(fgets) return %d bytes %d left\n", want,file->buffer_pos);*/
|
||||
break;
|
||||
|
||||
default: /* unknown or unsupported type - oh dear */
|
||||
ptr=NULL;
|
||||
errno=EBADF;
|
||||
break;
|
||||
}
|
||||
|
||||
return ptr;/*success */
|
||||
}
|
||||
|
||||
void
|
||||
url_rewind(URL_FILE *file)
|
||||
{
|
||||
switch(file->type)
|
||||
{
|
||||
case CFTYPE_FILE:
|
||||
rewind(file->handle.file); /* passthrough */
|
||||
break;
|
||||
|
||||
case CFTYPE_CURL:
|
||||
/* halt transaction */
|
||||
curl_multi_remove_handle(multi_handle, file->handle.curl);
|
||||
|
||||
/* restart */
|
||||
curl_multi_add_handle(multi_handle, file->handle.curl);
|
||||
|
||||
/* ditch buffer - write will recreate - resets stream pos*/
|
||||
if(file->buffer)
|
||||
free(file->buffer);
|
||||
|
||||
file->buffer=NULL;
|
||||
file->buffer_pos=0;
|
||||
file->buffer_len=0;
|
||||
|
||||
break;
|
||||
|
||||
default: /* unknown or unsupported type - oh dear */
|
||||
break;
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
/* Small main program to retrieve from a url using fgets and fread saving the
 * output to two test files (note the fgets method will corrupt binary files
 * if they contain 0 chars) */
|
||||
int
|
||||
main(int argc, char *argv[])
|
||||
{
|
||||
URL_FILE *handle;
|
||||
FILE *outf;
|
||||
|
||||
int nread;
|
||||
char buffer[256];
|
||||
char *url;
|
||||
|
||||
if(argc < 2)
|
||||
{
|
||||
url="http://192.168.7.3/testfile";/* default to testurl */
|
||||
}
|
||||
else
|
||||
{
|
||||
url=argv[1];/* use passed url */
|
||||
}
|
||||
|
||||
/* copy from url line by line with fgets */
|
||||
outf=fopen("fgets.test","w+");
|
||||
if(!outf)
|
||||
{
|
||||
perror("couldnt open fgets output file\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
handle = url_fopen(url, "r");
|
||||
if(!handle)
|
||||
{
|
||||
printf("couldn't url_fopen()\n");
|
||||
fclose(outf);
|
||||
return 2;
|
||||
}
|
||||
|
||||
while(!url_feof(handle))
|
||||
{
|
||||
url_fgets(buffer,sizeof(buffer),handle);
|
||||
fwrite(buffer,1,strlen(buffer),outf);
|
||||
}
|
||||
|
||||
url_fclose(handle);
|
||||
|
||||
fclose(outf);
|
||||
|
||||
|
||||
/* Copy from url with fread */
|
||||
outf=fopen("fread.test","w+");
|
||||
if(!outf)
|
||||
{
|
||||
perror("couldnt open fread output file\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
handle = url_fopen("testfile", "r");
|
||||
if(!handle) {
|
||||
printf("couldn't url_fopen()\n");
|
||||
fclose(outf);
|
||||
return 2;
|
||||
}
|
||||
|
||||
do {
|
||||
nread = url_fread(buffer, 1,sizeof(buffer), handle);
|
||||
fwrite(buffer,1,nread,outf);
|
||||
} while(nread);
|
||||
|
||||
FD_ZERO(&fdread);
|
||||
FD_ZERO(&fdwrite);
|
||||
FD_ZERO(&fdexcep);
|
||||
|
||||
/* set a suitable timeout to fail on */
|
||||
timeout.tv_sec = 500; /* 5 minutes */
|
||||
timeout.tv_usec = 0;
|
||||
url_fclose(handle);
|
||||
|
||||
/* get file descriptors from the transfers */
|
||||
curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
|
||||
fclose(outf);
|
||||
|
||||
rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
|
||||
|
||||
switch(rc) {
|
||||
case -1:
|
||||
/* select error */
|
||||
break;
|
||||
case 0:
|
||||
break;
|
||||
default:
|
||||
/* timeout or readable/writable sockets */
|
||||
printf("select() returned %d!\n", rc);
|
||||
do {
|
||||
file->m = curl_multi_perform(multi_handle, &still_running);
|
||||
|
||||
if(file->bytes)
|
||||
/* we have received data, return that now */
|
||||
break;
|
||||
|
||||
} while(CURLM_CALL_MULTI_PERFORM == file->m);
|
||||
/* Test rewind */
|
||||
outf=fopen("rewind.test","w+");
|
||||
if(!outf)
|
||||
{
|
||||
perror("couldnt open fread output file\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
if(!still_running)
|
||||
printf("DONE RUNNING AROUND!\n");
|
||||
|
||||
break;
|
||||
}
|
||||
} while(still_running && (file->bytes <= 0));
|
||||
}
|
||||
else
|
||||
printf("(fread) Skip network read\n");
|
||||
handle = url_fopen("testfile", "r");
|
||||
if(!handle) {
|
||||
printf("couldn't url_fopen()\n");
|
||||
fclose(outf);
|
||||
return 2;
|
||||
}
|
||||
|
||||
if(file->bytes) {
|
||||
/* data already available, return that */
|
||||
int want = size * nmemb;
|
||||
nread = url_fread(buffer, 1,sizeof(buffer), handle);
|
||||
fwrite(buffer,1,nread,outf);
|
||||
url_rewind(handle);
|
||||
|
||||
if(file->bytes < want)
|
||||
want = file->bytes;
|
||||
buffer[0]='\n';
|
||||
fwrite(buffer,1,1,outf);
|
||||
|
||||
memcpy(ptr, file->readptr, want);
|
||||
file->readptr += want;
|
||||
file->bytes -= want;
|
||||
nread = url_fread(buffer, 1,sizeof(buffer), handle);
|
||||
fwrite(buffer,1,nread,outf);
|
||||
|
||||
printf("(fread) return %d bytes\n", want);
|
||||
|
||||
return want;
|
||||
}
|
||||
return 0; /* no data available to return */
|
||||
}
|
||||
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
URL_FILE *handle;
|
||||
int nread;
|
||||
char buffer[256];
|
||||
|
||||
(void)argc;
|
||||
(void)argv;
|
||||
|
||||
handle = url_fopen("http://curl.haxx.se/", "r");
|
||||
|
||||
if(!handle) {
|
||||
printf("couldn't url_fopen()\n");
|
||||
}
|
||||
|
||||
do {
|
||||
nread = url_fread(buffer, sizeof(buffer), 1, handle);
|
||||
|
||||
printf("We got: %d bytes\n", nread);
|
||||
} while(nread);
|
||||
|
||||
url_fclose(handle);
|
||||
|
||||
return 0;
|
||||
url_fclose(handle);
|
||||
|
||||
fclose(outf);
|
||||
|
||||
|
||||
return 0;/* all done */
|
||||
}
|
||||
|
@@ -28,7 +28,6 @@ man_MANS = \
|
||||
curl_mprintf.3 \
|
||||
curl_global_init.3 \
|
||||
curl_global_cleanup.3 \
|
||||
libcurl.3 \
|
||||
curl_multi_add_handle.3 \
|
||||
curl_multi_cleanup.3 \
|
||||
curl_multi_fdset.3 \
|
||||
@@ -36,7 +35,11 @@ man_MANS = \
|
||||
curl_multi_init.3 \
|
||||
curl_multi_perform.3 \
|
||||
curl_multi_remove_handle.3 \
|
||||
curl_share_cleanup.3 curl_share_init.3 curl_share_setopt.3 \
|
||||
libcurl.3 \
|
||||
libcurl-easy.3 \
|
||||
libcurl-multi.3 \
|
||||
libcurl-share.3 \
|
||||
libcurl-errors.3
|
||||
|
||||
HTMLPAGES = \
|
||||
@@ -63,7 +66,6 @@ HTMLPAGES = \
|
||||
curl_mprintf.html \
|
||||
curl_global_init.html \
|
||||
curl_global_cleanup.html \
|
||||
libcurl.html \
|
||||
curl_multi_add_handle.html \
|
||||
curl_multi_cleanup.html \
|
||||
curl_multi_fdset.html \
|
||||
@@ -71,9 +73,12 @@ HTMLPAGES = \
|
||||
curl_multi_init.html \
|
||||
curl_multi_perform.html \
|
||||
curl_multi_remove_handle.html \
|
||||
curl_share_cleanup.html curl_share_init.html curl_share_setopt.html \
|
||||
libcurl.html \
|
||||
libcurl-multi.html \
|
||||
libcurl-errors.html \
|
||||
index.html
|
||||
libcurl-easy.html \
|
||||
libcurl-share.html \
|
||||
libcurl-errors.html
|
||||
|
||||
PDFPAGES = \
|
||||
curl_easy_cleanup.pdf \
|
||||
@@ -99,7 +104,6 @@ PDFPAGES = \
|
||||
curl_mprintf.pdf \
|
||||
curl_global_init.pdf \
|
||||
curl_global_cleanup.pdf \
|
||||
libcurl.pdf \
|
||||
curl_multi_add_handle.pdf \
|
||||
curl_multi_cleanup.pdf \
|
||||
curl_multi_fdset.pdf \
|
||||
@@ -107,26 +111,31 @@ PDFPAGES = \
|
||||
curl_multi_init.pdf \
|
||||
curl_multi_perform.pdf \
|
||||
curl_multi_remove_handle.pdf \
|
||||
curl_share_cleanup.pdf curl_share_init.pdf curl_share_setopt.pdf \
|
||||
libcurl.pdf \
|
||||
libcurl-multi.pdf \
|
||||
libcurl-easy.pdf \
|
||||
libcurl-share.pdf \
|
||||
libcurl-errors.pdf
|
||||
|
||||
EXTRA_DIST = $(man_MANS) $(HTMLPAGES) $(PDFPAGES)
|
||||
CLEANFILES = $(HTMLPAGES) $(PDFPAGES)
|
||||
|
||||
EXTRA_DIST = $(man_MANS) $(HTMLPAGES) index.html $(PDFPAGES)
|
||||
|
||||
MAN2HTML= gnroff -man $< | man2html >$@
|
||||
|
||||
SUFFIXES = .1 .3 .html
|
||||
SUFFIXES = .3 .html
|
||||
|
||||
html: $(HTMLPAGES)
|
||||
|
||||
.3.html:
|
||||
$(MAN2HTML)
|
||||
|
||||
.1.html:
|
||||
$(MAN2HTML)
|
||||
pdf: $(PDFPAGES)
|
||||
|
||||
pdf:
|
||||
for file in $(man_MANS); do \
|
||||
foo=`echo $$file | sed -e 's/\.[0-9]$$//g'`; \
|
||||
groff -Tps -man $$file >$$foo.ps; \
|
||||
ps2pdf $$foo.ps $$foo.pdf; \
|
||||
done
|
||||
.3.pdf:
|
||||
@(foo=`echo $@ | sed -e 's/\.[0-9]$$//g'`; \
|
||||
groff -Tps -man $< >$$foo.ps; \
|
||||
ps2pdf $$foo.ps $@; \
|
||||
rm $$foo.ps; \
|
||||
echo "converted $< to $@")
|
||||
|
@@ -2,7 +2,7 @@
|
||||
.\" nroff -man [file]
|
||||
.\" $Id$
|
||||
.\"
|
||||
.TH curl_easy_init 3 "25 Apr 2002" "libcurl 7.9.7" "libcurl Manual"
|
||||
.TH curl_easy_getinfo 3 "25 Apr 2002" "libcurl 7.9.7" "libcurl Manual"
|
||||
.SH NAME
|
||||
curl_easy_getinfo - Extract information from a curl session (added in 7.4)
|
||||
.SH SYNOPSIS
|
||||
|
@@ -1,7 +1,7 @@
|
||||
.\" nroff -man [file]
|
||||
.\" $Id$
|
||||
.\"
|
||||
.TH curl_easy_setopt 3 "10 Jun 2003" "libcurl 7.10.6" "libcurl Manual"
|
||||
.TH curl_easy_setopt 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
|
||||
.SH NAME
|
||||
curl_easy_setopt - set options for a curl easy handle
|
||||
.SH SYNOPSIS
|
||||
@@ -75,7 +75,7 @@ of bytes actually taken care of. If that amount differs from the amount passed
|
||||
to your function, it'll signal an error to the library and it will abort the
|
||||
transfer and return \fICURLE_WRITE_ERROR\fP.
|
||||
|
||||
Set the \fIstream\fP argument with the \fBCURLOPT_FILE\fP option.
|
||||
Set the \fIstream\fP argument with the \fBCURLOPT_WRITEDATA\fP option.
|
||||
|
||||
\fBNOTE:\fP you will be passed as much data as possible in all invokes, but
|
||||
you cannot possibly make any assumptions. It may be one byte, it may be
|
||||
@@ -143,12 +143,11 @@ operation and an error (CURLE_BAD_PASSWORD_ENTERED) will be returned.
|
||||
is a zero-terminated string with the text that prefixes the input request.
|
||||
\fIbuffer\fP is a pointer to data where the entered password should be stored
|
||||
and \fIbuflen\fP is the maximum number of bytes that may be written in the
|
||||
buffer. (Added in 7.4.2)
|
||||
buffer.
|
||||
.TP
|
||||
.B CURLOPT_PASSWDDATA
|
||||
Pass a void * to whatever data you want. The passed pointer will be the first
|
||||
argument sent to the specified \fICURLOPT_PASSWDFUNCTION\fP function. (Added in
|
||||
7.4.2)
|
||||
argument sent to the specified \fICURLOPT_PASSWDFUNCTION\fP function.
|
||||
.TP
|
||||
.B CURLOPT_HEADERFUNCTION
|
||||
Function pointer that should match the following prototype: \fIsize_t
|
||||
@@ -161,7 +160,7 @@ multiplied with \fInmemb\fP. The pointer named \fIstream\fP will be the one
|
||||
you passed to libcurl with the \fICURLOPT_WRITEHEADER\fP option. Return the
|
||||
number of bytes actually written or return -1 to signal error to the library
|
||||
(it will cause it to abort the transfer with a \fICURLE_WRITE_ERROR\fP return
|
||||
code). (Added in 7.7.2)
|
||||
code).
|
||||
.TP
|
||||
.B CURLOPT_WRITEHEADER
|
||||
Pass a pointer to be used to write the header part of the received data to. If
|
||||
@@ -248,23 +247,23 @@ default. (Added in 7.10)
|
||||
Set the parameter to non-zero to get the library to tunnel all operations
|
||||
through a given HTTP proxy. Note that there is a big difference between using
|
||||
a proxy and to tunnel through it. If you don't know what this means, you
|
||||
probably don't want this tunneling option. (Added in 7.3)
|
||||
probably don't want this tunneling option.
|
||||
.TP
|
||||
.B CURLOPT_INTERFACE
|
||||
Pass a char * as parameter. This sets the interface name to use as outgoing
|
||||
network interface. The name can be an interface name, an IP address or a host
|
||||
name. (Added in 7.3)
|
||||
name.
|
||||
.TP
|
||||
.B CURLOPT_DNS_CACHE_TIMEOUT
|
||||
Pass a long, this sets the timeout in seconds. Name resolves will be kept in
|
||||
memory for this number of seconds. Set to zero (0) to completely disable
|
||||
caching, or set to -1 to make the cached entries remain forever. By default,
|
||||
libcurl caches info for 60 seconds. (Added in 7.9.3)
|
||||
libcurl caches info for 60 seconds.
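A minimal sketch of setting the DNS cache lifetime described above; the URL and the 90-second value are arbitrary examples, and error handling is trimmed:

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
    /* keep resolved names for 90 seconds instead of the 60-second default */
    curl_easy_setopt(curl, CURLOPT_DNS_CACHE_TIMEOUT, 90L);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}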
|
||||
.TP
|
||||
.B CURLOPT_DNS_USE_GLOBAL_CACHE
|
||||
Pass a long. If the value is non-zero, it tells curl to use a global DNS cache
|
||||
that will survive between easy handle creations and deletions. This is not
|
||||
thread-safe and this will use a global variable. (Added in 7.9.3)
|
||||
thread-safe and this will use a global variable.
|
||||
.TP
|
||||
.B CURLOPT_BUFFERSIZE
|
||||
Pass a long specifying your preferred size for the receive buffer in libcurl.
|
||||
@@ -376,6 +375,17 @@ This is a convenience macro that sets all bits except Basic and thus makes
|
||||
libcurl pick any it finds suitable. libcurl will automatically select the one it
|
||||
finds most secure.
|
||||
.RE
|
||||
.TP
|
||||
.B CURLOPT_PROXYAUTH
|
||||
Pass a long as parameter, which is set to a bitmask, to tell libcurl what
|
||||
authentication method(s) you want it to use for your proxy authentication. If
|
||||
more than one bit is set, libcurl will first query the site to see what
|
||||
authentication methods it supports and then pick the best one you allow it to
|
||||
use. Note that for some methods, this will induce an extra network
|
||||
round-trip. Set the actual name and password with the
|
||||
\fICURLOPT_PROXYUSERPWD\fP option. The bitmask can be constructed by or'ing
|
||||
together the bits listed above for the \fICURLOPT_HTTPAUTH\fP option. As of
|
||||
this writing, only Basic and NTLM work. (Added in 7.10.7)
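A sketch of building the proxy-authentication bitmask described above; the proxy address and credentials are placeholders and error handling is trimmed:

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
    curl_easy_setopt(curl, CURLOPT_PROXY, "proxy.example.com:3128");
    curl_easy_setopt(curl, CURLOPT_PROXYUSERPWD, "user:secret");
    /* allow Basic and NTLM; libcurl picks the best one the proxy offers */
    curl_easy_setopt(curl, CURLOPT_PROXYAUTH,
                     (long)(CURLAUTH_BASIC | CURLAUTH_NTLM));
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}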
|
||||
.PP
|
||||
.SH HTTP OPTIONS
|
||||
.TP 0.4i
|
||||
@@ -411,7 +421,7 @@ that this is meaningful only when setting \fICURLOPT_FOLLOWLOCATION\fP.
|
||||
Pass a long. The set number will be the redirection limit. If that many
|
||||
redirections have been followed, the next redirect will cause an error
|
||||
(\fICURLE_TOO_MANY_REDIRECTS\fP). This option only makes sense if the
|
||||
\fICURLOPT_FOLLOWLOCATION\fP is used at the same time. (Added in 7.5)
|
||||
\fICURLOPT_FOLLOWLOCATION\fP is used at the same time.
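A small sketch of combining the two options just described; the URL and the limit of five redirects are example values only:

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/redirected");
    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
    /* fail with CURLE_TOO_MANY_REDIRECTS after five Location: hops */
    curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 5L);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}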
|
||||
.TP
|
||||
.B CURLOPT_PUT
|
||||
A non-zero parameter tells the library to use HTTP PUT to transfer data. The
|
||||
@@ -443,8 +453,7 @@ the \fICURLOPT_HTTPPOST\fP option.
|
||||
If you want to post data to the server without letting libcurl do a strlen()
|
||||
to measure the data size, this option must be used. When this option is used
|
||||
you can post fully binary data, which otherwise is likely to fail. If this
|
||||
size is set to zero, the library will use strlen() to get the size. (Added in
|
||||
libcurl 7.2)
|
||||
size is set to zero, the library will use strlen() to get the size.
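A sketch of posting fully binary data as explained above, so that libcurl never runs strlen() on the payload; the URL is a placeholder:

#include <curl/curl.h>

int main(void)
{
  /* binary payload containing an embedded zero byte */
  static const char blob[] = { 'a', 'b', '\0', 'c', 'd' };
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/upload");
    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, blob);
    /* give the exact size so the embedded zero byte is sent too */
    curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, (long)sizeof(blob));
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}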
|
||||
.TP
|
||||
.B CURLOPT_HTTPPOST
|
||||
Tells libcurl you want a multipart/formdata HTTP POST to be made and you
|
||||
@@ -534,7 +543,7 @@ internally known cookies to the specified file when \fIcurl_easy_cleanup(3)\fP
|
||||
is called. If no cookies are known, no file will be created. Specify "-" to
|
||||
instead have the cookies written to stdout. Using this option also enables
|
||||
cookies for this session, so if you for example follow a location it will make
|
||||
matching cookies get sent accordingly. (Added in 7.9)
|
||||
matching cookies get sent accordingly.
|
||||
|
||||
.B NOTE
|
||||
If the cookie jar file can't be created or written to (when the
|
||||
@@ -556,7 +565,7 @@ CURLOPT_TIMECONDITION.
|
||||
.B CURLOPT_HTTPGET
|
||||
Pass a long. If the long is non-zero, this forces the HTTP request to get back
|
||||
to GET. Only really usable if POST, PUT or a custom request have been used
|
||||
previously using the same curl handle. (Added in 7.8.1)
|
||||
previously using the same curl handle.
|
||||
.TP
|
||||
.B CURLOPT_HTTP_VERSION
|
||||
Pass a long, set to one of the values described below. They force libcurl to
|
||||
@@ -636,6 +645,11 @@ Pass a long. If the value is non-zero, it tells curl to use the EPSV command
|
||||
when doing passive FTP downloads (which it always does by default). Using EPSV
|
||||
means that it will first attempt to use EPSV before using PASV, but if you
|
||||
pass FALSE (zero) to this option, it will not try using EPSV, only plain PASV.
|
||||
.TP
|
||||
.B CURLOPT_FTP_CREATE_MISSING_DIRS
|
||||
Pass a long. If the value is non-zero, curl will attempt to create any remote
|
||||
directory that it fails to CWD into. CWD is the command that changes working
|
||||
directory. (Added in 7.10.7)
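A sketch of an FTP upload that relies on the option described above; the host, path and local file name are placeholders, error checking is trimmed, and it assumes the default read callback (which reads from the CURLOPT_READDATA FILE pointer with fread):

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  FILE *src = fopen("localfile", "rb");
  CURL *curl = curl_easy_init();
  if(curl && src) {
    /* the new/dirs/ components are created on the server if missing */
    curl_easy_setopt(curl, CURLOPT_URL,
                     "ftp://ftp.example.com/new/dirs/localfile");
    curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
    curl_easy_setopt(curl, CURLOPT_FTP_CREATE_MISSING_DIRS, 1L);
    curl_easy_setopt(curl, CURLOPT_READDATA, src);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  if(src)
    fclose(src);
  return 0;
}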
|
||||
.PP
|
||||
.SH PROTOCOL OPTIONS
|
||||
.TP 0.4i
|
||||
@@ -682,8 +696,7 @@ Pass a long. If it is a non-zero value, libcurl will attempt to get the
|
||||
modification date of the remote document in this operation. This requires that
|
||||
the remote server sends the time or replies to a time querying command. The
|
||||
\fIcurl_easy_getinfo(3)\fP function with the \fICURLINFO_FILETIME\fP argument
|
||||
can be used after a transfer to extract the received time (if any). (Added in
|
||||
7.5)
|
||||
can be used after a transfer to extract the received time (if any).
|
||||
.TP
|
||||
.B CURLOPT_NOBODY
|
||||
A non-zero parameter tells the library to not include the body-part in the
|
||||
@@ -734,7 +747,7 @@ open connections to increase.
|
||||
|
||||
\fBNOTE:\fP if you already have performed transfers with this curl handle,
|
||||
setting a smaller MAXCONNECTS than before may cause open connections to get
|
||||
closed unnecessarily. (Added in 7.7)
|
||||
closed unnecessarily.
|
||||
.TP
|
||||
.B CURLOPT_CLOSEPOLICY
|
||||
Pass a long. This option sets what policy libcurl should use when the
|
||||
@@ -745,7 +758,7 @@ the connection that was least recently used, that connection is also least
|
||||
likely to be capable of re-use. Use \fICURLCLOSEPOLICY_OLDEST\fP to make
|
||||
libcurl close the oldest connection, the one that was created first among the
|
||||
ones in the connection cache. The other close policies are not supported
|
||||
yet. (Added in 7.7)
|
||||
yet.
|
||||
.TP
|
||||
.B CURLOPT_FRESH_CONNECT
|
||||
Pass a long. Set to non-zero to make the next transfer use a new (fresh)
|
||||
@@ -753,7 +766,7 @@ connection by force. If the connection cache is full before this connection,
|
||||
one of the existing connections will be closed as according to the selected or
|
||||
default policy. This option should be used with caution and only if you
|
||||
understand what it does. Set this to 0 to have libcurl attempt re-using an
|
||||
existing connection (default behavior). (Added in 7.7)
|
||||
existing connection (default behavior).
|
||||
.TP
|
||||
.B CURLOPT_FORBID_REUSE
|
||||
Pass a long. Set to non-zero to make the next transfer explicitly close the
|
||||
@@ -761,7 +774,7 @@ connection when done. Normally, libcurl keep all connections alive when done
|
||||
with one transfer in case there comes a succeeding one that can re-use them.
|
||||
This option should be used with caution and only if you understand what it
|
||||
does. Set to 0 to have libcurl keep the connection open for possibly later
|
||||
re-use (default behavior). (Added in 7.7)
|
||||
re-use (default behavior).
|
||||
.TP
|
||||
.B CURLOPT_CONNECTTIMEOUT
|
||||
Pass a long. It should contain the maximum time in seconds that you allow the
|
||||
@@ -798,12 +811,11 @@ a certificate but you need one to load your private key.
|
||||
.B CURLOPT_SSLKEY
|
||||
Pass a pointer to a zero terminated string as parameter. The string should be
|
||||
the file name of your private key. The default format is "PEM" and can be
|
||||
changed with \fICURLOPT_SSLKEYTYPE\fP. (Added in 7.9.3)
|
||||
changed with \fICURLOPT_SSLKEYTYPE\fP.
|
||||
.TP
|
||||
.B CURLOPT_SSLKEYTYPE
|
||||
Pass a pointer to a zero terminated string as parameter. The string should be
|
||||
the format of your private key. Supported formats are "PEM", "DER" and "ENG".
|
||||
(Added in 7.9.3)
|
||||
|
||||
\fBNOTE:\fP The format "ENG" enables you to load the private key from a crypto
|
||||
engine. In this case \fICURLOPT_SSLKEY\fP is used as an identifier passed to
|
||||
@@ -814,19 +826,18 @@ Pass a pointer to a zero terminated string as parameter. It will be used as
|
||||
the password required to use the \fICURLOPT_SSLKEY\fP private key. If the
|
||||
password is not supplied, you will be prompted for
|
||||
it. \fICURLOPT_PASSWDFUNCTION\fP can be used to set your own prompt function.
|
||||
(Added in 7.9.3)
|
||||
.TP
|
||||
.B CURLOPT_SSL_ENGINE
|
||||
Pass a pointer to a zero terminated string as parameter. It will be used as
|
||||
the identifier for the crypto engine you want to use for your private
|
||||
key. (Added in 7.9.3)
|
||||
key.
|
||||
|
||||
\fBNOTE:\fP If the crypto device cannot be loaded,
|
||||
\fICURLE_SSL_ENGINE_NOTFOUND\fP is returned.
|
||||
.TP
|
||||
.B CURLOPT_SSL_ENGINEDEFAULT
|
||||
Sets the actual crypto engine as the default for (asymmetric) crypto
|
||||
operations. (Added in 7.9.3)
|
||||
operations.
|
||||
|
||||
\fBNOTE:\fP If the crypto device cannot be set,
|
||||
\fICURLE_SSL_ENGINE_SETFAILED\fP is returned.
|
||||
@@ -840,15 +851,15 @@ servers make this difficult why you at times may have to use this option.
|
||||
Pass a long that is set to a zero value to stop curl from verifying the peer's
|
||||
certificate (starting with 7.10, this option is set to TRUE by default). Alternate
|
||||
certificates to verify against can be specified with the CURLOPT_CAINFO option
|
||||
(Added in 7.4.2) or a certificate directory can be specified with the
|
||||
CURLOPT_CAPATH option (Added in 7.9.8). As of 7.10, curl installs a default
|
||||
bundle. CURLOPT_SSL_VERIFYHOST may also need to be set to 1 or 0 if
|
||||
or a certificate directory can be specified with the CURLOPT_CAPATH option
|
||||
(Added in 7.9.8). As of 7.10, curl installs a default bundle.
|
||||
CURLOPT_SSL_VERIFYHOST may also need to be set to 1 or 0 if
|
||||
CURLOPT_SSL_VERIFYPEER is disabled (it defaults to 2).
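A sketch of the peer- and host-verification setup described above; the URL and the CA bundle path are example values, not defaults:

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    /* verify the peer against a specific CA bundle (path is an example) */
    curl_easy_setopt(curl, CURLOPT_CAINFO, "/etc/ssl/certs/ca-bundle.crt");
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1L);
    /* 2 = also require that the certificate matches the host name */
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2L);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}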
|
||||
.TP
|
||||
.B CURLOPT_CAINFO
|
||||
Pass a char * to a zero terminated string naming a file holding one or more
|
||||
certificates to verify the peer with. This only makes sense when used in
|
||||
combination with the CURLOPT_SSL_VERIFYPEER option. (Added in 7.4.2)
|
||||
combination with the CURLOPT_SSL_VERIFYPEER option.
|
||||
.TP
|
||||
.B CURLOPT_CAPATH
|
||||
Pass a char * to a zero terminated string naming a directory holding multiple
|
||||
@@ -870,8 +881,7 @@ socket. It will be used to seed the random engine for SSL.
|
||||
.B CURLOPT_SSL_VERIFYHOST
|
||||
Pass a long. Set if we should verify the Common name from the peer certificate
|
||||
in the SSL handshake, set 1 to check existence, 2 to ensure that it matches
|
||||
the provided hostname. This is by default set to 2. (Added in 7.8.1, default
|
||||
changed in 7.10)
|
||||
the provided hostname. This is by default set to 2. (default changed in 7.10)
|
||||
.TP
|
||||
.B CURLOPT_SSL_CIPHER_LIST
|
||||
Pass a char *, pointing to a zero terminated string holding the list of
|
||||
@@ -890,7 +900,7 @@ Pass a char * as parameter. Set the krb4 security level, this also enables
|
||||
krb4 awareness. This is a string, 'clear', 'safe', 'confidential' or
|
||||
\&'private'. If the string is set but doesn't match one of these, 'private'
|
||||
will be used. Set the string to NULL to disable kerberos4. The kerberos
|
||||
support only works for FTP. (Added in 7.3)
|
||||
support only works for FTP.
|
||||
.PP
|
||||
.SH OTHER OPTIONS
|
||||
.TP 0.4i
|
||||
@@ -905,7 +915,3 @@ error occurred as \fI<curl/curl.h>\fP defines. See the \fIlibcurl-errors.3\fP
|
||||
man page for the full list with descriptions.
|
||||
.SH "SEE ALSO"
|
||||
.BR curl_easy_init "(3), " curl_easy_cleanup "(3), "
|
||||
.SH BUGS
|
||||
If you find any bugs, or just have questions, subscribe to one of the mailing
|
||||
lists and post. We won't bite.
|
||||
|
||||
|
@@ -2,16 +2,16 @@
|
||||
.\" nroff -man [file]
|
||||
.\" $Id:
|
||||
.\"
|
||||
.TH curl_free 3 "24 Sept 2002" "libcurl 7.10" "libcurl Manual"
|
||||
.TH curl_free 3 "12 Aug 2003" "libcurl 7.10" "libcurl Manual"
|
||||
.SH NAME
|
||||
curl_free - reclaim memory that has been obtained through a libcurl call
|
||||
.SH SYNOPSIS
|
||||
.B #include <curl/curl.h>
|
||||
.sp
|
||||
.BI "void *curl_free( char *" ptr " );"
|
||||
.BI "void curl_free( char *" ptr " );"
|
||||
.ad
|
||||
.SH DESCRIPTION
|
||||
curl_free reclaims memory that has been obtained through a libcurl call.
|
||||
Use curl_free() instead of free() to avoid anomalies that can result from differences in memory management between your application and libcurl.
|
||||
.SH "SEE ALSO"
|
||||
.I curl_unescape(), curl_free()
|
||||
.I curl_unescape()
|
||||
|
19
docs/libcurl/curl_share_cleanup.3
Normal file
@@ -0,0 +1,19 @@
.\" $Id$
.\"
.TH curl_share_cleanup 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_cleanup - Clean up a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "CURLSHcode curl_share_cleanup( );"
.ad
.SH DESCRIPTION
This function deletes a shared object. The share handle cannot be used anymore
when this function has been called.

.SH RETURN VALUE
If this function returns non-zero, the object was not properly deleted and it
still remains!
.SH "SEE ALSO"
.BR curl_share_init "(3), " curl_share_setopt "(3)"
21
docs/libcurl/curl_share_init.3
Normal file
@@ -0,0 +1,21 @@
.\" $Id$
.\"
.TH curl_share_init 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
.SH NAME
curl_share_init - Create a shared object
.SH SYNOPSIS
.B #include <curl/curl.h>
.sp
.BI "CURLSH *curl_share_init( );"
.ad
.SH DESCRIPTION
This function returns a CURLSH handle to be used as input to all the other
share-functions, sometimes referred to as a share handle in some places in the
documentation. This init call MUST have a corresponding call to
\fIcurl_share_cleanup\fP when all operations using the share are complete.
.SH RETURN VALUE
If this function returns NULL, something went wrong and you got no share
object to use.
.SH "SEE ALSO"
.BR curl_share_cleanup "(3), " curl_share_setopt "(3)"
46
docs/libcurl/curl_share_setopt.3
Normal file
@@ -0,0 +1,46 @@
|
||||
.\" $Id$
|
||||
.\"
|
||||
.TH curl_share_setopt 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
|
||||
.SH NAME
|
||||
curl_share_setopt - Set options for a shared object
|
||||
.SH SYNOPSIS
|
||||
.B #include <curl/curl.h>
|
||||
.sp
|
||||
CURLSHcode curl_share_setopt(CURLSH *share, CURLSHoption option, parameter);
|
||||
.ad
|
||||
.SH DESCRIPTION
|
||||
Set the \fIoption\fP to \fIparameter\fP for the given \fIshare\fP.
|
||||
.SH OPTIONS
|
||||
.TP 0.4i
|
||||
.B CURLSHOPT_LOCKFUNC
|
||||
The \fIparameter\fP must be a pointer to a function matching the following
|
||||
prototype:
|
||||
|
||||
void lock_function(CURL *handle, curl_lock_data data, curl_lock_access access,
|
||||
void *userptr);
|
||||
|
||||
\fIdata\fP defines what data libcurl wants to lock, and you must make sure that
|
||||
only one lock is given at any time for each kind of data.
|
||||
|
||||
\fIaccess\fP defines what access type libcurl wants, shared or single.
|
||||
|
||||
\fIuserptr\fP is the pointer you set with \fICURLSHOPT_USERDATA\fP.
|
||||
|
||||
.TP
|
||||
.B CURLSHOPT_UNLOCKFUNC
|
||||
hej
|
||||
.TP
|
||||
.B CURLSHOPT_SHARE
|
||||
hej
|
||||
.TP
|
||||
.B CURLSHOPT_UNSHARE
|
||||
hej
|
||||
.TP
|
||||
.B CURLSHOPT_USERDATA
|
||||
hej
|
||||
.PP
|
||||
.SH RETURN VALUE
|
||||
If this function returns non-zero, something was wrong!
|
||||
|
||||
.SH "SEE ALSO"
|
||||
.BR curl_share_cleanup "(3), " curl_share_init "(3)"
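A sketch of lock/unlock callbacks matching the CURLSHOPT_LOCKFUNC prototype given above, wired into a share handle. It assumes a pthreads build, uses one global mutex for every data kind (a simplification), and assumes the unlock prototype mirrors the lock one without the access argument:

#include <pthread.h>
#include <curl/curl.h>

static pthread_mutex_t share_lock = PTHREAD_MUTEX_INITIALIZER;

static void my_lock(CURL *handle, curl_lock_data data,
                    curl_lock_access access, void *userptr)
{
  (void)handle; (void)data; (void)access; (void)userptr;
  pthread_mutex_lock(&share_lock);   /* one mutex guards all shared data */
}

static void my_unlock(CURL *handle, curl_lock_data data, void *userptr)
{
  (void)handle; (void)data; (void)userptr;
  pthread_mutex_unlock(&share_lock);
}

/* install the callbacks on an already created share handle */
void setup_share_locks(CURLSH *share)
{
  curl_share_setopt(share, CURLSHOPT_LOCKFUNC, my_lock);
  curl_share_setopt(share, CURLSHOPT_UNLOCKFUNC, my_unlock);
  curl_share_setopt(share, CURLSHOPT_USERDATA, NULL);
}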
|
@@ -13,15 +13,7 @@ curl_version - returns the libcurl version string
|
||||
.SH DESCRIPTION
|
||||
Returns a human readable string with the version number of libcurl and some of
|
||||
its important components (like OpenSSL version).
|
||||
|
||||
Note: this returns the actual running lib's version, you might have installed
|
||||
a newer lib's include files in your system which may turn your LIBCURL_VERSION
|
||||
#define value to differ from this result.
|
||||
.SH RETURN VALUE
|
||||
A pointer to a zero terminated string.
|
||||
.SH "SEE ALSO"
|
||||
The
|
||||
.I LIBCURL_VERSION
|
||||
#define in <curl/curl.h>
|
||||
.SH BUGS
|
||||
Surely there are some, you tell me!
|
||||
.BR curl_version_info "(3)"
|
||||
|
@@ -2,7 +2,7 @@
|
||||
.\" nroff -man [file]
|
||||
.\" $Id$
|
||||
.\"
|
||||
.TH curl_version_info 3 "17 Jun 2003" "libcurl 7.10.6" "libcurl Manual"
|
||||
.TH curl_version_info 3 "12 Aug 2003" "libcurl 7.10.7" "libcurl Manual"
|
||||
.SH NAME
|
||||
curl_version_info - returns run-time libcurl version info
|
||||
.SH SYNOPSIS
|
||||
@@ -79,6 +79,11 @@ supports HTTP GSS-Negotiate (added in 7.10.6)
|
||||
.B CURL_VERSION_DEBUG
|
||||
libcurl was built with extra debug capabilities built-in. This is mainly of
|
||||
interest for libcurl hackers. (added in 7.10.6)
|
||||
.TP
|
||||
.B CURL_VERSION_ASYNCHDNS
|
||||
libcurl was built with support for asynchronous name lookups, which allows
|
||||
more exact timeouts (even on Windows) and less blocking when using the multi
|
||||
interface. (added in 7.10.7)
|
||||
.PP
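A short sketch of testing the feature bits listed above at run time, here the asynchronous name-lookup bit; CURLVERSION_NOW is the usual age argument:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);

  printf("running libcurl %s\n", info->version);
  if(info->features & CURL_VERSION_ASYNCHDNS)
    printf("built with asynchronous name lookups\n");
  else
    printf("name lookups are blocking\n");
  return 0;
}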
|
||||
\fIssl_version\fP is an ascii string for the OpenSSL version used. If libcurl
|
||||
has no SSL support, this is NULL.
|
||||
@@ -97,5 +102,4 @@ entry.
|
||||
A pointer to a curl_version_info_data struct.
|
||||
.SH "SEE ALSO"
|
||||
\fIcurl_version(3)\fP
|
||||
.SH BUGS
|
||||
No known bugs.
|
||||
|
||||
|
@@ -12,7 +12,9 @@
|
||||
|
||||
<h2>Overviews</h2>
|
||||
<A HREF="libcurl.html">libcurl</A>
|
||||
<br><a href="libcurl-easy.html">libcurl-easy</a>
|
||||
<br><a href="libcurl-multi.html">libcurl-multi</a>
|
||||
<br><a href="libcurl-share.html">libcurl-share</a>
|
||||
<br><a href="libcurl-errors.html">libcurl-errors</a>
|
||||
<br><a href="../libcurl-the-guide">libcurl-the-guide</a> (plain text)
|
||||
|
||||
@@ -40,6 +42,9 @@
|
||||
<br><a href="curl_multi_init.html">curl_multi_init</a>
|
||||
<br><a href="curl_multi_perform.html">curl_multi_perform</a>
|
||||
<br><a href="curl_multi_remove_handle.html">curl_multi_remove_handle</a>
|
||||
<br><a href="curl_share_cleanup.html">curl_share_cleanup</A>
|
||||
<br><a href="curl_share_init.html">curl_share_init</A>
|
||||
<br><a href="curl_share_setopt.html">curl_share_setopt</A>
|
||||
<br><a href="curl_slist_append.html">curl_slist_append</A>
|
||||
<br><a href="curl_slist_free_all.html">curl_slist_free_all</A>
|
||||
<br><a href="curl_strequal.html">curl_strequal</A>
|
||||
|
29
docs/libcurl/libcurl-easy.3
Normal file
@@ -0,0 +1,29 @@
|
||||
.\" You can view this file with:
|
||||
.\" nroff -man [file]
|
||||
.\" $Id$
|
||||
.\"
|
||||
.TH libcurl 3 "12 Aug 2003" "libcurl 7.10.7" "libcurl easy interface"
|
||||
.SH NAME
|
||||
libcurl-easy \- easy interface overview
|
||||
.SH DESCRIPTION
|
||||
When using libcurl's "easy" interface you init your session and get a handle
|
||||
(often referred to as an "easy handle" in various docs and sources), which you
|
||||
use as input to the easy interface functions you use. Use
|
||||
\fIcurl_easy_init()\fP to get the handle.
|
||||
|
||||
You continue by setting all the options you want in the upcoming transfer, the
|
||||
most important among them is the URL itself (you can't transfer anything
|
||||
without a specified URL as you may have figured out yourself). You might want
|
||||
to set some callbacks as well that will be called from the library when data
|
||||
is available etc. \fIcurl_easy_setopt()\fP is used for all this.
|
||||
|
||||
When all is setup, you tell libcurl to perform the transfer using
|
||||
\fIcurl_easy_perform()\fP. It will then do the entire operation and won't
|
||||
return until it is done (successfully or not).
|
||||
|
||||
After the transfer has been made, you can set new options and make another
|
||||
transfer, or if you're done, cleanup the session by calling
|
||||
\fIcurl_easy_cleanup()\fP. If you want persistent connections, you don't
|
||||
cleanup immediately, but instead run ahead and perform other transfers using
|
||||
the same easy handle.
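A minimal sketch of the init/setopt/perform/cleanup sequence just described; the URL is an example and error handling is trimmed:

#include <curl/curl.h>

int main(void)
{
  CURL *curl;

  curl_global_init(CURL_GLOBAL_ALL);

  curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://curl.haxx.se/");
    curl_easy_perform(curl);   /* blocks until the transfer is done */
    curl_easy_cleanup(curl);
  }

  curl_global_cleanup();
  return 0;
}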
|
||||
|
@@ -2,7 +2,7 @@
|
||||
.\" nroff -man [file]
|
||||
.\" $Id$
|
||||
.\"
|
||||
.TH libcurl-multi 5 "13 Oct 2001" "libcurl 7.10.1" "libcurl multi interface"
|
||||
.TH libcurl-multi 3 "13 Oct 2001" "libcurl 7.10.1" "libcurl multi interface"
|
||||
.SH NAME
|
||||
libcurl-multi \- how to use the multi interface
|
||||
.SH DESCRIPTION
|
||||
|
46
docs/libcurl/libcurl-share.3
Normal file
@@ -0,0 +1,46 @@
|
||||
.\" You can view this file with:
|
||||
.\" nroff -man [file]
|
||||
.\" $Id$
|
||||
.\"
|
||||
.TH libcurl-share 3 "8 Aug 2003" "libcurl 7.10.7" "libcurl share interface"
|
||||
.SH NAME
|
||||
libcurl-share \- how to use the share interface
|
||||
.SH DESCRIPTION
|
||||
This is an overview on how to use the libcurl share interface in your C
|
||||
programs. There are specific man pages for each function mentioned in
|
||||
here.
|
||||
|
||||
All functions in the share interface are prefixed with curl_share.
|
||||
|
||||
.SH "OBJECTIVES"
|
||||
The share interface was added to enable sharing of data between curl
|
||||
\&"handles".
|
||||
.SH "ONE SET OF DATA - MANY TRANSFERS"
|
||||
You can have multiple easy handles share data between them. Have them update
|
||||
and use the \fBsame\fP cookie database or DNS cache! This way, each single
|
||||
transfer will take advantage of data updates made by the other transfer(s).
|
||||
.SH "SHARE OBJECT"
|
||||
You create a shared object with \fIcurl_share_init()\fP. It returns a handle
|
||||
for a newly created one.
|
||||
|
||||
You tell the shared object what data you want it to share by using
|
||||
\fIcurl_share_setopt()\fP. Currently you can only share DNS and/or COOKIE
|
||||
data.
|
||||
|
||||
Since you can use this share from multiple threads, and libcurl has no
|
||||
internal thread synchronization, you must provide mutex callbacks if you're
|
||||
using this multi-threaded. You set lock and unlock functions with
|
||||
\fIcurl_share_setopt()\fP too.
|
||||
|
||||
Then, you make an easy handle to use this share, you set the CURLOPT_SHARE
|
||||
option with \fIcurl_easy_setopt\fP, and pass in the share handle. You can make any
|
||||
number of easy handles share the same share handle.
|
||||
|
||||
To make an easy handle stop using that particular share, you set CURLOPT_SHARE
|
||||
to NULL for that easy handle. To make a handle stop sharing a particular kind of data,
|
||||
you can CURLSHOPT_UNSHARE it.
|
||||
|
||||
When you're done using the share, make sure that no easy handle is still using
|
||||
it, and call \fIcurl_share_cleanup()\fP on the handle.
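A sketch of the flow described above for a single-threaded program, where the lock callbacks can arguably be left unset; with multiple threads they must be installed as noted. The URL is a placeholder and error handling is trimmed:

#include <curl/curl.h>

int main(void)
{
  CURLSH *share = curl_share_init();
  CURL *curl = curl_easy_init();

  if(share && curl) {
    /* share the DNS cache and the cookies between easy handles */
    curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
    curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);

    curl_easy_setopt(curl, CURLOPT_SHARE, share);
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
    curl_easy_perform(curl);

    curl_easy_setopt(curl, CURLOPT_SHARE, NULL); /* stop using the share */
    curl_easy_cleanup(curl);
  }
  if(share)
    curl_share_cleanup(share);
  return 0;
}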
|
||||
.SH "SEE ALSO"
|
||||
.BR curl_share_init "(3), " curl_share_setopt "(3), " curl_share_cleanup "(3)"
|
@@ -7,38 +7,37 @@
|
||||
libcurl \- client-side URL transfers
|
||||
.SH DESCRIPTION
|
||||
This is an overview on how to use libcurl in your C programs. There are
|
||||
specific man pages for each function mentioned in here. There's also the
|
||||
libcurl-the-guide document for a complete tutorial to programming with
|
||||
libcurl.
|
||||
specific man pages for each function mentioned in here. There are also the
|
||||
\fIlibcurl-easy\fP man page, the \fIlibcurl-multi\fP man page, the
|
||||
\fIlibcurl-share\fP man page and the \fIlibcurl-the-guide\fP document for
|
||||
further reading on how to do programming with libcurl.
|
||||
|
||||
There are a dozen custom bindings that bring libcurl access to your favourite
|
||||
language. Look elsewhere for documentation on those.
|
||||
There exist more than a dozen custom bindings that bring libcurl access to
|
||||
your favourite language. Look elsewhere for documentation on those.
|
||||
|
||||
All applications that use libcurl should call \fIcurl_global_init()\fP exactly
|
||||
once before any libcurl function can be used. After all usage of libcurl is
|
||||
complete, it \fBmust\fP call \fIcurl_global_cleanup()\fP. In between those two
|
||||
calls, you can use libcurl as described below.
|
||||
|
||||
When using libcurl's "easy" interface you init your session and get a handle,
|
||||
which you use as input to the easy interface functions you use. Use
|
||||
\fIcurl_easy_init()\fP to get the handle. There is also the so called "multi"
|
||||
interface, try the \fIlibcurl-multi(3)\fP man page for an overview of that.
|
||||
To transfer files, you always set up an "easy handle" using
|
||||
\fIcurl_easy_init()\fP, but when you want the file(s) transferred you have the
|
||||
option of using the "easy" interface, or the "multi" interface.
|
||||
|
||||
You continue by setting all the options you want in the upcoming transfer,
|
||||
most important among them is the URL itself (you can't transfer anything
|
||||
without a specified URL as you may have figured out yourself). You might want
|
||||
to set some callbacks as well that will be called from the library when data
|
||||
is available etc. \fIcurl_easy_setopt()\fP is there for this.
|
||||
The easy interface is a synchronous interface with which you call
|
||||
\fIcurl_easy_perform\fP and let it perform the transfer. When it is completed,
|
||||
the function returns and you can continue. More details are found in the
|
||||
\fIlibcurl-easy\fP man page.
|
||||
|
||||
When all is setup, you tell libcurl to perform the transfer using
|
||||
\fIcurl_easy_perform()\fP. It will then do the entire operation and won't
|
||||
return until it is done (successfully or not).
|
||||
The multi interface on the other hand is an asynchronous interface, that you
|
||||
call and that performs only a little piece of the transfer on each invoke. It
|
||||
is perfect if you want to do things while the transfer is in progress, or
|
||||
similar. The multi interface allows you to select() on libcurl action, and
|
||||
even to easily download multiple files simultaneously using a single thread.
|
||||
|
||||
After the transfer has been made, you can set new options and make another
|
||||
transfer, or if you're done, cleanup the session by calling
|
||||
\fIcurl_easy_cleanup()\fP. If you want persistant connections, you don't
|
||||
cleanup immediately, but instead run ahead and perform other transfers using
|
||||
the same handle. See the chapter below for Persistent Connections.
|
||||
You can have multiple easy handles share certain data, even if they are used
|
||||
in different threads. This magic is setup using the share interface, as
|
||||
described in the \fIlibcurl-share\fP man page.
|
||||
|
||||
There is also a series of other helpful functions to use. They are:
|
||||
|
||||
@@ -107,14 +106,15 @@ Persistent connections means that libcurl can re-use the same connection for
|
||||
several transfers, if the conditions are right.
|
||||
|
||||
libcurl will *always* attempt to use persistent connections. Whenever you use
|
||||
curl_easy_perform(), libcurl will attempt to use an existing connection to do
|
||||
the transfer, and if none exists it'll open a new one that will be subject for
|
||||
re-use on a possible following call to curl_easy_perform().
|
||||
\fIcurl_easy_perform()\fP or \fIcurl_multi_perform()\fP, libcurl will attempt
|
||||
to use an existing connection to do the transfer, and if none exists it'll
|
||||
open a new one that will be subject for re-use on a possible following call to
|
||||
\fIcurl_easy_perform()\fP or \fIcurl_multi_perform()\fP.
|
||||
|
||||
To allow libcurl to take full advantage of persistent connections, you should
|
||||
do as many of your file transfers as possible using the same curl handle. When
|
||||
you call curl_easy_cleanup(), all the possibly open connections held by
|
||||
you call \fIcurl_easy_cleanup()\fP, all the possibly open connections held by
|
||||
libcurl will be closed and forgotten.
|
||||
|
||||
Note that the options set with curl_easy_setopt() will be used in on every
|
||||
repeat curl_easy_perform() call
|
||||
Note that the options set with \fIcurl_easy_setopt()\fP will be used in on
|
||||
every repeated \fIcurl_easy_perform()\fP call.
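A short sketch of the re-use pattern described above: two transfers on the same easy handle, so the second one can ride on the connection kept open by the first (URLs are examples):

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/first.html");
    curl_easy_perform(curl);

    /* same handle, new URL: the previous connection may be re-used */
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/second.html");
    curl_easy_perform(curl);

    curl_easy_cleanup(curl); /* closes any connections still held */
  }
  return 0;
}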
|
||||
|
@@ -29,7 +29,7 @@
|
||||
|
||||
/* This is the version number of the libcurl package from which this header
|
||||
file origins: */
|
||||
#define LIBCURL_VERSION "7.10.6"
|
||||
#define LIBCURL_VERSION "7.10.7"
|
||||
|
||||
/* This is the numeric version of the libcurl version number, meant for easier
|
||||
parsing and comparisons by programs. The LIBCURL_VERSION_NUM define will
|
||||
@@ -45,7 +45,7 @@
|
||||
always a greater number in a more recent release. It makes comparisons with
|
||||
greater than and less than work.
|
||||
*/
|
||||
#define LIBCURL_VERSION_NUM 0x070a06
|
||||
#define LIBCURL_VERSION_NUM 0x070a07
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
@@ -669,6 +669,14 @@ typedef enum {
|
||||
argument */
|
||||
CINIT(SSL_CTX_DATA, OBJECTPOINT, 109),
|
||||
|
||||
/* FTP Option that causes missing dirs to be created on the remote server */
|
||||
CINIT(FTP_CREATE_MISSING_DIRS, LONG, 110),
|
||||
|
||||
/* Set this to a bitmask value to enable the particular authentications
|
||||
methods you like. Use this in combination with CURLOPT_PROXYUSERPWD.
|
||||
Note that setting multiple bits may cause extra network round-trips. */
|
||||
CINIT(PROXYAUTH, LONG, 111),
|
||||
|
||||
CURLOPT_LASTENTRY /* the last unused */
|
||||
} CURLoption;
|
||||
|
||||
@@ -984,22 +992,17 @@ typedef enum {
|
||||
CURLINFO_REQUEST_SIZE = CURLINFO_LONG + 12,
|
||||
CURLINFO_SSL_VERIFYRESULT = CURLINFO_LONG + 13,
|
||||
CURLINFO_FILETIME = CURLINFO_LONG + 14,
|
||||
|
||||
CURLINFO_CONTENT_LENGTH_DOWNLOAD = CURLINFO_DOUBLE + 15,
|
||||
CURLINFO_CONTENT_LENGTH_UPLOAD = CURLINFO_DOUBLE + 16,
|
||||
|
||||
CURLINFO_STARTTRANSFER_TIME = CURLINFO_DOUBLE + 17,
|
||||
CURLINFO_CONTENT_TYPE = CURLINFO_STRING + 18,
|
||||
CURLINFO_REDIRECT_TIME = CURLINFO_DOUBLE + 19,
|
||||
CURLINFO_REDIRECT_COUNT = CURLINFO_LONG + 20,
|
||||
CURLINFO_PRIVATE = CURLINFO_STRING + 21,
|
||||
CURLINFO_HTTP_CONNECTCODE = CURLINFO_LONG + 22,
|
||||
/* Fill in new entries below here! */
|
||||
|
||||
CURLINFO_CONTENT_TYPE = CURLINFO_STRING + 18,
|
||||
|
||||
CURLINFO_REDIRECT_TIME = CURLINFO_DOUBLE + 19,
|
||||
CURLINFO_REDIRECT_COUNT = CURLINFO_LONG + 20,
|
||||
|
||||
CURLINFO_PRIVATE = CURLINFO_STRING + 21,
|
||||
|
||||
/* Fill in new entries here! */
|
||||
|
||||
CURLINFO_LASTONE = 22
|
||||
CURLINFO_LASTONE = 23
|
||||
} CURLINFO;
|
||||
|
||||
typedef enum {
|
||||
@@ -1117,6 +1120,7 @@ typedef struct {
|
||||
#define CURL_VERSION_NTLM (1<<4)
|
||||
#define CURL_VERSION_GSSNEGOTIATE (1<<5)
|
||||
#define CURL_VERSION_DEBUG (1<<6) /* built with debug capabilities */
|
||||
#define CURL_VERSION_ASYNCHDNS (1<<7)
|
||||
|
||||
/*
|
||||
* NAME curl_version_info()
|
||||
|
@@ -5,17 +5,23 @@
|
||||
AUTOMAKE_OPTIONS = foreign nostdinc
|
||||
|
||||
EXTRA_DIST = getdate.y Makefile.b32 Makefile.b32.resp Makefile.m32 \
|
||||
Makefile.vc6 Makefile.riscos libcurl.def curllib.dsp \
|
||||
Makefile.vc6 Makefile.riscos libcurl.def curllib.dsp \
|
||||
curllib.dsw config-vms.h config-win32.h config-riscos.h config-mac.h \
|
||||
config.h.in ca-bundle.crt README.encoding README.memoryleak \
|
||||
makefile.dj config.dj
|
||||
config.h.in ca-bundle.crt README.encoding README.memoryleak \
|
||||
README.ares makefile.dj config.dj
|
||||
|
||||
lib_LTLIBRARIES = libcurl.la
|
||||
|
||||
if ARES
|
||||
ARESINC = -I$(top_srcdir)/ares
|
||||
endif
|
||||
|
||||
# we use srcdir/include for the static global include files
|
||||
# we use builddir/lib for the generated lib/config.h file to get found
|
||||
# we use srcdir/lib for the lib-private header files
|
||||
INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/lib -I$(top_srcdir)/lib
|
||||
INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/lib -I$(top_srcdir)/lib $(ARESINC)
|
||||
|
||||
LDFLAGS += -L$(top_srcdir)/lib
|
||||
|
||||
VERSION=-version-info 2:2:0
|
||||
|
||||
@@ -48,15 +54,18 @@ VERSION=-version-info 2:2:0
|
||||
#
|
||||
|
||||
if NO_UNDEFINED
|
||||
# The -no-undefined flag is CRUCIAL for this to build fine on Cygwin. If we
|
||||
# find a case in which we need to remove this flag, we should most likely
|
||||
# write a configure check that detects when this flag is needed and when its
|
||||
# not.
|
||||
libcurl_la_LDFLAGS = -no-undefined $(VERSION)
|
||||
# The -no-undefined flag is CRUCIAL for this to build fine on Cygwin.
|
||||
UNDEF = -no-undefined
|
||||
else
|
||||
libcurl_la_LDFLAGS = $(VERSION)
|
||||
UNDEF =
|
||||
endif
|
||||
|
||||
if ARES
|
||||
ARESLIB = -lares -L$(top_builddir)/ares
|
||||
endif
|
||||
|
||||
libcurl_la_LDFLAGS = $(UNDEF) $(VERSION) $(ARESLIB)
|
||||
|
||||
libcurl_la_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c \
|
||||
base64.c file.h hostip.c progress.c timeval.h base64.h formdata.c \
|
||||
hostip.h progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c \
|
||||
@@ -82,7 +91,7 @@ $(srcdir)/getdate.c: getdate.y
|
||||
mv -f y.tab.c getdate.c
|
||||
|
||||
$(srcdir)/ca-bundle.h: Makefile.in Makefile
|
||||
cd $(srcdir) && \
|
||||
chmod 0644 $@
|
||||
echo "/* The file is generated automaticly */" > $@
|
||||
if CABUNDLE
|
||||
echo '#define CURL_CA_BUNDLE @CURL_CA_BUNDLE@' >> $@
|
||||
@@ -99,4 +108,5 @@ install-data-hook:
|
||||
# this hook is mainly for non-unix systems to build even if configure
|
||||
# isn't run
|
||||
dist-hook:
|
||||
chmod 0644 $(distdir)/ca-bundle.h
|
||||
echo "/* ca bundle path set in here*/" > $(distdir)/ca-bundle.h
|
||||
|
@@ -204,6 +204,8 @@ X_OBJS= \
|
||||
$(DIROBJ)\share.obj \
|
||||
$(DIROBJ)\multi.obj \
|
||||
$(DIROBJ)\http_digest.obj \
|
||||
$(DIROBJ)\http_negotiate.obj \
|
||||
$(DIROBJ)\http_ntlm.obj \
|
||||
$(DIROBJ)\md5.obj
|
||||
|
||||
all : $(TARGET)
|
||||
@@ -226,6 +228,3 @@ clean:
|
||||
-@erase $(DIROBJ)\*.obj
|
||||
-@erase vc60.idb
|
||||
-@erase vc60.pch
|
||||
|
||||
getdate.c: getdate.c.cvs
|
||||
copy getdate.c.cvs getdate.c
|
||||
|
44
lib/README.ares
Normal file
@@ -0,0 +1,44 @@
|
||||
$Id$
|
||||
_ _ ____ _
|
||||
___| | | | _ \| |
|
||||
/ __| | | | |_) | |
|
||||
| (__| |_| | _ <| |___
|
||||
\___|\___/|_| \_\_____|
|
||||
|
||||
How To Build libcurl to use ares for asynch name resolves
|
||||
=========================================================
|
||||
|
||||
ares:
|
||||
ftp://athena-dist.mit.edu/pub/ATHENA/ares/ares-1.1.1.tar.gz
|
||||
http://curl.haxx.se/dev/ares-1.1.1.tar.gz
|
||||
|
||||
ares patch:
|
||||
http://curl.haxx.se/dev/ares2.diff
|
||||
|
||||
Mac OS X quirk:
|
||||
ares 1.1.1 contains too old versions of config.guess and config.sub. Copy
|
||||
the ones from the curl source tree in to the ares source tree before you
|
||||
run configure.
|
||||
|
||||
Build ares
|
||||
==========
|
||||
|
||||
1. unpack the ares-1.1.1 archive
|
||||
2. apply patch (if you're on Mac OS X or windows)
|
||||
3. ./configure
|
||||
4. make
|
||||
|
||||
Build libcurl to use ares
|
||||
=========================
|
||||
|
||||
1. Move the ares source/build tree to subdirectory in the curl root named
|
||||
'ares'.
|
||||
2. ./buildconf
|
||||
3. ./configure --enable-ares
|
||||
4. make
|
||||
|
||||
If the configure script detects IPv6 support, you need to explicitly disable
|
||||
that (--disable-ipv6) since ares isn't IPv6 compatible (yet).
|
||||
|
||||
Please let me know how it builds, runs, works or whatever. I had to do some
|
||||
fairly big changes in some code parts to get this to work.
|
@@ -70,6 +70,7 @@
|
||||
#define EINPROGRESS WSAEINPROGRESS
|
||||
#define EWOULDBLOCK WSAEWOULDBLOCK
|
||||
#define EISCONN WSAEISCONN
|
||||
#define ENOTSOCK WSAENOTSOCK
|
||||
#endif
|
||||
|
||||
#include "urldata.h"
|
||||
@@ -190,11 +191,6 @@ int waitconnect(int sockfd, /* socket */
|
||||
static CURLcode bindlocal(struct connectdata *conn,
|
||||
int sockfd)
|
||||
{
|
||||
#if !defined(WIN32)||defined(__CYGWIN32__)
|
||||
/* We don't generally like checking for OS-versions, we should make this
|
||||
HAVE_XXXX based, although at the moment I don't have a decent test for
|
||||
this! */
|
||||
|
||||
#ifdef HAVE_INET_NTOA
|
||||
|
||||
struct SessionHandle *data = conn->data;
|
||||
@@ -208,6 +204,7 @@ static CURLcode bindlocal(struct connectdata *conn,
|
||||
size_t size;
|
||||
char myhost[256] = "";
|
||||
in_addr_t in;
|
||||
int rc;
|
||||
|
||||
/* First check if the given name is an IP address */
|
||||
in=inet_addr(data->set.device);
|
||||
@@ -217,7 +214,10 @@ static CURLcode bindlocal(struct connectdata *conn,
|
||||
/*
|
||||
* We now have the numerical IPv4-style x.y.z.w in the 'myhost' buffer
|
||||
*/
|
||||
h = Curl_resolv(data, myhost, 0);
|
||||
rc = Curl_resolv(conn, myhost, 0, &h);
|
||||
if(rc == 1)
|
||||
rc = Curl_wait_for_resolv(conn, &h);
|
||||
|
||||
}
|
||||
else {
|
||||
if(strlen(data->set.device)>1) {
|
||||
@@ -225,11 +225,14 @@ static CURLcode bindlocal(struct connectdata *conn,
|
||||
* This was not an interface, resolve the name as a host name
|
||||
* or IP number
|
||||
*/
|
||||
h = Curl_resolv(data, data->set.device, 0);
|
||||
if(h) {
|
||||
rc = Curl_resolv(conn, data->set.device, 0, &h);
|
||||
if(rc == 1)
|
||||
rc = Curl_wait_for_resolv(conn, &h);
|
||||
|
||||
if(h)
|
||||
/* we know data->set.device is shorter than the myhost array */
|
||||
strcpy(myhost, data->set.device);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -335,7 +338,6 @@ static CURLcode bindlocal(struct connectdata *conn,
|
||||
|
||||
} /* end of device selection support */
|
||||
#endif /* end of HAVE_INET_NTOA */
|
||||
#endif /* end of not WIN32 */
|
||||
|
||||
return CURLE_HTTP_PORT_FAILED;
|
||||
}
|
||||
|
66
lib/cookie.c
@@ -86,10 +86,12 @@ Example set of cookies:
|
||||
#include <string.h>
|
||||
#include <ctype.h>
|
||||
|
||||
#include "urldata.h"
|
||||
#include "cookie.h"
|
||||
#include "getdate.h"
|
||||
#include "strequal.h"
|
||||
#include "strtok.h"
|
||||
#include "sendf.h"
|
||||
|
||||
/* The last #include file should be: */
|
||||
#ifdef CURLDEBUG
|
||||
@@ -131,7 +133,12 @@ static bool tailmatch(const char *little, const char *bigone)
|
||||
***************************************************************************/
|
||||
|
||||
struct Cookie *
|
||||
Curl_cookie_add(struct CookieInfo *c,
|
||||
Curl_cookie_add(struct SessionHandle *data,
|
||||
/* The 'data' pointer here may be NULL at times, and thus
|
||||
must only be used very carefully for things that can deal
|
||||
with data being NULL. Such as infof() and similar */
|
||||
|
||||
struct CookieInfo *c,
|
||||
bool httpheader, /* TRUE if HTTP header-style line */
|
||||
char *lineptr, /* first character of the line */
|
||||
char *domain, /* default domain */
|
||||
@@ -234,10 +241,18 @@ Curl_cookie_add(struct CookieInfo *c,
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(dotcount < 3) {
|
||||
/* The original Netscape cookie spec defined that this domain name
|
||||
MUST have three dots (or two if one of the seven holy TLDs),
|
||||
but it seems that these kinds of cookies are in use "out there"
|
||||
so we cannot be that strict. I've therefore lowered the check
|
||||
to not allow less than two dots. */
|
||||
|
||||
if(dotcount < 2) {
|
||||
/* Received and skipped a cookie with a domain using too few
|
||||
dots. */
|
||||
badcookie=TRUE; /* mark this as a bad cookie */
|
||||
infof(data, "skipped cookie with illegal dotcount domain: %s",
|
||||
whatptr);
|
||||
}
|
||||
else {
|
||||
/* Now, we make sure that our host is within the given domain,
|
||||
@@ -256,6 +271,8 @@ Curl_cookie_add(struct CookieInfo *c,
|
||||
is not a domain to which the current host belongs. Mark as
|
||||
bad. */
|
||||
badcookie=TRUE;
|
||||
infof(data, "skipped cookie with bad tailmatch domain: %s",
|
||||
whatptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -431,13 +448,16 @@ Curl_cookie_add(struct CookieInfo *c,
|
||||
}
|
||||
}
|
||||
|
||||
if(7 != fields) {
|
||||
if(6 == fields) {
|
||||
/* we got a cookie with blank contents, fix it */
|
||||
co->value = strdup("");
|
||||
}
|
||||
else if(7 != fields) {
|
||||
/* we did not find the sufficient number of fields to recognize this
|
||||
as a valid line, abort and go home */
|
||||
free_cookiemess(co);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if(!c->running && /* read from a file */
|
||||
@@ -542,6 +562,12 @@ Curl_cookie_add(struct CookieInfo *c,
|
||||
clist = clist->next;
|
||||
}
|
||||
|
||||
if(c->running)
|
||||
/* Only show this when NOT reading the cookies from a file */
|
||||
infof(data, "%s cookie %s=\"%s\" for domain %s, path %s, expire %d\n",
|
||||
replace_old?"Replaced":"Added", co->name, co->value,
|
||||
co->domain, co->path, co->expires);
|
||||
|
||||
if(!replace_old) {
|
||||
/* then make the last item point on this new one */
|
||||
if(lastc)
|
||||
@@ -564,7 +590,8 @@ Curl_cookie_add(struct CookieInfo *c,
|
||||
* If 'newsession' is TRUE, discard all "session cookies" on read from file.
|
||||
*
|
||||
****************************************************************************/
|
||||
struct CookieInfo *Curl_cookie_init(char *file,
|
||||
struct CookieInfo *Curl_cookie_init(struct SessionHandle *data,
|
||||
char *file,
|
||||
struct CookieInfo *inc,
|
||||
bool newsession)
|
||||
{
|
||||
@@ -612,7 +639,7 @@ struct CookieInfo *Curl_cookie_init(char *file,
|
||||
while(*lineptr && isspace((int)*lineptr))
|
||||
lineptr++;
|
||||
|
||||
Curl_cookie_add(c, headerline, lineptr, NULL, NULL);
|
||||
Curl_cookie_add(data, c, headerline, lineptr, NULL, NULL);
|
||||
}
|
||||
if(fromfile)
|
||||
fclose(fp);
|
||||
@@ -821,31 +848,4 @@ int Curl_cookie_output(struct CookieInfo *c, char *dumphere)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CURL_COOKIE_DEBUG
|
||||
|
||||
/*
|
||||
* On my Solaris box, this command line builds this test program:
|
||||
*
|
||||
* gcc -g -o cooktest -DCURL_COOKIE_DEBUG -DHAVE_CONFIG_H -I.. -I../include cookie.c strequal.o getdate.o memdebug.o mprintf.o strtok.o -lnsl -lsocket
|
||||
*
|
||||
*/
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct CookieInfo *c=NULL;
|
||||
if(argc>1) {
|
||||
c = Curl_cookie_init(argv[1], c);
|
||||
Curl_cookie_add(c, TRUE, "PERSONALIZE=none;expires=Monday, 13-Jun-1988 03:04:55 GMT; domain=.fidelity.com; path=/ftgw; secure");
|
||||
Curl_cookie_add(c, TRUE, "foobar=yes; domain=.haxx.se; path=/looser;");
|
||||
c = Curl_cookie_init(argv[1], c);
|
||||
|
||||
Curl_cookie_output(c);
|
||||
Curl_cookie_cleanup(c);
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* CURL_DISABLE_HTTP */
|
||||
|
@@ -68,14 +68,18 @@ struct CookieInfo {
|
||||
#define MAX_NAME 256
|
||||
#define MAX_NAME_TXT "255"
|
||||
|
||||
struct SessionHandle;
|
||||
/*
|
||||
* Add a cookie to the internal list of cookies. The domain and path arguments
|
||||
* are only used if the header boolean is TRUE.
|
||||
*/
|
||||
struct Cookie *Curl_cookie_add(struct CookieInfo *, bool header, char *line,
|
||||
|
||||
struct Cookie *Curl_cookie_add(struct SessionHandle *data,
|
||||
struct CookieInfo *, bool header, char *line,
|
||||
char *domain, char *path);
|
||||
|
||||
struct CookieInfo *Curl_cookie_init(char *, struct CookieInfo *, bool);
|
||||
struct CookieInfo *Curl_cookie_init(struct SessionHandle *data,
|
||||
char *, struct CookieInfo *, bool);
|
||||
struct Cookie *Curl_cookie_getlist(struct CookieInfo *, char *, char *, bool);
|
||||
void Curl_cookie_freelist(struct Cookie *);
|
||||
void Curl_cookie_cleanup(struct CookieInfo *);
|
||||
|
36
lib/easy.c
@@ -76,6 +76,7 @@
|
||||
#include "url.h"
|
||||
#include "getinfo.h"
|
||||
#include "hostip.h"
|
||||
#include "share.h"
|
||||
|
||||
#define _MPRINTF_REPLACE /* use our functions only */
|
||||
#include <curl/mprintf.h>
|
||||
@@ -233,15 +234,25 @@ CURLcode curl_easy_perform(CURL *curl)
|
||||
{
|
||||
struct SessionHandle *data = (struct SessionHandle *)curl;
|
||||
|
||||
if (Curl_global_host_cache_use(data) && data->hostcache != Curl_global_host_cache_get()) {
|
||||
if (data->hostcache) {
|
||||
Curl_hash_destroy(data->hostcache);
|
||||
}
|
||||
data->hostcache = Curl_global_host_cache_get();
|
||||
}
|
||||
if ( ! (data->share && data->share->hostcache) ) {
|
||||
|
||||
if (!data->hostcache) {
|
||||
data->hostcache = Curl_hash_alloc(7, Curl_freednsinfo);
|
||||
if (Curl_global_host_cache_use(data) &&
|
||||
data->hostcache != Curl_global_host_cache_get()) {
|
||||
if (data->hostcache)
|
||||
Curl_hash_destroy(data->hostcache);
|
||||
data->hostcache = Curl_global_host_cache_get();
|
||||
}
|
||||
|
||||
if (!data->hostcache) {
|
||||
data->hostcache = Curl_hash_alloc(7, Curl_freednsinfo);
|
||||
|
||||
if(!data->hostcache)
|
||||
/* While we possibly could survive and do good without a host cache,
|
||||
the fact that creating it failed indicates that things are truly
|
||||
screwed up and we should bail out! */
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return Curl_perform(data);
|
||||
@@ -250,8 +261,10 @@ CURLcode curl_easy_perform(CURL *curl)
|
||||
void curl_easy_cleanup(CURL *curl)
|
||||
{
|
||||
struct SessionHandle *data = (struct SessionHandle *)curl;
|
||||
if (!Curl_global_host_cache_use(data)) {
|
||||
Curl_hash_destroy(data->hostcache);
|
||||
if ( ! (data->share && data->share->hostcache) ) {
|
||||
if ( !Curl_global_host_cache_use(data)) {
|
||||
Curl_hash_destroy(data->hostcache);
|
||||
}
|
||||
}
|
||||
Curl_close(data);
|
||||
}
|
||||
@@ -313,7 +326,8 @@ CURL *curl_easy_duphandle(CURL *incurl)
|
||||
if(data->cookies)
|
||||
/* If cookies are enabled in the parent handle, we enable them
|
||||
in the clone as well! */
|
||||
outcurl->cookies = Curl_cookie_init(data->cookies->filename,
|
||||
outcurl->cookies = Curl_cookie_init(data,
|
||||
data->cookies->filename,
|
||||
outcurl->cookies,
|
||||
data->set.cookiesession);
|
||||
|
||||
|
30
lib/file.c
@@ -94,11 +94,12 @@
|
||||
/* Emulate a connect-then-transfer protocol. We connect to the file here */
|
||||
CURLcode Curl_file_connect(struct connectdata *conn)
|
||||
{
|
||||
char *actual_path = curl_unescape(conn->path, 0);
|
||||
char *real_path = curl_unescape(conn->path, 0);
|
||||
struct FILE *file;
|
||||
int fd;
|
||||
#if defined(WIN32) || defined(__EMX__)
|
||||
int i;
|
||||
char *actual_path;
|
||||
#endif
|
||||
|
||||
file = (struct FILE *)malloc(sizeof(struct FILE));
|
||||
@@ -109,6 +110,29 @@ CURLcode Curl_file_connect(struct connectdata *conn)
|
||||
conn->proto.file = file;
|
||||
|
||||
#if defined(WIN32) || defined(__EMX__)
|
||||
/* If the first character is a slash, and there's
|
||||
something that looks like a drive at the beginning of
|
||||
the path, skip the slash. If we remove the initial
|
||||
slash in all cases, paths without drive letters end up
|
||||
relative to the current directory which isn't how
|
||||
browsers work.
|
||||
|
||||
Some browsers accept | instead of : as the drive letter
|
||||
separator, so we do too.
|
||||
|
||||
On other platforms, we need the slash to indicate an
|
||||
absolute pathname. On Windows, absolute paths start
|
||||
with a drive letter.
|
||||
*/
|
||||
actual_path = real_path;
|
||||
if ((actual_path[0] == '/') &&
|
||||
actual_path[1] &&
|
||||
(actual_path[2] == ':' || actual_path[2] == '|'))
|
||||
{
|
||||
actual_path[2] = ':';
|
||||
actual_path++;
|
||||
}
|
||||
|
||||
/* change path separators from '/' to '\\' for Windows and OS/2 */
|
||||
for (i=0; actual_path[i] != '\0'; ++i)
|
||||
if (actual_path[i] == '/')
|
||||
@@ -116,9 +140,9 @@ CURLcode Curl_file_connect(struct connectdata *conn)
|
||||
|
||||
fd = open(actual_path, O_RDONLY | O_BINARY); /* no CR/LF translation! */
|
||||
#else
|
||||
fd = open(actual_path, O_RDONLY);
|
||||
fd = open(real_path, O_RDONLY);
|
||||
#endif
|
||||
free(actual_path);
|
||||
free(real_path);
|
||||
|
||||
if(fd == -1) {
|
||||
failf(conn->data, "Couldn't open file %s", conn->path);
|
||||
|
292
lib/ftp.c
@@ -99,12 +99,24 @@
|
||||
#endif
|
||||
|
||||
/* Local API functions */
|
||||
static CURLcode ftp_sendquote(struct connectdata *conn, struct curl_slist *quote);
|
||||
static CURLcode ftp_sendquote(struct connectdata *conn,
|
||||
struct curl_slist *quote);
|
||||
static CURLcode ftp_cwd(struct connectdata *conn, char *path);
|
||||
static CURLcode ftp_mkd(struct connectdata *conn, char *path);
|
||||
static CURLcode cwd_and_mkd(struct connectdata *conn, char *path);
|
||||
|
||||
/* easy-to-use macro: */
|
||||
#define FTPSENDF(x,y,z) if((result = Curl_ftpsendf(x,y,z))) return result
|
||||
|
||||
static void freedirs(struct FTP *ftp)
|
||||
{
|
||||
int i;
|
||||
for (i=0; ftp->dirs[i]; i++){
|
||||
free(ftp->dirs[i]);
|
||||
ftp->dirs[i]=NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
*
|
||||
* AllowServerConnect()
|
||||
@@ -598,6 +610,14 @@ CURLcode Curl_ftp_done(struct connectdata *conn)
|
||||
int ftpcode;
|
||||
CURLcode result=CURLE_OK;
|
||||
|
||||
/* free the dir tree parts */
|
||||
freedirs(ftp);
|
||||
|
||||
if(ftp->file) {
|
||||
free(ftp->file);
|
||||
ftp->file = NULL;
|
||||
}
|
||||
|
||||
if(data->set.upload) {
|
||||
if((-1 != data->set.infilesize) &&
|
||||
(data->set.infilesize != *ftp->bytecountp) &&
|
||||
@@ -709,36 +729,6 @@ CURLcode ftp_sendquote(struct connectdata *conn, struct curl_slist *quote)
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
*
|
||||
* ftp_cwd()
|
||||
*
|
||||
* Send 'CWD' to the remote server to Change Working Directory.
|
||||
* It is the ftp version of the unix 'cd' command.
|
||||
*/
|
||||
static
|
||||
CURLcode ftp_cwd(struct connectdata *conn, char *path)
|
||||
{
|
||||
ssize_t nread;
|
||||
int ftpcode;
|
||||
CURLcode result;
|
||||
|
||||
FTPSENDF(conn, "CWD %s", path);
|
||||
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
/* According to RFC959, CWD is supposed to return 250 on success, but
|
||||
there seem to be non-compliant FTP servers out there that return 200,
|
||||
so we accept any '2xy' code here. */
|
||||
if (ftpcode/100 != 2) {
|
||||
failf(conn->data, "Couldn't cd to %s", path);
|
||||
return CURLE_FTP_ACCESS_DENIED;
|
||||
}
|
||||
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
*
|
||||
* ftp_getfiletime()
|
||||
@@ -1214,18 +1204,24 @@ CURLcode ftp_use_port(struct connectdata *conn)
|
||||
|
||||
if(data->set.ftpport) {
|
||||
in_addr_t in;
|
||||
int rc;
|
||||
|
||||
/* First check if the given name is an IP address */
|
||||
in=inet_addr(data->set.ftpport);
|
||||
|
||||
if((in == CURL_INADDR_NONE) &&
|
||||
Curl_if2ip(data->set.ftpport, myhost, sizeof(myhost))) {
|
||||
h = Curl_resolv(data, myhost, 0);
|
||||
rc = Curl_resolv(conn, myhost, 0, &h);
|
||||
if(rc == 1)
|
||||
rc = Curl_wait_for_resolv(conn, &h);
|
||||
}
|
||||
else {
|
||||
int len = strlen(data->set.ftpport);
|
||||
if(len>1)
|
||||
h = Curl_resolv(data, data->set.ftpport, 0);
|
||||
if(len>1) {
|
||||
rc = Curl_resolv(conn, data->set.ftpport, 0, &h);
|
||||
if(rc == 1)
|
||||
rc = Curl_wait_for_resolv(conn, &h);
|
||||
}
|
||||
if(h)
|
||||
strcpy(myhost, data->set.ftpport); /* buffer overflow risk */
|
||||
}
|
||||
@@ -1364,6 +1360,7 @@ CURLcode ftp_use_pasv(struct connectdata *conn,
|
||||
CURLcode result;
|
||||
struct Curl_dns_entry *addr=NULL;
|
||||
Curl_ipconnect *conninfo;
|
||||
int rc;
|
||||
|
||||
/*
|
||||
Here's the executive summary on what to do:
|
||||
@@ -1488,14 +1485,20 @@ CURLcode ftp_use_pasv(struct connectdata *conn,
|
||||
* We don't want to rely on a former host lookup that might've expired
|
||||
* now, instead we remake the lookup here and now!
|
||||
*/
|
||||
addr = Curl_resolv(data, conn->proxyhost, conn->port);
|
||||
rc = Curl_resolv(conn, conn->proxyhost, conn->port, &addr);
|
||||
if(rc == 1)
|
||||
rc = Curl_wait_for_resolv(conn, &addr);
|
||||
|
||||
connectport =
|
||||
(unsigned short)conn->port; /* we connect to the proxy's port */
|
||||
|
||||
}
|
||||
else {
|
||||
/* normal, direct, ftp connection */
|
||||
addr = Curl_resolv(data, newhostp, newport);
|
||||
rc = Curl_resolv(conn, newhostp, newport, &addr);
|
||||
if(rc == 1)
|
||||
rc = Curl_wait_for_resolv(conn, &addr);
|
||||
|
||||
if(!addr) {
|
||||
failf(data, "Can't resolve new host %s:%d", newhostp, newport);
|
||||
return CURLE_FTP_CANT_GET_HOST;
|
||||
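Not part of the diff: both hunks above switch to the same two-step resolver calling convention, condensed here as a libcurl-internal sketch (it will not build outside lib/ and the error handling is simplified):

/* Sketch of the new internal contract (documented in lib/hostip.c):
 *   Curl_resolv() returns -1 on error, 0 when *dns is filled in right away,
 *   and 1 when an asynchronous (ares) lookup is still in progress. */
struct Curl_dns_entry *dns = NULL;
int rc = Curl_resolv(conn, hostname, port, &dns);
if(rc == 1)
  rc = Curl_wait_for_resolv(conn, &dns); /* block until the lookup is done */
if(!dns)
  return CURLE_FTP_CANT_GET_HOST;        /* lookup failed or timed out */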
@@ -1754,7 +1757,7 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
    if(result)
      return result;

  /* Send any PREQUOTE strings after transfer type is set? (Wesley Laxton)*/
  /* Send any PREQUOTE strings after transfer type is set? */
  if(data->set.prequote) {
    if ((result = ftp_sendquote(conn, data->set.prequote)) != CURLE_OK)
      return result;
@@ -1973,20 +1976,21 @@ CURLcode ftp_perform(struct connectdata *conn,
    if ((result = ftp_sendquote(conn, data->set.quote)) != CURLE_OK)
      return result;
  }


  /* This is a re-used connection. Since we change directory to where the
     transfer is taking place, we must now get back to the original dir
     where we ended up after login: */
  if (conn->bits.reuse && ftp->entrypath) {
    if ((result = ftp_cwd(conn, ftp->entrypath)) != CURLE_OK)
    if ((result = cwd_and_mkd(conn, ftp->entrypath)) != CURLE_OK)
      return result;
  }

  {
    int i; /* counter for loop */
    for (i=0; ftp->dirs[i]; i++) {
      /* RFC 1738 says empty components should be respected too */
      if ((result = ftp_cwd(conn, ftp->dirs[i])) != CURLE_OK)
      /* RFC 1738 says empty components should be respected too, but
         that is plain stupid since CWD can't be used with an empty argument */
      if ((result = cwd_and_mkd(conn, ftp->dirs[i])) != CURLE_OK)
        return result;
    }
  }
@@ -1995,33 +1999,38 @@ CURLcode ftp_perform(struct connectdata *conn,
  if((data->set.get_filetime || data->set.timecondition) &&
     ftp->file) {
    result = ftp_getfiletime(conn, ftp->file);
    if(result)
      return result;

    if(data->set.timecondition) {
      if((data->info.filetime > 0) && (data->set.timevalue > 0)) {
        switch(data->set.timecondition) {
        case TIMECOND_IFMODSINCE:
        default:
          if(data->info.filetime < data->set.timevalue) {
            infof(data, "The requested document is not new enough\n");
            ftp->no_transfer = TRUE; /* mark this to not transfer data */
            return CURLE_OK;
    switch( result )
    {
    case CURLE_FTP_COULDNT_RETR_FILE:
    case CURLE_OK:
      if(data->set.timecondition) {
        if((data->info.filetime > 0) && (data->set.timevalue > 0)) {
          switch(data->set.timecondition) {
          case TIMECOND_IFMODSINCE:
          default:
            if(data->info.filetime < data->set.timevalue) {
              infof(data, "The requested document is not new enough\n");
              ftp->no_transfer = TRUE; /* mark this to not transfer data */
              return CURLE_OK;
            }
            break;
          case TIMECOND_IFUNMODSINCE:
            if(data->info.filetime > data->set.timevalue) {
              infof(data, "The requested document is not old enough\n");
              ftp->no_transfer = TRUE; /* mark this to not transfer data */
              return CURLE_OK;
            }
            break;
          } /* switch */
        }
          break;
        case TIMECOND_IFUNMODSINCE:
          if(data->info.filetime > data->set.timevalue) {
            infof(data, "The requested document is not old enough\n");
            ftp->no_transfer = TRUE; /* mark this to not transfer data */
            return CURLE_OK;
        else {
          infof(data, "Skipping time comparison\n");
        }
          break;
        } /* switch */
      }
      else {
        infof(data, "Skipping time comparison\n");
      }
    }
  }
      break;
    default:
      return result;
    } /* switch */
  }

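Not part of the diff: the switch above is driven by two public options; a minimal sketch of a conditional FTP download (handle setup and cleanup omitted):

#include <time.h>
#include <curl/curl.h>

/* Sketch: only fetch if the remote file was modified during the last day.
   If the condition fails, ftp->no_transfer is set above and
   curl_easy_perform() still returns CURLE_OK without a body transfer. */
static void conditional_fetch(CURL *curl)
{
  time_t yesterday = time(NULL) - 24*60*60;

  curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/file.txt");
  curl_easy_setopt(curl, CURLOPT_TIMECONDITION, (long)TIMECOND_IFMODSINCE);
  curl_easy_setopt(curl, CURLOPT_TIMEVALUE, (long)yesterday);
  curl_easy_perform(curl);
}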
  /* If we have selected NOBODY and HEADER, it means that we only want file
@@ -2032,6 +2041,8 @@ CURLcode ftp_perform(struct connectdata *conn,
       may not support it! It is however the only way we have to get a file's
       size! */
    ssize_t filesize;
    ssize_t nread;
    int ftpcode;

    ftp->no_transfer = TRUE; /* this means no actual transfer is made */

@@ -2051,6 +2062,18 @@ CURLcode ftp_perform(struct connectdata *conn,
        return result;
    }

    /* Determine if server can respond to REST command and therefore
       whether it can do a range */
    FTPSENDF(conn, "REST 0", NULL);
    result = Curl_GetFTPResponse(&nread, conn, &ftpcode);

    if ((CURLE_OK == result) && (ftpcode == 350)) {
      result = Curl_client_write(data, CLIENTWRITE_BOTH,
                                 (char *)"Accept-ranges: bytes\r\n", 0);
      if(result)
        return result;
    }

    /* If we asked for a time of the file and we actually got one as
       well, we "emulate" a HTTP-style header in our output. */

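Not part of the diff: the "NOBODY and HEADER" combination referred to above maps to two easy options; this sketch requests the emulated headers (Accept-ranges, Last-Modified) without transferring the file:

#include <curl/curl.h>

/* Sketch: FTP "file info only". No data transfer is made; instead the
   code above writes HTTP-style headers to the output. */
static void ftp_head(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/file.txt");
  curl_easy_setopt(curl, CURLOPT_NOBODY, 1L);    /* sets ftp->no_transfer */
  curl_easy_setopt(curl, CURLOPT_HEADER, 1L);    /* emit the emulated headers */
  curl_easy_setopt(curl, CURLOPT_FILETIME, 1L);  /* ask for MDTM too */
  curl_easy_perform(curl);
}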
@@ -2059,9 +2082,9 @@ CURLcode ftp_perform(struct connectdata *conn,
      struct tm *tm;
#ifdef HAVE_LOCALTIME_R
      struct tm buffer;
      tm = (struct tm *)localtime_r(&data->info.filetime, &buffer);
      tm = (struct tm *)localtime_r((time_t *)&data->info.filetime, &buffer);
#else
      tm = localtime(&data->info.filetime);
      tm = localtime((time_t *)&data->info.filetime);
#endif
      /* format: "Tue, 15 Nov 1994 12:45:26 GMT" */
      strftime(buf, BUFSIZE-1, "Last-Modified: %a, %d %b %Y %H:%M:%S GMT\r\n",
@@ -2091,7 +2114,7 @@ CURLcode ftp_perform(struct connectdata *conn,
  else {
    /* We have chosen (this is default) to use the PASV command */
    result = ftp_use_pasv(conn, connected);
    if(connected)
    if(!result && *connected)
      infof(data, "Connected the data stream with PASV!\n");
  }

@@ -2147,7 +2170,8 @@ CURLcode Curl_ftp(struct connectdata *conn)

      if (!ftp->dirs[path_part]) { /* run out of memory ... */
        failf(data, "no memory");
        retcode = CURLE_OUT_OF_MEMORY;
        freedirs(ftp);
        return CURLE_OUT_OF_MEMORY;
      }
    }
    else {
@@ -2161,17 +2185,10 @@ CURLcode Curl_ftp(struct connectdata *conn)
          /* too deep, we need the last entry to be kept NULL at all
             times to signal end of list */
          failf(data, "too deep dir hierarchy");
          retcode = CURLE_URL_MALFORMAT;
          freedirs(ftp);
          return CURLE_URL_MALFORMAT;
        }
      }
      if (retcode) {
        int i;
        for (i=0;i<path_part;i++) { /* free previous parts */
          free(ftp->dirs[i]);
          ftp->dirs[i]=NULL;
        }
        return retcode; /* failure */
      }
    }

    ftp->file = cur_pos; /* the rest is the file name */
@@ -2179,11 +2196,7 @@ CURLcode Curl_ftp(struct connectdata *conn)
    if(*ftp->file) {
      ftp->file = curl_unescape(ftp->file, 0);
      if(NULL == ftp->file) {
        int i;
        for (i=0;i<path_part;i++){
          free(ftp->dirs[i]);
          ftp->dirs[i]=NULL;
        }
        freedirs(ftp);
        failf(data, "no memory");
        return CURLE_OUT_OF_MEMORY;
      }
@@ -2274,24 +2287,117 @@ CURLcode Curl_ftpsendf(struct connectdata *conn,
CURLcode Curl_ftp_disconnect(struct connectdata *conn)
{
  struct FTP *ftp= conn->proto.ftp;
  int i;

  /* The FTP session may or may not have been allocated/setup at this point! */
  if(ftp) {
    if(ftp->entrypath)
      free(ftp->entrypath);
    if(ftp->cache)
    if(ftp->cache) {
      free(ftp->cache);
    if(ftp->file)
      free(ftp->file);
    for (i=0;ftp->dirs[i];i++){
      free(ftp->dirs[i]);
      ftp->dirs[i]=NULL;
      ftp->cache = NULL;
    }

    ftp->file = NULL; /* zero */
    if(ftp->file) {
      free(ftp->file);
      ftp->file = NULL; /* zero */
    }
    freedirs(ftp);
  }
  return CURLE_OK;
}

/***********************************************************************
 *
 * ftp_mkd()
 *
 * Makes a directory on the FTP server.
 *
 * Calls failf()
 */
CURLcode ftp_mkd(struct connectdata *conn, char *path)
{
  CURLcode result=CURLE_OK;
  int ftpcode; /* for ftp status */
  ssize_t nread;

  /* Create a directory on the remote server */
  FTPSENDF(conn, "MKD %s", path);

  result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
  if(result)
    return result;

  switch(ftpcode) {
  case 257:
    /* success! */
    infof( conn->data , "Created remote directory %s\n" , path );
    break;
  case 550:
    failf(conn->data, "Permission denied to make directory %s", path);
    result = CURLE_FTP_ACCESS_DENIED;
    break;
  default:
    failf(conn->data, "unrecognized MKD response: %d", ftpcode );
    result = CURLE_FTP_ACCESS_DENIED;
    break;
  }
  return result;
}

/***********************************************************************
 *
 * ftp_cwd()
 *
 * Send 'CWD' to the remote server to Change Working Directory. It is the ftp
 * version of the unix 'cd' command. This function is only called from the
 * cwd_and_mkd() function these days.
 *
 * This function does NOT call failf().
 */
static
CURLcode ftp_cwd(struct connectdata *conn, char *path)
{
  ssize_t nread;
  int ftpcode;
  CURLcode result;

  FTPSENDF(conn, "CWD %s", path);
  result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
  if (!result) {
    /* According to RFC959, CWD is supposed to return 250 on success, but
       there seem to be non-compliant FTP servers out there that return 200,
       so we accept any '2xy' code here. */
    if (ftpcode/100 != 2)
      result = CURLE_FTP_ACCESS_DENIED;
  }

  return result;
}

/***********************************************************************
 *
 * ftp_cwd_and_mkd()
 *
 * Change to the given directory. If the directory is not present, and we
 * have been told to allow it, then create the directory and cd to it.
 *
 */
static CURLcode cwd_and_mkd(struct connectdata *conn, char *path)
{
  CURLcode result;

  result = ftp_cwd(conn, path);
  if (result) {
    if(conn->data->set.ftp_create_missing_dirs) {
      result = ftp_mkd(conn, path);
      if (result)
        /* ftp_mkd() calls failf() itself */
        return result;
      result = ftp_cwd(conn, path);
    }
    if(result)
      failf(conn->data, "Couldn't cd to %s", path);
  }
  return result;
}

#endif /* CURL_DISABLE_FTP */
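Not part of the diff: cwd_and_mkd() only tries MKD when the ftp_create_missing_dirs flag is set. The public option name is assumed here to be CURLOPT_FTP_CREATE_MISSING_DIRS, matching the internal field; treat that name as an assumption rather than something this diff shows:

#include <stdio.h>
#include <curl/curl.h>

/* Sketch: upload into a directory tree that may not exist yet. With the
   (assumed) option enabled, a failing CWD above triggers MKD + CWD. */
static void upload_with_mkdir(CURL *curl, FILE *src)
{
  curl_easy_setopt(curl, CURLOPT_URL,
                   "ftp://ftp.example.com/new/dir/tree/file.bin");
  curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
  curl_easy_setopt(curl, CURLOPT_READDATA, src);
  curl_easy_setopt(curl, CURLOPT_FTP_CREATE_MISSING_DIRS, 1L);
  curl_easy_perform(curl);
}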
|
@@ -106,6 +106,9 @@ CURLcode Curl_getinfo(struct SessionHandle *data, CURLINFO info, ...)
  case CURLINFO_HTTP_CODE:
    *param_longp = data->info.httpcode;
    break;
  case CURLINFO_HTTP_CONNECTCODE:
    *param_longp = data->info.httpproxycode;
    break;
  case CURLINFO_FILETIME:
    *param_longp = data->info.filetime;
    break;
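Not part of the diff: the new CURLINFO_HTTP_CONNECTCODE exposes data->info.httpproxycode (the proxy's reply to the CONNECT request) separately from the final response code. A short read-back sketch after a completed transfer:

#include <stdio.h>
#include <curl/curl.h>

/* Sketch: query the new proxy CONNECT code plus the remote file time. */
static void report(CURL *curl)
{
  long connectcode = 0;
  long filetime = -1;

  curl_easy_getinfo(curl, CURLINFO_HTTP_CONNECTCODE, &connectcode);
  curl_easy_getinfo(curl, CURLINFO_FILETIME, &filetime);

  printf("proxy CONNECT response: %ld\n", connectcode);
  if(filetime >= 0)
    printf("remote file time: %ld\n", filetime);
}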
|
||||
|
33 lib/hash.c
@@ -64,8 +64,9 @@ _hash_element_dtor (void *user, void *element)
  free(e);
}

void
Curl_hash_init (curl_hash *h, int slots, curl_hash_dtor dtor)
/* return 1 on error, 0 is fine */
int
Curl_hash_init(curl_hash *h, int slots, curl_hash_dtor dtor)
{
  int i;

@@ -74,21 +75,35 @@ Curl_hash_init (curl_hash *h, int slots, curl_hash_dtor dtor)
  h->slots = slots;

  h->table = (curl_llist **) malloc(slots * sizeof(curl_llist *));
  for (i = 0; i < slots; ++i) {
    h->table[i] = Curl_llist_alloc((curl_llist_dtor) _hash_element_dtor);
  if(h->table) {
    for (i = 0; i < slots; ++i) {
      h->table[i] = Curl_llist_alloc((curl_llist_dtor) _hash_element_dtor);
      if(!h->table[i]) {
        while(i--)
          Curl_llist_destroy(h->table[i], NULL);
        free(h->table);
        return 1; /* failure */
      }
    }
    return 0; /* fine */
  }
  else
    return 1; /* failure */
}

curl_hash *
Curl_hash_alloc (int slots, curl_hash_dtor dtor)
Curl_hash_alloc(int slots, curl_hash_dtor dtor)
{
  curl_hash *h;

  h = (curl_hash *) malloc(sizeof(curl_hash));
  if (NULL == h)
    return NULL;

  Curl_hash_init(h, slots, dtor);
  if (h) {
    if(Curl_hash_init(h, slots, dtor)) {
      /* failure */
      free(h);
      h = NULL;
    }
  }

  return h;
}
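Not part of the diff: with Curl_hash_init() now returning non-zero on allocation failure, internal callers are expected to check the result the way Curl_hash_alloc() above does. A libcurl-internal sketch (slot count picked arbitrarily):

/* Sketch: allocate a hash for DNS entries and bail out cleanly if either
 * the table or one of its linked lists could not be allocated. */
curl_hash *cache = Curl_hash_alloc(7, Curl_freednsinfo);
if(!cache)
  return CURLE_OUT_OF_MEMORY;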
|
||||
|
@@ -45,7 +45,7 @@ typedef struct _curl_hash_element {
} curl_hash_element;


void Curl_hash_init(curl_hash *, int, curl_hash_dtor);
int Curl_hash_init(curl_hash *, int, curl_hash_dtor);
curl_hash *Curl_hash_alloc(int, curl_hash_dtor);
int Curl_hash_add(curl_hash *, char *, size_t, const void *);
int Curl_hash_delete(curl_hash *h, char *key, size_t key_len);
|
||||
|
493 lib/hostip.c
@@ -65,6 +65,7 @@
|
||||
#include "hostip.h"
|
||||
#include "hash.h"
|
||||
#include "share.h"
|
||||
#include "url.h"
|
||||
|
||||
#define _MPRINTF_REPLACE /* use our functions only */
|
||||
#include <curl/mprintf.h>
|
||||
@@ -81,10 +82,13 @@
|
||||
static curl_hash hostname_cache;
|
||||
static int host_cache_initialized;
|
||||
|
||||
static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
char *hostname,
|
||||
int port,
|
||||
char **bufp);
|
||||
static Curl_addrinfo *my_getaddrinfo(struct connectdata *conn,
|
||||
char *hostname,
|
||||
int port,
|
||||
int *waitp);
|
||||
#if !defined(HAVE_GETHOSTBYNAME_R) || defined(USE_ARES)
|
||||
static struct hostent* pack_hostent(char** buf, struct hostent* orig);
|
||||
#endif
|
||||
|
||||
void Curl_global_host_cache_init(void)
|
||||
{
|
||||
@@ -135,15 +139,14 @@ create_hostcache_id(char *server, int port, ssize_t *entry_len)
|
||||
char *id = NULL;
|
||||
|
||||
/* Get the length of the new entry id */
|
||||
*entry_len = *entry_len + /* Hostname length */
|
||||
1 + /* The ':' seperator */
|
||||
_num_chars(port); /* The number of characters the port will take up */
|
||||
*entry_len = *entry_len + /* Hostname length */
|
||||
1 + /* ':' seperator */
|
||||
_num_chars(port); /* number of characters the port will take up */
|
||||
|
||||
/* Allocate the new entry id */
|
||||
id = malloc(*entry_len + 1);
|
||||
if (!id) {
|
||||
if (!id)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Create the new entry */
|
||||
/* If sprintf() doesn't return the entry length, that signals failure */
|
||||
@@ -192,57 +195,26 @@ hostcache_prune(curl_hash *hostcache, int cache_timeout, int now)
|
||||
hostcache_timestamp_remove);
|
||||
}
|
||||
|
||||
#if defined(CURLDEBUG) && defined(AGGRESIVE_TEST)
|
||||
/* Called from Curl_done() to check that there's no DNS cache entry with
|
||||
a non-zero counter left. */
|
||||
void Curl_scan_cache_used(void *user, void *ptr)
|
||||
{
|
||||
struct Curl_dns_entry *e = ptr;
|
||||
(void)user; /* prevent compiler warning */
|
||||
if(e->inuse) {
|
||||
fprintf(stderr, "*** WARNING: locked DNS cache entry detected: %s\n",
|
||||
e->entry_id);
|
||||
/* perform a segmentation fault to draw attention */
|
||||
*(void **)0 = 0;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Macro to save redundant free'ing of entry_id */
|
||||
#define HOSTCACHE_RETURN(dns) \
|
||||
{ \
|
||||
free(entry_id); \
|
||||
if(data->share) \
|
||||
{ \
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_DNS); \
|
||||
} \
|
||||
return dns; \
|
||||
}
|
||||
|
||||
#ifdef HAVE_SIGSETJMP
|
||||
/* Beware this is a global and unique instance */
|
||||
sigjmp_buf curl_jmpenv;
|
||||
#endif
|
||||
|
||||
struct Curl_dns_entry *Curl_resolv(struct SessionHandle *data,
|
||||
char *hostname,
|
||||
int port)
|
||||
{
|
||||
char *entry_id = NULL;
|
||||
struct Curl_dns_entry *dns = NULL;
|
||||
ssize_t entry_len;
|
||||
time_t now;
|
||||
char *bufp;
|
||||
|
||||
#ifdef HAVE_SIGSETJMP
|
||||
/* this allows us to time-out from the name resolver, as the timeout
|
||||
will generate a signal and we will siglongjmp() from that here */
|
||||
if(!data->set.no_signal && sigsetjmp(curl_jmpenv, 1)) {
|
||||
/* this is coming from a siglongjmp() */
|
||||
failf(data, "name lookup timed out");
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
/* When calling Curl_resolv() has resulted in a response with a returned
|
||||
address, we call this function to store the information in the dns
|
||||
cache etc */
|
||||
|
||||
static struct Curl_dns_entry *
|
||||
cache_resolv_response(struct SessionHandle *data,
|
||||
Curl_addrinfo *addr,
|
||||
char *hostname,
|
||||
int port)
|
||||
{
|
||||
char *entry_id;
|
||||
int entry_len;
|
||||
struct Curl_dns_entry *dns;
|
||||
time_t now;
|
||||
|
||||
/* Create an entry id, based upon the hostname and port */
|
||||
entry_len = strlen(hostname);
|
||||
@@ -251,45 +223,112 @@ struct Curl_dns_entry *Curl_resolv(struct SessionHandle *data,
|
||||
if (!entry_id)
|
||||
return NULL;
|
||||
|
||||
/* Create a new cache entry */
|
||||
dns = (struct Curl_dns_entry *) malloc(sizeof(struct Curl_dns_entry));
|
||||
if (!dns) {
|
||||
Curl_freeaddrinfo(addr);
|
||||
free(entry_id);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
dns->inuse = 0;
|
||||
dns->addr = addr;
|
||||
|
||||
/* Store it in our dns cache */
|
||||
Curl_hash_add(data->hostcache, entry_id, entry_len+1,
|
||||
(const void *) dns);
|
||||
time(&now);
|
||||
|
||||
dns->timestamp = now;
|
||||
dns->inuse++; /* mark entry as in-use */
|
||||
|
||||
|
||||
/* Remove outdated and unused entries from the hostcache */
|
||||
hostcache_prune(data->hostcache,
|
||||
data->set.dns_cache_timeout,
|
||||
now);
|
||||
|
||||
/* free the allocated entry_id again */
|
||||
free(entry_id);
|
||||
|
||||
return dns;
|
||||
}
|
||||
|
||||
/* Resolve a name and return a pointer in the 'entry' argument if one
|
||||
is available.
|
||||
|
||||
Return codes:
|
||||
|
||||
-1 = error, no pointer
|
||||
0 = OK, pointer provided
|
||||
1 = waiting for response, no pointer
|
||||
*/
|
||||
int Curl_resolv(struct connectdata *conn,
|
||||
char *hostname,
|
||||
int port,
|
||||
struct Curl_dns_entry **entry)
|
||||
{
|
||||
char *entry_id = NULL;
|
||||
struct Curl_dns_entry *dns = NULL;
|
||||
ssize_t entry_len;
|
||||
int wait;
|
||||
struct SessionHandle *data = conn->data;
|
||||
|
||||
/* default to failure */
|
||||
int rc = -1;
|
||||
*entry = NULL;
|
||||
|
||||
#ifdef HAVE_SIGSETJMP
|
||||
/* this allows us to time-out from the name resolver, as the timeout
|
||||
will generate a signal and we will siglongjmp() from that here */
|
||||
if(!data->set.no_signal && sigsetjmp(curl_jmpenv, 1)) {
|
||||
/* this is coming from a siglongjmp() */
|
||||
failf(data, "name lookup timed out");
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Create an entry id, based upon the hostname and port */
|
||||
entry_len = strlen(hostname);
|
||||
entry_id = create_hostcache_id(hostname, port, &entry_len);
|
||||
/* If we can't create the entry id, fail */
|
||||
if (!entry_id)
|
||||
return -1;
|
||||
|
||||
if(data->share)
|
||||
Curl_share_lock(data, CURL_LOCK_DATA_DNS, CURL_LOCK_ACCESS_SINGLE);
|
||||
|
||||
/* See if its already in our dns cache */
|
||||
dns = Curl_hash_pick(data->hostcache, entry_id, entry_len+1);
|
||||
|
||||
/* free the allocated entry_id again */
|
||||
free(entry_id);
|
||||
|
||||
if (!dns) {
|
||||
Curl_addrinfo *addr = my_getaddrinfo(data, hostname, port, &bufp);
|
||||
/* The entry was not in the cache. Resolve it to IP address */
|
||||
|
||||
/* If my_getaddrinfo() returns NULL, 'wait' might be set to a non-zero
|
||||
value indicating that we need to wait for the response to the resolve
|
||||
call */
|
||||
Curl_addrinfo *addr = my_getaddrinfo(conn, hostname, port, &wait);
|
||||
|
||||
if (!addr) {
|
||||
HOSTCACHE_RETURN(NULL);
|
||||
if(wait)
|
||||
/* the response to our resolve call will come asynchronously at
|
||||
a later time, good or bad */
|
||||
rc = 1;
|
||||
}
|
||||
|
||||
/* Create a new cache entry */
|
||||
dns = (struct Curl_dns_entry *) malloc(sizeof(struct Curl_dns_entry));
|
||||
if (!dns) {
|
||||
Curl_freeaddrinfo(addr);
|
||||
HOSTCACHE_RETURN(NULL);
|
||||
}
|
||||
|
||||
dns->inuse = 0;
|
||||
dns->addr = addr;
|
||||
/* Save it in our host cache */
|
||||
Curl_hash_add(data->hostcache, entry_id, entry_len+1, (const void *) dns);
|
||||
else
|
||||
/* we got a response, store it in the cache */
|
||||
dns = cache_resolv_response(data, addr, hostname, port);
|
||||
}
|
||||
time(&now);
|
||||
|
||||
dns->timestamp = now;
|
||||
dns->inuse++; /* mark entry as in-use */
|
||||
#ifdef CURLDEBUG
|
||||
dns->entry_id = entry_id;
|
||||
#endif
|
||||
if(data->share)
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_DNS);
|
||||
|
||||
/* Remove outdated and unused entries from the hostcache */
|
||||
hostcache_prune(data->hostcache,
|
||||
data->set.dns_cache_timeout,
|
||||
now);
|
||||
*entry = dns;
|
||||
|
||||
HOSTCACHE_RETURN(dns);
|
||||
return rc;
|
||||
}
|
||||
|
||||
void Curl_resolv_unlock(struct SessionHandle *data, struct Curl_dns_entry *dns)
|
||||
@@ -314,7 +353,7 @@ void Curl_freeaddrinfo(Curl_addrinfo *p)
|
||||
#ifdef ENABLE_IPV6
|
||||
freeaddrinfo(p);
|
||||
#else
|
||||
free(p);
|
||||
free(p); /* works fine for the ARES case too */
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -332,7 +371,224 @@ void Curl_freednsinfo(void *freethis)
|
||||
|
||||
/* --- resolve name or IP-number --- */
|
||||
|
||||
#ifdef ENABLE_IPV6
|
||||
/* Allocate enough memory to hold the full name information structs and
|
||||
* everything. OSF1 is known to require at least 8872 bytes. The buffer
|
||||
* required for storing all possible aliases and IP numbers is according to
|
||||
* Stevens' Unix Network Programming 2nd edition, p. 304: 8192 bytes!
|
||||
*/
|
||||
#define CURL_NAMELOOKUP_SIZE 9000
|
||||
|
||||
#ifdef USE_ARES
|
||||
|
||||
CURLcode Curl_multi_ares_fdset(struct connectdata *conn,
|
||||
fd_set *read_fd_set,
|
||||
fd_set *write_fd_set,
|
||||
int *max_fdp)
|
||||
|
||||
{
|
||||
int max = ares_fds(conn->data->state.areschannel,
|
||||
read_fd_set, write_fd_set);
|
||||
*max_fdp = max;
|
||||
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
/* called to check if the name is resolved now */
|
||||
CURLcode Curl_is_resolved(struct connectdata *conn, bool *done)
|
||||
{
|
||||
fd_set read_fds, write_fds;
|
||||
static const struct timeval tv={0,0};
|
||||
int count;
|
||||
struct SessionHandle *data = conn->data;
|
||||
int nfds = ares_fds(data->state.areschannel, &read_fds, &write_fds);
|
||||
|
||||
count = select(nfds, &read_fds, &write_fds, NULL,
|
||||
(struct timeval *)&tv);
|
||||
|
||||
if(count)
|
||||
ares_process(data->state.areschannel, &read_fds, &write_fds);
|
||||
|
||||
if(conn->async.done) {
|
||||
*done = TRUE;
|
||||
|
||||
if(!conn->async.dns)
|
||||
return CURLE_COULDNT_RESOLVE_HOST;
|
||||
}
|
||||
else
|
||||
*done = FALSE;
|
||||
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
/* This is a function that locks and waits until the name resolve operation
|
||||
has completed.
|
||||
|
||||
If 'entry' is non-NULL, make it point to the resolved dns entry
|
||||
|
||||
Return CURLE_COULDNT_RESOLVE_HOST if the host was not resolved, and
|
||||
CURLE_OPERATION_TIMEDOUT if a time-out occurred.
|
||||
*/
|
||||
CURLcode Curl_wait_for_resolv(struct connectdata *conn,
|
||||
struct Curl_dns_entry **entry)
|
||||
{
|
||||
CURLcode rc=CURLE_OK;
|
||||
struct SessionHandle *data = conn->data;
|
||||
|
||||
/* Wait for the name resolve query to complete. */
|
||||
while (1) {
|
||||
int nfds=0;
|
||||
fd_set read_fds, write_fds;
|
||||
struct timeval *tvp, tv;
|
||||
int count;
|
||||
|
||||
FD_ZERO(&read_fds);
|
||||
FD_ZERO(&write_fds);
|
||||
nfds = ares_fds(data->state.areschannel, &read_fds, &write_fds);
|
||||
if (nfds == 0)
|
||||
break;
|
||||
tvp = ares_timeout(data->state.areschannel,
|
||||
NULL, /* pass in our maximum time here */
|
||||
&tv);
|
||||
count = select(nfds, &read_fds, &write_fds, NULL, tvp);
|
||||
if (count < 0 && errno != EINVAL)
|
||||
break;
|
||||
|
||||
ares_process(data->state.areschannel, &read_fds, &write_fds);
|
||||
}
|
||||
|
||||
/* Operation complete, if the lookup was successful we now have the entry
|
||||
in the cache. */
|
||||
|
||||
/* this destroys the channel and we cannot use it anymore after this */
|
||||
ares_destroy(data->state.areschannel);
|
||||
|
||||
if(entry)
|
||||
*entry = conn->async.dns;
|
||||
|
||||
if(!conn->async.dns) {
|
||||
/* a name was not resolved */
|
||||
if(conn->async.done)
|
||||
rc = CURLE_COULDNT_RESOLVE_HOST;
|
||||
else
|
||||
rc = CURLE_OPERATION_TIMEDOUT;
|
||||
|
||||
/* close the connection, since we can't return failure here without
|
||||
cleaning up this connection properly */
|
||||
Curl_disconnect(conn);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* this function gets called by ares when we got the name resolved */
|
||||
static void host_callback(void *arg, /* "struct connectdata *" */
|
||||
int status,
|
||||
struct hostent *hostent)
|
||||
{
|
||||
struct connectdata *conn = (struct connectdata *)arg;
|
||||
struct Curl_dns_entry *dns = NULL;
|
||||
|
||||
conn->async.done = TRUE;
|
||||
conn->async.status = status;
|
||||
|
||||
if(ARES_SUCCESS == status) {
|
||||
/* we got a resolved name in 'hostent' */
|
||||
char *bufp = (char *)malloc(CURL_NAMELOOKUP_SIZE);
|
||||
if(bufp) {
|
||||
|
||||
/* pack_hostent() copies to and shrinks the target buffer */
|
||||
struct hostent *he = pack_hostent(&bufp, hostent);
|
||||
|
||||
dns = cache_resolv_response(conn->data, he,
|
||||
conn->async.hostname, conn->async.port);
|
||||
}
|
||||
}
|
||||
|
||||
conn->async.dns = dns;
|
||||
|
||||
/* The input hostent struct will be freed by ares when we return from this
|
||||
function */
|
||||
}
|
||||
|
||||
/*
|
||||
* Return name information about the given hostname and port number. If
|
||||
* successful, the 'hostent' is returned and the forth argument will point to
|
||||
* memory we need to free after use. That meory *MUST* be freed with
|
||||
* Curl_freeaddrinfo(), nothing else.
|
||||
*/
|
||||
static Curl_addrinfo *my_getaddrinfo(struct connectdata *conn,
|
||||
char *hostname,
|
||||
int port,
|
||||
int *waitp)
|
||||
{
|
||||
int rc;
|
||||
char *bufp;
|
||||
struct SessionHandle *data = conn->data;
|
||||
|
||||
rc = ares_init(&data->state.areschannel);
|
||||
|
||||
*waitp = FALSE;
|
||||
|
||||
if(!rc) {
|
||||
/* only if success */
|
||||
|
||||
bufp = strdup(hostname);
|
||||
|
||||
if(bufp) {
|
||||
Curl_safefree(conn->async.hostname);
|
||||
conn->async.hostname = bufp;
|
||||
conn->async.port = port;
|
||||
conn->async.done = FALSE; /* not done */
|
||||
conn->async.status = 0; /* clear */
|
||||
conn->async.dns = NULL; /* clear */
|
||||
|
||||
ares_gethostbyname(data->state.areschannel, hostname, PF_INET,
|
||||
host_callback, conn);
|
||||
|
||||
*waitp = TRUE; /* please wait for the response */
|
||||
}
|
||||
else
|
||||
ares_destroy(data->state.areschannel);
|
||||
}
|
||||
|
||||
return NULL; /* no struct yet */
|
||||
|
||||
}
|
||||
#else
|
||||
/* For builds without ARES, Curl_resolv() can never return wait==TRUE,
|
||||
so this function will never be called. If it still gets called, we
|
||||
return failure at once. */
|
||||
CURLcode Curl_wait_for_resolv(struct connectdata *conn,
|
||||
struct Curl_dns_entry **entry)
|
||||
{
|
||||
(void)conn;
|
||||
*entry=NULL;
|
||||
return CURLE_COULDNT_RESOLVE_HOST;
|
||||
}
|
||||
|
||||
CURLcode Curl_multi_ares_fdset(struct connectdata *conn,
|
||||
fd_set *read_fd_set,
|
||||
fd_set *write_fd_set,
|
||||
int *max_fdp)
|
||||
{
|
||||
(void)conn;
|
||||
(void)read_fd_set;
|
||||
(void)write_fd_set;
|
||||
(void)max_fdp;
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
CURLcode Curl_is_resolved(struct connectdata *conn, bool *done)
|
||||
{
|
||||
(void)conn;
|
||||
*done = TRUE;
|
||||
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(ENABLE_IPV6) && !defined(USE_ARES)
|
||||
|
||||
#ifdef CURLDEBUG
|
||||
/* These two are strictly for memory tracing and are using the same
|
||||
@@ -377,15 +633,18 @@ void curl_freeaddrinfo(struct addrinfo *freethis,
|
||||
* memory we need to free after use. That meory *MUST* be freed with
|
||||
* Curl_freeaddrinfo(), nothing else.
|
||||
*/
|
||||
static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
static Curl_addrinfo *my_getaddrinfo(struct connectdata *conn,
|
||||
char *hostname,
|
||||
int port,
|
||||
char **bufp)
|
||||
int *waitp)
|
||||
{
|
||||
struct addrinfo hints, *res;
|
||||
int error;
|
||||
char sbuf[NI_MAXSERV];
|
||||
int s, pf = PF_UNSPEC;
|
||||
struct SessionHandle *data = conn->data;
|
||||
|
||||
*waitp=0; /* don't wait, we have the response now */
|
||||
|
||||
/* see if we have an IPv6 stack */
|
||||
s = socket(PF_INET6, SOCK_DGRAM, 0);
|
||||
@@ -410,20 +669,17 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
infof(data, "getaddrinfo(3) failed for %s:%d\n", hostname, port);
|
||||
return NULL;
|
||||
}
|
||||
*bufp=(char *)res; /* make it point to the result struct */
|
||||
|
||||
return res;
|
||||
}
|
||||
#else /* following code is IPv4-only */
|
||||
|
||||
#ifndef HAVE_GETHOSTBYNAME_R
|
||||
#if !defined(HAVE_GETHOSTBYNAME_R) || defined(USE_ARES)
|
||||
static void hostcache_fixoffset(struct hostent *h, int offset);
|
||||
/**
|
||||
/*
|
||||
* Performs a "deep" copy of a hostent into a buffer (returns a pointer to the
|
||||
* copy). Make absolutely sure the destination buffer is big enough!
|
||||
*
|
||||
* Keith McGuigan
|
||||
* 10/3/2001 */
|
||||
*/
|
||||
static struct hostent* pack_hostent(char** buf, struct hostent* orig)
|
||||
{
|
||||
char *bufptr;
|
||||
@@ -512,6 +768,25 @@ static struct hostent* pack_hostent(char** buf, struct hostent* orig)
|
||||
}
|
||||
#endif
|
||||
|
||||
static void hostcache_fixoffset(struct hostent *h, int offset)
|
||||
{
|
||||
int i=0;
|
||||
h->h_name=(char *)((long)h->h_name+offset);
|
||||
h->h_aliases=(char **)((long)h->h_aliases+offset);
|
||||
while(h->h_aliases[i]) {
|
||||
h->h_aliases[i]=(char *)((long)h->h_aliases[i]+offset);
|
||||
i++;
|
||||
}
|
||||
h->h_addr_list=(char **)((long)h->h_addr_list+offset);
|
||||
i=0;
|
||||
while(h->h_addr_list[i]) {
|
||||
h->h_addr_list[i]=(char *)((long)h->h_addr_list[i]+offset);
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef USE_ARES
|
||||
|
||||
static char *MakeIP(unsigned long num, char *addr, int addr_len)
|
||||
{
|
||||
#if defined(HAVE_INET_NTOA) || defined(HAVE_INET_NTOA_R)
|
||||
@@ -533,43 +808,24 @@ static char *MakeIP(unsigned long num, char *addr, int addr_len)
|
||||
return (addr);
|
||||
}
|
||||
|
||||
static void hostcache_fixoffset(struct hostent *h, int offset)
|
||||
{
|
||||
int i=0;
|
||||
h->h_name=(char *)((long)h->h_name+offset);
|
||||
h->h_aliases=(char **)((long)h->h_aliases+offset);
|
||||
while(h->h_aliases[i]) {
|
||||
h->h_aliases[i]=(char *)((long)h->h_aliases[i]+offset);
|
||||
i++;
|
||||
}
|
||||
h->h_addr_list=(char **)((long)h->h_addr_list+offset);
|
||||
i=0;
|
||||
while(h->h_addr_list[i]) {
|
||||
h->h_addr_list[i]=(char *)((long)h->h_addr_list[i]+offset);
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
/* The original code to this function was once stolen from the Dancer source
|
||||
code, written by Bjorn Reese, it has since been patched and modified
|
||||
considerably. */
|
||||
static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
static Curl_addrinfo *my_getaddrinfo(struct connectdata *conn,
|
||||
char *hostname,
|
||||
int port,
|
||||
char **bufp)
|
||||
int *waitp)
|
||||
{
|
||||
struct hostent *h = NULL;
|
||||
in_addr_t in;
|
||||
int ret; /* this variable is unused on several platforms but used on some */
|
||||
struct SessionHandle *data = conn->data;
|
||||
|
||||
#define CURL_NAMELOOKUP_SIZE 9000
|
||||
/* Allocate enough memory to hold the full name information structs and
|
||||
* everything. OSF1 is known to require at least 8872 bytes. The buffer
|
||||
* required for storing all possible aliases and IP numbers is according to
|
||||
* Stevens' Unix Network Programming 2nd editor, p. 304: 8192 bytes! */
|
||||
port=0; /* unused in IPv4 code */
|
||||
(void)port; /* unused in IPv4 code */
|
||||
ret = 0; /* to prevent the compiler warning */
|
||||
|
||||
*waitp = 0; /* don't wait, we act synchronously */
|
||||
|
||||
in=inet_addr(hostname);
|
||||
if (in != CURL_INADDR_NONE) {
|
||||
struct in_addr *addrentry;
|
||||
@@ -581,7 +837,6 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
} *buf = (struct namebuf *)malloc(sizeof(struct namebuf));
|
||||
if(!buf)
|
||||
return NULL; /* major failure */
|
||||
*bufp = (char *)buf;
|
||||
|
||||
h = &buf->hostentry;
|
||||
h->h_addr_list = &buf->h_addr_list[0];
|
||||
@@ -602,7 +857,6 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
int *buf = (int *)malloc(CURL_NAMELOOKUP_SIZE);
|
||||
if(!buf)
|
||||
return NULL; /* major failure */
|
||||
*bufp=(char *)buf;
|
||||
|
||||
/* Workaround for gethostbyname_r bug in qnx nto. It is also _required_
|
||||
for some of these functions. */
|
||||
@@ -638,7 +892,6 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
offset=(long)h-(long)buf;
|
||||
hostcache_fixoffset(h, offset);
|
||||
buf=(int *)h;
|
||||
*bufp=(char *)buf;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
@@ -687,7 +940,6 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
offset=(long)h-(long)buf;
|
||||
hostcache_fixoffset(h, offset);
|
||||
buf=(int *)h;
|
||||
*bufp=(char *)buf;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
@@ -730,13 +982,11 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
infof(data, "gethostbyname_r(2) failed for %s\n", hostname);
|
||||
h = NULL; /* set return code to NULL */
|
||||
free(buf);
|
||||
*bufp=NULL;
|
||||
}
|
||||
#else
|
||||
else {
|
||||
if ((h = gethostbyname(hostname)) == NULL ) {
|
||||
infof(data, "gethostbyname(2) failed for %s\n", hostname);
|
||||
*bufp=NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -745,7 +995,6 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
static one we got a pointer to might get removed when we don't
|
||||
want/expect that */
|
||||
h = pack_hostent(&buf, h);
|
||||
*bufp=(char *)buf;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
@@ -753,3 +1002,5 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
|
||||
}
|
||||
|
||||
#endif /* end of IPv4-specific code */
|
||||
|
||||
#endif /* end of !USE_ARES */
|
||||
|
18
lib/hostip.h
18
lib/hostip.h
@@ -29,6 +29,7 @@
|
||||
struct addrinfo;
|
||||
struct hostent;
|
||||
struct SessionHandle;
|
||||
struct connectdata;
|
||||
|
||||
void Curl_global_host_cache_init(void);
|
||||
void Curl_global_host_cache_dtor(void);
|
||||
@@ -41,9 +42,6 @@ struct Curl_dns_entry {
|
||||
time_t timestamp;
|
||||
long inuse; /* use-counter, make very sure you decrease this
|
||||
when you're done using the address you received */
|
||||
#ifdef CURLDEBUG
|
||||
char *entry_id;
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -54,10 +52,18 @@ struct Curl_dns_entry {
|
||||
* use, or we'll leak memory!
|
||||
*/
|
||||
|
||||
struct Curl_dns_entry *Curl_resolv(struct SessionHandle *data,
|
||||
char *hostname,
|
||||
int port);
|
||||
int Curl_resolv(struct connectdata *conn,
|
||||
char *hostname,
|
||||
int port,
|
||||
struct Curl_dns_entry **dnsentry);
|
||||
|
||||
CURLcode Curl_is_resolved(struct connectdata *conn, bool *done);
|
||||
CURLcode Curl_wait_for_resolv(struct connectdata *conn,
|
||||
struct Curl_dns_entry **dnsentry);
|
||||
CURLcode Curl_multi_ares_fdset(struct connectdata *conn,
|
||||
fd_set *read_fd_set,
|
||||
fd_set *write_fd_set,
|
||||
int *max_fdp);
|
||||
/* unlock a previously resolved dns entry */
|
||||
void Curl_resolv_unlock(struct SessionHandle *data, struct Curl_dns_entry *dns);
|
||||
|
||||
|
141
lib/http.c
141
lib/http.c
@@ -91,6 +91,9 @@
|
||||
#include "http_digest.h"
|
||||
#include "http_ntlm.h"
|
||||
#include "http_negotiate.h"
|
||||
#include "url.h"
|
||||
#include "share.h"
|
||||
#include "http.h"
|
||||
|
||||
#define _MPRINTF_REPLACE /* use our functions only */
|
||||
#include <curl/mprintf.h>
|
||||
@@ -100,6 +103,8 @@
|
||||
#include "memdebug.h"
|
||||
#endif
|
||||
|
||||
static CURLcode Curl_output_basic_proxy(struct connectdata *conn);
|
||||
|
||||
/* fread() emulation to provide POST and/or request data */
|
||||
static int readmoredata(char *buffer,
|
||||
size_t size,
|
||||
@@ -428,6 +433,13 @@ CURLcode Curl_ConnectHTTPProxyTunnel(struct connectdata *conn,
|
||||
|
||||
infof(data, "Establish HTTP proxy tunnel to %s:%d\n", hostname, remote_port);
|
||||
|
||||
/*
|
||||
* This code currently only supports Basic authentication for this CONNECT
|
||||
* request to a proxy.
|
||||
*/
|
||||
if(conn->bits.proxy_user_passwd)
|
||||
Curl_output_basic_proxy(conn);
|
||||
|
||||
/* OK, now send the connect request to the proxy */
|
||||
result =
|
||||
Curl_sendf(tunnelsocket, conn,
|
||||
@@ -544,6 +556,8 @@ CURLcode Curl_ConnectHTTPProxyTunnel(struct connectdata *conn,
|
||||
if(error)
|
||||
return CURLE_RECV_ERROR;
|
||||
|
||||
data->info.httpproxycode = httperror;
|
||||
|
||||
if(200 != httperror) {
|
||||
if(407 == httperror)
|
||||
/* Added Nov 6 1998 */
|
||||
@@ -552,6 +566,14 @@ CURLcode Curl_ConnectHTTPProxyTunnel(struct connectdata *conn,
|
||||
failf(data, "Received error code %d from proxy", httperror);
|
||||
return CURLE_RECV_ERROR;
|
||||
}
|
||||
|
||||
/* If a proxy-authorization header was used for the proxy, then we should
|
||||
make sure that it isn't accidentally used for the document request
|
||||
after we've connected. So let's free and clear it here. */
|
||||
Curl_safefree(conn->allocptr.proxyuserpwd);
|
||||
conn->allocptr.proxyuserpwd = NULL;
|
||||
|
||||
Curl_http_auth_stage(data, 401); /* move on to the host auth */
|
||||
|
||||
infof (data, "Proxy replied to CONNECT request\n");
|
||||
return CURLE_OK;
|
||||
@@ -664,6 +686,37 @@ static CURLcode Curl_output_basic(struct connectdata *conn)
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
static CURLcode Curl_output_basic_proxy(struct connectdata *conn)
|
||||
{
|
||||
char *authorization;
|
||||
struct SessionHandle *data=conn->data;
|
||||
|
||||
sprintf(data->state.buffer, "%s:%s", conn->proxyuser, conn->proxypasswd);
|
||||
if(Curl_base64_encode(data->state.buffer, strlen(data->state.buffer),
|
||||
&authorization) >= 0) {
|
||||
Curl_safefree(conn->allocptr.proxyuserpwd);
|
||||
conn->allocptr.proxyuserpwd =
|
||||
aprintf("Proxy-authorization: Basic %s\015\012", authorization);
|
||||
free(authorization);
|
||||
}
|
||||
else
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
void Curl_http_auth_stage(struct SessionHandle *data,
|
||||
int stage)
|
||||
{
|
||||
if(stage == 401)
|
||||
data->state.authwant = data->set.httpauth;
|
||||
else if(stage == 407)
|
||||
data->state.authwant = data->set.proxyauth;
|
||||
else
|
||||
return; /* bad input stage */
|
||||
data->state.authstage = stage;
|
||||
data->state.authavail = CURLAUTH_NONE;
|
||||
}
|
||||
|
||||
CURLcode Curl_http(struct connectdata *conn)
|
||||
{
|
||||
struct SessionHandle *data=conn->data;
|
||||
@@ -677,6 +730,13 @@ CURLcode Curl_http(struct connectdata *conn)
|
||||
char *ptr;
|
||||
char *request;
|
||||
|
||||
if(!data->state.authstage) {
|
||||
if(conn->bits.httpproxy && conn->bits.proxy_user_passwd)
|
||||
Curl_http_auth_stage(data, 407);
|
||||
else
|
||||
Curl_http_auth_stage(data, 401);
|
||||
}
|
||||
|
||||
if(!conn->proto.http) {
|
||||
/* Only allocate this struct if we don't already have it! */
|
||||
|
||||
@@ -720,39 +780,62 @@ CURLcode Curl_http(struct connectdata *conn)
|
||||
curl_strequal(data->state.auth_host, conn->hostname) ||
|
||||
data->set.http_disable_hostname_check_before_authentication) {
|
||||
|
||||
#ifdef GSSAPI
|
||||
if((data->state.authwant == CURLAUTH_GSSNEGOTIATE) &&
|
||||
data->state.negotiate.context &&
|
||||
!GSS_ERROR(data->state.negotiate.status)) {
|
||||
result = Curl_output_negotiate(conn);
|
||||
if (result)
|
||||
return result;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
/* Send proxy authentication header if needed */
|
||||
if (data->state.authstage == 407) {
|
||||
#ifdef USE_SSLEAY
|
||||
if(data->state.authwant == CURLAUTH_NTLM) {
|
||||
result = Curl_output_ntlm(conn, FALSE);
|
||||
if(result)
|
||||
return result;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
if((data->state.authwant == CURLAUTH_DIGEST) &&
|
||||
data->state.digest.nonce) {
|
||||
result = Curl_output_digest(conn,
|
||||
(unsigned char *)request,
|
||||
(unsigned char *)ppath);
|
||||
if(data->state.authwant == CURLAUTH_NTLM) {
|
||||
result = Curl_output_ntlm(conn, TRUE);
|
||||
if(result)
|
||||
return result;
|
||||
}
|
||||
else if((data->state.authwant == CURLAUTH_BASIC) && /* Basic */
|
||||
conn->bits.user_passwd &&
|
||||
!checkheaders(data, "Authorization:")) {
|
||||
result = Curl_output_basic(conn);
|
||||
else
|
||||
#endif
|
||||
if((data->state.authwant == CURLAUTH_BASIC) && /* Basic */
|
||||
conn->bits.proxy_user_passwd &&
|
||||
!checkheaders(data, "Proxy-authorization:")) {
|
||||
result = Curl_output_basic_proxy(conn);
|
||||
if(result)
|
||||
return result;
|
||||
/* Switch to web authentication after proxy authentication is done */
|
||||
Curl_http_auth_stage(data, 401);
|
||||
}
|
||||
}
|
||||
/* Send web authentication header if needed */
|
||||
if (data->state.authstage == 401) {
|
||||
#ifdef GSSAPI
|
||||
if((data->state.authwant == CURLAUTH_GSSNEGOTIATE) &&
|
||||
data->state.negotiate.context &&
|
||||
!GSS_ERROR(data->state.negotiate.status)) {
|
||||
result = Curl_output_negotiate(conn);
|
||||
if (result)
|
||||
return result;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
#ifdef USE_SSLEAY
|
||||
if(data->state.authwant == CURLAUTH_NTLM) {
|
||||
result = Curl_output_ntlm(conn, FALSE);
|
||||
if(result)
|
||||
return result;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
if((data->state.authwant == CURLAUTH_DIGEST) &&
|
||||
data->state.digest.nonce) {
|
||||
result = Curl_output_digest(conn,
|
||||
(unsigned char *)request,
|
||||
(unsigned char *)ppath);
|
||||
if(result)
|
||||
return result;
|
||||
}
|
||||
else if((data->state.authwant == CURLAUTH_BASIC) && /* Basic */
|
||||
conn->bits.user_passwd &&
|
||||
!checkheaders(data, "Authorization:")) {
|
||||
result = Curl_output_basic(conn);
|
||||
if(result)
|
||||
return result;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -846,10 +929,12 @@ CURLcode Curl_http(struct connectdata *conn)
|
||||
}
|
||||
|
||||
if(data->cookies) {
|
||||
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
|
||||
co = Curl_cookie_getlist(data->cookies,
|
||||
conn->allocptr.cookiehost?
|
||||
conn->allocptr.cookiehost:host, ppath,
|
||||
(bool)(conn->protocol&PROT_HTTPS?TRUE:FALSE));
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
|
||||
}
|
||||
|
||||
if (conn->bits.httpproxy &&
|
||||
@@ -1020,7 +1105,7 @@ CURLcode Curl_http(struct connectdata *conn)
|
||||
struct Cookie *store=co;
|
||||
/* now loop through all cookies that matched */
|
||||
while(co) {
|
||||
if(co->value && strlen(co->value)) {
|
||||
if(co->value) {
|
||||
if(0 == count) {
|
||||
add_bufferf(req_buffer, "Cookie: ");
|
||||
}
|
||||
|
@@ -42,5 +42,6 @@ CURLcode Curl_http_connect(struct connectdata *conn);
|
||||
void Curl_httpchunk_init(struct connectdata *conn);
|
||||
CHUNKcode Curl_httpchunk_read(struct connectdata *conn, char *datap,
|
||||
ssize_t length, ssize_t *wrote);
|
||||
void Curl_http_auth_stage(struct SessionHandle *data, int stage);
|
||||
#endif
|
||||
#endif
|
||||
|
@@ -102,8 +102,9 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
|
||||
size_t length,
|
||||
size_t *wrote)
|
||||
{
|
||||
CURLcode result;
|
||||
CURLcode result=CURLE_OK;
|
||||
struct Curl_chunker *ch = &conn->proto.http->chunk;
|
||||
struct Curl_transfer_keeper *k = &conn->keep;
|
||||
int piece;
|
||||
*wrote = 0; /* nothing yet */
|
||||
|
||||
@@ -180,8 +181,9 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
|
||||
switch (conn->keep.content_encoding) {
|
||||
case IDENTITY:
|
||||
#endif
|
||||
result = Curl_client_write(conn->data, CLIENTWRITE_BODY, datap,
|
||||
piece);
|
||||
if(!k->ignorebody)
|
||||
result = Curl_client_write(conn->data, CLIENTWRITE_BODY, datap,
|
||||
piece);
|
||||
#ifdef HAVE_LIBZ
|
||||
break;
|
||||
|
||||
|
@@ -46,6 +46,7 @@
|
||||
#include "base64.h"
|
||||
#include "http_ntlm.h"
|
||||
#include "url.h"
|
||||
#include "http.h" /* for Curl_http_auth_stage() */
|
||||
|
||||
#define _MPRINTF_REPLACE /* use our functions only */
|
||||
#include <curl/mprintf.h>
|
||||
@@ -551,7 +552,10 @@ CURLcode Curl_output_ntlm(struct connectdata *conn,
|
||||
return CURLE_OUT_OF_MEMORY; /* FIX TODO */
|
||||
|
||||
ntlm->state = NTLMSTATE_TYPE3; /* we sent a type-3 */
|
||||
|
||||
|
||||
/* Switch to web authentication after proxy authentication is done */
|
||||
if (proxy)
|
||||
Curl_http_auth_stage(conn->data, 401);
|
||||
}
|
||||
break;
|
||||
|
||||
|
10
lib/llist.c
10
lib/llist.c
@@ -159,10 +159,10 @@ Curl_llist_count(curl_llist *list)
|
||||
void
|
||||
Curl_llist_destroy(curl_llist *list, void *user)
|
||||
{
|
||||
while (list->size > 0) {
|
||||
Curl_llist_remove(list, CURL_LLIST_TAIL(list), user);
|
||||
}
|
||||
if(list) {
|
||||
while (list->size > 0)
|
||||
Curl_llist_remove(list, CURL_LLIST_TAIL(list), user);
|
||||
|
||||
free(list);
|
||||
list = NULL;
|
||||
free(list);
|
||||
}
|
||||
}
|
||||
|
@@ -62,7 +62,10 @@ struct memdebug {
|
||||
* Don't use these with multithreaded test programs!
|
||||
*/
|
||||
|
||||
FILE *logfile;
|
||||
#define logfile curl_debuglogfile
|
||||
FILE *curl_debuglogfile;
|
||||
static bool memlimit; /* enable memory limit */
|
||||
static long memsize; /* set number of mallocs allowed */
|
||||
|
||||
/* this sets the log file name */
|
||||
void curl_memdebug(const char *logname)
|
||||
@@ -73,12 +76,47 @@ void curl_memdebug(const char *logname)
|
||||
logfile = stderr;
|
||||
}
|
||||
|
||||
/* This function sets the number of malloc() calls that should return
|
||||
successfully! */
|
||||
void curl_memlimit(long limit)
|
||||
{
|
||||
memlimit = TRUE;
|
||||
memsize = limit;
|
||||
}
|
||||
|
||||
/* returns TRUE if this isn't allowed! */
|
||||
static bool countcheck(const char *func, int line, const char *source)
|
||||
{
|
||||
/* if source is NULL, then the call is made internally and this check
|
||||
should not be made */
|
||||
if(memlimit && source) {
|
||||
if(!memsize) {
|
||||
if(logfile && source)
|
||||
fprintf(logfile, "LIMIT %s:%d %s reached memlimit\n",
|
||||
source, line, func);
|
||||
return TRUE; /* RETURN ERROR! */
|
||||
}
|
||||
else
|
||||
memsize--; /* countdown */
|
||||
|
||||
/* log the countdown */
|
||||
if(logfile && source)
|
||||
fprintf(logfile, "LIMIT %s:%d %ld ALLOCS left\n",
|
||||
source, line, memsize);
|
||||
|
||||
}
|
||||
|
||||
return FALSE; /* allow this */
|
||||
}
|
||||
|
||||
void *curl_domalloc(size_t wantedsize, int line, const char *source)
|
||||
{
|
||||
struct memdebug *mem;
|
||||
size_t size;
|
||||
|
||||
if(countcheck("malloc", line, source))
|
||||
return NULL;
|
||||
|
||||
/* alloc at least 64 bytes */
|
||||
size = sizeof(struct memdebug)+wantedsize;
|
||||
|
||||
@@ -106,6 +144,9 @@ char *curl_dostrdup(const char *str, int line, const char *source)
|
||||
exit(2);
|
||||
}
|
||||
|
||||
if(countcheck("strdup", line, source))
|
||||
return NULL;
|
||||
|
||||
len=strlen(str)+1;
|
||||
|
||||
mem=curl_domalloc(len, 0, NULL); /* NULL prevents logging */
|
||||
@@ -125,6 +166,9 @@ void *curl_dorealloc(void *ptr, size_t wantedsize,
|
||||
|
||||
size_t size = sizeof(struct memdebug)+wantedsize;
|
||||
|
||||
if(countcheck("realloc", line, source))
|
||||
return NULL;
|
||||
|
||||
mem = (struct memdebug *)((char *)ptr - offsetof(struct memdebug, mem));
|
||||
|
||||
mem=(struct memdebug *)(realloc)(mem, size);
|
||||
|
@@ -39,6 +39,8 @@
|
||||
#include <memory.h>
|
||||
#endif
|
||||
|
||||
#define logfile curl_debuglogfile
|
||||
|
||||
extern FILE *logfile;
|
||||
|
||||
/* memory functions */
|
||||
@@ -47,6 +49,7 @@ void *curl_dorealloc(void *ptr, size_t size, int line, const char *source);
|
||||
void curl_dofree(void *ptr, int line, const char *source);
|
||||
char *curl_dostrdup(const char *str, int line, const char *source);
|
||||
void curl_memdebug(const char *logname);
|
||||
void curl_memlimit(long limit);
|
||||
|
||||
/* file descriptor manipulators */
|
||||
int curl_socket(int domain, int type, int protocol, int, const char *);
|
||||
|
@@ -171,6 +171,7 @@ static BOOL dprintf_IsQualifierNoDollar(char c)
|
||||
case '0': case '1': case '2': case '3': case '4':
|
||||
case '5': case '6': case '7': case '8': case '9':
|
||||
case 'h': case 'l': case 'L': case 'Z': case 'q':
|
||||
case '*':
|
||||
return TRUE;
|
||||
default:
|
||||
return FALSE;
|
||||
|
51 lib/multi.c
@@ -56,7 +56,8 @@ struct Curl_message {

typedef enum {
  CURLM_STATE_INIT,
  CURLM_STATE_CONNECT, /* connect has been sent off */
  CURLM_STATE_CONNECT, /* resolve/connect has been sent off */
  CURLM_STATE_WAITRESOLVE, /* we're awaiting the resolve to finalize */
  CURLM_STATE_WAITCONNECT, /* we're awaiting the connect to finalize */
  CURLM_STATE_DO, /* send off the request (part 1) */
  CURLM_STATE_DO_MORE, /* send off the request (part 2) */
@@ -239,6 +240,14 @@ CURLMcode curl_multi_fdset(CURLM *multi_handle,
    switch(easy->state) {
    default:
      break;
    case CURLM_STATE_WAITRESOLVE:
      /* waiting for a resolve to complete */
      Curl_multi_ares_fdset(easy->easy_conn, read_fd_set, write_fd_set,
                            &this_max_fd);
      if(this_max_fd > *max_fd)
        *max_fd = this_max_fd;
      break;

    case CURLM_STATE_WAITCONNECT:
    case CURLM_STATE_DO_MORE:
    {
@@ -293,6 +302,7 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
  CURLMcode result=CURLM_OK;
  struct Curl_message *msg = NULL;
  bool connected;
  bool async;

  *running_handles = 0; /* bump this once for every living handle */

@@ -320,6 +330,7 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
        easy->easy_handle->state.used_interface = Curl_if_multi;
      }
      break;

    case CURLM_STATE_CONNECT:
      if (Curl_global_host_cache_use(easy->easy_handle)) {
        easy->easy_handle->hostcache = Curl_global_host_cache_get();
@@ -333,16 +344,46 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)

      /* Connect. We get a connection identifier filled in. */
      Curl_pgrsTime(easy->easy_handle, TIMER_STARTSINGLE);
      easy->result = Curl_connect(easy->easy_handle, &easy->easy_conn);
      easy->result = Curl_connect(easy->easy_handle, &easy->easy_conn, &async);

      /* after the connect has been sent off, go WAITCONNECT */
      if(CURLE_OK == easy->result) {
        easy->state = CURLM_STATE_WAITCONNECT;
        result = CURLM_CALL_MULTI_PERFORM;
        if(async)
          /* We're now waiting for an asynchronous name lookup */
          easy->state = CURLM_STATE_WAITRESOLVE;
        else {
          /* after the connect has been sent off, go WAITCONNECT */
          easy->state = CURLM_STATE_WAITCONNECT;
          result = CURLM_CALL_MULTI_PERFORM;
        }
      }
      break;

    case CURLM_STATE_WAITRESOLVE:
      /* awaiting an asynch name resolve to complete */
      {
        bool done;

        /* check if we have the name resolved by now */
        easy->result = Curl_is_resolved(easy->easy_conn, &done);

        if(done) {
          /* Perform the next step in the connection phase, and then move on
             to the WAITCONNECT state */
          easy->result = Curl_async_resolved(easy->easy_conn);

          easy->state = CURLM_STATE_WAITCONNECT;
        }

        if(CURLE_OK != easy->result) {
          /* failure detected */
          easy->easy_conn = NULL; /* no more connection */
          break;
        }
      }
      break;

    case CURLM_STATE_WAITCONNECT:
      /* awaiting a completion of an asynch connect */
      {
        bool connected;
        easy->result = Curl_is_connected(easy->easy_conn,
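Not part of the diff: with the new CURLM_STATE_WAITRESOLVE state, curl_multi_perform() can return while a name lookup is still pending, so the application keeps driving the handle with the usual fdset/select loop (error handling trimmed):

#include <sys/select.h>
#include <curl/curl.h>

/* Sketch: drive handles added to 'multi' until they are all done. While a
   handle waits in CURLM_STATE_WAITRESOLVE, curl_multi_fdset() includes the
   ares sockets through the Curl_multi_ares_fdset() call added above. */
static void drive(CURLM *multi)
{
  int running = 1;

  while(running) {
    fd_set rd, wr, ex;
    int maxfd = -1;
    struct timeval timeout = { 1, 0 };

    while(CURLM_CALL_MULTI_PERFORM == curl_multi_perform(multi, &running))
      ; /* keep calling while it asks for another round */

    if(!running)
      break;

    FD_ZERO(&rd);
    FD_ZERO(&wr);
    FD_ZERO(&ex);
    curl_multi_fdset(multi, &rd, &wr, &ex, &maxfd);
    select(maxfd + 1, &rd, &wr, &ex, &timeout);
  }
}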
|
||||
|
@@ -132,8 +132,8 @@ void curl_slist_free_all(struct curl_slist *list)
|
||||
|
||||
void Curl_infof(struct SessionHandle *data, const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
if(data->set.verbose) {
|
||||
if(data && data->set.verbose) {
|
||||
va_list ap;
|
||||
char print_buffer[1024 + 1];
|
||||
va_start(ap, fmt);
|
||||
vsnprintf(print_buffer, 1024, fmt, ap);
|
||||
|
17
lib/setup.h
17
lib/setup.h
@@ -185,7 +185,7 @@ int fileno( FILE *stream);
|
||||
* Information regarding a single IP witin a Curl_addrinfo MUST be stored in
|
||||
* a Curl_ipconnect struct.
|
||||
*/
|
||||
#ifdef ENABLE_IPV6
|
||||
#if defined(ENABLE_IPV6) && !defined(USE_ARES)
|
||||
typedef struct addrinfo Curl_addrinfo;
|
||||
typedef struct addrinfo Curl_ipconnect;
|
||||
#else
|
||||
@@ -193,6 +193,21 @@ typedef struct hostent Curl_addrinfo;
|
||||
typedef struct in_addr Curl_ipconnect;
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
#if (SIZEOF_OFF_T > 4)
|
||||
/* off_t is bigger than 4 bytes, and that makes it our prefered variable
|
||||
type for filesizes */
|
||||
typedef off_t filesize_t;
|
||||
#else
|
||||
#ifdef HAVE_LONGLONG
|
||||
/* we have long long, use this for filesizes internally */
|
||||
typedef long long filesize_t;
|
||||
#else
|
||||
/* small off_t and no long long, no support for large files :-( */
|
||||
typedef long filesize_t;
|
||||
#endif /* didn't have long long */
|
||||
#endif /* sizeof wasn't bigger than 4 */
|
||||
|
||||
#endif /* 0 */
|
||||
|
||||
#endif /* __CONFIG_H */
|
||||
|
37  lib/share.c
@@ -39,8 +39,10 @@ curl_share_init(void)
{
  struct Curl_share *share =
    (struct Curl_share *)malloc(sizeof(struct Curl_share));
  if (share)
  if (share) {
    memset (share, 0, sizeof(struct Curl_share));
    share->specifier |= (1<<CURL_LOCK_DATA_SHARE);
  }

  return share;
}
@@ -76,6 +78,9 @@ curl_share_setopt(CURLSH *sh, CURLSHoption option, ...)
      break;

    case CURL_LOCK_DATA_COOKIE:
      if (!share->cookies) {
        share->cookies = Curl_cookie_init(NULL, NULL, NULL, TRUE );
      }
      break;

    case CURL_LOCK_DATA_SSL_SESSION:
@@ -103,6 +108,10 @@ curl_share_setopt(CURLSH *sh, CURLSHoption option, ...)
      break;

    case CURL_LOCK_DATA_COOKIE:
      if (share->cookies) {
        Curl_cookie_cleanup(share->cookies);
        share->cookies = NULL;
      }
      break;

    case CURL_LOCK_DATA_SSL_SESSION:
@@ -138,12 +147,29 @@ curl_share_setopt(CURLSH *sh, CURLSHoption option, ...)
  return CURLSHE_OK;
}

CURLSHcode curl_share_cleanup(CURLSH *sh)
CURLSHcode
curl_share_cleanup(CURLSH *sh)
{
  struct Curl_share *share = (struct Curl_share *)sh;
  if (share->dirty)

  if (share == NULL)
    return CURLSHE_INVALID;

  share->lockfunc(NULL, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE,
                  share->clientdata);

  if (share->dirty) {
    share->unlockfunc(NULL, CURL_LOCK_DATA_SHARE, share->clientdata);
    return CURLSHE_IN_USE;
  }

  if(share->hostcache)
    Curl_hash_destroy(share->hostcache);

  if(share->cookies)
    Curl_cookie_cleanup(share->cookies);

  share->unlockfunc(NULL, CURL_LOCK_DATA_SHARE, share->clientdata);
  free (share);

  return CURLSHE_OK;
@@ -151,7 +177,8 @@ CURLSHcode curl_share_cleanup(CURLSH *sh)


CURLSHcode
Curl_share_lock(struct SessionHandle *data, curl_lock_data type, curl_lock_access access)
Curl_share_lock(struct SessionHandle *data, curl_lock_data type,
                curl_lock_access access)
{
  struct Curl_share *share = data->share;

@@ -159,7 +186,7 @@ Curl_share_lock(struct SessionHandle *data, curl_lock_data type, curl_lock_acces
    return CURLSHE_INVALID;

  if(share->specifier & (1<<type)) {
    share->lockfunc (data, type, access, share->clientdata);
    share->lockfunc(data, type, access, share->clientdata);
  }
  /* else if we don't share this, pretend successful lock */
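The cookie sharing wired in above is driven from an application through the share interface. Below is a minimal usage sketch, not part of the patch: the URLs are placeholders, and the no-op lock callbacks assume a single-threaded program (this vintage of curl_share_cleanup() invokes the lock callback unconditionally, so some callback should be installed).

    #include <curl/curl.h>

    /* no-op lock callbacks: sufficient for a single-threaded program */
    static void my_lock(CURL *handle, curl_lock_data data,
                        curl_lock_access access, void *userptr)
    {
      (void)handle; (void)data; (void)access; (void)userptr;
    }

    static void my_unlock(CURL *handle, curl_lock_data data, void *userptr)
    {
      (void)handle; (void)data; (void)userptr;
    }

    int main(void)
    {
      CURLSH *share;
      CURL *first, *second;

      curl_global_init(CURL_GLOBAL_ALL);

      share = curl_share_init();
      curl_share_setopt(share, CURLSHOPT_LOCKFUNC, my_lock);
      curl_share_setopt(share, CURLSHOPT_UNLOCKFUNC, my_unlock);
      curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);

      first = curl_easy_init();
      curl_easy_setopt(first, CURLOPT_SHARE, share);
      curl_easy_setopt(first, CURLOPT_URL, "http://example.com/login");
      curl_easy_perform(first);   /* cookies received here ...       */

      second = curl_easy_init();
      curl_easy_setopt(second, CURLOPT_SHARE, share);
      curl_easy_setopt(second, CURLOPT_URL, "http://example.com/member");
      curl_easy_perform(second);  /* ... are sent along here as well */

      curl_easy_cleanup(first);
      curl_easy_cleanup(second);
      curl_share_cleanup(share);  /* fails with CURLSHE_IN_USE while an
                                     easy handle still uses the share */
      curl_global_cleanup();
      return 0;
    }

curl_share_cleanup() keeps returning CURLSHE_IN_USE as long as any easy handle references the share, which matches the dirty counter handled in lib/url.c further down.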
@@ -26,6 +26,7 @@
#include "setup.h"
#include <curl/curl.h>
#include "cookie.h"

/* this struct is libcurl-private, don't export details */
struct Curl_share {
@@ -37,6 +38,7 @@ struct Curl_share {
  void *clientdata;

  curl_hash *hostcache;
  struct CookieInfo *cookies;
};

CURLSHcode Curl_share_lock (
@@ -97,6 +97,7 @@
|
||||
#include "http_digest.h"
|
||||
#include "http_ntlm.h"
|
||||
#include "http_negotiate.h"
|
||||
#include "share.h"
|
||||
|
||||
#define _MPRINTF_REPLACE /* use our functions only */
|
||||
#include <curl/mprintf.h>
|
||||
@@ -605,8 +606,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
len = end-start+1;
|
||||
|
||||
/* allocate memory of a cloned copy */
|
||||
if(data->info.contenttype)
|
||||
free(data->info.contenttype);
|
||||
Curl_safefree(data->info.contenttype);
|
||||
|
||||
data->info.contenttype = malloc(len + 1);
|
||||
if (NULL == data->info.contenttype)
|
||||
@@ -707,12 +707,16 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
}
|
||||
else if(data->cookies &&
|
||||
checkprefix("Set-Cookie:", k->p)) {
|
||||
Curl_cookie_add(data->cookies, TRUE, k->p+11,
|
||||
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
|
||||
CURL_LOCK_ACCESS_SINGLE);
|
||||
Curl_cookie_add(data,
|
||||
data->cookies, TRUE, k->p+11,
|
||||
/* If there is a custom-set Host: name, use it
|
||||
here, or else use real peer host name. */
|
||||
conn->allocptr.cookiehost?
|
||||
conn->allocptr.cookiehost:conn->name,
|
||||
conn->ppath);
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
|
||||
}
|
||||
else if(checkprefix("Last-Modified:", k->p) &&
|
||||
(data->set.timecondition || data->set.get_filetime) ) {
|
||||
@@ -722,12 +726,24 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
if(data->set.get_filetime)
|
||||
data->info.filetime = k->timeofdoc;
|
||||
}
|
||||
else if(checkprefix("WWW-Authenticate:", k->p) &&
|
||||
(401 == k->httpcode)) {
|
||||
else if((checkprefix("WWW-Authenticate:", k->p) &&
|
||||
(401 == k->httpcode)) ||
|
||||
(checkprefix("Proxy-authenticate:", k->p) &&
|
||||
(407 == k->httpcode))) {
|
||||
/*
|
||||
* This page requires authentication
|
||||
*/
|
||||
char *start = k->p+strlen("WWW-Authenticate:");
|
||||
char *start = (k->httpcode == 407) ?
|
||||
k->p+strlen("Proxy-authenticate:"):
|
||||
k->p+strlen("WWW-Authenticate:");
|
||||
/*
|
||||
* Switch from proxy to web authentication and back if needed
|
||||
*/
|
||||
if (k->httpcode == 407 && data->state.authstage != 407)
|
||||
Curl_http_auth_stage(data, 407);
|
||||
|
||||
else if (k->httpcode == 401 && data->state.authstage != 401)
|
||||
Curl_http_auth_stage(data, 401);
|
||||
|
||||
/* pass all white spaces */
|
||||
while(*start && isspace((int)*start))
|
||||
@@ -753,7 +769,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
if(data->state.authwant == CURLAUTH_NTLM) {
|
||||
/* NTLM authentication is activated */
|
||||
CURLntlm ntlm =
|
||||
Curl_input_ntlm(conn, FALSE, start);
|
||||
Curl_input_ntlm(conn, k->httpcode == 407, start);
|
||||
|
||||
if(CURLNTLM_BAD != ntlm)
|
||||
conn->newurl = strdup(data->change.url); /* clone string */
|
||||
@@ -1486,6 +1502,7 @@ Transfer(struct connectdata *conn)
|
||||
|
||||
CURLcode Curl_pretransfer(struct SessionHandle *data)
|
||||
{
|
||||
CURLcode res;
|
||||
if(!data->change.url)
|
||||
/* we can't do anything wihout URL */
|
||||
return CURLE_URL_MALFORMAT;
|
||||
@@ -1494,7 +1511,11 @@ CURLcode Curl_pretransfer(struct SessionHandle *data)
|
||||
/* Init the SSL session ID cache here. We do it here since we want to
|
||||
do it after the *_setopt() calls (that could change the size) but
|
||||
before any transfer. */
|
||||
Curl_SSL_InitSessions(data, data->set.ssl.numsessions);
|
||||
res = Curl_SSL_InitSessions(data, data->set.ssl.numsessions);
|
||||
if(res)
|
||||
return res;
|
||||
#else
|
||||
(void)res;
|
||||
#endif
|
||||
|
||||
data->set.followlocation=0; /* reset the location-follow counter */
|
||||
@@ -1502,19 +1523,22 @@ CURLcode Curl_pretransfer(struct SessionHandle *data)
|
||||
data->state.errorbuf = FALSE; /* no error has occurred */
|
||||
|
||||
/* set preferred authentication, default to basic */
|
||||
data->state.authwant = data->set.httpauth?data->set.httpauth:CURLAUTH_BASIC;
|
||||
data->state.authavail = CURLAUTH_NONE; /* nothing so far */
|
||||
|
||||
data->state.authstage = 0; /* initialize authentication later */
|
||||
|
||||
/* If there was a list of cookie files to read and we haven't done it before,
|
||||
do it now! */
|
||||
if(data->change.cookielist) {
|
||||
struct curl_slist *list = data->change.cookielist;
|
||||
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
|
||||
while(list) {
|
||||
data->cookies = Curl_cookie_init(list->data,
|
||||
data->cookies = Curl_cookie_init(data,
|
||||
list->data,
|
||||
data->cookies,
|
||||
data->set.cookiesession);
|
||||
list = list->next;
|
||||
}
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
|
||||
curl_slist_free_all(data->change.cookielist); /* clean up list */
|
||||
data->change.cookielist = NULL; /* don't do this again! */
|
||||
}
|
||||
@@ -1899,10 +1923,22 @@ CURLcode Curl_perform(struct SessionHandle *data)
|
||||
do {
|
||||
int urlchanged = FALSE;
|
||||
do {
|
||||
bool async;
|
||||
Curl_pgrsTime(data, TIMER_STARTSINGLE);
|
||||
data->change.url_changed = FALSE;
|
||||
res = Curl_connect(data, &conn);
|
||||
res = Curl_connect(data, &conn, &async);
|
||||
|
||||
if((CURLE_OK == res) && async) {
|
||||
/* Now, if async is TRUE here, we need to wait for the name
|
||||
to resolve */
|
||||
res = Curl_wait_for_resolv(conn, NULL);
|
||||
if(CURLE_OK == res)
|
||||
/* Resolved, continue with the connection */
|
||||
res = Curl_async_resolved(conn);
|
||||
}
|
||||
if(res)
|
||||
break;
|
||||
|
||||
/* If a callback (or something) has altered the URL we should use within
|
||||
the Curl_connect(), we detect it here and act as if we are redirected
|
||||
to the new URL */
|
||||
|
267  lib/url.c
@@ -147,7 +147,11 @@ static unsigned int ConnectionStore(struct SessionHandle *data,
|
||||
struct connectdata *conn);
|
||||
static bool safe_strequal(char* str1, char* str2);
|
||||
|
||||
#if !defined(WIN32)||defined(__CYGWIN32__)
|
||||
#ifndef USE_ARES
|
||||
/* not for Win32, unless it is cygwin
|
||||
not for ares builds */
|
||||
#if !defined(WIN32) || defined(__CYGWIN32__)
|
||||
|
||||
#ifndef RETSIGTYPE
|
||||
#define RETSIGTYPE void
|
||||
#endif
|
||||
@@ -165,6 +169,7 @@ RETSIGTYPE alarmfunc(int signal)
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
#endif /* USE_ARES */
|
||||
|
||||
void Curl_safefree(void *ptr)
|
||||
{
|
||||
@@ -191,10 +196,6 @@ CURLcode Curl_close(struct SessionHandle *data)
|
||||
Curl_SSL_Close_All(data);
|
||||
#endif
|
||||
|
||||
/* No longer a dirty share, if it exists */
|
||||
if (data->share)
|
||||
data->share->dirty--;
|
||||
|
||||
if(data->change.cookielist) /* clean up list if any */
|
||||
curl_slist_free_all(data->change.cookielist);
|
||||
|
||||
@@ -213,13 +214,17 @@ CURLcode Curl_close(struct SessionHandle *data)
|
||||
Curl_safefree(data->state.headerbuff);
|
||||
|
||||
#ifndef CURL_DISABLE_HTTP
|
||||
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
|
||||
if(data->set.cookiejar) {
|
||||
/* we have a "destination" for all the cookies to get dumped to */
|
||||
if(Curl_cookie_output(data->cookies, data->set.cookiejar))
|
||||
infof(data, "WARNING: failed to save cookies in given jar\n");
|
||||
}
|
||||
|
||||
Curl_cookie_cleanup(data->cookies);
|
||||
if( !data->share || (data->cookies != data->share->cookies) ) {
|
||||
Curl_cookie_cleanup(data->cookies);
|
||||
}
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
|
||||
#endif
|
||||
|
||||
/* free the connection cache */
|
||||
@@ -229,6 +234,10 @@ CURLcode Curl_close(struct SessionHandle *data)
|
||||
|
||||
Curl_digest_cleanup(data);
|
||||
|
||||
/* No longer a dirty share, if it exists */
|
||||
if (data->share)
|
||||
data->share->dirty--;
|
||||
|
||||
free(data);
|
||||
return CURLE_OK;
|
||||
}
|
||||
@@ -303,12 +312,16 @@ CURLcode Curl_open(struct SessionHandle **curl)
|
||||
|
||||
data->set.proxytype = CURLPROXY_HTTP; /* defaults to HTTP proxy */
|
||||
|
||||
data->set.httpauth = CURLAUTH_BASIC; /* defaults to basic authentication */
|
||||
data->set.proxyauth = CURLAUTH_BASIC; /* defaults to basic authentication */
|
||||
|
||||
/* create an array with connection data struct pointers */
|
||||
data->state.numconnects = 5; /* hard-coded right now */
|
||||
data->state.connects = (struct connectdata **)
|
||||
malloc(sizeof(struct connectdata *) * data->state.numconnects);
|
||||
|
||||
if(!data->state.connects) {
|
||||
free(data->state.headerbuff);
|
||||
free(data);
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
}
|
||||
@@ -480,6 +493,13 @@ CURLcode Curl_setopt(struct SessionHandle *data, CURLoption option, ...)
|
||||
*/
|
||||
data->set.get_filetime = va_arg(param, long)?TRUE:FALSE;
|
||||
break;
|
||||
case CURLOPT_FTP_CREATE_MISSING_DIRS:
|
||||
/*
|
||||
* An FTP option that modifies an upload to create missing directories on
|
||||
* the server.
|
||||
*/
|
||||
data->set.ftp_create_missing_dirs = va_arg( param , long )?TRUE:FALSE;
|
||||
break;
|
||||
case CURLOPT_FTPLISTONLY:
|
||||
/*
|
||||
* An FTP option that changes the command to one that asks for a list
|
||||
@@ -601,7 +621,7 @@ CURLcode Curl_setopt(struct SessionHandle *data, CURLoption option, ...)
|
||||
* Activate the cookie parser. This may or may not already
|
||||
* have been made.
|
||||
*/
|
||||
data->cookies = Curl_cookie_init(NULL, data->cookies,
|
||||
data->cookies = Curl_cookie_init(data, NULL, data->cookies,
|
||||
data->set.cookiesession);
|
||||
break;
|
||||
#endif
|
||||
@@ -862,6 +882,26 @@ CURLcode Curl_setopt(struct SessionHandle *data, CURLoption option, ...)
|
||||
}
|
||||
break;
|
||||
|
||||
case CURLOPT_PROXYAUTH:
|
||||
/*
|
||||
* Set HTTP Authentication type BITMASK.
|
||||
*/
|
||||
{
|
||||
long auth = va_arg(param, long);
|
||||
/* switch off bits we can't support */
|
||||
#ifndef USE_SSLEAY
|
||||
auth &= ~CURLAUTH_NTLM; /* no NTLM without SSL */
|
||||
#endif
|
||||
#ifndef GSSAPI
|
||||
auth &= ~CURLAUTH_GSSNEGOTIATE; /* no GSS-Negotiate without GSSAPI */
|
||||
#endif
|
||||
if(!auth)
|
||||
return CURLE_FAILED_INIT; /* no supported types left! */
|
||||
|
||||
data->set.proxyauth = auth;
|
||||
}
|
||||
break;
|
||||
|
||||
case CURLOPT_USERPWD:
|
||||
/*
|
||||
* user:password to use in the operation
|
||||
@@ -1148,34 +1188,56 @@ CURLcode Curl_setopt(struct SessionHandle *data, CURLoption option, ...)
|
||||
{
|
||||
struct Curl_share *set;
|
||||
set = va_arg(param, struct Curl_share *);
|
||||
if(data->share)
|
||||
{
|
||||
|
||||
/* disconnect from old share, if any */
|
||||
if(data->share) {
|
||||
Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
|
||||
|
||||
/* checking the dns cache stuff */
|
||||
if(data->share->hostcache == data->hostcache)
|
||||
{
|
||||
data->hostcache = NULL;
|
||||
}
|
||||
|
||||
if(data->share->cookies == data->cookies)
|
||||
data->cookies = NULL;
|
||||
|
||||
data->share->dirty--;
|
||||
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
|
||||
data->share = NULL;
|
||||
}
|
||||
|
||||
/* use new share if it set */
|
||||
data->share = set;
|
||||
if(data->share) {
|
||||
|
||||
Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
|
||||
Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
|
||||
|
||||
data->share->dirty++;
|
||||
data->share->dirty++;
|
||||
|
||||
if( data->hostcache )
|
||||
{
|
||||
Curl_hash_destroy(data->hostcache);
|
||||
data->hostcache = data->share->hostcache;
|
||||
if(data->share->hostcache) {
|
||||
/* use shared host cache, first free own one if any */
|
||||
if(data->hostcache)
|
||||
Curl_hash_destroy(data->hostcache);
|
||||
|
||||
data->hostcache = data->share->hostcache;
|
||||
}
|
||||
|
||||
if(data->share->cookies) {
|
||||
/* use shared cookie list, first free own one if any */
|
||||
if (data->cookies)
|
||||
Curl_cookie_cleanup(data->cookies);
|
||||
data->cookies = data->share->cookies;
|
||||
}
|
||||
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
|
||||
|
||||
}
|
||||
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
|
||||
/* check cookie list is set */
|
||||
if(!data->cookies)
|
||||
data->cookies = Curl_cookie_init(data, NULL, NULL, TRUE );
|
||||
|
||||
/* check for host cache not needed,
|
||||
* it will be done by curl_easy_perform */
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -1260,7 +1322,11 @@ CURLcode Curl_disconnect(struct connectdata *conn)
|
||||
Curl_safefree(conn->allocptr.host);
|
||||
Curl_safefree(conn->allocptr.cookiehost);
|
||||
Curl_safefree(conn->proxyhost);
|
||||
|
||||
#ifdef USE_ARES
|
||||
/* possible left-overs from the async name resolve */
|
||||
Curl_safefree(conn->async.hostname);
|
||||
#endif
|
||||
|
||||
Curl_free_ssl_config(&conn->ssl_config);
|
||||
|
||||
free(conn); /* free all the connection oriented data */
|
||||
@@ -1606,7 +1672,15 @@ static int handleSock5Proxy(
|
||||
#ifndef ENABLE_IPV6
|
||||
struct Curl_dns_entry *dns;
|
||||
Curl_addrinfo *hp=NULL;
|
||||
dns = Curl_resolv(conn->data, conn->hostname, conn->remote_port);
|
||||
int rc = Curl_resolv(conn, conn->hostname, conn->remote_port, &dns);
|
||||
|
||||
if(rc == -1)
|
||||
return CURLE_COULDNT_RESOLVE_HOST;
|
||||
|
||||
if(rc == 1)
|
||||
/* this requires that we're in "wait for resolve" state */
|
||||
rc = Curl_wait_for_resolv(conn, &dns);
|
||||
|
||||
/*
|
||||
* We cannot use 'hostent' as a struct that Curl_resolv() returns. It
|
||||
* returns a Curl_addrinfo pointer that may not always look the same.
|
||||
@@ -1815,8 +1889,19 @@ CURLcode Curl_protocol_connect(struct connectdata *conn,
|
||||
return result; /* pass back status */
|
||||
}
|
||||
|
||||
/*
|
||||
* CreateConnection() sets up a new connectdata struct, or re-uses an already
|
||||
* existing one, and resolves host name.
|
||||
*
|
||||
* if this function returns CURLE_OK and *async is set to TRUE, the resolve
|
||||
* response will be coming asynchronously. If *async is FALSE, the name is
|
||||
* already resolved.
|
||||
*/
|
||||
|
||||
static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
struct connectdata **in_connect)
|
||||
struct connectdata **in_connect,
|
||||
struct Curl_dns_entry **addr,
|
||||
bool *async)
|
||||
{
|
||||
char *tmp;
|
||||
CURLcode result=CURLE_OK;
|
||||
@@ -1833,7 +1918,7 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
char passwd[MAX_CURL_PASSWORD_LENGTH];
|
||||
bool passwdgiven=FALSE; /* set TRUE if an application-provided password has
|
||||
been set */
|
||||
|
||||
int rc;
|
||||
|
||||
#ifdef HAVE_SIGACTION
|
||||
struct sigaction keep_sigact; /* store the old struct here */
|
||||
@@ -1844,6 +1929,9 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
#endif
|
||||
#endif
|
||||
|
||||
*addr = NULL; /* nothing yet */
|
||||
*async = FALSE;
|
||||
|
||||
/*************************************************************
|
||||
* Check input data
|
||||
*************************************************************/
|
||||
@@ -2203,15 +2291,13 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
"%" MAX_CURL_PASSWORD_LENGTH_TXT "[^@]",
|
||||
user, passwd))) {
|
||||
/* found user and password, rip them out */
|
||||
if(conn->proxyuser)
|
||||
free(conn->proxyuser);
|
||||
Curl_safefree(conn->proxyuser);
|
||||
conn->proxyuser = strdup(user);
|
||||
|
||||
if(!conn->proxyuser)
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
|
||||
if(conn->proxypasswd)
|
||||
free(conn->proxypasswd);
|
||||
Curl_safefree(conn->proxypasswd);
|
||||
conn->proxypasswd = strdup(passwd);
|
||||
|
||||
if(!conn->proxypasswd)
|
||||
@@ -2795,6 +2881,8 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
|
||||
free(old_conn->user);
|
||||
free(old_conn->passwd);
|
||||
Curl_safefree(old_conn->proxyuser);
|
||||
Curl_safefree(old_conn->proxypasswd);
|
||||
|
||||
free(old_conn); /* we don't need this anymore */
|
||||
|
||||
@@ -2849,8 +2937,10 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
/* else, no chunky upload */
|
||||
FALSE;
|
||||
|
||||
#ifndef USE_ARES
|
||||
/*************************************************************
|
||||
* Set timeout if that is being used
|
||||
* Set timeout if that is being used, and we're not using an asynchronous
|
||||
* name resolve.
|
||||
*************************************************************/
|
||||
if((data->set.timeout || data->set.connecttimeout) && !data->set.no_signal) {
|
||||
/*************************************************************
|
||||
@@ -2893,7 +2983,8 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
has been done since then until now. */
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/*************************************************************
|
||||
* Resolve the name of the server or proxy
|
||||
*************************************************************/
|
||||
@@ -2909,9 +3000,11 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
conn->port = conn->remote_port; /* it is the same port */
|
||||
|
||||
/* Resolve target host right on */
|
||||
hostaddr = Curl_resolv(data, conn->name, conn->port);
|
||||
rc = Curl_resolv(conn, conn->name, conn->port, &hostaddr);
|
||||
if(rc == 1)
|
||||
*async = TRUE;
|
||||
|
||||
if(!hostaddr) {
|
||||
else if(!hostaddr) {
|
||||
failf(data, "Couldn't resolve host '%s'", conn->name);
|
||||
result = CURLE_COULDNT_RESOLVE_HOST;
|
||||
/* don't return yet, we need to clean up the timeout first */
|
||||
@@ -2921,15 +3014,19 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
/* This is a proxy that hasn't been resolved yet. */
|
||||
|
||||
/* resolve proxy */
|
||||
hostaddr = Curl_resolv(data, conn->proxyhost, conn->port);
|
||||
rc = Curl_resolv(conn, conn->proxyhost, conn->port, &hostaddr);
|
||||
|
||||
if(!hostaddr) {
|
||||
if(rc == 1)
|
||||
*async = TRUE;
|
||||
|
||||
else if(!hostaddr) {
|
||||
failf(data, "Couldn't resolve proxy '%s'", conn->proxyhost);
|
||||
result = CURLE_COULDNT_RESOLVE_PROXY;
|
||||
/* don't return yet, we need to clean up the timeout first */
|
||||
}
|
||||
}
|
||||
Curl_pgrsTime(data, TIMER_NAMELOOKUP);
|
||||
*addr = hostaddr;
|
||||
|
||||
#ifdef HAVE_ALARM
|
||||
if((data->set.timeout || data->set.connecttimeout) && !data->set.no_signal) {
|
||||
#ifdef HAVE_SIGACTION
|
||||
@@ -2969,25 +3066,44 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
alarm(0); /* just shut it off */
|
||||
}
|
||||
#endif
|
||||
if(result)
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/* SetupConnection() should be called after the name resolve initiated in
|
||||
* CreateConnection() is all done.
|
||||
*/
|
||||
|
||||
static CURLcode SetupConnection(struct connectdata *conn,
|
||||
struct Curl_dns_entry *hostaddr)
|
||||
{
|
||||
struct SessionHandle *data = conn->data;
|
||||
CURLcode result=CURLE_OK;
|
||||
|
||||
Curl_pgrsTime(data, TIMER_NAMELOOKUP);
|
||||
|
||||
if(conn->protocol & PROT_FILE)
|
||||
/* There's nothing in this function to setup if we're only doing
|
||||
a file:// transfer */
|
||||
return result;
|
||||
|
||||
/*************************************************************
|
||||
* Proxy authentication
|
||||
*************************************************************/
|
||||
#if 0 /* This code is not needed anymore (moved to http.c) */
|
||||
if(conn->bits.proxy_user_passwd) {
|
||||
char *authorization;
|
||||
snprintf(data->state.buffer, BUFSIZE, "%s:%s",
|
||||
conn->proxyuser, conn->proxypasswd);
|
||||
if(Curl_base64_encode(data->state.buffer, strlen(data->state.buffer),
|
||||
&authorization) >= 0) {
|
||||
if(conn->allocptr.proxyuserpwd)
|
||||
free(conn->allocptr.proxyuserpwd);
|
||||
Curl_safefree(conn->allocptr.proxyuserpwd);
|
||||
conn->allocptr.proxyuserpwd =
|
||||
aprintf("Proxy-authorization: Basic %s\015\012", authorization);
|
||||
free(authorization);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/*************************************************************
|
||||
* Send user-agent to HTTP proxies even if the target protocol
|
||||
@@ -2996,16 +3112,14 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
if((conn->protocol&PROT_HTTP) ||
|
||||
(data->change.proxy && *data->change.proxy)) {
|
||||
if(data->set.useragent) {
|
||||
if(conn->allocptr.uagent)
|
||||
free(conn->allocptr.uagent);
|
||||
Curl_safefree(conn->allocptr.uagent);
|
||||
conn->allocptr.uagent =
|
||||
aprintf("User-Agent: %s\015\012", data->set.useragent);
|
||||
}
|
||||
}
|
||||
|
||||
if(data->set.encoding) {
|
||||
if(conn->allocptr.accept_encoding)
|
||||
free(conn->allocptr.accept_encoding);
|
||||
Curl_safefree(conn->allocptr.accept_encoding);
|
||||
conn->allocptr.accept_encoding =
|
||||
aprintf("Accept-Encoding: %s\015\012", data->set.encoding);
|
||||
}
|
||||
@@ -3057,26 +3171,60 @@ static CURLcode CreateConnection(struct SessionHandle *data,
|
||||
}
|
||||
|
||||
CURLcode Curl_connect(struct SessionHandle *data,
|
||||
struct connectdata **in_connect)
|
||||
struct connectdata **in_connect,
|
||||
bool *asyncp)
|
||||
{
|
||||
CURLcode code;
|
||||
struct connectdata *conn;
|
||||
struct Curl_dns_entry *dns;
|
||||
|
||||
*asyncp = FALSE; /* assume synchronous resolves by default */
|
||||
|
||||
/* call the stuff that needs to be called */
|
||||
code = CreateConnection(data, in_connect);
|
||||
code = CreateConnection(data, in_connect, &dns, asyncp);
|
||||
|
||||
if(CURLE_OK == code) {
|
||||
/* no error */
|
||||
if(dns || !*asyncp)
|
||||
/* If an address is available it means that we already have the name
|
||||
resolved, OR it isn't async.
|
||||
If so => continue connecting from here */
|
||||
code = SetupConnection(*in_connect, dns);
|
||||
/* else
|
||||
response will be received and treated async wise */
|
||||
}
|
||||
|
||||
if(CURLE_OK != code) {
|
||||
/* We're not allowed to return failure with memory left allocated
|
||||
in the connectdata struct, free those here */
|
||||
conn = (struct connectdata *)*in_connect;
|
||||
if(conn) {
|
||||
Curl_disconnect(conn); /* close the connection */
|
||||
*in_connect = NULL; /* return a NULL */
|
||||
if(*in_connect) {
|
||||
Curl_disconnect(*in_connect); /* close the connection */
|
||||
*in_connect = NULL; /* return a NULL */
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
/* Call this function after Curl_connect() has returned async=TRUE and
|
||||
then a successful name resolve has been received */
|
||||
CURLcode Curl_async_resolved(struct connectdata *conn)
|
||||
{
|
||||
#ifdef USE_ARES
|
||||
CURLcode code = SetupConnection(conn, conn->async.dns);
|
||||
|
||||
if(code)
|
||||
/* We're not allowed to return failure with memory left allocated
|
||||
in the connectdata struct, free those here */
|
||||
Curl_disconnect(conn); /* close the connection */
|
||||
|
||||
return code;
|
||||
#else
|
||||
(void)conn;
|
||||
return CURLE_OK;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
CURLcode Curl_done(struct connectdata *conn)
|
||||
{
|
||||
struct SessionHandle *data=conn->data;
|
||||
@@ -3153,11 +3301,28 @@ CURLcode Curl_do(struct connectdata **connp)
|
||||
conn->bits.close = TRUE; /* enforce close of this connetion */
|
||||
result = Curl_done(conn); /* we are so done with this */
|
||||
if(CURLE_OK == result) {
|
||||
bool async;
|
||||
/* Now, redo the connect and get a new connection */
|
||||
result = Curl_connect(data, connp);
|
||||
if(CURLE_OK == result)
|
||||
result = Curl_connect(data, connp, &async);
|
||||
if(CURLE_OK == result) {
|
||||
/* We have connected or sent away a name resolve query fine */
|
||||
|
||||
if(async) {
|
||||
/* Now, if async is TRUE here, we need to wait for the name
|
||||
to resolve */
|
||||
result = Curl_wait_for_resolv(conn, NULL);
|
||||
if(result)
|
||||
return result;
|
||||
|
||||
/* Resolved, continue with the connection */
|
||||
result = Curl_async_resolved(conn);
|
||||
if(result)
|
||||
return result;
|
||||
}
|
||||
|
||||
/* ... finally back to actually retry the DO phase */
|
||||
result = conn->curl_do(*connp);
|
||||
result = conn->curl_do(conn);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -30,7 +30,9 @@
CURLcode Curl_open(struct SessionHandle **curl);
CURLcode Curl_setopt(struct SessionHandle *data, CURLoption option, ...);
CURLcode Curl_close(struct SessionHandle *data); /* opposite of curl_open() */
CURLcode Curl_connect(struct SessionHandle *, struct connectdata **);
CURLcode Curl_connect(struct SessionHandle *, struct connectdata **,
                      bool *async);
CURLcode Curl_async_resolved(struct connectdata *conn);
CURLcode Curl_do(struct connectdata **);
CURLcode Curl_do_more(struct connectdata *);
CURLcode Curl_done(struct connectdata *);
@@ -90,6 +90,10 @@
#include <gssapi.h>
#endif

#ifdef USE_ARES
#include <ares.h>
#endif

/* Download buffer size, keep it fairly big for speed reasons */
#define BUFSIZE CURL_MAX_WRITE_SIZE

@@ -364,6 +368,16 @@ struct Curl_transfer_keeper {
  bool ignorebody;  /* we read a response-body but we ignore it! */
};

#ifdef USE_ARES
struct Curl_async {
  char *hostname;
  int port;
  struct Curl_dns_entry *dns;
  bool done;  /* set TRUE when the lookup is complete */
  int status; /* if done is TRUE, this is the status from the callback */
};
#endif

/*
 * The connectdata struct contains all fields and variables that should be
 * unique for an entire connection.
@@ -538,6 +552,11 @@ struct connectdata {
                        because it authenticates connections, not
                        single requests! */
  struct ntlmdata proxyntlm; /* NTLM data for proxy */

#ifdef USE_ARES
  /* data used for the asynch name resolve callback */
  struct Curl_async async;
#endif
};

/* The end of connectdata. */
@@ -547,9 +566,12 @@ struct connectdata {
 */
struct PureInfo {
  int httpcode;
  int httpproxycode;
  int httpversion;
  time_t filetime; /* If requested, this is might get set. Set to -1 if
                      the time was unretrievable */
  long filetime; /* If requested, this is might get set. Set to -1 if the time
                    was unretrievable. We cannot have this of type time_t,
                    since time_t is unsigned on several platforms such as
                    OpenVMS. */
  long header_size;  /* size of read header(s) in bytes */
  long request_size; /* the amount of bytes sent in the request(s) */

@@ -665,8 +687,17 @@ struct UrlState {
  struct negotiatedata negotiate;
#endif

  long authwant;  /* inherited from what the user set with CURLOPT_HTTPAUTH */
  long authstage; /* 0 - authwant and authavail are still not initialized
                     401 - web authentication is performed
                     407 - proxy authentication is performed */
  long authwant;  /* initially set to authentication methods requested by
                     client (either with CURLOPT_HTTPAUTH or CURLOPT_PROXYAUTH
                     depending on authstage) */
  long authavail; /* what the server reports */

#ifdef USE_ARES
  ares_channel areschannel; /* for name resolves */
#endif
};


@@ -716,6 +747,7 @@ struct UserDefined {
  long use_port;   /* which port to use (when not using default) */
  char *userpwd;   /* <user:password>, if used */
  long httpauth;   /* what kind of HTTP authentication to use (bitmask) */
  long proxyauth;  /* what kind of proxy authentication to use (bitmask) */
  char *set_range; /* range, if used. See README for detailed specification
                      on this syntax. */
  long followlocation; /* as in HTTP Location: */
@@ -793,6 +825,7 @@ struct UserDefined {
  bool ftp_append;
  bool ftp_ascii;
  bool ftp_list_only;
  bool ftp_create_missing_dirs;
  bool ftp_use_port;
  bool hide_progress;
  bool http_fail_on_error;
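The new proxyauth member is what CURLOPT_PROXYAUTH feeds (the curl tool's new --proxy-ntlm flag in src/main.c below maps to the same option). A hypothetical application-side sketch, with placeholder proxy address and credentials, not code from the patch:

    #include <curl/curl.h>

    int main(void)
    {
      CURL *curl;

      curl_global_init(CURL_GLOBAL_ALL);
      curl = curl_easy_init();

      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
      curl_easy_setopt(curl, CURLOPT_PROXY, "proxy.example.com:8080");
      curl_easy_setopt(curl, CURLOPT_PROXYUSERPWD, "user:secret");
      /* request NTLM for the proxy's 407 challenge; when CURLOPT_PROXYAUTH
         is never set, the default stays CURLAUTH_BASIC */
      curl_easy_setopt(curl, CURLOPT_PROXYAUTH, CURLAUTH_NTLM);

      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
      curl_global_cleanup();
      return 0;
    }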
@@ -182,6 +182,9 @@ static curl_version_info_data version_info = {
#endif
#ifdef CURLDEBUG
  | CURL_VERSION_DEBUG
#endif
#ifdef USE_ARES
  | CURL_VERSION_ASYNCHDNS
#endif
  ,
  NULL, /* ssl_version */
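With the CURL_VERSION_ASYNCHDNS bit added here, an application can detect an ares-enabled build at run time through the standard curl_version_info() call; a short sketch (not from the patch):

    #include <stdio.h>
    #include <curl/curl.h>

    int main(void)
    {
      curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);

      if(info->features & CURL_VERSION_ASYNCHDNS)
        printf("asynchronous DNS (ares) support is built in\n");
      else
        printf("name resolves block in this build\n");
      return 0;
    }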
@@ -46,4 +46,9 @@ MKHELP=$(top_srcdir)/src/mkhelp.pl
# This generates the hugehelp.c file
hugehelp.c: $(README) $(MANPAGE) mkhelp.pl
	rm -f hugehelp.c
	$(NROFF) -man $(MANPAGE) | $(PERL) -s $(MKHELP) $(MKHELPOPT) $(README) > hugehelp.c
	$(NROFF) -man $(MANPAGE) | $(PERL) $(MKHELP) $(MKHELPOPT) $(README) > hugehelp.c

# for distribution, generate an uncompressed help file!
dist-hook:
	chmod 0644 $(distdir)/hugehelp.c
	$(NROFF) -man $(MANPAGE) | $(PERL) $(MKHELP) $(README) > $(distdir)/hugehelp.c
@@ -24,10 +24,14 @@ LINKR = link.exe /incremental:no /libpath:"../lib"
|
||||
CCD = cl.exe /MDd /Gm /ZI /Od /D "_DEBUG" /GZ
|
||||
LINKD = link.exe /incremental:yes /debug /libpath:"../lib"
|
||||
|
||||
CFLAGS = /I "../include" /nologo /W3 /GX /D "WIN32" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
|
||||
LFLAGS = /nologo /out:$(PROGRAM_NAME) /subsystem:console /machine:I386
|
||||
LINKLIBS = ws2_32.lib libcurl.lib winmm.lib
|
||||
LINKLIBS_DEBUG = ws2_32.lib libcurld.lib winmm.lib
|
||||
!IFNDEF ZLIB_PATH
|
||||
ZLIB_PATH = ../../zlib-1.1.4
|
||||
!ENDIF
|
||||
|
||||
CFLAGS = /I "../include" /I "$(ZLIB_PATH)" /nologo /W3 /GX /D "WIN32" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
|
||||
LFLAGS = /nologo /libpath:"$(ZLIB_PATH)" /out:$(PROGRAM_NAME) /subsystem:console /machine:I386
|
||||
LINKLIBS = ws2_32.lib libcurl.lib winmm.lib zlib.lib
|
||||
LINKLIBS_DEBUG = ws2_32.lib libcurld.lib winmm.lib zlib.lib
|
||||
|
||||
RELEASE_OBJS= \
|
||||
hugehelpr.obj \
|
||||
@@ -46,17 +50,16 @@ LINK_OBJS= \
|
||||
writeout.obj \
|
||||
urlglob.obj \
|
||||
main.obj
|
||||
|
||||
|
||||
######################
|
||||
# release-ssl
|
||||
|
||||
!IF "$(CFG)" == "release-ssl"
|
||||
!IFNDEF OPENSSL_PATH
|
||||
OPENSSL_PATH = ../../openssl-0.9.6
|
||||
OPENSSL_PATH = ../../openssl-0.9.7a
|
||||
!ENDIF
|
||||
LFLAGSSSL = /LIBPATH:"$(OPENSSL_PATH)/out32"
|
||||
SSLLIBS = libeay32.lib ssleay32.lib RSAglue.lib gdi32.lib
|
||||
SSLLIBS = libeay32.lib ssleay32.lib gdi32.lib
|
||||
LINKLIBS = $(LINKLIBS) $(SSLLIBS)
|
||||
LFLAGS = $(LFLAGS) $(LFLAGSSSL)
|
||||
!ENDIF
|
||||
@@ -100,6 +103,3 @@ clean:
|
||||
|
||||
distrib: clean
|
||||
-@erase $(PROGRAM_NAME)
|
||||
|
||||
hugehelp.c: hugehelp.c.cvs
|
||||
copy hugehelp.c.cvs hugehelp.c
|
||||
|
310  src/main.c
@@ -364,106 +364,112 @@ struct getout {
|
||||
|
||||
static void help(void)
|
||||
{
|
||||
printf(CURL_ID "%s\n"
|
||||
"Usage: curl [options...] <url>\n"
|
||||
"Options: (H) means HTTP/HTTPS only, (F) means FTP only\n"
|
||||
" -a/--append Append to target file when uploading (F)\n"
|
||||
" -A/--user-agent <string> User-Agent to send to server (H)\n"
|
||||
" --anyauth Tell curl to choose authentication method (H)\n"
|
||||
" -b/--cookie <name=string/file> Cookie string or file to read cookies from (H)\n"
|
||||
" --basic Enable HTTP Basic Authentication (H)\n"
|
||||
" -B/--use-ascii Use ASCII/text transfer\n",
|
||||
curl_version());
|
||||
puts(" -c/--cookie-jar <file> Write all cookies to this file after operation (H)\n"
|
||||
" -C/--continue-at <offset> Specify absolute resume offset\n"
|
||||
" -d/--data <data> HTTP POST data (H)\n"
|
||||
" --data-ascii <data> HTTP POST ASCII data (H)\n"
|
||||
" --data-binary <data> HTTP POST binary data (H)\n"
|
||||
" --negotiate Enable HTTP Negotiate Authentication (H - req GSS-lib)\n"
|
||||
" --digest Enable HTTP Digest Authentication (H)");
|
||||
puts(" --disable-eprt Prevents curl from using EPRT or LPRT (F)\n"
|
||||
" --disable-epsv Prevents curl from using EPSV (F)\n"
|
||||
" -D/--dump-header <file> Write the headers to this file\n"
|
||||
" --egd-file <file> EGD socket path for random data (SSL)\n"
|
||||
int i;
|
||||
const char *help[]={
|
||||
"Usage: curl [options...] <url>",
|
||||
"Options: (H) means HTTP/HTTPS only, (F) means FTP only",
|
||||
" -a/--append Append to target file when uploading (F)",
|
||||
" -A/--user-agent <string> User-Agent to send to server (H)",
|
||||
" --anyauth Tell curl to choose authentication method (H)",
|
||||
" -b/--cookie <name=string/file> Cookie string or file to read cookies from (H)",
|
||||
" --basic Enable HTTP Basic Authentication (H)",
|
||||
" -B/--use-ascii Use ASCII/text transfer",
|
||||
" -c/--cookie-jar <file> Write all cookies to this file after operation (H)",
|
||||
" -C/--continue-at <offset> Specify absolute resume offset",
|
||||
" -d/--data <data> HTTP POST data (H)",
|
||||
" --data-ascii <data> HTTP POST ASCII data (H)",
|
||||
" --data-binary <data> HTTP POST binary data (H)",
|
||||
" --negotiate Enable HTTP Negotiate Authentication (H - req GSS-lib)",
|
||||
" --digest Enable HTTP Digest Authentication (H)",
|
||||
" --disable-eprt Prevents curl from using EPRT or LPRT (F)",
|
||||
" --disable-epsv Prevents curl from using EPSV (F)",
|
||||
" -D/--dump-header <file> Write the headers to this file",
|
||||
" --egd-file <file> EGD socket path for random data (SSL)",
|
||||
#ifdef USE_ENVIRONMENT
|
||||
" --environment Write result codes to environment variables (RISC OS)\n"
|
||||
" --environment Write result codes to environment variables (RISC OS)",
|
||||
#endif
|
||||
" -e/--referer Referer page (H)");
|
||||
puts(" -E/--cert <cert[:passwd]> Specifies your certificate file and password (HTTPS)\n"
|
||||
" --cert-type <type> Specifies certificate file type (DER/PEM/ENG) (HTTPS)\n"
|
||||
" --key <key> Specifies private key file (HTTPS)\n"
|
||||
" --key-type <type> Specifies private key file type (DER/PEM/ENG) (HTTPS)\n"
|
||||
" --pass <pass> Specifies passphrase for the private key (HTTPS)");
|
||||
puts(" --engine <eng> Specifies the crypto engine to use (HTTPS)\n"
|
||||
" --cacert <file> CA certificate to verify peer against (SSL)\n"
|
||||
" --capath <directory> CA directory (made using c_rehash) to verify\n"
|
||||
" peer against (SSL)\n"
|
||||
" --ciphers <list> What SSL ciphers to use (SSL)\n"
|
||||
" --compressed Request a compressed response (using deflate or gzip).");
|
||||
puts(" --connect-timeout <seconds> Maximum time allowed for connection\n"
|
||||
" --create-dirs Create the necessary local directory hierarchy\n"
|
||||
" --crlf Convert LF to CRLF in upload. Useful for MVS (OS/390)\n"
|
||||
" -f/--fail Fail silently (no output at all) on errors (H)\n"
|
||||
" -F/--form <name=content> Specify HTTP POST data (H)\n"
|
||||
" -g/--globoff Disable URL sequences and ranges using {} and []\n"
|
||||
" -G/--get Send the -d data with a HTTP GET (H)");
|
||||
puts(" -h/--help This help text\n"
|
||||
" -H/--header <line> Custom header to pass to server. (H)\n"
|
||||
" -i/--include Include the HTTP-header in the output (H)\n"
|
||||
" -I/--head Fetch document info only (HTTP HEAD/FTP SIZE)");
|
||||
puts(" -j/--junk-session-cookies Ignore session cookies read from file (H)\n"
|
||||
" --interface <interface> Specify the interface to be used\n"
|
||||
" --krb4 <level> Enable krb4 with specified security level (F)\n"
|
||||
" -k/--insecure Allow curl to connect to SSL sites without certs (H)\n"
|
||||
" -K/--config Specify which config file to read\n"
|
||||
" -l/--list-only List only names of an FTP directory (F)\n"
|
||||
" --limit-rate <rate> Limit how fast transfers to allow");
|
||||
puts(" -L/--location Follow Location: hints (H)\n"
|
||||
" --location-trusted Same, and continue to send authentication when \n"
|
||||
" following locations, even when hostname changed\n"
|
||||
" -m/--max-time <seconds> Maximum time allowed for the transfer\n"
|
||||
" -M/--manual Display huge help text\n"
|
||||
" -n/--netrc Must read .netrc for user name and password\n"
|
||||
" --netrc-optional Use either .netrc or URL; overrides -n\n"
|
||||
" --ntlm Enable HTTP NTLM authentication (H)");
|
||||
puts(" -N/--no-buffer Disables the buffering of the output stream");
|
||||
puts(" -o/--output <file> Write output to <file> instead of stdout\n"
|
||||
" -O/--remote-name Write output to a file named as the remote file\n"
|
||||
" -p/--proxytunnel Perform non-HTTP services through a HTTP proxy\n"
|
||||
" -P/--ftpport <address> Use PORT with address instead of PASV when ftping (F)\n"
|
||||
" -q When used as the first parameter disables .curlrc\n"
|
||||
" -Q/--quote <cmd> Send QUOTE command to FTP before file transfer (F)");
|
||||
puts(" -r/--range <range> Retrieve a byte range from a HTTP/1.1 or FTP server\n"
|
||||
" -R/--remote-time Set the remote file's time on the local output\n"
|
||||
" -s/--silent Silent mode. Don't output anything\n"
|
||||
" -S/--show-error Show error. With -s, make curl show errors when they occur");
|
||||
puts(" --stderr <file> Where to redirect stderr. - means stdout.\n"
|
||||
" -t/--telnet-option <OPT=val> Set telnet option\n"
|
||||
" --trace <file> Dump a network/debug trace to the given file\n"
|
||||
" --trace-ascii <file> Like --trace but without the hex output\n"
|
||||
" -T/--upload-file <file> Transfer/upload <file> to remote site\n"
|
||||
" --url <URL> Another way to specify URL to work with");
|
||||
puts(" -u/--user <user[:password]> Specify user and password to use\n"
|
||||
" Overrides -n and --netrc-optional\n"
|
||||
" -U/--proxy-user <user[:password]> Specify Proxy authentication\n"
|
||||
" -v/--verbose Makes the operation more talkative\n"
|
||||
" -V/--version Outputs version number then quits");
|
||||
" -e/--referer Referer page (H)",
|
||||
" -E/--cert <cert[:passwd]> Specifies your certificate file and password (SSL)",
|
||||
" --cert-type <type> Specifies certificate file type (DER/PEM/ENG) (SSL)",
|
||||
" --key <key> Specifies private key file (SSL)",
|
||||
" --key-type <type> Specifies private key file type (DER/PEM/ENG) (SSL)",
|
||||
" --pass <pass> Specifies passphrase for the private key (SSL)",
|
||||
" --engine <eng> Specifies the crypto engine to use (HTTPS)",
|
||||
" --cacert <file> CA certificate to verify peer against (SSL)",
|
||||
" --capath <directory> CA directory (made using c_rehash) to verify",
|
||||
" peer against (SSL)",
|
||||
" --ciphers <list> What SSL ciphers to use (SSL)",
|
||||
" --compressed Request a compressed response (using deflate or gzip).",
|
||||
" --connect-timeout <seconds> Maximum time allowed for connection",
|
||||
" --create-dirs Create the necessary local directory hierarchy",
|
||||
" --crlf Convert LF to CRLF in upload. Useful for MVS (OS/390)",
|
||||
" -f/--fail Fail silently (no output at all) on errors (H)",
|
||||
" --ftp-create-dirs Create the remote dirs if not present (F)",
|
||||
" -F/--form <name=content> Specify HTTP POST data (H)",
|
||||
" -g/--globoff Disable URL sequences and ranges using {} and []",
|
||||
" -G/--get Send the -d data with a HTTP GET (H)",
|
||||
" -h/--help This help text",
|
||||
" -H/--header <line> Custom header to pass to server. (H)",
|
||||
" -i/--include Include the HTTP-header in the output (H)",
|
||||
" -I/--head Fetch document info only (HTTP HEAD/FTP SIZE)",
|
||||
" -j/--junk-session-cookies Ignore session cookies read from file (H)",
|
||||
" --interface <interface> Specify the interface to be used",
|
||||
" --krb4 <level> Enable krb4 with specified security level (F)",
|
||||
" -k/--insecure Allow curl to connect to SSL sites without certs (H)",
|
||||
" -K/--config Specify which config file to read",
|
||||
" -l/--list-only List only names of an FTP directory (F)",
|
||||
" --limit-rate <rate> Limit how fast transfers to allow",
|
||||
" -L/--location Follow Location: hints (H)",
|
||||
" --location-trusted Same, and continue to send authentication when ",
|
||||
" following locations, even when hostname changed",
|
||||
" -m/--max-time <seconds> Maximum time allowed for the transfer",
|
||||
" --max-redirs <num> Set maximum number of redirections allowed (H)",
|
||||
" -M/--manual Display huge help text",
|
||||
" -n/--netrc Must read .netrc for user name and password",
|
||||
" --netrc-optional Use either .netrc or URL; overrides -n",
|
||||
" --ntlm Enable HTTP NTLM authentication (H)",
|
||||
" -N/--no-buffer Disables the buffering of the output stream",
|
||||
" -o/--output <file> Write output to <file> instead of stdout",
|
||||
" -O/--remote-name Write output to a file named as the remote file",
|
||||
" -p/--proxytunnel Perform non-HTTP services through a HTTP proxy",
|
||||
" --proxy-ntlm Use NTLM authentication on the proxy (H)",
|
||||
" -P/--ftpport <address> Use PORT with address instead of PASV when ftping (F)",
|
||||
" -q When used as the first parameter disables .curlrc",
|
||||
" -Q/--quote <cmd> Send QUOTE command to FTP before file transfer (F)",
|
||||
" -r/--range <range> Retrieve a byte range from a HTTP/1.1 or FTP server",
|
||||
" --random-file <file> File to use for reading random data from (SSL)",
|
||||
" -R/--remote-time Set the remote file's time on the local output",
|
||||
" -s/--silent Silent mode. Don't output anything",
|
||||
" -S/--show-error Show error. With -s, make curl show errors when they occur",
|
||||
" --stderr <file> Where to redirect stderr. - means stdout.",
|
||||
" -t/--telnet-option <OPT=val> Set telnet option",
|
||||
" --trace <file> Dump a network/debug trace to the given file",
|
||||
" --trace-ascii <file> Like --trace but without the hex output",
|
||||
" -T/--upload-file <file> Transfer/upload <file> to remote site",
|
||||
" --url <URL> Another way to specify URL to work with",
|
||||
" -u/--user <user[:password]> Specify user and password to use",
|
||||
" Overrides -n and --netrc-optional",
|
||||
" -U/--proxy-user <user[:password]> Specify Proxy authentication",
|
||||
" -v/--verbose Makes the operation more talkative",
|
||||
" -V/--version Outputs version number then quits",
|
||||
#ifdef __DJGPP__
|
||||
puts(" --wdebug Turns on WATT-32 debugging under DJGPP");
|
||||
" --wdebug Turns on WATT-32 debugging under DJGPP",
|
||||
#endif
|
||||
puts(" -w/--write-out [format] What to output after completion\n"
|
||||
" -x/--proxy <host[:port]> Use proxy. (Default port is 1080)\n"
|
||||
" --random-file <file> File to use for reading random data from (SSL)\n"
|
||||
" -X/--request <command> Specific request command to use");
|
||||
puts(" -y/--speed-time Time needed to trig speed-limit abort. Defaults to 30\n"
|
||||
" -Y/--speed-limit Stop transfer if below speed-limit for 'speed-time' secs\n"
|
||||
" -z/--time-cond <time> Includes a time condition to the server (H)\n"
|
||||
" -Z/--max-redirs <num> Set maximum number of redirections allowed (H)\n"
|
||||
" -0/--http1.0 Force usage of HTTP 1.0 (H)\n"
|
||||
" -1/--tlsv1 Force usage of TLSv1 (H)\n"
|
||||
" -2/--sslv2 Force usage of SSLv2 (H)\n"
|
||||
" -3/--sslv3 Force usage of SSLv3 (H)");
|
||||
puts(" -#/--progress-bar Display transfer progress as a progress bar");
|
||||
" -w/--write-out [format] What to output after completion",
|
||||
" -x/--proxy <host[:port]> Use proxy. (Default port is 1080)",
|
||||
" -X/--request <command> Specific request command to use",
|
||||
" -y/--speed-time Time needed to trig speed-limit abort. Defaults to 30",
|
||||
" -Y/--speed-limit Stop transfer if below speed-limit for 'speed-time' secs",
|
||||
" -z/--time-cond <time> Includes a time condition to the server (H)",
|
||||
" -0/--http1.0 Force usage of HTTP 1.0 (H)",
|
||||
" -1/--tlsv1 Force usage of TLSv1 (H)",
|
||||
" -2/--sslv2 Force usage of SSLv2 (H)",
|
||||
" -3/--sslv3 Force usage of SSLv3 (H)",
|
||||
" -#/--progress-bar Display transfer progress as a progress bar",
|
||||
NULL
|
||||
};
|
||||
for(i=0; help[i]; i++)
|
||||
puts(help[i]);
|
||||
}
|
||||
|
||||
struct LongShort {
|
||||
@@ -540,6 +546,8 @@ struct Configurable {
|
||||
bool use_httpget;
|
||||
bool insecure_ok; /* set TRUE to allow insecure SSL connects */
|
||||
bool create_dirs;
|
||||
bool ftp_create_dirs;
|
||||
bool proxyntlm;
|
||||
|
||||
char *writeout; /* %-styled format string to output */
|
||||
bool writeenv; /* write results to environment, if available */
|
||||
@@ -1085,6 +1093,10 @@ static ParameterError getparameter(char *flag, /* f or -long-flag */
|
||||
#ifdef __DJGPP__
|
||||
{"5p", "wdebug", FALSE},
|
||||
#endif
|
||||
{"5q", "ftp-create-dirs", FALSE},
|
||||
{"5r", "create-dirs", FALSE},
|
||||
{"5s", "max-redirs", TRUE},
|
||||
{"5t", "proxy-ntlm", FALSE},
|
||||
{"0", "http1.0", FALSE},
|
||||
{"1", "tlsv1", FALSE},
|
||||
{"2", "sslv2", FALSE},
|
||||
@@ -1092,7 +1104,6 @@ static ParameterError getparameter(char *flag, /* f or -long-flag */
|
||||
{"a", "append", FALSE},
|
||||
{"A", "user-agent", TRUE},
|
||||
{"b", "cookie", TRUE},
|
||||
{"B", "ftp-ascii", FALSE}, /* this long format is OBSOLETE now! */
|
||||
{"B", "use-ascii", FALSE},
|
||||
{"c", "cookie-jar", TRUE},
|
||||
{"C", "continue-at", TRUE},
|
||||
@@ -1151,9 +1162,7 @@ static ParameterError getparameter(char *flag, /* f or -long-flag */
|
||||
{"Y", "speed-limit", TRUE},
|
||||
{"y", "speed-time", TRUE},
|
||||
{"z", "time-cond", TRUE},
|
||||
{"Z", "max-redirs", TRUE},
|
||||
{"#", "progress-bar",FALSE},
|
||||
{"@", "create-dirs", FALSE},
|
||||
};
|
||||
|
||||
if(('-' != flag[0]) ||
|
||||
@@ -1334,6 +1343,22 @@ static ParameterError getparameter(char *flag, /* f or -long-flag */
|
||||
dbug_init();
|
||||
break;
|
||||
#endif
|
||||
case 'q': /* --ftp-create-dirs */
|
||||
config->ftp_create_dirs ^= TRUE;
|
||||
break;
|
||||
|
||||
case 'r': /* --create-dirs */
|
||||
config->create_dirs = TRUE;
|
||||
break;
|
||||
|
||||
case 's': /* --max-redirs */
|
||||
/* specified max no of redirects (http(s)) */
|
||||
config->maxredirs = atoi(nextarg);
|
||||
break;
|
||||
|
||||
case 't': /* --proxy-ntlm */
|
||||
config->proxyntlm ^= TRUE;
|
||||
break;
|
||||
|
||||
default: /* the URL! */
|
||||
{
|
||||
@@ -1774,7 +1799,8 @@ static ParameterError getparameter(char *flag, /* f or -long-flag */
|
||||
{"libz", CURL_VERSION_LIBZ},
|
||||
{"NTLM", CURL_VERSION_NTLM},
|
||||
{"GSS-Negotiate", CURL_VERSION_GSSNEGOTIATE},
|
||||
{"Debug", CURL_VERSION_DEBUG}
|
||||
{"Debug", CURL_VERSION_DEBUG},
|
||||
{"AsynchDNS", CURL_VERSION_ASYNCHDNS}
|
||||
};
|
||||
printf("Features: ");
|
||||
for(i=0; i<sizeof(feats)/sizeof(feats[0]); i++) {
|
||||
@@ -1857,15 +1883,6 @@ static ParameterError getparameter(char *flag, /* f or -long-flag */
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 'Z':
|
||||
/* specified max no of redirects (http(s)) */
|
||||
config->maxredirs = atoi(nextarg);
|
||||
break;
|
||||
|
||||
case '@':
|
||||
config->create_dirs = TRUE;
|
||||
break;
|
||||
|
||||
default: /* unknown flag */
|
||||
return PARAM_OPTION_UNKNOWN;
|
||||
}
|
||||
@@ -2469,7 +2486,7 @@ operate(struct Configurable *config, int argc, char *argv[])
|
||||
FILE *infd = stdin;
|
||||
FILE *headerfilep = NULL;
|
||||
char *urlbuffer=NULL;
|
||||
int infilesize=-1; /* -1 means unknown */
|
||||
long infilesize=-1; /* -1 means unknown */
|
||||
bool stillflags=TRUE;
|
||||
|
||||
bool allocuseragent=FALSE;
|
||||
@@ -2488,6 +2505,11 @@ operate(struct Configurable *config, int argc, char *argv[])
|
||||
curl_free(env);
|
||||
curl_memdebug("memdump");
|
||||
}
|
||||
env = curl_getenv("CURL_MEMLIMIT");
|
||||
if(env) {
|
||||
curl_memlimit(atoi(env));
|
||||
curl_free(env);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* we get libcurl info right away */
|
||||
@@ -2631,13 +2653,15 @@ operate(struct Configurable *config, int argc, char *argv[])
|
||||
|
||||
/*
|
||||
* Get a curl handle to use for all forthcoming curl transfers. Cleanup
|
||||
* when all transfers are done. This is supported with libcurl 7.7 and
|
||||
* should not be attempted on previous versions.
|
||||
* when all transfers are done.
|
||||
*/
|
||||
curl = curl_easy_init();
|
||||
if(!curl)
|
||||
return CURLE_FAILED_INIT;
|
||||
|
||||
/* After this point, we should call curl_easy_cleanup() if we decide to bail
|
||||
* out from this function! */
|
||||
|
||||
urlnode = config->url_list;
|
||||
|
||||
if(config->headerfile) {
|
||||
@@ -2653,7 +2677,7 @@ operate(struct Configurable *config, int argc, char *argv[])
|
||||
}
|
||||
|
||||
/* loop through the list of given URLs */
|
||||
while(urlnode) {
|
||||
while(urlnode && !res) {
|
||||
|
||||
/* get the full URL (it might be NULL) */
|
||||
url=urlnode->url;
|
||||
@@ -2680,8 +2704,10 @@ operate(struct Configurable *config, int argc, char *argv[])
|
||||
res = glob_url(&urls, url, &urlnum,
|
||||
config->showerror?
|
||||
(config->errors?config->errors:stderr):NULL);
|
||||
if(res != CURLE_OK)
|
||||
return res;
|
||||
if(res != CURLE_OK) {
|
||||
clean_getout(config);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2714,10 +2740,17 @@ operate(struct Configurable *config, int argc, char *argv[])
|
||||
else
|
||||
pc=url;
|
||||
pc = strrchr(pc, '/');
|
||||
outfile = (char *) NULL == pc ? NULL : strdup(pc+1) ;
|
||||
if(!outfile) {
|
||||
|
||||
if(pc) {
|
||||
/* duplicate the string beyond the slash */
|
||||
pc++;
|
||||
outfile = *pc ? strdup(pc): NULL;
|
||||
}
|
||||
if(!outfile || !*outfile) {
|
||||
helpf("Remote file name has no length!\n");
|
||||
return CURLE_WRITE_ERROR;
|
||||
res = CURLE_WRITE_ERROR;
|
||||
free(url);
|
||||
break;
|
||||
}
|
||||
#if defined(__DJGPP__)
|
||||
{
|
||||
@@ -2735,14 +2768,22 @@ operate(struct Configurable *config, int argc, char *argv[])
|
||||
char *storefile = outfile;
|
||||
outfile = glob_match_url(storefile, urls);
|
||||
free(storefile);
|
||||
if(!outfile) {
|
||||
/* bad globbing */
|
||||
fprintf(stderr, "bad output glob!\n");
|
||||
free(url);
|
||||
res = CURLE_FAILED_INIT;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Create the directory hierarchy, if not pre-existant to a multiple
|
||||
file output call */
|
||||
|
||||
if(config->create_dirs)
|
||||
if (-1 == create_dir_hierarchy(outfile))
|
||||
if (-1 == create_dir_hierarchy(outfile)) {
|
||||
return CURLE_WRITE_ERROR;
|
||||
}
|
||||
|
||||
if(config->resume_from_current) {
|
||||
/* We're told to continue from where we are now. Get the
|
||||
@@ -3021,7 +3062,8 @@ operate(struct Configurable *config, int argc, char *argv[])
|
||||
config->headerfile?&heads:NULL);
|
||||
curl_easy_setopt(curl, CURLOPT_COOKIEFILE, config->cookiefile);
|
||||
/* cookie jar was added in 7.9 */
|
||||
curl_easy_setopt(curl, CURLOPT_COOKIEJAR, config->cookiejar);
|
||||
if(config->cookiejar)
|
||||
curl_easy_setopt(curl, CURLOPT_COOKIEJAR, config->cookiejar);
|
||||
/* cookie session added in 7.9.7 */
|
||||
curl_easy_setopt(curl, CURLOPT_COOKIESESSION, config->cookiesession);
|
||||
|
||||
@@ -3085,6 +3127,12 @@ operate(struct Configurable *config, int argc, char *argv[])
|
||||
curl_easy_setopt(curl, CURLOPT_ENCODING,
|
||||
(config->encoding) ? "" : NULL);
|
||||
|
||||
/* new in curl 7.10.7 */
|
||||
curl_easy_setopt(curl, CURLOPT_FTP_CREATE_MISSING_DIRS,
|
||||
config->ftp_create_dirs);
|
||||
if(config->proxyntlm)
|
||||
curl_easy_setopt(curl, CURLOPT_PROXYAUTH, CURLAUTH_NTLM);
|
||||
|
||||
res = curl_easy_perform(curl);
|
||||
|
||||
if((config->progressmode == CURL_PROGRESS_BAR) &&
|
||||
@@ -3230,19 +3278,17 @@ static char *my_get_line(FILE *fp)
|
||||
char *nl = NULL;
|
||||
char *retval = NULL;
|
||||
|
||||
do
|
||||
{
|
||||
if (NULL == fgets(buf, sizeof(buf), fp))
|
||||
do {
|
||||
if (NULL == fgets(buf, sizeof(buf), fp))
|
||||
break;
|
||||
if (NULL == retval)
|
||||
retval = strdup(buf);
|
||||
else {
|
||||
if (NULL == (retval = realloc(retval,
|
||||
strlen(retval) + strlen(buf) + 1)))
|
||||
break;
|
||||
if (NULL == retval)
|
||||
retval = strdup(buf);
|
||||
else
|
||||
{
|
||||
if (NULL == (retval = realloc(retval,
|
||||
strlen(retval) + strlen(buf) + 1)))
|
||||
break;
|
||||
strcat(retval, buf);
|
||||
}
|
||||
strcat(retval, buf);
|
||||
}
|
||||
}
|
||||
while (NULL == (nl = strchr(retval, '\n')));
|
||||
|
||||
|
@@ -6,6 +6,12 @@
# THEY DON'T FIT ME :-)

# Get readme file as parameter:

if($ARGV[0] eq "-c") {
    $c=1;
    shift @ARGV;
}

my $README = $ARGV[0];

if($README eq "") {
@@ -20,6 +26,7 @@ push @out, " / __| | | | |_) | | \n";
push @out, " | (__| |_| | _ <| |___ \n";
push @out, " \\___|\\___/|_| \\_\\_____|\n";

my $olen=0;
while (<STDIN>) {
    my $line = $_;

@@ -70,16 +77,19 @@ close(READ);
if($c) {
    my @test = `gzip --version 2>&1`;
    if($test[0] =~ /gzip/) {
        open(GZIP, "|gzip -9 >dumpit.gz") ||
            die "can't run gzip, try without -c";
        open(GZIP, ">dumpit") ||
            die "can't create the dumpit file, try without -c";
        binmode GZIP;
        for(@out) {
            print GZIP $_;
            $gzip += length($_);
        }
        close(GZIP);

        system("gzip --best --no-name dumpit");

        open(GZIP, "<dumpit.gz");
        open(GZIP, "<dumpit.gz") ||
            die "can't read the dumpit.gz file, try without -c";
        binmode GZIP;
        while(<GZIP>) {
            push @gzip, $_;
@@ -48,8 +48,8 @@
#endif
#endif

#ifdef MALLOCDEBUG
/* This is an ugly hack for MALLOCDEBUG conditions only. We need to include
#ifdef CURLDEBUG
/* This is an ugly hack for CURLDEBUG conditions only. We need to include
   the file here, since it might set the _FILE_OFFSET_BITS define, which must
   be set BEFORE all normal system headers. */
#include "../lib/setup.h"
100  src/urlglob.c
@@ -232,6 +232,7 @@ static GlobCode glob_word(URLGlob *glob, char *pattern, int pos, int *amount)
|
||||
*/
|
||||
char* buf = glob->glob_buffer;
|
||||
int litindex;
|
||||
GlobCode res = GLOB_OK;
|
||||
|
||||
*amount = 1; /* default is one single string */
|
||||
|
||||
@@ -257,22 +258,30 @@ static GlobCode glob_word(URLGlob *glob, char *pattern, int pos, int *amount)
|
||||
litindex = glob->size / 2;
|
||||
/* literals 0,1,2,... correspond to size=0,2,4,... */
|
||||
glob->literal[litindex] = strdup(glob->glob_buffer);
|
||||
if(!glob->literal[litindex])
|
||||
return GLOB_ERROR;
|
||||
++glob->size;
|
||||
|
||||
switch (*pattern) {
|
||||
case '\0':
|
||||
return GLOB_OK; /* singular URL processed */
|
||||
break; /* singular URL processed */
|
||||
|
||||
case '{':
|
||||
/* process set pattern */
|
||||
return glob_set(glob, ++pattern, ++pos, amount);
|
||||
res = glob_set(glob, ++pattern, ++pos, amount);
|
||||
break;
|
||||
|
||||
case '[':
|
||||
/* process range pattern */
|
||||
return glob_range(glob, ++pattern, ++pos, amount);
|
||||
res= glob_range(glob, ++pattern, ++pos, amount);
|
||||
break;
|
||||
}
|
||||
|
||||
return GLOB_ERROR; /* something got wrong */
|
||||
if(GLOB_OK != res)
|
||||
/* free that strdup'ed string again */
|
||||
free(glob->literal[litindex]);
|
||||
|
||||
return res; /* something got wrong */
|
||||
}
|
||||
|
||||
int glob_url(URLGlob** glob, char* url, int *urlnum, FILE *error)
|
||||
@@ -327,9 +336,12 @@ void glob_cleanup(URLGlob* glob)
|
||||
for (i = glob->size - 1; i >= 0; --i) {
|
||||
if (!(i & 1)) { /* even indexes contain literals */
|
||||
free(glob->literal[i/2]);
|
||||
} else { /* odd indexes contain sets or ranges */
|
||||
}
|
||||
else { /* odd indexes contain sets or ranges */
|
||||
if (glob->pattern[i/2].type == UPTSet) {
|
||||
for (elem = glob->pattern[i/2].content.Set.size - 1; elem >= 0; --elem) {
|
||||
for (elem = glob->pattern[i/2].content.Set.size - 1;
|
||||
elem >= 0;
|
||||
--elem) {
|
||||
free(glob->pattern[i/2].content.Set.elements[elem]);
|
||||
}
|
||||
free(glob->pattern[i/2].content.Set.elements);
|
||||
@@ -387,7 +399,7 @@ char *glob_next_url(URLGlob *glob)
|
||||
}
|
||||
|
||||
for (i = 0; i < glob->size; ++i) {
|
||||
if (!(i % 2)) { /* every other term (i even) is a literal */
|
||||
if (!(i % 2)) { /* every other term (i even) is a literal */
|
||||
lit = glob->literal[i/2];
|
||||
strcpy(buf, lit);
|
||||
buf += strlen(lit);
|
||||
@@ -403,7 +415,8 @@ char *glob_next_url(URLGlob *glob)
|
||||
*buf++ = pat->content.CharRange.ptr_c;
|
||||
break;
|
||||
case UPTNumRange:
|
||||
sprintf(buf, "%0*d", pat->content.NumRange.padlength, pat->content.NumRange.ptr_n);
|
||||
sprintf(buf, "%0*d",
|
||||
pat->content.NumRange.padlength, pat->content.NumRange.ptr_n);
|
||||
buf += strlen(buf); /* make no sprint() return code assumptions */
|
||||
break;
|
||||
default:
|
||||
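
The %0*d conversion in this hunk takes its zero-padding width from an argument (NumRange.padlength) rather than hard-coding it, and the new code deliberately advances by strlen() instead of trusting the sprintf() return value. A tiny illustration, with a width of 2 chosen purely as an example:

#include <stdio.h>
#include <string.h>

int main(void)
{
  char buf[32];
  int padlength = 2;                    /* example width, like NumRange.padlength */

  sprintf(buf, "%0*d", padlength, 7);   /* zero-pads to the given width: "07" */
  /* the patched code advances buf by strlen(buf) rather than using the return code */
  printf("%s (length %zu)\n", buf, strlen(buf));
  return 0;
}
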
@@ -419,8 +432,6 @@ char *glob_next_url(URLGlob *glob)
|
||||
char *glob_match_url(char *filename, URLGlob *glob)
|
||||
{
|
||||
char *target;
|
||||
URLPattern pat;
|
||||
int i;
|
||||
int allocsize;
|
||||
int stringlen=0;
|
||||
char numbuf[18];
|
||||
@@ -436,42 +447,41 @@ char *glob_match_url(char *filename, URLGlob *glob)
|
||||
if(NULL == target)
|
||||
return NULL; /* major failure */
|
||||
|
||||
while (*filename != '\0') {
|
||||
if (*filename == '#') {
|
||||
if (!isdigit((int)*++filename) ||
|
||||
*filename == '0') { /* only '#1' ... '#9' allowed */
|
||||
/* printf("illegal matching expression\n");
|
||||
exit(CURLE_URL_MALFORMAT);*/
|
||||
continue;
|
||||
while (*filename) {
|
||||
if (*filename == '#' && isdigit((int)filename[1])) {
|
||||
/* only '#1' ... '#9' allowed */
|
||||
int i;
|
||||
unsigned long num = strtoul(&filename[1], &filename, 10);
|
||||
|
||||
i = num-1;
|
||||
|
||||
if (num && (i <= glob->size / 2)) {
|
||||
URLPattern pat = glob->pattern[i];
|
||||
switch (pat.type) {
|
||||
case UPTSet:
|
||||
appendthis = pat.content.Set.elements[pat.content.Set.ptr_s];
|
||||
appendlen =
|
||||
(int)strlen(pat.content.Set.elements[pat.content.Set.ptr_s]);
|
||||
break;
|
||||
case UPTCharRange:
|
||||
numbuf[0]=pat.content.CharRange.ptr_c;
|
||||
numbuf[1]=0;
|
||||
appendthis=numbuf;
|
||||
appendlen=1;
|
||||
break;
|
||||
case UPTNumRange:
|
||||
sprintf(numbuf, "%0*d",
|
||||
pat.content.NumRange.padlength,
|
||||
pat.content.NumRange.ptr_n);
|
||||
appendthis = numbuf;
|
||||
appendlen = (int)strlen(numbuf);
|
||||
break;
|
||||
default:
|
||||
printf("internal error: invalid pattern type (%d)\n", pat.type);
|
||||
free(target);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
i = *filename - '1';
|
||||
if (i + 1 > glob->size / 2) {
|
||||
/*printf("match against nonexisting pattern\n");
|
||||
exit(CURLE_URL_MALFORMAT);*/
|
||||
continue;
|
||||
}
|
||||
pat = glob->pattern[i];
|
||||
switch (pat.type) {
|
||||
case UPTSet:
|
||||
appendthis = pat.content.Set.elements[pat.content.Set.ptr_s];
|
||||
appendlen = (int)strlen(pat.content.Set.elements[pat.content.Set.ptr_s]);
|
||||
break;
|
||||
case UPTCharRange:
|
||||
numbuf[0]=pat.content.CharRange.ptr_c;
|
||||
numbuf[1]=0;
|
||||
appendthis=numbuf;
|
||||
appendlen=1;
|
||||
break;
|
||||
case UPTNumRange:
|
||||
sprintf(numbuf, "%0*d", pat.content.NumRange.padlength, pat.content.NumRange.ptr_n);
|
||||
appendthis = numbuf;
|
||||
appendlen = (int)strlen(numbuf);
|
||||
break;
|
||||
default:
|
||||
printf("internal error: invalid pattern type (%d)\n", pat.type);
|
||||
return NULL;
|
||||
}
|
||||
++filename;
|
||||
}
|
||||
else {
|
||||
appendthis=filename++;
|
||||
|
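
The rewritten glob_match_url() loop above parses the digits after '#' with strtoul() and only substitutes when the number maps to an existing pattern, instead of bailing out on anything unexpected. A small sketch of just that scanning step; scan_template() and npatterns are hypothetical names (npatterns stands in for glob->size / 2):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Scan a "log/dumpit#1.dump"-style template the way the patched loop does,
   printing a marker where each valid #N would be replaced. */
static void scan_template(const char *filename, int npatterns)
{
  while(*filename) {
    if(*filename == '#' && isdigit((int)filename[1])) {
      char *end;
      unsigned long num = strtoul(&filename[1], &end, 10);
      filename = end;
      if(num && (num - 1 <= (unsigned long)npatterns))
        printf("<pattern %lu>", num);
      /* numbers with no matching pattern simply produce no output here */
    }
    else {
      putchar(*filename++);
    }
  }
  putchar('\n');
}

int main(void)
{
  scan_template("log/dumpit#1.dump", 1);   /* the template used by test74/test86 */
  return 0;
}
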
@@ -1,3 +1,3 @@
|
||||
#define CURL_NAME "curl"
|
||||
#define CURL_VERSION "7.10.6"
|
||||
#define CURL_VERSION "7.10.7"
|
||||
#define CURL_ID CURL_NAME " " CURL_VERSION " (" OS ") "
|
||||
|
@@ -2,3 +2,4 @@ Makefile
|
||||
Makefile.in
|
||||
memdump
|
||||
log
|
||||
*.pid
|
||||
|
@@ -95,7 +95,12 @@ command is run, they are clear again after the command has been run.
|
||||
|
||||
<command [option=no-output]>
|
||||
command line to run, there's a bunch of %variables that get replaced
|
||||
accordingly. more about them elsewhere
|
||||
accordingly.
|
||||
|
||||
Note that the URL that gets passed to the server actually controls what data
|
||||
that is returned. The last slash in the URL must be followed by a number. That
|
||||
number (N) will be used by the test-server to load test case N and return the
|
||||
data that is defined within the <reply><data></data></reply> section.
|
||||
|
||||
Set 'option=no-output' to prevent the test script to slap on the --output
|
||||
argument that directs the output to a file. The --output is also not added if
|
||||
|
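
For a concrete picture of the option=no-output case described above, this is the shape used by test74 (added later in this change set), where the test itself supplies -o and the script must therefore not append --output:

<command option=no-output>
"http://%HOSTIP:%HOSTPORT/{74,740001}" -o "log/dumpit#1.dump"
</command>
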
@@ -6,20 +6,22 @@ SUBDIRS = data server libtest
|
||||
|
||||
PERLFLAGS = -I$(srcdir)
|
||||
|
||||
curl:
|
||||
@(cd ..; make)
|
||||
CLEANFILES = .http.pid .https.pid .ftp.pid .ftps.pid
|
||||
|
||||
test: server/sws
|
||||
curl:
|
||||
@cd $(top_builddir) && $(MAKE)
|
||||
|
||||
test: sws
|
||||
@cd data && exec $(MAKE) test
|
||||
srcdir=$(srcdir) $(PERL) $(PERLFLAGS) $(srcdir)/runtests.pl
|
||||
|
||||
quiet-test: server/sws
|
||||
quiet-test: sws
|
||||
@cd data && exec $(MAKE) test
|
||||
srcdir=$(srcdir) $(PERL) $(PERLFLAGS) $(srcdir)/runtests.pl -s -a
|
||||
|
||||
full-test: server/sws
|
||||
full-test: sws
|
||||
@cd data && exec $(MAKE) test
|
||||
srcdir=$(srcdir) $(PERL) $(PERLFLAGS) $(srcdir)/runtests.pl -a
|
||||
|
||||
server/sws:
|
||||
cd server; make sws
|
||||
sws:
|
||||
@cd server && $(MAKE)
|
||||
|
@@ -19,6 +19,8 @@ test304 test39 test32 test128 test48 test306 \
|
||||
test130 test131 test132 test133 test134 test135 test403 test305 \
|
||||
test49 test50 test51 test52 test53 test54 test55 test56 \
|
||||
test500 test501 test502 test503 test504 test136 test57 test137 test138 \
|
||||
test58 test139 test140 test141 test59 test60 test61 test142 test143 test62 \
|
||||
test63 test64 test65 test66 test144 test145 test67 test68 test41 \
|
||||
test40 test42 test69 test70 test71
|
||||
test58 test139 test140 test141 test59 test60 test61 test142 test143 \
|
||||
test62 test63 test64 test65 test66 test144 test145 test67 test68 test41 \
|
||||
test40 test42 test69 test70 test71 test72 test73 test146 test505 \
|
||||
test74 test75 test76 test77 test78 test147 test148 test506 test79 test80 \
|
||||
test81 test82 test83 test84 test85 test86 test87
|
||||
|
@@ -29,5 +29,6 @@ CWD path
|
||||
MDTM 103
|
||||
TYPE I
|
||||
SIZE 103
|
||||
REST 0
|
||||
</protocol>
|
||||
</verify>
|
||||
|
@@ -30,6 +30,7 @@ CWD blalbla
|
||||
MDTM 141
|
||||
TYPE I
|
||||
SIZE 141
|
||||
REST 0
|
||||
</protocol>
|
||||
<stdout>
|
||||
Last-Modified: Wed, 09 Apr 2003 10:26:59 GMT
|
||||
|
46 tests/data/test146 (new file)
@@ -0,0 +1,46 @@
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
this is file contents
|
||||
</data>
|
||||
</reply>
|
||||
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
ftp
|
||||
</server>
|
||||
<name>
|
||||
persistant FTP with different paths
|
||||
</name>
|
||||
<command>
|
||||
ftp://%HOSTIP:%FTPPORT/first/dir/here/146 ftp://%HOSTIP:%FTPPORT/146
|
||||
</command>
|
||||
</test>
|
||||
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
# strip all kinds of PORT, EPRT and LPRT curl can do
|
||||
<strip>
|
||||
^PORT 127,0,0,1,.*
|
||||
^EPRT.*
|
||||
^LPRT.*
|
||||
</strip>
|
||||
<protocol>
|
||||
USER anonymous
|
||||
PASS curl_by_daniel@haxx.se
|
||||
PWD
|
||||
CWD first
|
||||
CWD dir
|
||||
CWD here
|
||||
EPSV
|
||||
TYPE I
|
||||
SIZE 146
|
||||
RETR 146
|
||||
CWD /nowhere/anywhere
|
||||
EPSV
|
||||
TYPE I
|
||||
SIZE 146
|
||||
RETR 146
|
||||
</protocol>
|
||||
</verify>
|
48 tests/data/test147 (new file)
@@ -0,0 +1,48 @@
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
foo
|
||||
</data>
|
||||
</reply>
|
||||
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
ftp
|
||||
</server>
|
||||
<name>
|
||||
FTP with --ftp-create-dirs (failing CWD)
|
||||
</name>
|
||||
<command>
|
||||
ftp://%HOSTIP:%FTPPORT/first/dir/here/147 --ftp-create-dirs
|
||||
</command>
|
||||
</test>
|
||||
<file name="log/ftpserver.cmd">
|
||||
REPLY CWD 550 I won't allow this on my server
|
||||
COUNT CWD 1
|
||||
</file>
|
||||
</client>
|
||||
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
# strip all kinds of PORT, EPRT and LPRT curl can do
|
||||
<strip>
|
||||
^PORT 127,0,0,1,.*
|
||||
^EPRT.*
|
||||
^LPRT.*
|
||||
</strip>
|
||||
<protocol>
|
||||
USER anonymous
|
||||
PASS curl_by_daniel@haxx.se
|
||||
PWD
|
||||
CWD first
|
||||
MKD first
|
||||
CWD first
|
||||
CWD dir
|
||||
CWD here
|
||||
EPSV
|
||||
TYPE I
|
||||
SIZE 147
|
||||
RETR 147
|
||||
</protocol>
|
||||
</verify>
|
41 tests/data/test148 (new file)
@@ -0,0 +1,41 @@
|
||||
# Server-side
|
||||
<reply>
|
||||
</reply>
|
||||
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
ftp
|
||||
</server>
|
||||
<name>
|
||||
FTP with --ftp-create-dirs (failing MKD)
|
||||
</name>
|
||||
<command>
|
||||
ftp://%HOSTIP:%FTPPORT/attempt/to/get/this/148 --ftp-create-dirs
|
||||
</command>
|
||||
</test>
|
||||
<file name="log/ftpserver.cmd">
|
||||
REPLY CWD 550 I won't allow this on my server
|
||||
REPLY MKD 550 We will have no such thing
|
||||
</file>
|
||||
</client>
|
||||
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
# strip all kinds of PORT, EPRT and LPRT curl can do
|
||||
<strip>
|
||||
^PORT 127,0,0,1,.*
|
||||
^EPRT.*
|
||||
^LPRT.*
|
||||
</strip>
|
||||
<errorcode>
|
||||
9
|
||||
</errorcode>
|
||||
<protocol>
|
||||
USER anonymous
|
||||
PASS curl_by_daniel@haxx.se
|
||||
PWD
|
||||
CWD attempt
|
||||
MKD attempt
|
||||
</protocol>
|
||||
</verify>
|
@@ -8,6 +8,7 @@ Content-Type: text/html
|
||||
Set-Cookie: ckyPersistent=permanent; expires=Tue, 01-Jan-2010 05:00:00 GMT; path=/
|
||||
Set-Cookie: ckySession=temporary; path=/
|
||||
Set-Cookie: ASPSESSIONIDQGGQQSJJ=GKNBDIFAAOFDPDAIEAKDIBKE; path=/
|
||||
Set-Cookie: justaname=; path=/;
|
||||
Cache-control: private
|
||||
Content-Length: 62
|
||||
|
||||
@@ -34,6 +35,7 @@ HTTP, get cookies and store in cookie jar
|
||||
www.fake.come FALSE / FALSE 1022144953 cookiecliente si
|
||||
www.loser.com FALSE / FALSE 1139150993 UID 99
|
||||
127.0.0.1 FALSE / FALSE 1139150993 mooo indeed
|
||||
127.0.0.1 FALSE / FALSE 0 empty
|
||||
</file>
|
||||
</client>
|
||||
|
||||
@@ -47,7 +49,7 @@ GET /want/46 HTTP/1.1
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
Cookie: mooo=indeed
|
||||
Cookie: empty=; mooo=indeed
|
||||
|
||||
</protocol>
|
||||
<file name="log/jar46">
|
||||
@@ -58,8 +60,10 @@ Cookie: mooo=indeed
|
||||
www.fake.come FALSE / FALSE 1022144953 cookiecliente si
|
||||
www.loser.com FALSE / FALSE 1139150993 UID 99
|
||||
127.0.0.1 FALSE / FALSE 1139150993 mooo indeed
|
||||
127.0.0.1 FALSE / FALSE 0 empty
|
||||
127.0.0.1 FALSE / FALSE 1262322000 ckyPersistent permanent
|
||||
127.0.0.1 FALSE / FALSE 0 ckySession temporary
|
||||
127.0.0.1 FALSE / FALSE 0 ASPSESSIONIDQGGQQSJJ GKNBDIFAAOFDPDAIEAKDIBKE
|
||||
127.0.0.1 FALSE / FALSE 0 justaname
|
||||
</file>
|
||||
</verify>
|
||||
|
@@ -42,7 +42,6 @@ CONNECT 127.0.0.1:8433 HTTP/1.0
|
||||
Proxy-authorization: Basic dGVzdDppbmc=
|
||||
|
||||
GET /503 HTTP/1.1
|
||||
Proxy-authorization: Basic dGVzdDppbmc=
|
||||
Authorization: Basic dGVzdDppbmc=
|
||||
Host: 127.0.0.1:8433
|
||||
Pragma: no-cache
|
||||
|
147 tests/data/test506 (new file)
@@ -0,0 +1,147 @@
|
||||
# Server-side
|
||||
<reply>
|
||||
<data1>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:00 GMT
|
||||
Server: test-server/fake
|
||||
Content-Type: text/html
|
||||
Set-Cookie: test1=one; domain=foo.com; expires=Tue Feb 1 11:56:27 MET 2007
|
||||
Set-Cookie: test2=two; domain=host.foo.com; expires=Tue Feb 1 11:56:27 MET 2007
|
||||
Set-Cookie: test3=three; domain=foo.com; expires=Tue Feb 1 11:56:27 MET 2007
|
||||
Content-Length: 29
|
||||
|
||||
run 1: set cookie 1, 2 and 3
|
||||
</data1>
|
||||
<data2>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:01 GMT
|
||||
Server: test-server/fake
|
||||
Content-Type: text/html
|
||||
Set-Cookie: test4=four; domain=host.foo.com; expires=Tue Feb 1 11:56:27 MET 2007
|
||||
Set-Cookie: test5=five; domain=host.foo.com; expires=Tue Feb 1 11:56:27 MET 2007
|
||||
Content-Length: 26
|
||||
|
||||
run 2: set cookie 4 and 5
|
||||
</data2>
|
||||
<data3>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:02 GMT
|
||||
Server: test-server/fake
|
||||
Content-Type: text/html
|
||||
Funny-head: yesyes
|
||||
Set-Cookie: test4=overwritten4; domain=host.foo.com; expires=Thu Dec 31 23:59:59 MET 2009
|
||||
Set-Cookie: test1=overwritten1; domain=foo.com; expires=Tue Feb 2 11:56:27 MET 2007
|
||||
Content-Type: text/html
|
||||
Content-Length: 32
|
||||
|
||||
run 3: overwrite cookie 1 and 4
|
||||
</data5>
|
||||
</reply>
|
||||
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP with shared cookie list (and dns cache)
|
||||
</name>
|
||||
<tool>
|
||||
lib506
|
||||
</tool>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/506
|
||||
</command>
|
||||
</client>
|
||||
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<stdout>
|
||||
GLOBAL_INIT
|
||||
SHARE_INIT
|
||||
CURL_LOCK_DATA_COOKIE
|
||||
CURL_LOCK_DATA_DNS
|
||||
*** run 1
|
||||
CURLOPT_SHARE
|
||||
lock: share <Pigs in space>
|
||||
unlock: share <Pigs in space>
|
||||
PERFORM
|
||||
lock: dns <Pigs in space>
|
||||
unlock: dns <Pigs in space>
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
run 1: set cookie 1, 2 and 3
|
||||
lock: dns <Pigs in space>
|
||||
unlock: dns <Pigs in space>
|
||||
CLEANUP
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
*** run 2
|
||||
CURLOPT_SHARE
|
||||
lock: share <Pigs in space>
|
||||
unlock: share <Pigs in space>
|
||||
PERFORM
|
||||
lock: dns <Pigs in space>
|
||||
unlock: dns <Pigs in space>
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
run 2: set cookie 4 and 5
|
||||
lock: dns <Pigs in space>
|
||||
unlock: dns <Pigs in space>
|
||||
CLEANUP
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
*** run 3
|
||||
CURLOPT_SHARE
|
||||
lock: share <Pigs in space>
|
||||
unlock: share <Pigs in space>
|
||||
CURLOPT_COOKIEJAR
|
||||
PERFORM
|
||||
lock: dns <Pigs in space>
|
||||
unlock: dns <Pigs in space>
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
run 3: overwrite cookie 1 and 4
|
||||
lock: dns <Pigs in space>
|
||||
unlock: dns <Pigs in space>
|
||||
try SHARE_CLEANUP...
|
||||
lock: share <Pigs in space>
|
||||
unlock: share <Pigs in space>
|
||||
SHARE_CLEANUP failed, correct
|
||||
CLEANUP
|
||||
lock: cookie <Pigs in space>
|
||||
unlock: cookie <Pigs in space>
|
||||
SHARE_CLEANUP
|
||||
lock: share <Pigs in space>
|
||||
unlock: share <Pigs in space>
|
||||
GLOBAL_CLEANUP
|
||||
</stdout>
|
||||
<stderr>
|
||||
http://%HOSTIP:%HOSTPORT/506
|
||||
</stderr>
|
||||
<file name="log/jar506">
|
||||
# Netscape HTTP Cookie File
|
||||
# http://www.netscape.com/newsref/std/cookie_spec.html
|
||||
# This file was generated by libcurl! Edit at your own risk.
|
||||
|
||||
.foo.com TRUE / FALSE 1170413787 test1 overwritten1
|
||||
.host.foo.com TRUE / FALSE 1170327387 test2 two
|
||||
.foo.com TRUE / FALSE 1170327387 test3 three
|
||||
.host.foo.com TRUE / FALSE 1262300399 test4 overwritten4
|
||||
.host.foo.com TRUE / FALSE 1170327387 test5 five
|
||||
</file>
|
||||
</verify>
|
76 tests/data/test72 (new file)
@@ -0,0 +1,76 @@
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
HTTP/1.1 401 Authorization Required
|
||||
Server: Apache/1.3.27 (Darwin) PHP/4.1.2
|
||||
WWW-Authenticate: Basic realm="foothis"
|
||||
WWW-Authenticate: Digest realm="testrealm", nonce="1053604199"
|
||||
Content-Type: text/html; charset=iso-8859-1
|
||||
Content-Length: 26
|
||||
|
||||
This is not the real page
|
||||
</data>
|
||||
|
||||
# This is supposed to be returned when the server gets a
|
||||
# Authorization: Digest line passed-in from the client
|
||||
<data1000>
|
||||
HTTP/1.1 200 OK
|
||||
Server: Apache/1.3.27 (Darwin) PHP/4.1.2
|
||||
Content-Type: text/html; charset=iso-8859-1
|
||||
Content-Length: 23
|
||||
|
||||
This IS the real page!
|
||||
</data1000>
|
||||
|
||||
<datacheck>
|
||||
HTTP/1.1 401 Authorization Required
|
||||
Server: Apache/1.3.27 (Darwin) PHP/4.1.2
|
||||
WWW-Authenticate: Basic realm="foothis"
|
||||
WWW-Authenticate: Digest realm="testrealm", nonce="1053604199"
|
||||
Content-Type: text/html; charset=iso-8859-1
|
||||
Content-Length: 26
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Server: Apache/1.3.27 (Darwin) PHP/4.1.2
|
||||
Content-Type: text/html; charset=iso-8859-1
|
||||
Content-Length: 23
|
||||
|
||||
This IS the real page!
|
||||
</datacheck>
|
||||
|
||||
</reply>
|
||||
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP with Digest *OR* Basic authorization
|
||||
</name>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/72 -u testuser:testpass --anyauth
|
||||
</command>
|
||||
</test>
|
||||
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET /72 HTTP/1.1
|
||||
User-Agent: curl/7.10.6-pre1 (i686-pc-linux-gnu) libcurl/7.10.6-pre1 OpenSSL/0.9.7a ipv6 zlib/1.1.3
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
GET /72 HTTP/1.1
|
||||
Authorization: Digest username="testuser", realm="testrealm", nonce="1053604199", uri="/72", response="9fcd1330377365a09bbcb33b2cbb25bd"
|
||||
User-Agent: curl/7.10.6-pre1 (i686-pc-linux-gnu) libcurl/7.10.6-pre1 OpenSSL/0.9.7a ipv6 zlib/1.1.3
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
</protocol>
|
||||
</verify>
|
45 tests/data/test73 (new file)
@@ -0,0 +1,45 @@
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
HTTP/1.1 200 OK swsclose
|
||||
Date: Thu, 09 Nov 2010 14:49:00 GMT
|
||||
Content-Type: text/html
|
||||
Set-Cookie: IPCZQX01af0fca5c=000010008168c200d25dfc4b; path=/; domain=.NOT_DISCLOSED.se
|
||||
|
||||
boo
|
||||
</data>
|
||||
</reply>
|
||||
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP, receive cookies when using custom Host:, domain using only two dots
|
||||
</name>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/we/want/73 -c log/jar73.txt -H "Host: host.NOT_DISCLOSED.se"
|
||||
</command>
|
||||
</client>
|
||||
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET /we/want/73 HTTP/1.1
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
Host: host.NOT_DISCLOSED.se
|
||||
|
||||
</protocol>
|
||||
<file name="log/jar73.txt">
|
||||
# Netscape HTTP Cookie File
|
||||
# http://www.netscape.com/newsref/std/cookie_spec.html
|
||||
# This file was generated by libcurl! Edit at your own risk.
|
||||
|
||||
.NOT_DISCLOSED.se TRUE / FALSE 0 IPCZQX01af0fca5c 000010008168c200d25dfc4b
|
||||
</file>
|
||||
</verify>
|
67 tests/data/test74 (new file)
@@ -0,0 +1,67 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
<data nocheck=1>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:00 GMT
|
||||
Server: test-server/fake
|
||||
Last-Modified: Tue, 13 Jun 2000 12:10:00 GMT
|
||||
ETag: "21025-dc7-39462498"
|
||||
Accept-Ranges: bytes
|
||||
Content-Length: 6
|
||||
Content-Type: text/html
|
||||
Funny-head: yesyes
|
||||
|
||||
<foo>
|
||||
</data>
|
||||
<data1>
|
||||
HTTP/1.0 200 OK
|
||||
Content-Type: text/html
|
||||
Funny-head: swsclose
|
||||
Connection: close
|
||||
|
||||
crap data
|
||||
</data1>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP, urlglob {}-retrieval and -o #[num] usage
|
||||
</name>
|
||||
<command option=no-output>
|
||||
"http://%HOSTIP:%HOSTPORT/{74,740001}" -o "log/dumpit#1.dump"
|
||||
</command>
|
||||
</client>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET /74 HTTP/1.1
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
GET /740001 HTTP/1.1
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
</protocol>
|
||||
<file name="log/dumpit740001.dump">
|
||||
HTTP/1.0 200 OK
|
||||
Content-Type: text/html
|
||||
Funny-head: swsclose
|
||||
Connection: close
|
||||
|
||||
crap data
|
||||
</file>
|
||||
</verify>
|
33 tests/data/test75 (new file)
@@ -0,0 +1,33 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
none
|
||||
</server>
|
||||
<name>
|
||||
HTTP, urlglob retrieval with bad range
|
||||
</name>
|
||||
<command option=no-output>
|
||||
"http://%HOSTIP:%HOSTPORT/[1-1]" -o "log/weee#1.dump" --stderr -
|
||||
</command>
|
||||
</client>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<errorcode>
|
||||
3
|
||||
</errorcode>
|
||||
<stdout>
|
||||
curl: (3) [globbing] error: illegal pattern or range specification after pos 24
|
||||
|
||||
</stdout>
|
||||
</verify>
|
29 tests/data/test76 (new file)
@@ -0,0 +1,29 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
none
|
||||
</server>
|
||||
<name>
|
||||
HTTP, -O with no file name part in the URL
|
||||
</name>
|
||||
<command option=no-output>
|
||||
http://%HOSTIP:%HOSTPORT/76/ -O
|
||||
</command>
|
||||
</client>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<errorcode>
|
||||
23
|
||||
</errorcode>
|
||||
</verify>
|
47 tests/data/test77 (new file)
@@ -0,0 +1,47 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:00 GMT
|
||||
Server: test-server/fake
|
||||
Last-Modified: Tue, 13 Jun 2010 12:10:00 GMT
|
||||
ETag: "21025-dc7-39462498"
|
||||
Accept-Ranges: bytes
|
||||
Content-Length: 6
|
||||
Connection: close
|
||||
Content-Type: text/html
|
||||
|
||||
<foo>
|
||||
</data>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP with -z "older date"
|
||||
</name>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/77 -z "1999-12-12 12:00:00 MET"
|
||||
</command>
|
||||
</client>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET /77 HTTP/1.1
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
If-Modified-Since: Sun, 12 Dec 1999 11:00:00 GMT
|
||||
|
||||
</protocol>
|
||||
</verify>
|
59 tests/data/test78 (new file)
@@ -0,0 +1,59 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
HTTP/1.1 200 OK
|
||||
Server: test-server/fake
|
||||
Last-Modified: Tue, 13 Jun 1990 12:10:00 GMT
|
||||
ETag: "21025-dc7-39462498"
|
||||
Accept-Ranges: bytes
|
||||
Content-Length: 6
|
||||
Connection: close
|
||||
Content-Type: text/html
|
||||
Funny-head: yesyes
|
||||
|
||||
<foo>
|
||||
</data>
|
||||
<datacheck>
|
||||
HTTP/1.1 200 OK
|
||||
Server: test-server/fake
|
||||
Last-Modified: Tue, 13 Jun 1990 12:10:00 GMT
|
||||
ETag: "21025-dc7-39462498"
|
||||
Accept-Ranges: bytes
|
||||
Content-Length: 6
|
||||
Connection: close
|
||||
Content-Type: text/html
|
||||
Funny-head: yesyes
|
||||
|
||||
</datacheck>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP with -z "newer date"
|
||||
</name>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/78 -z "1999-12-12 12:00:00 MET"
|
||||
</command>
|
||||
</client>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET /78 HTTP/1.1
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
If-Modified-Since: Sun, 12 Dec 1999 11:00:00 GMT
|
||||
|
||||
</protocol>
|
||||
</verify>
|
42 tests/data/test79 (new file)
@@ -0,0 +1,42 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:00 GMT
|
||||
Server: test-server/fake swsclose
|
||||
Content-Type: text/html
|
||||
Funny-head: yesyes
|
||||
|
||||
contents
|
||||
</data>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
FTP over HTTP proxy
|
||||
</name>
|
||||
<command>
|
||||
ftp://%HOSTIP:%HOSTPORT/we/want/that/page/79 -x %HOSTIP:%HOSTPORT
|
||||
</command>
|
||||
</test>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET ftp://127.0.0.1:8999/we/want/that/page/79 HTTP/1.1
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
</protocol>
|
||||
</verify>
|
48 tests/data/test80 (new file)
@@ -0,0 +1,48 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:00 GMT
|
||||
Server: test-server/fake swsclose
|
||||
Content-Type: text/html
|
||||
Funny-head: yesyes
|
||||
|
||||
contents
|
||||
</data>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP over proxy with both proxy and site authentication
|
||||
</name>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/we/want/that/page/80 -p -x %HOSTIP:%HOSTPORT --user iam:myself --proxy-user youare:yourself
|
||||
</command>
|
||||
</test>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
CONNECT 127.0.0.1:8999 HTTP/1.0
|
||||
Proxy-authorization: Basic eW91YXJlOnlvdXJzZWxm
|
||||
User-Agent: curl/7.10.7-pre2 (i686-pc-linux-gnu) libcurl/7.10.7-pre2 OpenSSL/0.9.7a zlib/1.1.3
|
||||
|
||||
GET /we/want/that/page/80 HTTP/1.1
|
||||
Authorization: Basic aWFtOm15c2VsZg==
|
||||
User-Agent: curl/7.10.7-pre2 (i686-pc-linux-gnu) libcurl/7.10.7-pre2 OpenSSL/0.9.7a zlib/1.1.3
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
</protocol>
|
||||
</verify>
|
80 tests/data/test81 (new file)
@@ -0,0 +1,80 @@
|
||||
# Server-side
|
||||
<reply>
|
||||
|
||||
# This is supposed to be returned when the server gets a first
|
||||
# Authorization: NTLM line passed-in from the client
|
||||
<data1001>
|
||||
HTTP/1.1 407 Now gimme that second request of crap
|
||||
Server: Microsoft-IIS/5.0
|
||||
Content-Type: text/html; charset=iso-8859-1
|
||||
Content-Length: 34
|
||||
Proxy-Authenticate: NTLM TlRMTVNTUAACAAAAAgACADAAAAAGgoEAc51AYVDgyNcAAAAAAAAAAG4AbgAyAAAAQ0MCAAQAQwBDAAEAEgBFAEwASQBTAEEAQgBFAFQASAAEABgAYwBjAC4AaQBjAGUAZABlAHYALgBuAHUAAwAsAGUAbABpAHMAYQBiAGUAdABoAC4AYwBjAC4AaQBjAGUAZABlAHYALgBuAHUAAAAAAA==
|
||||
|
||||
This is not the real page either!
|
||||
</data1001>
|
||||
|
||||
# This is supposed to be returned when the server gets the second
|
||||
# Authorization: NTLM line passed-in from the client
|
||||
<data1002>
|
||||
HTTP/1.1 200 Things are fine in server land swsclose
|
||||
Server: Microsoft-IIS/5.0
|
||||
Content-Type: text/html; charset=iso-8859-1
|
||||
|
||||
Finally, this is the real page!
|
||||
</data1002>
|
||||
|
||||
<datacheck>
|
||||
HTTP/1.1 407 Now gimme that second request of crap
|
||||
Server: Microsoft-IIS/5.0
|
||||
Content-Type: text/html; charset=iso-8859-1
|
||||
Content-Length: 34
|
||||
Proxy-Authenticate: NTLM TlRMTVNTUAACAAAAAgACADAAAAAGgoEAc51AYVDgyNcAAAAAAAAAAG4AbgAyAAAAQ0MCAAQAQwBDAAEAEgBFAEwASQBTAEEAQgBFAFQASAAEABgAYwBjAC4AaQBjAGUAZABlAHYALgBuAHUAAwAsAGUAbABpAHMAYQBiAGUAdABoAC4AYwBjAC4AaQBjAGUAZABlAHYALgBuAHUAAAAAAA==
|
||||
|
||||
HTTP/1.1 200 Things are fine in server land swsclose
|
||||
Server: Microsoft-IIS/5.0
|
||||
Content-Type: text/html; charset=iso-8859-1
|
||||
|
||||
Finally, this is the real page!
|
||||
</datacheck>
|
||||
|
||||
</reply>
|
||||
|
||||
# Client-side
|
||||
<client>
|
||||
# NTLM only works if SSL-support is present
|
||||
<features>
|
||||
SSL
|
||||
</features>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP with proxy using NTLM authorization
|
||||
</name>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/81 --proxy-user testuser:testpass -x http://%HOSTIP:%HOSTPORT --proxy-ntlm
|
||||
</command>
|
||||
</test>
|
||||
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET http://127.0.0.1:8999/81 HTTP/1.1
|
||||
Proxy-Authorization: NTLM TlRMTVNTUAABAAAAAgIAAAAAAAAgAAAAAAAAACAAAAA=
|
||||
User-Agent: curl/7.10.6-pre1 (i686-pc-linux-gnu) libcurl/7.10.6-pre1 OpenSSL/0.9.7a ipv6 zlib/1.1.3
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
GET http://127.0.0.1:8999/81 HTTP/1.1
|
||||
Proxy-Authorization: NTLM TlRMTVNTUAADAAAAGAAYAEgAAAAAAAAAYAAAAAAAAABAAAAACAAIAEAAAAAAAAAASAAAAAAAAABgAAAAAYIAAHRlc3R1c2VyWmRDApEJkUyGOPS3DjvASModEeW/N/FB
|
||||
User-Agent: curl/7.10.6-pre1 (i686-pc-linux-gnu) libcurl/7.10.6-pre1 OpenSSL/0.9.7a ipv6 zlib/1.1.3
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
</protocol>
|
||||
</verify>
|
45 tests/data/test82 (new file)
@@ -0,0 +1,45 @@
|
||||
# Server-side
|
||||
<reply>
|
||||
|
||||
<data>
|
||||
HTTP/1.1 407 We only deal with NTLM my friend
|
||||
Server: Microsoft-IIS/5.0
|
||||
Content-Type: text/html; charset=iso-8859-1
|
||||
Content-Length: 34
|
||||
Proxy-Authenticate: NTLM
|
||||
|
||||
This is not the real page either!
|
||||
</data1001>
|
||||
|
||||
</reply>
|
||||
|
||||
# Client-side
|
||||
<client>
|
||||
<features>
|
||||
</features>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP with proxy requiring NTLM, but we send Basic
|
||||
</name>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/82 --proxy-user testuser:testpass -x http://%HOSTIP:%HOSTPORT
|
||||
</command>
|
||||
</test>
|
||||
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET http://127.0.0.1:8999/82 HTTP/1.1
|
||||
Proxy-authorization: Basic dGVzdHVzZXI6dGVzdHBhc3M=
|
||||
User-Agent: curl/7.10.6-pre1 (i686-pc-linux-gnu) libcurl/7.10.6-pre1 OpenSSL/0.9.7a ipv6 zlib/1.1.3
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
</protocol>
|
||||
</verify>
|
47 tests/data/test83 (new file)
@@ -0,0 +1,47 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:00 GMT
|
||||
Server: test-server/fake swsclose
|
||||
Content-Type: text/html
|
||||
Funny-head: yesyes
|
||||
|
||||
contents
|
||||
</data>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP over proxy-tunnel with site authentication
|
||||
</name>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/we/want/that/page/83 -p -x %HOSTIP:%HOSTPORT --user iam:myself
|
||||
</command>
|
||||
</test>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
CONNECT 127.0.0.1:8999 HTTP/1.0
|
||||
User-Agent: curl/7.10.7-pre2 (i686-pc-linux-gnu) libcurl/7.10.7-pre2 OpenSSL/0.9.7a zlib/1.1.3
|
||||
|
||||
GET /we/want/that/page/83 HTTP/1.1
|
||||
Authorization: Basic aWFtOm15c2VsZg==
|
||||
User-Agent: curl/7.10.7-pre2 (i686-pc-linux-gnu) libcurl/7.10.7-pre2 OpenSSL/0.9.7a zlib/1.1.3
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
</protocol>
|
||||
</verify>
|
44 tests/data/test84 (new file)
@@ -0,0 +1,44 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:00 GMT
|
||||
Server: test-server/fake swsclose
|
||||
Content-Type: text/html
|
||||
Funny-head: yesyes
|
||||
|
||||
contents
|
||||
</data>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP over proxy with site authentication
|
||||
</name>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/we/want/that/page/84 -x %HOSTIP:%HOSTPORT --user iam:myself
|
||||
</command>
|
||||
</test>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET http://127.0.0.1:8999/we/want/that/page/84 HTTP/1.1
|
||||
Authorization: Basic aWFtOm15c2VsZg==
|
||||
User-Agent: curl/7.10.7-pre2 (i686-pc-linux-gnu) libcurl/7.10.7-pre2 OpenSSL/0.9.7a zlib/1.1.3
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
</protocol>
|
||||
</verify>
|
45 tests/data/test85 (new file)
@@ -0,0 +1,45 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
<data>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:00 GMT
|
||||
Server: test-server/fake swsclose
|
||||
Content-Type: text/html
|
||||
Funny-head: yesyes
|
||||
|
||||
contents
|
||||
</data>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP over proxy with site and proxy authentication
|
||||
</name>
|
||||
<command>
|
||||
http://%HOSTIP:%HOSTPORT/we/want/that/page/85 -x %HOSTIP:%HOSTPORT --user iam:myself --proxy-user testing:this
|
||||
</command>
|
||||
</test>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET http://127.0.0.1:8999/we/want/that/page/85 HTTP/1.1
|
||||
Proxy-authorization: Basic dGVzdGluZzp0aGlz
|
||||
Authorization: Basic aWFtOm15c2VsZg==
|
||||
User-Agent: curl/7.10.7-pre2 (i686-pc-linux-gnu) libcurl/7.10.7-pre2 OpenSSL/0.9.7a zlib/1.1.3
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
</protocol>
|
||||
</verify>
|
90 tests/data/test86 (new file)
@@ -0,0 +1,90 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
<data nocheck=1>
|
||||
HTTP/1.1 200 OK
|
||||
Date: Thu, 09 Nov 2010 14:49:00 GMT
|
||||
Server: test-server/fake
|
||||
Last-Modified: Tue, 13 Jun 2000 12:10:00 GMT
|
||||
ETag: "21025-dc7-39462498"
|
||||
Accept-Ranges: bytes
|
||||
Content-Length: 6
|
||||
Connection: close
|
||||
Content-Type: text/html
|
||||
Funny-head: yesyes
|
||||
|
||||
<foo>
|
||||
</data>
|
||||
<data1>
|
||||
HTTP/1.0 200 OK
|
||||
Content-Type: text/html
|
||||
Funny-head: swsclose
|
||||
|
||||
crap data
|
||||
</data1>
|
||||
<data2>
|
||||
HTTP/1.0 200 OK
|
||||
Content-Type: text/html
|
||||
Funny-head: swsclose
|
||||
|
||||
crap data
|
||||
</data2>
|
||||
<data3>
|
||||
HTTP/1.0 200 OK
|
||||
Content-Type: text/html
|
||||
Funny-head: swsclose
|
||||
|
||||
crap data
|
||||
</data3>
|
||||
</reply>
|
||||
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
http
|
||||
</server>
|
||||
<name>
|
||||
HTTP, urlglob []-retrieval and -o #[num] usage
|
||||
</name>
|
||||
<command option=no-output>
|
||||
"http://%HOSTIP:%HOSTPORT/[860001-860003]" -o "log/dumpit#1.dump"
|
||||
</command>
|
||||
</client>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<strip>
|
||||
^User-Agent:.*
|
||||
</strip>
|
||||
<protocol>
|
||||
GET /860001 HTTP/1.1
|
||||
User-Agent: curl/7.10.7-pre4 (i686-pc-linux-gnu) libcurl/7.10.7-pre4 OpenSSL/0.9.7a ipv6 zlib/1.1.3 GSS
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
GET /860002 HTTP/1.1
|
||||
User-Agent: curl/7.10.7-pre4 (i686-pc-linux-gnu) libcurl/7.10.7-pre4 OpenSSL/0.9.7a ipv6 zlib/1.1.3 GSS
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
GET /860003 HTTP/1.1
|
||||
User-Agent: curl/7.10.7-pre4 (i686-pc-linux-gnu) libcurl/7.10.7-pre4 OpenSSL/0.9.7a ipv6 zlib/1.1.3 GSS
|
||||
Host: 127.0.0.1:8999
|
||||
Pragma: no-cache
|
||||
Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*
|
||||
|
||||
</protocol>
|
||||
|
||||
# we check the second file
|
||||
<file name="log/dumpit860002.dump">
|
||||
HTTP/1.0 200 OK
|
||||
Content-Type: text/html
|
||||
Funny-head: swsclose
|
||||
|
||||
crap data
|
||||
</file>
|
||||
</verify>
|
25 tests/data/test87 (new file)
@@ -0,0 +1,25 @@
|
||||
#
|
||||
# Server-side
|
||||
<reply>
|
||||
</reply>
|
||||
#
|
||||
# Client-side
|
||||
<client>
|
||||
<server>
|
||||
none
|
||||
</server>
|
||||
<name>
|
||||
urlglob with bad -o #[num] usage
|
||||
</name>
|
||||
<command option=no-output>
|
||||
"http://%HOSTIP:%HOSTPORT/[870001-870003]" -o "log/dumpit#2.dump"
|
||||
</command>
|
||||
</client>
|
||||
|
||||
#
|
||||
# Verify data after the test has been "shot"
|
||||
<verify>
|
||||
<errorcode>
|
||||
2
|
||||
</errorcode>
|
||||
</verify>
|
@@ -92,6 +92,7 @@ my %commandok = (
|
||||
'SYST' => 'loggedin',
|
||||
'SIZE' => 'loggedin|twosock',
|
||||
'PWD' => 'loggedin|twosock',
|
||||
'MKD' => 'loggedin|twosock',
|
||||
'QUIT' => 'loggedin|twosock',
|
||||
'RNFR' => 'loggedin|twosock',
|
||||
'RNTO' => 'loggedin|twosock',
|
||||
@@ -118,6 +119,7 @@ my %displaytext = ('USER' => '331 We are happy you popped in!',
|
||||
'SYST' => '215 UNIX Type: L8', # just fake something
|
||||
'QUIT' => '221 bye bye baby', # just reply something
|
||||
'PWD' => '257 "/nowhere/anywhere" is current directory',
|
||||
'MKD' => '257 Created your requested directory',
|
||||
'REST' => '350 Yeah yeah we set it there for you',
|
||||
'DELE' => '200 OK OK OK whatever you say',
|
||||
'RNFR' => '350 Received your order. Please provide more',
|
||||
@@ -428,6 +430,7 @@ sub PORT_command {
|
||||
$SIG{CHLD} = \&REAPER;
|
||||
|
||||
my %customreply;
|
||||
my %customcount;
|
||||
my %delayreply;
|
||||
sub customize {
|
||||
undef %customreply;
|
||||
@@ -440,6 +443,11 @@ sub customize {
|
||||
if($_ =~ /REPLY ([A-Z]+) (.*)/) {
|
||||
$customreply{$1}=$2;
|
||||
}
|
||||
if($_ =~ /COUNT ([A-Z]+) (.*)/) {
|
||||
# we blank the customreply for this command when having
|
||||
# been used this number of times
|
||||
$customcount{$1}=$2;
|
||||
}
|
||||
elsif($_ =~ /DELAY ([A-Z]+) (\d*)/) {
|
||||
$delayreply{$1}=$2;
|
||||
}
|
||||
@@ -540,6 +548,10 @@ for ( $waitedpid = 0;
|
||||
$text = $displaytext{$FTPCMD};
|
||||
}
|
||||
else {
|
||||
if($customcount{$FTPCMD} && (!--$customcount{$FTPCMD})) {
|
||||
# used enough number of times, now blank the customreply
|
||||
$customreply{$FTPCMD}="";
|
||||
}
|
||||
logmsg "$FTPCMD made to send '$text'\n";
|
||||
}
|
||||
if($text) {
|
||||
|
@@ -8,3 +8,4 @@ Makefile
|
||||
Makefile.in
|
||||
lib504
|
||||
lib505
|
||||
lib506
|
||||
|
@@ -11,7 +11,7 @@ LIBDIR = ../../lib
|
||||
SUPPORTFILES = first.c test.h
|
||||
|
||||
# here are all tools used for running libcurl tests
|
||||
noinst_PROGRAMS = lib500 lib501 lib502 lib503 lib504 lib505
|
||||
noinst_PROGRAMS = lib500 lib501 lib502 lib503 lib504 lib505 lib506
|
||||
|
||||
lib500_SOURCES = lib500.c $(SUPPORTFILES)
|
||||
lib500_LDADD = $(LIBDIR)/libcurl.la
|
||||
@@ -36,3 +36,8 @@ lib504_DEPENDENCIES = $(LIBDIR)/libcurl.la
|
||||
lib505_SOURCES = lib505.c $(SUPPORTFILES)
|
||||
lib505_LDADD = $(LIBDIR)/libcurl.la
|
||||
lib505_DEPENDENCIES = $(LIBDIR)/libcurl.la
|
||||
|
||||
lib506_SOURCES = lib506.c $(SUPPORTFILES)
|
||||
lib506_LDADD = $(LIBDIR)/libcurl.la
|
||||
lib506_DEPENDENCIES = $(LIBDIR)/libcurl.la
|
||||
|
||||
|
210 tests/libtest/lib506.c (new file)
@@ -0,0 +1,210 @@
|
||||
#include "test.h"
|
||||
#include <stdlib.h>
|
||||
#include <ctype.h>
|
||||
#include <errno.h>
|
||||
|
||||
const char *HOSTHEADER = "Host: www.host.foo.com";
|
||||
const char *JAR = "log/jar506";
|
||||
#define THREADS 2
|
||||
|
||||
|
||||
/* struct containing data of a thread */
|
||||
struct Tdata {
|
||||
CURLSH *share;
|
||||
char *url;
|
||||
};
|
||||
|
||||
|
||||
/* lock callback */
|
||||
void lock(CURL *handle, curl_lock_data data, curl_lock_access access,
|
||||
void *useptr )
|
||||
{
|
||||
const char *what;
|
||||
(void)handle;
|
||||
(void)access;
|
||||
switch ( data ) {
|
||||
case CURL_LOCK_DATA_SHARE:
|
||||
what = "share ";
|
||||
break;
|
||||
case CURL_LOCK_DATA_DNS:
|
||||
what = "dns ";
|
||||
break;
|
||||
case CURL_LOCK_DATA_COOKIE:
|
||||
what = "cookie";
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "lock: no such data: %d\n",data);
|
||||
return;
|
||||
}
|
||||
printf("lock: %s <%s>\n", what, (char *)useptr);
|
||||
}
|
||||
|
||||
/* unlock callback */
|
||||
void unlock(CURL *handle, curl_lock_data data, void *useptr )
|
||||
{
|
||||
const char *what;
|
||||
(void)handle;
|
||||
switch ( data ) {
|
||||
case CURL_LOCK_DATA_SHARE:
|
||||
what = "share ";
|
||||
break;
|
||||
case CURL_LOCK_DATA_DNS:
|
||||
what = "dns ";
|
||||
break;
|
||||
case CURL_LOCK_DATA_COOKIE:
|
||||
what = "cookie";
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "unlock: no such data: %d\n",data);
|
||||
return;
|
||||
}
|
||||
printf("unlock: %s <%s>\n", what, (char *)useptr);
|
||||
}
|
||||
|
||||
|
||||
/* build host entry */
|
||||
struct curl_slist *sethost(struct curl_slist *headers)
|
||||
{
|
||||
(void)headers;
|
||||
return curl_slist_append(NULL, HOSTHEADER );
|
||||
}
|
||||
|
||||
|
||||
/* the dummy thread function */
|
||||
void *fire(void *ptr)
|
||||
{
|
||||
CURLcode code;
|
||||
struct curl_slist *headers;
|
||||
struct Tdata *tdata = (struct Tdata*)ptr;
|
||||
CURL *curl = curl_easy_init();
|
||||
int i;
|
||||
|
||||
headers = sethost(NULL);
|
||||
curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
|
||||
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, (void*)headers);
|
||||
curl_easy_setopt(curl, CURLOPT_URL, (void*)tdata->url);
|
||||
printf( "CURLOPT_SHARE\n" );
|
||||
curl_easy_setopt(curl, CURLOPT_SHARE, (void*)tdata->share);
|
||||
|
||||
printf( "PERFORM\n" );
|
||||
code = curl_easy_perform(curl);
|
||||
if( code != CURLE_OK ) {
|
||||
fprintf(stderr, "perform url '%s' repeat %d failed, curlcode %d\n",
|
||||
tdata->url, i, code);
|
||||
}
|
||||
|
||||
printf( "CLEANUP\n" );
|
||||
curl_easy_cleanup(curl);
|
||||
curl_slist_free_all(headers);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
/* build request url */
|
||||
char *suburl(char *base, int i)
|
||||
{
|
||||
int len = strlen(base);
|
||||
char *url = (char *)malloc(len+5);
|
||||
if (!url) {
|
||||
abort();
|
||||
}
|
||||
strcpy(url, base);
|
||||
strcat(url, "0000");
|
||||
url[len+3] = 48+i;
|
||||
return url;
|
||||
}
|
||||
|
||||
|
||||
/* test function */
|
||||
CURLcode test(char *URL)
|
||||
{
|
||||
CURLcode res;
|
||||
CURLSHcode scode;
|
||||
char *url;
|
||||
struct Tdata tdata;
|
||||
CURL *curl;
|
||||
CURLSH *share;
|
||||
struct curl_slist *headers;
|
||||
int i;
|
||||
|
||||
printf( "GLOBAL_INIT\n" );
|
||||
curl_global_init( CURL_GLOBAL_ALL );
|
||||
|
||||
/* prepare share */
|
||||
printf( "SHARE_INIT\n" );
|
||||
share = curl_share_init();
|
||||
curl_share_setopt( share, CURLSHOPT_LOCKFUNC, lock);
|
||||
curl_share_setopt( share, CURLSHOPT_UNLOCKFUNC, unlock);
|
||||
curl_share_setopt( share, CURLSHOPT_USERDATA, "Pigs in space");
|
||||
printf( "CURL_LOCK_DATA_COOKIE\n" );
|
||||
curl_share_setopt( share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);
|
||||
printf( "CURL_LOCK_DATA_DNS\n" );
|
||||
curl_share_setopt( share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
|
||||
|
||||
res = 0;
|
||||
|
||||
/* start treads */
|
||||
for (i=1; i<=THREADS; i++ ) {
|
||||
|
||||
/* set thread data */
|
||||
tdata.url = suburl( URL, i ); /* must be freed */
|
||||
tdata.share = share;
|
||||
|
||||
/* simulate thread, direct call of "thread" function */
|
||||
printf( "*** run %d\n",i );
|
||||
fire( &tdata );
|
||||
|
||||
free( tdata.url );
|
||||
|
||||
}
|
||||
|
||||
|
||||
/* fetch a another one and save cookies */
|
||||
printf( "*** run %d\n", i );
|
||||
curl = curl_easy_init();
|
||||
|
||||
url = suburl( URL, i );
|
||||
headers = sethost( NULL );
|
||||
curl_easy_setopt( curl, CURLOPT_HTTPHEADER, (void*)headers );
|
||||
curl_easy_setopt( curl, CURLOPT_URL, url );
|
||||
printf( "CURLOPT_SHARE\n" );
|
||||
curl_easy_setopt( curl, CURLOPT_SHARE, share );
|
||||
printf( "CURLOPT_COOKIEJAR\n" );
|
||||
curl_easy_setopt( curl, CURLOPT_COOKIEJAR, JAR );
|
||||
|
||||
printf( "PERFORM\n" );
|
||||
curl_easy_perform( curl );
|
||||
|
||||
/* try to free share, expect to fail because share is in use*/
|
||||
printf( "try SHARE_CLEANUP...\n" );
|
||||
scode = curl_share_cleanup( share );
|
||||
if ( scode==CURLSHE_OK )
|
||||
{
|
||||
fprintf(stderr, "curl_share_cleanup succeed but error expected\n");
|
||||
share = NULL;
|
||||
} else {
|
||||
printf( "SHARE_CLEANUP failed, correct\n" );
|
||||
}
|
||||
|
||||
/* clean up last handle */
|
||||
printf( "CLEANUP\n" );
|
||||
curl_easy_cleanup( curl );
|
||||
curl_slist_free_all( headers );
|
||||
free(url);
|
||||
|
||||
|
||||
/* free share */
|
||||
printf( "SHARE_CLEANUP\n" );
|
||||
scode = curl_share_cleanup( share );
|
||||
if ( scode!=CURLSHE_OK )
|
||||
{
|
||||
fprintf(stderr, "curl_share_cleanup failed, code errno %d\n", scode);
|
||||
}
|
||||
|
||||
printf( "GLOBAL_CLEANUP\n" );
|
||||
curl_global_cleanup();
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@@ -45,7 +45,15 @@ while(<FILE>) {
|
||||
chomp $_;
|
||||
$line = $_;
|
||||
|
||||
if($line =~ /^MEM ([^ ]*):(\d*) (.*)/) {
|
||||
if($line =~ /^LIMIT ([^ ]*):(\d*) (.*)/) {
|
||||
# new memory limit test prefix
|
||||
my $i = $3;
|
||||
my ($source, $linenum) = ($1, $2);
|
||||
if($trace && ($i =~ /([^ ]*) reached memlimit/)) {
|
||||
print "LIMIT: $1 returned error at $source:$linenum\n";
|
||||
}
|
||||
}
|
||||
elsif($line =~ /^MEM ([^ ]*):(\d*) (.*)/) {
|
||||
# generic match for the filename+linenumber
|
||||
$source = $1;
|
||||
$linenum = $2;
|
||||
|
@@ -452,6 +452,22 @@ sub checkcurl {
|
||||
|
||||
$curl =~ s/^(.*)(libcurl.*)/$1/g;
|
||||
$libcurl = $2;
|
||||
|
||||
if ($curl =~ /win32/)
|
||||
{
|
||||
# Native Windows builds don't understand the
|
||||
# output of cygwin's pwd. It will be
|
||||
# something like /cygdrive/c/<some path>.
|
||||
#
|
||||
# Use the cygpath utility to convert the
|
||||
# working directory to a Windows friendly
|
||||
# path. The -m option converts to use drive
|
||||
# letter:, but it uses / instead \. Forward
|
||||
# slashes (/) are easier for us. We don't
|
||||
# have to escape them to get them to curl
|
||||
# through a shell.
|
||||
chomp($pwd = `cygpath -m $pwd`);
|
||||
}
|
||||
}
|
||||
elsif($_ =~ /^Protocols: (.*)/i) {
|
||||
# these are the supported protocols, we don't use this knowledge
|
||||
@@ -543,7 +559,7 @@ sub singletest {
|
||||
}
|
||||
|
||||
$why = "lacks $f";
|
||||
$serverproblem = 5; # set it here
|
||||
$serverproblem = 15; # set it here
|
||||
last;
|
||||
}
|
||||
|
||||
@@ -554,7 +570,6 @@ sub singletest {
|
||||
if($serverproblem) {
|
||||
# there's a problem with the server, don't run
|
||||
# this particular server, but count it as "skipped"
|
||||
my $why;
|
||||
if($serverproblem == 2) {
|
||||
$why = "server problems";
|
||||
}
|
||||
@@ -564,6 +579,12 @@ sub singletest {
|
||||
elsif($serverproblem == 99) {
|
||||
$why = "bad test";
|
||||
}
|
||||
elsif($serverproblem == 15) {
|
||||
# set above, a lacking prereq
|
||||
}
|
||||
elsif($serverproblem == 1) {
|
||||
$why = "no SSL-capable server";
|
||||
}
|
||||
else {
|
||||
$why = "unfulfilled requirements";
|
||||
}
|
||||
@@ -669,18 +690,9 @@ sub singletest {
|
||||
# make some nice replace operations
|
||||
$cmd =~ s/\n//g; # no newlines please
|
||||
|
||||
# substitute variables in the command line
|
||||
subVariables \$cmd;
|
||||
|
||||
# $cmd =~ s/%HOSTIP/$HOSTIP/g;
|
||||
# $cmd =~ s/%HOSTPORT/$HOSTPORT/g;
|
||||
# $cmd =~ s/%HTTPSPORT/$HTTPSPORT/g;
|
||||
# $cmd =~ s/%FTPPORT/$FTPPORT/g;
|
||||
# $cmd =~ s/%FTPSPORT/$FTPSPORT/g;
|
||||
# $cmd =~ s/%SRCDIR/$srcdir/g;
|
||||
# $cmd =~ s/%PWD/$pwd/g;
|
||||
|
||||
#$cmd =~ s/%HOSTNAME/$HOSTNAME/g;
|
||||
|
||||
if($curl_debug) {
|
||||
unlink($memdump);
|
||||
}
|
||||
@@ -709,10 +721,8 @@ sub singletest {
|
||||
|
||||
my $out="";
|
||||
|
||||
if($cmdhash{'option'} eq "no-output") {
|
||||
#print "*** We don't slap on --output\n";
|
||||
}
|
||||
else {
|
||||
if($cmdhash{'option'} !~ /no-output/) {
|
||||
#We may slap on --output!
|
||||
if (!@validstdout) {
|
||||
$out=" --output $CURLOUT ";
|
||||
}
|
||||
@@ -746,7 +756,7 @@ sub singletest {
|
||||
$DBGCURL=$CMDLINE;
|
||||
}
|
||||
|
||||
$CMDLINE .= "$cmdargs >$STDOUT 2>$STDERR";
|
||||
$CMDLINE .= "$cmdargs >>$STDOUT 2>>$STDERR";
|
||||
|
||||
if($verbose) {
|
||||
print "$CMDLINE\n";
|
||||
@@ -958,7 +968,7 @@ sub singletest {
|
||||
# 100 if this is not a test case
|
||||
# 99 if this test case has no servers specified
|
||||
# 2 if one of the required servers couldn't be started
|
||||
# 1 if this test is skipped due to unfulfilled requirements
|
||||
# 1 if this test is skipped due to unfulfilled SSL/stunnel-requirements
|
||||
|
||||
sub serverfortest {
|
||||
my ($testnum)=@_;
|
||||
@@ -1227,6 +1237,8 @@ for(keys %run) {
|
||||
stopserver($run{$_}); # the pid file is in the hash table
|
||||
}
|
||||
|
||||
my $all = $total + $skipped;
|
||||
|
||||
if($total) {
|
||||
printf("TESTDONE: $ok tests out of $total reported OK: %d%%\n",
|
||||
$ok/$total*100);
|
||||
@@ -1238,14 +1250,18 @@ if($total) {
|
||||
else {
|
||||
print "TESTFAIL: No tests were performed!\n";
|
||||
}
|
||||
|
||||
if($all) {
|
||||
print "TESTDONE: $all tests were considered.\n";
|
||||
}
|
||||
|
||||
if($skipped) {
|
||||
my $s=0;
|
||||
print "TESTINFO: $skipped tests were skipped due to these restraints:\n";
|
||||
|
||||
for(keys %skipped) {
|
||||
print ", " if($s);
|
||||
my $r = $_;
|
||||
printf "TESTINFO: \"%s\" happened %d times (", $r, $skipped{$_};
|
||||
printf "TESTINFO: \"%s\" %d times (", $r, $skipped{$_};
|
||||
|
||||
# now show all test case numbers that had this reason for being
|
||||
# skipped
|
||||
@@ -1259,7 +1275,6 @@ if($skipped) {
|
||||
}
|
||||
}
|
||||
print ")\n";
|
||||
$s++;
|
||||
}
|
||||
}
|
||||
if($total && ($ok != $total)) {
|
||||
|
@@ -20,7 +20,7 @@ char *appendstring(char *string, /* original string */
|
||||
{
|
||||
int len = strlen(buffer);
|
||||
|
||||
if((len + *stringlen) > *stralloc) {
|
||||
if((len + *stringlen) >= *stralloc) {
|
||||
char *newptr= realloc(string, *stralloc*2);
|
||||
if(newptr) {
|
||||
string = newptr;
|
||||
|