Compare commits

...

101 Commits

Author SHA1 Message Date
Daniel Stenberg
5d39dde961 pre-release commit 2000-05-30 16:31:46 +00:00
Daniel Stenberg
15c143bba9 modified to the new curl_getenv() style 2000-05-29 23:09:31 +00:00
Daniel Stenberg
6d522c9c1d made getenv() more threadsafe for win32 2000-05-29 23:07:22 +00:00
Daniel Stenberg
45885f30c2 updated 2000-05-29 22:54:17 +00:00
Daniel Stenberg
1ea12e705e had to add this since it was missing in the system includes! 2000-05-29 22:52:17 +00:00
Daniel Stenberg
a0ce95e155 David LeBlanc's fixes! 2000-05-29 22:51:13 +00:00
Daniel Stenberg
abc751ae13 generated with bison 1.28 which removes the alloca() mess! 2000-05-29 22:50:40 +00:00
Daniel Stenberg
b6d06a9590 URLGET_ERROR_SIZE is now CURL_ERROR_SIZE 2000-05-26 13:58:10 +00:00
Daniel Stenberg
784e9406ae silly mistake corrected 2000-05-26 13:57:11 +00:00
Daniel Stenberg
9e157ad2ec added the new man pages docs/*.3 2000-05-26 13:55:13 +00:00
Daniel Stenberg
85e6e935c5 updated 2000-05-26 11:59:43 +00:00
Daniel Stenberg
9a85060cde more documented stuff 2000-05-26 08:50:13 +00:00
Daniel Stenberg
fb3a5ab6ee fixed the CURL_ERROR_SIZE and renamed a urlget() leftover 2000-05-26 08:12:14 +00:00
Daniel Stenberg
52585e8b31 Added a list of stuff to do for version 7 2000-05-26 07:48:38 +00:00
Daniel Stenberg
3ad1df668e AIX wants sys/time.h included 2000-05-25 15:18:34 +00:00
Daniel Stenberg
d5bc98fcec added sys/time.h check 2000-05-25 15:18:02 +00:00
Daniel Stenberg
c10684ac3f fixed the new path to the curl man page 2000-05-24 22:56:00 +00:00
Daniel Stenberg
f81e775954 fixed the new docs path 2000-05-24 22:53:37 +00:00
Daniel Stenberg
e92a10c36f now splits the text into several puts() calls 2000-05-23 10:25:30 +00:00
Daniel Stenberg
9b20d0a47f Jörn's fixes mentioned 2000-05-22 19:15:18 +00:00
Daniel Stenberg
67fea90f3f moved many docs to the new docs/ dir 2000-05-22 19:13:37 +00:00
Daniel Stenberg
ceda25398b more changes! ;-) 2000-05-22 19:11:39 +00:00
Daniel Stenberg
285994a5ae slightly adjusted 2000-05-22 19:10:53 +00:00
Daniel Stenberg
e27e490c7e oops 2000-05-22 19:09:31 +00:00
Daniel Stenberg
dbd864a2a6 adjusted to work even without RTLD_GLOBAL defined 2000-05-22 19:04:18 +00:00
Daniel Stenberg
dc98405114 libcurl v7 adjustments 2000-05-22 19:02:54 +00:00
Daniel Stenberg
7c37c6a8e9 moved here from the root directory 2000-05-22 17:35:35 +00:00
Daniel Stenberg
4341671545 moved to the new docs/ directory 2000-05-22 17:33:31 +00:00
Daniel Stenberg
2bd72eb53d Jörn's fixes 2000-05-22 17:20:29 +00:00
Daniel Stenberg
c35238e0a3 Jörn Hartroth's updates 2000-05-22 17:18:55 +00:00
Daniel Stenberg
39b1801c63 moved here from the newlib branch 2000-05-22 17:17:57 +00:00
Daniel Stenberg
98e5d82a34 unused 2000-05-22 14:13:05 +00:00
Daniel Stenberg
96dde76b99 moved here from the newlib branch 2000-05-22 14:12:12 +00:00
Daniel Stenberg
fb9d1ff00f files moved to main branch from the newlib branch 2000-05-22 14:09:31 +00:00
Daniel Stenberg
c9053bf3d4 moved the win32 init stuff here from the lib 2000-05-17 21:21:10 +00:00
Daniel Stenberg
601b8e78c7 "Will you write a script for me getting ZZZ from YYY?" added 2000-05-14 15:33:47 +00:00
Daniel Stenberg
bc10563c98 Added BOTH nsl+socket lib check
Added HOST and TARGET checks
Added automatic update of the *spec.in files (Linux RPM stuff)
2000-05-14 15:21:45 +00:00
Daniel Stenberg
5330a85133 the last days' changes 2000-05-14 13:36:38 +00:00
Daniel Stenberg
95c2534a95 string checks added 2000-05-14 13:33:53 +00:00
Daniel Stenberg
febd0e7059 lots of stuff 2000-05-14 13:12:11 +00:00
Daniel Stenberg
984878ed7a Removed all #ifdef GLOBURL lines, we always use them 2000-05-09 22:42:53 +00:00
Daniel Stenberg
de275780a9 Support for systems where RTLD_LAZY_GLOBAL is defined instead of
RTLD_GLOBAL and RTLD_LAZY separately.
2000-05-09 22:23:55 +00:00
Daniel Stenberg
b64dd9c13f changed the glob_url() call, after Janne Johansson's buffer overflow report 2000-05-09 12:29:28 +00:00
Daniel Stenberg
fac113a275 configure having problem with openssl 2000-04-24 15:20:58 +00:00
Daniel Stenberg
89f05410d9 added a missing prototype, removed unused code 2000-04-11 21:48:33 +00:00
Daniel Stenberg
0d12792514 now uses a tiny bit of the new library interface! 2000-04-11 21:47:28 +00:00
Daniel Stenberg
7aae77f0fd Added Darwin to the list of platforms 2000-04-10 18:05:29 +00:00
Daniel Stenberg
72cb0f7a69 updated the ports list 2000-04-08 20:45:42 +00:00
Daniel Stenberg
d0e640deba Added the *spec.in files 2000-04-08 19:33:11 +00:00
Daniel Stenberg
f9ec7d3a0f new files to generate .spec files automatically 2000-04-08 19:32:40 +00:00
Daniel Stenberg
75cd7f482d now sends user agent to all requests that go through a http proxy 2000-04-08 19:29:58 +00:00
Daniel Stenberg
eb856b04fe Improved the looks of the progress meter when the file size is between
9999 KB and 100 MB since it then can display the size as XX.X MB instead of
just XX as before.
2000-04-08 19:28:23 +00:00
Daniel Stenberg
cd9ad9e54b some rpm build improvements
adjusted progress bar
user agent sent when talking non-http through a http proxy
2000-04-08 19:27:06 +00:00
Daniel Stenberg
c30a3913b5 Marco's fix got adjusted 2000-04-04 18:08:34 +00:00
Daniel Stenberg
d620ada259 two location problems and one OS/2 patch 2000-04-04 17:47:05 +00:00
Daniel Stenberg
00ad88408d Marco G. Salvagno's OS/2 patch is applied 2000-04-04 17:44:24 +00:00
Daniel Stenberg
d62d25bdb4 Marco G. Salvagno's patch applied 2000-04-04 17:42:43 +00:00
Daniel Stenberg
ae6a5018dd added INTERNALS 2000-04-02 18:28:26 +00:00
Daniel Stenberg
c0c225b938 describes internal code design 2000-04-02 18:09:50 +00:00
Daniel Stenberg
0059911261 James Atwill correctly pointed out that curl didn't follow Location: headers
properly when the new URL is an absolute one to a different port than the
first URL...
2000-04-02 12:08:12 +00:00
Daniel Stenberg
e67157b5a2 Added some more explanatory text about HTTP posts 2000-03-27 21:42:40 +00:00
Daniel Stenberg
2f668aba96 "H. Daphne Luong" <daphne@tellme.com> pointed out that curl cannot destroy
the proxy server data internally as it did, since when doing a location:
following it needs the proxy server string several times.
2000-03-27 21:36:05 +00:00
Daniel Stenberg
4a33436663 spelling errors corrected 2000-03-23 11:02:08 +00:00
Daniel Stenberg
0eb1f1e5d0 download.c, configure, VC fix and OS/2 fix 2000-03-23 10:43:14 +00:00
Daniel Stenberg
9849c76d88 yet another tiny OS/2 fix 2000-03-23 10:41:39 +00:00
Daniel Stenberg
e541da93fe in case the select() returns -1 and errno is EINTR, it should not abort
the download (MT-adjustment)
2000-03-23 10:41:16 +00:00
Daniel Stenberg
0fac349c62 Wham Bang's fixes 2000-03-23 10:40:14 +00:00
Daniel Stenberg
83acbda569 Troy Engel's latest updates 2000-03-23 10:39:38 +00:00
Daniel Stenberg
1acda9ef9c HAVE_UNAME was bad
HAVE_UNISTD_H is now not set if VC6 is defined
2000-03-23 10:39:00 +00:00
Daniel Stenberg
20161c38f2 Added a question about other SSL libraries 2000-03-23 10:36:49 +00:00
Daniel Stenberg
7b6394b75a just a little reformatted 2000-03-23 09:33:03 +00:00
Daniel Stenberg
d1a1fcc6f1 removed the double BSD install check 2000-03-21 17:12:27 +00:00
Daniel Stenberg
475869a612 6.5.2 release commit 2000-03-21 15:37:13 +00:00
Daniel Stenberg
a2d2569c7d release commit time 2000-03-21 14:23:45 +00:00
Daniel Stenberg
9e4d9346a7 curl_unescape() update 2000-03-21 14:18:39 +00:00
Daniel Stenberg
9efd212745 reformatted, added a custom FTP command question 2000-03-20 11:27:29 +00:00
Daniel Stenberg
d75b87605d changed the URL to use http:// instead 2000-03-20 10:31:08 +00:00
Daniel Stenberg
211b9e552d curl_unescape() could make a buffer overflow 2000-03-20 10:22:12 +00:00
Daniel Stenberg
bc5c4b8953 openbsd complaints fixes
http_code in -w fix
MS VC++ fixes
documentation updates
-D update
OS/2 port
2000-03-20 09:25:18 +00:00
Daniel Stenberg
7826133bff modified to look better when man2html'ified 2000-03-20 09:21:31 +00:00
Daniel Stenberg
ba37e24abd generated from the new getdate.y file 2000-03-20 09:11:29 +00:00
Daniel Stenberg
fe43488fc5 commented the CFLAGS that was left in here by mistake 2000-03-20 09:10:51 +00:00
Daniel Stenberg
fb071e04c1 contributor Marco G. Salvagno added 2000-03-20 09:10:10 +00:00
Daniel Stenberg
f78a69b7d5 Marco G. Salvagno's paragraph about compiling for OS/2 is added 2000-03-20 09:09:12 +00:00
Daniel Stenberg
f9a839d906 Marco G. Salvagno's OS/2 changes 2000-03-19 19:55:02 +00:00
Daniel Stenberg
ff3fd842d8 Marco G. Salvagno's OS/2 fixes 2000-03-19 19:54:13 +00:00
Daniel Stenberg
35140201b5 updated to compile 6.5 2000-03-17 17:18:40 +00:00
Daniel Stenberg
b4fc921a0a how to report curl bugs! 2000-03-17 11:45:20 +00:00
Daniel Stenberg
339bdd1e08 corrected the CVS info 2000-03-16 15:21:14 +00:00
Daniel Stenberg
e3ef8b66a1 the --dump-header option now only creates the file when it needs to, not any
sooner
2000-03-16 11:43:10 +00:00
Daniel Stenberg
739b3f241d Added the forgotten http_code! 2000-03-16 11:41:56 +00:00
Daniel Stenberg
d48939c0c0 makes use of HAVE_RAND_SCREEN instead of WIN32 for using RAND_screen() 2000-03-16 11:41:27 +00:00
Daniel Stenberg
0aa3f705c2 removed an unnecessary #ifdef WIN32 2000-03-16 11:40:48 +00:00
Daniel Stenberg
14253f34f4 if stdlib.h exists, malloc.h should not be included (thus stop OpenBSD
complaints)
2000-03-16 11:40:15 +00:00
Daniel Stenberg
ab05797500 cleaned up some #ifdef mess, now uses data->fwrite() to write headers 2000-03-16 11:39:31 +00:00
Daniel Stenberg
8629719e0e now writers the headers with the data->fwrite() function as well 2000-03-16 11:38:32 +00:00
Daniel Stenberg
570b4c3b59 as Pascal Gaudette pointed out, the ldap files were missing 2000-03-16 11:35:48 +00:00
Daniel Stenberg
333c21b8cc tiny fixes 2000-03-16 11:35:03 +00:00
Daniel Stenberg
8898ff9e04 Added checks for RAND_screen, malloc.h and stdlib.h 2000-03-16 11:34:19 +00:00
Daniel Stenberg
912fd9b165 RAND_SCREEN is only available for win32 SSL users 2000-03-16 11:33:39 +00:00
Daniel Stenberg
5992252b3d updates and fixes 2000-03-16 11:32:53 +00:00
94 changed files with 5610 additions and 2601 deletions

210
CHANGES
View File

@@ -6,7 +6,189 @@
History of Changes
Version XX
Daniel (29 May 2000)
- Corrected the bits.* assignments when using CURLOPT options that only
toggles one of those bits.
- Applied the huge patches from David LeBlanc <dleblanc at qnx.com> that add
usage of the gethostbyname_r() and similar functions in case they're around,
since that make libcurl much better threadsafe in many systems (such as
solaris). I added the checks for these functions to the configure script.
I can't explain why, but the inet_ntoa_r() function did not appear in my
Solaris include files, I had to add my own include file for this for now.
Daniel (22 May 2000)
- Jörn Hartroth brought me fixes to make the win32 version compile properly as
well as a rename of the 'interface' field in the urldata struct, as it seems
to be reserved in some gcc versions!
- Rich Gray struck back with yet some portability reports. Data General DG/UX
needed a little fix in lib/ldap.c since it doesn't have RTLD_GLOBAL defined.
More fixes are expected as a result of Richies very helpful work.
Version 7.0.1beta released
Daniel (21 May 2000)
- Updated lots of #defines, enums and variable type names in the library. No
more weird URG or URLTAG prefixes. All types and names should be curl-
prefixed to avoid name space clashes. The FLAGS-parameter to the former
curl_urlget() has been converted into a bunch of flags to use in separate
setopt calls. I'm still focusing on the easy-interface, as the curl tool is
now using that.
- Bjorn Reese has provided me with an asynchronous name resolver that I plan
to use in upcoming versions of curl to be able to gracefully timeout name
lookups.
Version 7.0beta released
Daniel (18 May 2000)
- Introduced LIBCURL_VERSION_NUM to the curl.h include file to better allow
source codes to be dependent on the lib version. This define is now set to
a hexadecimal number, with 8 bits each for major number, minor number and
patch number. In other words, version 1.2.3 would make it 0x010203. It also
makes a larger number a newer version.
Daniel (17 May 2000)
- Martin Kammerhofer correctly pointed out several flaws in the FTP range
option. I corrected them.
- Removed the win32 winsock init crap from the lib to the src/main.c file
in the application instead. They can't be in the lib, especially not for
multithreaded purposes.
Daniel (16 May 2000)
- Rewrote the src/main.c source to use the new easy-interface to libcurl 7.
There is still more work to do, but the first step is now taken.
<curl/easy.h> is the include file to use.
Daniel (14 May 2000)
- FTP URLs are now treated slightly different, more according to RFC 1738.
- FTP sessions are now performed differently, with CWD commands to change
directory instead of RETR/STOR/LIST with the full path. Discussions with
Rich Gray made me notice these problems.
- Janne Johansson discovered and corrected a buffer overflow in the
src/usrglob.c file.
- I had to add a lib/strequal.c file for doing case insensitive string
compares on all platforms.
Daniel (8 May 2000):
- Been working lots on the new lib.
- Together with Rich Gray, I've tried to adjust the configure script to work
better on the NCR MP-RAS Unix.
Daniel (2 May 2000):
- Albert Chin-A-Young pointed out that I had a few too many instructions in
configure.in that didn't do any good.
Daniel (24 April 2000):
- Added a new paragraph to the FAQ about what to do when configure can't
find OpenSSL even though it is installed. Supplied by Bob Allison
<allisonb@users.sourceforge.net>.
Daniel (12 April 2000):
- Started messing around big-time to convert the old library interface to a
better one...
Daniel (8 April 2000):
- Made the progress bar look better for file sizes between 9999 kilobytes
and 100 megabytes. They're now displayed XX.XM.
- I also noticed that ftp fetches through HTTP proxies didn't add the user
agent string. It does now.
- Habibie <habibie@MailandNews.com> supplied a pretty good way to build RPMs
on a Linux machine. It still a) requires me to be root to do it, b) leaves
the rpm packages laying at some odd place on my disk c) doesn't work to
build the ssl version of curl since I didn't install openssl from an rpm
package so now the rpm crap thinks I don't have openssl and refuses to build
a package that depends on ssl... Did I mention I don't get along with RPM?
- Once again I received a bug report about autoconf not setting -L prior to -l
on the command line when checking for libs. In this case it made the native
cc compiler on Solaris 7 to fail the OpenSSL check. This has previously been
reported to cause problems on HP-UX and is a known flaw in autoconf 2.13. It
is a pity there's no newer release around...
Daniel (4 April 2000):
- Marco G. Salvagno <mgs@whiz.cjb.net> supplied me with two fixes that
apparently makes the OS/2 port work better with multiple URLs.
Daniel (2 April 2000):
- Another Location: fix. This time, when curl connected to a port and then
followed a location with an absolute URL to another port, it misbehaved.
Daniel (27 March 2000):
- H. Daphne Luong <daphne@tellme.com> pointed out that curl was wrongly
messing up the proxy string when fetching a document through a http proxy,
which screwed up multiple fetches such as in location: followings.
Daniel (23 March 2000):
- Marco G. Salvagno <mgs@whiz.cjb.net> corrected my badly applied patch he
actually already told me about!
- H. Daphne Luong <daphne@tellme.com> brought me a fix that now makes curl
ignore select() errors in the download if errno is EINTR, which turns out to
happen every now and then when using libcurl multi-threaded...
Daniel (22 March 2000):
- Wham Bang <wham_bang@yahoo.com> supplied a couple of win32 fixes. HAVE_UNAME
was accidentally #defined in config-win32.h, which it shouldn't have been.
The HAVE_UNISTD_H is not defined when compiling with the Makefile.vc6
makefile for MS VC++.
Daniel (21 March 2000):
- I removed the AC_PROG_INSTALL macro from configure.in, since it appears that
one of the AM_* macros searches for a BSD compatible install already. Janne
Johansson made me aware of this.
Version 6.5.2
Daniel (21 March 2000):
- Paul Harrington <paul@pizza.org> quickly pointed out to me that 6.5.1
crashes hard. I upload 6.5.2 now as quickly as possible! The problem was
the -D adjustments in src/main.c.
Version 6.5.1
Daniel (20 March 2000):
- An anonymous post on sourceforge correctly pointed out a possible buffer
overflow in the curl_unescape() function for URL conversions. The main
problem with this bug is that the ftp download uses that function and this
single-byte overflow could lead to very odd bugs (as one reported by Janne
Johansson).
Daniel (19 March 2000):
- Marco G. Salvagno <mgs@whiz.cjb.net> supplied me with a series of patches
that now allows curl to get compiled on OS/2. It even includes a section in
the INSTALL file. Very nice job!
Daniel (17 March 2000):
- Wham Bang <wham_bang@yahoo.com> supplied a patch for the lib/Makefile.vc6
file. We still need some fixes for the config-win32.h since it appears that
VC++ and mingw32 have different opinions about (at least) unistd.h's
existence.
Daniel (15 March 2000):
- I modified the -D/--dump-header workings so that it doesn't write anything
to the file until it needs to. This way, you can actually use -b and -D
on the same file if you want repeated invokes to store and read the cookies
in that one single file.
- Poked around in lots of texts. Added the BUGS file for bug reporting stuff.
Added the classic HTTP POST question to the FAQ, removed some #ifdef WIN32
stuff from the sources (they're covered by the config-win32.h now).
- Pascal Gaudette <pascal@gaudette.org> fixed a missing ldap.c problem in the
Makefile.vc6 file. He also addressed a problem in src/config-win32.h.
Daniel (14 March 2000):
- Paul Harrington pointed out that the 'http_code' variable in the -w output
was never written. I fixed it now.
- Janne Johansson <jj@dynarc.se> reported the complaints that OpenBSD does
when getdate.c #includes malloc.h. It claims stdlib.h should be included
instead. I added #ifdef HAVE_MALLOC_H code in getdate.y and two checks in
the configure.in for malloc.h and stdlib.h.
Version 6.5
Daniel (13 March 2000):
- <curl@spam.wolvesbane.net> pointed out that the way curl sent cookies in a
@@ -35,13 +217,13 @@ Daniel (2 March 2000):
- Jörn Hartroth <Joern.Hartroth@telekom.de>, Chris <cbayliss@csc.come> and Ulf
Möller from the openssl development team helped bringing me the details for
fixing an OpenSSL usage flaw. It became appearant when they released openssl
0.9.5 since that barfed on curl's bad behaviour (not seeding a random number
fixing an OpenSSL usage flaw. It became apparent when they released openssl
0.9.5 since that barfed on curl's bad behavior (not seeding a random number
thing).
- Yet another option: -N/--no-buffer disables buffering in the output stream.
Probably most useful for very slow transfers when you really want to get
every byte curl receives within some prefered time. Andrew <tmr@gci.net>
every byte curl receives within some preferred time. Andrew <tmr@gci.net>
suggested this.
- Damien Adant <dams@usa.net> mailed me his fixes for making curl compile
@@ -74,7 +256,7 @@ Daniel (21 February 2000):
- I added the -w/--write-out flag and some variables to go with it. -w is a
single string, whatever you enter there will be written out when curl has
completed a successful request. There are some variable substitutions and
they are specifed as '%{variable}' (without the quotes). Variables that
they are specified as '%{variable}' (without the quotes). Variables that
exist as of this moment are:
total_time - total transfer time in seconds (with 2 decimals)
@@ -108,7 +290,7 @@ Daniel (11 February 2000):
- Eetu Ojanen <esojanen@jyu.fi>'s suggestion of supporting the @-style for -b
is implemented. Now -b@<filename> works as well as the old style. -b@- also
similarily reads the cookies from stdin.
similarly reads the cookies from stdin.
- Reminder: -D should not write to the file until it needs to, in the same way
-o does. That would enable curl to use -b and -D on the same file...
@@ -116,7 +298,7 @@ Daniel (11 February 2000):
- Ellis Pritchard <ellis@citria.com> made getdate.y work for MacOS X.
- Paul Harrington <paul@pizza.org> helped me out finding the crash in the
cookie parser. He also pointed out curl's habbit of sending empty cookies to
cookie parser. He also pointed out curl's habit of sending empty cookies to
the server.
Daniel (8 February 2000):
@@ -146,7 +328,7 @@ Daniel (31 January 2000):
and let them get "uploaded" in Transfer() as well.
- Zhibiao Wu <wuzb@erols.com> pointed out a curl bug in the location: area,
although I did not get a reproducable way to do this why I have to wait
although I did not get a reproducible way to do this why I have to wait
with fixing anything.
- Bob Schader <rschader@product-des.com> suggested I should implement resume
@@ -201,7 +383,7 @@ Daniel (10 January 2000):
- Jim Gallagher <jmgallag@usa.net> properly tracked down a bug in autoconf
2.13. The AC_CHECK_LIB() macro wrongfully uses the -l flag before the -L
flag to 'ld' which causes the HP-UX 10.20 flavour to fail on all libchecks
and thefore you can't make the configure script find the openssl libs!
and therefore you can't make the configure script find the openssl libs!
Daniel (28 December 1999):
- Tim Verhoeven <dj@walhalla.sin.khk.be> correctly identified that curl
@@ -215,7 +397,7 @@ Daniel (28 December 1999):
Daniel (27 December 1999):
- When curl parsed cookies straight off a remote site, it corrupted the input
data, which, if the downloaded headers were stored made very odd characters
in the saved data. Correctfully identified and reported by Paul Harrington
in the saved data. Correctly identified and reported by Paul Harrington
<paul@pizza.org>.
Daniel (13 December 1999):
@@ -294,7 +476,7 @@ Version 6.3
Daniel (8 November 1999):
- I made the -b/--cookie option capable of reading netscape formatted cookie
files as well as normal http-header files. It should be able to
transparantly figure out what kind of file it got as input.
transparently figure out what kind of file it got as input.
Daniel (29 October 1999):
- Another one of Sebastiaan van Erk's ideas (that has been requested before
@@ -314,7 +496,7 @@ Version 6.3
makes sense since if you access ftp through a HTTP proxy, you'd get the
file size the same way.
I changed the order of the QUOTE command execusions. They're now executed
I changed the order of the QUOTE command executions. They're now executed
just after the login and before any other command. I made this to enable
quote commands to run before the -I stuff is done too.
@@ -560,7 +742,7 @@ Version 5.10
OpenSSL. Now why couldn't they simply add a *new* function instead of
modifying the parameters of an already existing function? This way, we get
a compiler warning if compiling with 0.9.4 but not with earlier. So, I had
to come up with a #if contruction that deals with this...
to come up with a #if construction that deals with this...
- Made curl output the SSL version number get displayed properly with 0.9.4.
@@ -644,7 +826,7 @@ Version 5.10
T. Yamada <tai@imasy.or.jp> (30 July 1999)
- It breaks with segfault when 1) curl is using .netrc to obtain
username/password (option '-n'), and 2) is auto-matically redirected to
username/password (option '-n'), and 2) is automatically redirected to
another location (option '-L').
There is a small bug in lib/url.c (block starting from line 641), which

31
FAQ
View File

@@ -1,31 +0,0 @@
Date: 19 November 1999
Frequently Asked Questions about Curl
1. Problems connecting to SSL servers.
It took a very long time before I could sort out why curl had problems
to connect to certain SSL servers when using SSLeay or OpenSSL v0.9+.
The error sometimes showed up similar to:
16570:error:1407D071:SSL routines:SSL2_READ:bad mac decode:s2_pkt.c:233:
It turned out to be because many older SSL servers don't deal with SSLv3
requests properly. To correct this problem, tell curl to select SSLv2 from
the command line (-2/--sslv2).
I have also seen examples where the remote server didn't like the SSLv2
request and instead you had to force curl to use SSLv3 with -3/--sslv3.
2. Does curl support resume?
Yes. Both ways on FTP, download ways on HTTP.
3. Is libcurl thread safe?
Yes, as far as curl's own code goes. It does use system calls that often
aren't thread safe in most environments, such as gethostbyname().
I am very interested in once and for all getting some kind of report or
README file from those who have used libcurl in a threaded environment,
since I haven't and I get this question more and more frequently!

22
FILES
View File

@@ -1,18 +1,22 @@
CHANGES
CONTRIBUTE
FEATURES
FAQ
FILES
INSTALL
LEGAL
MPL-1.0.txt
README
README.curl
README.libcurl
curl.1
*spec
RESOURCES
TODO
*spec.in
docs/BUGS
docs/CONTRIBUTE
docs/FAQ
docs/FEATURES
docs/INSTALL
docs/INTERNALS
docs/README.curl
docs/README.libcurl
docs/RESOURCES
docs/TODO
docs/curl.1
docs/*.3
maketgz
Makefile.in
Makefile.am

View File

@@ -4,7 +4,7 @@
AUTOMAKE_OPTIONS = foreign no-dependencies
man_MANS = curl.1
man_MANS = docs/curl.1
EXTRA_DIST = $(man_MANS)

16
README
View File

@@ -26,3 +26,19 @@ README
Sweden -- ftp://ftp.sunet.se/pub/www/utilities/curl/
Germany -- ftp://ftp.fu-berlin.de/pub/unix/network/curl/
China -- http://www.pshowing.com/curl/
To download the very latest source off the CVS server do this:
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl login
(just press enter when asked for password)
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl co .
(now, you'll get all the latest sources downloaded into your current
directory. Note that this does not create a directory named curl or
anything)
cvs -d :pserver:anonymous@cvs.curl.sourceforge.net:/cvsroot/curl logout
(you're off the hook!)

View File

@@ -1,108 +0,0 @@
_ _ _ _
| (_) |__ ___ _ _ _ __| |
| | | '_ \ / __| | | | '__| |
| | | |_) | (__| |_| | | | |
|_|_|_.__/ \___|\__,_|_| |_|
How To Use Libcurl In Your Program:
(by Ralph Beckmann <rabe@uni-paderborn.de>)
NOTE: If you plan to use libcurl.a in Threads under Linux, do not use the old
gcc-2.7.x because the function 'gethostbyname' seems not to be thread-safe,
that is to say an unavoidable SEGMENTATION FAULT might occur.
1. a) In a C-Program:
#include "curl.h"
b) In a C++-Program:
extern "C" {
#include "curl.h"
}
2. char *url="http://www.domain.com";
curl_urlget (URGTAG_URL, url,
URGTAG_FLAGS, CONF_NOPROGRESS,
URGTAG_ERRORBUFFER, errorBuffer,
URGTAG_WRITEFUNCTION, (size_t (*)(void *, int, int, FILE
*))handle_data,
URGTAG_TIMEOUT, 30, /* or anything You want */
...
URGTAG_DONE);
3. size_t handle_data (const void *ptr, size_t size, size_t nitems,
FILE *stream)
{
(void)stream; /* stop complaining using g++ -Wall */
if ((int)nitems <= 0) {
return (size_t)0;
}
fprintf(stdout, (char *)ptr); /* or do anything else with it */
return nitems;
}
4. Compile Your Program with -I$(CURL_DIR)/include
5. Link Your Program together with $(CURL_DIR)/lib/libcurl.a
Small Example of How To Use libcurl
----------------------------------------------------------------------
/* Full example that uses libcurl.a to fetch web pages. */
/* curlthreads.c */
/* - Test-Program by Ralph Beckmann for using curl in POSIX-Threads */
/* Change *url1 and *url2 to textual long and slow non-FRAMESET websites! */
/*
1. Compile with gcc or g++ as $(CC):
$(CC) -c -Wall -pedantic curlthreads.c -I$(CURL_DIR)/include
2. Link with:
- Linux:
$(CC) -o curlthreads curlthreads.o $(CURL_DIR)/lib/libcurl.a -lpthread
-lm
- Solaris:
$(CC) -o curlthreads curlthreads.o $(CURL_DIR)/lib/libcurl.a -lpthread
-lm -lsocket -lnsl
*/
#include <pthread.h>
#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#include "curl.h"
}
#else
#include "curl.h"
#endif
size_t storedata (const void *ptr, size_t size, size_t nitems, FILE *stream) {
(void)ptr; (void)stream; /* just to stop g++ -Wall complaining */
fprintf(stdout, "Thread #%i reads %i Bytes.\n",
(int)pthread_self(), (int)(nitems*size));
return (nitems);
}
void *urlfetcher(void *url) {
curl_urlget (URGTAG_URL, url,
URGTAG_FLAGS, CONF_NOPROGRESS | CONF_FAILONERROR,
URGTAG_WRITEFUNCTION, (size_t (*)(void *, int, int, FILE
*))storedata,
URGTAG_DONE);
return NULL;
}
int main(void) {
char *url1="www.sun.com";
char *url2="www.microsoft.com";
pthread_t thread_id1, thread_id2;
pthread_create(&thread_id1, NULL, urlfetcher, (void *)url1);
pthread_create(&thread_id2, NULL, urlfetcher, (void *)url2);
pthread_join(thread_id1, NULL);
pthread_join(thread_id2, NULL);
fprintf(stdout, "Ready.\n");
return 0;
}

View File

@@ -71,6 +71,9 @@
/* Define if you have the strcasecmp function. */
/*#define HAVE_STRCASECMP 1*/
/* Define if you have the stricmp function. */
#define HAVE_STRICMP 1
/* Define if you have the strdup function. */
#define HAVE_STRDUP 1
@@ -87,7 +90,7 @@
/*#define HAVE_TCSETATTR 1*/
/* Define if you have the uname function. */
#define HAVE_UNAME 1
/*#define HAVE_UNAME 1*/
/* Define if you have the <alloca.h> header file. */
/*#define HAVE_ALLOCA_H 1*/
@@ -146,15 +149,9 @@
/* Define if you have the <termios.h> header file. */
#define HAVE_TERMIOS_H 1
/* Define if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Name of package */
#define PACKAGE "curl"
/* Version number of package */
#define VERSION "6.3.1"
/* Define if you have the <io.h> header file. */
#define HAVE_IO_H 1
@@ -169,3 +166,16 @@
/* Define if you have the setvbuf function. */
#define HAVE_SETVBUF 1
/* Define if you have the RAND_screen function when using SSL */
#define HAVE_RAND_SCREEN 1
/*************************************************
* This section is for compiler specific defines.*
*************************************************/
#ifndef VC6 /* VC6 => Microsoft Visual C++ 6 */
/* Define if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
#endif

View File

@@ -37,6 +37,9 @@
/* The number of bytes in a long long. */
#undef SIZEOF_LONG_LONG
/* Define if you have the RAND_screen function. */
#undef HAVE_RAND_SCREEN
/* Define if you have the RAND_status function. */
#undef HAVE_RAND_STATUS
@@ -46,6 +49,12 @@
/* Define if you have the gethostbyaddr function. */
#undef HAVE_GETHOSTBYADDR
/* Define if you have the gethostbyaddr_r function. */
#undef HAVE_GETHOSTBYADDR_R
/* Define if you have the gethostbyname_r function. */
#undef HAVE_GETHOSTBYNAME_R
/* Define if you have the gethostname function. */
#undef HAVE_GETHOSTNAME
@@ -64,6 +73,9 @@
/* Define if you have the inet_ntoa function. */
#undef HAVE_INET_NTOA
/* Define if you have the inet_ntoa_r function. */
#undef HAVE_INET_NTOA_R
/* Define if you have the perror function. */
#undef HAVE_PERROR
@@ -79,12 +91,18 @@
/* Define if you have the strcasecmp function. */
#undef HAVE_STRCASECMP
/* Define if you have the strcmpi function. */
#undef HAVE_STRCMPI
/* Define if you have the strdup function. */
#undef HAVE_STRDUP
/* Define if you have the strftime function. */
#undef HAVE_STRFTIME
/* Define if you have the stricmp function. */
#undef HAVE_STRICMP
/* Define if you have the strstr function. */
#undef HAVE_STRSTR
@@ -121,6 +139,9 @@
/* Define if you have the <io.h> header file. */
#undef HAVE_IO_H
/* Define if you have the <malloc.h> header file. */
#undef HAVE_MALLOC_H
/* Define if you have the <net/if.h> header file. */
#undef HAVE_NET_IF_H
@@ -160,6 +181,9 @@
/* Define if you have the <ssl.h> header file. */
#undef HAVE_SSL_H
/* Define if you have the <stdlib.h> header file. */
#undef HAVE_STDLIB_H
/* Define if you have the <sys/param.h> header file. */
#undef HAVE_SYS_PARAM_H
@@ -175,6 +199,9 @@
/* Define if you have the <sys/stat.h> header file. */
#undef HAVE_SYS_STAT_H
/* Define if you have the <sys/time.h> header file. */
#undef HAVE_SYS_TIME_H
/* Define if you have the <sys/types.h> header file. */
#undef HAVE_SYS_TYPES_H

View File

@@ -2,11 +2,12 @@ dnl $Id$
dnl Process this file with autoconf to produce a configure script.
AC_INIT(lib/urldata.h)
AM_CONFIG_HEADER(config.h src/config.h)
AM_INIT_AUTOMAKE(curl,"6.5")
AM_INIT_AUTOMAKE(curl,"7.0.1beta")
dnl Checks for programs.
AC_PROG_CC
AC_PROG_INSTALL
dnl The install stuff has already been taken care of by the automake stuff
dnl AC_PROG_INSTALL
AC_PROG_MAKE_SET
dnl Check for AIX weirdos
@@ -119,6 +120,8 @@ dnl Checks for header files.
AC_HEADER_STDC
AC_CHECK_HEADERS( \
unistd.h \
malloc.h \
stdlib.h \
arpa/inet.h \
net/if.h \
netinet/in.h \
@@ -128,6 +131,7 @@ AC_CHECK_HEADERS( \
sys/sockio.h \
sys/stat.h \
sys/types.h \
sys/time.h \
getopt.h \
sys/param.h \
termios.h \
@@ -171,23 +175,27 @@ AC_CHECK_FUNCS( socket \
strftime \
uname \
strcasecmp \
stricmp \
strcmpi \
gethostname \
gethostbyname_r \
gethostbyaddr \
gethostbyaddr_r \
getservbyname \
gettimeofday \
inet_addr \
inet_ntoa \
inet_ntoa_r \
tcsetattr \
tcgetattr \
perror \
getpass \
closesocket \
setvbuf \
RAND_status
RAND_status \
RAND_screen
)
AC_PATH_PROG( PERL, perl, ,
$PATH:/usr/local/bin/perl:/usr/bin/:/usr/local/bin )
AC_SUBST(PERL)

View File

@@ -1,6 +1,6 @@
%define name curl-ssl
%define tarball curl
%define version 6.0
%define version 6.5.2
%define release 1
%define prefix /usr/local
@@ -35,11 +35,12 @@ rm -rf %{builddir}
%setup -n %{tarball}-%{version}
%build
CFLAGS=$RPM_OPT_FLAGS ./configure --prefix=$RPM_BUILD_ROOT%{prefix} --with-ssl
make CFLAGS="-DUSE_SSLEAY -I/usr/include/openssl"
CFLAGS=$RPM_OPT_FLAGS ./configure --prefix=$RPM_BUILD_ROOT%{prefix}
make
%install
make install-strip
install -m 0644 curl.1 $RPM_BUILD_ROOT%{prefix}/man/man1
%clean
rm -rf $RPM_BUILD_ROOT
@@ -48,5 +49,6 @@ rm -rf %{builddir}
%files
%defattr(-,root,root)
%attr(0755,root,root) %{prefix}/bin/curl
%doc curl.1 README* CHANGES CONTRIBUTE FAQ FILES INSTALL LEGAL MPL-1.0.txt RESOURCES TODO perl/
%attr(0644,root,root) %{prefix}/man/man1/curl.1
%doc BUGS CHANGES CONTRIBUTE FAQ FEATURES FILES INSTALL LEGAL MPL-1.0.txt README* RESOURCES TODO

98
curl-ssl.spec.in Normal file
View File

@@ -0,0 +1,98 @@
%define ver @VERSION@
%define rel 1
%define prefix /usr
Summary: get a file from a FTP, GOPHER or HTTP server.
Name: @PACKAGE@-ssl
Version: %ver
Release: %rel
Copyright: MPL
Group: Utilities/Console
Source: @PACKAGE@-%{version}.tar.gz
URL: http://@PACKAGE@.haxx.nu
BuildPrereq: openssl
BuildRoot: /tmp/%{name}-%{version}-%{rel}-root
Packager: Fill In As You Wish
Docdir: %{prefix}/doc
%description
@PACKAGE@-ssl is a client to get documents/files from servers, using
any of the supported protocols. The command is designed to
work without user interaction or any kind of interactivity.
@PACKAGE@-ssl offers a busload of useful tricks like proxy support,
user authentication, ftp upload, HTTP post, file transfer
resume and more.
Note: this version is compiled with SSL (https:) support.
Authors:
Daniel Stenberg <Daniel.Stenberg@haxx.nu>
%prep
%setup -n @PACKAGE@-@VERSION@
%build
# Needed for snapshot releases.
if [ ! -f configure ]; then
CONF="./autogen.sh"
else
CONF="./configure"
fi
#
# Configuring the package
#
CFLAGS="${RPM_OPT_FLAGS}" ${CONF} \
--prefix=%{prefix} \
--with-ssl
[ "$SMP" != "" ] && JSMP = '"MAKE=make -k -j $SMP"'
make ${JSMP} CFLAGS="-DUSE_SSLEAY -I/usr/include/openssl";
%install
[ -d ${RPM_BUILD_ROOT} ] && rm -rf ${RPM_BUILD_ROOT}
make prefix=${RPM_BUILD_ROOT}%{prefix} install-strip
#
# Generating file lists and store them in file-lists
# Starting with the directory listings
#
find ${RPM_BUILD_ROOT}%{prefix}/{bin,lib,man} -type d | sed "s#^${RPM_BUILD_ROOT}#\%attr (-\,root\,root) \%dir #" > file-lists
#
# Then, the file listings
#
echo "%defattr (-, root, root)" >> file-lists
find ${RPM_BUILD_ROOT}%{prefix} -type f | sed -e "s#^${RPM_BUILD_ROOT}##g" >> file-lists
%clean
(cd ..; rm -rf @PACKAGE@-@VERSION@ ${RPM_BUILD_ROOT})
%files -f file-lists
%defattr (-, root, root)
%doc BUGS
%doc CHANGES
%doc CONTRIBUTE
%doc FAQ
%doc FEATURES
%doc FILES
%doc INSTALL
%doc LEGAL
%doc MPL-1.0.txt
%doc README
%doc README.curl
%doc README.lib@PACKAGE@
%doc RESOURCES
%doc TODO
%doc %{name}-ssl.spec.in
%doc %{name}.spec.in

View File

@@ -1,5 +1,5 @@
%define name curl
%define version 6.0
%define version 6.5.2
%define release 1
%define prefix /usr/local
@@ -35,11 +35,12 @@ rm -rf %{builddir}
%build
export CFLAGS=$RPM_OPT_FLAGS
./configure --prefix=$RPM_BUILD_ROOT%{prefix}
./configure --prefix=$RPM_BUILD_ROOT%{prefix} --without-ssl
make
%install
make install-strip
install -m 0644 curl.1 $RPM_BUILD_ROOT%{prefix}/man/man1
%clean
rm -rf $RPM_BUILD_ROOT
@@ -48,5 +49,6 @@ rm -rf %{builddir}
%files
%defattr(-,root,root)
%attr(0755,root,root) %{prefix}/bin/curl
%doc curl.1 README* CHANGES CONTRIBUTE FAQ FILES INSTALL LEGAL MPL-1.0.txt RESOURCES TODO perl/
%attr(0644,root,root) %{prefix}/man/man1/curl.1
%doc BUGS CHANGES CONTRIBUTE FAQ FEATURES FILES INSTALL LEGAL MPL-1.0.txt README* RESOURCES TODO

96
curl.spec.in Normal file
View File

@@ -0,0 +1,96 @@
%define ver @VERSION@
%define rel 1
%define prefix /usr
Summary: get a file from a FTP, GOPHER or HTTP server.
Name: @PACKAGE@
Version: %ver
Release: %rel
Copyright: MPL
Group: Utilities/Console
Source: %{name}-%{version}.tar.gz
URL: http://@PACKAGE@.haxx.nu
BuildRoot: /tmp/%{name}-%{version}-%{rel}-root
Packager: Fill In As You Wish
Docdir: %{prefix}/doc
%description
@PACKAGE@ is a client to get documents/files from servers, using
any of the supported protocols. The command is designed to
work without user interaction or any kind of interactivity.
@PACKAGE@ offers a busload of useful tricks like proxy support,
user authentication, ftp upload, HTTP post, file transfer
resume and more.
Note: this version is compiled without SSL (https:) support.
Authors:
Daniel Stenberg <Daniel.Stenberg@haxx.nu>
%prep
%setup -n %{name}-%{version}
%build
# Needed for snapshot releases.
if [ ! -f configure ]; then
CONF="./autogen.sh"
else
CONF="./configure"
fi
#
# Configuring the package
#
CFLAGS="${RPM_OPT_FLAGS}" ${CONF} \
--prefix=%{prefix}
[ "$SMP" != "" ] && JSMP = '"MAKE=make -k -j $SMP"'
make ${JSMP};
%install
[ -d ${RPM_BUILD_ROOT} ] && rm -rf ${RPM_BUILD_ROOT}
make prefix=${RPM_BUILD_ROOT}%{prefix} install-strip
#
# Generating file lists and store them in file-lists
# Starting with the directory listings
#
find ${RPM_BUILD_ROOT}%{prefix}/{bin,lib,man} -type d | sed "s#^${RPM_BUILD_ROOT}#\%attr (-\,root\,root) \%dir #" > file-lists
#
# Then, the file listings
#
echo "%defattr (-, root, root)" >> file-lists
find ${RPM_BUILD_ROOT}%{prefix} -type f | sed -e "s#^${RPM_BUILD_ROOT}##g" >> file-lists
%clean
(cd ..; rm -rf %{name}-%{version} ${RPM_BUILD_ROOT})
%files -f file-lists
%defattr (-, root, root)
%doc BUGS
%doc CHANGES
%doc CONTRIBUTE
%doc FAQ
%doc FEATURES
%doc FILES
%doc INSTALL
%doc LEGAL
%doc MPL-1.0.txt
%doc README
%doc README.curl
%doc README.lib@PACKAGE@
%doc RESOURCES
%doc TODO
%doc %{name}-ssl.spec.in
%doc %{name}.spec.in

56
docs/BUGS Normal file
View File

@@ -0,0 +1,56 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
BUGS
Curl has grown substantially from that day, several years ago, when I
started fiddling with it. When I write this, there are 16500 lines of source
code, and by the time you read this it has probably grown even more.
Of course there are lots of bugs left. And lots of misfeatures.
To help us make curl the stable and solid product we want it to be, we need
bug reports and bug fixes. If you can't fix a bug yourself and submit a fix
for it, try to submit as detailed a report as possible to the curl mailing
list to allow one of us to have a go at a solution. You should also post
your bug/problem at curl's bug tracking system over at
http://sourceforge.net/bugs/?group_id=976
When reporting a bug, you should include information that will help us
understand what's wrong, what's expected and how to repeat it. You therefore
need to supply your operating system's name and version number (uname -a
under a unix is fine), what version of curl you're using (curl -v is fine),
what URL you were working with and anything else you think matters.
If curl crashed, causing a core dump (in unix), there is hardly any use to
send that huge file to anyone of us. Unless we have an exact same system
setup as you, we can't do much with it. What we instead ask of you is to get
a stack trace and send that (much smaller) output to us instead!
The address and how to subscribe to the mailing list is detailed in the
README.curl file.
HOW TO GET A STACK TRACE with a common unix debugger
====================================================
First, you must make sure that you compile all sources with -g and that you
don't 'strip' the final executable.
Run the program until it bangs.
Run your debugger on the core file, like '<debugger> curl core'. <debugger>
should be replaced with the name of your debugger, in most cases that will
be 'gdb', but 'dbx' and others also occur.
When the debugger has finished loading the core file and presents you a
prompt, you can give the debugger instructions. Enter 'where' (without the
quotes) and press return.
The list that is presented is the stack trace. If everything worked, it is
supposed to contain the chain of functions that were called when curl
crashed.

View File

@@ -1,4 +1,10 @@
Date: 1999-08-04
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
CONTRIBUTE
To Think About When Contributing Source Code
@@ -31,7 +37,7 @@ Commenting
General Style
Keep your functions small. If they're small you avoid a lot of mistakes and
you don't accidentaly mix up variables.
you don't accidentally mix up variables.
Non-clobbering All Over

85
docs/FAQ Normal file
View File

@@ -0,0 +1,85 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
FAQ
Problems connecting to SSL servers.
===================================
It took a very long time before I could sort out why curl had problems
to connect to certain SSL servers when using SSLeay or OpenSSL v0.9+.
The error sometimes showed up similar to:
16570:error:1407D071:SSL routines:SSL2_READ:bad mac decode:s2_pkt.c:233:
It turned out to be because many older SSL servers don't deal with SSLv3
requests properly. To correct this problem, tell curl to select SSLv2 from
the command line (-2/--sslv2).
I have also seen examples where the remote server didn't like the SSLv2
request and instead you had to force curl to use SSLv3 with -3/--sslv3.
Does curl support resume?
=========================
Yes. Both ways on FTP, download ways on HTTP.
Is libcurl thread safe?
=======================
Yes, as far as curl's own code goes. It does use system calls that often
aren't thread safe in most environments, such as gethostbyname().
I am very interested in once and for all getting some kind of report or
README file from those who have used libcurl in a threaded environment,
since I haven't and I get this question more and more frequently!
Why doesn't my posting using -F work?
=====================================
You can't simply use -F or -d at your choice. The web server that will
receive your post assumes one of the formats. If the form you're trying to
"fake" sets the type to 'multipart/form-data', then and only then you must
use the -F type. In all the most common cases, you should use -d which then
causes a posting with the type 'application/x-www-form-urlencoded'.
Does curl support custom FTP commands?
======================================
Yes it does, you can tell curl to perform optional commands both before
and/or after a file transfer. Study the -Q/--quote option.
Since curl is used for file transfers, you don't use curl to just perform
ftp commands without transferring anything. Therefore you must always specify
a URL to transfer to/from even when doing custom FTP commands.
Does curl work with other SSL libraries?
========================================
Curl has been written to use OpenSSL, although I doubt there would be much
problems using a different library. I just don't know any other free one and
that has limited my possibilities to develop against anything else.
If anyone does "port" curl to use a commercial SSL library, I am of course
very interested in getting the patch!
configure doesn't find OpenSSL even when it is installed
=======================================================
Platforms: Solaris (native cc compiler) and HPUX (native cc compiler)
When configuring curl, I specify --with-ssl. OpenSSL is installed in
/usr/local/ssl Configure reports SSL in /usr/local/ssl, but fails to find
CRYPTO_lock in -lcrypto
Cause: The cc for this test places the -L/usr/local/ssl/lib AFTER -lcrypto,
so ld can't find the library. This is due to a bug in the GNU autoconf tool.
Workaround: Specifying "LDFLAGS=-L/usr/local/ssl/lib" in front of ./configure
places the -L/usr/local/ssl/lib early enough in the command line to make
things work
Submitted by: Bob Allison <allisonb@users.sourceforge.net>

View File

@@ -1,7 +1,16 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
FEATURES
Misc
- full URL syntax
- custom maximum download time
- custom least download speed acceptable
- custom output result after completion
- multiple URLs
- guesses protocol from host name unless specified
- uses .netrc
@@ -21,6 +30,7 @@ HTTP
- follow redirects
- custom HTTP request
- cookie get/send
- understands the netscape cookie file
- custom headers (that can replace internally generated headers)
- custom user-agent string
- custom referer string

View File

@@ -6,21 +6,56 @@
How To Compile
Curl has been compiled and built on numerous different operating systems. The
way to proceed is mainly divided in two different ways: the unix way or the
windows way.
If you're using Windows (95, 98, NT) or OS/2, you should continue reading from
the Win32 header below. All other systems should be capable of being installed
as described in the UNIX header.
PORTS
=====
Just to show off, this is a probably incomplete list of known hardware and
operating systems that curl has been compiled for:
- Ultrix
- SINIX-Z v5
Alpha DEC OSF 4
Alpha Digital UNIX V3.2D-1 (rev 41)
HP-PA HP-UX 10.X 11.X
MIPS IRIX 6.2, 6.5
Power AIX 4.2, 4.3.1, 4.3.2
PowerPC Darwin 1.0
PowerPC Mac OS X
Sparc Solaris 2.4, 2.5, 2.5.1, 2.6, 7
Sparc SunOS 4.1.*
i386 BeOS
i386 FreeBSD
i386 Linux 1.3, 2.0, 2.2
i386 NetBSD
i386 OS/2
i386 OpenBSD
i386 Solaris 2.7
i386 Windows 95, 98, NT
m68k AmigaOS 3
m68k OpenBSD
UNIX
====
The configure script *always* tries to find a working SSL library unless
explicitely told not to. If you have SSLeay or OpenSSL installed in the
default search path for your compiler/linker, you don't need to do anything
explicitly told not to. If you have OpenSSL installed in the default
search path for your compiler/linker, you don't need to do anything
special.
If you have SSLeay or OpenSSL installed in /usr/local/ssl, you can
run configure like so:
If you have OpenSSL installed in /usr/local/ssl, you can run configure
like:
./configure --with-ssl
If you have SSLeay or OpenSSL installed somewhere else (for example,
/opt/OpenSSL,) you can run configure like this:
If you have OpenSSL installed somewhere else (for example, /opt/OpenSSL,)
you can run configure like this:
./configure --with-ssl=/opt/OpenSSL
@@ -29,10 +64,10 @@ UNIX
./configure --without-ssl
If you have SSLeay or OpenSSL installed, but with the libraries in
one place and the header files somewhere else, you'll have to set the
LDFLAGS and CPPFLAGS environment variables prior to running configure.
Something like this should work:
If you have OpenSSL installed, but with the libraries in one place and the
header files somewhere else, you'll have to set the LDFLAGS and CPPFLAGS
environment variables prior to running configure. Something like this
should work:
(with the Bourne shell and its clones):
@@ -162,13 +197,43 @@ Win32
Microsoft / Borland style
-------------------------
If you have OpenSSL/SSLeay, and want curl to take advantage of it,
edit your project properties to use the SSL include path, link with
the SSL libs and define the USE_SSLEAY symbol.
If you have OpenSSL, and want curl to take advantage of it, edit your
project properties to use the SSL include path, link with the SSL libs
and define the USE_SSLEAY symbol.
OpenSSL/SSLeay
==============
IBM OS/2
========
Building under OS/2 is not much different from building under unix.
You need:
- emx 0.9d
- GNU make
- GNU patch
- ksh
- GNU bison
- GNU file utilities
- GNU sed
- autoconf 2.13
If you want to build with OpenSSL or OpenLDAP support, you'll need to
download those libraries, too. Dirk Ohme has done some work to port SSL
libraries under OS/2, but it looks like he doesn't care about emx. You'll
find his patches on: http://come.to/Dirk.Ohme
If during the linking you get an error about _errno being an undefined
symbol referenced from the text segment, you need to add -D__ST_MT_ERRNO__
in your definitions.
If everything seems to work fine but there's no curl.exe, you need to add
-Zexe to your linker flags.
If you're getting huge binaries, probably your makefiles have the -g in
CFLAGS.
OpenSSL
=======
You'll find OpenSSL information at:

140
docs/INTERNALS Normal file
View File

@@ -0,0 +1,140 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
INTERNALS
The project is kind of split in two. The library and the client. The client
part uses the library, but the library is meant to be designed to allow other
applications to use it.
Thus, the largest amount of code and complexity is in the library part.
Windows vs Unix
===============
There are a few differences in how to program curl the unix way compared to
the Windows way. The four most notable details are:
1. Different function names for close(), read(), write()
2. Windows requires a couple of init calls
3. The file descriptors for network communication and file operations are
not easily interchangable as in unix
4. When writing data to stdout, Windows makes end-of-lines the DOS way, thus
destroying binary data, although you do want that conversion if it is
text coming through... (sigh)
In curl, (1) and (2) are done with defines and macros, so that the source
looks the same at all places except for the header file that defines them.
(3) is simply avoided by not trying any funny tricks on file descriptors.
(4) is left alone, giving windows users problems when they pipe binary data
through stdout...
Inside the source code, I do make an effort to avoid '#ifdef WIN32'. All
conditionals that deal with features *should* instead be in the format
'#ifdef HAVE_THAT_WEIRD_FUNCTION'. Since Windows can't run configure scripts,
I maintain two config-win32.h files (one in / and one in src/) that are
supposed to look exactly as a config.h file would have looked like on a
Windows machine!
Library
=======
There is a few entry points to the library, namely each publicly defined
function that libcurl offers to applications. All of those functions are
rather small and easy-to-follow, except the one single and do-it-all named
curl_urlget() (entry point in lib/url.c).
curl_urlget() takes a variable amount of arguments, and they must all be
passed in pairs, the parameter-ID and the parameter-value. The list of
arguments must be ended with a end-of-arguments parameter-ID.
The function then continues to analyze the URL, get the different components
and connects to the remote host. This may involve using a proxy and/or using
SSL. The GetHost() function in lib/hostip.c is used for looking up host
names.
When connected, the proper function is called. The functions are named after
the protocols they handle. ftp(), http(), dict(), etc. They all reside in
their respective files (ftp.c, http.c and dict.c).
The protocol-specific functions deal with protocol-specific negotiations and
setup. They have access to the sendf() (from lib/sendf.c) function to send
printf-style formatted data to the remote host and when they're ready to make
the actual file transfer they call the Transfer() function (in
lib/download.c) to do the transfer. All printf()-style functions use the
supplied clones in lib/mprintf.c.
While transferring, the progress functions in lib/progress.c are called at a
frequent interval. The speedcheck functions in lib/speedcheck.c are also used
to verify that the transfer is as fast as required.
When the operation is done, the writeout() function in lib/writeout.c may be
called to report about the operation as specified previously in the arguments
to curl_urlget().
HTTP(S)
HTTP offers a lot and is the protocol in curl that uses the most lines of
code. There is a special file (lib/formdata.c) that offers all the multipart
post functions.
base64-functions for user+password stuff is in (lib/base64.c) and all
functions for parsing and sending cookies are found in
(lib/cookie.c).
HTTPS uses in almost every means the same procedure as HTTP, with only two
exceptions: the connect procedure is different and the function used
FTP
The if2ip() function can be used for getting the IP number of a specified
network interface, and it resides in lib/if2ip.c
TELNET
Telnet is implemented in lib/telnet.c.
FILE
The file:// protocol is dealt with in lib/file.c.
LDAP
Everything LDAP is in lib/ldap.c.
GENERAL
URL encoding and decoding, called escaping and unescaping in the source code,
is found in lib/escape.c.
While transferring data in Transfer() a few functions might get
used. get_date() in lib/getdate.c is for HTTP date comparisons.
lib/getenv.c is for reading environment variables in a neat platform
independent way. That's used in the client, but also in lib/url.c when
checking the PROXY variables.
lib/netrc.c keeps the .netrc parser
lib/timeval.c features replacement functions for systems that don't have
A function named curl_version() that returns the full curl version string is
found in lib/version.c.
Client
======
main() resides in src/main.c together with most of the client
code. src/hugehelp.c is automatically generated by the mkhelp.pl perl script
to display the complete "manual" and the src/urlglob.c file holds the
functions used for the multiple-URL support.
The client mostly mess around to setup its config struct properly, then it
calls the curl_urlget() function in the library and when it gets back control
it checks status and exits.

View File

@@ -135,9 +135,13 @@ UPLOADING
curl -T uploadfile -u user:passwd ftp://ftp.upload.com/
NOTE: Curl is not currently supporing ftp upload through a proxy! The reason
for this is simply that proxies are seldomly configured to allow this and
that no author has supplied code that makes it possible!
Upload a local file to get appended to the remote file using ftp:
curl -T localfile -a ftp://ftp.upload.com/remotefile
NOTE: Curl does not support ftp upload through a proxy! The reason for this
is simply that proxies are seldomly configured to allow this and that no
author has supplied code that makes it possible!
HTTP
@@ -190,6 +194,41 @@ POST (HTTP)
curl -d "name=Rafael%20Sagula&phone=3320780" \
http://www.where.com/guest.cgi
How to post a form with curl, lesson #1:
Dig out all the <input> tags in the form that you want to fill in. (There's
a perl program called formfind.pl on the curl site that helps with this).
If there's a "normal" post, you use -d to post. -d takes a full "post
string", which is in the format
<variable1>=<data1>&<variable2>=<data2>&...
The 'variable' names are the names set with "name=" in the <input> tags, and
the data is the contents you want to fill in for the inputs. The data *must*
be properly URL encoded. That means you replace space with + and that you
write weird letters with %XX where XX is the hexadecimal representation of
the letter's ASCII code.
Example:
(page located at http://www.formpost.com/getthis/
<form action="post.cgi" method="post">
<input name=user size=10>
<input name=pass type=password size=10>
<input name=id type=hidden value="blablabla">
<input name=ding value="submit">
</form>
We want to enter user 'foobar' with password '12345'.
To post to this, you enter a curl command line like:
curl -d "user=foobar&pass=12345&id=blablabla&dig=submit" (continues)
http://www.formpost.com/getthis/post.cgi
While -d uses the application/x-www-form-urlencoded mime-type, generally
understood by CGI's and similar, curl also supports the more capable
multipart/form-data type. This latter type supports things like file upload.
@@ -457,9 +496,9 @@ FTP and firewalls
HTTPS
Secure HTTP requires SSLeay to be installed and used when curl is built. If
that is done, curl is capable of retrieving and posting documents using the
HTTPS procotol.
Secure HTTP requires SSL libraries to be installed and used when curl is
built. If that is done, curl is capable of retrieving and posting documents
using the HTTPS protocol.
Example:
@@ -472,9 +511,10 @@ HTTPS
browsers (Netscape and MSEI both use the so called PKCS#12 format). If you
want curl to use the certificates you use with your (favourite) browser, you
may need to download/compile a converter that can convert your browser's
formatted certificates to PEM formatted ones. Dr Stephen N. Henson has
written a patch for SSLeay that adds this functionality. You can get his
patch (that requires an SSLeay installation) from his site at:
formatted certificates to PEM formatted ones. This kind of converter is
included in recent versions of OpenSSL, and for older versions Dr Stephen
N. Henson has written a patch for SSLeay that adds this functionality. You
can get his patch (that requires an SSLeay installation) from his site at:
http://www.drh-consultancy.demon.co.uk/
Example on how to automatically retrieve a document using a certificate with
@@ -601,6 +641,34 @@ ENVIRONMENT VARIABLES
The usage of the -x/--proxy flag overrides the environment variables.
NETRC
Unix introduced the .netrc concept a long time ago. It is a way for a user
to specify name and password for commonly visited ftp sites in a file so
that you don't have to type them in each time you visit those sites. You
realize this is a big security risk if someone else gets hold of your
passwords, so therefor most unix programs won't read this file unless it is
only readable by yourself (curl doesn't care though).
Curl supports .netrc files if told so (using the -n/--netrc option). This is
not restricted to only ftp, but curl can use it for all protocols where
authentication is used.
A very simple .netrc file could look something like:
machine curl.haxx.nu login iamdaniel password mysecret
CUSTOM OUTPUT
To better allow script programmers to get to know about the progress of
curl, the -w/--write-out option was introduced. Using this, you can specify
what information from the previous transfer you want to extract.
To display the amount of bytes downloaded together with some text and an
ending newline:
curl -w 'We downloaded %{size_download} bytes\n' www.download.com
MAILING LIST
We have an open mailing list to discuss curl, its development and things

51
docs/README.libcurl Normal file
View File

@@ -0,0 +1,51 @@
_ _ _ _
| (_) |__ ___ _ _ _ __| |
| | | '_ \ / __| | | | '__| |
| | | |_) | (__| |_| | | | |
|_|_|_.__/ \___|\__,_|_| |_|
How To Use Libcurl In Your Program
Interfaces
libcurl currently offers two different interfaces to the URL transfer
engine. They can be seen as one low-level and one high-level, in the sense
that the low-level one will allow you to deal with a lot more details but on
the other hand not offer as many fancy features (such as Location:
following). The high-level interface is supposed to be a built-in
implementation of the low-level interface. You will not be able to mix
function calls from the different layers.
As we currently ONLY support the high-level interface, the so called easy
interface, I will not attempt to describe any low-level functions at this
point.
Function descriptions
The interface is meant to be very simple for very simple
implementations. Thus, we have minimized the number of entries.
The Easy Interface
When using the easy interface, you init your easy-session and get a handle,
which you use as input to the following interface functions you use.
You continue be setting all the options you want in the upcoming transfer,
most important among them is the URL itself. You might want to set some
callbacks as well that will be called from the library when data is available
etc.
When all is setup, you tell libcurl to perform the transfer. It will then do
the entire operation and won't return until it is done or failed.
After the performance is made, you cleanup the easy-session's handle and
libcurl is entirely off the hook!
See the separate man pages for the libcurl functions for details:
curl_easy_init()
curl_easy_setopt()
curl_easy_perform()
curl_easy_cleanup()

View File

@@ -12,34 +12,51 @@ specify standards used by curl, software that extends curl and web pages with
Standards
RFC 959 - Defines how FTP works
RFC 1738 - Uniform Resource Locators
RFC 1777 - defines the LDAP protocol
RFC 1808 - Relative Uniform Resource Locators
RFC 1867 - Form-based File Upload in HTML
RFC 1950 - ZLIB Compressed Data Format Specification
RFC 1951 - DEFLATE Compressed Data Format Specification
RFC 1952 - gzip compression format
RFC 1959 - LDAP URL syntax
RFC 2045-2049 - Everything you need to know about MIME! (needed for form
based upload)
RFC 2068 - HTTP 1.1 (obsoleted by RFC 2616)
RFC 2109 - HTTP State Management Mechanism (cookie stuff)
- Also, read Netscape's specification at
http://www.netscape.com/newsref/std/cookie_spec.html
RFC 2183 - "The Content-Disposition Header Field"
RFC 2229 - "A Dictionary Server Protocol"
RFC 2231 - "MIME Parameter Value and Encoded Word Extensions:
Character Sets, Languages, and Continuations"
RFC 2388 - "Returning Values from Forms: multipart/form-data"
Use this as an addition to the 1867
RFC 2396 - "Uniform Resource Identifiers: Generic Syntax and Semantics"
This one obsoletes 1738, but since 1738 is often mentioned I've left it
in this list.
RFC 2428 - "FTP Extensions for IPv6 and NATs"
This should be considered when introducing IPv6 awareness.
RFC 2616 - HTTP 1.1
RFC 2617 - HTTP Authentication
Compilers

View File

@@ -6,6 +6,33 @@
TODO
For version 7. Stuff I planned to have included in curl for version
seven. Let's do a serious attempt to include most of this.
Document the easy-interface completely
Make sure the low-level interface works. highlevel.c should basically be
possible to write using that interface.
Document the low-level interface
Add asynchronous name resolving, as this enables full timeout support for
fork() systems.
Make sure you can set the progress callback
Add libtool stuff
Move non-URL related functions that are used by both the lib and the curl
application to a separate "portability lib".
Add support for other languages than C (not important)
Improve the -K config file parser.
For the future
Ok, this is what I wanna do with Curl. Please tell me what you think, and
please don't hesitate to contribute and send me patches that improve this
product! (Yes, you may add things not mentioned here, these are just a
@@ -24,18 +51,17 @@ TODO
* HTTP Pipelining/persistant connections
- I'm gonna introduce HTTP "pipelining". Curl should be able
to request for several HTTP documents in one connect. It is the beginning
for supporing more advanced functions in the future, like web site
- We should introduce HTTP "pipelining". Curl could be able to request for
several HTTP documents in one connect. It would be the beginning for
supporing more advanced functions in the future, like web site
mirroring. This will require that the urlget() function supports several
documents from a single HTTP server, which it doesn't today.
- When curl supports fetching several documents from the same
server using pipelining, I'd like to offer that function to the command
line. Anyone has a good idea how? The current way of specifying one URL
with the output sent to the stdout or a file gets in the way. Imagine a
syntax that supports "additional documents from the same server" in a way
similar to:
- When curl supports fetching several documents from the same server using
pipelining, I'd like to offer that function to the command line. Anyone has
a good idea how? The current way of specifying one URL with the output sent
to the stdout or a file gets in the way. Imagine a syntax that supports
"additional documents from the same server" in a way similar to:
curl <main URL> --more-doc <path> --more-doc <path>
@@ -52,12 +78,11 @@ TODO
And some friendly person's server source code is available at
http://hopf.math.nwu.edu/digestauth/index.html
Then there's the Apache mod_digest source code too of course.
It seems as if Netscape doesn't support this, and not many servers
do. Although this is a lot better authentication method than the more
common "Basic". Basic sends the password in cleartext over the network,
this "Digest" method uses a challenge-response protocol which increases
security quite a lot.
Then there's the Apache mod_digest source code too of course. It seems as
if Netscape doesn't support this, and not many servers do. Although this is
a lot better authentication method than the more common "Basic". Basic
sends the password in cleartext over the network, this "Digest" method uses
a challenge-response protocol which increases security quite a lot.
* Different FTP Upload Through Web Proxy
I don't know any web proxies that allow CONNECT through on port 21, but
@@ -74,7 +99,7 @@ TODO
* Other proxies
Ftp-kind proxy, Socks5, whatever kind of proxies are there?
* IPv6 Awareness
* IPv6 Awareness and support
Where ever it would fit. I am not that into v6 yet to fully grasp what we
would need to do, but letting the autoconf search for v6-versions of a few
functions and then use them instead is of course the first thing to do...
@@ -88,3 +113,7 @@ TODO
(http://search.ietf.org/internet-drafts/draft-murray-auth-ftp-ssl-05.txt)
* HTTP POST resume using Range:
* Make curl capable of verifying the server's certificate when connecting
with HTTPS://.

View File

@@ -2,7 +2,7 @@
.\" nroff -man curl.1
.\" Written by Daniel Stenberg
.\"
.TH curl 1 "13 March 2000" "Curl 6.5" "Curl Manual"
.TH curl 1 "22 May 2000" "Curl 7.0" "Curl Manual"
.SH NAME
curl \- get a URL with FTP, TELNET, LDAP, GOPHER, DICT, FILE, HTTP or
HTTPS syntax.
@@ -193,7 +193,7 @@ Makes curl scan the
file in the user's home directory for login name and password. This is
typically used for ftp on unix. If used with http, curl will enable user
authentication. See
.BR netrc(4)
.BR netrc(5)
for details on the file format. Curl will not complain if that file
hasn't the right permissions (it should not be world nor group
readable). The environment variable "HOME" is used to find the home
@@ -204,7 +204,7 @@ A quick and very simple example of how to setup a
to allow curl to ftp to the machine host.domain.com with user name
'myself' and password 'secret' should look similar to:
.B "machine host.domain.com user myself password secret"
.B "machine host.domain.com login myself password secret"
.IP "-N/--no-buffer"
Disables the buffering of the output stream. In normal work situations, curl
will use a standard buffered output stream that will have the effect that it
@@ -212,8 +212,9 @@ will output the data in chunks, not necessarily exactly when the data arrives.
Using this option will disable that buffering.
.IP "-o/--output <file>"
Write output to <file> instead of stdout. If you are using {} or [] to fetch
multiple documents, you can use #[num] in the <file> specifier. That variable
will be replaced with the current string for the URL being fetched. Like in:
multiple documents, you can use '#' followed by a number in the <file>
specifier. That variable will be replaced with the current string for the URL
being fetched. Like in:
curl http://{one,two}.site.com -o "file_#1.txt"
@@ -553,10 +554,10 @@ If you do find any (or have other suggestions), mail Daniel Stenberg
- Linas Vepstas <linas@linas.org>
- Bjorn Reese <breese@mail1.stofanet.dk>
- Johan Anderson <johan@homemail.com>
- Kjell Ericson <Kjell.Ericson@sth.frontec.se>
- Kjell Ericson <Kjell.Ericson@haxx.nu>
- Troy Engel <tengel@sonic.net>
- Ryan Nelson <ryan@inch.com>
- Bjorn Stenberg <Bjorn.Stenberg@sth.frontec.se>
- Bjorn Stenberg <Bjorn.Stenberg@haxx.nu>
- Angus Mackay <amackay@gus.ml.org>
- Eric Young <eay@cryptsoft.com>
- Simon Dick <simond@totally.irrelevant.org>
@@ -586,6 +587,11 @@ If you do find any (or have other suggestions), mail Daniel Stenberg
- Ellis Pritchard <ellis@citria.com>
- Damien Adant <dams@usa.net>
- Chris <cbayliss@csc.come>
- Marco G. Salvagno <mgs@whiz.cjb.net>
- Paul Marquis <pmarquis@iname.com>
- David LeBlanc <dleblanc@qnx.com>
- Rich Gray at Plus Technologies
.SH WWW
http://curl.haxx.nu
.SH FTP

25
docs/curl_easy_cleanup.3 Normal file
View File

@@ -0,0 +1,25 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" Written by Daniel.Stenberg@haxx.nu
.\"
.TH curl_easy_cleanup 3 "22 May 2000" "Curl 7.0" "libcurl Manual"
.SH NAME
curl_easy_cleanup - End a libcurl "easy" session
.SH SYNOPSIS
.B #include <curl/easy.h>
.sp
.BI "curl_easy_cleanup(CURL *" handle ");
.ad
.SH DESCRIPTION
This function must be the last function to call for a curl session. It is the
opposite of the
.I curl_easy_init
function and must be called with the same
.I handle
as input as the curl_easy_init call returned.
.SH RETURN VALUE
None
.SH "SEE ALSO"
.BR curl_easy_init "(3), "
.SH BUGS
Surely there are some, you tell me!

25
docs/curl_easy_init.3 Normal file
View File

@@ -0,0 +1,25 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" Written by Daniel.Stenberg@haxx.nu
.\"
.TH curl_easy_init 3 "22 May 2000" "Curl 7.0" "libcurl Manual"
.SH NAME
curl_easy_init - Start a libcurl "easy" session
.SH SYNOPSIS
.B #include <curl/easy.h>
.sp
.BI "CURL *curl_easy_init( );"
.ad
.SH DESCRIPTION
This function must be the first function to call, and it returns a CURL handle
that you shall use as input to the other easy-functions. The init calls
initializes curl and this call MUST have a corresponding call to
.I curl_easy_cleanup
when the operation is complete.
.SH RETURN VALUE
If this function returns NULL, something went wrong and you cannot use the
other curl functions.
.SH "SEE ALSO"
.BR curl_easy_cleanup "(3), "
.SH BUGS
Surely there are some, you tell me!

29
docs/curl_easy_perform.3 Normal file
View File

@@ -0,0 +1,29 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" Written by Daniel.Stenberg@haxx.nu
.\"
.TH curl_easy_perform 3 "26 May 2000" "Curl 7.0" "libcurl Manual"
.SH NAME
curl_easy_perform - Do the actual transfer in an "easy" session
.SH SYNOPSIS
.B #include <curl/easy.h>
.sp
.BI "CURLcode curl_easy_perform(CURL *" handle ");
.ad
.SH DESCRIPTION
This function is called after the init and all the curl_easy_setopt() calls
are made, and will perform the transfer as described in the options.
It must be called with the same
.I handle
as input as the curl_easy_init call returned.
.SH RETURN VALUE
0 means everything was ok, non-zero means an error occurred as
.I <curl/curl.h>
defines. If the CURLOPT_ERRORBUFFER was set with
.I curl_easy_setopt
there will be a readable error message in the error buffer when non-zero is
returned.
.SH "SEE ALSO"
.BR curl_easy_init "(3), " curl_easy_setopt "(3), "
.SH BUGS
Surely there are some, you tell me!

300
docs/curl_easy_setopt.3 Normal file
View File

@@ -0,0 +1,300 @@
.\" You can view this file with:
.\" nroff -man [file]
.\" Written by Daniel.Stenberg@haxx.nu
.\"
.TH curl_easy_setopt 3 "22 May 2000" "Curl 7.0" "libcurl Manual"
.SH NAME
curl_easy_setopt - Set curl easy-session options
.SH SYNOPSIS
.B #include <curl/easy.h>
.sp
.BI "CURLcode curl_easy_setopt(CURL *" handle ", CURLoption "option ", ...);
.ad
.SH DESCRIPTION
curl_easy_setopt() is called to tell libcurl how to behave in a number of
ways. Most operations in libcurl have default actions, and by using the
appropriate options you can make them behave differently (as documented). All
options are set with the
.I option
followed by a parameter. That parameter can be a long, a function pointer or
an object pointer, all depending on what the option in question expects. Read
this manual carefully as bad input values may cause libcurl to behave badly!
The
.I "handle"
is the return code from the
.I "curl_easy_init"
call.
.SH OPTIONS
.TP 0.8i
.B CURLOPT_FILE
Data pointer to pass instead of FILE * to the file write function. Note that
if you specify the
.I CURLOPT_WRITEFUNCTION
, this is the pointer you'll get as input.
.TP
.B CURLOPT_WRITEFUNCTION
Function pointer that should match the following prototype:
.BI "size_t function( void *ptr, size_t size, size_t nmemb, FILE *stream);"
This function gets called by libcurl as soon as there is received data that
needs to be written down. The size of the data pointed to by
.I ptr
is
.I size
multiplied with
.I nmemb.
Return the number of bytes actually written or return -1 to signal error to the library (it will cause it to abort the transfer).
.TP
.B CURLOPT_INFILE
Data pointer to pass instead of FILE * to the file read function. Note that if
you specify the
.I CURLOPT_READFUNCTION
, this is the pointer you'll get as input.
.TP
.B CURLOPT_READFUNCTION
Function pointer that should match the following prototype:
.BI "size_t function( void *ptr, size_t size, size_t nmemb, FILE *stream);"
This function gets called by libcurl as soon as it needs to read data in order
to send it to the peer. The data area pointed at by the pointer
.I ptr
may be filled with at most
.I size
multiplied with
.I nmemb
number of bytes. Your function must return the actual number of bytes that you
stored in that memory area. Returning -1 will signal an error to the library
and cause it to abort the current transfer immediately.
.TP
.B CURLOPT_INFILESIZE
When uploading a file to a remote site, this option should be used to tell
libcurl what the expected size of the infile is.
.TP
.B CURLOPT_URL
The actual URL to deal with. The parameter should be a char * to a zero
terminated string. NOTE: this option is currently required!
.TP
.B CURLOPT_PROXY
If you need libcurl to use a http proxy to access the outside world, set the
proxy string with this option. The parameter should be a char * to a zero
terminated string.
.TP
.B CURLOPT_VERBOSE
Set the parameter to non-zero to get the library to display a lot of verbose
information about its operations.
.TP
.B CURLOPT_HEADER
A non-zero parameter tells the library to include the header in the
output. This is only relevant for protocols that actually have a header
preceding the data (like HTTP).
.TP
.B CURLOPT_NOPROGRESS
A non-zero parameter tells the library to shut off the built-in progress meter
completely. (NOTE: future versions of the lib is likely to not have any
built-in progress meter at all).
.TP
.B CURLOPT_NOBODY
A non-zero parameter tells the library to not include the body-part in the
output. This is only relevant for protocols that have a separate header and
body part.
.TP
.B CURLOPT_FAILONERROR
A non-zero parameter tells the library to fail silently if the HTTP code
returned is equal or larger than 300. The default action would be to return
the page normally, ignoring that code.
.TP
.B CURLOPT_UPLOAD
A non-zero parameter tells the library to prepare for an upload. The
CURLOPT_INFILE and CURLOPT_INFILESIZE are also interesting for uploads.
.TP
.B CURLOPT_POST
A non-zero parameter tells the library to do a regular HTTP post. This is a
normal application/x-www-form-urlencoded kind, which is the most commonly used
one by HTML forms. See the CURLOPT_POSTFIELDS option for how to specify the
data to post.
.TP
.B CURLOPT_FTPLISTONLY
A non-zero parameter tells the library to just list the names of an ftp
directory, instead of doing a full directory listing that would include file
sizes, dates etc.
.TP
.B CURLOPT_FTPAPPEND
A non-zero parameter tells the library to append to the remote file instead of
overwrite it. This is only useful when uploading to a ftp site.
.TP
.B CURLOPT_NETRC
A non-zero parameter tells the library to scan your
.I ~/.netrc
file to find user name and password for the remote site you are about to
access. Do note that curl does not verify that the file has the correct
properties set (as the standard unix ftp client does), and that only machine
name, user name and password is taken into account (init macros and similar
things aren't supported).
.TP
.B CURLOPT_FOLLOWLOCATION
A non-zero parameter tells the library to follow any Location: header that the
server sends as part of a HTTP header. NOTE that this means that the library
will resend the same request on the new location and follow new Location:
headers all the way until no more such headers are returned.
.TP
.B CURLOPT_FTPASCII
A non-zero parameter tells the library to use ASCII mode for ftp transfers,
instead of the default binary transfer. This will only be useable when
transferring text data between systems with different views on certain
characters, such as newlines or similar.
.TP
.B CURLOPT_PUT
A non-zero parameter tells the library to use HTTP PUT a file. The file to put
must be set with CURLOPT_INFILE and CURLOPT_INFILESIZE.
.TP
.B CURLOPT_MUTE
A non-zero parameter tells the library to be completely quiet.
.TP
.B CURLOPT_USERPWD
Pass a char * as parameter, which should be [username]:[password] to use for
the connection. If the password is left out, you will be prompted for it.
.TP
.B CURLOPT_PROXYUSERPWD
Pass a char * as parameter, which should be [username]:[password] to use for
the connection to the HTTP proxy. If the password is left out, you will be
prompted for it.
.TP
.B CURLOPT_RANGE
Pass a char * as parameter, which should contain the specified range you
want. It should be in the format "X-Y", where X or Y may be left out. The HTTP
transfers also support several intervals, separated with commas as in
.I "X-Y,N-M".
.TP
.B CURLOPT_ERRORBUFFER
Pass a char * to a buffer that the libcurl may store human readable error
messages in. This may be more helpful than just the return code from the
library. The buffer must be at least CURL_ERROR_SIZE big.
.TP
.B CURLOPT_TIMEOUT
Pass a long as parameter containing the maximum time in seconds that you allow
the libcurl transfer operation to take. Do note that normally, name lookups
may take a considerable time and that limiting the operation to less than a
few minutes risk aborting perfectly normal operations.
.TP
.B CURLOPT_POSTFIELDS
Pass a char * as parameter, which should be the full data to post in a HTTP
post operation. See also the CURLOPT_POST.
.TP
.B CURLOPT_REFERER
Pass a pointer to a zero terminated string as parameter. It will be used to
set the referer: header in the http request sent to the remote server. This
can be used to fool servers or scripts.
.TP
.B CURLOPT_USERAGENT
Pass a pointer to a zero terminated string as parameter. It will be used to
set the user-agent: header in the http request sent to the remote server. This
can be used to fool servers or scripts.
.TP
.B CURLOPT_FTPPORT
Pass a pointer to a zero terminated string as parameter. It will be used to
get the IP address to use for the ftp PORT instruction. The PORT instruction
tells the remote server to connect to our specified IP address. The string may
be a plain IP address, a host name, an network interface name (under unix) or
just a '-' letter to let the library use your systems default IP address.
.TP
.B CURLOPT_LOW_SPEED_LIMIT
Pass a long as parameter. It contains the transfer speed in bytes per second
that the transfer should be below during CURLOPT_LOW_SPEED_TIME seconds for
the library to consider it too slow and abort.
.TP
.B CURLOPT_LOW_SPEED_TIME
Pass a long as parameter. It contains the time in seconds that the transfer
should be below the CURLOPT_LOW_SPEED_LIMIT for the library to consider it too
slow and abort.
.TP
.B CURLOPT_RESUME_FROM
Pass a long as parameter. It contains the offset in number of bytes that you
want the transfer to start from.
.TP
.B CURLOPT_COOKIE
Pass a pointer to a zero terminated string as parameter. It will be used to
set a cookie in the http request. The format of the string should be
'[NAME]=[CONTENTS];' Where NAME is the cookie name.
.TP
.B CURLOPT_HTTPHEADER
Pass a pointer to a linked list of HTTP headers to pass to the server in your
HTTP request. The linked list should be a fully valid list of 'struct
HttpHeader' structs properly filled in. TBD!
.TP
.B CURLOPT_HTTPPOST
Pass a pointer to a linked list of HTTP post data to pass to the server in
your http request. The linked list should be a fully valid list of 'struct
HttpPost' structs properly filled in. TBD!
.TP
.B CURLOPT_SSLCERT
Pass a pointer to a zero terminated string as parameter. The string should be
the file name of your certificate in PEM format.
.TP
.B CURLOPT_SSLCERTPASSWD
Pass a pointer to a zero terminated string as parameter. It will be used as
the password required to use the CURLOPT_SSLCERT certificate. If the password
is not supplied, you will be prompted for it.
.TP
.B CURLOPT_CRLF
TBD.
.TP
.B CURLOPT_QUOTE
Pass a pointer to a linked list of FTP commands to pass to the server prior to
your ftp request. The linked list should be a fully valid list of 'struct
curl_slist' structs properly filled in. TBD!
.TP
.B CURLOPT_POSTQUOTE
Pass a pointer to a linked list of FTP commands to pass to the server after
your ftp transfer request. The linked list should be a fully valid list of
'struct curl_slist' structs properly filled in. TBD!
.TP
.B CURLOPT_WRITEHEADER
Pass a FILE * to be used to write the header part of the received data to.
.TP
.B CURLOPT_COOKIEFILE
Pass a pointer to a zero terminated string as parameter. It should contain the
name of your file holding cookie data. The cookie data may be in netscape
cookie data format or just regular HTTP-style headers dumped to a file.
.TP
.B CURLOPT_SSLVERSION
Pass a long as parameter. Set what version of SSL to attempt to use, 2 or
3. By default, the SSL library will try to solve this by itself although some
servers make this difficult, which is why you at times will have to use this option.
.TP
.B CURLOPT_TIMECONDITION
Pass a long as parameter. This defines how the CURLOPT_TIMEVALUE time value is
treated. You can set this parameter to TIMECOND_IFMODSINCE or
TIMECOND_IFUNMODSINCE. This is an HTTP-only feature. (TBD)
.TP
.B CURLOPT_TIMEVALUE
Pass a long as parameter. This should be the time in seconds since 1 jan 1970,
and the time will be used as specified in CURLOPT_TIMECONDITION or if that
isn't used, it will be TIMECOND_IFMODSINCE by default.
.TP
.B CURLOPT_CUSTOMREQUEST
Pass a pointer to a zero terminated string as parameter. It will be used
instead of GET or HEAD when doing the HTTP request. This is useful for doing
DELETE or other more obscure HTTP requests. Don't do this at will, make sure
your server supports the command first.
.TP
.B CURLOPT_STDERR
Pass a FILE * as parameter. This is the stream to use instead of stderr
internally when reporting errors.
.TP
.B CURLOPT_PROGRESSMODE
This is currently unsupported, and is likely to be removed in future
versions. TBD
.TP
.B CURLOPT_WRITEINFO
Pass a pointer to a zero terminated string as parameter. It will be used to
report information after a successful request. This string may contain
variables that will be substituted by their contents when output. Described
elsewhere.
.PP
.SH RETURN VALUE
0 means the option was set properly, non-zero means an error as
.I <curl/curl.h>
defines
.SH "SEE ALSO"
.BR curl_easy_init "(3), " curl_easy_cleanup "(3), "
.SH BUGS
Surely there are some, you tell me!

View File

@@ -1,5 +1,5 @@
#ifndef __CURL_H
#define __CURL_H
#ifndef __CURL_CURL_H
#define __CURL_CURL_H
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
@@ -39,6 +39,25 @@
*
* ------------------------------------------------------------
****************************************************************************/
/* The include stuff here is mainly for time_t! */
#ifdef vms
# include <types.h>
# include <time.h>
#else
# include <sys/types.h>
# if TIME_WITH_SYS_TIME
# include <sys/time.h>
# include <time.h>
# else
# if HAVE_SYS_TIME_H
# include <sys/time.h>
# else
# include <time.h>
# endif
# endif
#endif /* defined (vms) */
#ifndef TRUE
#define TRUE 1
#endif
@@ -46,71 +65,8 @@
#define FALSE 0
#endif
#include <curl/types.h>
#define CONF_DEFAULT 0
#define CONF_PROXY (1<<0) /* set if proxy is in use */
#define CONF_PORT (1<<1) /* set if different port than protocol-defines is
used */
#define CONF_HTTP (1<<2) /* http get */
#define CONF_GOPHER (1<<3) /* gopher get */
#define CONF_FTP (1<<4) /* ftp get (binary mode) */
#define CONF_VERBOSE (1<<5) /* talk a lot */
#define CONF_TELNET (1<<6)
#define CONF_HEADER (1<<8) /* throw the header out too */
#define CONF_USERPWD (1<<9) /* user+passwd has been specified */
#define CONF_NOPROGRESS (1<<10) /* shut off the progress meter (auto)
see also _MUTE */
#define CONF_NOBODY (1<<11) /* use HEAD to get http document */
#define CONF_FAILONERROR (1<<12) /* Makes urlget() fail with a return code
WITHOUT writing anything to the output if
a return code >=300 is returned from the
server. */
#define CONF_RANGE (1<<13) /* Byte-range request, specified parameter is set */
#define CONF_UPLOAD (1<<14) /* this is an upload, only supported for ftp
currently */
#define CONF_POST (1<<15) /* HTTP POST method */
/* When getting an FTP directory, this switch makes the listing only show file
names and nothing else. Makes machine parsing of the output possible. This
enforces the NLST command to the ftp server, compared to the otherwise
used: LIST. */
#define CONF_FTPLISTONLY (1<<16)
/* Set the referer string */
#define CONF_REFERER (1<<17)
#define CONF_PROXYUSERPWD (1<<18) /* Proxy user+passwd has been specified */
/* For FTP, use PORT instead of PASV! */
#define CONF_FTPPORT (1<<19)
/* FTP: Append instead of overwrite on upload! */
#define CONF_FTPAPPEND (1<<20)
#define CONF_HTTPS (1<<21) /* Use SSLeay for encrypted communication */
#define CONF_NETRC (1<<22) /* read user+password from .netrc */
#define CONF_FOLLOWLOCATION (1<<23) /* get the page that the Location: tells
us to get */
#define CONF_FTPASCII (1<<24) /* use TYPE A for transfer */
#define CONF_HTTPPOST (1<<25) /* this causes a multipart/form-data
HTTP POST */
#define CONF_NOPROT (1<<26) /* host name specified without protocol */
#define CONF_PUT (1<<27) /* PUT the input file */
#define CONF_MUTE (1<<28) /* force NOPROGRESS */
#define CONF_DICT (1<<29) /* DICT:// protocol */
#define CONF_FILE (1<<30) /* FILE:// protocol */
#define CONF_LDAP (1<<31) /* LDAP:// protocol */
struct HttpHeader {
struct HttpHeader *next; /* next entry in the list */
@@ -132,88 +88,89 @@ struct HttpPost {
may return other values, stay prepared. */
typedef enum {
URG_OK = 0,
URG_UNSUPPORTED_PROTOCOL,
URG_FAILED_INIT,
URG_URL_MALFORMAT,
URG_URL_MALFORMAT_USER,
URG_COULDNT_RESOLVE_PROXY,
URG_COULDNT_RESOLVE_HOST,
URG_COULDNT_CONNECT,
URG_FTP_WEIRD_SERVER_REPLY,
URG_FTP_ACCESS_DENIED,
URG_FTP_USER_PASSWORD_INCORRECT,
URG_FTP_WEIRD_PASS_REPLY,
URG_FTP_WEIRD_USER_REPLY,
URG_FTP_WEIRD_PASV_REPLY,
URG_FTP_WEIRD_227_FORMAT,
URG_FTP_CANT_GET_HOST,
URG_FTP_CANT_RECONNECT,
URG_FTP_COULDNT_SET_BINARY,
URG_PARTIAL_FILE,
URG_FTP_COULDNT_RETR_FILE,
URG_FTP_WRITE_ERROR,
URG_FTP_QUOTE_ERROR,
URG_HTTP_NOT_FOUND,
URG_WRITE_ERROR,
CURLE_OK = 0,
CURLE_UNSUPPORTED_PROTOCOL,
CURLE_FAILED_INIT,
CURLE_URL_MALFORMAT,
CURLE_URL_MALFORMAT_USER,
CURLE_COULDNT_RESOLVE_PROXY,
CURLE_COULDNT_RESOLVE_HOST,
CURLE_COULDNT_CONNECT,
CURLE_FTP_WEIRD_SERVER_REPLY,
CURLE_FTP_ACCESS_DENIED,
CURLE_FTP_USER_PASSWORD_INCORRECT,
CURLE_FTP_WEIRD_PASS_REPLY,
CURLE_FTP_WEIRD_USER_REPLY,
CURLE_FTP_WEIRD_PASV_REPLY,
CURLE_FTP_WEIRD_227_FORMAT,
CURLE_FTP_CANT_GET_HOST,
CURLE_FTP_CANT_RECONNECT,
CURLE_FTP_COULDNT_SET_BINARY,
CURLE_PARTIAL_FILE,
CURLE_FTP_COULDNT_RETR_FILE,
CURLE_FTP_WRITE_ERROR,
CURLE_FTP_QUOTE_ERROR,
CURLE_HTTP_NOT_FOUND,
CURLE_WRITE_ERROR,
URG_MALFORMAT_USER, /* the user name is illegally specified */
URG_FTP_COULDNT_STOR_FILE, /* failed FTP upload */
URG_READ_ERROR, /* couldn't open/read from file */
CURLE_MALFORMAT_USER, /* the user name is illegally specified */
CURLE_FTP_COULDNT_STOR_FILE, /* failed FTP upload */
CURLE_READ_ERROR, /* couldn't open/read from file */
URG_OUT_OF_MEMORY,
URG_OPERATION_TIMEOUTED, /* the timeout time was reached */
URG_FTP_COULDNT_SET_ASCII, /* TYPE A failed */
CURLE_OUT_OF_MEMORY,
CURLE_OPERATION_TIMEOUTED, /* the timeout time was reached */
CURLE_FTP_COULDNT_SET_ASCII, /* TYPE A failed */
URG_FTP_PORT_FAILED, /* FTP PORT operation failed */
CURLE_FTP_PORT_FAILED, /* FTP PORT operation failed */
URG_FTP_COULDNT_USE_REST, /* the REST command failed */
URG_FTP_COULDNT_GET_SIZE, /* the SIZE command failed */
CURLE_FTP_COULDNT_USE_REST, /* the REST command failed */
CURLE_FTP_COULDNT_GET_SIZE, /* the SIZE command failed */
URG_HTTP_RANGE_ERROR, /* The RANGE "command" didn't seem to work */
CURLE_HTTP_RANGE_ERROR, /* The RANGE "command" didn't seem to work */
URG_HTTP_POST_ERROR,
CURLE_HTTP_POST_ERROR,
URG_SSL_CONNECT_ERROR, /* something was wrong when connecting with SSL */
CURLE_SSL_CONNECT_ERROR, /* something was wrong when connecting with SSL */
URG_FTP_BAD_DOWNLOAD_RESUME, /* couldn't resume download */
CURLE_FTP_BAD_DOWNLOAD_RESUME, /* couldn't resume download */
URG_FILE_COULDNT_READ_FILE,
CURLE_FILE_COULDNT_READ_FILE,
URG_LDAP_CANNOT_BIND,
URG_LDAP_SEARCH_FAILED,
URG_LIBRARY_NOT_FOUND,
URG_FUNCTION_NOT_FOUND,
CURLE_LDAP_CANNOT_BIND,
CURLE_LDAP_SEARCH_FAILED,
CURLE_LIBRARY_NOT_FOUND,
CURLE_FUNCTION_NOT_FOUND,
URL_LAST
} UrgError;
CURLE_ABORTED_BY_CALLBACK,
CURLE_BAD_FUNCTION_ARGUMENT,
CURLE_BAD_CALLING_ORDER,
CURL_LAST
} CURLcode;
/* This is just to make older programs not break: */
#define URG_FTP_PARTIAL_FILE URG_PARTIAL_FILE
#define CURLE_FTP_PARTIAL_FILE CURLE_PARTIAL_FILE
#define URGTAG_DONE -1
#define URGTAG_LAST -1
#define URGTAG_END -1
#define URLGET_ERROR_SIZE 256
#define CURL_ERROR_SIZE 256
/* maximum URL length we deal with */
#define URL_MAX_LENGTH 4096
#define URL_MAX_LENGTH_TXT "4095"
/* name is uppercase URGTAG_<name>,
type is one of the defined URGTYPE_<type>
/* name is uppercase CURLOPT_<name>,
type is one of the defined CURLOPTTYPE_<type>
number is unique identifier */
#define T(name,type,number) URGTAG_ ## name = URGTYPE_ ## type + number
#define T(name,type,number) CURLOPT_ ## name = CURLOPTTYPE_ ## type + number
/* long may be 32 or 64 bits, but we should never depend on anything else
but 32 */
#define URGTYPE_LONG 0
#define URGTYPE_OBJECTPOINT 10000
#define URGTYPE_FUNCTIONPOINT 20000
#define CURLOPTTYPE_LONG 0
#define CURLOPTTYPE_OBJECTPOINT 10000
#define CURLOPTTYPE_FUNCTIONPOINT 20000
typedef enum {
URGTAG_NOTHING, /* the first unused */
T(NOTHING, LONG, 0), /********* the first one is unused ************/
/* This is the FILE * the regular output should be written to. */
T(FILE, OBJECTPOINT, 1),
@@ -222,50 +179,51 @@ typedef enum {
T(URL, OBJECTPOINT, 2),
/* Port number to connect to, if other than default. Specify the CONF_PORT
flag in the URGTAG_FLAGS to activate this */
flag in the CURLOPT_FLAGS to activate this */
T(PORT, LONG, 3),
/* Name of proxy to use. Specify the CONF_PROXY flag in the URGTAG_FLAGS to
/* Name of proxy to use. Specify the CONF_PROXY flag in the CURLOPT_FLAGS to
activate this */
T(PROXY, OBJECTPOINT, 4),
/* Name and password to use when fetching. Specify the CONF_USERPWD flag in
the URGTAG_FLAGS to activate this */
the CURLOPT_FLAGS to activate this */
T(USERPWD, OBJECTPOINT, 5),
/* Name and password to use with Proxy. Specify the CONF_PROXYUSERPWD
flag in the URGTAG_FLAGS to activate this */
flag in the CURLOPT_FLAGS to activate this */
T(PROXYUSERPWD, OBJECTPOINT, 6),
/* Range to get, specified as an ASCII string. Specify the CONF_RANGE flag
in the URGTAG_FLAGS to activate this */
in the CURLOPT_FLAGS to activate this */
T(RANGE, OBJECTPOINT, 7),
#if 0
/* Configuration flags */
T(FLAGS, LONG, 8),
#endif
/* Specified file stream to upload from (use as input): */
T(INFILE, OBJECTPOINT, 9),
/* Buffer to receive error messages in, must be at least URLGET_ERROR_SIZE
bytes big. If this is not used, error messages go to stderr instead: */
/* Buffer to receive error messages in, must be at least CURL_ERROR_SIZE
* bytes big. If this is not used, error messages go to stderr instead: */
T(ERRORBUFFER, OBJECTPOINT, 10),
/* Function that will be called to store the output (instead of fwrite). The
parameters will use fwrite() syntax, make sure to follow them. */
* parameters will use fwrite() syntax, make sure to follow them. */
T(WRITEFUNCTION, FUNCTIONPOINT, 11),
/* Function that will be called to read the input (instead of fread). The
parameters will use fread() syntax, make sure to follow them. */
* parameters will use fread() syntax, make sure to follow them. */
T(READFUNCTION, FUNCTIONPOINT, 12),
/* Time-out the read operation after this amount of seconds */
T(TIMEOUT, LONG, 13),
/* If the URGTAG_INFILE is used, this can be used to inform urlget about how
large the file being sent really is. That allows better error checking
and better verifies that the upload was successful. -1 means unknown
size. */
/* If the CURLOPT_INFILE is used, this can be used to inform libcurl about
* how large the file being sent really is. That allows better error
* checking and better verifies that the upload was successful. -1 means
* unknown size. */
T(INFILESIZE, LONG, 14),
/* POST input fields. */
@@ -364,8 +322,25 @@ typedef enum {
as described elsewhere. */
T(WRITEINFO, OBJECTPOINT, 40),
URGTAG_LASTENTRY /* the last unused */
} UrgTag;
/* Previous FLAG bits */
T(VERBOSE, LONG, 41), /* talk a lot */
T(HEADER, LONG, 42), /* throw the header out too */
T(NOPROGRESS, LONG, 43), /* shut off the progress meter */
T(NOBODY, LONG, 44), /* use HEAD to get http document */
T(FAILONERROR, LONG, 45), /* no output on http error codes >= 300 */
T(UPLOAD, LONG, 46), /* this is an upload */
T(POST, LONG, 47), /* HTTP POST method */
T(FTPLISTONLY, LONG, 48), /* Use NLST when listing ftp dir */
T(FTPAPPEND, LONG, 50), /* Append instead of overwrite on upload! */
T(NETRC, LONG, 51), /* read user+password from .netrc */
T(FOLLOWLOCATION, LONG, 52), /* use Location: Luke! */
T(FTPASCII, LONG, 53), /* use TYPE A for transfer */
T(PUT, LONG, 54), /* PUT the input file */
T(MUTE, LONG, 55), /* force NOPROGRESS */
CURLOPT_LASTENTRY /* the last unused */
} CURLoption;
#define CURL_PROGRESS_STATS 0 /* default progress display */
#define CURL_PROGRESS_BAR 1
@@ -388,23 +363,11 @@ typedef char bool;
#endif /* (rabe) */
#endif
/**********************************************************************
*
* >>> urlget() interface #defines changed in v5! <<<
*
* You enter parameters as tags. Tags are specified as a pair of parameters.
* The first parameter in a pair is the tag identifier, telling urlget what
* kind of tag it is, and the second is the data. The tags may come in any
* order but MUST ALWAYS BE TERMINATED with an ending URGTAG_DONE (which
* needs no data).
*
* _Very_ simple example:
*
* curl_urlget(URGTAG_URL, "http://www.fts.frontec.se/~dast/", URGTAG_DONE);
*
***********************************************************************/
#if 0
/* At last, I stand here in front of you today and can officially proclaim
this function prototype as history... 17th of May, 2000 */
UrgError curl_urlget(UrgTag, ...);
#endif
/* external form function */
int curl_FormParse(char *string,
@@ -418,9 +381,10 @@ char *curl_GetEnv(char *variable);
char *curl_version(void);
/* This is the version number */
#define LIBCURL_VERSION "6.5"
#define LIBCURL_VERSION "7.0.1beta"
#define LIBCURL_VERSION_NUM 0x070001
/* linked-list structure for QUOTE */
/* linked-list structure for the CURLOPT_QUOTE option */
struct curl_slist {
char *data;
struct curl_slist *next;
@@ -429,4 +393,192 @@ struct curl_slist {
struct curl_slist *curl_slist_append(struct curl_slist *list, char *data);
void curl_slist_free_all(struct curl_slist *list);
#endif /* __CURL_CURL_H */
/*
* NAME curl_init()
*
* DESCRIPTION
*
* Inits libcurl globally. This must be used before any libcurl calls can
* be used. This may install global plug-ins or whatever. (This does not
* do winsock inits in Windows.)
*
* EXAMPLE
*
* curl_init();
*
*/
CURLcode curl_init(void);
/*
 * NAME curl_free()
 *
 * DESCRIPTION
 *
 * Frees libcurl globally. This must be used after all libcurl calls have
 * been used. This may remove global plug-ins or whatever. (This does not
 * do winsock cleanups in Windows.)
 *
 * EXAMPLE
 *
 * curl_free();
 *
 */
void curl_free(void);
/*
 * NAME curl_open()
 *
 * DESCRIPTION
 *
 * Opens a general curl session. It does not try to connect or do anything
 * on the network because of this call. The specified URL is only used
 * to enable curl to figure out what protocol to "activate".
 *
 * A session should be looked upon as a series of requests to a single host. A
 * session interacts with one host only, using one single protocol.
 *
 * The URL is not required. If set to "" or NULL, it can still be set later
 * using the curl_setopt() function. If the curl_connect() function is called
 * without the URL being known, it will return error.
 *
 * EXAMPLE
 *
 * CURLcode result;
 * CURL *curl;
 * result = curl_open(&curl, "http://curl.haxx.nu/libcurl/");
 * if(result != CURL_OK) {
 *   return result;
 * }
 */
CURLcode curl_open(CURL **curl, char *url);
/*
 * NAME curl_setopt()
 *
 * DESCRIPTION
 *
 * Sets a particular option to the specified value.
 *
 * EXAMPLE
 *
 * CURL *curl;
 * curl_setopt(curl, CURL_HTTP_FOLLOW_LOCATION, TRUE);
 */
CURLcode curl_setopt(CURL *handle, CURLoption option, ...);
/*
* NAME curl_close()
*
* DESCRIPTION
*
* Closes a session previously opened with curl_open()
*
* EXAMPLE
*
* CURL *curl;
* CURLcode result;
*
* result = curl_close(curl);
*/
CURLcode curl_close(CURL *curl); /* the opposite of curl_open() */
/* Reads up to 'buffersize' bytes from the connection into 'buf'; the number
   of bytes actually read is returned in *n. (NOTE(review): presumably
   operates on a connection established with curl_connect() — confirm.) */
CURLcode curl_read(CURLconnect *c_conn, char *buf, size_t buffersize,
size_t *n);
/* Writes 'amount' bytes from 'buf' to the connection; the number of bytes
   actually written is returned in *n. (NOTE(review): presumably operates on
   a connection established with curl_connect() — confirm.) */
CURLcode curl_write(CURLconnect *c_conn, char *buf, size_t amount,
size_t *n);
/*
 * NAME curl_connect()
 *
 * DESCRIPTION
 *
 * Connects to the peer server and performs the initial setup. This function
 * writes a connect handle to its second argument that is a unique handle for
 * this connect. This allows multiple connects from the same handle returned
 * by curl_open().
 *
 * EXAMPLE
 *
 * CURLcode result;
 * CURL *curl;
 * CURLconnect *connect;
 * result = curl_connect(curl, &connect);
 */
CURLcode curl_connect(CURL *curl, CURLconnect **in_connect);
/*
 * NAME curl_do()
 *
 * DESCRIPTION
 *
 * (Note: May 3rd 2000: this function does not currently allow you to
 * specify a document, it will use the one set previously)
 *
 * This function asks for the particular document, file or resource that
 * resides on the server we have connected to. You may specify a full URL,
 * just an absolute path or even a relative path. That means, if you're just
 * getting one file from the remote site, you can use the same URL as input
 * for both curl_open() as well as for this function.
 *
 * In the event there is a host name, port number, user name or password part
 * in the URL, you can use the 'flags' argument to ignore them completely, or
 * at your choice, make the function fail if you're trying to get a URL from
 * a different host than you connected to with curl_connect().
 *
 * You can only get one document at a time using the same connection. When one
 * document has been received, you can however request again.
 *
 * When the transfer is done, curl_done() MUST be called.
 *
 * EXAMPLE
 *
 * CURLcode result;
 * CURLconnect *connect;
 * result = curl_do(connect);
 */
CURLcode curl_do(CURLconnect *in_conn);
/*
 * NAME curl_done()
 *
 * DESCRIPTION
 *
 * When the transfer following a curl_do() call is done, this function should
 * get called.
 *
 * EXAMPLE
 *
 * CURLcode result;
 * CURLconnect *connect;
 * result = curl_done(connect);
 */
CURLcode curl_done(CURLconnect *connect);
/*
* NAME curl_disconnect()
*
* DESCRIPTION
*
* Disconnects from the peer server and performs connection cleanup.
*
* EXAMPLE
*
* CURLcode result;
* CURLconnect *connect;
* result = curl_disconnect(connect); */
CURLcode curl_disconnect(CURLconnect *connect);
/*
 * NAME curl_getdate()
 *
 * DESCRIPTION
 *
 * Returns the time, in seconds since 1 Jan 1970, of the time string given in
 * the first argument. The time argument in the second parameter is for cases
 * where the specified time is relative to now, like 'two weeks' or 'tomorrow'
 * etc.
 */
time_t curl_getdate(const char *p, const time_t *now);
#endif /* __CURL_CURL_H */

46
include/curl/easy.h Normal file
View File

@@ -0,0 +1,46 @@
#ifndef __CURL_EASY_H
#define __CURL_EASY_H
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
* License for the specific language governing rights and limitations
* under the License.
*
* The Original Code is Curl.
*
* The Initial Developer of the Original Code is Daniel Stenberg.
*
* Portions created by the Initial Developer are Copyright (C) 1998.
* All Rights Reserved.
*
* ------------------------------------------------------------
* Main author:
* - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
*
* http://curl.haxx.nu
*
* $Source$
* $Revision$
* $Date$
* $Author$
* $State$
* $Locker$
*
* ------------------------------------------------------------
****************************************************************************/
/* Creates and returns an easy handle; returns NULL on failure. */
CURL *curl_easy_init(void);
/* Sets an option on an easy handle; options and values are forwarded to
   curl_setopt(). */
CURLcode curl_easy_setopt(CURL *curl, CURLoption option, ...);
/* Performs the transfer as set up on the handle. */
CURLcode curl_easy_perform(CURL *curl);
/* Closes the handle and frees libcurl's global state; the counterpart of
   curl_easy_init(). */
void curl_easy_cleanup(CURL *curl);
#endif

View File

45
include/curl/types.h Normal file
View File

@@ -0,0 +1,45 @@
#ifndef __CURL_TYPES_H
#define __CURL_TYPES_H
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
* License for the specific language governing rights and limitations
* under the License.
*
* The Original Code is Curl.
*
* The Initial Developer of the Original Code is Daniel Stenberg.
*
* Portions created by the Initial Developer are Copyright (C) 1998.
* All Rights Reserved.
*
* ------------------------------------------------------------
* Main author:
* - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
*
* http://curl.haxx.nu
*
* $Source$
* $Revision$
* $Date$
* $Author$
* $State$
* $Locker$
*
* ------------------------------------------------------------
****************************************************************************/
/* Opaque handle types; the concrete structures are internal to the
   library and callers only ever hold pointers to them. */
typedef void CURL;
typedef void CURLconnect;
#endif /* __CURL_TYPES_H */

View File

@@ -7,7 +7,7 @@ AUTOMAKE_OPTIONS = foreign no-dependencies
noinst_LIBRARIES = libcurl.a
# Some flags needed when trying to cause warnings ;-)
CFLAGS = -g #-Wall -pedantic
CFLAGS = -g -Wall #-pedantic
INCLUDES = -I$(top_srcdir)/include
@@ -23,7 +23,7 @@ download.c getdate.h ldap.c ssluse.c version.c \
download.h getenv.c ldap.h ssluse.h \
escape.c getenv.h mprintf.c telnet.c \
escape.h getpass.c netrc.c telnet.h \
writeout.c writeout.h
writeout.c writeout.h highlevel.c strequal.c strequal.h easy.c
# Say $(srcdir), so GNU make does not report an ambiguity with the .y.c rule.
$(srcdir)/getdate.c: getdate.y

View File

@@ -77,11 +77,11 @@ AUTOMAKE_OPTIONS = foreign no-dependencies
noinst_LIBRARIES = libcurl.a
# Some flags needed when trying to cause warnings ;-)
CFLAGS = -g #-Wall -pedantic
CFLAGS = -g -Wall #-pedantic
INCLUDES = -I$(top_srcdir)/include
libcurl_a_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c base64.c file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c http.h sendf.h url.c dict.c ftp.h if2ip.c speedcheck.c url.h dict.h getdate.c if2ip.h speedcheck.h urldata.h download.c getdate.h ldap.c ssluse.c version.c download.h getenv.c ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h getpass.c netrc.c telnet.h writeout.c writeout.h
libcurl_a_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c base64.c file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c http.h sendf.h url.c dict.c ftp.h if2ip.c speedcheck.c url.h dict.h getdate.c if2ip.h speedcheck.h urldata.h download.c getdate.h ldap.c ssluse.c version.c download.h getenv.c ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h getpass.c netrc.c telnet.h writeout.c writeout.h highlevel.c strequal.c strequal.h easy.c
mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
CONFIG_HEADER = ../config.h ../src/config.h
@@ -97,7 +97,8 @@ libcurl_a_LIBADD =
libcurl_a_OBJECTS = file.o timeval.o base64.o hostip.o progress.o \
formdata.o cookie.o http.o sendf.o ftp.o url.o dict.o if2ip.o \
speedcheck.o getdate.o download.o ldap.o ssluse.o version.o getenv.o \
escape.o mprintf.o telnet.o getpass.o netrc.o writeout.o
escape.o mprintf.o telnet.o getpass.o netrc.o writeout.o highlevel.o \
strequal.o easy.o
AR = ar
COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
CCLD = $(CC)
@@ -116,7 +117,7 @@ all: all-redirect
.SUFFIXES:
.SUFFIXES: .S .c .o .s
$(srcdir)/Makefile.in: Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4)
cd $(top_srcdir) && $(AUTOMAKE) --foreign --include-deps lib/Makefile
cd $(top_srcdir) && $(AUTOMAKE) --foreign lib/Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
cd $(top_builddir) \

View File

@@ -9,7 +9,7 @@
CC = gcc
AR = ar
RANLIB = ranlib
OPENSSL_PATH = ../../openssl-0.9.4
OPENSSL_PATH = ../../openssl-0.9.5a
########################################################
## Nothing more to do below this line!
@@ -31,12 +31,14 @@ urldata.h formdata.c hostip.h netrc.h stdcheaders.h formdata.h \
if2ip.c progress.c sendf.c sendf.h speedcheck.c speedcheck.h \
ftp.c ftp.h getpass.c getpass.h version.c timeval.c timeval.h cookie.c \
cookie.h escape.c escape.h getdate.c getdate.h dict.h dict.c http.c \
http.h telnet.c telnet.h file.c file.h ldap.c ldap.h writeout.c writeout.h
http.h telnet.c telnet.h file.c file.h ldap.c ldap.h writeout.c writeout.h \
highlevel.c strequal.c strequal.h easy.c
libcurl_a_OBJECTS = base64.o getenv.o mprintf.o url.o download.o \
getpass.o ssluse.o hostip.o netrc.o formdata.o if2ip.o progress.o \
sendf.o speedcheck.o ftp.o getpass.o version.o timeval.o \
cookie.o escape.o getdate.o dict.o http.o telnet.o file.o ldap.o writeout.o
cookie.o escape.o getdate.o dict.o http.o telnet.o file.o ldap.o writeout.o \
highlevel.o strequal.o easy.o
LIBRARIES = $(libcurl_a_LIBRARIES)
SOURCES = $(libcurl_a_SOURCES)
@@ -63,5 +65,6 @@ clean:
-@erase $(libcurl_a_OBJECTS)
distrib: clean
-@erase $(libcurl_a_LIBRARIES)

View File

@@ -23,7 +23,7 @@ LINKD = link.exe -lib
CCRS = cl.exe /ML /O2 /D "NDEBUG" /D "USE_SSLEAY" /I "$(OPENSSL_PATH)/inc32" /I "$(OPENSSL_PATH)/inc32/openssl"
LINKRS = link.exe -lib /LIBPATH:$(OPENSSL_PATH)/out32dll
CFLAGS = /nologo /W3 /GX /D "WIN32" /D "_MBCS" /D "_LIB" /YX /FD /c
CFLAGS = /I "../include" /nologo /W3 /GX /D "WIN32" /D "VC6" /D "_MBCS" /D "_LIB" /YX /FD /c /D "MSDOS"
LFLAGS = /nologo /out:$(PROGRAM_NAME)
LINKLIBS = kernel32.lib wsock32.lib
LINKSLIBS = libeay32.lib ssleay32.lib RSAglue.lib
@@ -36,6 +36,7 @@ RELEASE_OBJS= \
formdatar.obj \
ftpr.obj \
httpr.obj \
ldapr.obj \
dictr.obj \
telnetr.obj \
getdater.obj \
@@ -50,9 +51,9 @@ RELEASE_OBJS= \
speedcheckr.obj \
ssluser.obj \
timevalr.obj \
uploadr.obj \
urlr.obj \
filer.obj \
writeoutr.obj \
versionr.obj
DEBUG_OBJS= \
@@ -63,6 +64,7 @@ DEBUG_OBJS= \
formdatad.obj \
ftpd.obj \
httpd.obj \
ldapd.obj \
dictd.obj \
telnetd.obj \
getdated.obj \
@@ -77,9 +79,9 @@ DEBUG_OBJS= \
speedcheckd.obj \
sslused.obj \
timevald.obj \
uploadd.obj \
urld.obj \
filed.obj \
writeoutd.obj \
versiond.obj
RELEASE_SSL_OBJS= \
@@ -90,6 +92,7 @@ RELEASE_SSL_OBJS= \
formdatars.obj \
ftprs.obj \
httprs.obj \
ldaprs.obj \
dictrs.obj \
telnetrs.obj \
getdaters.obj \
@@ -104,9 +107,9 @@ RELEASE_SSL_OBJS= \
speedcheckrs.obj \
sslusers.obj \
timevalrs.obj \
uploadrs.obj \
urlrs.obj \
filers.obj \
writeouts.obj \
versionrs.obj
LINK_OBJS= \
@@ -117,6 +120,7 @@ LINK_OBJS= \
formdata.obj \
ftp.obj \
http.obj \
ldap.obj \
dict.obj \
telnet.obj \
getdate.obj \
@@ -131,9 +135,9 @@ LINK_OBJS= \
speedcheck.obj \
ssluse.obj \
timeval.obj \
upload.obj \
url.obj \
file.obj \
writeout.obj \
version.obj
all : release
@@ -163,6 +167,8 @@ ftpr.obj: ftp.c
$(CCR) $(CFLAGS) ftp.c
httpr.obj: http.c
$(CCR) $(CFLAGS) http.c
ldapr.obj: ldap.c
$(CCR) $(CFLAGS) ldap.c
dictr.obj: dict.c
$(CCR) $(CFLAGS) dict.c
telnetr.obj: telnet.c
@@ -191,12 +197,12 @@ ssluser.obj: ssluse.c
$(CCR) $(CFLAGS) ssluse.c
timevalr.obj: timeval.c
$(CCR) $(CFLAGS) timeval.c
uploadr.obj: upload.c
$(CCR) $(CFLAGS) upload.c
urlr.obj: url.c
$(CCR) $(CFLAGS) url.c
filer.obj: file.c
$(CCR) $(CFLAGS) file.c
writeoutr.obj: writeout.c
$(CCR) $(CFLAGS) writeout.c
versionr.obj: version.c
$(CCR) $(CFLAGS) version.c
@@ -215,6 +221,8 @@ ftpd.obj: ftp.c
$(CCD) $(CFLAGS) ftp.c
httpd.obj: http.c
$(CCD) $(CFLAGS) http.c
ldapd.obj: ldap.c
$(CCR) $(CFLAGS) ldap.c
dictd.obj: dict.c
$(CCD) $(CFLAGS) dict.c
telnetd.obj: telnet.c
@@ -243,12 +251,12 @@ sslused.obj: ssluse.c
$(CCD) $(CFLAGS) ssluse.c
timevald.obj: timeval.c
$(CCD) $(CFLAGS) timeval.c
uploadd.obj: upload.c
$(CCD) $(CFLAGS) upload.c
urld.obj: url.c
$(CCD) $(CFLAGS) url.c
filed.obj: file.c
$(CCD) $(CFLAGS) file.c
writeoutd.obj: writeout.c
$(CCR) $(CFLAGS) writeout.c
versiond.obj: version.c
$(CCD) $(CFLAGS) version.c
@@ -268,6 +276,8 @@ ftprs.obj: ftp.c
$(CCRS) $(CFLAGS) ftp.c
httprs.obj: http.c
$(CCRS) $(CFLAGS) http.c
ldaprs.obj: ldap.c
$(CCR) $(CFLAGS) ldap.c
dictrs.obj: dict.c
$(CCRS) $(CFLAGS) dict.c
telnetrs.obj: telnet.c
@@ -296,12 +306,12 @@ sslusers.obj: ssluse.c
$(CCRS) $(CFLAGS) ssluse.c
timevalrs.obj: timeval.c
$(CCRS) $(CFLAGS) timeval.c
uploadrs.obj: upload.c
$(CCRS) $(CFLAGS) upload.c
urlrs.obj: url.c
$(CCRS) $(CFLAGS) url.c
filers.obj: file.c
$(CCRS) $(CFLAGS) file.c
writeoutrs.obj: writeout.c
$(CCR) $(CFLAGS) writeout.c
versionrs.obj: version.c
$(CCRS) $(CFLAGS) version.c

View File

@@ -62,6 +62,7 @@ Example set of cookies:
#include "cookie.h"
#include "setup.h"
#include "getdate.h"
#include "strequal.h"
/****************************************************************************
*
@@ -131,7 +132,7 @@ struct Cookie *cookie_add(struct CookieInfo *c,
}
else if(strequal("expires", name)) {
co->expirestr=strdup(what);
co->expires = get_date(what, &now);
co->expires = curl_getdate(what, &now);
}
else if(!co->name) {
co->name = strdup(name);
@@ -173,9 +174,11 @@ struct Cookie *cookie_add(struct CookieInfo *c,
return NULL;
}
/* strip off the possible end-of-line characters */
if(ptr=strchr(lineptr, '\r'))
ptr=strchr(lineptr, '\r');
if(ptr)
*ptr=0; /* clear it */
if(ptr=strchr(lineptr, '\n'))
ptr=strchr(lineptr, '\n');
if(ptr)
*ptr=0; /* clear it */
firstptr=strtok(lineptr, "\t"); /* first tokenize it on the TAB */

View File

@@ -92,12 +92,17 @@
#include "sendf.h"
#include "progress.h"
#include "strequal.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
CURLcode dict_done(struct connectdata *conn)
{
return CURLE_OK;
}
UrgError dict(struct UrlData *data, char *path, long *bytecount)
CURLcode dict(struct connectdata *conn)
{
int nth;
char *word;
@@ -106,9 +111,13 @@ UrgError dict(struct UrlData *data, char *path, long *bytecount)
char *strategy = NULL;
char *nthdef = NULL; /* This is not part of the protocol, but required
by RFC 2229 */
UrgError result=URG_OK;
CURLcode result=CURLE_OK;
struct UrlData *data=conn->data;
if(data->conf & CONF_USERPWD) {
char *path = conn->path;
long *bytecount = &conn->bytecount;
if(data->bits.user_passwd) {
/* AUTH is missing */
}
@@ -162,7 +171,7 @@ UrgError dict(struct UrlData *data, char *path, long *bytecount)
word
);
result = Transfer(data, data->firstsocket, -1, FALSE, bytecount,
result = Transfer(conn, data->firstsocket, -1, FALSE, bytecount,
-1, NULL); /* no upload */
if(result)
@@ -210,7 +219,7 @@ UrgError dict(struct UrlData *data, char *path, long *bytecount)
word
);
result = Transfer(data, data->firstsocket, -1, FALSE, bytecount,
result = Transfer(conn, data->firstsocket, -1, FALSE, bytecount,
-1, NULL); /* no upload */
if(result)
@@ -234,7 +243,7 @@ UrgError dict(struct UrlData *data, char *path, long *bytecount)
"QUIT\n",
ppath);
result = Transfer(data, data->firstsocket, -1, FALSE, bytecount,
result = Transfer(conn, data->firstsocket, -1, FALSE, bytecount,
-1, NULL);
if(result)
@@ -243,10 +252,5 @@ UrgError dict(struct UrlData *data, char *path, long *bytecount)
}
}
#if 0
ProgressEnd(data);
#endif
pgrsDone(data);
return URG_OK;
return CURLE_OK;
}

View File

@@ -40,6 +40,7 @@
*
* ------------------------------------------------------------
****************************************************************************/
UrgError dict(struct UrlData *data, char *path, long *bytecountp);
CURLcode dict(struct connectdata *conn);
CURLcode dict_done(struct connectdata *conn);
#endif

View File

@@ -78,19 +78,15 @@
#include "speedcheck.h"
#include "sendf.h"
#ifdef USE_ZLIB
#include <zlib.h>
#endif
#define MAX(x,y) ((x)>(y)?(x):(y))
#include <curl/types.h>
/* --- download and upload a stream from/to a socket --- */
/* Parts of this function was brought to us by the friendly Mark Butler
<butlerm@xmission.com>. */
UrgError
Transfer (struct UrlData *data,
CURLcode
Transfer(CURLconnect *c_conn,
/* READ stuff */
int sockfd, /* socket to read from or -1 */
int size, /* -1 if unknown at this point */
@@ -101,484 +97,21 @@ Transfer (struct UrlData *data,
int writesockfd, /* socket to write to, it may very well be
the same we read from. -1 disables */
long *writebytecountp /* return number of bytes written or NULL */
)
)
{
char *buf = data->buffer;
size_t nread;
int bytecount = 0; /* number of bytes read */
int writebytecount = 0; /* number of bytes written */
long contentlength=0; /* size of incoming data */
struct timeval start = tvnow();
struct timeval now = start;
bool header = TRUE; /* incoming data has HTTP header */
int headerline = 0; /* counts header lines to better track the
first one */
struct connectdata *conn = (struct connectdata *)c_conn;
if(!conn)
return CURLE_BAD_FUNCTION_ARGUMENT;
char *hbufp; /* points at *end* of header line */
int hbuflen = 0;
char *str; /* within buf */
char *str_start; /* within buf */
char *end_ptr; /* within buf */
char *p; /* within headerbuff */
bool content_range = FALSE; /* set TRUE if Content-Range: was found */
int offset = 0; /* possible resume offset read from the
Content-Range: header */
int code = 0; /* error code from the 'HTTP/1.? XXX' line */
/* now copy all input parameters */
conn->sockfd = sockfd;
conn->size = size;
conn->getheader = getheader;
conn->bytecountp = bytecountp;
conn->writesockfd = writesockfd;
conn->writebytecountp = writebytecountp;
/* for the low speed checks: */
UrgError urg;
time_t timeofdoc=0;
long bodywrites=0;
return CURLE_OK;
char newurl[URL_MAX_LENGTH]; /* buffer for Location: URL */
/* the highest fd we use + 1 */
int maxfd = (sockfd>writesockfd?sockfd:writesockfd)+1;
hbufp = data->headerbuff;
myalarm (0); /* switch off the alarm-style timeout */
now = tvnow();
start = now;
#define KEEP_READ 1
#define KEEP_WRITE 2
pgrsTime(data, TIMER_PRETRANSFER);
if (!getheader) {
header = FALSE;
if(size > 0)
pgrsSetDownloadSize(data, size);
}
{
fd_set readfd;
fd_set writefd;
fd_set rkeepfd;
fd_set wkeepfd;
struct timeval interval;
int keepon=0;
/* timeout every X second
- makes a better progressmeter (i.e even when no data is read, the
meter can be updated and reflect reality)
- allows removal of the alarm() crap
- variable timeout is easier
*/
FD_ZERO (&readfd); /* clear it */
if(sockfd != -1) {
FD_SET (sockfd, &readfd); /* read socket */
keepon |= KEEP_READ;
}
FD_ZERO (&writefd); /* clear it */
if(writesockfd != -1) {
FD_SET (writesockfd, &writefd); /* write socket */
keepon |= KEEP_WRITE;
}
/* get these in backup variables to be able to restore them on each lap in
the select() loop */
rkeepfd = readfd;
wkeepfd = writefd;
while (keepon) {
readfd = rkeepfd; /* set those every lap in the loop */
writefd = wkeepfd;
interval.tv_sec = 1;
interval.tv_usec = 0;
switch (select (maxfd, &readfd, &writefd, NULL, &interval)) {
case -1: /* select() error, stop reading */
keepon = 0; /* no more read or write */
continue;
case 0: /* timeout */
break;
default:
if((keepon & KEEP_READ) && FD_ISSET(sockfd, &readfd)) {
/* read! */
#ifdef USE_SSLEAY
if (data->use_ssl) {
nread = SSL_read (data->ssl, buf, BUFSIZE - 1);
}
else {
#endif
nread = sread (sockfd, buf, BUFSIZE - 1);
#ifdef USE_SSLEAY
}
#endif /* USE_SSLEAY */
/* NULL terminate, allowing string ops to be used */
if (0 < (signed int) nread)
buf[nread] = 0;
/* if we receive 0 or less here, the server closed the connection and
we bail out from this! */
else if (0 >= (signed int) nread) {
keepon &= ~KEEP_READ;
break;
}
str = buf; /* Default buffer to use when we write the
buffer, it may be changed in the flow below
before the actual storing is done. */
/* Since this is a two-state thing, we check if we are parsing
headers at the moment or not. */
if (header) {
/* we are in parse-the-header-mode */
/* header line within buffer loop */
do {
int hbufp_index;
str_start = str; /* str_start is start of line within buf */
end_ptr = strchr (str_start, '\n');
if (!end_ptr) {
/* no more complete header lines within buffer */
/* copy what is remaining into headerbuff */
int str_length = (int)strlen(str);
if (hbuflen + (int)str_length >= data->headersize) {
char *newbuff;
long newsize=MAX((hbuflen+str_length)*3/2,
data->headersize*2);
hbufp_index = hbufp - data->headerbuff;
newbuff = (char *)realloc(data->headerbuff, newsize);
if(!newbuff) {
failf (data, "Failed to alloc memory for big header!");
return URG_READ_ERROR;
}
data->headersize=newsize;
data->headerbuff = newbuff;
hbufp = data->headerbuff + hbufp_index;
}
strcpy (hbufp, str);
hbufp += strlen (str);
hbuflen += strlen (str);
break; /* read more and try again */
}
str = end_ptr + 1; /* move just past new line */
if (hbuflen + (str - str_start) >= data->headersize) {
char *newbuff;
long newsize=MAX((hbuflen+(str-str_start))*3/2,
data->headersize*2);
hbufp_index = hbufp - data->headerbuff;
newbuff = (char *)realloc(data->headerbuff, newsize);
if(!newbuff) {
failf (data, "Failed to alloc memory for big header!");
return URG_READ_ERROR;
}
data->headersize= newsize;
data->headerbuff = newbuff;
hbufp = data->headerbuff + hbufp_index;
}
/* copy to end of line */
strncpy (hbufp, str_start, str - str_start);
hbufp += str - str_start;
hbuflen += str - str_start;
*hbufp = 0;
p = data->headerbuff;
/* we now have a full line that p points to */
if (('\n' == *p) || ('\r' == *p)) {
/* Zero-length line means end of header! */
if (-1 != size) /* if known */
size += bytecount; /* we append the already read size */
if ('\r' == *p)
p++; /* pass the \r byte */
if ('\n' == *p)
p++; /* pass the \n byte */
pgrsSetDownloadSize(data, size);
header = FALSE; /* no more header to parse! */
/* now, only output this if the header AND body are requested:
*/
if ((data->conf & (CONF_HEADER | CONF_NOBODY)) ==
CONF_HEADER) {
if((p - data->headerbuff) !=
data->fwrite (data->headerbuff, 1,
p - data->headerbuff, data->out)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
}
}
if(data->writeheader) {
/* obviously, the header is requested to be written to
this file: */
if((p - data->headerbuff) !=
fwrite (data->headerbuff, 1, p - data->headerbuff,
data->writeheader)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
}
}
break; /* exit header line loop */
}
if (!headerline++) {
/* This is the first header, it MUST be the error code line
or else we consiser this to be the body right away! */
if (sscanf (p, " HTTP/1.%*c %3d", &code)) {
/* 404 -> URL not found! */
if (
( ((data->conf & CONF_FOLLOWLOCATION) && (code >= 400))
||
!(data->conf & CONF_FOLLOWLOCATION) && (code >= 300))
&& (data->conf & CONF_FAILONERROR)) {
/* If we have been told to fail hard on HTTP-errors,
here is the check for that: */
/* serious error, go home! */
failf (data, "The requested file was not found");
return URG_HTTP_NOT_FOUND;
}
data->progress.httpcode = code;
}
else {
header = FALSE; /* this is not a header line */
break;
}
}
/* check for Content-Length: header lines to get size */
if (strnequal("Content-Length", p, 14) &&
sscanf (p+14, ": %ld", &contentlength))
size = contentlength;
else if (strnequal("Content-Range", p, 13) &&
sscanf (p+13, ": bytes %d-", &offset)) {
if (data->resume_from == offset) {
/* we asked for a resume and we got it */
content_range = TRUE;
}
}
else if(data->cookies &&
strnequal("Set-Cookie: ", p, 11)) {
cookie_add(data->cookies, TRUE, &p[12]);
}
else if(strnequal("Last-Modified:", p,
strlen("Last-Modified:")) &&
data->timecondition) {
time_t secs=time(NULL);
timeofdoc = get_date(p+strlen("Last-Modified:"), &secs);
}
else if ((code >= 300 && code < 400) &&
(data->conf & CONF_FOLLOWLOCATION) &&
strnequal("Location", p, 8) &&
sscanf (p+8, ": %" URL_MAX_LENGTH_TXT "s", newurl)) {
/* this is the URL that the server advices us to get
instead */
data->newurl = strdup (newurl);
}
if (data->conf & CONF_HEADER) {
if(hbuflen != data->fwrite (p, 1, hbuflen, data->out)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
}
}
if(data->writeheader) {
/* the header is requested to be written to this file */
if(hbuflen != fwrite (p, 1, hbuflen, data->writeheader)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
}
}
/* reset hbufp pointer && hbuflen */
hbufp = data->headerbuff;
hbuflen = 0;
}
while (*str); /* header line within buffer */
/* We might have reached the end of the header part here, but
there might be a non-header part left in the end of the read
buffer. */
if (!header) {
/* the next token and forward is not part of
the header! */
/* we subtract the remaining header size from the buffer */
nread -= (str - buf);
}
} /* end if header mode */
/* This is not an 'else if' since it may be a rest from the header
parsing, where the beginning of the buffer is headers and the end
is non-headers. */
if (str && !header && (nread > 0)) {
if(0 == bodywrites) {
/* These checks are only made the first time we are about to
write a chunk of the body */
if(data->conf&CONF_HTTP) {
/* HTTP-only checks */
if (data->resume_from && !content_range ) {
/* we wanted to resume a download, although the server
doesn't seem to support this */
failf (data, "HTTP server doesn't seem to support byte ranges. Cannot resume.");
return URG_HTTP_RANGE_ERROR;
}
else if (data->newurl) {
/* abort after the headers if "follow Location" is set */
infof (data, "Follow to new URL: %s\n", data->newurl);
return URG_OK;
}
else if(data->timecondition && !data->range) {
/* A time condition has been set AND no ranges have been
requested. This seems to be what chapter 13.3.4 of
RFC 2616 defines to be the correct action for a
HTTP/1.1 client */
if((timeofdoc > 0) && (data->timevalue > 0)) {
switch(data->timecondition) {
case TIMECOND_IFMODSINCE:
default:
if(timeofdoc < data->timevalue) {
infof(data,
"The requested document is not new enough");
return URG_OK;
}
break;
case TIMECOND_IFUNMODSINCE:
if(timeofdoc > data->timevalue) {
infof(data,
"The requested document is not old enough");
return URG_OK;
}
break;
} /* switch */
} /* two valid time strings */
} /* we have a time condition */
} /* this is HTTP */
} /* this is the first time we write a body part */
bodywrites++;
if(data->maxdownload &&
(bytecount + nread > data->maxdownload)) {
nread = data->maxdownload - bytecount;
if(nread < 0 ) /* this should be unusual */
nread = 0;
keepon &= ~KEEP_READ; /* we're done reading */
}
bytecount += nread;
pgrsSetDownloadCounter(data, (double)bytecount);
if (nread != data->fwrite (str, 1, nread, data->out)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
}
} /* if (! header and data to read ) */
} /* if( read from socket ) */
if((keepon & KEEP_WRITE) && FD_ISSET(writesockfd, &writefd)) {
/* write */
char scratch[BUFSIZE * 2];
int i, si;
int bytes_written;
if(data->crlf)
buf = data->buffer; /* put it back on the buffer */
nread = data->fread(buf, 1, BUFSIZE, data->in);
writebytecount += nread;
pgrsSetUploadCounter(data, (double)writebytecount);
if (nread<=0) {
/* done */
keepon &= ~KEEP_WRITE; /* we're done writing */
break;
}
/* convert LF to CRLF if so asked */
if (data->crlf) {
for(i = 0, si = 0; i < (int)nread; i++, si++) {
if (buf[i] == 0x0a) {
scratch[si++] = 0x0d;
scratch[si] = 0x0a;
}
else {
scratch[si] = buf[i];
}
}
nread = si;
buf = scratch; /* point to the new buffer */
}
/* write to socket */
#ifdef USE_SSLEAY
if (data->use_ssl) {
bytes_written = SSL_write(data->ssl, buf, nread);
}
else {
#endif
bytes_written = swrite(writesockfd, buf, nread);
#ifdef USE_SSLEAY
}
#endif /* USE_SSLEAY */
if(nread != bytes_written) {
failf(data, "Failed uploading data");
return URG_WRITE_ERROR;
}
}
break;
}
now = tvnow();
pgrsUpdate(data);
urg = speedcheck (data, now);
if (urg)
return urg;
if (data->timeout && (tvdiff (now, start) > data->timeout)) {
failf (data, "Operation timed out with %d out of %d bytes received",
bytecount, size);
return URG_OPERATION_TIMEOUTED;
}
#ifdef MULTIDOC
if(contentlength && bytecount >= contentlength) {
/* we're done with this download, now stop it */
break;
}
#endif
}
}
if(!(data->conf&CONF_NOBODY) && contentlength &&
(bytecount != contentlength)) {
failf(data, "transfer closed with %d bytes remaining to read",
contentlength-bytecount);
return URG_PARTIAL_FILE;
}
pgrsUpdate(data);
if(bytecountp)
*bytecountp = bytecount; /* read count */
if(writebytecountp)
*writebytecountp = writebytecount; /* write count */
return URG_OK;
}

View File

@@ -39,8 +39,8 @@
*
* ------------------------------------------------------------
****************************************************************************/
UrgError
Transfer (struct UrlData *data,
CURLcode
Transfer (struct connectdata *data,
int sockfd, /* socket to read from or -1 */
int size, /* -1 if unknown at this point */
bool getheader, /* TRUE if header parsing is wanted */

160
lib/easy.c Normal file
View File

@@ -0,0 +1,160 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
* License for the specific language governing rights and limitations
* under the License.
*
* The Original Code is Curl.
*
* The Initial Developer of the Original Code is Daniel Stenberg.
*
* Portions created by the Initial Developer are Copyright (C) 1998.
* All Rights Reserved.
*
* ------------------------------------------------------------
* Main author:
* - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
*
* http://curl.haxx.nu
*
* $Source$
* $Revision$
* $Date$
* $Author$
* $State$
* $Locker$
*
* ------------------------------------------------------------
****************************************************************************/
/* -- WIN32 approved -- */
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include "setup.h"
#include "strequal.h"
#if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
#include <winsock.h>
#include <time.h>
#include <io.h>
#else
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <netinet/in.h>
#include <sys/time.h>
#include <sys/resource.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <netdb.h>
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NET_IF_H
#include <net/if.h>
#endif
#include <sys/ioctl.h>
#include <signal.h>
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#endif
#include "urldata.h"
#include <curl/curl.h>
#include "highlevel.h"
#include <curl/types.h>
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
/*
 * curl_easy_init() creates and returns a new easy-interface handle, or
 * NULL if either the global library init or the handle creation fails.
 * The returned CURL* is really a struct UrlData* tagged as CURLI_EASY.
 */
CURL *curl_easy_init(void)
{
  struct UrlData *easy = NULL;

  /* the global library init must succeed before anything else */
  if(curl_init() != 0)
    return NULL;

  /* open a handle with no URL defined at this point */
  if(curl_open((CURL **)&easy, NULL) != CURLE_OK)
    return NULL;

  easy->interf = CURLI_EASY; /* tag this handle as an easy-interface one */
  return easy;
}
/* generic function-pointer type used to pull function-pointer options
   off the variadic argument list with the right width */
typedef int (*func_T)(void);

/*
 * curl_easy_setopt() sets one option on an easy handle. The variadic
 * third argument is read as long, object pointer, or function pointer
 * depending on the option's numeric range, then forwarded to the
 * internal curl_setopt(). Always returns CURLE_OK.
 */
CURLcode curl_easy_setopt(CURL *curl, CURLoption tag, ...)
{
  struct UrlData *data = curl;
  va_list arg;

  va_start(arg, tag);

  /* PORTING NOTE:
     Object pointers can't necessarily be cast to function pointers and
     the three kinds may have different sizes, so the va_arg type must be
     chosen from the option's range and the value forwarded as that exact
     type. */
  if(tag < CURLOPTTYPE_OBJECTPOINT) {
    /* LONG-typed option */
    long lval = va_arg(arg, long);
    curl_setopt(data, tag, lval);
  }
  else if(tag < CURLOPTTYPE_FUNCTIONPOINT) {
    /* object-pointer-typed option */
    void *pval = va_arg(arg, void *);
    curl_setopt(data, tag, pval);
  }
  else {
    /* function-pointer-typed option */
    func_T fval = va_arg(arg, func_T);
    curl_setopt(data, tag, fval);
  }

  va_end(arg);
  return CURLE_OK;
}
/*
 * curl_easy_perform() runs the transfer configured on the handle.
 * Thin wrapper that forwards to the internal curl_transfer(); returns
 * whatever result code that produces.
 */
CURLcode curl_easy_perform(CURL *curl)
{
return curl_transfer(curl);
}
/*
 * curl_easy_cleanup() closes the given easy handle and then releases
 * the library's global resources via curl_free(). The handle must not
 * be used after this call.
 */
void curl_easy_cleanup(CURL *curl)
{
curl_close(curl); /* dispose of the handle itself */
curl_free();      /* global teardown, pairs with curl_init() */
}

View File

@@ -47,7 +47,7 @@
char *curl_escape(char *string)
{
int alloc=strlen(string);
int alloc=strlen(string)+1;
char *ns = malloc(alloc);
unsigned char in;
int newlen = alloc;
@@ -81,16 +81,15 @@ char *curl_escape(char *string)
return ns;
}
char *curl_unescape(char *string)
char *curl_unescape(char *string, int length)
{
int alloc = strlen(string);
int alloc = (length?length:strlen(string))+1;
char *ns = malloc(alloc);
unsigned char in;
int index=0;
int hex;
while(*string) {
while(--alloc) {
in = *string;
if('+' == in)
in = ' ';

View File

@@ -44,6 +44,6 @@
* allocated string or NULL if an error occurred. */
char *curl_escape(char *string);
char *curl_unescape(char *string);
char *curl_unescape(char *string, int length);
#endif

View File

@@ -100,28 +100,30 @@
#include <curl/mprintf.h>
UrgError file(struct UrlData *data, char *path, long *bytecountp)
CURLcode file(struct connectdata *conn)
{
/* This implementation ignores the host name in conformance with
RFC 1738. Only local files (reachable via the standard file system)
are supported. This means that files on remotely mounted directories
(via NFS, Samba, NT sharing) can be accessed through a file:// URL
*/
CURLcode res = CURLE_OK;
char *path = conn->path;
struct stat statbuf;
size_t expected_size=-1;
size_t nread;
struct UrlData *data = conn->data;
char *buf = data->buffer;
int bytecount = 0;
struct timeval start = tvnow();
struct timeval now = start;
int fd;
char *actual_path = curl_unescape(path);
char *actual_path = curl_unescape(path, 0);
#ifdef WIN32
#if defined(WIN32) || defined(__EMX__)
int i;
/* change path separators from '/' to '\\' for Windows */
/* change path separators from '/' to '\\' for Windows and OS/2 */
for (i=0; actual_path[i] != '\0'; ++i)
if (actual_path[i] == '/')
actual_path[i] = '\\';
@@ -134,7 +136,7 @@ UrgError file(struct UrlData *data, char *path, long *bytecountp)
if(fd == -1) {
failf(data, "Couldn't open file %s", path);
return URG_FILE_COULDNT_READ_FILE;
return CURLE_FILE_COULDNT_READ_FILE;
}
if( -1 != fstat(fd, &statbuf)) {
/* we could stat it, then read out the size */
@@ -151,7 +153,7 @@ UrgError file(struct UrlData *data, char *path, long *bytecountp)
if(expected_size != -1)
pgrsSetDownloadSize(data, expected_size);
while (1) {
while (res == CURLE_OK) {
nread = read(fd, buf, BUFSIZE-1);
if (0 <= nread)
@@ -166,21 +168,19 @@ UrgError file(struct UrlData *data, char *path, long *bytecountp)
file descriptor). */
if(nread != data->fwrite (buf, 1, nread, data->out)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
return CURLE_WRITE_ERROR;
}
now = tvnow();
pgrsUpdate(data);
#if 0
ProgressShow (data, bytecount, start, now, FALSE);
#endif
if(pgrsUpdate(data))
res = CURLE_ABORTED_BY_CALLBACK;
}
now = tvnow();
#if 0
ProgressShow (data, bytecount, start, now, TRUE);
#endif
pgrsUpdate(data);
if(pgrsUpdate(data))
res = CURLE_ABORTED_BY_CALLBACK;
close(fd);
return URG_OK;
free(actual_path);
return res;
}

View File

@@ -40,6 +40,6 @@
*
* ------------------------------------------------------------
****************************************************************************/
UrgError file(struct UrlData *data, char *path, long *bytecountp);
CURLcode file(struct connectdata *conn);
#endif

View File

@@ -60,6 +60,8 @@
#include <curl/curl.h>
#include "formdata.h"
#include "strequal.h"
/* Length of the random boundary string. The risk of this being used
in binary data is very close to zero, 64^32 makes
6277101735386680763835789423207666416102355444464034512896
@@ -377,7 +379,7 @@ void FormFree(struct FormData *form)
free(form->line); /* free the line */
free(form); /* free the struct */
} while(form=next); /* continue */
} while((form=next)); /* continue */
}
struct FormData *getFormData(struct HttpPost *post,
@@ -513,12 +515,17 @@ struct FormData *getFormData(struct HttpPost *post,
int FormInit(struct Form *form, struct FormData *formdata )
{
form->data = formdata;
form->sent = 0;
if(!formdata)
return 1; /* error */
/* First, make sure that we'll send a nice terminating sequence at the end
* of the post. We *DONT* add this string to the size of the data since this
* is actually AFTER the data. */
AddFormDataf(&formdata, "\r\n\r\n");
form->data = formdata;
form->sent = 0;
return 0;
}

520
lib/ftp.c
View File

@@ -56,7 +56,10 @@
#if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
#include <winsock.h>
#else /* some kind of unix */
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <sys/types.h>
#include <netinet/in.h>
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
@@ -69,6 +72,9 @@
#include <errno.h>
#endif
#ifdef HAVE_INET_NTOA_R
#include "inet_ntoa_r.h"
#endif
#include <curl/curl.h>
#include "urldata.h"
@@ -149,7 +155,7 @@ void curl_slist_free_all(struct curl_slist *list)
}
static UrgError AllowServerConnect(struct UrlData *data,
static CURLcode AllowServerConnect(struct UrlData *data,
int sock)
{
fd_set rdset;
@@ -167,11 +173,11 @@ static UrgError AllowServerConnect(struct UrlData *data,
case -1: /* error */
/* let's die here */
failf(data, "Error while waiting for server connect");
return URG_FTP_PORT_FAILED;
return CURLE_FTP_PORT_FAILED;
case 0: /* timeout */
/* let's die here */
failf(data, "Timeout while waiting for server connect");
return URG_FTP_PORT_FAILED;
return CURLE_FTP_PORT_FAILED;
default:
/* we have received data here */
{
@@ -185,7 +191,7 @@ static UrgError AllowServerConnect(struct UrlData *data,
if( -1 == s) {
/* DIE! */
failf(data, "Error accept()ing server connect");
return URG_FTP_PORT_FAILED;
return CURLE_FTP_PORT_FAILED;
}
infof(data, "Connection accepted from server\n");
@@ -193,7 +199,7 @@ static UrgError AllowServerConnect(struct UrlData *data,
}
break;
}
return URG_OK;
return CURLE_OK;
}
@@ -202,7 +208,7 @@ static UrgError AllowServerConnect(struct UrlData *data,
#define lastline(line) (isdigit((int)line[0]) && isdigit((int)line[1]) && \
isdigit((int)line[2]) && (' ' == line[3]))
static int GetLastResponse(int sockfd, char *buf,
int GetLastResponse(int sockfd, char *buf,
struct UrlData *data)
{
int nread;
@@ -230,7 +236,7 @@ static int GetLastResponse(int sockfd, char *buf,
}
*ptr=0; /* zero terminate */
if(data->conf & CONF_VERBOSE) {
if(data->bits.verbose) {
fputs("< ", data->err);
fwrite(buf, 1, nread, data->err);
fputs("\n", data->err);
@@ -241,25 +247,20 @@ static int GetLastResponse(int sockfd, char *buf,
}
/* -- who are we? -- */
char *getmyhost(void)
char *getmyhost(char *buf, int buf_size)
{
static char myhost[256];
#if !defined(WIN32) && !defined(HAVE_UNAME) && !defined(HAVE_GETHOSTNAME)
/* We have no means of finding the local host name! */
strcpy(myhost, "localhost");
#endif
#if defined(WIN32) || !defined(HAVE_UNAME)
gethostname(myhost, 256);
#else
#if defined(HAVE_GETHOSTNAME)
gethostname(buf, buf_size);
#elif defined(HAVE_UNAME)
struct utsname ugnm;
if (uname(&ugnm) < 0)
return "localhost";
(void) strncpy(myhost, ugnm.nodename, 255);
myhost[255] = '\0';
strncpy(buf, uname(&ugnm) < 0 ? "localhost" : ugnm.nodename, buf_size - 1);
buf[buf_size - 1] = '\0';
#else
/* We have no means of finding the local host name! */
strncpy(buf, "localhost", buf_size);
buf[buf_size - 1] = '\0';
#endif
return myhost;
return buf;
}
#if 0
@@ -309,32 +310,37 @@ static char *URLfix(char *string)
}
#endif
static
UrgError _ftp(struct UrlData *data,
long *bytecountp,
char *ftpuser,
char *ftppasswd,
char *ppath)
/* ftp_connect() should do everything that is to be considered a part
of the connection phase. */
CURLcode ftp_connect(struct connectdata *conn)
{
/* this is FTP and no proxy */
size_t nread;
UrgError result;
struct UrlData *data=conn->data;
char *buf = data->buffer; /* this is our buffer */
/* for the ftp PORT mode */
int portsock=-1;
struct sockaddr_in serv_addr;
struct FTP *ftp;
struct curl_slist *qitem; /* QUOTE item */
ftp = (struct FTP *)malloc(sizeof(struct FTP));
if(!ftp)
return CURLE_OUT_OF_MEMORY;
memset(ftp, 0, sizeof(struct FTP));
data->proto.ftp = ftp;
/* get some initial data into the ftp struct */
ftp->bytecountp = &conn->bytecount;
ftp->user = data->user;
ftp->passwd = data->passwd;
/* The first thing we do is wait for the "220*" line: */
nread = GetLastResponse(data->firstsocket, buf, data);
if(strncmp(buf, "220", 3)) {
failf(data, "This doesn't seem like a nice ftp-server response");
return URG_FTP_WEIRD_SERVER_REPLY;
return CURLE_FTP_WEIRD_SERVER_REPLY;
}
/* send USER */
sendf(data->firstsocket, data, "USER %s\r\n", ftpuser);
sendf(data->firstsocket, data, "USER %s\r\n", ftp->user);
/* wait for feedback */
nread = GetLastResponse(data->firstsocket, buf, data);
@@ -343,19 +349,19 @@ UrgError _ftp(struct UrlData *data,
/* 530 User ... access denied
(the server denies to log the specified user) */
failf(data, "Access denied: %s", &buf[4]);
return URG_FTP_ACCESS_DENIED;
return CURLE_FTP_ACCESS_DENIED;
}
else if(!strncmp(buf, "331", 3)) {
/* 331 Password required for ...
(the server requires to send the user's password too) */
sendf(data->firstsocket, data, "PASS %s\r\n", ftppasswd);
sendf(data->firstsocket, data, "PASS %s\r\n", ftp->passwd);
nread = GetLastResponse(data->firstsocket, buf, data);
if(!strncmp(buf, "530", 3)) {
/* 530 Login incorrect.
(the username and/or the password are incorrect) */
failf(data, "the username and/or the password are incorrect");
return URG_FTP_USER_PASSWORD_INCORRECT;
return CURLE_FTP_USER_PASSWORD_INCORRECT;
}
else if(!strncmp(buf, "230", 3)) {
/* 230 User ... logged in.
@@ -365,7 +371,7 @@ UrgError _ftp(struct UrlData *data,
}
else {
failf(data, "Odd return code after PASS");
return URG_FTP_WEIRD_PASS_REPLY;
return CURLE_FTP_WEIRD_PASS_REPLY;
}
}
else if(! strncmp(buf, "230", 3)) {
@@ -375,9 +381,109 @@ UrgError _ftp(struct UrlData *data,
}
else {
failf(data, "Odd return code after USER");
return URG_FTP_WEIRD_USER_REPLY;
return CURLE_FTP_WEIRD_USER_REPLY;
}
return CURLE_OK;
}
/* argument is already checked for validity */
/*
 * ftp_done() finishes an FTP transfer after the data has moved:
 *  - verifies the byte count matches what was expected (upload size or
 *    download size / maxdownload), failing with CURLE_PARTIAL_FILE or
 *    CURLE_FTP_COULDNT_RETR_FILE otherwise
 *  - closes the data (secondary) socket and reads the server's final
 *    "226 Transfer complete" reply on the control socket
 *  - sends any post-transfer QUOTE commands, each of which must get a
 *    2xx reply
 *  - frees the path pieces in the ftp struct (the struct itself is
 *    intentionally left allocated, see the TBD note below)
 */
CURLcode ftp_done(struct connectdata *conn)
{
struct UrlData *data = conn->data;
struct FTP *ftp = data->proto.ftp;
size_t nread;
char *buf = data->buffer; /* this is our buffer */
struct curl_slist *qitem; /* QUOTE item */
/* verify the transferred amount; -1 means "size unknown" and is skipped */
if(data->bits.upload) {
if((-1 != data->infilesize) && (data->infilesize != *ftp->bytecountp)) {
failf(data, "Wrote only partial file (%d out of %d bytes)",
*ftp->bytecountp, data->infilesize);
return CURLE_PARTIAL_FILE;
}
}
else {
/* a download also passes if it matched an explicit maxdownload limit */
if((-1 != conn->size) && (conn->size != *ftp->bytecountp) &&
(data->maxdownload != *ftp->bytecountp)) {
failf(data, "Received only partial file");
return CURLE_PARTIAL_FILE;
}
else if(0 == *ftp->bytecountp) {
failf(data, "No data was received!");
return CURLE_FTP_COULDNT_RETR_FILE;
}
}
/* shut down the socket to inform the server we're done */
sclose(data->secondarysocket);
data->secondarysocket = -1;
/* now let's see what the server says about the transfer we
just performed: */
nread = GetLastResponse(data->firstsocket, buf, data);
/* 226 Transfer complete */
if(strncmp(buf, "226", 3)) {
failf(data, "%s", buf+4);
return CURLE_FTP_WRITE_ERROR;
}
/* Send any post-transfer QUOTE strings? */
if(data->postquote) {
qitem = data->postquote;
/* Send all QUOTE strings in same order as on command-line */
while (qitem) {
/* Send string */
if (qitem->data) {
sendf(data->firstsocket, data, "%s\r\n", qitem->data);
nread = GetLastResponse(data->firstsocket, buf, data);
/* any reply other than 2xx aborts the whole operation */
if (buf[0] != '2') {
failf(data, "QUOT string not accepted: %s",
qitem->data);
return CURLE_FTP_QUOTE_ERROR;
}
}
qitem = qitem->next;
}
}
/* release the URL-decoded path pieces allocated in ftp() */
if(ftp->file)
free(ftp->file);
if(ftp->dir)
free(ftp->dir);
/* TBD: the ftp struct is still allocated here */
return CURLE_OK;
}
static
CURLcode _ftp(struct connectdata *conn)
{
/* this is FTP and no proxy */
size_t nread;
CURLcode result;
struct UrlData *data=conn->data;
char *buf = data->buffer; /* this is our buffer */
/* for the ftp PORT mode */
int portsock=-1;
struct sockaddr_in serv_addr;
char hostent_buf[512];
#if defined (HAVE_INET_NTOA_R)
char ntoa_buf[64];
#endif
struct curl_slist *qitem; /* QUOTE item */
/* the ftp struct is already inited in ftp_connect() */
struct FTP *ftp = data->proto.ftp;
long *bytecountp = ftp->bytecountp;
/* Send any QUOTE strings? */
if(data->quote) {
qitem = data->quote;
@@ -392,7 +498,7 @@ UrgError _ftp(struct UrlData *data,
if (buf[0] != '2') {
failf(data, "QUOT string not accepted: %s",
qitem->data);
return URG_FTP_QUOTE_ERROR;
return CURLE_FTP_QUOTE_ERROR;
}
}
qitem = qitem->next;
@@ -401,18 +507,18 @@ UrgError _ftp(struct UrlData *data,
/* If we have selected NOBODY, it means that we only want file information.
Which in FTP can't be much more than the file size! */
if(data->conf & CONF_NOBODY) {
if(data->bits.no_body) {
/* The SIZE command is _not_ RFC 959 specified, and therefor many servers
may not support it! It is however the only way we have to get a file's
size! */
int filesize;
sendf(data->firstsocket, data, "SIZE %s\r\n", ppath);
sendf(data->firstsocket, data, "SIZE %s\r\n", ftp->file);
nread = GetLastResponse(data->firstsocket, buf, data);
if(strncmp(buf, "213", 3)) {
failf(data, "Couldn't get file size: %s", buf+4);
return URG_FTP_COULDNT_GET_SIZE;
return CURLE_FTP_COULDNT_GET_SIZE;
}
/* get the size from the ascii string: */
filesize = atoi(buf+4);
@@ -421,42 +527,40 @@ UrgError _ftp(struct UrlData *data,
if(strlen(buf) != data->fwrite(buf, 1, strlen(buf), data->out)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
return CURLE_WRITE_ERROR;
}
if(data->writeheader) {
/* the header is requested to be written to this file */
if(strlen(buf) != fwrite (buf, 1, strlen(buf), data->writeheader)) {
if(strlen(buf) != data->fwrite (buf, 1, strlen(buf),
data->writeheader)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
return CURLE_WRITE_ERROR;
}
}
return URG_OK;
return CURLE_OK;
}
/* We have chosen to use the PORT command */
if(data->conf & CONF_FTPPORT) {
if(data->bits.ftp_use_port) {
struct sockaddr_in sa;
struct hostent *h=NULL;
size_t size;
unsigned short porttouse;
char *myhost=NULL;
char myhost[256] = "";
if(data->ftpport) {
myhost = if2ip(data->ftpport);
if(myhost) {
h = GetHost(data, myhost);
if(if2ip(data->ftpport, myhost, sizeof(myhost))) {
h = GetHost(data, myhost, hostent_buf, sizeof(hostent_buf));
}
else {
if(strlen(data->ftpport)>1)
h = GetHost(data, data->ftpport);
h = GetHost(data, data->ftpport, hostent_buf, sizeof(hostent_buf));
if(h)
myhost=data->ftpport;
strcpy(myhost,data->ftpport);
}
}
if(!myhost) {
myhost = getmyhost();
h=GetHost(data, myhost);
if(! *myhost) {
h=GetHost(data, getmyhost(myhost,sizeof(myhost)), hostent_buf, sizeof(hostent_buf));
}
infof(data, "We connect from %s\n", myhost);
@@ -479,35 +583,40 @@ UrgError _ftp(struct UrlData *data,
if(getsockname(portsock, (struct sockaddr *) &add,
(int *)&size)<0) {
failf(data, "getsockname() failed");
return URG_FTP_PORT_FAILED;
return CURLE_FTP_PORT_FAILED;
}
porttouse = ntohs(add.sin_port);
if ( listen(portsock, 1) < 0 ) {
failf(data, "listen(2) failed on socket");
return URG_FTP_PORT_FAILED;
return CURLE_FTP_PORT_FAILED;
}
}
else {
failf(data, "bind(2) failed on socket");
return URG_FTP_PORT_FAILED;
return CURLE_FTP_PORT_FAILED;
}
}
else {
failf(data, "socket(2) failed (%s)");
return URG_FTP_PORT_FAILED;
return CURLE_FTP_PORT_FAILED;
}
}
else {
failf(data, "could't find my own IP address (%s)", myhost);
return URG_FTP_PORT_FAILED;
return CURLE_FTP_PORT_FAILED;
}
{
struct in_addr in;
unsigned short ip[5];
(void) memcpy(&in.s_addr, *h->h_addr_list, sizeof (in.s_addr));
#if defined (HAVE_INET_NTOA_R)
sscanf( inet_ntoa_r(in, ntoa_buf, sizeof(ntoa_buf)), "%hu.%hu.%hu.%hu",
&ip[0], &ip[1], &ip[2], &ip[3]);
#else
sscanf( inet_ntoa(in), "%hu.%hu.%hu.%hu",
&ip[0], &ip[1], &ip[2], &ip[3]);
#endif
sendf(data->firstsocket, data, "PORT %d,%d,%d,%d,%d,%d\n",
ip[0], ip[1], ip[2], ip[3],
porttouse >> 8,
@@ -518,7 +627,7 @@ UrgError _ftp(struct UrlData *data,
if(strncmp(buf, "200", 3)) {
failf(data, "Server does not grok PORT, try without it!");
return URG_FTP_PORT_FAILED;
return CURLE_FTP_PORT_FAILED;
}
}
else { /* we use the PASV command */
@@ -529,7 +638,7 @@ UrgError _ftp(struct UrlData *data,
if(strncmp(buf, "227", 3)) {
failf(data, "Odd return code after PASV");
return URG_FTP_WEIRD_PASV_REPLY;
return CURLE_FTP_WEIRD_PASV_REPLY;
}
else {
int ip[4];
@@ -537,7 +646,7 @@ UrgError _ftp(struct UrlData *data,
unsigned short newport;
char newhost[32];
struct hostent *he;
char *str=buf;
char *str=buf,*ip_addr;
/*
* New 227-parser June 3rd 1999.
@@ -559,13 +668,13 @@ UrgError _ftp(struct UrlData *data,
}
if(!*str) {
failf(data, "Couldn't interpret this 227-reply: %s", buf);
return URG_FTP_WEIRD_227_FORMAT;
return CURLE_FTP_WEIRD_227_FORMAT;
}
sprintf(newhost, "%d.%d.%d.%d", ip[0], ip[1], ip[2], ip[3]);
he = GetHost(data, newhost);
he = GetHost(data, newhost, hostent_buf, sizeof(hostent_buf));
if(!he) {
failf(data, "Can't resolve new host %s", newhost);
return URG_FTP_CANT_GET_HOST;
return CURLE_FTP_CANT_GET_HOST;
}
@@ -577,27 +686,38 @@ UrgError _ftp(struct UrlData *data,
serv_addr.sin_family = he->h_addrtype;
serv_addr.sin_port = htons(newport);
if(data->conf & CONF_VERBOSE) {
if(data->bits.verbose) {
struct in_addr in;
#if 1
struct hostent * answer;
#if defined(HAVE_INET_ADDR)
unsigned long address;
#if defined(HAVE_INET_ADDR) || defined(WIN32)
#if defined(HAVE_GETHOSTBYADDR_R)
int h_errnop;
#endif
address = inet_addr(newhost);
answer = gethostbyaddr((char *) &address, sizeof(address),
AF_INET);
#if defined(HAVE_GETHOSTBYADDR_R)
answer = gethostbyaddr_r((char *) &address, sizeof(address), AF_INET,
(struct hostent *)hostent_buf,
hostent_buf + sizeof(*answer),
sizeof(hostent_buf) - sizeof(*answer),
&h_errnop);
#else
answer = gethostbyaddr((char *) &address, sizeof(address), AF_INET);
#endif
#else
answer = NULL;
#endif
(void) memcpy(&in.s_addr, *he->h_addr_list, sizeof (in.s_addr));
infof(data, "Connecting to %s (%s) port %u\n",
answer?answer->h_name:newhost, inet_ntoa(in), newport);
answer?answer->h_name:newhost,
#if defined(HAVE_INET_NTOA_R)
ip_addr = inet_ntoa_r(in, ntoa_buf, sizeof(ntoa_buf)),
#else
(void) memcpy(&in.s_addr, *he->h_addr_list, sizeof (in.s_addr));
infof(data, "Connecting to %s (%s) port %u\n",
he->h_name, inet_ntoa(in), newport);
ip_addr = inet_ntoa(in),
#endif
newport);
}
if (connect(data->secondarysocket, (struct sockaddr *) &serv_addr,
@@ -618,26 +738,39 @@ UrgError _ftp(struct UrlData *data,
failf(data, "Can't connect to ftp server");
break;
}
return URG_FTP_CANT_RECONNECT;
return CURLE_FTP_CANT_RECONNECT;
}
}
}
/* we have the (new) data connection ready */
infof(data, "Connected!\n");
if(data->conf & CONF_UPLOAD) {
/* change directory first */
if(ftp->dir && ftp->dir[0]) {
sendf(data->firstsocket, data, "CWD %s\r\n", ftp->dir);
nread = GetLastResponse(data->firstsocket, buf, data);
if(strncmp(buf, "250", 3)) {
failf(data, "Couldn't change to directory %s", ftp->dir);
return CURLE_FTP_ACCESS_DENIED;
}
}
if(data->bits.upload) {
/* Set type to binary (unless specified ASCII) */
sendf(data->firstsocket, data, "TYPE %s\r\n",
(data->conf&CONF_FTPASCII)?"A":"I");
(data->bits.ftp_ascii)?"A":"I");
nread = GetLastResponse(data->firstsocket, buf, data);
if(strncmp(buf, "200", 3)) {
failf(data, "Couldn't set %s mode",
(data->conf&CONF_FTPASCII)?"ASCII":"binary");
return (data->conf&CONF_FTPASCII)? URG_FTP_COULDNT_SET_ASCII:
URG_FTP_COULDNT_SET_BINARY;
(data->bits.ftp_ascii)?"ASCII":"binary");
return (data->bits.ftp_ascii)? CURLE_FTP_COULDNT_SET_ASCII:
CURLE_FTP_COULDNT_SET_BINARY;
}
if(data->resume_from) {
@@ -658,13 +791,13 @@ UrgError _ftp(struct UrlData *data,
/* we could've got a specified offset from the command line,
but now we know we didn't */
sendf(data->firstsocket, data, "SIZE %s\r\n", ppath);
sendf(data->firstsocket, data, "SIZE %s\r\n", ftp->file);
nread = GetLastResponse(data->firstsocket, buf, data);
if(strncmp(buf, "213", 3)) {
failf(data, "Couldn't get file size: %s", buf+4);
return URG_FTP_COULDNT_GET_SIZE;
return CURLE_FTP_COULDNT_GET_SIZE;
}
/* get the size from the ascii string: */
@@ -685,11 +818,11 @@ UrgError _ftp(struct UrlData *data,
if(strncmp(buf, "350", 3)) {
failf(data, "Couldn't use REST: %s", buf+4);
return URG_FTP_COULDNT_USE_REST;
return CURLE_FTP_COULDNT_USE_REST;
}
#else
/* enable append instead */
data->conf |= CONF_FTPAPPEND;
data->bits.ftp_append = 1;
#endif
/* Now, let's read off the proper amount of bytes from the
input. If we knew it was a proper file we could've just
@@ -708,7 +841,7 @@ UrgError _ftp(struct UrlData *data,
if(actuallyread != readthisamountnow) {
failf(data, "Could only read %d bytes from the input\n",
passed);
return URG_FTP_COULDNT_USE_REST;
return CURLE_FTP_COULDNT_USE_REST;
}
}
while(passed != data->resume_from);
@@ -719,7 +852,7 @@ UrgError _ftp(struct UrlData *data,
if(data->infilesize <= 0) {
infof(data, "File already completely uploaded\n");
return URG_OK;
return CURLE_OK;
}
}
/* we've passed, proceed as normal */
@@ -727,21 +860,21 @@ UrgError _ftp(struct UrlData *data,
}
/* Send everything on data->in to the socket */
if(data->conf & CONF_FTPAPPEND)
if(data->bits.ftp_append)
/* we append onto the file instead of rewriting it */
sendf(data->firstsocket, data, "APPE %s\r\n", ppath);
sendf(data->firstsocket, data, "APPE %s\r\n", ftp->file);
else
sendf(data->firstsocket, data, "STOR %s\r\n", ppath);
sendf(data->firstsocket, data, "STOR %s\r\n", ftp->file);
nread = GetLastResponse(data->firstsocket, buf, data);
if(atoi(buf)>=400) {
failf(data, "Failed FTP upload:%s", buf+3);
/* oops, we never close the sockets! */
return URG_FTP_COULDNT_STOR_FILE;
return CURLE_FTP_COULDNT_STOR_FILE;
}
if(data->conf & CONF_FTPPORT) {
if(data->bits.ftp_use_port) {
result = AllowServerConnect(data, portsock);
if( result )
return result;
@@ -756,24 +889,19 @@ UrgError _ftp(struct UrlData *data,
#if 0
ProgressInit(data, data->infilesize);
#endif
result = Transfer(data, -1, -1, FALSE, NULL, /* no download */
result = Transfer(conn, -1, -1, FALSE, NULL, /* no download */
data->secondarysocket, bytecountp);
if(result)
return result;
if((-1 != data->infilesize) && (data->infilesize != *bytecountp)) {
failf(data, "Wrote only partial file (%d out of %d bytes)",
*bytecountp, data->infilesize);
return URG_PARTIAL_FILE;
}
}
else {
/* Retrieve file or directory */
bool dirlist=FALSE;
long downloadsize=-1;
if(data->conf&CONF_RANGE && data->range) {
int from, to;
if(data->bits.set_range && data->range) {
long from, to;
int totalsize=-1;
char *ptr;
char *ptr2;
@@ -786,32 +914,34 @@ UrgError _ftp(struct UrlData *data,
/* we didn't get any digit */
to=-1;
}
if(-1 == to) {
if((-1 == to) && (from>=0)) {
/* X - */
data->resume_from = from;
infof(data, "FTP RANGE %d to end of file\n", from);
}
else if(from < 0) {
/* -Y */
from = 0;
to = -from;
totalsize = to-from;
data->maxdownload = totalsize;
totalsize = -from;
data->maxdownload = -from;
data->resume_from = from;
infof(data, "FTP RANGE the last %d bytes\n", totalsize);
}
else {
/* X- */
/* X-Y */
totalsize = to-from;
data->maxdownload = totalsize;
data->maxdownload = totalsize+1; /* include the last mentioned byte */
data->resume_from = from;
infof(data, "FTP RANGE from %d getting %d bytes\n", from, data->maxdownload);
}
infof(data, "range-download from %d to %d, totally %d bytes\n",
from, to, totalsize);
}
#if 0
if(!ppath[0])
/* make sure this becomes a valid name */
ppath="./";
if((data->conf & CONF_FTPLISTONLY) ||
('/' == ppath[strlen(ppath)-1] )) {
#endif
if((data->bits.ftp_list_only) || !ftp->file) {
/* The specified path ends with a slash, and therefore we think this
is a directory that is requested, use LIST. But before that we
need to set ASCII transfer mode. */
@@ -824,30 +954,29 @@ UrgError _ftp(struct UrlData *data,
if(strncmp(buf, "200", 3)) {
failf(data, "Couldn't set ascii mode");
return URG_FTP_COULDNT_SET_ASCII;
return CURLE_FTP_COULDNT_SET_ASCII;
}
/* if this output is to be machine-parsed, the NLST command will be
better used since the LIST command output is not specified or
standard in any way */
sendf(data->firstsocket, data, "%s %s\r\n",
sendf(data->firstsocket, data, "%s\r\n",
data->customrequest?data->customrequest:
(data->conf&CONF_FTPLISTONLY?"NLST":"LIST"),
ppath);
(data->bits.ftp_list_only?"NLST":"LIST"));
}
else {
/* Set type to binary (unless specified ASCII) */
sendf(data->firstsocket, data, "TYPE %s\r\n",
(data->conf&CONF_FTPASCII)?"A":"I");
(data->bits.ftp_list_only)?"A":"I");
nread = GetLastResponse(data->firstsocket, buf, data);
if(strncmp(buf, "200", 3)) {
failf(data, "Couldn't set %s mode",
(data->conf&CONF_FTPASCII)?"ASCII":"binary");
return (data->conf&CONF_FTPASCII)? URG_FTP_COULDNT_SET_ASCII:
URG_FTP_COULDNT_SET_BINARY;
(data->bits.ftp_ascii)?"ASCII":"binary");
return (data->bits.ftp_ascii)? CURLE_FTP_COULDNT_SET_ASCII:
CURLE_FTP_COULDNT_SET_BINARY;
}
if(data->resume_from) {
@@ -858,7 +987,7 @@ UrgError _ftp(struct UrlData *data,
* of the file we're gonna get. If we can get the size, this is by far
* the best way to know if we're trying to resume beyond the EOF. */
sendf(data->firstsocket, data, "SIZE %s\r\n", ppath);
sendf(data->firstsocket, data, "SIZE %s\r\n", ftp->file);
nread = GetLastResponse(data->firstsocket, buf, data);
@@ -873,14 +1002,28 @@ UrgError _ftp(struct UrlData *data,
int foundsize=atoi(buf+4);
/* We got a file size report, so we check that there actually is a
part of the file left to get, or else we go home. */
if(data->resume_from< 0) {
/* We're supposed to download the last abs(from) bytes */
if(foundsize < -data->resume_from) {
failf(data, "Offset (%d) was beyond file size (%d)",
data->resume_from, foundsize);
return CURLE_FTP_BAD_DOWNLOAD_RESUME;
}
/* convert to size to download */
downloadsize = -data->resume_from;
/* download from where? */
data->resume_from = foundsize - downloadsize;
}
else {
if(foundsize <= data->resume_from) {
failf(data, "Offset (%d) was beyond file size (%d)",
data->resume_from, foundsize);
return URG_FTP_BAD_DOWNLOAD_RESUME;
return CURLE_FTP_BAD_DOWNLOAD_RESUME;
}
/* Now store the number of bytes we are expected to download */
downloadsize = foundsize-data->resume_from;
}
}
/* Set resume file transfer offset */
infof(data, "Instructs server to resume from offset %d\n",
@@ -892,11 +1035,11 @@ UrgError _ftp(struct UrlData *data,
if(strncmp(buf, "350", 3)) {
failf(data, "Couldn't use REST: %s", buf+4);
return URG_FTP_COULDNT_USE_REST;
return CURLE_FTP_COULDNT_USE_REST;
}
}
sendf(data->firstsocket, data, "RETR %s\r\n", ppath);
sendf(data->firstsocket, data, "RETR %s\r\n", ftp->file);
}
nread = GetLastResponse(data->firstsocket, buf, data);
@@ -966,12 +1109,12 @@ UrgError _ftp(struct UrlData *data,
if(size <= 0) {
failf(data, "Offset (%d) was beyond file size (%d)",
data->resume_from, data->resume_from+size);
return URG_PARTIAL_FILE;
return CURLE_PARTIAL_FILE;
}
}
#endif
if(data->conf & CONF_FTPPORT) {
if(data->bits.ftp_use_port) {
result = AllowServerConnect(data, portsock);
if( result )
return result;
@@ -980,95 +1123,74 @@ UrgError _ftp(struct UrlData *data,
infof(data, "Getting file with size: %d\n", size);
/* FTP download: */
result=Transfer(data, data->secondarysocket, size, FALSE,
result=Transfer(conn, data->secondarysocket, size, FALSE,
bytecountp,
-1, NULL); /* no upload here */
if(result)
return result;
if((-1 != size) && (size != *bytecountp)) {
failf(data, "Received only partial file");
return URG_PARTIAL_FILE;
}
else if(0 == *bytecountp) {
failf(data, "No data was received!");
return URG_FTP_COULDNT_RETR_FILE;
}
}
else {
failf(data, "%s", buf+4);
return URG_FTP_COULDNT_RETR_FILE;
return CURLE_FTP_COULDNT_RETR_FILE;
}
}
/* end of transfer */
#if 0
ProgressEnd(data);
#endif
pgrsDone(data);
/* shut down the socket to inform the server we're done */
sclose(data->secondarysocket);
data->secondarysocket = -1;
/* now let's see what the server says about the transfer we
just performed: */
nread = GetLastResponse(data->firstsocket, buf, data);
/* 226 Transfer complete */
if(strncmp(buf, "226", 3)) {
failf(data, "%s", buf+4);
return URG_FTP_WRITE_ERROR;
}
/* Send any post-transfer QUOTE strings? */
if(data->postquote) {
qitem = data->postquote;
/* Send all QUOTE strings in same order as on command-line */
while (qitem) {
/* Send string */
if (qitem->data) {
sendf(data->firstsocket, data, "%s\r\n", qitem->data);
nread = GetLastResponse(data->firstsocket, buf, data);
if (buf[0] != '2') {
failf(data, "QUOT string not accepted: %s",
qitem->data);
return URG_FTP_QUOTE_ERROR;
}
}
qitem = qitem->next;
}
}
return URG_OK;
return CURLE_OK;
}
/* -- deal with the ftp server! -- */
UrgError ftp(struct UrlData *data,
long *bytecountp,
char *ftpuser,
char *ftppasswd,
char *urlpath)
/* argument is already checked for validity */
CURLcode ftp(struct connectdata *conn)
{
char *realpath;
UrgError retcode;
CURLcode retcode;
#if 0
realpath = URLfix(urlpath);
#else
realpath = curl_unescape(urlpath);
#endif
if(realpath) {
retcode = _ftp(data, bytecountp, ftpuser, ftppasswd, realpath);
free(realpath);
struct UrlData *data = conn->data;
struct FTP *ftp;
int dirlength=0; /* 0 forces strlen() */
/* the ftp struct is already inited in ftp_connect() */
ftp = data->proto.ftp;
/* We split the path into dir and file parts *before* we URLdecode
it */
ftp->file = strrchr(conn->ppath, '/');
if(ftp->file) {
ftp->file++; /* point to the first letter in the file name part or
remain NULL */
}
else {
ftp->file = conn->ppath; /* there's only a file part */
}
dirlength=ftp->file-conn->ppath;
if(*ftp->file) {
ftp->file = curl_unescape(ftp->file, 0);
if(NULL == ftp->file) {
failf(data, "no memory");
return CURLE_OUT_OF_MEMORY;
}
}
else
/* then we try the original path */
retcode = _ftp(data, bytecountp, ftpuser, ftppasswd, urlpath);
ftp->file=NULL; /* instead of point to a zero byte, we make it a NULL
pointer */
ftp->urlpath = conn->ppath;
if(dirlength) {
ftp->dir = curl_unescape(ftp->urlpath, dirlength);
if(NULL == ftp->dir) {
if(ftp->file)
free(ftp->file);
failf(data, "no memory");
return CURLE_OUT_OF_MEMORY; /* failure */
}
}
else
ftp->dir = NULL;
retcode = _ftp(conn);
return retcode;
}

View File

@@ -40,11 +40,9 @@
*
* ------------------------------------------------------------
****************************************************************************/
UrgError ftp(struct UrlData *data,
long *bytecountp,
char *ftpuser,
char *ftppasswd,
char *ppath);
CURLcode ftp(struct connectdata *conn);
CURLcode ftp_done(struct connectdata *conn);
CURLcode ftp_connect(struct connectdata *conn);
struct curl_slist *curl_slist_append(struct curl_slist *list, char *data);
void curl_slist_free_all(struct curl_slist *list);

View File

@@ -53,7 +53,7 @@
#include <sys/types.h>
#include <sys/malloc.h>
#else
#include <malloc.h>
#endif
#include <string.h>
#include <stdio.h>
@@ -61,6 +61,12 @@
#if HAVE_STDLIB_H
# include <stdlib.h> /* for `free'; used by Bison 1.27 */
#else
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#endif
#if defined (STDC_HEADERS) || (!defined (isascii) && !defined (HAVE_ISASCII))
@@ -206,7 +212,7 @@ static int yyRelSeconds;
static int yyRelYear;
#line 189 "getdate.y"
#line 195 "getdate.y"
typedef union {
int Number;
enum _MERIDIAN Meridian;
@@ -289,11 +295,11 @@ static const short yyrhs[] = { -1,
#if YYDEBUG != 0
static const short yyrline[] = { 0,
205, 206, 209, 212, 215, 218, 221, 224, 227, 233,
239, 248, 254, 266, 269, 272, 278, 282, 286, 292,
296, 314, 320, 326, 330, 335, 339, 346, 354, 357,
360, 363, 366, 369, 372, 375, 378, 381, 384, 387,
390, 393, 396, 399, 402, 405, 408, 413, 446, 450
211, 212, 215, 218, 221, 224, 227, 230, 233, 239,
245, 254, 260, 272, 275, 278, 284, 288, 292, 298,
302, 320, 326, 332, 336, 341, 345, 352, 360, 363,
366, 369, 372, 375, 378, 381, 384, 387, 390, 393,
396, 399, 402, 405, 408, 411, 414, 419, 452, 456
};
#endif
@@ -374,7 +380,7 @@ static const short yycheck[] = { 0,
56
};
/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
#line 3 "/usr/lib/bison.simple"
#line 3 "/usr/local/share/bison.simple"
/* This file comes from bison-1.28. */
/* Skeleton output parser for bison,
@@ -588,7 +594,7 @@ __yy_memcpy (char *to, char *from, unsigned int count)
#endif
#endif
#line 217 "/usr/lib/bison.simple"
#line 217 "/usr/local/share/bison.simple"
/* The user can define YYPARSE_PARAM as the name of an argument to be passed
into yyparse. The argument should have type void *.
@@ -917,37 +923,37 @@ yyreduce:
switch (yyn) {
case 3:
#line 209 "getdate.y"
#line 215 "getdate.y"
{
yyHaveTime++;
;
break;}
case 4:
#line 212 "getdate.y"
#line 218 "getdate.y"
{
yyHaveZone++;
;
break;}
case 5:
#line 215 "getdate.y"
#line 221 "getdate.y"
{
yyHaveDate++;
;
break;}
case 6:
#line 218 "getdate.y"
#line 224 "getdate.y"
{
yyHaveDay++;
;
break;}
case 7:
#line 221 "getdate.y"
#line 227 "getdate.y"
{
yyHaveRel++;
;
break;}
case 9:
#line 227 "getdate.y"
#line 233 "getdate.y"
{
yyHour = yyvsp[-1].Number;
yyMinutes = 0;
@@ -956,7 +962,7 @@ case 9:
;
break;}
case 10:
#line 233 "getdate.y"
#line 239 "getdate.y"
{
yyHour = yyvsp[-3].Number;
yyMinutes = yyvsp[-1].Number;
@@ -965,7 +971,7 @@ case 10:
;
break;}
case 11:
#line 239 "getdate.y"
#line 245 "getdate.y"
{
yyHour = yyvsp[-3].Number;
yyMinutes = yyvsp[-1].Number;
@@ -977,7 +983,7 @@ case 11:
;
break;}
case 12:
#line 248 "getdate.y"
#line 254 "getdate.y"
{
yyHour = yyvsp[-5].Number;
yyMinutes = yyvsp[-3].Number;
@@ -986,7 +992,7 @@ case 12:
;
break;}
case 13:
#line 254 "getdate.y"
#line 260 "getdate.y"
{
yyHour = yyvsp[-5].Number;
yyMinutes = yyvsp[-3].Number;
@@ -999,53 +1005,53 @@ case 13:
;
break;}
case 14:
#line 266 "getdate.y"
#line 272 "getdate.y"
{
yyTimezone = yyvsp[0].Number;
;
break;}
case 15:
#line 269 "getdate.y"
#line 275 "getdate.y"
{
yyTimezone = yyvsp[0].Number - 60;
;
break;}
case 16:
#line 273 "getdate.y"
#line 279 "getdate.y"
{
yyTimezone = yyvsp[-1].Number - 60;
;
break;}
case 17:
#line 278 "getdate.y"
#line 284 "getdate.y"
{
yyDayOrdinal = 1;
yyDayNumber = yyvsp[0].Number;
;
break;}
case 18:
#line 282 "getdate.y"
#line 288 "getdate.y"
{
yyDayOrdinal = 1;
yyDayNumber = yyvsp[-1].Number;
;
break;}
case 19:
#line 286 "getdate.y"
#line 292 "getdate.y"
{
yyDayOrdinal = yyvsp[-1].Number;
yyDayNumber = yyvsp[0].Number;
;
break;}
case 20:
#line 292 "getdate.y"
#line 298 "getdate.y"
{
yyMonth = yyvsp[-2].Number;
yyDay = yyvsp[0].Number;
;
break;}
case 21:
#line 296 "getdate.y"
#line 302 "getdate.y"
{
/* Interpret as YYYY/MM/DD if $1 >= 1000, otherwise as MM/DD/YY.
The goal in recognizing YYYY/MM/DD is solely to support legacy
@@ -1066,7 +1072,7 @@ case 21:
;
break;}
case 22:
#line 314 "getdate.y"
#line 320 "getdate.y"
{
/* ISO 8601 format. yyyy-mm-dd. */
yyYear = yyvsp[-2].Number;
@@ -1075,7 +1081,7 @@ case 22:
;
break;}
case 23:
#line 320 "getdate.y"
#line 326 "getdate.y"
{
/* e.g. 17-JUN-1992. */
yyDay = yyvsp[-2].Number;
@@ -1084,14 +1090,14 @@ case 23:
;
break;}
case 24:
#line 326 "getdate.y"
#line 332 "getdate.y"
{
yyMonth = yyvsp[-1].Number;
yyDay = yyvsp[0].Number;
;
break;}
case 25:
#line 330 "getdate.y"
#line 336 "getdate.y"
{
yyMonth = yyvsp[-3].Number;
yyDay = yyvsp[-2].Number;
@@ -1099,14 +1105,14 @@ case 25:
;
break;}
case 26:
#line 335 "getdate.y"
#line 341 "getdate.y"
{
yyMonth = yyvsp[0].Number;
yyDay = yyvsp[-1].Number;
;
break;}
case 27:
#line 339 "getdate.y"
#line 345 "getdate.y"
{
yyMonth = yyvsp[-1].Number;
yyDay = yyvsp[-2].Number;
@@ -1114,7 +1120,7 @@ case 27:
;
break;}
case 28:
#line 346 "getdate.y"
#line 352 "getdate.y"
{
yyRelSeconds = -yyRelSeconds;
yyRelMinutes = -yyRelMinutes;
@@ -1125,115 +1131,115 @@ case 28:
;
break;}
case 30:
#line 357 "getdate.y"
#line 363 "getdate.y"
{
yyRelYear += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 31:
#line 360 "getdate.y"
#line 366 "getdate.y"
{
yyRelYear += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 32:
#line 363 "getdate.y"
#line 369 "getdate.y"
{
yyRelYear += yyvsp[0].Number;
;
break;}
case 33:
#line 366 "getdate.y"
#line 372 "getdate.y"
{
yyRelMonth += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 34:
#line 369 "getdate.y"
#line 375 "getdate.y"
{
yyRelMonth += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 35:
#line 372 "getdate.y"
#line 378 "getdate.y"
{
yyRelMonth += yyvsp[0].Number;
;
break;}
case 36:
#line 375 "getdate.y"
#line 381 "getdate.y"
{
yyRelDay += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 37:
#line 378 "getdate.y"
#line 384 "getdate.y"
{
yyRelDay += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 38:
#line 381 "getdate.y"
#line 387 "getdate.y"
{
yyRelDay += yyvsp[0].Number;
;
break;}
case 39:
#line 384 "getdate.y"
#line 390 "getdate.y"
{
yyRelHour += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 40:
#line 387 "getdate.y"
#line 393 "getdate.y"
{
yyRelHour += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 41:
#line 390 "getdate.y"
#line 396 "getdate.y"
{
yyRelHour += yyvsp[0].Number;
;
break;}
case 42:
#line 393 "getdate.y"
#line 399 "getdate.y"
{
yyRelMinutes += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 43:
#line 396 "getdate.y"
#line 402 "getdate.y"
{
yyRelMinutes += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 44:
#line 399 "getdate.y"
#line 405 "getdate.y"
{
yyRelMinutes += yyvsp[0].Number;
;
break;}
case 45:
#line 402 "getdate.y"
#line 408 "getdate.y"
{
yyRelSeconds += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 46:
#line 405 "getdate.y"
#line 411 "getdate.y"
{
yyRelSeconds += yyvsp[-1].Number * yyvsp[0].Number;
;
break;}
case 47:
#line 408 "getdate.y"
#line 414 "getdate.y"
{
yyRelSeconds += yyvsp[0].Number;
;
break;}
case 48:
#line 414 "getdate.y"
#line 420 "getdate.y"
{
if (yyHaveTime && yyHaveDate && !yyHaveRel)
yyYear = yyvsp[0].Number;
@@ -1266,20 +1272,20 @@ case 48:
;
break;}
case 49:
#line 447 "getdate.y"
#line 453 "getdate.y"
{
yyval.Meridian = MER24;
;
break;}
case 50:
#line 451 "getdate.y"
#line 457 "getdate.y"
{
yyval.Meridian = yyvsp[0].Meridian;
;
break;}
}
/* the action file gets copied in in place of this dollarsign */
#line 543 "/usr/lib/bison.simple"
#line 543 "/usr/local/share/bison.simple"
yyvsp -= yylen;
yyssp -= yylen;
@@ -1499,7 +1505,7 @@ yyerrhandle:
}
return 1;
}
#line 456 "getdate.y"
#line 462 "getdate.y"
/* Include this file down here because bison inserts code above which
@@ -1955,7 +1961,7 @@ difftm (struct tm *a, struct tm *b)
}
time_t
get_date (const char *p, const time_t *now)
curl_getdate (const char *p, const time_t *now)
{
struct tm tm, tm0, *tmp;
time_t Start;
@@ -2089,7 +2095,7 @@ main (ac, av)
buff[MAX_BUFF_LEN] = 0;
while (fgets (buff, MAX_BUFF_LEN, stdin) && buff[0])
{
d = get_date (buff, (time_t *) NULL);
d = curl_getdate (buff, (time_t *) NULL);
if (d == -1)
(void) printf ("Bad format - couldn't convert.\n");
else

View File

@@ -1,18 +1,11 @@
/* Copyright (C) 1995, 1997, 1998 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
/*
** Originally written by Steven M. Bellovin <smb@research.att.com> while
** at the University of North Carolina at Chapel Hill. Later tweaked by
** a couple of people on Usenet. Completely overhauled by Rich $alz
** <rsalz@bbn.com> and Jim Berets <jberets@bbn.com> in August, 1990.
**
** This code is in the public domain and has no copyright.
*/
#if HAVE_CONFIG_H
# include <config.h>
@@ -43,4 +36,4 @@
# endif
#endif /* defined (vms) */
time_t get_date PARAMS ((const char *p, const time_t *now));
time_t curl_getdate PARAMS ((const char *p, const time_t *now));

View File

@@ -29,7 +29,7 @@
#include <sys/types.h>
#include <sys/malloc.h>
#else
#include <malloc.h>
#endif
#include <string.h>
#include <stdio.h>
@@ -37,6 +37,12 @@
#if HAVE_STDLIB_H
# include <stdlib.h> /* for `free'; used by Bison 1.27 */
#else
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#endif
#if defined (STDC_HEADERS) || (!defined (isascii) && !defined (HAVE_ISASCII))
@@ -908,7 +914,7 @@ difftm (struct tm *a, struct tm *b)
}
time_t
get_date (const char *p, const time_t *now)
curl_getdate (const char *p, const time_t *now)
{
struct tm tm, tm0, *tmp;
time_t Start;
@@ -1042,7 +1048,7 @@ main (ac, av)
buff[MAX_BUFF_LEN] = 0;
while (fgets (buff, MAX_BUFF_LEN, stdin) && buff[0])
{
d = get_date (buff, (time_t *) NULL);
d = curl_getdate (buff, (time_t *) NULL);
if (d == -1)
(void) printf ("Bad format - couldn't convert.\n");
else

View File

@@ -22,18 +22,6 @@
* Portions created by the Initial Developer are Copyright (C) 1998.
* All Rights Reserved.
*
* Contributor(s):
* Rafael Sagula <sagula@inf.ufrgs.br>
* Sampo Kellomaki <sampo@iki.fi>
* Linas Vepstas <linas@linas.org>
* Bjorn Reese <breese@imada.ou.dk>
* Johan Anderson <johan@homemail.com>
* Kjell Ericson <Kjell.Ericson@haxx.nu>
* Troy Engel <tengel@palladium.net>
* Ryan Nelson <ryan@inch.com>
* Bjorn Stenberg <Bjorn.Stenberg@haxx.nu>
* Angus Mackay <amackay@gus.ml.org>
*
* ------------------------------------------------------------
* Main author:
* - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
@@ -47,28 +35,11 @@
* $State$
* $Locker$
*
* ------------------------------------------------------------
* $Log$
* Revision 1.2 2000-01-10 23:36:14 bagder
* syncing with local edit
*
* Revision 1.4 1999/09/06 06:59:40 dast
* Changed email info
*
* Revision 1.3 1999/08/13 07:34:48 dast
* Changed the URL in the header
*
* Revision 1.2 1999/03/13 00:56:09 dast
* Big changes done due to url.c being split up in X smaller files and that
* the lib is now more stand-alone.
*
* Revision 1.1.1.1 1999/03/11 22:23:34 dast
* Imported sources
*
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef WIN32
#include <windows.h>
@@ -78,7 +49,7 @@ char *GetEnv(char *variable)
{
#ifdef WIN32
/* This shit requires windows.h (HUGE) to be included */
static char env[MAX_PATH]; /* MAX_PATH is from windef.h */
char env[MAX_PATH]; /* MAX_PATH is from windef.h */
char *temp = getenv(variable);
env[0] = '\0';
ExpandEnvironmentStrings(temp, env, sizeof(env));
@@ -86,7 +57,7 @@ char *GetEnv(char *variable)
/* no length control */
char *env = getenv(variable);
#endif
return env;
return env?strdup(env):NULL;
}
char *curl_GetEnv(char *v)

View File

@@ -76,7 +76,9 @@ char *getpass(const char *prompt)
FILE *outfp;
static char buf[INPUT_BUFFER];
RETSIGTYPE (*sigint)();
#ifndef __EMX__
RETSIGTYPE (*sigtstp)();
#endif
size_t bytes_read;
int infd;
int outfd;
@@ -92,7 +94,11 @@ char *getpass(const char *prompt)
#endif
sigint = signal(SIGINT, SIG_IGN);
/* 20000318 mgs
* this is needed by the emx system, SIGTSTP is not a supported signal */
#ifndef __EMX__
sigtstp = signal(SIGTSTP, SIG_IGN);
#endif
if( (infp=fopen("/dev/tty", "r")) == NULL )
{
@@ -169,7 +175,9 @@ char *getpass(const char *prompt)
#endif
signal(SIGINT, sigint);
#ifndef __EMX__
signal(SIGTSTP, sigtstp);
#endif
return(buf);
}

766
lib/highlevel.c Normal file
View File

@@ -0,0 +1,766 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
* License for the specific language governing rights and limitations
* under the License.
*
* The Original Code is Curl.
*
* The Initial Developer of the Original Code is Daniel Stenberg.
*
* Portions created by the Initial Developer are Copyright (C) 1998.
* All Rights Reserved.
*
* ------------------------------------------------------------
* Main author:
* - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
*
* http://curl.haxx.nu
*
* $Source$
* $Revision$
* $Date$
* $Author$
* $State$
* $Locker$
*
* ------------------------------------------------------------
****************************************************************************/
/* -- WIN32 approved -- */
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include "setup.h"
#include "strequal.h"
#if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
#include <winsock.h>
#include <time.h>
#include <io.h>
#else
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <netinet/in.h>
#include <sys/time.h>
#include <sys/resource.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <netdb.h>
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NET_IF_H
#include <net/if.h>
#endif
#include <sys/ioctl.h>
#include <signal.h>
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#ifndef HAVE_VPRINTF
#error "We can't compile without vprintf() support!"
#endif
#ifndef HAVE_SELECT
#error "We can't compile without select() support!"
#endif
#ifndef HAVE_SOCKET
#error "We can't compile without socket() support!"
#endif
#endif
#include "urldata.h"
#include <curl/curl.h>
#include <curl/types.h>
#include "netrc.h"
#include "getenv.h"
#include "hostip.h"
#include "download.h"
#include "sendf.h"
#include "speedcheck.h"
#include "getpass.h"
#include "progress.h"
#include "getdate.h"
#include "writeout.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
CURLcode
_Transfer (struct connectdata *c_conn)
{
size_t nread; /* number of bytes read */
int bytecount = 0; /* total number of bytes read */
int writebytecount = 0; /* number of bytes written */
long contentlength=0; /* size of incoming data */
struct timeval start = tvnow();
struct timeval now = start; /* current time */
bool header = TRUE; /* incoming data has HTTP header */
int headerline = 0; /* counts header lines to better track the
first one */
char *hbufp; /* points at *end* of header line */
int hbuflen = 0;
char *str; /* within buf */
char *str_start; /* within buf */
char *end_ptr; /* within buf */
char *p; /* within headerbuff */
bool content_range = FALSE; /* set TRUE if Content-Range: was found */
int offset = 0; /* possible resume offset read from the
Content-Range: header */
int code = 0; /* error code from the 'HTTP/1.? XXX' line */
/* for the low speed checks: */
CURLcode urg;
time_t timeofdoc=0;
long bodywrites=0;
char newurl[URL_MAX_LENGTH]; /* buffer for Location: URL */
/* the highest fd we use + 1 */
struct UrlData *data;
struct connectdata *conn = (struct connectdata *)c_conn;
char *buf;
int maxfd;
if(!conn || (conn->handle != STRUCT_CONNECT))
return CURLE_BAD_FUNCTION_ARGUMENT;
data = conn->data; /* there's the root struct */
buf = data->buffer;
maxfd = (conn->sockfd>conn->writesockfd?conn->sockfd:conn->writesockfd)+1;
hbufp = data->headerbuff;
myalarm (0); /* switch off the alarm-style timeout */
now = tvnow();
start = now;
#define KEEP_READ 1
#define KEEP_WRITE 2
pgrsTime(data, TIMER_PRETRANSFER);
if (!conn->getheader) {
header = FALSE;
if(conn->size > 0)
pgrsSetDownloadSize(data, conn->size);
}
{
fd_set readfd;
fd_set writefd;
fd_set rkeepfd;
fd_set wkeepfd;
struct timeval interval;
int keepon=0;
/* timeout every X second
- makes a better progressmeter (i.e even when no data is read, the
meter can be updated and reflect reality)
- allows removal of the alarm() crap
- variable timeout is easier
*/
FD_ZERO (&readfd); /* clear it */
if(conn->sockfd != -1) {
FD_SET (conn->sockfd, &readfd); /* read socket */
keepon |= KEEP_READ;
}
FD_ZERO (&writefd); /* clear it */
if(conn->writesockfd != -1) {
FD_SET (conn->writesockfd, &writefd); /* write socket */
keepon |= KEEP_WRITE;
}
/* get these in backup variables to be able to restore them on each lap in
the select() loop */
rkeepfd = readfd;
wkeepfd = writefd;
while (keepon) {
readfd = rkeepfd; /* set those every lap in the loop */
writefd = wkeepfd;
interval.tv_sec = 1;
interval.tv_usec = 0;
switch (select (maxfd, &readfd, &writefd, NULL, &interval)) {
case -1: /* select() error, stop reading */
#ifdef EINTR
/* The EINTR is not serious, and it seems you might get this more
ofen when using the lib in a multi-threaded environment! */
if(errno == EINTR)
;
else
#endif
keepon = 0; /* no more read or write */
continue;
case 0: /* timeout */
break;
default:
if((keepon & KEEP_READ) && FD_ISSET(conn->sockfd, &readfd)) {
/* read! */
urg = curl_read(conn, buf, BUFSIZE -1, &nread);
/* NULL terminate, allowing string ops to be used */
if (0 < (signed int) nread)
buf[nread] = 0;
/* if we receive 0 or less here, the server closed the connection and
we bail out from this! */
else if (0 >= (signed int) nread) {
keepon &= ~KEEP_READ;
break;
}
str = buf; /* Default buffer to use when we write the
buffer, it may be changed in the flow below
before the actual storing is done. */
/* Since this is a two-state thing, we check if we are parsing
headers at the moment or not. */
if (header) {
/* we are in parse-the-header-mode */
/* header line within buffer loop */
do {
int hbufp_index;
str_start = str; /* str_start is start of line within buf */
end_ptr = strchr (str_start, '\n');
if (!end_ptr) {
/* no more complete header lines within buffer */
/* copy what is remaining into headerbuff */
int str_length = (int)strlen(str);
if (hbuflen + (int)str_length >= data->headersize) {
char *newbuff;
long newsize=MAX((hbuflen+str_length)*3/2,
data->headersize*2);
hbufp_index = hbufp - data->headerbuff;
newbuff = (char *)realloc(data->headerbuff, newsize);
if(!newbuff) {
failf (data, "Failed to alloc memory for big header!");
return CURLE_READ_ERROR;
}
data->headersize=newsize;
data->headerbuff = newbuff;
hbufp = data->headerbuff + hbufp_index;
}
strcpy (hbufp, str);
hbufp += strlen (str);
hbuflen += strlen (str);
break; /* read more and try again */
}
str = end_ptr + 1; /* move just past new line */
if (hbuflen + (str - str_start) >= data->headersize) {
char *newbuff;
long newsize=MAX((hbuflen+(str-str_start))*3/2,
data->headersize*2);
hbufp_index = hbufp - data->headerbuff;
newbuff = (char *)realloc(data->headerbuff, newsize);
if(!newbuff) {
failf (data, "Failed to alloc memory for big header!");
return CURLE_READ_ERROR;
}
data->headersize= newsize;
data->headerbuff = newbuff;
hbufp = data->headerbuff + hbufp_index;
}
/* copy to end of line */
strncpy (hbufp, str_start, str - str_start);
hbufp += str - str_start;
hbuflen += str - str_start;
*hbufp = 0;
p = data->headerbuff;
/* we now have a full line that p points to */
if (('\n' == *p) || ('\r' == *p)) {
/* Zero-length line means end of header! */
if (-1 != conn->size) /* if known */
conn->size += bytecount; /* we append the already read size */
if ('\r' == *p)
p++; /* pass the \r byte */
if ('\n' == *p)
p++; /* pass the \n byte */
pgrsSetDownloadSize(data, conn->size);
header = FALSE; /* no more header to parse! */
/* now, only output this if the header AND body are requested:
*/
if (data->bits.http_include_header) {
if((p - data->headerbuff) !=
data->fwrite (data->headerbuff, 1,
p - data->headerbuff, data->out)) {
failf (data, "Failed writing output");
return CURLE_WRITE_ERROR;
}
}
if(data->writeheader) {
/* obviously, the header is requested to be written to
this file: */
if((p - data->headerbuff) !=
data->fwrite (data->headerbuff, 1, p - data->headerbuff,
data->writeheader)) {
failf (data, "Failed writing output");
return CURLE_WRITE_ERROR;
}
}
break; /* exit header line loop */
}
if (!headerline++) {
/* This is the first header, it MUST be the error code line
or else we consiser this to be the body right away! */
if (sscanf (p, " HTTP/1.%*c %3d", &code)) {
/* 404 -> URL not found! */
if (
( ((data->bits.http_follow_location) && (code >= 400))
||
(!data->bits.http_follow_location && (code >= 300)))
&& (data->bits.http_fail_on_error)) {
/* If we have been told to fail hard on HTTP-errors,
here is the check for that: */
/* serious error, go home! */
failf (data, "The requested file was not found");
return CURLE_HTTP_NOT_FOUND;
}
data->progress.httpcode = code;
}
else {
header = FALSE; /* this is not a header line */
break;
}
}
/* check for Content-Length: header lines to get size */
if (strnequal("Content-Length", p, 14) &&
sscanf (p+14, ": %ld", &contentlength))
conn->size = contentlength;
else if (strnequal("Content-Range", p, 13) &&
sscanf (p+13, ": bytes %d-", &offset)) {
if (data->resume_from == offset) {
/* we asked for a resume and we got it */
content_range = TRUE;
}
}
else if(data->cookies &&
strnequal("Set-Cookie: ", p, 11)) {
cookie_add(data->cookies, TRUE, &p[12]);
}
else if(strnequal("Last-Modified:", p,
strlen("Last-Modified:")) &&
data->timecondition) {
time_t secs=time(NULL);
timeofdoc = curl_getdate(p+strlen("Last-Modified:"), &secs);
}
else if ((code >= 300 && code < 400) &&
(data->bits.http_follow_location) &&
strnequal("Location", p, 8) &&
sscanf (p+8, ": %" URL_MAX_LENGTH_TXT "s",
newurl)) {
/* this is the URL that the server advices us to get
instead */
data->newurl = strdup (newurl);
}
if (data->bits.http_include_header) {
if(hbuflen != data->fwrite (p, 1, hbuflen, data->out)) {
failf (data, "Failed writing output");
return CURLE_WRITE_ERROR;
}
}
if(data->writeheader) {
/* the header is requested to be written to this file */
if(hbuflen != data->fwrite (p, 1, hbuflen,
data->writeheader)) {
failf (data, "Failed writing output");
return CURLE_WRITE_ERROR;
}
}
/* reset hbufp pointer && hbuflen */
hbufp = data->headerbuff;
hbuflen = 0;
}
while (*str); /* header line within buffer */
/* We might have reached the end of the header part here, but
there might be a non-header part left in the end of the read
buffer. */
if (!header) {
/* the next token and forward is not part of
the header! */
/* we subtract the remaining header size from the buffer */
nread -= (str - buf);
}
} /* end if header mode */
/* This is not an 'else if' since it may be a rest from the header
parsing, where the beginning of the buffer is headers and the end
is non-headers. */
if (str && !header && (nread > 0)) {
if(0 == bodywrites) {
/* These checks are only made the first time we are about to
write a chunk of the body */
if(conn->protocol&PROT_HTTP) {
/* HTTP-only checks */
if (data->resume_from && !content_range ) {
/* we wanted to resume a download, although the server
doesn't seem to support this */
failf (data, "HTTP server doesn't seem to support byte ranges. Cannot resume.");
return CURLE_HTTP_RANGE_ERROR;
}
else if (data->newurl) {
/* abort after the headers if "follow Location" is set */
infof (data, "Follow to new URL: %s\n", data->newurl);
return CURLE_OK;
}
else if(data->timecondition && !data->range) {
/* A time condition has been set AND no ranges have been
requested. This seems to be what chapter 13.3.4 of
RFC 2616 defines to be the correct action for a
HTTP/1.1 client */
if((timeofdoc > 0) && (data->timevalue > 0)) {
switch(data->timecondition) {
case TIMECOND_IFMODSINCE:
default:
if(timeofdoc < data->timevalue) {
infof(data,
"The requested document is not new enough");
return CURLE_OK;
}
break;
case TIMECOND_IFUNMODSINCE:
if(timeofdoc > data->timevalue) {
infof(data,
"The requested document is not old enough");
return CURLE_OK;
}
break;
} /* switch */
} /* two valid time strings */
} /* we have a time condition */
} /* this is HTTP */
} /* this is the first time we write a body part */
bodywrites++;
if(data->maxdownload &&
(bytecount + nread > data->maxdownload)) {
nread = data->maxdownload - bytecount;
if(nread < 0 ) /* this should be unusual */
nread = 0;
keepon &= ~KEEP_READ; /* we're done reading */
}
bytecount += nread;
pgrsSetDownloadCounter(data, (double)bytecount);
if (nread != data->fwrite (str, 1, nread, data->out)) {
failf (data, "Failed writing output");
return CURLE_WRITE_ERROR;
}
} /* if (! header and data to read ) */
} /* if( read from socket ) */
if((keepon & KEEP_WRITE) && FD_ISSET(conn->writesockfd, &writefd)) {
/* write */
char scratch[BUFSIZE * 2];
int i, si;
int bytes_written;
if(data->crlf)
buf = data->buffer; /* put it back on the buffer */
nread = data->fread(buf, 1, BUFSIZE, data->in);
writebytecount += nread;
pgrsSetUploadCounter(data, (double)writebytecount);
if (nread<=0) {
/* done */
keepon &= ~KEEP_WRITE; /* we're done writing */
break;
}
/* convert LF to CRLF if so asked */
if (data->crlf) {
for(i = 0, si = 0; i < (int)nread; i++, si++) {
if (buf[i] == 0x0a) {
scratch[si++] = 0x0d;
scratch[si] = 0x0a;
}
else {
scratch[si] = buf[i];
}
}
nread = si;
buf = scratch; /* point to the new buffer */
}
/* write to socket */
urg = curl_write(conn, buf, nread, &bytes_written);
if(nread != bytes_written) {
failf(data, "Failed uploading data");
return CURLE_WRITE_ERROR;
}
}
break;
}
now = tvnow();
if(pgrsUpdate(data))
urg = CURLE_ABORTED_BY_CALLBACK;
else
urg = speedcheck (data, now);
if (urg)
return urg;
if (data->timeout && (tvdiff (now, start) > data->timeout)) {
failf (data, "Operation timed out with %d out of %d bytes received",
bytecount, conn->size);
return CURLE_OPERATION_TIMEOUTED;
}
}
}
if(!(data->bits.no_body) && contentlength &&
(bytecount != contentlength)) {
failf(data, "transfer closed with %d bytes remaining to read",
contentlength-bytecount);
return CURLE_PARTIAL_FILE;
}
if(pgrsUpdate(data))
return CURLE_ABORTED_BY_CALLBACK;
if(conn->bytecountp)
*conn->bytecountp = bytecount; /* read count */
if(conn->writebytecountp)
*conn->writebytecountp = writebytecount; /* write count */
return CURLE_OK;
}
typedef int (*func_T)(void);
CURLcode curl_transfer(CURL *curl)
{
CURLcode res;
struct UrlData *data = curl;
struct connectdata *c_connect;
do {
res = curl_connect(curl, (CURLconnect **)&c_connect);
if(res == CURLE_OK) {
res = curl_do(c_connect);
if(res == CURLE_OK) {
res = _Transfer(c_connect); /* now fetch that URL please */
if(res == CURLE_OK)
res = curl_done(c_connect);
}
if((res == CURLE_OK) && data->newurl) {
/* Location: redirect */
char prot[16];
char path[URL_MAX_LENGTH];
if(2 != sscanf(data->newurl, "%15[^:]://%" URL_MAX_LENGTH_TXT
"s", prot, path)) {
/***
*DANG* this is an RFC 2068 violation. The URL is supposed
to be absolute and this doesn't seem to be that!
***
Instead, we have to TRY to append this new path to the old URL
to the right of the host part. Oh crap, this is doomed to cause
problems in the future...
*/
char *protsep;
char *pathsep;
char *newest;
/* protsep points to the start of the host name */
protsep=strstr(data->url, "//");
if(!protsep)
protsep=data->url;
else {
/* TBD: set the port with curl_setopt() */
data->port=0; /* we got a full URL and then we should reset the
port number here to re-initiate it later */
protsep+=2; /* pass the slashes */
}
if('/' != data->newurl[0]) {
/* First we need to find out if there's a ?-letter in the URL,
and cut it and the right-side of that off */
pathsep = strrchr(protsep, '?');
if(pathsep)
*pathsep=0;
/* we have a relative path to append to the last slash if
there's one available */
pathsep = strrchr(protsep, '/');
if(pathsep)
*pathsep=0;
}
else {
/* We got a new absolute path for this server, cut off from the
first slash */
pathsep = strchr(protsep, '/');
if(pathsep)
*pathsep=0;
}
newest=(char *)malloc( strlen(data->url) +
1 + /* possible slash */
strlen(data->newurl) + 1/* zero byte */);
if(!newest)
return CURLE_OUT_OF_MEMORY;
sprintf(newest, "%s%s%s", data->url, ('/' == data->newurl[0])?"":"/",
data->newurl);
free(data->newurl);
data->newurl = newest;
}
else {
/* This was an absolute URL, clear the port number! */
/* TBD: set the port with curl_setopt() */
data->port = 0;
}
/* TBD: set the URL with curl_setopt() */
data->url = data->newurl;
data->newurl = NULL; /* don't show! */
infof(data, "Follows Location: to new URL: '%s'\n", data->url);
curl_disconnect(c_connect);
continue;
}
curl_disconnect(c_connect);
}
break; /* it only reaches here when this shouldn't loop */
} while(1); /* loop if Location: */
if(data->newurl)
free(data->newurl);
if((CURLE_OK == res) && data->writeinfo) {
/* Time to output some info to stdout */
WriteOut(data);
}
return res;
}
#if 0
CURLcode curl_urlget(UrgTag tag, ...)
{
va_list arg;
func_T param_func = (func_T)0;
long param_long = 0;
void *param_obj = NULL;
CURLcode res;
struct UrlData *data;
/* this is for the lame win32 socket crap */
if(curl_init())
return CURLE_FAILED_INIT;
/* We use curl_open() with undefined URL so far */
res = curl_open(&data, NULL);
if(res != CURLE_OK)
return CURLE_FAILED_INIT;
/* data is now filled with good-looking zeroes */
va_start(arg, tag);
while(tag != URGTAG_DONE) {
/* PORTING NOTE:
Object pointers can't necessarily be casted to function pointers and
therefore we need to know what type it is and read the correct type
at once. This should also correct problems with different sizes of
the types.
*/
if(tag < URGTYPE_OBJECTPOINT) {
/* This is a LONG type */
param_long = va_arg(arg, long);
curl_setopt(data, tag, param_long);
}
else if(tag < URGTYPE_FUNCTIONPOINT) {
/* This is a object pointer type */
param_obj = va_arg(arg, void *);
curl_setopt(data, tag, param_obj);
}
else {
param_func = va_arg(arg, func_T );
curl_setopt(data, tag, param_func);
}
/* printf("tag: %d\n", tag); */
tag = va_arg(arg, UrgTag);
}
va_end(arg);
pgrsMode(data, data->progress.mode);
pgrsStartNow(data);
/********* Now, connect to the remote site **********/
res = curl_transfer(data);
curl_close(data);
return res;
}
#endif

43
lib/highlevel.h Normal file
View File

@@ -0,0 +1,43 @@
#ifndef __HIGHLEVEL_H
#define __HIGHLEVEL_H
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
* License for the specific language governing rights and limitations
* under the License.
*
* The Original Code is Curl.
*
* The Initial Developer of the Original Code is Daniel Stenberg.
*
* Portions created by the Initial Developer are Copyright (C) 1998.
* All Rights Reserved.
*
* ------------------------------------------------------------
* Main author:
* - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
*
* http://curl.haxx.nu
*
* $Source$
* $Revision$
* $Date$
* $Author$
* $State$
* $Locker$
*
* ------------------------------------------------------------
****************************************************************************/
/* Perform the full transfer (including Location: follow logic) on an
   already-configured curl handle.
   NOTE(review): this header declares CURLcode and CURL without including
   anything — it relies on <curl/curl.h> being included first; confirm all
   users do so. */
CURLcode curl_transfer(CURL *curl);
#endif

View File

@@ -39,6 +39,7 @@
****************************************************************************/
#include <string.h>
#include <malloc.h>
#include "setup.h"
@@ -61,51 +62,77 @@
#include "urldata.h"
#include "sendf.h"
#ifdef HAVE_INET_NTOA_R
#include "inet_ntoa_r.h"
#endif
/* --- resolve name or IP-number --- */
char *MakeIP(unsigned long num)
char *MakeIP(unsigned long num,char *addr, int addr_len)
{
#ifdef HAVE_INET_NTOA
#if defined(HAVE_INET_NTOA) || defined(HAVE_INET_NTOA_R)
struct in_addr in;
in.s_addr = htonl(num);
return (inet_ntoa(in));
#if defined(HAVE_INET_NTOA_R)
inet_ntoa_r(in,addr,addr_len);
#else
strncpy(addr,inet_ntoa(in),addr_len);
#endif
#else
static char addr[128];
unsigned char *paddr;
num = htonl(num); /* htonl() added to avoid endian probs */
paddr = (unsigned char *)&num;
sprintf(addr, "%u.%u.%u.%u", paddr[0], paddr[1], paddr[2], paddr[3]);
return (addr);
#endif
return (addr);
}
/* Stolen from Dancer source code, written by
Bjorn Reese <breese@imada.ou.dk> */
/* The original code to this function was stolen from the Dancer source code,
written by Bjorn Reese, it has since been patched and modified. */
#ifndef INADDR_NONE
#define INADDR_NONE (unsigned long) ~0
#endif
struct hostent *GetHost(struct UrlData *data, char *hostname)
struct hostent *GetHost(struct UrlData *data,
char *hostname,
char *buf,
int buf_size )
{
struct hostent *h = NULL;
unsigned long in;
static struct hostent he;
static char name[MAXHOSTNAMELEN];
static char *addrlist[2];
static struct in_addr addrentry;
if ( (in=inet_addr(hostname)) != INADDR_NONE ) {
addrentry.s_addr = in;
addrlist[0] = (char *)&addrentry;
addrlist[1] = NULL;
he.h_name = strncpy(name, MakeIP(ntohl(in)), MAXHOSTNAMELEN);
he.h_addrtype = AF_INET;
he.h_length = sizeof(struct in_addr);
he.h_addr_list = addrlist;
h = &he;
} else if ( (h=gethostbyname(hostname)) == NULL ) {
struct in_addr *addrentry;
h = (struct hostent*)buf;
h->h_addr_list = (char**)(buf + sizeof(*h));
addrentry = (struct in_addr*)(h->h_addr_list + 2);
addrentry->s_addr = in;
h->h_addr_list[0] = (char*)addrentry;
h->h_addr_list[1] = NULL;
h->h_addrtype = AF_INET;
h->h_length = sizeof(*addrentry);
h->h_name = (char*)(h->h_addr_list + h->h_length);
MakeIP(ntohl(in),h->h_name,buf_size - (long)(h->h_name) + (long)buf);
#if defined(HAVE_GETHOSTBYNAME_R)
}
else {
int h_errnop;
memset(buf,0,buf_size); /* workaround for gethostbyname_r bug in qnx nto */
if ((h = gethostbyname_r(hostname,
(struct hostent *)buf,buf +
sizeof(struct hostent),buf_size -
sizeof(struct hostent),&h_errnop)) == NULL ) {
infof(data, "gethostbyname_r(2) failed for %s\n", hostname);
}
#else
}
else {
if ((h = gethostbyname(hostname)) == NULL ) {
infof(data, "gethostbyname(2) failed for %s\n", hostname);
}
#endif
}
return (h);
}

View File

@@ -40,7 +40,7 @@
* ------------------------------------------------------------
****************************************************************************/
struct hostent *GetHost(struct UrlData *data, char *hostname);
char *MakeIP(unsigned long num);
extern struct hostent *GetHost(struct UrlData *data, char *hostname, char *buf, int buf_size );
extern char *MakeIP(unsigned long num,char *addr, int addr_len);
#endif

View File

@@ -94,6 +94,9 @@
#include "progress.h"
#include "base64.h"
#include "cookie.h"
#include "strequal.h"
#include "url.h"
#include "ssluse.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
@@ -115,33 +118,114 @@ bool static checkheaders(struct UrlData *data, char *thisheader)
return FALSE;
}
UrgError http(struct UrlData *data, char *ppath, char *host, long *bytecount)
CURLcode http_connect(struct connectdata *conn)
{
/* Send the GET line to the HTTP server */
struct UrlData *data;
struct FormData *sendit=NULL;
int postsize=0;
UrgError result;
char *buf;
struct Cookie *co = NULL;
char *p_pragma = NULL;
char *p_accept = NULL;
long readbytecount;
long writebytecount;
data=conn->data;
buf = data->buffer; /* this is our buffer */
/* If we are not using a proxy and we want a secure connection,
* perform SSL initialization & connection now.
* If using a proxy with https, then we must tell the proxy to CONNECT
* us to the host we want to talk to. Only after the connect
* has occured, can we start talking SSL
*/
if (conn->protocol & PROT_HTTPS) {
if (data->bits.httpproxy) {
if ( (data->conf&(CONF_HTTP|CONF_FTP)) &&
(data->conf&CONF_UPLOAD)) {
data->conf |= CONF_PUT;
/* OK, now send the connect statment */
sendf(data->firstsocket, data,
"CONNECT %s:%d HTTP/1.0\015\012"
"%s"
"%s"
"\r\n",
data->hostname, data->remote_port,
(data->bits.proxy_user_passwd)?data->ptr_proxyuserpwd:"",
(data->useragent?data->ptr_uagent:"")
);
/* wait for the proxy to send us a HTTP/1.0 200 OK header */
/* Daniel rewrote this part Nov 5 1998 to make it more obvious */
{
int httperror=0;
int subversion=0;
while(GetLine(data->firstsocket, data->buffer, data)) {
if('\r' == data->buffer[0])
break; /* end of headers */
if(2 == sscanf(data->buffer, "HTTP/1.%d %d",
&subversion,
&httperror)) {
;
}
#if 0 /* old version */
if((data->conf&(CONF_HTTP|CONF_UPLOAD)) ==
(CONF_HTTP|CONF_UPLOAD)) {
/* enable PUT! */
data->conf |= CONF_PUT;
}
#endif
if(200 != httperror) {
if(407 == httperror)
/* Added Nov 6 1998 */
failf(data, "Proxy requires authorization!");
else
failf(data, "Received error code %d from proxy", httperror);
return CURLE_READ_ERROR;
}
}
infof (data, "Proxy has replied to CONNECT request\n");
}
/* now, perform the SSL initialization for this socket */
if(UrgSSLConnect (data)) {
return CURLE_SSL_CONNECT_ERROR;
}
}
return CURLE_OK;
}
CURLcode http_done(struct connectdata *conn)
{
struct UrlData *data;
long *bytecount = &conn->bytecount;
struct HTTP *http;
data=conn->data;
http=data->proto.http;
if(data->bits.http_formpost) {
*bytecount = http->readbytecount + http->writebytecount;
FormFree(http->sendit); /* Now free that whole lot */
data->fread = http->storefread; /* restore */
data->in = http->in; /* restore */
}
else if(data->bits.http_put) {
*bytecount = http->readbytecount + http->writebytecount;
}
/* TBD: the HTTP struct remains allocated here */
return CURLE_OK;
}
CURLcode http(struct connectdata *conn)
{
struct UrlData *data=conn->data;
char *buf = data->buffer; /* this is a short cut to the buffer */
CURLcode result;
struct HTTP *http;
struct Cookie *co=NULL; /* no cookies from start */
char *ppath = conn->ppath; /* three previous function arguments */
char *host = conn->name;
long *bytecount = &conn->bytecount;
http = (struct HTTP *)malloc(sizeof(struct HTTP));
if(!http)
return CURLE_OUT_OF_MEMORY;
memset(http, 0, sizeof(struct HTTP));
data->proto.http = http;
if ( (conn->protocol&(PROT_HTTP|PROT_FTP)) &&
data->bits.upload) {
data->bits.http_put=1;
}
/* The User-Agent string has been built in url.c already, because it might
have been used in the proxy connect, but if we have got a header with
@@ -152,17 +236,17 @@ UrgError http(struct UrlData *data, char *ppath, char *host, long *bytecount)
data->ptr_uagent=NULL;
}
if((data->conf & CONF_USERPWD) && !checkheaders(data, "Authorization:")) {
if((data->bits.user_passwd) && !checkheaders(data, "Authorization:")) {
char authorization[512];
sprintf(data->buffer, "%s:%s", data->user, data->passwd);
base64Encode(data->buffer, authorization);
data->ptr_userpwd = maprintf( "Authorization: Basic %s\015\012",
authorization);
}
if((data->conf & CONF_RANGE) && !checkheaders(data, "Range:")) {
if((data->bits.set_range) && !checkheaders(data, "Range:")) {
data->ptr_rangeline = maprintf("Range: bytes=%s\015\012", data->range);
}
if((data->conf & CONF_REFERER) && !checkheaders(data, "Referer:")) {
if((data->bits.http_set_referer) && !checkheaders(data, "Referer:")) {
data->ptr_ref = maprintf("Referer: %s\015\012", data->referer);
}
if(data->cookie && !checkheaders(data, "Cookie:")) {
@@ -173,16 +257,16 @@ UrgError http(struct UrlData *data, char *ppath, char *host, long *bytecount)
co = cookie_getlist(data->cookies,
host,
ppath,
data->conf&CONF_HTTPS?TRUE:FALSE);
conn->protocol&PROT_HTTPS?TRUE:FALSE);
}
if ((data->conf & CONF_PROXY) && (!(data->conf & CONF_HTTPS))) {
if ((data->bits.httpproxy) && !(conn->protocol&PROT_HTTPS)) {
/* The path sent to the proxy is in fact the entire URL */
strncpy(ppath, data->url, URL_MAX_LENGTH-1);
}
if(data->conf & CONF_HTTPPOST) {
if(data->bits.http_formpost) {
/* we must build the whole darned post sequence first, so that we have
a size of the whole shebang before we start to send it */
sendit = getFormData(data->httppost, &postsize);
http->sendit = getFormData(data->httppost, &http->postsize);
}
if(!checkheaders(data, "Host:"))
@@ -190,10 +274,10 @@ UrgError http(struct UrlData *data, char *ppath, char *host, long *bytecount)
if(!checkheaders(data, "Pragma:"))
p_pragma = "Pragma: no-cache\r\n";
http->p_pragma = "Pragma: no-cache\r\n";
if(!checkheaders(data, "Accept:"))
p_accept = "Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*\r\n";
http->p_accept = "Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*\r\n";
do {
sendf(data->firstsocket, data,
@@ -210,19 +294,19 @@ UrgError http(struct UrlData *data, char *ppath, char *host, long *bytecount)
"%s", /* referer */
data->customrequest?data->customrequest:
(data->conf&CONF_NOBODY?"HEAD":
(data->conf&(CONF_POST|CONF_HTTPPOST))?"POST":
(data->conf&CONF_PUT)?"PUT":"GET"),
(data->bits.no_body?"HEAD":
(data->bits.http_post || data->bits.http_formpost)?"POST":
(data->bits.http_put)?"PUT":"GET"),
ppath,
(data->conf&CONF_PROXYUSERPWD && data->ptr_proxyuserpwd)?data->ptr_proxyuserpwd:"",
(data->conf&CONF_USERPWD && data->ptr_userpwd)?data->ptr_userpwd:"",
(data->conf&CONF_RANGE && data->ptr_rangeline)?data->ptr_rangeline:"",
(data->bits.proxy_user_passwd && data->ptr_proxyuserpwd)?data->ptr_proxyuserpwd:"",
(data->bits.user_passwd && data->ptr_userpwd)?data->ptr_userpwd:"",
(data->bits.set_range && data->ptr_rangeline)?data->ptr_rangeline:"",
(data->useragent && *data->useragent && data->ptr_uagent)?data->ptr_uagent:"",
(data->ptr_cookie?data->ptr_cookie:""), /* Cookie: <data> */
(data->ptr_host?data->ptr_host:""), /* Host: host */
p_pragma?p_pragma:"",
p_accept?p_accept:"",
(data->conf&CONF_REFERER && data->ptr_ref)?data->ptr_ref:"" /* Referer: <data> <CRLF> */
http->p_pragma?http->p_pragma:"",
http->p_accept?http->p_accept:"",
(data->bits.http_set_referer && data->ptr_ref)?data->ptr_ref:"" /* Referer: <data> <CRLF> */
);
if(co) {
@@ -234,9 +318,10 @@ UrgError http(struct UrlData *data, char *ppath, char *host, long *bytecount)
sendf(data->firstsocket, data,
"Cookie:");
}
count++;
sendf(data->firstsocket, data,
" %s=%s;", co->name, co->value);
"%s%s=%s", count?"; ":"", co->name,
co->value);
count++;
}
co = co->next; /* next cookie please */
}
@@ -253,7 +338,7 @@ UrgError http(struct UrlData *data, char *ppath, char *host, long *bytecount)
thistime = localtime(&data->timevalue);
#if defined(HAVE_STRFTIME) || defined(WIN32)
#ifdef HAVE_STRFTIME
/* format: "Tue, 15 Nov 1994 12:45:26 GMT" */
strftime(buf, BUFSIZE-1, "%a, %d %b %Y %H:%M:%S %Z", thistime);
#else
@@ -284,8 +369,8 @@ UrgError http(struct UrlData *data, char *ppath, char *host, long *bytecount)
data->headers = data->headers->next;
}
if(data->conf&(CONF_POST|CONF_HTTPPOST)) {
if(data->conf & CONF_POST) {
if(data->bits.http_post || data->bits.http_formpost) {
if(data->bits.http_post) {
/* this is the simple x-www-form-urlencoded style */
sendf(data->firstsocket, data,
"Content-Length: %d\015\012"
@@ -295,53 +380,39 @@ UrgError http(struct UrlData *data, char *ppath, char *host, long *bytecount)
data->postfields );
}
else {
struct Form form;
size_t (*storefread)(char *, size_t , size_t , FILE *);
FILE *in;
long conf;
if(FormInit(&form, sendit)) {
if(FormInit(&http->form, http->sendit)) {
failf(data, "Internal HTTP POST error!\n");
return URG_HTTP_POST_ERROR;
return CURLE_HTTP_POST_ERROR;
}
storefread = data->fread; /* backup */
in = data->in; /* backup */
http->storefread = data->fread; /* backup */
http->in = data->in; /* backup */
data->fread =
(size_t (*)(char *, size_t, size_t, FILE *))
FormReader; /* set the read function to read from the
generated form data */
data->in = (FILE *)&form;
data->in = (FILE *)&http->form;
sendf(data->firstsocket, data,
"Content-Length: %d\r\n",
postsize-2);
http->postsize-2);
pgrsSetUploadSize(data, postsize);
#if 0
ProgressInit(data, postsize);
#endif
pgrsSetUploadSize(data, http->postsize);
result = Transfer(data, data->firstsocket, -1, TRUE, &readbytecount,
data->firstsocket, &writebytecount);
*bytecount = readbytecount + writebytecount;
FormFree(sendit); /* Now free that whole lot */
if(result)
result = Transfer(conn, data->firstsocket, -1, TRUE,
&http->readbytecount,
data->firstsocket,
&http->writebytecount);
if(result) {
FormFree(http->sendit); /* free that whole lot */
return result;
data->fread = storefread; /* restore */
data->in = in; /* restore */
sendf(data->firstsocket, data,
"\r\n\r\n");
}
}
else if(data->conf&CONF_PUT) {
}
else if(data->bits.http_put) {
/* Let's PUT the data to the server! */
long conf;
if(data->infilesize>0) {
sendf(data->firstsocket, data,
@@ -352,39 +423,28 @@ UrgError http(struct UrlData *data, char *ppath, char *host, long *bytecount)
sendf(data->firstsocket, data,
"\015\012");
#if 0
ProgressInit(data, data->infilesize);
#endif
pgrsSetUploadSize(data, data->infilesize);
result = Transfer(data, data->firstsocket, -1, TRUE, &readbytecount,
data->firstsocket, &writebytecount);
*bytecount = readbytecount + writebytecount;
result = Transfer(conn, data->firstsocket, -1, TRUE,
&http->readbytecount,
data->firstsocket,
&http->writebytecount);
if(result)
return result;
}
else {
sendf(data->firstsocket, data, "\r\n");
}
if(0 == *bytecount) {
/* HTTP GET/HEAD download: */
result = Transfer(data, data->firstsocket, -1, TRUE, bytecount,
result = Transfer(conn, data->firstsocket, -1, TRUE, bytecount,
-1, NULL); /* nothing to upload */
}
if(result)
return result;
#if 0
ProgressEnd(data);
#endif
pgrsDone(data);
} while (0); /* this is just a left-over from the multiple document download
attempts */
return URG_OK;
return CURLE_OK;
}

View File

@@ -40,6 +40,9 @@
*
* ------------------------------------------------------------
****************************************************************************/
UrgError http(struct UrlData *data, char *path, char *host, long *bytecountp);
CURLcode http(struct connectdata *conn);
CURLcode http_done(struct connectdata *conn);
CURLcode http_connect(struct connectdata *conn);
#endif

View File

@@ -66,13 +66,21 @@
#include <netdb.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_SOCKIO_H
#include <sys/sockio.h>
#endif
#ifdef HAVE_INET_NTOA_R
#include "inet_ntoa_r.h"
#endif
#define SYS_ERROR -1
char *if2ip(char *interface)
char *if2ip(char *interface, char *buf, int buf_size)
{
int dummy;
char *ip=NULL;
@@ -97,7 +105,12 @@ char *if2ip(char *interface)
struct sockaddr_in *s = (struct sockaddr_in *)&req.ifr_dstaddr;
memcpy(&in, &(s->sin_addr.s_addr), sizeof(in));
ip = (char *)strdup(inet_ntoa(in));
#if defined(HAVE_INET_NTOA_R)
ip = inet_ntoa_r(in,buf,buf_size);
#else
ip = strncpy(buf,inet_ntoa(in),buf_size);
ip[buf_size - 1] = 0;
#endif
}
close(dummy);
}

View File

@@ -42,9 +42,9 @@
#include "setup.h"
#if ! defined(WIN32) && ! defined(__BEOS__)
char *if2ip(char *interface);
extern char *if2ip(char *interface, char *buf, int buf_size);
#else
#define if2ip(x) NULL
#define if2ip(a,b,c) NULL
#endif
#endif

9
lib/inet_ntoa_r.h Normal file
View File

@@ -0,0 +1,9 @@
#ifndef __INET_NTOA_R_H
#define __INET_NTOA_R_H
/*
* My solaris 5.6 system running gcc 2.8.1 does *not* have this prototype
* in any system include file! Isn't that weird?
*/
/* Reentrant inet_ntoa(): formats 'in' as dotted-quad text into 'buffer'
   (at most 'buflen' bytes).
   NOTE(review): 'struct in_addr' must already be declared (via
   <netinet/in.h> or equivalent) before this header is included — confirm
   include order at all use sites. */
char *inet_ntoa_r(const struct in_addr in, char *buffer, int buflen);
#endif

View File

@@ -72,7 +72,7 @@
#define DYNA_GET_FUNCTION(type, fnc) \
(fnc) = (type)DynaGetFunction(#fnc); \
if ((fnc) == NULL) { \
return URG_FUNCTION_NOT_FOUND; \
return CURLE_FUNCTION_NOT_FOUND; \
} \
/***********************************************************************
@@ -89,7 +89,18 @@ static void DynaOpen(void)
* liblber.so automatically, but since it does not we will
* handle it here by opening liblber.so as global.
*/
dlopen("liblber.so", RTLD_LAZY | RTLD_GLOBAL);
dlopen("liblber.so",
#ifdef RTLD_LAZY_GLOBAL /* It turns out some systems use this: */
RTLD_LAZY_GLOBAL
#else
#ifdef RTLD_GLOBAL
RTLD_LAZY | RTLD_GLOBAL
#else
/* and some systems don't have the RTLD_GLOBAL symbol */
RTLD_LAZY
#endif
#endif
);
libldap = dlopen("libldap.so", RTLD_LAZY);
}
#endif
@@ -124,15 +135,20 @@ static int WriteProc(void *param, char *text, int len)
{
struct UrlData *data = (struct UrlData *)param;
printf("%s\n", text);
data->fwrite(text, 1, strlen(text), data->out);
return 0;
}
CURLcode ldap_done(struct connectdata *conn)
{
return CURLE_OK;
}
/***********************************************************************
*/
UrgError ldap(struct UrlData *data, char *path, long *bytecount)
CURLcode ldap(struct connectdata *conn)
{
UrgError status = URG_OK;
CURLcode status = CURLE_OK;
int rc;
void *(*ldap_open)(char *, int);
int (*ldap_simple_bind_s)(void *, char *, char *);
@@ -146,24 +162,19 @@ UrgError ldap(struct UrlData *data, char *path, long *bytecount)
void *server;
void *result;
void *entryIterator;
#if 0
char *dn;
char **attrArray;
char *attrIterator;
char *attrString;
void *dummy;
#endif
int ldaptext;
struct UrlData *data=conn->data;
infof(data, "LDAP: %s %s\n", data->url);
DynaOpen();
if (libldap == NULL) {
failf(data, "The needed LDAP library/libraries couldn't be opened");
return URG_LIBRARY_NOT_FOUND;
return CURLE_LIBRARY_NOT_FOUND;
}
ldaptext = data->conf & CONF_FTPASCII; /* This is a dirty hack */
ldaptext = data->bits.ftp_ascii; /* This is a dirty hack */
/* The types are needed because ANSI C distinguishes between
* pointer-to-object (data) and pointer-to-function.
@@ -182,17 +193,17 @@ UrgError ldap(struct UrlData *data, char *path, long *bytecount)
if (server == NULL) {
failf(data, "LDAP: Cannot connect to %s:%d",
data->hostname, data->port);
status = URG_COULDNT_CONNECT;
status = CURLE_COULDNT_CONNECT;
} else {
rc = ldap_simple_bind_s(server, data->user, data->passwd);
if (rc != 0) {
failf(data, "LDAP: %s", ldap_err2string(rc));
status = URG_LDAP_CANNOT_BIND;
status = CURLE_LDAP_CANNOT_BIND;
} else {
rc = ldap_url_search_s(server, data->url, 0, &result);
if (rc != 0) {
failf(data, "LDAP: %s", ldap_err2string(rc));
status = URG_LDAP_SEARCH_FAILED;
status = CURLE_LDAP_SEARCH_FAILED;
} else {
for (entryIterator = ldap_first_entry(server, result);
entryIterator;
@@ -204,7 +215,7 @@ UrgError ldap(struct UrlData *data, char *path, long *bytecount)
"", 0, 0);
if (rc != 0) {
failf(data, "LDAP: %s", ldap_err2string(rc));
status = URG_LDAP_SEARCH_FAILED;
status = CURLE_LDAP_SEARCH_FAILED;
}
} else {
rc = ldap_entry2html(server, NULL, entryIterator, NULL,
@@ -212,7 +223,7 @@ UrgError ldap(struct UrlData *data, char *path, long *bytecount)
"", 0, 0, NULL, NULL);
if (rc != 0) {
failf(data, "LDAP: %s", ldap_err2string(rc));
status = URG_LDAP_SEARCH_FAILED;
status = CURLE_LDAP_SEARCH_FAILED;
}
}
}

View File

@@ -40,6 +40,7 @@
*
* ------------------------------------------------------------
****************************************************************************/
UrgError ldap(struct UrlData *data, char *path, long *bytecount);
CURLcode ldap(struct connectdata *conn);
CURLcode ldap_done(struct connectdata *conn);
#endif /* __LDAP_H */

View File

@@ -56,6 +56,7 @@
#include "setup.h"
#include "getenv.h"
#include "strequal.h"
/* Debug this single source file with:
'make netrc' then run './netrc'!
@@ -94,9 +95,14 @@ int ParseNetrc(char *host,
#define NETRC DOT_CHAR "netrc"
if(!home || (strlen(home)>(sizeof(netrcbuffer)-strlen(NETRC))))
if(!home)
return -1;
if(strlen(home)>(sizeof(netrcbuffer)-strlen(NETRC))) {
free(home);
return -1;
}
sprintf(netrcbuffer, "%s%s%s", home, DIR_CHAR, NETRC);
file = fopen(netrcbuffer, "r");
@@ -161,6 +167,8 @@ int ParseNetrc(char *host,
fclose(file);
}
free(home);
return retcode;
}

View File

@@ -48,6 +48,13 @@
#include <time.h>
#endif
/* 20000318 mgs
* later we use _scrsize to determine the screen width, this emx library
* function needs stdlib.h to be included */
#if defined(__EMX__)
#include <stdlib.h>
#endif
#include <curl/curl.h>
#include "urldata.h"
@@ -65,15 +72,23 @@ void time2str(char *r, int t)
but never longer than 5 columns. Add suffix k, M, G when suitable... */
char *max5data(double bytes, char *max5)
{
#define ONE_KILOBYTE 1024
#define ONE_MEGABYTE (1024*1024)
if(bytes < 100000) {
sprintf(max5, "%5d", (int)bytes);
return max5;
}
if(bytes < (9999*1024)) {
sprintf(max5, "%4dk", (int)bytes/1024);
if(bytes < (9999*ONE_KILOBYTE)) {
sprintf(max5, "%4dk", (int)bytes/ONE_KILOBYTE);
return max5;
}
sprintf(max5, "%4dM", (int)bytes/(1024*1024));
if(bytes < (100*ONE_MEGABYTE)) {
/* 'XX.XM' is good as long as we're less than 100 megs */
sprintf(max5, "%2.1fM", bytes/ONE_MEGABYTE);
return max5;
}
sprintf(max5, "%4dM", (int)bytes/ONE_MEGABYTE);
return max5;
}
@@ -103,7 +118,7 @@ void pgrsDone(struct UrlData *data)
void pgrsMode(struct UrlData *data, int mode)
{
/* mode should include a hidden mode as well */
if(data->conf&(CONF_NOPROGRESS|CONF_MUTE))
if(data->bits.hide_progress || data->bits.mute)
data->progress.flags |= PGRS_HIDE; /* don't show anything */
else {
data->progress.mode = mode; /* store type */
@@ -172,29 +187,10 @@ void pgrsSetUploadSize(struct UrlData *data, double size)
*/
void pgrsUpdate(struct UrlData *data)
int pgrsUpdate(struct UrlData *data)
{
struct timeval now;
if(data->progress.flags & PGRS_HIDE)
; /* We do enter this function even if we don't wanna see anything, since
this is were lots of the calculations are being made that will be used
even when not displayed! */
else if(!(data->progress.flags & PGRS_HEADERS_OUT)) {
if ( data->progress.mode == CURL_PROGRESS_STATS ) {
fprintf(data->err,
" %% Total %% Received %% Xferd Average Speed Time Curr.\n"
" Dload Upload Total Current Left Speed\n");
}
data->progress.flags |= PGRS_HEADERS_OUT; /* headers are shown */
}
now = tvnow(); /* what time is it */
switch(data->progress.mode) {
case CURL_PROGRESS_STATS:
default:
{
char max5[6][6];
double dlpercen=0;
double ulpercen=0;
@@ -221,8 +217,23 @@ void pgrsUpdate(struct UrlData *data)
double total_estimate;
if(data->progress.flags & PGRS_HIDE)
; /* We do enter this function even if we don't wanna see anything, since
this is were lots of the calculations are being made that will be used
even when not displayed! */
else if(!(data->progress.flags & PGRS_HEADERS_OUT)) {
if ( data->progress.mode == CURL_PROGRESS_STATS ) {
fprintf(data->err,
" %% Total %% Received %% Xferd Average Speed Time Curr.\n"
" Dload Upload Total Current Left Speed\n");
}
data->progress.flags |= PGRS_HEADERS_OUT; /* headers are shown */
}
now = tvnow(); /* what time is it */
if(data->progress.lastshow == tvlong(now))
return; /* never update this more than once a second if the end isn't
return 0; /* never update this more than once a second if the end isn't
reached */
data->progress.lastshow = now.tv_sec;
@@ -249,7 +260,14 @@ void pgrsUpdate(struct UrlData *data)
(speeder[nowindex]-speeder[checkindex])/(count?count:1);
if(data->progress.flags & PGRS_HIDE)
return;
return 0;
else if(data->fprogress) {
return data->fprogress(data->progress_client,
data->progress.size_dl,
data->progress.downloaded,
data->progress.size_ul,
data->progress.uploaded);
}
/* Figure out the estimated time of arrival for the upload */
if(data->progress.flags & PGRS_UL_SIZE_KNOWN) {
@@ -299,7 +317,6 @@ void pgrsUpdate(struct UrlData *data)
if(total_expected_transfer)
total_percen=(double)(total_transfer/total_expected_transfer)*100;
fprintf(stderr,
"\r%3d %s %3d %s %3d %s %s %s %s %s %s %s",
(int)total_percen, /* total % */
@@ -316,8 +333,8 @@ void pgrsUpdate(struct UrlData *data)
time_left, /* time left */
max5data(data->progress.current_speed, max5[5]) /* current speed */
);
}
break;
#if 0
case CURL_PROGRESS_BAR:
/* original progress bar code by Lars Aas */
@@ -350,7 +367,8 @@ void pgrsUpdate(struct UrlData *data)
prev = point;
break;
#endif
}
return 0;
}
@@ -365,6 +383,12 @@ static int width = 0;
void ProgressInit(struct UrlData *data, int max/*, int options, int moremax*/)
{
#ifdef __EMX__
/* 20000318 mgs */
int scr_size [2];
#endif
char *colp;
if(data->conf&(CONF_NOPROGRESS|CONF_MUTE))
return;
@@ -372,10 +396,28 @@ void ProgressInit(struct UrlData *data, int max/*, int options, int moremax*/)
/* TODO: get terminal width through ansi escapes or something similar.
try to update width when xterm is resized... - 19990617 larsa */
if (curl_GetEnv("COLUMNS") != NULL)
width = atoi(curl_GetEnv("COLUMNS"));
#ifndef __EMX__
/* 20000318 mgs
* OS/2 users most likely won't have this env var set, and besides that
* we're using our own way to determine screen width */
colp = curl_GetEnv("COLUMNS");
if (colp != NULL) {
width = atoi(colp);
free(colp);
}
else
width = 79;
#else
/* 20000318 mgs
* We use this emx library call to get the screen width, and subtract
* one from what we got in order to avoid a problem with the cursor
* advancing to the next line if we print a string that is as long as
* the screen is wide. */
_scrsize(scr_size);
width = scr_size[0] - 1;
#endif
progressmax = max;
if(-1 == max)

View File

@@ -42,21 +42,25 @@
#include "timeval.h"
#if 0
void ProgressInit(struct UrlData *data, int max);
void ProgressShow(struct UrlData *data,
int point, struct timeval start, struct timeval now, bool force);
void ProgressEnd(struct UrlData *data);
void ProgressMode(int mode);
#endif
typedef enum {
TIMER_NONE,
TIMER_NAMELOOKUP,
TIMER_CONNECT,
TIMER_PRETRANSFER,
TIMER_POSTRANSFER,
TIMER_LAST /* must be last */
} timerid;
void pgrsDone(struct UrlData *data);
void pgrsMode(struct UrlData *data, int mode);
void pgrsStartNow(struct UrlData *data);
void pgrsSetDownloadSize(struct UrlData *data, double size);
void pgrsSetUploadSize(struct UrlData *data, double size);
void pgrsSetDownloadCounter(struct UrlData *data, double size);
void pgrsSetUploadCounter(struct UrlData *data, double size);
void pgrsUpdate(struct UrlData *data);
void pgrsSetUploadCounter(struct UrlData *data, double size);
int pgrsUpdate(struct UrlData *data);
void pgrsTime(struct UrlData *data, timerid timer);
/* Don't show progress for sizes smaller than: */
@@ -77,14 +81,4 @@ void pgrsUpdate(struct UrlData *data);
#define PGRS_HEADERS_OUT (1<<7) /* set when the headers have been written */
typedef enum {
TIMER_NONE,
TIMER_NAMELOOKUP,
TIMER_CONNECT,
TIMER_PRETRANSFER,
TIMER_POSTRANSFER,
TIMER_LAST /* must be last */
} timerid;
#endif /* __PROGRESS_H */

View File

@@ -61,7 +61,7 @@
void infof(struct UrlData *data, char *fmt, ...)
{
va_list ap;
if(data->conf & CONF_VERBOSE) {
if(data->bits.verbose) {
va_start(ap, fmt);
fputs("* ", data->err);
vfprintf(data->err, fmt, ap);
@@ -95,7 +95,7 @@ int sendf(int fd, struct UrlData *data, char *fmt, ...)
va_end(ap);
if(!s)
return 0; /* failure */
if(data->conf & CONF_VERBOSE)
if(data->bits.verbose)
fprintf(data->err, "> %s", s);
#ifndef USE_SSLEAY
bytes_written = swrite(fd, s, strlen(s));

View File

@@ -57,8 +57,6 @@
#endif
#endif
#ifndef OS
#ifdef WIN32
#define OS "win32"
@@ -99,6 +97,7 @@ defined(HAVE_LIBSSL) && defined(HAVE_LIBCRYPTO)
#endif
#endif
#if 0
#ifdef HAVE_STRCASECMP
#define strnequal(x,y,z) !(strncasecmp)(x,y,z)
#define strequal(x,y) !(strcasecmp)(x,y)
@@ -107,6 +106,7 @@ defined(HAVE_LIBSSL) && defined(HAVE_LIBCRYPTO)
#define strnequal(x,y,z) !strnicmp(x,y,z)
#define strequal(x,y) !stricmp(x,y)
#endif
#endif
/* Below we define four functions. They should
1. close a socket

View File

@@ -48,7 +48,7 @@
#include "sendf.h"
#include "speedcheck.h"
UrgError speedcheck(struct UrlData *data,
CURLcode speedcheck(struct UrlData *data,
struct timeval now)
{
static struct timeval keeps_speed;
@@ -69,13 +69,13 @@ UrgError speedcheck(struct UrlData *data,
"Less than %d bytes/sec transfered the last %d seconds",
data->low_speed_limit,
data->low_speed_time);
return URG_OPERATION_TIMEOUTED;
return CURLE_OPERATION_TIMEOUTED;
}
}
else {
/* we keep up the required speed all right */
keeps_speed = now;
}
return URG_OK;
return CURLE_OK;
}

View File

@@ -44,7 +44,7 @@
#include "timeval.h"
UrgError speedcheck(struct UrlData *data,
CURLcode speedcheck(struct UrlData *data,
struct timeval now);
#endif

View File

@@ -38,6 +38,12 @@
* ------------------------------------------------------------
****************************************************************************/
/*
* The original SSL code was written by
* Linas Vepstas <linas@linas.org> and Sampo Kellomaki <sampo@iki.fi>
*/
#include <string.h>
#include <stdlib.h>
@@ -169,7 +175,7 @@ UrgSSLConnect (struct UrlData *data)
#endif
{
/* We need to seed the PRNG properly! */
#ifdef WIN32
#ifdef HAVE_RAND_SCREEN
/* This one gets a random value by reading the currently shown screen */
RAND_screen();
#else

83
lib/strequal.c Normal file
View File

@@ -0,0 +1,83 @@
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
* License for the specific language governing rights and limitations
* under the License.
*
* The Original Code is Curl.
*
* The Initial Developer of the Original Code is Daniel Stenberg.
*
* Portions created by the Initial Developer are Copyright (C) 2000.
* All Rights Reserved.
*
* ------------------------------------------------------------
* Main author:
* - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
*
* http://curl.haxx.nu
*
* $Source$
* $Revision$
* $Date$
* $Author$
* $State$
* $Locker$
*
* ------------------------------------------------------------
****************************************************************************/
#include "setup.h"
/*
 * strequal - case-insensitive string equality.
 *
 * Returns non-zero if 'first' and 'second' are equal ignoring case,
 * zero otherwise.  Uses the platform's native case-insensitive compare
 * when configure found one; otherwise falls back to a portable loop.
 */
int strequal(const char *first, const char *second)
{
#if defined(HAVE_STRCASECMP)
  return !strcasecmp(first, second);
#elif defined(HAVE_STRCMPI)
  return !strcmpi(first, second);
#elif defined(HAVE_STRICMP)
  /* bug fix: this branch previously called strcmpi(), which is the wrong
     function for the HAVE_STRICMP guard and may not exist here */
  return !stricmp(first, second);
#else
  /* Portable fallback.  Cast through unsigned char before toupper():
     passing a negative plain char is undefined behavior. */
  while (*first && *second) {
    if (toupper((unsigned char)*first) != toupper((unsigned char)*second)) {
      break;
    }
    first++;
    second++;
  }
  /* equal only if both strings ended (or first mismatch is equal-upcased,
     which can't happen after the break) */
  return toupper((unsigned char)*first) == toupper((unsigned char)*second);
#endif
}
/*
 * strnequal - case-insensitive equality of at most 'max' characters.
 *
 * Returns non-zero if the first 'max' characters of 'first' and 'second'
 * are equal ignoring case (or the strings are equal and shorter than
 * 'max'), zero otherwise.
 */
int strnequal(const char *first, const char *second, size_t max)
{
#if defined(HAVE_STRCASECMP)
  return !strncasecmp(first, second, max);
#elif defined(HAVE_STRCMPI)
  /* NOTE(review): strncmpi() is non-standard; confirm it exists on every
     platform where HAVE_STRCMPI gets defined */
  return !strncmpi(first, second, max);
#elif defined(HAVE_STRICMP)
  return !strnicmp(first, second, max);
#else
  /* Portable fallback.  Cast through unsigned char before toupper():
     passing a negative plain char is undefined behavior. */
  while (*first && *second && max) {
    if (toupper((unsigned char)*first) != toupper((unsigned char)*second)) {
      break;
    }
    max--;
    first++;
    second++;
  }
  if (!max) {
    /* bug fix: all of the first 'max' characters matched, so the strings
       are equal within the limit.  The old code fell through and compared
       the characters *after* the limit, wrongly failing e.g.
       strnequal("abcX", "abcY", 3). */
    return 1;
  }
  return toupper((unsigned char)*first) == toupper((unsigned char)*second);
#endif
}

45
lib/strequal.h Normal file
View File

@@ -0,0 +1,45 @@
#ifndef __STREQUAL_H
#define __STREQUAL_H
/*****************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.0 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * The Original Code is Curl.
 *
 * The Initial Developer of the Original Code is Daniel Stenberg.
 *
 * Portions created by the Initial Developer are Copyright (C) 2000.
 * All Rights Reserved.
 *
 * ------------------------------------------------------------
 * Main author:
 * - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
 *
 *   http://curl.haxx.nu
 *
 * $Source$
 * $Revision$
 * $Date$
 * $Author$
 * $State$
 * $Locker$
 *
 * ------------------------------------------------------------
 ****************************************************************************/

#include <stddef.h> /* for size_t, makes this header self-contained */

/* case insensitive string comparison, returns TRUE (non-zero) on match */
int strequal(const char *first, const char *second);

/* like strequal() but compares at most 'max' characters */
int strnequal(const char *first, const char *second, size_t max);

#endif

View File

@@ -225,7 +225,7 @@ static void printoption(struct UrlData *data,
char *fmt;
char *opt;
if (data->conf & CONF_VERBOSE)
if (data->bits.verbose)
{
if (cmd == IAC)
{
@@ -628,7 +628,7 @@ static void printsub(struct UrlData *data,
{
int i = 0;
if (data->conf & CONF_VERBOSE)
if (data->bits.verbose)
{
if (direction)
{
@@ -871,8 +871,14 @@ void telwrite(struct UrlData *data,
}
}
UrgError telnet(struct UrlData *data)
CURLcode telnet_done(struct connectdata *conn)
{
return CURLE_OK;
}
CURLcode telnet(struct connectdata *conn)
{
struct UrlData *data = conn->data;
int sockfd = data->firstsocket;
fd_set readfd;
fd_set keepfd;
@@ -931,7 +937,7 @@ UrgError telnet(struct UrlData *data)
telrcv(data, (unsigned char *)buf, nread);
}
}
return URG_OK;
return CURLE_OK;
}

View File

@@ -40,6 +40,7 @@
*
* ------------------------------------------------------------
****************************************************************************/
UrgError telnet(struct UrlData *data);
CURLcode telnet(struct connectdata *conn);
CURLcode telnet_done(struct connectdata *conn);
#endif

1060
lib/url.c

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,44 @@
#ifndef __URL_H
#define __URL_H
/*****************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.0 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * The Original Code is Curl.
 *
 * The Initial Developer of the Original Code is Daniel Stenberg.
 *
 * Portions created by the Initial Developer are Copyright (C) 1998.
 * All Rights Reserved.
 *
 * ------------------------------------------------------------
 * Main author:
 * - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
 *
 *   http://curl.haxx.nu
 *
 * $Source$
 * $Revision$
 * $Date$
 * $Author$
 * $State$
 * $Locker$
 *
 * ------------------------------------------------------------
 ****************************************************************************/

/* Forward declaration so the prototype below does not introduce the
   struct tag at prototype scope; full definition lives in urldata.h */
struct UrlData;

/* Read a line of input from 'sockfd' into 'buf' */
int GetLine(int sockfd, char *buf, struct UrlData *data);

#endif

View File

@@ -67,6 +67,7 @@
#define CURL_DEFAULT_PASSWORD "curl_by_Daniel.Stenberg@haxx.nu"
#include "cookie.h"
#include "formdata.h"
#ifdef USE_SSLEAY
/* SSLeay stuff usually in /usr/local/ssl/include */
@@ -87,8 +88,12 @@
#endif
#endif
#include <netinet/in.h>
#include "timeval.h"
/* Download buffer size, keep it fairly big for speed reasons */
#define BUFSIZE (1024*50)
@@ -96,6 +101,88 @@
of need. */
#define HEADERSIZE 256
#ifndef MAX
#define MAX(x,y) ((x)>(y)?(x):(y))
#endif
typedef enum {
STRUCT_NONE,
STRUCT_OPEN,
STRUCT_CONNECT,
STRUCT_LAST
} Handle;
typedef enum {
CONN_NONE, /* illegal state */
CONN_INIT, /* curl_connect() has been called */
CONN_DO, /* curl_do() has been called successfully */
CONN_DONE, /* curl_done() has been called successfully */
CONN_ERROR, /* an error has occurred */
CONN_LAST /* illegal state */
} ConnState;
/*
* The connectdata struct contains all fields and variables that should be
* unique for an entire connection.
*/
struct connectdata {
/**** Fields set when inited and not modified again */
/* To better see what kind of struct that is passed as input, *ALL* publicly
returned handles MUST have this initial 'Handle'. */
Handle handle; /* struct identifier */
struct UrlData *data; /* link to the root CURL struct */
/**** curl_connect() phase fields */
ConnState state; /* for state dependent actions */
long protocol; /* PROT_* flags concerning the protocol set */
#define PROT_MISSING (1<<0)
#define PROT_GOPHER (1<<1)
#define PROT_HTTP (1<<2)
#define PROT_HTTPS (1<<3)
#define PROT_FTP (1<<4)
#define PROT_TELNET (1<<5)
#define PROT_DICT (1<<6)
#define PROT_LDAP (1<<7)
#define PROT_FILE (1<<8)
struct hostent *hp;
struct sockaddr_in serv_addr;
char proto[64];
char gname[256];
char *name;
char path[URL_MAX_LENGTH];
char *ppath;
long bytecount;
struct timeval now;
/* These two functions MUST be set by the curl_connect() function to be
protocol dependent */
CURLcode (*curl_do)(struct connectdata *connect);
CURLcode (*curl_done)(struct connectdata *connect);
/* This function *MAY* be set to a protocol-dependent function that is run
* after the connect() and everything is done, as a step in the connection.
*/
CURLcode (*curl_connect)(struct connectdata *connect);
/**** curl_get() phase fields */
/* READ stuff */
int sockfd; /* socket to read from or -1 */
int size; /* -1 if unknown at this point */
bool getheader; /* TRUE if header parsing is wanted */
long *bytecountp; /* return number of bytes read or NULL */
/* WRITE stuff */
int writesockfd; /* socket to write to, it may very well be
the same we read from. -1 disables */
long *writebytecountp; /* return number of bytes written or NULL */
};
struct Progress {
long lastshow; /* time() of the last displayed progress meter or NULL to
force redraw at next call */
@@ -121,42 +208,161 @@ struct Progress {
int httpcode;
};
/****************************************************************************
* HTTP unique setup
***************************************************************************/
struct HTTP {
struct FormData *sendit;
int postsize;
char *p_pragma;
char *p_accept;
long readbytecount;
long writebytecount;
/* For FORM posting */
struct Form form;
size_t (*storefread)(char *, size_t , size_t , FILE *);
FILE *in;
};
/****************************************************************************
* FTP unique setup
***************************************************************************/
struct FTP {
long *bytecountp;
char *user;
char *passwd;
char *urlpath; /* the originally given path part of the URL */
char *dir; /* decoded directory */
char *file; /* decoded file */
};
struct Configbits {
bool ftp_append;
bool ftp_ascii;
bool ftp_list_only;
bool ftp_use_port;
bool hide_progress;
bool http_fail_on_error;
bool http_follow_location;
bool http_formpost;
bool http_include_header;
bool http_post;
bool http_put;
bool http_set_referer;
bool httpproxy;
bool mute;
bool no_body;
bool proxy_user_passwd;
bool proxystringalloc; /* the http proxy string is malloc()'ed */
bool set_port;
bool set_range;
bool upload;
bool use_netrc;
bool user_passwd;
bool verbose;
};
typedef size_t (*progress_callback)(void *clientp,
size_t dltotal,
size_t dlnow,
size_t ultotal,
size_t ulnow);
typedef size_t (*write_callback)(char *buffer,
size_t size,
size_t nitems,
FILE *outstream);
typedef size_t (*read_callback)(char *buffer,
size_t size,
size_t nitems,
FILE *instream);
/* What type of interface that intiated this struct */
typedef enum {
CURLI_NONE,
CURLI_EASY,
CURLI_NORMAL,
CURLI_LAST
} CurlInterface;
/*
* As of April 11, 2000 we're now trying to split up the urldata struct in
* three different parts:
*
* (Global)
* 1 - No matter how many hosts and requests that are being performed, this
* goes for all of them.
*
* (Session)
* 2 - Host and protocol-specific. No matter if we do several transfers to and
* from this host, these variables stay the same.
*
* (Request)
* 3 - Request-specific. Variables that are of interest for this particular
* transfer being made right now.
*
*/
struct UrlData {
Handle handle; /* struct identifier */
CurlInterface interf; /* created by WHAT interface? */
/*************** Global - specific items ************/
FILE *err; /* the stderr writes goes here */
char *errorbuffer; /* store failure messages in here */
/*************** Session - specific items ************/
char *proxy; /* if proxy, set it here, set CONF_PROXY to use this */
char *proxyuserpwd; /* Proxy <user:password>, if used */
/*************** Request - specific items ************/
union {
struct HTTP *http;
struct HTTP *gopher; /* alias, just for the sake of being more readable */
struct HTTP *https; /* alias, just for the sake of being more readable */
struct FTP *ftp;
#if 0 /* no need for special ones for these: */
struct TELNET *telnet;
struct FILE *file;
struct LDAP *ldap;
struct DICT *dict;
#endif
void *generic;
} proto;
FILE *out; /* the fetched file goes here */
FILE *in; /* the uploaded file is read from here */
FILE *err; /* the stderr writes goes here */
FILE *writeheader; /* write the header to this is non-NULL */
char *url; /* what to get */
char *freethis; /* if non-NULL, an allocated string for the URL */
char *hostname; /* hostname to contect, as parsed from url */
char *hostname; /* hostname to connect, as parsed from url */
unsigned short port; /* which port to use (if non-protocol bind) set
CONF_PORT to use this */
unsigned short remote_port; /* what remote port to connect to, not the proxy
port! */
char *proxy; /* if proxy, set it here, set CONF_PROXY to use this */
long conf; /* configure flags */
struct Configbits bits; /* new-style (v7) flag data */
char *userpwd; /* <user:password>, if used */
char *proxyuserpwd; /* Proxy <user:password>, if used */
char *range; /* range, if used. See README for detailed specification on
this syntax. */
char *postfields; /* if POST, set the fields' values here */
char *referer;
char *errorbuffer; /* store failure messages in here */
char *useragent; /* User-Agent string */
char *ftpport; /* port to send with the PORT command */
/* function that stores the output:*/
size_t (*fwrite)(char *buffer,
size_t size,
size_t nitems,
FILE *outstream);
write_callback fwrite;
/* function that reads the input:*/
size_t (*fread)(char *buffer,
size_t size,
size_t nitems,
FILE *outstream);
read_callback fread;
/* function that wants progress information */
progress_callback fprogress;
void *progress_client; /* pointer to pass to the progress callback */
long timeout; /* in seconds, 0 means no timeout */
long infilesize; /* size of file to upload, -1 means unknown */

View File

@@ -58,7 +58,7 @@ char *curl_version(void)
#ifdef USE_SSLEAY
#if (SSLEAY_VERSION_NUMBER >= 0x900000)
sprintf(ptr, " (SSL %x.%x.%x)",
sprintf(ptr, " (SSL %lx.%lx.%lx)",
(SSLEAY_VERSION_NUMBER>>28)&0xff,
(SSLEAY_VERSION_NUMBER>>20)&0xff,
(SSLEAY_VERSION_NUMBER>>12)&0xf);

View File

@@ -41,6 +41,7 @@
#include <stdio.h>
#include <string.h>
#include "strequal.h"
#include "writeout.h"
typedef enum {
@@ -102,6 +103,9 @@ void WriteOut(struct UrlData *data)
case VAR_EFFECTIVE_URL:
fprintf(stream, "%s", data->url?data->url:"");
break;
case VAR_HTTP_CODE:
fprintf(stream, "%03d", data->progress.httpcode);
break;
case VAR_TOTAL_TIME:
fprintf(stream, "%.3f", data->progress.timespent);
break;

View File

@@ -1,3 +1,5 @@
#ifndef __WRITEOUT_H
#define __WRITEOUT_H
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
@@ -41,3 +43,5 @@
#include "urldata.h"
void WriteOut(struct UrlData *data);
#endif

13
maketgz
View File

@@ -7,15 +7,24 @@ read version
libversion="$version"
#
# Now we have a section to get the major, minor and patch number from the
# full version string. We create a single hexadecimal number from it '0xMMmmpp'
#
perl='$a=<STDIN>;@p=split("\\.",$a);for(0..2){printf STDOUT ("%02x",$p[0+$_]);}';
numeric=`echo $libversion | perl -e "$perl"`
echo "CURL version number?"
read curlversion
HEADER=include/curl/curl.h
CHEADER=src/version.h
# Replace version number in header file:
sed 's/#define LIBCURL_VERSION.*/#define LIBCURL_VERSION "'$libversion'"/g' $HEADER >$HEADER.new
sed -e 's/^#define LIBCURL_VERSION .*/#define LIBCURL_VERSION "'$libversion'"/g' \
-e 's/^#define LIBCURL_VERSION_NUM .*/#define LIBCURL_VERSION_NUM 0x'$numeric'/g' \
$HEADER >$HEADER.new
# Save old header file
cp -p $HEADER $HEADER.old

View File

@@ -4,7 +4,7 @@
# Some flags needed when trying to cause warnings ;-)
# CFLAGS = -Wall -pedantic
CPPFLAGS = -DGLOBURL -DCURL_SEPARATORS
#CPPFLAGS = -DGLOBURL -DCURL_SEPARATORS
INCLUDES = -I$(top_srcdir)/include
@@ -21,7 +21,11 @@ EXTRA_DIST = mkhelp.pl Makefile.vc6
AUTOMAKE_OPTIONS = foreign no-dependencies
MANPAGE=$(top_srcdir)/docs/curl.1
README=$(top_srcdir)/docs/README.curl
MKHELP=$(top_srcdir)/src/mkhelp.pl
# This generates the hugehelp.c file
hugehelp.c: $(top_srcdir)/README.curl $(top_srcdir)/curl.1 mkhelp.pl
hugehelp.c: $(README) $(MANPAGE) mkhelp.pl
rm -f hugehelp.c
$(NROFF) -man $(top_srcdir)/curl.1 | $(PERL) $(top_srcdir)/src/mkhelp.pl $(top_srcdir)/README.curl > hugehelp.c
$(NROFF) -man $(MANPAGE) | $(PERL) $(MKHELP) $(README) > hugehelp.c

View File

@@ -9,7 +9,7 @@
CC = gcc
STRIP = strip -s
OPENSSL_PATH = ../../openssl-0.9.4
OPENSSL_PATH = ../../openssl-0.9.5a
# We may need these someday
# PERL = perl
@@ -19,7 +19,7 @@ OPENSSL_PATH = ../../openssl-0.9.4
## Nothing more to do below this line!
INCLUDES = -I. -I.. -I../include
CFLAGS = -g -O2 -DGLOBURL -DCURL_SEPARATORS
CFLAGS = -g -O2
LDFLAGS =
COMPILE = $(CC) $(INCLUDES) $(CFLAGS)
LINK = $(CC) $(CFLAGS) $(LDFLAGS) -o $@

View File

@@ -2,6 +2,9 @@
/* Define if you have the strcasecmp function. */
/*#define HAVE_STRCASECMP 1*/
/* Define if you have the stricmp function. */
#define HAVE_STRICMP 1
/* Define cpu-machine-OS */
#define OS "win32"

View File

@@ -231,7 +231,9 @@ puts (
" Makes curl scan the .netrc file in the user's home\n"
" directory for login name and password. This is typi<70>\n"
" cally used for ftp on unix. If used with http, curl\n"
" will enable user authentication. See netrc(4) for\n"
);
puts(
" will enable user authentication. See netrc(5) for\n"
" details on the file format. Curl will not complain if\n"
" that file hasn't the right permissions (it should not\n"
" be world nor group readable). The environment variable\n"
@@ -241,7 +243,7 @@ puts (
" .netrc to allow curl to ftp to the machine\n"
" host.domain.com with user name\n"
"\n"
" machine host.domain.com user myself password secret\n"
" machine host.domain.com login myself password secret\n"
"\n"
" -N/--no-buffer\n"
" Disables the buffering of the output stream. In normal\n"
@@ -254,9 +256,9 @@ puts (
" -o/--output <file>\n"
" Write output to <file> instead of stdout. If you are\n"
" using {} or [] to fetch multiple documents, you can use\n"
" #[num] in the <file> specifier. That variable will be\n"
" replaced with the current string for the URL being\n"
" fetched. Like in:\n"
" '#' followed by a number in the <file> specifier. That\n"
" variable will be replaced with the current string for\n"
" the URL being fetched. Like in:\n"
"\n"
" curl http://{one,two}.site.com -o \"file_#1.txt\"\n"
"\n"
@@ -468,6 +470,8 @@ puts (
"\n"
" -z/--time-cond <date expression>\n"
" (HTTP) Request to get a file that has been modified\n"
);
puts(
" later than the given time and date, or one that has\n"
" been modified before that time. The date expression can\n"
" be all sorts of date strings or if it doesn't match any\n"
@@ -654,10 +658,10 @@ puts (
" - Linas Vepstas <linas@linas.org>\n"
" - Bjorn Reese <breese@mail1.stofanet.dk>\n"
" - Johan Anderson <johan@homemail.com>\n"
" - Kjell Ericson <Kjell.Ericson@sth.frontec.se>\n"
" - Kjell Ericson <Kjell.Ericson@haxx,nu>\n"
" - Troy Engel <tengel@sonic.net>\n"
" - Ryan Nelson <ryan@inch.com>\n"
" - Bjorn Stenberg <Bjorn.Stenberg@sth.frontec.se>\n"
" - Bjorn Stenberg <Bjorn.Stenberg@haxx.nu>\n"
" - Angus Mackay <amackay@gus.ml.org>\n"
" - Eric Young <eay@cryptsoft.com>\n"
" - Simon Dick <simond@totally.irrelevant.org>\n"
@@ -687,6 +691,10 @@ puts (
" - Ellis Pritchard <ellis@citria.com>\n"
" - Damien Adant <dams@usa.net>\n"
" - Chris <cbayliss@csc.come>\n"
" - Marco G. Salvagno <mgs@whiz.cjb.net>\n"
" - Paul Marquis <pmarquis@iname.com>\n"
" - David LeBlanc <dleblanc@qnx.com>\n"
" - Rich Gray at Plus Technologies\n"
"\n"
"WWW\n"
" http://curl.haxx.nu\n"
@@ -790,6 +798,8 @@ puts (
" curl -u user:passwd -x my-proxy:888 http://www.get.this/\n"
"\n"
" Some proxies require special authentication. Specify by using -U as above:\n"
);
puts(
"\n"
" curl -U user:passwd -x my-proxy:888 http://www.get.this/\n"
"\n"
@@ -834,9 +844,13 @@ puts (
" \n"
" curl -T uploadfile -u user:passwd ftp://ftp.upload.com/\n"
"\n"
" NOTE: Curl is not currently supporing ftp upload through a proxy! The reason\n"
" for this is simply that proxies are seldomly configured to allow this and\n"
" that no author has supplied code that makes it possible!\n"
" Upload a local file to get appended to the remote file using ftp:\n"
"\n"
" curl -T localfile -a ftp://ftp.upload.com/remotefile\n"
"\n"
" NOTE: Curl does not support ftp upload through a proxy! The reason for this\n"
" is simply that proxies are seldomly configured to allow this and that no\n"
" author has supplied code that makes it possible!\n"
"\n"
" HTTP\n"
"\n"
@@ -889,6 +903,41 @@ puts (
" curl -d \"name=Rafael%20Sagula&phone=3320780\" \\\n"
" http://www.where.com/guest.cgi\n"
"\n"
" How to post a form with curl, lesson #1:\n"
"\n"
" Dig out all the <input> tags in the form that you want to fill in. (There's\n"
" a perl program called formfind.pl on the curl site that helps with this).\n"
"\n"
" If there's a \"normal\" post, you use -d to post. -d takes a full \"post\n"
" string\", which is in the format\n"
"\n"
" <variable1>=<data1>&<variable2>=<data2>&...\n"
"\n"
" The 'variable' names are the names set with \"name=\" in the <input> tags, and\n"
" the data is the contents you want to fill in for the inputs. The data *must*\n"
" be properly URL encoded. That means you replace space with + and that you\n"
" write weird letters with %XX where XX is the hexadecimal representation of\n"
" the letter's ASCII code.\n"
"\n"
" Example:\n"
"\n"
" (page located at http://www.formpost.com/getthis/\n"
"\n"
" <form action=\"post.cgi\" method=\"post\">\n"
" <input name=user size=10>\n"
" <input name=pass type=password size=10>\n"
" <input name=id type=hidden value=\"blablabla\">\n"
" <input name=ding value=\"submit\">\n"
" </form>\n"
"\n"
" We want to enter user 'foobar' with password '12345'.\n"
"\n"
" To post to this, you enter a curl command line like:\n"
"\n"
" curl -d \"user=foobar&pass=12345&id=blablabla&dig=submit\" (continues)\n"
" http://www.formpost.com/getthis/post.cgi\n"
"\n"
"\n"
" While -d uses the application/x-www-form-urlencoded mime-type, generally\n"
" understood by CGI's and similar, curl also supports the more capable\n"
" multipart/form-data type. This latter type supports things like file upload.\n"
@@ -1020,6 +1069,8 @@ puts (
"\n"
" From left-to-right:\n"
" % - percentage completed of the whole transfer\n"
);
puts(
" Total - total size of the whole expected transfer\n"
" % - percentage completed of the download\n"
" Received - currently downloaded amount of bytes\n"
@@ -1156,9 +1207,9 @@ puts (
"\n"
"HTTPS\n"
"\n"
" Secure HTTP requires SSLeay to be installed and used when curl is built. If\n"
" that is done, curl is capable of retrieving and posting documents using the\n"
" HTTPS procotol.\n"
" Secure HTTP requires SSL libraries to be installed and used when curl is\n"
" built. If that is done, curl is capable of retrieving and posting documents\n"
" using the HTTPS procotol.\n"
"\n"
" Example:\n"
"\n"
@@ -1171,9 +1222,10 @@ puts (
" browsers (Netscape and MSEI both use the so called PKCS#12 format). If you\n"
" want curl to use the certificates you use with your (favourite) browser, you\n"
" may need to download/compile a converter that can convert your browser's\n"
" formatted certificates to PEM formatted ones. Dr Stephen N. Henson has\n"
" written a patch for SSLeay that adds this functionality. You can get his\n"
" patch (that requires an SSLeay installation) from his site at:\n"
" formatted certificates to PEM formatted ones. This kind of converter is\n"
" included in recent versions of OpenSSL, and for older versions Dr Stephen\n"
" N. Henson has written a patch for SSLeay that adds this functionality. You\n"
" can get his patch (that requires an SSLeay installation) from his site at:\n"
" http://www.drh-consultancy.demon.co.uk/\n"
"\n"
" Example on how to automatically retrieve a document using a certificate with\n"
@@ -1299,6 +1351,36 @@ puts (
"\n"
"\n"
" The usage of the -x/--proxy flag overrides the environment variables.\n"
);
puts(
"\n"
"NETRC\n"
"\n"
" Unix introduced the .netrc concept a long time ago. It is a way for a user\n"
" to specify name and password for commonly visited ftp sites in a file so\n"
" that you don't have to type them in each time you visit those sites. You\n"
" realize this is a big security risk if someone else gets hold of your\n"
" passwords, so therefor most unix programs won't read this file unless it is\n"
" only readable by yourself (curl doesn't care though).\n"
"\n"
" Curl supports .netrc files if told so (using the -n/--netrc option). This is\n"
" not restricted to only ftp, but curl can use it for all protocols where\n"
" authentication is used.\n"
"\n"
" A very simple .netrc file could look something like:\n"
"\n"
" machine curl.haxx.nu login iamdaniel password mysecret\n"
"\n"
"CUSTOM OUTPUT\n"
"\n"
" To better allow script programmers to get to know about the progress of\n"
" curl, the -w/--write-out option was introduced. Using this, you can specify\n"
" what information from the previous transfer you want to extract.\n"
"\n"
" To display the amount of bytes downloaded together with some text and an\n"
" ending newline:\n"
"\n"
" curl -w 'We downloaded %{size_download} bytes\\n' www.download.com\n"
"\n"
"MAILING LIST\n"
"\n"

View File

@@ -46,17 +46,27 @@
#include <ctype.h>
#include <curl/curl.h>
#include <curl/types.h> /* new for v7 */
#include <curl/easy.h> /* new for v7 */
#include <curl/mprintf.h>
#include "../lib/getdate.h"
#ifdef GLOBURL
#include "urlglob.h"
#define CURLseparator "--_curl_--"
#define MIMEseparator "_curl_"
#endif
/* This define make use of the "Curlseparator" as opposed to the
MIMEseparator. We might add support for the latter one in the
future, and that's why this is left in the source. */
#define CURL_SEPARATORS
/* This is now designed to have its own local setup.h */
#include "setup.h"
#ifdef WIN32
#include <winsock.h>
#endif
#include "version.h"
#ifdef HAVE_IO_H /* typical win32 habit */
@@ -67,6 +77,25 @@
#include <unistd.h>
#endif
/* Just a set of bits */
#define CONF_DEFAULT 0
#define CONF_VERBOSE (1<<5) /* talk a lot */
#define CONF_HEADER (1<<8) /* throw the header out too */
#define CONF_NOPROGRESS (1<<10) /* shut off the progress meter */
#define CONF_NOBODY (1<<11) /* use HEAD to get http document */
#define CONF_FAILONERROR (1<<12) /* no output on http error codes >= 300 */
#define CONF_UPLOAD (1<<14) /* this is an upload */
#define CONF_POST (1<<15) /* HTTP POST method */
#define CONF_FTPLISTONLY (1<<16) /* Use NLST when listing ftp dir */
#define CONF_FTPAPPEND (1<<20) /* Append instead of overwrite on upload! */
#define CONF_NETRC (1<<22) /* read user+password from .netrc */
#define CONF_FOLLOWLOCATION (1<<23) /* use Location: Luke! */
#define CONF_FTPASCII (1<<24) /* use TYPE A for transfer */
#define CONF_HTTPPOST (1<<25) /* multipart/form-data HTTP POST */
#define CONF_PUT (1<<27) /* PUT the input file */
#define CONF_MUTE (1<<28) /* force NOPROGRESS */
#ifndef HAVE_STRDUP
/* Ultrix doesn't have strdup(), so make a quick clone: */
char *strdup(char *str)
@@ -88,6 +117,72 @@ char *strdup(char *str)
extern void hugehelp(void);
/***********************************************************************
* Start with some silly functions to make win32-systems survive
***********************************************************************/
#if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
static void win32_cleanup(void)
{
WSACleanup();
}
static CURLcode win32_init(void)
{
WORD wVersionRequested;
WSADATA wsaData;
int err;
wVersionRequested = MAKEWORD(1, 1);
err = WSAStartup(wVersionRequested, &wsaData);
if (err != 0)
/* Tell the user that we couldn't find a useable */
/* winsock.dll. */
return CURLE_FAILED_INIT;
/* Confirm that the Windows Sockets DLL supports 1.1.*/
/* Note that if the DLL supports versions greater */
/* than 1.1 in addition to 1.1, it will still return */
/* 1.1 in wVersion since that is the version we */
/* requested. */
if ( LOBYTE( wsaData.wVersion ) != 1 ||
HIBYTE( wsaData.wVersion ) != 1 ) {
/* Tell the user that we couldn't find a useable */
/* winsock.dll. */
WSACleanup();
return CURLE_FAILED_INIT;
}
return CURLE_OK;
}
/* The Windows Sockets DLL is acceptable. Proceed. */
#else
static CURLcode win32_init(void) { return CURLE_OK; }
#define win32_cleanup()
#endif
/*
* This is the main global constructor for the app. Call this before
* _any_ libcurl usage. If this fails, *NO* libcurl functions may be
* used, or havoc may be the result.
*/
CURLcode main_init(void)
{
return win32_init();
}
/*
* This is the main global destructor for the app. Call this after
* _all_ libcurl usage is done.
*/
void main_free(void)
{
win32_cleanup();
}
static void helpf(char *fmt, ...)
{
va_list ap;
@@ -227,7 +322,10 @@ static void GetStr(char **string,
{
if(*string)
free(*string);
if(value && *value)
*string = strdup(value);
else
*string = NULL;
}
static char *file2string(FILE *file)
@@ -350,7 +448,7 @@ static int getparameter(char *flag, /* f or -long-flag */
if(parse) {
/* this is the second match, we can't continue! */
helpf("option --%s is ambiguous\n", &flag[1]);
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
}
parse = aliases[j].letter;
hit = j;
@@ -358,7 +456,7 @@ static int getparameter(char *flag, /* f or -long-flag */
}
if(hit < 0) {
helpf("unknown option -%s.\n", flag);
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
}
}
else {
@@ -384,18 +482,18 @@ static int getparameter(char *flag, /* f or -long-flag */
}
if(hit < 0) {
helpf("unknown option -%c.\n", letter);
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
}
}
if(hit < 0) {
helpf("unknown option -%c.\n", letter);
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
}
if(!nextarg && aliases[hit].extraparam) {
helpf("option -%s/--%s requires an extra argument!\n",
aliases[hit].letter,
aliases[hit].lname);
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
}
else if(nextarg && aliases[hit].extraparam)
*usedarg = TRUE; /* mark it as used */
@@ -421,7 +519,7 @@ static int getparameter(char *flag, /* f or -long-flag */
break;
}
now=time(NULL);
config->condtime=get_date(nextarg, &now);
config->condtime=curl_getdate(nextarg, &now);
if(-1 == config->condtime) {
/* now let's see if it is a file name to get the time from instead! */
struct stat statbuf;
@@ -516,7 +614,6 @@ static int getparameter(char *flag, /* f or -long-flag */
break;
case 'e':
GetStr(&config->referer, nextarg);
config->conf |= CONF_REFERER;
break;
case 'E':
{
@@ -540,13 +637,12 @@ static int getparameter(char *flag, /* f or -long-flag */
if(curl_FormParse(nextarg,
&config->httppost,
&config->last_post))
return URG_FAILED_INIT;
config->conf |= CONF_HTTPPOST; /* no toggle, OR! */
return CURLE_FAILED_INIT;
break;
case 'h': /* h for help */
help();
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
case 'H':
head = (struct HttpHeader *)malloc(sizeof(struct HttpHeader));
if(head) {
@@ -589,7 +685,7 @@ static int getparameter(char *flag, /* f or -long-flag */
break;
case 'M': /* M for manual, huge help */
hugehelp();
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
case 'n':
/* pick info from .netrc, if this is used for http, curl will
automatically enfore user+password with the request */
@@ -613,7 +709,6 @@ static int getparameter(char *flag, /* f or -long-flag */
this will make us try to get the "default" address.
NOTE: this is a changed behaviour since the released 4.1!
*/
config->conf |= CONF_FTPPORT;
GetStr(&config->ftpport, nextarg);
break;
#if 0
@@ -642,7 +737,6 @@ static int getparameter(char *flag, /* f or -long-flag */
case 'r':
/* byte range requested */
GetStr(&config->range, nextarg);
config->conf |= CONF_RANGE;
break;
case 's':
/* don't show progress meter, don't show errors : */
@@ -665,19 +759,17 @@ static int getparameter(char *flag, /* f or -long-flag */
case 'u':
/* user:password */
GetStr(&config->userpwd, nextarg);
config->conf |= CONF_USERPWD;
break;
case 'U':
/* Proxy user:password */
GetStr(&config->proxyuserpwd, nextarg);
config->conf |= CONF_PROXYUSERPWD;
break;
case 'v':
config->conf ^= CONF_VERBOSE; /* talk a lot */
break;
case 'V':
printf(CURL_ID "%s\n", curl_version());
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
case 'w':
/* get the output string */
if('@' == *nextarg) {
@@ -698,14 +790,7 @@ static int getparameter(char *flag, /* f or -long-flag */
break;
case 'x':
/* proxy */
if(!*nextarg) {
/* disable proxy when no proxy is given */
config->conf &= ~CONF_PROXY;
}
else {
config->conf |= CONF_PROXY;
GetStr(&config->proxy, nextarg);
}
break;
case 'X':
/* HTTP request */
@@ -729,13 +814,13 @@ static int getparameter(char *flag, /* f or -long-flag */
helpf("Unknown option '%c'\n", letter);
else
helpf("Unknown option\n"); /* short help blurb */
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
}
hit = -1;
} while(*++parse && !*usedarg);
return URG_OK;
return CURLE_OK;
}
@@ -747,16 +832,21 @@ static int parseconfig(char *filename,
char configbuffer[4096];
char filebuffer[256];
bool usedarg;
char *home=NULL;
if(!filename || !*filename) {
/* NULL or no file name attempts to load .curlrc from the homedir! */
#define CURLRC DOT_CHAR "curlrc"
char *home = curl_GetEnv("HOME"); /* portable environment reader */
home = curl_GetEnv("HOME"); /* portable environment reader */
if(!home || (strlen(home)>(sizeof(filebuffer)-strlen(CURLRC))))
return URG_OK;
if(!home)
return CURLE_OK;
if(strlen(home)>(sizeof(filebuffer)-strlen(CURLRC))) {
free(home);
return CURLE_OK;
}
sprintf(filebuffer, "%s%s%s", home, DIR_CHAR, CURLRC);
@@ -824,7 +914,9 @@ static int parseconfig(char *filename,
if(file != stdin)
fclose(file);
}
return URG_OK;
if(home)
free(home);
return CURLE_OK;
}
struct OutStruct {
@@ -856,17 +948,17 @@ int my_fwrite(void *buffer, size_t size, size_t nmemb, FILE *stream)
int main(int argc, char *argv[])
{
char errorbuffer[URLGET_ERROR_SIZE];
char errorbuffer[CURL_ERROR_SIZE];
struct OutStruct outs;
struct OutStruct heads;
char *url = NULL;
#ifdef GLOBURL
URLGlob *urls;
int urlnum;
char *outfiles = NULL;
int separator = 0;
#endif
FILE *infd = stdin;
FILE *headerfilep = NULL;
@@ -874,7 +966,8 @@ int main(int argc, char *argv[])
int infilesize=-1; /* -1 means unknown */
bool stillflags=TRUE;
int res=URG_OK;
CURL *curl;
int res=CURLE_OK;
int i;
outs.stream = stdout;
@@ -911,7 +1004,7 @@ int main(int argc, char *argv[])
if ((argc < 2) && !config.url) {
helpf(NULL);
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
}
/* Parse options */
@@ -944,7 +1037,7 @@ int main(int argc, char *argv[])
else {
if(url) {
helpf("only one URL is supported!\n");
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
}
url = argv[i];
}
@@ -957,15 +1050,18 @@ int main(int argc, char *argv[])
if(!url) {
helpf("no URL specified!\n");
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
}
#if 0
fprintf(stderr, "URL: %s PROXY: %s\n", url, config.proxy?config.proxy:"none");
#endif
#ifdef GLOBURL
urlnum = glob_url(&urls, url); /* expand '{...}' and '[...]' expressions and return
total number of URLs in pattern set */
/* expand '{...}' and '[...]' expressions and return total number of URLs
in pattern set */
res = glob_url(&urls, url, &urlnum);
if(res != CURLE_OK)
return res;
outfiles = config.outfile; /* save outfile pattern befor expansion */
if (!outfiles && !config.remotefile && urlnum > 1) {
#ifdef CURL_SEPARATORS
@@ -982,11 +1078,10 @@ int main(int argc, char *argv[])
for (i = 0; (url = next_url(urls)); ++i) {
if (outfiles)
config.outfile = strdup(outfiles);
#endif
if(config.outfile && config.infile) {
helpf("you can't both upload and download!\n");
return URG_FAILED_INIT;
return CURLE_FAILED_INIT;
}
if (config.outfile || config.remotefile) {
@@ -1005,13 +1100,11 @@ int main(int argc, char *argv[])
config.outfile = strrchr(config.outfile, '/');
if(!config.outfile || !strlen(++config.outfile)) {
helpf("Remote file name has no length!\n");
return URG_WRITE_ERROR;
return CURLE_WRITE_ERROR;
}
}
#ifdef GLOBURL
else /* fill '#1' ... '#9' terms from URL pattern */
config.outfile = match_url(config.outfile, *urls);
#endif
if((0 == config.resume_from) && config.use_resume) {
/* we're told to continue where we are now, then we get the size of the
@@ -1030,7 +1123,7 @@ int main(int argc, char *argv[])
outs.stream=(FILE *) fopen(config.outfile, config.resume_from?"ab":"wb");
if (!outs.stream) {
helpf("Can't open '%s'!\n", config.outfile);
return URG_WRITE_ERROR;
return CURLE_WRITE_ERROR;
}
}
else {
@@ -1057,7 +1150,7 @@ int main(int argc, char *argv[])
urlbuffer=(char *)malloc(strlen(url) + strlen(config.infile) + 3);
if(!urlbuffer) {
helpf("out of memory\n");
return URG_OUT_OF_MEMORY;
return CURLE_OUT_OF_MEMORY;
}
if(ptr)
/* there is a trailing slash on the URL */
@@ -1072,7 +1165,7 @@ int main(int argc, char *argv[])
infd=(FILE *) fopen(config.infile, "rb");
if (!infd || stat(config.infile, &fileinfo)) {
helpf("Can't open '%s'!\n", config.infile);
return URG_READ_ERROR;
return CURLE_READ_ERROR;
}
infilesize=fileinfo.st_size;
@@ -1086,14 +1179,12 @@ int main(int argc, char *argv[])
/* open file for output: */
if(strcmp(config.headerfile,"-"))
{
headerfilep=(FILE *) fopen(config.headerfile, "wb");
if (!headerfilep) {
helpf("Can't open '%s'!\n", config.headerfile);
return URG_WRITE_ERROR;
}
heads.filename = config.headerfile;
headerfilep=NULL;
}
else
headerfilep=stdout;
heads.stream = headerfilep;
}
if(outs.stream && isatty(fileno(outs.stream)) &&
@@ -1102,7 +1193,7 @@ int main(int argc, char *argv[])
we switch off the progress meter */
config.conf |= CONF_NOPROGRESS;
#ifdef GLOBURL
if (urlnum > 1) {
fprintf(stderr, "\n[%d/%d]: %s --> %s\n", i+1, urlnum, url, config.outfile ? config.outfile : "<stdout>");
if (separator) {
@@ -1115,56 +1206,136 @@ int main(int argc, char *argv[])
#endif
}
}
#endif
if(!config.errors)
config.errors = stderr;
res = curl_urlget(URGTAG_FILE, (FILE *)&outs, /* where to store */
URGTAG_WRITEFUNCTION, my_fwrite, /* what call to write */
URGTAG_INFILE, infd, /* for uploads */
URGTAG_INFILESIZE, infilesize, /* size of uploaded file */
URGTAG_URL, url, /* what to fetch */
URGTAG_PROXY, config.proxy, /* proxy to use */
URGTAG_FLAGS, config.conf, /* flags */
URGTAG_USERPWD, config.userpwd, /* user + passwd */
URGTAG_PROXYUSERPWD, config.proxyuserpwd, /* Proxy user + passwd */
URGTAG_RANGE, config.range, /* range of document */
URGTAG_ERRORBUFFER, errorbuffer,
URGTAG_TIMEOUT, config.timeout,
URGTAG_POSTFIELDS, config.postfields,
URGTAG_REFERER, config.referer,
URGTAG_USERAGENT, config.useragent,
URGTAG_FTPPORT, config.ftpport,
URGTAG_LOW_SPEED_LIMIT, config.low_speed_limit,
URGTAG_LOW_SPEED_TIME, config.low_speed_time,
URGTAG_RESUME_FROM, config.use_resume?config.resume_from:0,
URGTAG_COOKIE, config.cookie,
URGTAG_HTTPHEADER, config.headers,
URGTAG_HTTPPOST, config.httppost,
URGTAG_SSLCERT, config.cert,
URGTAG_SSLCERTPASSWD, config.cert_passwd,
URGTAG_CRLF, config.crlf,
URGTAG_QUOTE, config.quote,
URGTAG_POSTQUOTE, config.postquote,
URGTAG_WRITEHEADER, headerfilep,
URGTAG_COOKIEFILE, config.cookiefile,
URGTAG_SSLVERSION, config.ssl_version,
URGTAG_TIMECONDITION, config.timecond,
URGTAG_TIMEVALUE, config.condtime,
URGTAG_CUSTOMREQUEST, config.customrequest,
URGTAG_STDERR, config.errors,
URGTAG_PROGRESSMODE, config.progressmode,
URGTAG_WRITEINFO, config.writeout,
URGTAG_DONE); /* always terminate the list of tags */
if((res!=URG_OK) && config.showerror)
main_init();
#if 0
/* This is code left from the pre-v7 time, left here mainly as a reminder
and possibly as a warning! ;-) */
res = curl_urlget(CURLOPT_FILE, (FILE *)&outs, /* where to store */
CURLOPT_WRITEFUNCTION, my_fwrite, /* what call to write */
CURLOPT_INFILE, infd, /* for uploads */
CURLOPT_INFILESIZE, infilesize, /* size of uploaded file */
CURLOPT_URL, url, /* what to fetch */
CURLOPT_PROXY, config.proxy, /* proxy to use */
CURLOPT_FLAGS, config.conf, /* flags */
CURLOPT_USERPWD, config.userpwd, /* user + passwd */
CURLOPT_PROXYUSERPWD, config.proxyuserpwd, /* Proxy user + passwd */
CURLOPT_RANGE, config.range, /* range of document */
CURLOPT_ERRORBUFFER, errorbuffer,
CURLOPT_TIMEOUT, config.timeout,
CURLOPT_POSTFIELDS, config.postfields,
CURLOPT_REFERER, config.referer,
CURLOPT_USERAGENT, config.useragent,
CURLOPT_FTPPORT, config.ftpport,
CURLOPT_LOW_SPEED_LIMIT, config.low_speed_limit,
CURLOPT_LOW_SPEED_TIME, config.low_speed_time,
CURLOPT_RESUME_FROM, config.use_resume?config.resume_from:0,
CURLOPT_COOKIE, config.cookie,
CURLOPT_HTTPHEADER, config.headers,
CURLOPT_HTTPPOST, config.httppost,
CURLOPT_SSLCERT, config.cert,
CURLOPT_SSLCERTPASSWD, config.cert_passwd,
CURLOPT_CRLF, config.crlf,
CURLOPT_QUOTE, config.quote,
CURLOPT_POSTQUOTE, config.postquote,
CURLOPT_WRITEHEADER, config.headerfile?&heads:NULL,
CURLOPT_COOKIEFILE, config.cookiefile,
CURLOPT_SSLVERSION, config.ssl_version,
CURLOPT_TIMECONDITION, config.timecond,
CURLOPT_TIMEVALUE, config.condtime,
CURLOPT_CUSTOMREQUEST, config.customrequest,
CURLOPT_STDERR, config.errors,
CURLOPT_PROGRESSMODE, config.progressmode,
CURLOPT_WRITEINFO, config.writeout,
CURLOPT_DONE); /* always terminate the list of tags */
#endif
/* The new, v7-style easy-interface! */
curl = curl_easy_init();
if(curl) {
curl_easy_setopt(curl, CURLOPT_FILE, (FILE *)&outs); /* where to store */
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, my_fwrite); /* what call to write */
curl_easy_setopt(curl, CURLOPT_INFILE, infd); /* for uploads */
curl_easy_setopt(curl, CURLOPT_INFILESIZE, infilesize); /* size of uploaded file */
curl_easy_setopt(curl, CURLOPT_URL, url); /* what to fetch */
curl_easy_setopt(curl, CURLOPT_PROXY, config.proxy); /* proxy to use */
#if 0
curl_easy_setopt(curl, CURLOPT_FLAGS, config.conf); /* flags */
#else
curl_easy_setopt(curl, CURLOPT_VERBOSE, config.conf&CONF_VERBOSE);
curl_easy_setopt(curl, CURLOPT_HEADER, config.conf&CONF_HEADER);
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, config.conf&CONF_NOPROGRESS);
curl_easy_setopt(curl, CURLOPT_NOBODY, config.conf&CONF_NOBODY);
curl_easy_setopt(curl, CURLOPT_FAILONERROR, config.conf&CONF_FAILONERROR);
curl_easy_setopt(curl, CURLOPT_UPLOAD, config.conf&CONF_UPLOAD);
curl_easy_setopt(curl, CURLOPT_POST, config.conf&CONF_POST);
curl_easy_setopt(curl, CURLOPT_FTPLISTONLY, config.conf&CONF_FTPLISTONLY);
curl_easy_setopt(curl, CURLOPT_FTPAPPEND, config.conf&CONF_FTPAPPEND);
curl_easy_setopt(curl, CURLOPT_NETRC, config.conf&CONF_NETRC);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, config.conf&CONF_FOLLOWLOCATION);
curl_easy_setopt(curl, CURLOPT_FTPASCII, config.conf&CONF_FTPASCII);
curl_easy_setopt(curl, CURLOPT_PUT, config.conf&CONF_PUT);
curl_easy_setopt(curl, CURLOPT_MUTE, config.conf&CONF_MUTE);
#endif
curl_easy_setopt(curl, CURLOPT_USERPWD, config.userpwd); /* user + passwd */
curl_easy_setopt(curl, CURLOPT_PROXYUSERPWD, config.proxyuserpwd); /* Proxy user + passwd */
curl_easy_setopt(curl, CURLOPT_RANGE, config.range); /* range of document */
curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, errorbuffer);
curl_easy_setopt(curl, CURLOPT_TIMEOUT, config.timeout);
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, config.postfields);
curl_easy_setopt(curl, CURLOPT_REFERER, config.referer);
curl_easy_setopt(curl, CURLOPT_USERAGENT, config.useragent);
curl_easy_setopt(curl, CURLOPT_FTPPORT, config.ftpport);
curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, config.low_speed_limit);
curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, config.low_speed_time);
curl_easy_setopt(curl, CURLOPT_RESUME_FROM, config.use_resume?config.resume_from:0);
curl_easy_setopt(curl, CURLOPT_COOKIE, config.cookie);
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, config.headers);
curl_easy_setopt(curl, CURLOPT_HTTPPOST, config.httppost);
curl_easy_setopt(curl, CURLOPT_SSLCERT, config.cert);
curl_easy_setopt(curl, CURLOPT_SSLCERTPASSWD, config.cert_passwd);
curl_easy_setopt(curl, CURLOPT_CRLF, config.crlf);
curl_easy_setopt(curl, CURLOPT_QUOTE, config.quote);
curl_easy_setopt(curl, CURLOPT_POSTQUOTE, config.postquote);
curl_easy_setopt(curl, CURLOPT_WRITEHEADER, config.headerfile?&heads:NULL);
curl_easy_setopt(curl, CURLOPT_COOKIEFILE, config.cookiefile);
curl_easy_setopt(curl, CURLOPT_SSLVERSION, config.ssl_version);
curl_easy_setopt(curl, CURLOPT_TIMECONDITION, config.timecond);
curl_easy_setopt(curl, CURLOPT_TIMEVALUE, config.condtime);
curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, config.customrequest);
curl_easy_setopt(curl, CURLOPT_STDERR, config.errors);
curl_easy_setopt(curl, CURLOPT_PROGRESSMODE, config.progressmode);
curl_easy_setopt(curl, CURLOPT_WRITEINFO, config.writeout);
res = curl_easy_perform(curl);
/* always cleanup */
curl_easy_cleanup(curl);
if((res!=CURLE_OK) && config.showerror)
fprintf(config.errors, "curl: (%d) %s\n", res, errorbuffer);
}
else
fprintf(config.errors, "curl: failed to init libcurl!\n");
main_free();
if((config.errors != stderr) &&
(config.errors != stdout))
/* it wasn't directed to stdout or stderr so close the file! */
fclose(config.errors);
if(config.headerfile && !headerfilep && heads.stream)
fclose(heads.stream);
if(urlbuffer)
free(urlbuffer);
if (config.outfile && outs.stream)
@@ -1177,7 +1348,6 @@ int main(int argc, char *argv[])
if(config.url)
free(config.url);
#ifdef GLOBURL
if(url)
free(url);
if(config.outfile && !config.remotefile)
@@ -1186,7 +1356,6 @@ int main(int argc, char *argv[])
#ifdef MIME_SEPARATORS
if (separator)
printf("--%s--\n", MIMEseparator);
#endif
#endif
curl_slist_free_all(config.quote); /* the checks for config.quote == NULL */

View File

@@ -69,16 +69,25 @@ print "void hugehelp(void)\n";
print "{\n";
print "puts (\n";
$outsize=0;
for(@out) {
chop;
$new = $_;
$outsize += length($new);
$new =~ s/\\/\\\\/g;
$new =~ s/\"/\\\"/g;
printf("\"%s\\n\"\n", $new);
if($outsize > 10000) {
# terminate and make another puts() call here
print ");\n puts(\n";
$outsize=0;
}
}
print " ) ;\n}\n"

View File

@@ -82,10 +82,20 @@ int fileno( FILE *stream);
#define DIR_CHAR "\\"
#define DOT_CHAR "_"
#else
#ifdef __EMX__
/* 20000318 mgs
* OS/2 supports leading dots in filenames if the volume is formatted
* with JFS or HPFS. */
#define PATH_CHAR ";"
#define DIR_CHAR "\\"
#define DOT_CHAR "."
#else
#define PATH_CHAR ":"
#define DIR_CHAR "/"
#define DOT_CHAR "."
#endif
#endif
#endif /* __SETUP_H */

View File

@@ -69,18 +69,18 @@ int glob_set(char *pattern, int pos) {
switch (*pattern) {
case '\0': /* URL ended while set was still open */
printf("error: unmatched brace at pos %d\n", pos);
exit (URG_URL_MALFORMAT);
exit (CURLE_URL_MALFORMAT);
case '{':
case '[': /* no nested expressions at this time */
printf("error: nested braces not supported %d\n", pos);
exit (URG_URL_MALFORMAT);
exit (CURLE_URL_MALFORMAT);
case ',':
case '}': /* set element completed */
*buf = '\0';
pat->content.Set.elements = realloc(pat->content.Set.elements, (pat->content.Set.size + 1) * sizeof(char*));
if (!pat->content.Set.elements) {
printf("out of memory in set pattern\n");
exit(URG_OUT_OF_MEMORY);
exit(CURLE_OUT_OF_MEMORY);
}
pat->content.Set.elements[pat->content.Set.size] = strdup(glob_buffer);
++pat->content.Set.size;
@@ -95,11 +95,11 @@ int glob_set(char *pattern, int pos) {
break;
case ']': /* illegal closing bracket */
printf("error: illegal pattern at pos %d\n", pos);
exit (URG_URL_MALFORMAT);
exit (CURLE_URL_MALFORMAT);
case '\\': /* escaped character, skip '\' */
if (*(buf+1) == '\0') { /* but no escaping of '\0'! */
printf("error: illegal pattern at pos %d\n", pos);
exit (URG_URL_MALFORMAT);
exit (CURLE_URL_MALFORMAT);
}
++pattern;
++pos; /* intentional fallthrough */
@@ -108,7 +108,7 @@ int glob_set(char *pattern, int pos) {
++pos;
}
}
exit (URG_FAILED_INIT);
exit (CURLE_FAILED_INIT);
}
int glob_range(char *pattern, int pos) {
@@ -132,7 +132,7 @@ int glob_range(char *pattern, int pos) {
pat->content.CharRange.max_c - pat->content.CharRange.min_c > 'z' - 'a') {
/* the pattern is not well-formed */
printf("error: illegal pattern or range specification after pos %d\n", pos);
exit (URG_URL_MALFORMAT);
exit (CURLE_URL_MALFORMAT);
}
pat->content.CharRange.ptr_c = pat->content.CharRange.min_c;
/* always check for a literal (may be "") between patterns */
@@ -146,7 +146,7 @@ int glob_range(char *pattern, int pos) {
pat->content.NumRange.min_n >= pat->content.NumRange.max_n) {
/* the pattern is not well-formed */
printf("error: illegal pattern or range specification after pos %d\n", pos);
exit (URG_URL_MALFORMAT);
exit (CURLE_URL_MALFORMAT);
}
if (*pattern == '0') { /* leading zero specified */
c = pattern;
@@ -161,7 +161,7 @@ int glob_range(char *pattern, int pos) {
glob_word(c, pos + (c - pattern));
}
printf("error: illegal character in range specification at pos %d\n", pos);
exit (URG_URL_MALFORMAT);
exit (CURLE_URL_MALFORMAT);
}
int glob_word(char *pattern, int pos) {
@@ -174,14 +174,14 @@ int glob_word(char *pattern, int pos) {
while (*pattern != '\0' && *pattern != '{' && *pattern != '[') {
if (*pattern == '}' || *pattern == ']') {
printf("illegal character at position %d\n", pos);
exit (URG_URL_MALFORMAT);
exit (CURLE_URL_MALFORMAT);
}
if (*pattern == '\\') { /* escape character, skip '\' */
++pattern;
++pos;
if (*pattern == '\0') { /* but no escaping of '\0'! */
printf("illegal character at position %d\n", pos);
exit (URG_URL_MALFORMAT);
exit (CURLE_URL_MALFORMAT);
}
}
*buf++ = *pattern++; /* copy character to literal */
@@ -201,20 +201,25 @@ int glob_word(char *pattern, int pos) {
return glob_range(++pattern, ++pos);/* process range pattern */
}
printf("internal error\n");
exit (URG_FAILED_INIT);
exit (CURLE_FAILED_INIT);
}
int glob_url(URLGlob** glob, char* url) {
int urlnum; /* counts instances of a globbed pattern */
int glob_url(URLGlob** glob, char* url, int *urlnum)
{
if (strlen(url)>URL_MAX_LENGTH) {
printf("Illegally sized URL\n");
return CURLE_URL_MALFORMAT;
}
glob_expand = (URLGlob*)malloc(sizeof(URLGlob));
glob_expand->size = 0;
urlnum = glob_word(url, 1);
*urlnum = glob_word(url, 1);
*glob = glob_expand;
return urlnum;
return CURLE_OK;
}
char *next_url(URLGlob *glob) {
char *next_url(URLGlob *glob)
{
static int beenhere = 0;
char *buf = glob_buffer;
URLPattern *pat;
@@ -253,7 +258,7 @@ char *next_url(URLGlob *glob) {
break;
default:
printf("internal error: invalid pattern type (%d)\n", pat->type);
exit (URG_FAILED_INIT);
exit (CURLE_FAILED_INIT);
}
}
if (carry) /* first pattern ptr has run into overflow, done! */
@@ -282,7 +287,7 @@ char *next_url(URLGlob *glob) {
break;
default:
printf("internal error: invalid pattern type (%d)\n", pat->type);
exit (URG_FAILED_INIT);
exit (CURLE_FAILED_INIT);
}
}
}
@@ -300,12 +305,12 @@ char *match_url(char *filename, URLGlob glob) {
if (!isdigit((int)*++filename) ||
*filename == '0') { /* only '#1' ... '#9' allowed */
printf("illegal matching expression\n");
exit(URG_URL_MALFORMAT);
exit(CURLE_URL_MALFORMAT);
}
i = *filename - '1';
if (i + 1 > glob.size / 2) {
printf("match against nonexisting pattern\n");
exit(URG_URL_MALFORMAT);
exit(CURLE_URL_MALFORMAT);
}
pat = glob.pattern[i];
switch (pat.type) {
@@ -322,7 +327,7 @@ char *match_url(char *filename, URLGlob glob) {
break;
default:
printf("internal error: invalid pattern type (%d)\n", pat.type);
exit (URG_FAILED_INIT);
exit (CURLE_FAILED_INIT);
}
++filename;
}

View File

@@ -67,7 +67,7 @@ typedef struct {
int size;
} URLGlob;
int glob_url(URLGlob**, char*);
int glob_url(URLGlob**, char*, int *);
char* next_url(URLGlob*);
char* match_url(char*, URLGlob);

View File

@@ -1,3 +1,3 @@
#define CURL_NAME "curl"
#define CURL_VERSION "6.5"
#define CURL_VERSION "7.0.1beta"
#define CURL_ID CURL_NAME " " CURL_VERSION " (" OS ") "