- new upstream release

Kamil Dudka 2010-02-11 11:31:31 +00:00
parent fd19e7e468
commit 6d0277e653
14 changed files with 46 additions and 520 deletions

View File

@@ -1 +1 @@
-curl-7.19.7.tar.lzma
+curl-7.20.0.tar.lzma

View File

@@ -1,8 +1,8 @@
diff --git a/curl-config.in b/curl-config.in
index 1c439a1..9d675ae 100644
index d336999..1c5873e 100644
--- a/curl-config.in
+++ b/curl-config.in
@@ -42,7 +42,6 @@ Available values for OPTION include:
@@ -43,7 +43,6 @@ Available values for OPTION include:
--libs library linking information
--prefix curl install prefix
--protocols newline separated list of enabled protocols
@@ -10,7 +10,7 @@ index 1c439a1..9d675ae 100644
--version output version information
--vernum output the version information as a number (hexadecimal)
EOF
@@ -69,7 +68,7 @@ while test $# -gt 0; do
@@ -70,7 +69,7 @@ while test $# -gt 0; do
;;
--cc)
@@ -19,7 +19,7 @@ index 1c439a1..9d675ae 100644
;;
--prefix)
@@ -130,20 +129,7 @@ while test $# -gt 0; do
@@ -131,20 +130,7 @@ while test $# -gt 0; do
;;
--libs)
@@ -40,4 +40,4 @@ index 1c439a1..9d675ae 100644
+ pkg-config libcurl --libs
;;
*)
--configure)

View File

@@ -1,13 +1,12 @@
diff --git a/libcurl.pc.in b/libcurl.pc.in
index 25beadd..d7c0805 100644
index b765994..d843688 100644
--- a/libcurl.pc.in
+++ b/libcurl.pc.in
@@ -35,6 +35,6 @@ Name: libcurl
URL: http://curl.haxx.se/
Description: Library to transfer files with ftp, http, etc.
Version: @VERSION@
-Libs: -L${libdir} -lcurl @LDFLAGS@ @LIBS@
-Libs.private: @LIBCURL_LIBS@ @LIBS@
-Libs: -L${libdir} -lcurl @LIBS@
+Libs: -L${libdir} -lcurl
+Libs.private: @LIBCURL_LIBS@ @LIBS@ @LDFLAGS@
Libs.private: @LIBCURL_LIBS@ @LIBS@
Cflags: -I${includedir}

View File

@@ -1,8 +1,8 @@
diff --git a/configure.ac b/configure.ac
index e575a20..81a7772 100644
index 7ca2145..0413332 100644
--- a/configure.ac
+++ b/configure.ac
@@ -228,7 +228,10 @@ dnl **********************************************************************
@@ -253,7 +253,10 @@ dnl **********************************************************************
CURL_CHECK_COMPILER
CURL_SET_COMPILER_BASIC_OPTS

View File

@@ -1,8 +1,8 @@
diff --git a/lib/hostares.c b/lib/hostares.c
index 15f9c84..719efc1 100644
index 7cc24de..46e0f8a 100644
--- a/lib/hostares.c
+++ b/lib/hostares.c
@@ -327,7 +327,7 @@ Curl_addrinfo *Curl_getaddrinfo(struct connectdata *conn,
@@ -381,7 +381,7 @@ Curl_addrinfo *Curl_getaddrinfo(struct connectdata *conn,
switch(data->set.ip_version) {
default:

View File

@@ -1,224 +0,0 @@
diff --git a/docs/curl.1 b/docs/curl.1
index e4f5a00..7b7d549 100644
--- a/docs/curl.1
+++ b/docs/curl.1
@@ -598,6 +598,9 @@ time only.
make it discard all "session cookies". This will basically have the same effect
as if a new session is started. Typical browsers always discard session
cookies when they're closed down.
+.IP "-J/--remote-header-name"
+(HTTP) This option tells the -O/--remote-name option to use the server-specified
+Content-Disposition filename instead of extracting a filename from the URL.
.IP "-k/--insecure"
(SSL) This option explicitly allows curl to perform "insecure" SSL connections
and transfers. All SSL connections are attempted to be made secure by using
diff --git a/src/main.c b/src/main.c
index dae96ee..84ed7d2 100644
--- a/src/main.c
+++ b/src/main.c
@@ -612,6 +612,7 @@ struct Configurable {
bool post302;
bool nokeepalive; /* for keepalive needs */
long alivetime;
+ bool content_disposition; /* use Content-disposition filename */
int default_node_flags; /* default flags to search for each 'node', which is
basically each given URL to transfer */
@@ -817,6 +818,7 @@ static void help(void)
" --krb <level> Enable Kerberos with specified security level (F)",
" --libcurl <file> Dump libcurl equivalent code of this command line",
" --limit-rate <rate> Limit transfer speed to this rate",
+ " -J/--remote-header-name Use the header-provided filename (H)",
" -l/--list-only List only names of an FTP directory (F)",
" --local-port <num>[-num] Force use of these local port numbers",
" -L/--location Follow Location: hints (H)",
@@ -1777,6 +1779,7 @@ static ParameterError getparameter(char *flag, /* f or -long-flag */
{"i", "include", FALSE},
{"I", "head", FALSE},
{"j", "junk-session-cookies", FALSE},
+ {"J", "remote-header-name", FALSE},
{"k", "insecure", FALSE},
{"K", "config", TRUE},
{"l", "list-only", FALSE},
@@ -2634,6 +2637,14 @@ static ParameterError getparameter(char *flag, /* f or -long-flag */
&config->httpreq))
return PARAM_BAD_USE;
break;
+ case 'J': /* --remote-header-name */
+ if (config->include_headers) {
+ warnf(config,
+ "--include and --remote-header-name cannot be combined.\n");
+ return PARAM_BAD_USE;
+ }
+ config->content_disposition = toggle;
+ break;
case 'k': /* allow insecure SSL connects */
config->insecure_ok = toggle;
break;
@@ -3288,19 +3299,37 @@ static size_t my_fwrite(void *buffer, size_t sz, size_t nmemb, void *stream)
struct OutStruct *out=(struct OutStruct *)stream;
struct Configurable *config = out->config;
+ /*
+ * Once that libcurl has called back my_fwrite() the returned value
+ * is checked against the amount that was intended to be written, if
+ * it does not match then it fails with CURLE_WRITE_ERROR. So at this
+ * point returning a value different from sz*nmemb indicates failure.
+ */
+ const size_t err_rc = (sz * nmemb) ? 0 : 1;
+
if(!out->stream) {
+ if (!out->filename) {
+ warnf(config, "Remote filename has no length!\n");
+ return err_rc; /* Failure */
+ }
+
+ if (config->content_disposition) {
+ /* don't overwrite existing files */
+ FILE* f = fopen(out->filename, "r");
+ if (f) {
+ fclose(f);
+ warnf(config, "Refusing to overwrite %s: %s\n", out->filename,
+ strerror(EEXIST));
+ return err_rc; /* Failure */
+ }
+ }
+
/* open file for writing */
out->stream=fopen(out->filename, "wb");
if(!out->stream) {
- warnf(config, "Failed to create the file %s\n", out->filename);
- /*
- * Once that libcurl has called back my_fwrite() the returned value
- * is checked against the amount that was intended to be written, if
- * it does not match then it fails with CURLE_WRITE_ERROR. So at this
- * point returning a value different from sz*nmemb indicates failure.
- */
- rc = (0 == (sz * nmemb)) ? 1 : 0;
- return rc; /* failure */
+ warnf(config, "Failed to create the file %s: %s\n", out->filename,
+ strerror(errno));
+ return err_rc; /* failure */
}
}
@@ -4011,6 +4040,87 @@ static bool stdin_upload(const char *uploadfile)
return curlx_strequal(uploadfile, "-") || curlx_strequal(uploadfile, ".");
}
+static char*
+parse_filename(char *ptr, int len)
+{
+ char* copy;
+ char* p;
+ char* q;
+ char quote = 0;
+
+ /* simple implementation of strndup() */
+ copy = malloc(len+1);
+ if (!copy)
+ return NULL;
+ strncpy(copy, ptr, len);
+ copy[len] = 0;
+
+ p = copy;
+ if (*p == '\'' || *p == '"') {
+ /* store the starting quote */
+ quote = *p;
+ p++;
+ }
+
+ /* if the filename contains a path, only use filename portion */
+ q = strrchr(copy, '/');
+ if (q) {
+ p=q+1;
+ if (!*p) {
+ free(copy);
+ return NULL;
+ }
+ }
+
+ q = strrchr(p, quote);
+ if (q)
+ *q = 0;
+
+ if (copy!=p)
+ memmove(copy, p, strlen(p)+1);
+
+ return copy;
+}
+
+static size_t
+header_callback(void *ptr, size_t size, size_t nmemb, void *stream)
+{
+ struct OutStruct* outs = (struct OutStruct*)stream;
+ const char* str = (char*)ptr;
+ const size_t cb = size*nmemb;
+ const char* end = (char*)ptr + cb;
+
+ if (cb > 20 && curlx_strnequal(str, "Content-disposition:", 20)) {
+ char *p = (char*)str + 20;
+
+ /* look for the 'filename=' parameter
+ (encoded filenames (*=) are not supported) */
+ while (1) {
+ char *filename;
+
+ while (p < end && !isalpha(*p))
+ p++;
+ if (p > end-9)
+ break;
+
+ if (memcmp(p, "filename=", 9)) {
+ /* no match, find next parameter */
+ while ((p < end) && (*p != ';'))
+ p++;
+ continue;
+ }
+ p+=9;
+ filename = parse_filename(p, cb - (p - str));
+ if (filename) {
+ outs->filename = filename;
+ break;
+ }
+ }
+ }
+
+ return cb;
+}
+
static int
operate(struct Configurable *config, int argc, argv_item_t argv[])
{
@@ -4393,7 +4503,7 @@ operate(struct Configurable *config, int argc, argv_item_t argv[])
pc++;
outfile = *pc ? strdup(pc): NULL;
}
- if(!outfile || !*outfile) {
+ if((!outfile || !*outfile) && !config->content_disposition) {
helpf(config->errors, "Remote file name has no length!\n");
res = CURLE_WRITE_ERROR;
free(url);
@@ -4994,6 +5104,12 @@ operate(struct Configurable *config, int argc, argv_item_t argv[])
my_setopt(curl, CURLOPT_POSTREDIR, config->post301 |
(config->post302 ? CURL_REDIR_POST_302 : FALSE));
+ if ((urlnode->flags & GETOUT_USEREMOTE)
+ && config->content_disposition) {
+ my_setopt(curl, CURLOPT_HEADERFUNCTION, header_callback);
+ my_setopt(curl, CURLOPT_HEADERDATA, &outs);
+ }
+
retry_numretries = config->req_retry;
retrystart = curlx_tvnow();
@@ -5005,6 +5121,9 @@ operate(struct Configurable *config, int argc, argv_item_t argv[])
break;
}
+ if (config->content_disposition && outs.stream && !config->mute)
+ printf("curl: Saved to filename '%s'\n", outs.filename);
+
/* if retry-max-time is non-zero, make sure we haven't exceeded the
time */
if(retry_numretries &&
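
For reference, a minimal standalone sketch of the libcurl mechanism the patch above relies on (illustrative only, not part of this commit; header_cb and the example.com URL are placeholders): CURLOPT_HEADERFUNCTION delivers each response header line to a callback, and the callback must return size*nmemb, otherwise libcurl aborts the transfer with CURLE_WRITE_ERROR -- the same contract the my_fwrite comment above documents.

#include <strings.h>
#include <curl/curl.h>

/* called once per response header line */
static size_t header_cb(void *ptr, size_t size, size_t nmemb, void *userdata)
{
  const size_t cb = size * nmemb;
  (void)userdata;
  if(cb > 20 && !strncasecmp((const char *)ptr, "Content-Disposition:", 20)) {
    /* a real implementation would extract the filename= parameter here,
       as parse_filename() does in the patch above */
  }
  return cb; /* any other value makes libcurl fail with CURLE_WRITE_ERROR */
}

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/download");
    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, header_cb);
    curl_easy_setopt(curl, CURLOPT_HEADERDATA, NULL);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}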

View File

@@ -1,12 +0,0 @@
diff --git a/lib/nss.c b/lib/nss.c
index f5c69e6..d1a9d1a 100644
--- a/lib/nss.c
+++ b/lib/nss.c
@@ -1265,6 +1265,7 @@ CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex)
if(!connssl->handle)
goto error;
PR_Close(model); /* We don't need this any more */
+ model = NULL;
/* This is the password associated with the cert that we're using */
if (data->set.str[STRING_KEY_PASSWD]) {

View File

@@ -1,105 +0,0 @@
diff --git a/lib/nss.c b/lib/nss.c
index ea904af..6e8d242 100644
--- a/lib/nss.c
+++ b/lib/nss.c
@@ -83,8 +83,6 @@ PRLock * nss_initlock = NULL;
volatile int initialized = 0;
-#define HANDSHAKE_TIMEOUT 30
-
typedef struct {
const char *name;
int num;
@@ -970,6 +968,8 @@ CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex)
char *certDir = NULL;
int curlerr;
const int *cipher_to_enable;
+ PRSocketOptionData sock_opt;
+ PRUint32 timeout;
curlerr = CURLE_SSL_CONNECT_ERROR;
@@ -1063,6 +1063,12 @@ CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex)
goto error;
model = SSL_ImportFD(NULL, model);
+ /* make the socket nonblocking */
+ sock_opt.option = PR_SockOpt_Nonblocking;
+ sock_opt.value.non_blocking = PR_TRUE;
+ if(PR_SetSocketOption(model, &sock_opt) != SECSuccess)
+ goto error;
+
if(SSL_OptionSet(model, SSL_SECURITY, PR_TRUE) != SECSuccess)
goto error;
if(SSL_OptionSet(model, SSL_HANDSHAKE_AS_SERVER, PR_FALSE) != SECSuccess)
@@ -1234,9 +1240,8 @@ CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex)
SSL_SetURL(connssl->handle, conn->host.name);
/* Force the handshake now */
- if(SSL_ForceHandshakeWithTimeout(connssl->handle,
- PR_SecondsToInterval(HANDSHAKE_TIMEOUT))
- != SECSuccess) {
+ timeout = PR_MillisecondsToInterval(Curl_timeleft(conn, NULL, TRUE));
+ if(SSL_ForceHandshakeWithTimeout(connssl->handle, timeout) != SECSuccess) {
if(conn->data->set.ssl.certverifyresult == SSL_ERROR_BAD_CERT_DOMAIN)
curlerr = CURLE_PEER_FAILED_VERIFICATION;
else if(conn->data->set.ssl.certverifyresult!=0)
@@ -1288,27 +1293,12 @@ int Curl_nss_send(struct connectdata *conn, /* connection data */
const void *mem, /* send this data */
size_t len) /* amount to write */
{
- PRInt32 err;
- struct SessionHandle *data = conn->data;
- PRInt32 timeout;
int rc;
- if(data->set.timeout)
- timeout = PR_MillisecondsToInterval((PRUint32)data->set.timeout);
- else
- timeout = PR_MillisecondsToInterval(DEFAULT_CONNECT_TIMEOUT);
-
- rc = PR_Send(conn->ssl[sockindex].handle, mem, (int)len, 0, timeout);
+ rc = PR_Send(conn->ssl[sockindex].handle, mem, (int)len, 0, -1);
if(rc < 0) {
- err = PR_GetError();
-
- if(err == PR_IO_TIMEOUT_ERROR) {
- failf(data, "SSL connection timeout");
- return CURLE_OPERATION_TIMEDOUT;
- }
-
- failf(conn->data, "SSL write: error %d", err);
+ failf(conn->data, "SSL write: error %d", PR_GetError());
return -1;
}
return rc; /* number of bytes */
@@ -1326,15 +1316,8 @@ ssize_t Curl_nss_recv(struct connectdata * conn, /* connection data */
bool * wouldblock)
{
ssize_t nread;
- struct SessionHandle *data = conn->data;
- PRInt32 timeout;
- if(data->set.timeout)
- timeout = PR_SecondsToInterval((PRUint32)data->set.timeout);
- else
- timeout = PR_MillisecondsToInterval(DEFAULT_CONNECT_TIMEOUT);
-
- nread = PR_Recv(conn->ssl[num].handle, buf, (int)buffersize, 0, timeout);
+ nread = PR_Recv(conn->ssl[num].handle, buf, (int)buffersize, 0, -1);
*wouldblock = FALSE;
if(nread < 0) {
/* failed SSL read */
@@ -1344,10 +1327,6 @@ ssize_t Curl_nss_recv(struct connectdata * conn, /* connection data */
*wouldblock = TRUE;
return -1; /* basically EWOULDBLOCK */
}
- if(err == PR_IO_TIMEOUT_ERROR) {
- failf(data, "SSL connection timeout");
- return CURLE_OPERATION_TIMEDOUT;
- }
failf(conn->data, "SSL read: errno %d", err);
return -1;
}
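
For context, a minimal NSPR sketch of the non-blocking socket option this (now upstream) patch sets on the SSL model descriptor before the handshake (illustrative only, not part of this commit; the sketch checks NSPR's own PR_SUCCESS return value on a throwaway socket):

#include <stdio.h>
#include <prio.h>   /* NSPR I/O: PR_NewTCPSocket, PR_SetSocketOption */

int main(void)
{
  PRFileDesc *fd = PR_NewTCPSocket();
  PRSocketOptionData sock_opt;

  if(!fd)
    return 1;

  /* the same option the patch applies to the SSL model socket */
  sock_opt.option = PR_SockOpt_Nonblocking;
  sock_opt.value.non_blocking = PR_TRUE;

  if(PR_SetSocketOption(fd, &sock_opt) != PR_SUCCESS)
    fprintf(stderr, "failed to switch the socket to non-blocking mode\n");

  PR_Close(fd);
  return 0;
}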

View File

@@ -1,24 +0,0 @@
diff --git a/lib/nss.c b/lib/nss.c
index d1a9d1a..637663e 100644
--- a/lib/nss.c
+++ b/lib/nss.c
@@ -990,7 +990,9 @@ CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex)
{
PRInt32 err;
PRFileDesc *model = NULL;
- PRBool ssl2, ssl3, tlsv1;
+ PRBool ssl2 = PR_FALSE;
+ PRBool ssl3 = PR_FALSE;
+ PRBool tlsv1 = PR_FALSE;
struct SessionHandle *data = conn->data;
curl_socket_t sockfd = conn->sock[sockindex];
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
@@ -1106,8 +1108,6 @@ CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex)
if(SSL_OptionSet(model, SSL_HANDSHAKE_AS_CLIENT, PR_TRUE) != SECSuccess)
goto error;
- ssl2 = ssl3 = tlsv1 = PR_FALSE;
-
switch (data->set.ssl.version) {
default:
case CURL_SSLVERSION_DEFAULT:

View File

@@ -1,13 +1,13 @@
diff --git a/tests/runtests.pl b/tests/runtests.pl
index a6597b9..c900ad8 100755
index d4e9072..fb32c78 100755
--- a/tests/runtests.pl
+++ b/tests/runtests.pl
@@ -209,7 +209,7 @@ my $sshdverstr; # for socks server, ssh daemon version string
@@ -222,7 +222,7 @@ my $sshdverstr; # for socks server, ssh daemon version string
my $sshderror; # for socks server, ssh daemon version error
my $defserverlogslocktimeout = 20; # timeout to await server logs lock removal
-my $defpostcommanddelay = 0; # delay between command and postcheck sections
+my $defpostcommanddelay = 1; # delay between command and postcheck sections
my $testnumcheck; # test number, set in singletest sub.
my $timestats; # time stamping and stats generation
my $fullstats; # show time stats for every single test

View File

@@ -1,118 +0,0 @@
diff --git a/lib/nss.c b/lib/nss.c
index 6e8d242..f5c69e6 100644
--- a/lib/nss.c
+++ b/lib/nss.c
@@ -844,6 +844,36 @@ static SECStatus SelectClientCert(void *arg, PRFileDesc *sock,
return SECSuccess;
}
+/* This function is supposed to decide, which error codes should be used
+ * to conclude server is TLS intolerant.
+ *
+ * taken from xulrunner - nsNSSIOLayer.cpp
+ */
+static PRBool
+isTLSIntoleranceError(PRInt32 err)
+{
+ switch (err) {
+ case SSL_ERROR_BAD_MAC_ALERT:
+ case SSL_ERROR_BAD_MAC_READ:
+ case SSL_ERROR_HANDSHAKE_FAILURE_ALERT:
+ case SSL_ERROR_HANDSHAKE_UNEXPECTED_ALERT:
+ case SSL_ERROR_CLIENT_KEY_EXCHANGE_FAILURE:
+ case SSL_ERROR_ILLEGAL_PARAMETER_ALERT:
+ case SSL_ERROR_NO_CYPHER_OVERLAP:
+ case SSL_ERROR_BAD_SERVER:
+ case SSL_ERROR_BAD_BLOCK_PADDING:
+ case SSL_ERROR_UNSUPPORTED_VERSION:
+ case SSL_ERROR_PROTOCOL_VERSION_ALERT:
+ case SSL_ERROR_RX_MALFORMED_FINISHED:
+ case SSL_ERROR_BAD_HANDSHAKE_HASH_VALUE:
+ case SSL_ERROR_DECODE_ERROR_ALERT:
+ case SSL_ERROR_RX_UNKNOWN_ALERT:
+ return PR_TRUE;
+ default:
+ return PR_FALSE;
+ }
+}
+
/**
* Global SSL init
*
@@ -1081,7 +1111,11 @@ CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex)
switch (data->set.ssl.version) {
default:
case CURL_SSLVERSION_DEFAULT:
- ssl3 = tlsv1 = PR_TRUE;
+ ssl3 = PR_TRUE;
+ if (data->state.ssl_connect_retry)
+ infof(data, "TLS disabled due to previous handshake failure\n");
+ else
+ tlsv1 = PR_TRUE;
break;
case CURL_SSLVERSION_TLSv1:
tlsv1 = PR_TRUE;
@@ -1104,6 +1138,9 @@ CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex)
if(SSL_OptionSet(model, SSL_V2_COMPATIBLE_HELLO, ssl2) != SECSuccess)
goto error;
+ /* reset the flag to avoid an infinite loop */
+ data->state.ssl_connect_retry = FALSE;
+
/* enable all ciphers from enable_ciphers_by_default */
cipher_to_enable = enable_ciphers_by_default;
while (SSL_NULL_WITH_NULL_NULL != *cipher_to_enable) {
@@ -1280,10 +1317,21 @@ CURLcode Curl_nss_connect(struct connectdata *conn, int sockindex)
return CURLE_OK;
error:
+ /* reset the flag to avoid an infinite loop */
+ data->state.ssl_connect_retry = FALSE;
+
err = PR_GetError();
infof(data, "NSS error %d\n", err);
if(model)
PR_Close(model);
+
+ if (ssl3 && tlsv1 && isTLSIntoleranceError(err)) {
+ /* schedule reconnect through Curl_retry_request() */
+ data->state.ssl_connect_retry = TRUE;
+ infof(data, "Error in TLS handshake, trying SSLv3...\n");
+ return CURLE_OK;
+ }
+
return curlerr;
}
diff --git a/lib/transfer.c b/lib/transfer.c
index 1f69706..c3a1976 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -2572,10 +2572,11 @@ CURLcode Curl_retry_request(struct connectdata *conn,
if(data->set.upload && !(conn->protocol&PROT_HTTP))
return CURLE_OK;
- if((data->req.bytecount +
+ if(/* workaround for broken TLS servers */ data->state.ssl_connect_retry ||
+ ((data->req.bytecount +
data->req.headerbytecount == 0) &&
conn->bits.reuse &&
- !data->set.opt_no_body) {
+ !data->set.opt_no_body)) {
/* We got no data, we attempted to re-use a connection and yet we want a
"body". This might happen if the connection was left alive when we were
done using it before, but that was closed when we wanted to read from
diff --git a/lib/urldata.h b/lib/urldata.h
index b9e5c24..b181e3f 100644
--- a/lib/urldata.h
+++ b/lib/urldata.h
@@ -1331,6 +1331,9 @@ struct UrlState {
} proto;
/* current user of this SessionHandle instance, or NULL */
struct connectdata *current_conn;
+
+ /* if true, force SSL connection retry (workaround for certain servers) */
+ bool ssl_connect_retry;
};

curl-7.20.0.tar.lzma.asc (new file)
View File

@@ -0,0 +1,7 @@
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.10 (GNU/Linux)
iEYEABECAAYFAktxNCEACgkQeOEcayedXJHZRACglh9Xu8QTsP27jfTMEJ3el5pZ
lwQAniuNQgCKFlfm+VPbd1M6rZ9v/+Jo
=V6xG
-----END PGP SIGNATURE-----

View File

@@ -1,22 +1,33 @@
Summary: A utility for getting files from remote servers (FTP, HTTP, and others)
Name: curl
Version: 7.19.7
Release: 11%{?dist}
Version: 7.20.0
Release: 1%{?dist}
License: MIT
Group: Applications/Internet
Source: http://curl.haxx.se/download/%{name}-%{version}.tar.lzma
Source2: curlbuild.h
Patch1: curl-7.19.7-nss-nonblock.patch
Patch2: curl-7.19.7-ssl-retry.patch
Patch3: curl-7.19.7-modelfree.patch
Patch4: curl-7.19.7-nss-warning.patch
Patch5: curl-7.19.7-content-disposition.patch
# patch making libcurl multilib ready (by not installing static libraries)
Patch101: curl-7.15.3-multilib.patch
# tweak of libcurl.pc
Patch102: curl-7.16.0-privlibs.patch
# prevent configure script from discarding -g in CFLAGS (#496778)
Patch103: curl-7.19.4-debug.patch
# suppress occasional failure of curl test-suite on s390; caused more likely
# by the test-suite infrastructure than (lib)curl itself
Patch104: curl-7.19.7-s390-sleep.patch
# use localhost6 instead of ip6-localhost in the curl test-suite
Patch105: curl-7.19.7-localhost6.patch
# rebuild of cURL against newer c-ares-devel has caused a regression (#548269)
# this patch reverts back the old behavior of curl-7.19.7-4.fc13
# NOTE: this is a temporary workaround only
Patch106: curl-7.19.7-ares-ipv6.patch
Provides: webclient
URL: http://curl.haxx.se/
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
@@ -81,13 +92,6 @@ use cURL's capabilities internally.
%prep
%setup -q
# upstream patches (already applied)
%patch1 -p1
%patch2 -p1
%patch3 -p1
%patch4 -p1
%patch5 -p1
# Fedora patches
%patch101 -p1
%patch102 -p1
@@ -96,13 +100,9 @@ use cURL's capabilities internally.
# http://curl.haxx.se/mail/lib-2009-12/0031.html
%patch104 -p1
# we have localhost6 instead of ip6-localhost as name for ::1
# temporarily disabled (clash with patch #106)
#%patch105 -p1
# rebuild of cURL against newer c-ares-devel has caused a regression (#548269)
# this patch reverts back the old behavior of curl-7.19.7-4.fc13
# NOTE: this is a temporary workaround only
%patch106 -p1
autoconf
@@ -196,6 +196,9 @@ rm -rf $RPM_BUILD_ROOT
%{_datadir}/aclocal/libcurl.m4
%changelog
* Thu Feb 11 2010 Kamil Dudka <kdudka@redhat.com> 7.20.0-1
- new upstream release - added support for IMAP(S), POP3(S), SMTP(S) and RTSP
* Fri Jan 29 2010 Kamil Dudka <kdudka@redhat.com> 7.19.7-11
- upstream patch adding a new option -J/--remote-header-name
- dropped temporary workaround for #545779

View File

@@ -1 +1 @@
-26124caef7359de6338172abafa98dc0 curl-7.19.7.tar.lzma
+a68d8169e4aae58d35977ae355c9b38a curl-7.20.0.tar.lzma