Hi Denys,

Thank you for the review.
I moved the buffer to G, and added an incremental sleep between the retries
(as in GNU wget).

Even if we use poll() and read(), if we don't want to read byte by byte we
need to use a buffer (like fgets does), as if we read two newlines at once
we need to parse the buffer twice.
As for the default number of retries, if it isn't 1, that might cause
problems with existing scripts that assume wget doesn't try 20 times.
Also, using 'retries' as the variable name might be a little confusing,
since when 'retries' = 1 it won't retry the download at all.

Martin
On Sun, 20 Jan 2019 at 18:46, Denys Vlasenko <vda.li...@googlemail.com>
wrote:

> Throws a warning:
>
> networking/wget.c: In function 'fread_buffered':
> networking/wget.c:575: error: declaration of 'read' shadows a global
> declaration
>
> Way too large, and adds to bss:
>
> function                                             old     new   delta
> fgets_buffer                                           -    4096   +4096
> fgets_trim_sanitize                                  128     621    +493
> retrieve_file_data                                   579     775    +196
> open_socket                                           49     117     +68
> fgets_buffer_len                                       -       4      +4
> wget_main                                           2437    2435      -2
> set_alarm                                             27       -     -27
>
> ------------------------------------------------------------------------------
> (add/remove: 2/1 grow/shrink: 3/1 up/down: 4857/-29)         Total: 4828
> bytes
>    text       data        bss        dec        hex    filename
>  979050        485       7296     986831      f0ecf    busybox_old
>  979775        485      11400     991660      f21ac    busybox_unstripped
>
> The second patch is mangled by gmail, please resend as attachment.
>
> I played with a version where one try is done in a child,
> see attached z.diff.
>
> function                                             old     new   delta
> download_one_url                                       -    2221   +2221
> retrieve_file_data                                   579     602     +23
> ftpcmd                                               133     151     +18
> get_sanitized_hdr                                    156     162      +6
> fgets_trim_sanitize                                  128     131      +3
> base64enc                                             46      49      +3
> packed_usage                                       33070   33042     -28
> wget_main                                           2437     565   -1872
>
> ------------------------------------------------------------------------------
> (add/remove: 1/0 grow/shrink: 5/2 up/down: 2274/-1900)        Total: 374
> bytes
>
> I'm not entirely happy with this approach either...
> ...probably need to rewrite existing code to get rid of fgets(),
> use poll() + read().
>
From 3d70e006affde33a9b278b2a62c81d4eb40f2394 Mon Sep 17 00:00:00 2001
From: Martin Lewis <martin.lewis....@gmail.com>
Date: Wed, 23 Jan 2019 15:46:16 +0100
Subject: [PATCH 1/2] wget: replace set_alarm with non blocking functions to
 support retries

---
 networking/wget.c | 161 +++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 152 insertions(+), 9 deletions(-)

diff --git a/networking/wget.c b/networking/wget.c
index fa4d21a..3df8e74 100644
--- a/networking/wget.c
+++ b/networking/wget.c
@@ -249,6 +249,8 @@ struct globals {
 	 * an order of magnitude slower than with big one.
 	 */
 	char wget_buf[CONFIG_FEATURE_COPYBUF_KB*1024] ALIGNED(16);
+	char fgets_buffer[4096];
+	size_t fgets_buffer_len;
 } FIX_ALIASING;
 #define G (*ptr_to_globals)
 #define INIT_G() do { \
@@ -363,6 +365,8 @@ static char *base64enc(const char *str)
 }
 #endif
 
+/* We need to find another way to handle timeouts in order to support retries */
+#if 0
 #if ENABLE_FEATURE_WGET_TIMEOUT
 static void alarm_handler(int sig UNUSED_PARAM)
 {
@@ -382,6 +386,7 @@ static void set_alarm(void)
 # define set_alarm()   ((void)0)
 # define clear_alarm() ((void)0)
 #endif
+#endif
 
 #if ENABLE_FEATURE_WGET_OPENSSL
 /*
@@ -408,10 +413,16 @@ static FILE *open_socket(len_and_sockaddr *lsa)
 {
 	int fd;
 	FILE *fp;
-
-	set_alarm();
-	fd = xconnect_stream(lsa);
-	clear_alarm();
+#if ENABLE_FEATURE_WGET_TIMEOUT
+	struct timeval timeout = {G.timeout_seconds, 0};
+#endif
+	fd = xsocket(lsa->u.sa.sa_family, SOCK_STREAM, 0);
+#if ENABLE_FEATURE_WGET_TIMEOUT
+	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof (timeout)) < 0) {
+		bb_perror_msg_and_die("setsockopt failed\n");
+	}
+#endif
+	xconnect(fd, &lsa->u.sa, lsa->len);
 
 	/* glibc 2.4 seems to try seeking on it - ??! */
 	/* hopefully it understands what ESPIPE means... */
@@ -444,16 +455,147 @@ static char* sanitize_string(char *s)
 	return s;
 }
 
+/*  fgets() is blocking, so we need a non-blocking version.
+    We need to do some buffering so we can cut at \n */
+/* Returns buffer if successful, otherwise returns NULL. Supports timeout. */
+static char *fgets_read_to_newline(char *buffer, size_t len, FILE *fp)
+{
+	struct pollfd polldata;
+	size_t bytes_read = 0;
+	char *newline = NULL;
+	char *buffer_start = buffer;
+
+	polldata.fd = fileno(fp);
+	polldata.events = POLLIN | POLLPRI;
+
+#if ENABLE_FEATURE_WGET_TIMEOUT
+	ndelay_on(polldata.fd);
+#endif
+
+	if (len < 1) {
+		/* Should not happen */
+		return NULL;
+	}
+
+	/* Clear buffer */
+	if (G.fgets_buffer_len > 0) {
+		if ((newline = memchr(G.fgets_buffer, '\n', sizeof (G.fgets_buffer)))) {
+			size_t to_copy = 0;
+			/* The buffer contains a complete line; copy it out and return */
+			if ((newline + 1 - G.fgets_buffer) + 1 <= len)
+				to_copy = newline + 1 - G.fgets_buffer;
+			else
+				/* Otherwise we copy only part of it */
+				to_copy = len - 1;
+
+			memcpy(buffer, G.fgets_buffer, to_copy);
+			/* NUL-terminate, to comply with the fgets() we are replacing */
+			buffer[to_copy] = '\0';
+			G.fgets_buffer_len -= to_copy;
+			memmove(G.fgets_buffer, G.fgets_buffer + to_copy, G.fgets_buffer_len);
+
+			return buffer_start;
+		}
+
+		/* We don't have \n yet, empty the buffer */
+		if (G.fgets_buffer_len <= len - 1) {
+			memcpy(buffer, G.fgets_buffer, G.fgets_buffer_len);
+			len -= G.fgets_buffer_len;
+			buffer += G.fgets_buffer_len;
+			G.fgets_buffer_len = 0;
+		} else {
+			/* Or if it is not big enough */
+			memcpy(buffer, G.fgets_buffer, len - 1);
+			/* NUL-terminate, to comply with the fgets() we are replacing */
+			buffer[len - 1] = '\0';
+			G.fgets_buffer_len -= len - 1;
+			memmove(G.fgets_buffer, G.fgets_buffer + len - 1, G.fgets_buffer_len);
+
+			return buffer_start;
+		}
+	}
+
+	while (1) {
+		/* Check if data available */
+#if ENABLE_FEATURE_WGET_TIMEOUT
+		if (safe_poll(&polldata, 1, G.timeout_seconds * 1000) == 0) {
+#else
+		/* Infinite time */
+		if (safe_poll(&polldata, 1, -1) == 0) {
+#endif
+			/* Timed out */
+			return NULL;
+		}
+
+		bytes_read = fread(G.fgets_buffer + G.fgets_buffer_len, 1, sizeof (G.fgets_buffer) - G.fgets_buffer_len, fp);
+		if (bytes_read <= 0) {
+			return NULL;
+		}
+
+		G.fgets_buffer_len += bytes_read;
+		newline = memchr(G.fgets_buffer, '\n', G.fgets_buffer_len);
+		if (newline != NULL) {
+			size_t to_copy = 0;
+			/* The buffer contains a complete line; copy it out and return */
+			if ((newline + 1 - G.fgets_buffer) + 1 <= len)
+				to_copy = newline + 1 - G.fgets_buffer;
+			else
+				/* Otherwise we copy only part of it */
+				to_copy = len - 1;
+
+			memcpy(buffer, G.fgets_buffer, to_copy);
+			/* Comply fgets we are replacing */
+			buffer[to_copy] = '\0';
+			G.fgets_buffer_len -= to_copy;
+			memmove(G.fgets_buffer, G.fgets_buffer + to_copy, G.fgets_buffer_len);
+
+			return buffer_start;
+		}
+
+		if (G.fgets_buffer_len >= len - 1) {
+			/* Buffer is full, return it */
+			memcpy(buffer, G.fgets_buffer, len - 1);
+			/* NUL-terminate, to comply with the fgets() we are replacing */
+			buffer[len - 1] = '\0';
+			G.fgets_buffer_len -= len - 1;
+			memmove(G.fgets_buffer, G.fgets_buffer + len - 1, G.fgets_buffer_len);
+
+			return buffer_start;
+		}
+	}
+}
+
+/* Empty our buffer then call fread */
+static int fread_buffered(char *buffer, size_t len, FILE *fp)
+{
+	int bytes_read = 0;
+	if (G.fgets_buffer_len > 0) {
+		if (G.fgets_buffer_len <= len) {
+			memcpy(buffer, G.fgets_buffer, G.fgets_buffer_len);
+			bytes_read = G.fgets_buffer_len;
+			G.fgets_buffer_len = 0;
+			return bytes_read;
+		} else {
+			/* Or if it is not big enough */
+			memcpy(buffer, G.fgets_buffer, len);
+			G.fgets_buffer_len -= len;
+			memmove(G.fgets_buffer, G.fgets_buffer + len, G.fgets_buffer_len);
+			return len;
+		}
+	}
+
+	/* If no buffer then just call fread */
+	return fread(buffer, 1, len, fp);
+}
+
 /* Returns '\n' if it was seen, else '\0'. Trims at first '\r' or '\n' */
 static char fgets_trim_sanitize(FILE *fp, const char *fmt)
 {
 	char c;
 	char *buf_ptr;
 
-	set_alarm();
-	if (fgets(G.wget_buf, sizeof(G.wget_buf), fp) == NULL)
+	if (fgets_read_to_newline(G.wget_buf, sizeof(G.wget_buf), fp) == NULL)
 		bb_perror_msg_and_die("error getting response");
-	clear_alarm();
 
 	buf_ptr = strchrnul(G.wget_buf, '\n');
 	c = *buf_ptr;
@@ -921,7 +1063,7 @@ static void NOINLINE retrieve_file_data(FILE *dfp)
 					rdsz = (unsigned)G.content_len;
 				}
 			}
-			n = fread(G.wget_buf, 1, rdsz, dfp);
+			n = fread_buffered(G.wget_buf, rdsz, dfp);
 
 			if (n > 0) {
 				xwrite(G.output_fd, G.wget_buf, n);
@@ -1121,6 +1263,8 @@ static void download_one_url(const char *url)
 	/*G.content_len = 0; - redundant, got_clen = 0 is enough */
 	G.got_clen = 0;
 	G.chunked = 0;
+	/* If we get redirection, flush the current buffer */
+	G.fgets_buffer_len = 0;
 	if (use_proxy || target.protocol[0] != 'f' /*not ftp[s]*/) {
 		/*
 		 *  HTTP session
@@ -1462,7 +1606,6 @@ IF_DESKTOP(	"no-parent\0"        No_argument       "\xf0")
 
 #if ENABLE_FEATURE_WGET_TIMEOUT
 	G.timeout_seconds = 900;
-	signal(SIGALRM, alarm_handler);
 #endif
 	G.proxy_flag = "on";   /* use proxies if env vars are set */
 	G.user_agent = "Wget"; /* "User-Agent" header field */
-- 
1.9.1

From 809585bfcabeed3461641ea17fbef68429f156c6 Mon Sep 17 00:00:00 2001
From: Martin Lewis <martin.lewis....@gmail.com>
Date: Thu, 24 Jan 2019 08:52:19 +0100
Subject: [PATCH 2/2] wget: add support for retries in http requests

Replace die handlers with error returning so download_one_url can retry from the beginning.
When retries is 1 (default) the behaviour should be the same as before.
---
 networking/wget.c | 133 +++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 106 insertions(+), 27 deletions(-)

diff --git a/networking/wget.c b/networking/wget.c
index 3df8e74..0a23e99 100644
--- a/networking/wget.c
+++ b/networking/wget.c
@@ -125,13 +125,14 @@
 //usage:       "[-c|--continue] [--spider] [-q|--quiet] [-O|--output-document FILE]\n"
 //usage:       "	[-o|--output-file FILE] [--header 'header: value'] [-Y|--proxy on/off]\n"
 /* Since we ignore these opts, we don't show them in --help */
-/* //usage:    "	[--no-check-certificate] [--no-cache] [--passive-ftp] [-t TRIES]" */
+/* //usage:    "	[--no-check-certificate] [--no-cache] [--passive-ftp]" */
 /* //usage:    "	[-nv] [-nc] [-nH] [-np]" */
-//usage:       "	[-P DIR] [-S|--server-response] [-U|--user-agent AGENT]" IF_FEATURE_WGET_TIMEOUT(" [-T SEC]") " URL..."
+//usage:       "	[-t|--tries TRIES] [-P DIR] [-S|--server-response]\n"
+//usage:       "	[-U|--user-agent AGENT]" IF_FEATURE_WGET_TIMEOUT(" [-T SEC]") " URL..."
 //usage:	)
 //usage:	IF_NOT_FEATURE_WGET_LONG_OPTIONS(
-//usage:       "[-cq] [-O FILE] [-o FILE] [-Y on/off] [-P DIR] [-S] [-U AGENT]"
-//usage:			IF_FEATURE_WGET_TIMEOUT(" [-T SEC]") " URL..."
+//usage:       "[-cq] [-O FILE] [-o FILE] [-Y on/off] [-P DIR] [-S] [-U AGENT]\n"
+//usage:       "        [-t TRIES]" IF_FEATURE_WGET_TIMEOUT(" [-T SEC]") " URL..."
 //usage:	)
 //usage:#define wget_full_usage "\n\n"
 //usage:       "Retrieve files via HTTP or FTP\n"
@@ -150,6 +151,7 @@
 //usage:     "\n	-o FILE		Log messages to FILE"
 //usage:     "\n	-U STR		Use STR for User-Agent header"
 //usage:     "\n	-Y on/off	Use proxy"
+//usage:     "\n	-t TRIES	Set number of retries to TRIES (0 unlimits)"
 
 #include "libbb.h"
 
@@ -235,6 +237,7 @@ struct globals {
 	char *fname_log;        /* where to direct log (-o) */
 	const char *proxy_flag; /* Use proxies if env vars are set */
 	const char *user_agent; /* "User-Agent" header field */
+	unsigned tries; /* For -t option */
 	int output_fd;
 	int log_fd;
 	int o_flags;
@@ -409,6 +412,7 @@ static int is_ip_address(const char *string)
 }
 #endif
 
+/* Return NULL if connect() fails */
 static FILE *open_socket(len_and_sockaddr *lsa)
 {
 	int fd;
@@ -416,13 +420,19 @@ static FILE *open_socket(len_and_sockaddr *lsa)
 #if ENABLE_FEATURE_WGET_TIMEOUT
 	struct timeval timeout = {G.timeout_seconds, 0};
 #endif
+
 	fd = xsocket(lsa->u.sa.sa_family, SOCK_STREAM, 0);
 #if ENABLE_FEATURE_WGET_TIMEOUT
 	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof (timeout)) < 0) {
 		bb_perror_msg_and_die("setsockopt failed\n");
 	}
 #endif
-	xconnect(fd, &lsa->u.sa, lsa->len);
+	if (connect(fd, &lsa->u.sa, lsa->len) < 0) {
+		/* Failure */
+		bb_perror_msg("connect failed");
+		close(fd);
+		return NULL;
+	}
 
 	/* glibc 2.4 seems to try seeking on it - ??! */
 	/* hopefully it understands what ESPIPE means... */
@@ -588,14 +598,16 @@ static int fread_buffered(char *buffer, size_t len, FILE *fp)
 	return fread(buffer, 1, len, fp);
 }
 
-/* Returns '\n' if it was seen, else '\0'. Trims at first '\r' or '\n' */
-static char fgets_trim_sanitize(FILE *fp, const char *fmt)
+/* Returns '\n' if it was seen, -1 if a timeout occurred, else '\0'. Trims at first '\r' or '\n' */
+static signed char fgets_trim_sanitize(FILE *fp, const char *fmt)
 {
 	char c;
 	char *buf_ptr;
 
-	if (fgets_read_to_newline(G.wget_buf, sizeof(G.wget_buf), fp) == NULL)
-		bb_perror_msg_and_die("error getting response");
+	if (fgets_read_to_newline(G.wget_buf, sizeof(G.wget_buf), fp) == NULL) {
+		bb_perror_msg("error getting response");
+		return -1;
+	}
 
 	buf_ptr = strchrnul(G.wget_buf, '\n');
 	c = *buf_ptr;
@@ -633,7 +645,9 @@ static int ftpcmd(const char *s1, const char *s2, FILE *fp)
 	/* Read until "Nxx something" is received */
 	G.wget_buf[3] = 0;
 	do {
-		fgets_trim_sanitize(fp, "%s\n");
+		if (fgets_trim_sanitize(fp, "%s\n") == -1) {
+			xfunc_die();
+		}
 	} while (!isdigit(G.wget_buf[0]) || G.wget_buf[3] != ' ');
 
 	G.wget_buf[3] = '\0';
@@ -738,6 +752,11 @@ static char *get_sanitized_hdr(FILE *fp)
 	/* retrieve header line */
 	c = fgets_trim_sanitize(fp, "  %s\n");
 
+	if (c == -1) {
+		/* Timed out */
+		return NULL;
+	}
+
 	/* end of the headers? */
 	if (G.wget_buf[0] == '\0')
 		return NULL;
@@ -920,7 +939,11 @@ static FILE* prepare_ftp_session(FILE **dfpp, struct host_info *target, len_and_
 	char *pass;
 	int port;
 
+	/* TODO: Add retry support for ftp */
 	sfp = open_socket(lsa);
+	if (!sfp) {
+		xfunc_die();
+	}
 #if ENABLE_FEATURE_WGET_HTTPS
 	if (target->protocol == P_FTPS)
 		spawn_ssl_client(target->host, fileno(sfp), TLSLOOP_EXIT_ON_LOCAL_EOF);
@@ -975,7 +998,11 @@ static FILE* prepare_ftp_session(FILE **dfpp, struct host_info *target, len_and_
 
 	set_nport(&lsa->u.sa, htons(port));
 
+	/* TODO: Add retry support for ftp */
 	*dfpp = open_socket(lsa);
+	if (!*dfpp) {
+		xfunc_die();
+	}
 
 #if ENABLE_FEATURE_WGET_HTTPS
 	if (target->protocol == P_FTPS) {
@@ -1004,7 +1031,8 @@ static FILE* prepare_ftp_session(FILE **dfpp, struct host_info *target, len_and_
 	return sfp;
 }
 
-static void NOINLINE retrieve_file_data(FILE *dfp)
+/* Return -1 if times out so we can retry */
+static int NOINLINE retrieve_file_data(FILE *dfp)
 {
 #if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
 # if ENABLE_FEATURE_WGET_TIMEOUT
@@ -1063,6 +1091,8 @@ static void NOINLINE retrieve_file_data(FILE *dfp)
 					rdsz = (unsigned)G.content_len;
 				}
 			}
+			/* We probably have some data in fgets_buffer, so we need to
+			   flush it first */
 			n = fread_buffered(G.wget_buf, rdsz, dfp);
 
 			if (n > 0) {
@@ -1103,7 +1133,7 @@ static void NOINLINE retrieve_file_data(FILE *dfp)
 # if ENABLE_FEATURE_WGET_TIMEOUT
 				if (second_cnt != 0 && --second_cnt == 0) {
 					progress_meter(PROGRESS_END);
-					bb_error_msg_and_die("download timed out");
+					return -1;
 				}
 # endif
 				/* We used to loop back to poll here,
@@ -1127,7 +1157,9 @@ static void NOINLINE retrieve_file_data(FILE *dfp)
 			break;
 
 		/* Each chunk ends with "\r\n" - eat it */
-		fgets_trim_sanitize(dfp, NULL);
+		if (fgets_trim_sanitize(dfp, NULL) == -1) {
+			return -1;
+		}
  get_clen:
 		/* chunk size format is "HEXNUM[;name[=val]]\r\n" */
 		fgets_trim_sanitize(dfp, NULL);
@@ -1177,6 +1209,8 @@ static void NOINLINE retrieve_file_data(FILE *dfp)
 		else
 			fprintf(stderr, "'%s' saved\n", G.fname_out);
 	}
+
+	return 0;
 }
 
 static void download_one_url(const char *url)
@@ -1186,6 +1220,7 @@ static void download_one_url(const char *url)
 	len_and_sockaddr *lsa;
 	FILE *sfp;                      /* socket to web/ftp server         */
 	FILE *dfp;                      /* socket to ftp server (data)      */
+	unsigned cur_tries = 0;         /* number of tries so far           */
 	char *fname_out_alloc;
 	char *redirected_path = NULL;
 	struct host_info server;
@@ -1251,9 +1286,29 @@ static void download_one_url(const char *url)
 		 * We are not sure it exists on remote side */
 	}
 
+ retry:
+	cur_tries++;
+	if (G.tries != 0 && cur_tries > G.tries) {
+		if (G.tries != 1) /* Show message about the tries only if it was set to more than one */
+			bb_error_msg_and_die("Gave up after %u tries", G.tries);
+		else
+			xfunc_die();
+	}
+	if (cur_tries > 1) {
+		/* --waitretry=10 is default in GNU wget */
+		/* Sleep increasing number of seconds from 1 to 10 */
+		if (cur_tries < 10)
+			sleep(cur_tries - 1);
+		else
+			sleep(10);
+	}
 	redir_limit = 5;
  resolve_lsa:
-	lsa = xhost2sockaddr(server.host, server.port);
+	/* If DNS resolution fails, retry, don't die */
+	lsa = host2sockaddr(server.host, server.port);
+	if (!lsa)
+		goto retry;
+
 	if (!(option_mask32 & WGET_OPT_QUIET)) {
 		char *s = xmalloc_sockaddr2dotted(&lsa->u.sa);
 		fprintf(stderr, "Connecting to %s (%s)\n", server.host, s);
@@ -1283,6 +1338,8 @@ static void download_one_url(const char *url)
 # if ENABLE_FEATURE_WGET_HTTPS
 			if (fd < 0) { /* no openssl? try internal */
 				sfp = open_socket(lsa);
+				if (!sfp)
+					goto retry;
 				spawn_ssl_client(server.host, fileno(sfp), /*flags*/ 0);
 				goto socket_opened;
 			}
@@ -1295,15 +1352,22 @@ static void download_one_url(const char *url)
 			goto socket_opened;
 		}
 		sfp = open_socket(lsa);
+		if (!sfp) {
+			goto retry;
+		}
  socket_opened:
 #elif ENABLE_FEATURE_WGET_HTTPS
 		/* Only internal TLS support is configured */
 		sfp = open_socket(lsa);
-		if (server.protocol == P_HTTPS)
+		if (!sfp)
+			goto retry;
+		if (server.protocol == P_HTTPS) {
 			spawn_ssl_client(server.host, fileno(sfp), /*flags*/ 0);
 #else
 		/* ssl (https) support is not configured */
 		sfp = open_socket(lsa);
+		if (!sfp)
+			goto retry;
 #endif
 		/* Send HTTP request */
 		if (use_proxy) {
@@ -1379,7 +1443,11 @@ static void download_one_url(const char *url)
 		 * Retrieve HTTP response line and check for "200" status code.
 		 */
  read_response:
-		fgets_trim_sanitize(sfp, "  %s\n");
+		if (fgets_trim_sanitize(sfp, "  %s\n") == -1) {
+			/* Timed out */
+			bb_error_msg("timed out");
+			goto retry;
+		}
 
 		str = G.wget_buf;
 		str = skip_non_whitespace(str);
@@ -1454,7 +1522,8 @@ However, in real world it was observed that some web servers
 			/* Partial Content even though we did not ask for it??? */
 			/* fall through */
 		default:
-			bb_error_msg_and_die("server returned error: %s", G.wget_buf);
+			bb_error_msg("server returned error: %s", G.wget_buf);
+			goto retry;
 		}
 
 		/*
@@ -1480,19 +1549,24 @@ However, in real world it was observed that some web servers
 			if (key == KEY_content_length) {
 				G.content_len = BB_STRTOOFF(str, NULL, 10);
 				if (G.content_len < 0 || errno) {
-					bb_error_msg_and_die("content-length %s is garbage", str);
+					bb_error_msg("content-length %s is garbage", str);
+					goto retry;
 				}
 				G.got_clen = 1;
 				continue;
 			}
 			if (key == KEY_transfer_encoding) {
-				if (strcmp(str_tolower(str), "chunked") != 0)
-					bb_error_msg_and_die("transfer encoding '%s' is not supported", str);
+				if (strcmp(str_tolower(str), "chunked") != 0) {
+					bb_error_msg("transfer encoding '%s' is not supported", str);
+					goto retry;
+				}
 				G.chunked = 1;
 			}
 			if (key == KEY_location && status >= 300) {
-				if (--redir_limit == 0)
-					bb_error_msg_and_die("too many redirections");
+				if (--redir_limit == 0) {
+					bb_error_msg("too many redirections");
+					goto retry;
+				}
 				fclose(sfp);
 				if (str[0] == '/') {
 					free(redirected_path);
@@ -1531,13 +1605,18 @@ However, in real world it was observed that some web servers
 	free(lsa);
 
 	if (!(option_mask32 & WGET_OPT_SPIDER)) {
+		int retrieve_retval;
 		if (G.output_fd < 0)
 			G.output_fd = xopen(G.fname_out, G.o_flags);
-		retrieve_file_data(dfp);
+		retrieve_retval = retrieve_file_data(dfp);
 		if (!(option_mask32 & WGET_OPT_OUTNAME)) {
 			xclose(G.output_fd);
 			G.output_fd = -1;
 		}
+		if (retrieve_retval < 0) { /* We timed out; the fd is already closed, so we can retry */
+			bb_error_msg("download timed out");
+			goto retry;
+		}
 	} else {
 		if (!(option_mask32 & WGET_OPT_QUIET))
 			fprintf(stderr, "remote file exists\n");
@@ -1576,8 +1655,8 @@ int wget_main(int argc UNUSED_PARAM, char **argv)
 		"user-agent\0"       Required_argument "U"
 IF_FEATURE_WGET_TIMEOUT(
 		"timeout\0"          Required_argument "T")
+		"tries\0"            Required_argument "t"
 		/* Ignored: */
-IF_DESKTOP(	"tries\0"            Required_argument "t")
 		"header\0"           Required_argument "\xff"
 		"post-data\0"        Required_argument "\xfe"
 		"spider\0"           No_argument       "\xfd"
@@ -1607,12 +1686,12 @@ IF_DESKTOP(	"no-parent\0"        No_argument       "\xf0")
 #if ENABLE_FEATURE_WGET_TIMEOUT
 	G.timeout_seconds = 900;
 #endif
+	G.tries = 1;           /* Scripts might assume that wget exists after one try */
 	G.proxy_flag = "on";   /* use proxies if env vars are set */
 	G.user_agent = "Wget"; /* "User-Agent" header field */
 
 	GETOPT32(argv, "^"
-		"cqSO:o:P:Y:U:T:+"
-		/*ignored:*/ "t:"
+		"cqSO:o:P:Y:U:T:+t:+"
 		/*ignored:*/ "n::"
 		/* wget has exactly four -n<letter> opts, all of which we can ignore:
 		 * -nv --no-verbose: be moderately quiet (-q is full quiet)
@@ -1629,7 +1708,7 @@ IF_DESKTOP(	"no-parent\0"        No_argument       "\xf0")
 		, &G.fname_out, &G.fname_log, &G.dir_prefix,
 		&G.proxy_flag, &G.user_agent,
 		IF_FEATURE_WGET_TIMEOUT(&G.timeout_seconds) IF_NOT_FEATURE_WGET_TIMEOUT(NULL),
-		NULL, /* -t RETRIES */
+		&G.tries, /* -t RETRIES */
 		NULL  /* -n[ARG] */
 		IF_FEATURE_WGET_LONG_OPTIONS(, &headers_llist)
 		IF_FEATURE_WGET_LONG_OPTIONS(, &G.post_data)
-- 
1.9.1

_______________________________________________
busybox mailing list
busybox@busybox.net
http://lists.busybox.net/mailman/listinfo/busybox

Reply via email to