Re: ftp: don't close fin or s twice

2018-02-06 Thread sunil+tech
Stuart Henderson  wrote:
> Regarding ftp(1), it would be nice to get more eyes on sunil@'s rewrite,
> apart from anything else it fixes problems with some servers (like 
> ftp.tug.org)
> that don't work with the existing code..

Hi,

For folks on tech@, the latest code is at https://nimmagadda.net/ftp.tar.gz



Re: ftp: don't close fin or s twice

2018-02-06 Thread Stuart Henderson
Regarding ftp(1), it would be nice to get more eyes on sunil@'s rewrite,
apart from anything else it fixes problems with some servers (like ftp.tug.org)
that don't work with the existing code..



Re: ftp: don't close fin or s twice

2018-02-05 Thread Theo Buehler
On Tue, Feb 06, 2018 at 03:02:14PM +1300, richard.n.proc...@gmail.com wrote:
> 
> 
> On Tue, 6 Feb 2018, Theo Buehler wrote:
> 
> > In cleanup_url_get, fin and s will be closed a second time, so mark them
> > as invalid after closing them the first time.
> > 
> > Another option might be to remove the fclose/close calls, but since this
> > happens right before the recursive call, I'm not sure whether this might
> > run the risk of hitting limits.
> 
> I got curious about the 'else if': fin = fdopen(s) is possible, 
> and closing both 'fin' and 's' will clobber errno.
> 
> How about the following further tweak to eliminate it?
> 
> I've also attached a follow-up patch that renames 's' -> 'fd'; searching 
> for 's' is infeasible without the compiler's help.
> 

I'm ok with both diffs.

> Index: ftp/fetch.c
> ===================================================================
> --- ftp.orig/fetch.c
> +++ ftp/fetch.c
> @@ -636,9 +636,11 @@ noslash:
>   }
>   } else {
>   fin = fdopen(s, "r+");
> + s = -1;
>   }
>  #else /* !NOSSL */
>   fin = fdopen(s, "r+");
> + s = -1;
>  #endif /* !NOSSL */
>  
>  #ifdef SMALL
> @@ -912,10 +914,14 @@ noslash:
>   *loctail = '\0';
>   if (verbose)
>   fprintf(ttyout, "Redirected to %s\n", redirurl);
> - if (fin != NULL)
> + if (fin != NULL) {
>   fclose(fin);
> - else if (s != -1)
> + fin = NULL;
> + }
> + if (s != -1) {
>   close(s);
> + s = -1;
> + }
>   rval = url_get(redirurl, proxyenv, savefile, lastfile);
>   free(redirurl);
>   goto cleanup_url_get;
> @@ -1039,10 +1045,14 @@ cleanup_url_get:
>   free(full_host);
>   free(sslhost);
>  #endif /* !NOSSL */
> - if (fin != NULL)
> + if (fin != NULL) {
>   fclose(fin);
> - else if (s != -1)
> + fin = NULL;
> + }
> + if (s != -1) {
>   close(s);
> + s = -1;
> + }
>   if (out >= 0 && out != fileno(stdout))
>   close(out);
>   free(buf);
> 
> 
> Index: ftp/fetch.c
> ===================================================================
> --- ftp.orig/fetch.c
> +++ ftp/fetch.c
> @@ -189,7 +189,7 @@ url_get(const char *origline, const char
>   const char * volatile savefile;
>   char * volatile proxyurl = NULL;
>   char *credentials = NULL;
> - volatile int s = -1, out = -1;
> + volatile int fd = -1, out = -1;
>   volatile sig_t oldintr, oldinti;
>   FILE *fin = NULL;
>   off_t hashbytes;
> @@ -364,13 +364,13 @@ noslash:
>   if (isfileurl) {
>   struct stat st;
>  
> - s = open(path, O_RDONLY);
> - if (s == -1) {
> + fd = open(path, O_RDONLY);
> + if (fd == -1) {
>   warn("Can't open file %s", path);
>   goto cleanup_url_get;
>   }
>  
> - if (fstat(s, &st) == -1)
> + if (fstat(fd, &st) == -1)
>   filesize = -1;
>   else
>   filesize = st.st_size;
> @@ -399,7 +399,7 @@ noslash:
>   warn("Can't fstat %s", savefile);
>   goto cleanup_url_get;
>   }
> - if (lseek(s, st.st_size, SEEK_SET) == -1) {
> + if (lseek(fd, st.st_size, SEEK_SET) == -1) {
>   warn("Can't lseek %s", path);
>   goto cleanup_url_get;
>   }
> @@ -429,7 +429,7 @@ noslash:
>   /* Finally, suck down the file. */
>   i = 0;
>   oldinti = signal(SIGINFO, psummary);
> - while ((len = read(s, buf, buflen)) > 0) {
> + while ((len = read(fd, buf, buflen)) > 0) {
>   bytes += len;
>   for (cp = buf; len > 0; len -= i, cp += i) {
>   if ((i = write(out, cp, len)) == -1) {
> @@ -535,7 +535,7 @@ noslash:
>   if (verbose)
>   setvbuf(ttyout, NULL, _IOLBF, 0);
>  
> - s = -1;
> + fd = -1;
>   for (res = res0; res; res = res->ai_next) {
>   if (getnameinfo(res->ai_addr, res->ai_addrlen, hbuf,
>   sizeof(hbuf), NULL, 0, NI_NUMERICHOST) != 0)
> @@ -543,8 +543,8 @@ noslash:
>   if (verbose)
>   fprintf(ttyout, "Trying %s...\n", hbuf);
>  
> - s = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
> - if (s == -1) {
> + fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
> + if (fd == -1)

Re: ftp: don't close fin or s twice

2018-02-05 Thread richard . n . procter


On Tue, 6 Feb 2018, Theo Buehler wrote:

> In cleanup_url_get, fin and s will be closed a second time, so mark them
> as invalid after closing them the first time.
> 
> Another option might be to remove the fclose/close calls, but since this
> happens right before the recursive call, I'm not sure whether this might
> run the risk of hitting limits.

I got curious about the 'else if': fin = fdopen(s) is possible, 
and closing both 'fin' and 's' will clobber errno.

How about the following further tweak to eliminate it?

I've also attached a follow-up patch that renames 's' -> 'fd'; searching 
for 's' is infeasible without the compiler's help.

Index: ftp/fetch.c
===================================================================
--- ftp.orig/fetch.c
+++ ftp/fetch.c
@@ -636,9 +636,11 @@ noslash:
}
} else {
fin = fdopen(s, "r+");
+   s = -1;
}
 #else /* !NOSSL */
fin = fdopen(s, "r+");
+   s = -1;
 #endif /* !NOSSL */
 
 #ifdef SMALL
@@ -912,10 +914,14 @@ noslash:
*loctail = '\0';
if (verbose)
fprintf(ttyout, "Redirected to %s\n", redirurl);
-   if (fin != NULL)
+   if (fin != NULL) {
fclose(fin);
-   else if (s != -1)
+   fin = NULL;
+   }
+   if (s != -1) {
close(s);
+   s = -1;
+   }
rval = url_get(redirurl, proxyenv, savefile, lastfile);
free(redirurl);
goto cleanup_url_get;
@@ -1039,10 +1045,14 @@ cleanup_url_get:
free(full_host);
free(sslhost);
 #endif /* !NOSSL */
-   if (fin != NULL)
+   if (fin != NULL) {
fclose(fin);
-   else if (s != -1)
+   fin = NULL;
+   }
+   if (s != -1) {
close(s);
+   s = -1;
+   }
if (out >= 0 && out != fileno(stdout))
close(out);
free(buf);


Index: ftp/fetch.c
===================================================================
--- ftp.orig/fetch.c
+++ ftp/fetch.c
@@ -189,7 +189,7 @@ url_get(const char *origline, const char
const char * volatile savefile;
char * volatile proxyurl = NULL;
char *credentials = NULL;
-   volatile int s = -1, out = -1;
+   volatile int fd = -1, out = -1;
volatile sig_t oldintr, oldinti;
FILE *fin = NULL;
off_t hashbytes;
@@ -364,13 +364,13 @@ noslash:
if (isfileurl) {
struct stat st;
 
-   s = open(path, O_RDONLY);
-   if (s == -1) {
+   fd = open(path, O_RDONLY);
+   if (fd == -1) {
warn("Can't open file %s", path);
goto cleanup_url_get;
}
 
-   if (fstat(s, &st) == -1)
+   if (fstat(fd, &st) == -1)
filesize = -1;
else
filesize = st.st_size;
@@ -399,7 +399,7 @@ noslash:
warn("Can't fstat %s", savefile);
goto cleanup_url_get;
}
-   if (lseek(s, st.st_size, SEEK_SET) == -1) {
+   if (lseek(fd, st.st_size, SEEK_SET) == -1) {
warn("Can't lseek %s", path);
goto cleanup_url_get;
}
@@ -429,7 +429,7 @@ noslash:
/* Finally, suck down the file. */
i = 0;
oldinti = signal(SIGINFO, psummary);
-   while ((len = read(s, buf, buflen)) > 0) {
+   while ((len = read(fd, buf, buflen)) > 0) {
bytes += len;
for (cp = buf; len > 0; len -= i, cp += i) {
if ((i = write(out, cp, len)) == -1) {
@@ -535,7 +535,7 @@ noslash:
if (verbose)
setvbuf(ttyout, NULL, _IOLBF, 0);
 
-   s = -1;
+   fd = -1;
for (res = res0; res; res = res->ai_next) {
if (getnameinfo(res->ai_addr, res->ai_addrlen, hbuf,
sizeof(hbuf), NULL, 0, NI_NUMERICHOST) != 0)
@@ -543,8 +543,8 @@ noslash:
if (verbose)
fprintf(ttyout, "Trying %s...\n", hbuf);
 
-   s = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
-   if (s == -1) {
+   fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
+   if (fd == -1) {
cause = "socket";
continue;
}
@@ -552,17 +552,17 @@ noslash:
 #ifndef SMALL
if (srcaddr) {
   

ftp: don't close fin or s twice

2018-02-05 Thread Theo Buehler
In cleanup_url_get, fin and s will be closed a second time, so mark them
as invalid after closing them the first time.

Another option might be to remove the fclose/close calls, but since this
happens right before the recursive call, I'm not sure whether this might
run the risk of hitting limits.

Index: fetch.c
===================================================================
RCS file: /var/cvs/src/usr.bin/ftp/fetch.c,v
retrieving revision 1.164
diff -u -p -r1.164 fetch.c
--- fetch.c 25 Sep 2017 11:04:54 -  1.164
+++ fetch.c 6 Feb 2018 00:37:17 -
@@ -912,10 +912,13 @@ noslash:
*loctail = '\0';
if (verbose)
fprintf(ttyout, "Redirected to %s\n", redirurl);
-   if (fin != NULL)
+   if (fin != NULL) {
fclose(fin);
-   else if (s != -1)
+   fin = NULL;
+   } else if (s != -1) {
close(s);
+   s = -1;
+   }
rval = url_get(redirurl, proxyenv, savefile, lastfile);
free(redirurl);
goto cleanup_url_get;