[no subject]

2014-03-18 Thread szager
Subject: [PATCH] Enable index-pack threading in msysgit.

This adds a Windows implementation of pread.  Note that it is NOT
safe to intersperse calls to read() and pread() on a file
descriptor.  According to the ReadFile spec, using the 'overlapped'
argument should not affect the implicit position pointer of the
descriptor.  Experiments have shown that this is, in fact, a lie.

To accommodate that fact, this change also incorporates:

http://article.gmane.org/gmane.comp.version-control.git/196042

... which gives each index-pack thread its own file descriptor.
---
 builtin/index-pack.c | 21 -
 compat/mingw.c   | 31 ++-
 compat/mingw.h   |  3 +++
 config.mak.uname |  1 -
 4 files changed, 49 insertions(+), 7 deletions(-)

diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 2f37a38..c02dd4c 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -51,6 +51,7 @@ struct thread_local {
 #endif
struct base_data *base_cache;
size_t base_cache_used;
+   int pack_fd;
 };
 
 /*
@@ -91,7 +92,8 @@ static off_t consumed_bytes;
 static unsigned deepest_delta;
 static git_SHA_CTX input_ctx;
 static uint32_t input_crc32;
-static int input_fd, output_fd, pack_fd;
+static const char *curr_pack;
+static int input_fd, output_fd;
 
 #ifndef NO_PTHREADS
 
@@ -134,6 +136,7 @@ static inline void unlock_mutex(pthread_mutex_t *mutex)
  */
 static void init_thread(void)
 {
+   int i;
init_recursive_mutex(read_mutex);
pthread_mutex_init(counter_mutex, NULL);
pthread_mutex_init(work_mutex, NULL);
@@ -141,11 +144,17 @@ static void init_thread(void)
pthread_mutex_init(deepest_delta_mutex, NULL);
pthread_key_create(key, NULL);
thread_data = xcalloc(nr_threads, sizeof(*thread_data));
+   for (i = 0; i < nr_threads; i++) {
+   thread_data[i].pack_fd = open(curr_pack, O_RDONLY);
+   if (thread_data[i].pack_fd == -1)
+   die_errno("unable to open %s", curr_pack);
+   }
threads_active = 1;
 }
 
 static void cleanup_thread(void)
 {
+   int i;
if (!threads_active)
return;
threads_active = 0;
@@ -155,6 +164,8 @@ static void cleanup_thread(void)
if (show_stat)
pthread_mutex_destroy(deepest_delta_mutex);
pthread_key_delete(key);
+   for (i = 0; i < nr_threads; i++)
+   close(thread_data[i].pack_fd);
free(thread_data);
 }
 
@@ -288,13 +299,13 @@ static const char *open_pack_file(const char *pack_name)
output_fd = open(pack_name, O_CREAT|O_EXCL|O_RDWR, 
0600);
if (output_fd < 0)
die_errno(_("unable to create '%s'"), pack_name);
-   pack_fd = output_fd;
+   nothread_data.pack_fd = output_fd;
} else {
input_fd = open(pack_name, O_RDONLY);
if (input_fd < 0)
die_errno(_("cannot open packfile '%s'"), pack_name);
output_fd = -1;
-   pack_fd = input_fd;
+   nothread_data.pack_fd = input_fd;
}
git_SHA1_Init(input_ctx);
return pack_name;
@@ -542,7 +553,7 @@ static void *unpack_data(struct object_entry *obj,
 
do {
ssize_t n = (len < 64*1024) ? len : 64*1024;
-   n = pread(pack_fd, inbuf, n, from);
+   n = pread(get_thread_data()->pack_fd, inbuf, n, from);
if (n < 0)
die_errno(_("cannot pread pack file"));
if (!n)
@@ -1490,7 +1501,7 @@ static void show_pack_info(int stat_only)
 int cmd_index_pack(int argc, const char **argv, const char *prefix)
 {
int i, fix_thin_pack = 0, verify = 0, stat_only = 0;
-   const char *curr_pack, *curr_index;
+   const char *curr_index;
const char *index_name = NULL, *pack_name = NULL;
const char *keep_name = NULL, *keep_msg = NULL;
char *index_name_buf = NULL, *keep_name_buf = NULL;
diff --git a/compat/mingw.c b/compat/mingw.c
index 383cafe..6cc85d6 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -329,7 +329,36 @@ int mingw_mkdir(const char *path, int mode)
return ret;
 }
 
-int mingw_open (const char *filename, int oflags, ...)
+
+ssize_t mingw_pread(int fd, void *buf, size_t count, off64_t offset)
+{
+   HANDLE hand = (HANDLE)_get_osfhandle(fd);
+   if (hand == INVALID_HANDLE_VALUE) {
+   errno = EBADF;
+   return -1;
+   }
+
+   LARGE_INTEGER offset_value;
+   offset_value.QuadPart = offset;
+
+   DWORD bytes_read = 0;
+   OVERLAPPED overlapped = {0};
+   overlapped.Offset = offset_value.LowPart;
+   overlapped.OffsetHigh = offset_value.HighPart;
+   BOOL result = ReadFile(hand, buf, count, &bytes_read, &overlapped);
+
+   ssize_t ret = bytes_read;
+
+   if (!result && GetLastError() != ERROR_HANDLE_EOF)

[PATCH] Enable index-pack threading in msysgit.

2014-03-18 Thread szager
This adds a Windows implementation of pread.  Note that it is NOT
safe to intersperse calls to read() and pread() on a file
descriptor.  According to the ReadFile spec, using the 'overlapped'
argument should not affect the implicit position pointer of the
descriptor.  Experiments have shown that this is, in fact, a lie.

To accommodate that fact, this change also incorporates:

http://article.gmane.org/gmane.comp.version-control.git/196042

... which gives each index-pack thread its own file descriptor.
---
 builtin/index-pack.c | 21 -
 compat/mingw.c   | 31 ++-
 compat/mingw.h   |  3 +++
 config.mak.uname |  1 -
 4 files changed, 49 insertions(+), 7 deletions(-)

diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 2f37a38..c02dd4c 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -51,6 +51,7 @@ struct thread_local {
 #endif
struct base_data *base_cache;
size_t base_cache_used;
+   int pack_fd;
 };
 
 /*
@@ -91,7 +92,8 @@ static off_t consumed_bytes;
 static unsigned deepest_delta;
 static git_SHA_CTX input_ctx;
 static uint32_t input_crc32;
-static int input_fd, output_fd, pack_fd;
+static const char *curr_pack;
+static int input_fd, output_fd;
 
 #ifndef NO_PTHREADS
 
@@ -134,6 +136,7 @@ static inline void unlock_mutex(pthread_mutex_t *mutex)
  */
 static void init_thread(void)
 {
+   int i;
init_recursive_mutex(read_mutex);
pthread_mutex_init(counter_mutex, NULL);
pthread_mutex_init(work_mutex, NULL);
@@ -141,11 +144,17 @@ static void init_thread(void)
pthread_mutex_init(deepest_delta_mutex, NULL);
pthread_key_create(key, NULL);
thread_data = xcalloc(nr_threads, sizeof(*thread_data));
+   for (i = 0; i  nr_threads; i++) {
+   thread_data[i].pack_fd = open(curr_pack, O_RDONLY);
+   if (thread_data[i].pack_fd == -1)
+   die_errno(unable to open %s, curr_pack);
+   }
threads_active = 1;
 }
 
 static void cleanup_thread(void)
 {
+   int i;
if (!threads_active)
return;
threads_active = 0;
@@ -155,6 +164,8 @@ static void cleanup_thread(void)
if (show_stat)
pthread_mutex_destroy(deepest_delta_mutex);
pthread_key_delete(key);
+   for (i = 0; i  nr_threads; i++)
+   close(thread_data[i].pack_fd);
free(thread_data);
 }
 
@@ -288,13 +299,13 @@ static const char *open_pack_file(const char *pack_name)
output_fd = open(pack_name, O_CREAT|O_EXCL|O_RDWR, 
0600);
if (output_fd  0)
die_errno(_(unable to create '%s'), pack_name);
-   pack_fd = output_fd;
+   nothread_data.pack_fd = output_fd;
} else {
input_fd = open(pack_name, O_RDONLY);
if (input_fd  0)
die_errno(_(cannot open packfile '%s'), pack_name);
output_fd = -1;
-   pack_fd = input_fd;
+   nothread_data.pack_fd = input_fd;
}
git_SHA1_Init(input_ctx);
return pack_name;
@@ -542,7 +553,7 @@ static void *unpack_data(struct object_entry *obj,
 
do {
ssize_t n = (len  64*1024) ? len : 64*1024;
-   n = pread(pack_fd, inbuf, n, from);
+   n = pread(get_thread_data()-pack_fd, inbuf, n, from);
if (n  0)
die_errno(_(cannot pread pack file));
if (!n)
@@ -1490,7 +1501,7 @@ static void show_pack_info(int stat_only)
 int cmd_index_pack(int argc, const char **argv, const char *prefix)
 {
int i, fix_thin_pack = 0, verify = 0, stat_only = 0;
-   const char *curr_pack, *curr_index;
+   const char *curr_index;
const char *index_name = NULL, *pack_name = NULL;
const char *keep_name = NULL, *keep_msg = NULL;
char *index_name_buf = NULL, *keep_name_buf = NULL;
diff --git a/compat/mingw.c b/compat/mingw.c
index 383cafe..6cc85d6 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -329,7 +329,36 @@ int mingw_mkdir(const char *path, int mode)
return ret;
 }
 
-int mingw_open (const char *filename, int oflags, ...)
+
+ssize_t mingw_pread(int fd, void *buf, size_t count, off64_t offset)
+{
+   HANDLE hand = (HANDLE)_get_osfhandle(fd);
+   if (hand == INVALID_HANDLE_VALUE) {
+   errno = EBADF;
+   return -1;
+   }
+
+   LARGE_INTEGER offset_value;
+   offset_value.QuadPart = offset;
+
+   DWORD bytes_read = 0;
+   OVERLAPPED overlapped = {0};
+   overlapped.Offset = offset_value.LowPart;
+   overlapped.OffsetHigh = offset_value.HighPart;
+   BOOL result = ReadFile(hand, buf, count, bytes_read, overlapped);
+
+   ssize_t ret = bytes_read;
+
+   if (!result  GetLastError() != ERROR_HANDLE_EOF)
+   {
+   errno = 

[PATCH] Make the global packed_git variable static to sha1_file.c.

2014-02-13 Thread szager
This is a first step in making the codebase thread-safe.  By and
large, the operations which might benefit from threading are those
that work with pack files (e.g., checkout, blame), so the focus of
this patch is stop leaking the global list of pack files outside of
sha1_file.c.

The next step will be to control access to the list of pack files
with a mutex.  However, that alone is not enough to make pack file
access thread safe.  Even in a read-only operation, the window list
associated with each pack file will need to be controlled.
Additionally, the global counters in sha1_file.c will need to be
controlled.

This patch is a pure refactor with no functional changes, so it
shouldn't require any additional tests.  Adding the actual locks
will be a functional change, and will require additional tests.

Signed-off-by: Stefan Zager sza...@chromium.org
---
 builtin/count-objects.c  |  44 ++-
 builtin/fsck.c   |  46 +++-
 builtin/gc.c |  26 +++
 builtin/pack-objects.c   | 189 ---
 builtin/pack-redundant.c |  37 +++---
 cache.h  |  18 -
 fast-import.c|   4 +-
 http-backend.c   |  28 ---
 http-push.c  |   4 +-
 http-walker.c|   2 +-
 pack-revindex.c  |  20 ++---
 server-info.c|  36 -
 sha1_file.c  |  52 ++---
 sha1_name.c  |  18 -
 14 files changed, 327 insertions(+), 197 deletions(-)

diff --git a/builtin/count-objects.c b/builtin/count-objects.c
index a7f70cb..a27c006 100644
--- a/builtin/count-objects.c
+++ b/builtin/count-objects.c
@@ -83,14 +83,32 @@ static char const * const count_objects_usage[] = {
NULL
 };
 
+struct pack_data {
+   unsigned long packed;
+   off_t size_pack;
+   unsigned long num_pack;
+};
+
+static int count_pack_objects(struct packed_git *p, void *data)
+{
+   struct pack_data *pd = (struct pack_data *) data;
+   if (p-pack_local  !open_pack_index(p)) {
+   pd-packed += p-num_objects;
+   pd-size_pack += p-pack_size + p-index_size;
+   pd-num_pack++;
+   }
+   return 0;
+}
+
 int cmd_count_objects(int argc, const char **argv, const char *prefix)
 {
int i, verbose = 0, human_readable = 0;
const char *objdir = get_object_directory();
int len = strlen(objdir);
char *path = xmalloc(len + 50);
-   unsigned long loose = 0, packed = 0, packed_loose = 0;
+   unsigned long loose = 0, packed_loose = 0;
off_t loose_size = 0;
+   struct pack_data pd = {0, 0, 0};
struct option opts[] = {
OPT__VERBOSE(verbose, N_(be verbose)),
OPT_BOOL('H', human-readable, human_readable,
@@ -118,41 +136,29 @@ int cmd_count_objects(int argc, const char **argv, const 
char *prefix)
closedir(d);
}
if (verbose) {
-   struct packed_git *p;
-   unsigned long num_pack = 0;
-   off_t size_pack = 0;
struct strbuf loose_buf = STRBUF_INIT;
struct strbuf pack_buf = STRBUF_INIT;
struct strbuf garbage_buf = STRBUF_INIT;
-   if (!packed_git)
-   prepare_packed_git();
-   for (p = packed_git; p; p = p-next) {
-   if (!p-pack_local)
-   continue;
-   if (open_pack_index(p))
-   continue;
-   packed += p-num_objects;
-   size_pack += p-pack_size + p-index_size;
-   num_pack++;
-   }
+   prepare_packed_git();
+   foreach_packed_git(count_pack_objects, NULL, pd);
 
if (human_readable) {
strbuf_humanise_bytes(loose_buf, loose_size);
-   strbuf_humanise_bytes(pack_buf, size_pack);
+   strbuf_humanise_bytes(pack_buf, pd.size_pack);
strbuf_humanise_bytes(garbage_buf, size_garbage);
} else {
strbuf_addf(loose_buf, %lu,
(unsigned long)(loose_size / 1024));
strbuf_addf(pack_buf, %lu,
-   (unsigned long)(size_pack / 1024));
+   (unsigned long)(pd.size_pack / 1024));
strbuf_addf(garbage_buf, %lu,
(unsigned long)(size_garbage / 1024));
}
 
printf(count: %lu\n, loose);
printf(size: %s\n, loose_buf.buf);
-   printf(in-pack: %lu\n, packed);
-   printf(packs: %lu\n, num_pack);
+   printf(in-pack: %lu\n, pd.packed);
+   printf(packs: %lu\n, pd.num_pack);
printf(size-pack: %s\n, pack_buf.buf);

[PATCH v2] Make the global packed_git variable static to sha1_file.c.

2014-02-12 Thread szager
From 0a59547f3e95ddecf7606c5f259ae6177c5a104f Mon Sep 17 00:00:00 2001
From: Stefan Zager sza...@chromium.org
Date: Mon, 10 Feb 2014 16:55:12 -0800
Subject: [PATCH] Make the global packed_git variable static to sha1_file.c.

This is a first step in making the codebase thread-safe.  By and
large, the operations which might benefit from threading are those
that work with pack files (e.g., checkout, blame), so the focus of
this patch is to stop leaking the global list of pack files outside of
sha1_file.c.

The next step will be to control access to the list of pack files
with a mutex.  However, that alone is not enough to make pack file
access thread safe.  Even in a read-only operation, the window list
associated with each pack file will need to be controlled.
Additionally, the global counters in sha1_file.c will need to be
controlled.

This patch is a pure refactor with no functional changes, so it
shouldn't require any additional tests.  Adding the actual locks
will be a functional change, and will require additional tests.

Signed-off-by: Stefan Zager sza...@chromium.org
---
 builtin/count-objects.c  |  44 ++-
 builtin/fsck.c   |  46 +++-
 builtin/gc.c |  26 +++
 builtin/pack-objects.c   | 188 ---
 builtin/pack-redundant.c |  37 +++---
 cache.h  |  16 +++-
 fast-import.c|   4 +-
 http-backend.c   |  28 ---
 http-push.c  |   4 +-
 http-walker.c|   2 +-
 pack-revindex.c  |  20 ++---
 server-info.c|  35 +
 sha1_file.c  |  35 -
 sha1_name.c  |  18 -
 14 files changed, 315 insertions(+), 188 deletions(-)

diff --git a/builtin/count-objects.c b/builtin/count-objects.c
index a7f70cb..6554dfe 100644
--- a/builtin/count-objects.c
+++ b/builtin/count-objects.c
@@ -83,14 +83,32 @@ static char const * const count_objects_usage[] = {
NULL
 };
 
+struct pack_data {
+   unsigned long packed;
+   off_t size_pack;
+   unsigned long num_pack;
+};
+
+int pack_data_fn(struct packed_git *p, void *data)
+{
+   struct pack_data *pd = (struct pack_data *) data;
+   if (p-pack_local  !open_pack_index(p)) {
+   pd-packed += p-num_objects;
+   pd-size_pack += p-pack_size + p-index_size;
+   pd-num_pack++;
+   }
+   return 0;
+}
+
 int cmd_count_objects(int argc, const char **argv, const char *prefix)
 {
int i, verbose = 0, human_readable = 0;
const char *objdir = get_object_directory();
int len = strlen(objdir);
char *path = xmalloc(len + 50);
-   unsigned long loose = 0, packed = 0, packed_loose = 0;
+   unsigned long loose = 0, packed_loose = 0;
off_t loose_size = 0;
+   struct pack_data pd = {0,0,0};
struct option opts[] = {
OPT__VERBOSE(verbose, N_(be verbose)),
OPT_BOOL('H', human-readable, human_readable,
@@ -118,41 +136,29 @@ int cmd_count_objects(int argc, const char **argv, const 
char *prefix)
closedir(d);
}
if (verbose) {
-   struct packed_git *p;
-   unsigned long num_pack = 0;
-   off_t size_pack = 0;
struct strbuf loose_buf = STRBUF_INIT;
struct strbuf pack_buf = STRBUF_INIT;
struct strbuf garbage_buf = STRBUF_INIT;
-   if (!packed_git)
-   prepare_packed_git();
-   for (p = packed_git; p; p = p-next) {
-   if (!p-pack_local)
-   continue;
-   if (open_pack_index(p))
-   continue;
-   packed += p-num_objects;
-   size_pack += p-pack_size + p-index_size;
-   num_pack++;
-   }
+   prepare_packed_git();
+   foreach_packed_git(pack_data_fn, NULL, pd);
 
if (human_readable) {
strbuf_humanise_bytes(loose_buf, loose_size);
-   strbuf_humanise_bytes(pack_buf, size_pack);
+   strbuf_humanise_bytes(pack_buf, pd.size_pack);
strbuf_humanise_bytes(garbage_buf, size_garbage);
} else {
strbuf_addf(loose_buf, %lu,
(unsigned long)(loose_size / 1024));
strbuf_addf(pack_buf, %lu,
-   (unsigned long)(size_pack / 1024));
+   (unsigned long)(pd.size_pack / 1024));
strbuf_addf(garbage_buf, %lu,
(unsigned long)(size_garbage / 1024));
}
 
printf(count: %lu\n, loose);
printf(size: %s\n, loose_buf.buf);
-   printf(in-pack: %lu\n, packed);
- 

[PATCH] Enable parallelism in git submodule update.

2012-10-30 Thread szager
The --jobs parameter may be used to set the degree of per-submodule
parallel execution.

Signed-off-by: Stefan Zager sza...@google.com
---
 Documentation/git-submodule.txt |8 ++-
 git-submodule.sh|   40 ++-
 2 files changed, 46 insertions(+), 2 deletions(-)

diff --git a/Documentation/git-submodule.txt b/Documentation/git-submodule.txt
index b4683bb..cb23ba7 100644
--- a/Documentation/git-submodule.txt
+++ b/Documentation/git-submodule.txt
@@ -14,7 +14,8 @@ SYNOPSIS
 'git submodule' [--quiet] status [--cached] [--recursive] [--] [path...]
 'git submodule' [--quiet] init [--] [path...]
 'git submodule' [--quiet] update [--init] [-N|--no-fetch] [--rebase]
- [--reference repository] [--merge] [--recursive] [--] 
[path...]
+ [--reference repository] [--merge] [--recursive]
+ [-j|--jobs [jobs]] [--] [path...]
 'git submodule' [--quiet] summary [--cached|--files] [(-n|--summary-limit) n]
  [commit] [--] [path...]
 'git submodule' [--quiet] foreach [--recursive] command
@@ -146,6 +147,11 @@ If the submodule is not yet initialized, and you just want 
to use the
 setting as stored in .gitmodules, you can automatically initialize the
 submodule with the `--init` option.
 +
+By default, each submodule is treated serially.  You may specify a degree of
+parallel execution with the --jobs flag.  If a parameter is provided, it is
+the maximum number of jobs to run in parallel; without a parameter, all jobs 
are
+run in parallel.
++
 If `--recursive` is specified, this command will recurse into the
 registered submodules, and update any nested submodules within.
 +
diff --git a/git-submodule.sh b/git-submodule.sh
index ab6b110..60a5f96 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -8,7 +8,7 @@ dashless=$(basename $0 | sed -e 's/-/ /')
 USAGE=[--quiet] add [-b branch] [-f|--force] [--reference repository] [--] 
repository [path]
or: $dashless [--quiet] status [--cached] [--recursive] [--] [path...]
or: $dashless [--quiet] init [--] [path...]
-   or: $dashless [--quiet] update [--init] [-N|--no-fetch] [-f|--force] 
[--rebase] [--reference repository] [--merge] [--recursive] [--] [path...]
+   or: $dashless [--quiet] update [--init] [-N|--no-fetch] [-f|--force] 
[--rebase] [--reference repository] [--merge] [--recursive] [-j|--jobs 
[jobs]] [--] [path...]
or: $dashless [--quiet] summary [--cached|--files] [--summary-limit n] 
[commit] [--] [path...]
or: $dashless [--quiet] foreach [--recursive] command
or: $dashless [--quiet] sync [--] [path...]
@@ -500,6 +500,7 @@ cmd_update()
 {
# parse $args after submodule ... update.
orig_flags=
+   jobs=1
while test $# -ne 0
do
case $1 in
@@ -518,6 +519,20 @@ cmd_update()
-r|--rebase)
update=rebase
;;
+   -j|--jobs)
+   case $2 in
+   ''|-*)
+   jobs=0
+   ;;
+   *)
+   jobs=$2
+   shift
+   ;;
+   esac
+   # Don't preserve this arg.
+   shift
+   continue
+   ;;
--reference)
case $2 in '') usage ;; esac
reference=--reference=$2
@@ -551,11 +566,34 @@ cmd_update()
shift
done
 
+   # Correctly handle the case where '-q' came before 'update' on the 
command line.
+   if test -n $GIT_QUIET
+   then
+   orig_flags=$orig_flags -q
+   fi
+
if test -n $init
then
cmd_init -- $@ || return
fi
 
+   if test $jobs != 1
+   then
+   if ( echo test | xargs -P $jobs true 2/dev/null )
+   then
+   if ( echo test | xargs --max-lines=1 true 2/dev/null 
); then
+   max_lines=--max-lines=1
+   else
+   max_lines=-L 1
+   fi
+   module_list $@ | awk '{print $4}' |
+   xargs $max_lines -P $jobs git submodule update 
$orig_flags
+   return
+   else
+   echo Warn: parallel execution is not supported on this 
platform.
+   fi
+   fi
+
cloned_modules=
module_list $@ | {
err=
-- 
1.7.7.3

--
To unsubscribe from this list: send the line unsubscribe git in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] Enable parallelism in git submodule update.

2012-10-30 Thread szager
The --jobs parameter may be used to set the degree of per-submodule
parallel execution.

Signed-off-by: Stefan Zager sza...@google.com
---
 Documentation/git-submodule.txt |8 ++-
 git-submodule.sh|   40 ++-
 2 files changed, 46 insertions(+), 2 deletions(-)

diff --git a/Documentation/git-submodule.txt b/Documentation/git-submodule.txt
index b4683bb..cb23ba7 100644
--- a/Documentation/git-submodule.txt
+++ b/Documentation/git-submodule.txt
@@ -14,7 +14,8 @@ SYNOPSIS
 'git submodule' [--quiet] status [--cached] [--recursive] [--] [path...]
 'git submodule' [--quiet] init [--] [path...]
 'git submodule' [--quiet] update [--init] [-N|--no-fetch] [--rebase]
- [--reference repository] [--merge] [--recursive] [--] 
[path...]
+ [--reference repository] [--merge] [--recursive]
+ [-j|--jobs [jobs]] [--] [path...]
 'git submodule' [--quiet] summary [--cached|--files] [(-n|--summary-limit) n]
  [commit] [--] [path...]
 'git submodule' [--quiet] foreach [--recursive] command
@@ -146,6 +147,11 @@ If the submodule is not yet initialized, and you just want 
to use the
 setting as stored in .gitmodules, you can automatically initialize the
 submodule with the `--init` option.
 +
+By default, each submodule is treated serially.  You may specify a degree of
+parallel execution with the --jobs flag.  If a parameter is provided, it is
+the maximum number of jobs to run in parallel; without a parameter, all jobs 
are
+run in parallel.
++
 If `--recursive` is specified, this command will recurse into the
 registered submodules, and update any nested submodules within.
 +
diff --git a/git-submodule.sh b/git-submodule.sh
index ab6b110..60a5f96 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -8,7 +8,7 @@ dashless=$(basename $0 | sed -e 's/-/ /')
 USAGE=[--quiet] add [-b branch] [-f|--force] [--reference repository] [--] 
repository [path]
or: $dashless [--quiet] status [--cached] [--recursive] [--] [path...]
or: $dashless [--quiet] init [--] [path...]
-   or: $dashless [--quiet] update [--init] [-N|--no-fetch] [-f|--force] 
[--rebase] [--reference repository] [--merge] [--recursive] [--] [path...]
+   or: $dashless [--quiet] update [--init] [-N|--no-fetch] [-f|--force] 
[--rebase] [--reference repository] [--merge] [--recursive] [-j|--jobs 
[jobs]] [--] [path...]
or: $dashless [--quiet] summary [--cached|--files] [--summary-limit n] 
[commit] [--] [path...]
or: $dashless [--quiet] foreach [--recursive] command
or: $dashless [--quiet] sync [--] [path...]
@@ -500,6 +500,7 @@ cmd_update()
 {
# parse $args after submodule ... update.
orig_flags=
+   jobs=1
while test $# -ne 0
do
case $1 in
@@ -518,6 +519,20 @@ cmd_update()
-r|--rebase)
update=rebase
;;
+   -j|--jobs)
+   case $2 in
+   ''|-*)
+   jobs=0
+   ;;
+   *)
+   jobs=$2
+   shift
+   ;;
+   esac
+   # Don't preserve this arg.
+   shift
+   continue
+   ;;
--reference)
case $2 in '') usage ;; esac
reference=--reference=$2
@@ -551,11 +566,34 @@ cmd_update()
shift
done
 
+   # Correctly handle the case where '-q' came before 'update' on the 
command line.
+   if test -n $GIT_QUIET
+   then
+   orig_flags=$orig_flags -q
+   fi
+
if test -n $init
then
cmd_init -- $@ || return
fi
 
+   if test $jobs != 1
+   then
+   if ( echo test | xargs -P $jobs true 2/dev/null )
+   then
+   if ( echo test | xargs --max-lines=1 true 2/dev/null 
); then
+   max_lines=--max-lines=1
+   else
+   max_lines=-L 1
+   fi
+   module_list $@ | awk '{print $4}' |
+   xargs $max_lines -P $jobs git submodule update 
$orig_flags
+   return
+   else
+   echo Warn: parallel execution is not supported on this 
platform.
+   fi
+   fi
+
cloned_modules=
module_list $@ | {
err=
-- 
1.7.7.3

--
To unsubscribe from this list: send the line unsubscribe git in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Fix potential hang in https handshake (v3)

2012-10-19 Thread szager
From 32e06128dbc97ceb0d060c88ec8db204fa51be5c Mon Sep 17 00:00:00 2001
From: Stefan Zager sza...@google.com
Date: Thu, 18 Oct 2012 16:23:53 -0700
Subject: [PATCH] Fix potential hang in https handshake.

It has been observed that curl_multi_timeout may return a very long
timeout value (e.g., 294 seconds and some usec) just before
curl_multi_fdset returns no file descriptors for reading.  The
upshot is that select() will hang for a long time -- long enough for
an https handshake to be dropped.  The observed behavior is that
the git command will hang at the terminal and never transfer any
data.

This patch is a workaround for a probable bug in libcurl.  The bug
only seems to manifest around a very specific set of circumstances:

- curl version (from curl/curlver.h):

 #define LIBCURL_VERSION_NUM 0x071307

- git-remote-https running on an ubuntu-lucid VM.
- Connecting through squid proxy running on another VM.

Interestingly, the problem doesn't manifest if a host connects
through squid proxy running on localhost; only if the proxy is on
a separate VM (not sure if the squid host needs to be on a separate
physical machine).  That would seem to suggest that this issue
is timing-sensitive.

This patch is more or less in line with a recommendation in the
curl docs about how to behave when curl_multi_fdset doesn't return
any file descriptors:

http://curl.haxx.se/libcurl/c/curl_multi_fdset.html

Signed-off-by: Stefan Zager sza...@google.com
---
 http.c |   11 +++
 1 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/http.c b/http.c
index df9bb71..51eef02 100644
--- a/http.c
+++ b/http.c
@@ -631,6 +631,17 @@ void run_active_slot(struct active_request_slot *slot)
FD_ZERO(excfds);
curl_multi_fdset(curlm, readfds, writefds, excfds, 
max_fd);
 
+   /* It can happen that curl_multi_timeout returns a 
pathologically
+* long timeout when curl_multi_fdset returns no file 
descriptors
+* to read.  See commit message for more details.
+*/
+   if (max_fd  0 
+   select_timeout.tv_sec  0 ||
+   select_timeout.tv_usec  5) {
+   select_timeout.tv_sec  = 0;
+   select_timeout.tv_usec = 5;
+   }
+
select(max_fd+1, readfds, writefds, excfds, 
select_timeout);
}
}
-- 
1.7.7.3

--
To unsubscribe from this list: send the line unsubscribe git in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] Fix potential hang in https handshake.

2012-10-19 Thread szager
It has been observed that curl_multi_timeout may return a very long
timeout value (e.g., 294 seconds and some usec) just before
curl_multi_fdset returns no file descriptors for reading.  The
upshot is that select() will hang for a long time -- long enough for
an https handshake to be dropped.  The observed behavior is that
the git command will hang at the terminal and never transfer any
data.

This patch is a workaround for a probable bug in libcurl.  The bug
only seems to manifest around a very specific set of circumstances:

- curl version (from curl/curlver.h):

 #define LIBCURL_VERSION_NUM 0x071307

- git-remote-https running on an ubuntu-lucid VM.
- Connecting through squid proxy running on another VM.

Interestingly, the problem doesn't manifest if a host connects
through squid proxy running on localhost; only if the proxy is on
a separate VM (not sure if the squid host needs to be on a separate
physical machine).  That would seem to suggest that this issue
is timing-sensitive.

This patch is more or less in line with a recommendation in the
curl docs about how to behave when curl_multi_fdset doesn't return
any file descriptors:

http://curl.haxx.se/libcurl/c/curl_multi_fdset.html

Signed-off-by: Stefan Zager sza...@google.com
---
 http.c |   12 
 1 files changed, 12 insertions(+), 0 deletions(-)

diff --git a/http.c b/http.c
index df9bb71..b7e7ab4 100644
--- a/http.c
+++ b/http.c
@@ -631,6 +631,18 @@ void run_active_slot(struct active_request_slot *slot)
FD_ZERO(excfds);
curl_multi_fdset(curlm, readfds, writefds, excfds, 
max_fd);
 
+   /*
+* It can happen that curl_multi_timeout returns a 
pathologically
+* long timeout when curl_multi_fdset returns no file 
descriptors
+* to read.  See commit message for more details.
+*/
+   if (max_fd  0 
+   (select_timeout.tv_sec  0 ||
+ select_timeout.tv_usec  5)) {
+   select_timeout.tv_sec  = 0;
+select_timeout.tv_usec = 5;
+   }
+
select(max_fd+1, readfds, writefds, excfds, 
select_timeout);
}
}
-- 
1.7.7.3

--
To unsubscribe from this list: send the line unsubscribe git in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Fix potential hang in https handshake.

2012-10-18 Thread szager
From 700b8075c578941c8f951711825c390ac68b190f Mon Sep 17 00:00:00 2001
From: Stefan Zager sza...@google.com
Date: Thu, 18 Oct 2012 14:03:59 -0700
Subject: [PATCH] Fix potential hang in https handshake.

It will sometimes happen that curl_multi_fdset() doesn't
return any file descriptors.  In that case, it's recommended
that the application sleep for a short time before running
curl_multi_perform() again.

http://curl.haxx.se/libcurl/c/curl_multi_fdset.html

Signed-off-by: Stefan Zager sza...@google.com
---
 http.c |   40 ++--
 1 files changed, 26 insertions(+), 14 deletions(-)

diff --git a/http.c b/http.c
index df9bb71..a6f66c0 100644
--- a/http.c
+++ b/http.c
@@ -602,35 +602,47 @@ void run_active_slot(struct active_request_slot *slot)
int max_fd;
struct timeval select_timeout;
int finished = 0;
+   long curl_timeout;
 
slot-finished = finished;
while (!finished) {
step_active_slots();
 
if (slot-in_use) {
+   max_fd = -1;
+   FD_ZERO(readfds);
+   FD_ZERO(writefds);
+   FD_ZERO(excfds);
+   curl_multi_fdset(curlm, readfds, writefds, excfds, 
max_fd);
+
 #if LIBCURL_VERSION_NUM = 0x070f04
-   long curl_timeout;
-   curl_multi_timeout(curlm, curl_timeout);
-   if (curl_timeout == 0) {
-   continue;
-   } else if (curl_timeout == -1) {
+   /* It will sometimes happen that curl_multi_fdset() 
doesn't
+  return any file descriptors.  In that case, it's 
recommended
+  that the application sleep for a short time before 
running
+  curl_multi_perform() again.
+
+  http://curl.haxx.se/libcurl/c/curl_multi_fdset.html
+   */
+   if (max_fd == -1) {
select_timeout.tv_sec  = 0;
select_timeout.tv_usec = 5;
} else {
-   select_timeout.tv_sec  =  curl_timeout / 1000;
-   select_timeout.tv_usec = (curl_timeout % 1000) 
* 1000;
+   curl_timeout = 0;
+   curl_multi_timeout(curlm, curl_timeout);
+   if (curl_timeout == 0) {
+   continue;
+   } else if (curl_timeout == -1) {
+   select_timeout.tv_sec  = 0;
+   select_timeout.tv_usec = 5;
+   } else {
+   select_timeout.tv_sec  =  curl_timeout 
/ 1000;
+   select_timeout.tv_usec = (curl_timeout 
% 1000) * 1000;
+   }
}
 #else
select_timeout.tv_sec  = 0;
select_timeout.tv_usec = 5;
 #endif
-
-   max_fd = -1;
-   FD_ZERO(readfds);
-   FD_ZERO(writefds);
-   FD_ZERO(excfds);
-   curl_multi_fdset(curlm, readfds, writefds, excfds, 
max_fd);
-
select(max_fd+1, readfds, writefds, excfds, 
select_timeout);
}
}
-- 
1.7.7.3

--
To unsubscribe from this list: send the line unsubscribe git in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Fix potential hang in https handshake (v2).

2012-10-18 Thread szager
From aa77ab3dd5b98a5786ac158528f45355fc0ddbc3 Mon Sep 17 00:00:00 2001
From: Stefan Zager sza...@google.com
Date: Thu, 18 Oct 2012 16:23:53 -0700
Subject: [PATCH] Fix potential hang in https handshake.

It will sometimes happen that curl_multi_fdset() doesn't
return any file descriptors.  In that case, it's recommended
that the application sleep for a short time before running
curl_multi_perform() again.

http://curl.haxx.se/libcurl/c/curl_multi_fdset.html

Signed-off-by: Stefan Zager sza...@google.com
---
 http.c |4 
 1 files changed, 4 insertions(+), 0 deletions(-)

diff --git a/http.c b/http.c
index df9bb71..e8aba7f 100644
--- a/http.c
+++ b/http.c
@@ -630,6 +630,10 @@ void run_active_slot(struct active_request_slot *slot)
FD_ZERO(writefds);
FD_ZERO(excfds);
curl_multi_fdset(curlm, readfds, writefds, excfds, 
max_fd);
+   if (max_fd  0) {
+   select_timeout.tv_sec  = 0;
+   select_timeout.tv_usec = 5;
+   }
 
select(max_fd+1, readfds, writefds, excfds, 
select_timeout);
}
-- 
1.7.7.3

--
To unsubscribe from this list: send the line unsubscribe git in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html