Currently, regardless of the contents of the "struct object_info" passed
to sha1_object_info_extended(), that function always accesses the
packfile whenever it returns information about a packed object, since it
needs to populate "u.packed".

Allow the caller to pass NULL, and use the NULL-ness of the argument to
activate an optimization: when the caller needs no object information,
sha1_object_info_extended() no longer needlessly accesses the packfile.
A subsequent patch will make use of this optimization.

A similar optimization is not made for the cached and loose cases, since
it would not yield a significant performance improvement there.
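
As an illustration (not part of this patch), a caller that only needs to
learn whether an object exists could rely on the new behavior as in the
sketch below; the helper name is hypothetical:

    static int object_exists(const unsigned char *sha1, unsigned flags)
    {
            /*
             * Pass NULL so that, for packed objects, no object
             * information is computed and the packfile is not accessed.
             * sha1_object_info_extended() returns 0 on success and a
             * negative value if the object cannot be found.
             */
            return sha1_object_info_extended(sha1, NULL, flags) >= 0;
    }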

Signed-off-by: Jonathan Tan <jonathanta...@google.com>
---
 sha1_file.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/sha1_file.c b/sha1_file.c
index b6bc02f09..bf6b64ec8 100644
--- a/sha1_file.c
+++ b/sha1_file.c
@@ -2977,12 +2977,16 @@ static int sha1_loose_object_info(const unsigned char *sha1,
 
 int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags)
 {
+       static struct object_info blank_oi = OBJECT_INFO_INIT;
        struct pack_entry e;
        int rtype;
        const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ?
                                    lookup_replace_object(sha1) :
                                    sha1;
 
+       if (!oi)
+               oi = &blank_oi;
+
        if (!(flags & OBJECT_INFO_SKIP_CACHED)) {
                struct cached_object *co = find_cached_object(real);
                if (co) {
@@ -3020,6 +3024,13 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
                }
        }
 
+       if (oi == &blank_oi)
+               /*
+                * We know that the caller doesn't actually need the
+                * information below, so return early.
+                */
+               return 0;
+
        rtype = packed_object_info(e.p, e.offset, oi);
        if (rtype < 0) {
                mark_bad_packed_object(e.p, real);
-- 
2.13.1.611.g7e3b11ae1-goog
