On Thu, Aug 02, David Cramer wrote:

> 
> This was my best solution. I honestly wouldn't ever encourage anything
> where you don't specify which foreign keys to follow. JOINs can get very
> slow when they expand beyond 2 or 3 tables, especially when table
> sizes increase.

Agreed ;-) I even think the max_depth argument should be removed; it doesn't
really make sense.

I have something similar that is able to correctly generate outer joins for
nullable foreign keys. It's currently only for my own use, since it's probably
MySQL-only, and it can't correctly nest joins if you need an inner join to
an outer join (I hope this makes sense to you). It's also written for a
different API (but I prefer your idea).

I've attached it for a quick look; are you interested in joining our
efforts?

Michael



-- 
noris network AG - Deutschherrnstraße 15-19 - D-90429 Nürnberg -
Tel +49-911-9352-0 - Fax +49-911-9352-100
http://www.noris.de - The IT-Outsourcing Company
 
Vorstand: Ingo Kraupa (Vorsitzender), Joachim Astel, Hansjochen Klenk - 
Vorsitzender des Aufsichtsrats: Stefan Schnabel - AG Nürnberg HRB 17689

--~--~---------~--~----~------------~-------~--~----~
You received this message because you are subscribed to the Google Groups 
"Django developers" group.
To post to this group, send email to [email protected]
To unsubscribe from this group, send email to [EMAIL PROTECTED]
For more options, visit this group at 
http://groups.google.com/group/django-developers?hl=en
-~----------~----~----~----~------~----~------~--~---

>From nobody Mon Sep 17 00:00:00 2001
From: Michael Radziej <[EMAIL PROTECTED]>
Date: Wed Aug 1 12:44:09 2007 +0200
Subject: [PATCH] select related

select-related: Added capability to follow nullable foreign
   keys on explicit request (using outer joins)

---

 django/db/models/query.py |   94 ++++++++++++++++++++++++++++++---------------
 1 files changed, 63 insertions(+), 31 deletions(-)

base da04a2aa8c5fcb179b45fe38730e032ec7ea5c87
last 54aed2999701f8355ae50ea08322700f7b272de3
diff --git a/django/db/models/query.py b/django/db/models/query.py
index 207a220e36e85b8c72cda3eff8e7d4c67b73f106..50e71d7018031c9521c138dd8eff19888e783d3a 100644
--- a/django/db/models/query.py
+++ b/django/db/models/query.py
@@ -89,6 +89,11 @@ class _QuerySet(object):
         self._filters = Q()
         self._order_by = None        # Ordering, e.g. ('date', '-name'). If None, use model's ordering.
         self._select_related = False # Whether to fill cache for related objects.
+        self._follow_fields = ()     # Which fields to follow with select_related. It is Ellipsis (follow everything)
+                                       # or a list or tuple with field names. Instead of field names, it could be
+                                       # an element could be a tuple of (field_name, deeper_field_name1, deeper_field_name2 ...).
+                                       # deeper_field_names is then used to select which fields to follow within
+                                       # the related model.
         self._max_related_depth = 0  # Maximum "depth" for select_related
         self._distinct = False       # Whether the query should use SELECT DISTINCT.
         self._select = {}            # Dictionary of attname -> SQL.
@@ -206,8 +211,13 @@ class _QuerySet(object):
                 if has_resolve_columns:
                     row = self.resolve_columns(row, fields)
                 if fill_cache:
-                    obj, index_end = get_cached_row(klass=self.model, row=row,
-                                                    index_start=0, max_depth=self._max_related_depth)
+                    cache_row = get_cached_row(klass=self.model, row=row,
+                                                    index_start=0, max_depth=self._max_related_depth,
+                                                    follow_fields=self._follow_fields)
+                    if cache_row:
+                        obj, index_end = cache_row
+                    else:
+                        obj = self.model(*row[:index_end])
                 else:
                     obj = self.model(*row[:index_end])
                 for i, k in enumerate(extra_select):
@@ -418,9 +428,9 @@ class _QuerySet(object):
         else:
             return self._filter_or_exclude(None, **filter_obj)
 
-    def select_related(self, true_or_false=True, depth=0):
+    def select_related(self, true_or_false=True, depth=0, follow_fields=Ellipsis):
         "Returns a new QuerySet instance with '_select_related' modified."
-        return self._clone(_select_related=true_or_false, _max_related_depth=depth)
+        return self._clone(_select_related=true_or_false, _max_related_depth=depth, _follow_fields=follow_fields)
 
     def order_by(self, *field_names):
         "Returns a new QuerySet instance with the ordering changed."
@@ -478,6 +488,7 @@ class _QuerySet(object):
         c._filters = self._filters
         c._order_by = self._order_by
         c._select_related = self._select_related
+        c._follow_fields = self._follow_fields
         c._max_related_depth = self._max_related_depth
         c._distinct = self._distinct
         c._select = self._select.copy()
@@ -561,10 +572,11 @@ class _QuerySet(object):
 
         # Add additional tables and WHERE clauses based on select_related.
         if self._select_related:
-            fill_table_cache(opts, select, tables, where,
+            fill_table_cache(opts, select, tables, where, joins,
                              old_prefix=opts.db_table,
                              cache_tables_seen=[opts.db_table],
-                             max_depth=self._max_related_depth)
+                             max_depth=self._max_related_depth,
+                             follow_fields=self._follow_fields)
 
         # Add any additional SELECTs.
         if self._select:
@@ -631,6 +643,7 @@ class ValuesQuerySet(QuerySet):
         super(ValuesQuerySet, self).__init__(*args, **kwargs)
         # select_related isn't supported in values().
         self._select_related = False
+        self._follow_fields = ()
 
     def get_values_cursor(self):
         """Return a tuple (list of field names, fields, cursor)"""
@@ -851,9 +864,7 @@ class Q(object):
         return parse_lookup(self.kwargs.items(), opts)
 
     def join(self, prefix):
-        print "enter"
         kwargs = dict([("%s__%s" % (prefix, k), v) for (k,v) in self.kwargs.items()])
-        print kwargs
         return self.__class__(** kwargs)
 
 
@@ -922,24 +933,36 @@ def get_where_clause(lookup_type, table_prefix, field, field_name, value):
             raise NotImplementedError
     raise TypeError, "Got invalid lookup_type: %s" % repr(lookup_type)
 
-def get_cached_row(klass, row, index_start, max_depth=0, cur_depth=0):
+def _get_follow_fields(opts, follow_fields):
+    if follow_fields is Ellipsis:
+        for f in opts.fields:
+            if f.rel and not f.null:
+                yield (f, Ellipsis)
+    else:
+        for f in follow_fields:
+            if isinstance(f, (tuple, list)):
+                yield (opts.get_field(f[0]), f[1:])
+            else:
+                yield (opts.get_field(f), ())
+
+    
+def get_cached_row(klass, row, index_start, max_depth=0, cur_depth=0, follow_fields=Ellipsis):
     """Helper function that recursively returns an object with cache filled"""
 
     # If we've got a max_depth set and we've exceeded that depth, bail now.
     if max_depth and cur_depth > max_depth:
         return None
-
+    
     index_end = index_start + len(klass._meta.fields)
     obj = klass(*row[index_start:index_end])
-    for f in klass._meta.fields:
-        if f.rel and not f.null:
-            cached_row = get_cached_row(f.rel.to, row, index_end, max_depth, cur_depth+1)
-            if cached_row:
-                rel_obj, index_end = cached_row
-                setattr(obj, f.get_cache_name(), rel_obj)
+    for f, follow_deeper in _get_follow_fields(klass._meta, follow_fields):
+        cached_row = get_cached_row(f.rel.to, row, index_end, max_depth, cur_depth+1, follow_fields=follow_deeper)
+        if cached_row:
+            rel_obj, index_end = cached_row
+            setattr(obj, f.get_cache_name(), rel_obj)
     return obj, index_end
 
-def fill_table_cache(opts, select, tables, where, old_prefix, cache_tables_seen, max_depth=0, cur_depth=0):
+def fill_table_cache(opts, select, tables, where, joins, old_prefix, cache_tables_seen, max_depth=0, cur_depth=0, follow_fields=Ellipsis):
     """
     Helper function that recursively populates the select, tables and where (in
     place) for select_related queries.
@@ -948,22 +971,31 @@ def fill_table_cache(opts, select, tables, where, old_prefix, cache_tables_seen,
     # If we've got a max_depth set and we've exceeded that depth, bail now.
     if max_depth and cur_depth > max_depth:
         return None
+    
+    # If we've got to follow fields but there are none, bail now.
+    # (Empty sequence means: follow nothing. Ellipsis means: follow everything)
+    if not follow_fields:
+        return None
 
     qn = backend.quote_name
-    for f in opts.fields:
-        if f.rel and not f.null:
-            db_table = f.rel.to._meta.db_table
-            if db_table not in cache_tables_seen:
-                tables.append(qn(db_table))
-            else: # The table was already seen, so give it a table alias.
-                new_prefix = '%s%s' % (db_table, len(cache_tables_seen))
-                tables.append('%s %s' % (qn(db_table), qn(new_prefix)))
-                db_table = new_prefix
-            cache_tables_seen.append(db_table)
-            where.append('%s.%s = %s.%s' % \
-                (qn(old_prefix), qn(f.column), qn(db_table), qn(f.rel.get_related_field().column)))
-            select.extend(['%s.%s' % (qn(db_table), qn(f2.column)) for f2 in f.rel.to._meta.fields])
-            fill_table_cache(f.rel.to._meta, select, tables, where, db_table, cache_tables_seen, max_depth, cur_depth+1)
+
+    for f, follow_deeper in _get_follow_fields(opts, follow_fields):
+        db_table = f.rel.to._meta.db_table
+        if f.null:
+            join_type = "LEFT OUTER JOIN"
+        else:
+            join_type ="INNER JOIN"
+        if db_table not in cache_tables_seen and qn(db_table) not in tables:
+            join_alias = db_table
+        else: # The table was already seen, so give it a table alias.
+            join_alias = '%s%s' % (db_table, len(cache_tables_seen))
+        cache_tables_seen.append(db_table)
+        joins[join_alias] = (
+            db_table,
+            join_type,
+            '%s.%s = %s.%s' % (qn(old_prefix), qn(f.column), qn(join_alias), qn(f.rel.get_related_field().column)))
+        select.extend(['%s.%s' % (qn(db_table), qn(f2.column)) for f2 in f.rel.to._meta.fields])
+        fill_table_cache(f.rel.to._meta, select, tables, where, joins, db_table, cache_tables_seen, max_depth, cur_depth+1, follow_fields=follow_deeper)
 
 def parse_lookup(kwarg_items, opts):
     # Helper function that handles converting API kwargs
-- 
1.4.4.2

Reply via email to