Carl Karsten wrote:
> dabo Commit
> Revision 2560
> Date: 2006-12-17 14:51:05 -0800 (Sun, 17 Dec 2006)
> Author: Carl
> 
> Changed:
> U   trunk/dabo/db/dDataSet.py

Duh — I didn't mean to commit that one too.  I saw that it was queued up, and 
somehow thought "I need to remember only to commit dbMySQL.py", as if I would be 
given the choice.

The good news is that it was only a bunch of changes to comments outlining what I 
was planning on doing; but now the comments don't match the code, which is bad.

So how do I undo this?

> U   trunk/dabo/db/dbMySQL.py
> 
> Log:
> Prevents app from erroring when it encounters a BIT datatype.  Very much 
> a hack, and should probably be commented out until the BIT datatype is better 
> understood.
> Without it, you get this error:
>   File 
> "/usr/lib/python2.4/site-packages/dabo/ui/uiwx/dDataControlMixin.py", 
> line 48, in _coerceValue
>     val = int(val)
> ValueError: invalid literal for int(): []
> 
> 
> 
> Diff:
> Modified: trunk/dabo/db/dDataSet.py
> ===================================================================
> --- trunk/dabo/db/dDataSet.py 2006-12-17 22:24:33 UTC (rev 2559)
> +++ trunk/dabo/db/dDataSet.py 2006-12-17 22:51:05 UTC (rev 2560)
> @@ -25,7 +25,7 @@
>  SQLite: http://www.sqlite.org/download.html
>  pysqlite2: http://initd.org/tracker/pysqlite
>  
> -"""  
> +"""
>               sys.exit(msg)
>  
>  
> @@ -34,11 +34,11 @@
>       """ This class assumes that its contents are not ordinary tuples, but
>       rather tuples consisting of dicts, where the dict keys are field names.
>       This is the data structure returned by the dCursorMixin class.
> -     
> +
>       It is used to give these data sets the ability to be queried, joined, 
> etc.
>       This is accomplished by using SQLite in-memory databases. If SQLite
> -     and pysqlite2 are not installed on the machine this is run on, a 
> -     warning message will be printed out and the SQL functions will return 
> +     and pysqlite2 are not installed on the machine this is run on, a
> +     warning message will be printed out and the SQL functions will return
>       None. The data will still be usable, though.
>       """
>       def __init__(self, *args, **kwargs):
> @@ -61,34 +61,34 @@
>               # this dataset was derived from.
>               self._sourceDataSet = None
>               self._encoding = "utf8"
> -             
> +
>               # Register the converters
>               sqlite.register_converter("memento", self._convert_memento)
>               if _USE_DECIMAL:
>                       sqlite.register_converter("decimal", 
> self._convert_decimal)
> -     
> -             self._typeDict = {int: "integer", long: "integer", str: "text", 
> -                             unicode: "text", float: "real", datetime.date: 
> "date", 
> +
> +             self._typeDict = {int: "integer", long: "integer", str: "text",
> +                             unicode: "text", float: "real", datetime.date: 
> "date",
>                               datetime.datetime: "timestamp", dMemento : 
> "memento"}
>               if _USE_DECIMAL:
>                       self._typeDict[Decimal] = "decimal"
>  
> -     
> +
>       def __del__(self):
>               if self._cursor is not None:
>                       self._cursor.close()
>               if self._connection is not None:
>                       self._connection.close()
>  
> -     
> +
>       def _adapt_decimal(self, decVal):
>               """Converts the decimal value to a string for storage"""
>               return str(decVal)
> -     
> -     
> +
> +
>       def _convert_decimal(self, strval):
> -             """This is a converter routine. Takes the string 
> -             representation of a Decimal value and return an actual 
> +             """This is a converter routine. Takes the string
> +             representation of a Decimal value and return an actual
>               decimal, if that module is present. If not, returns a float.
>               """
>               if _USE_DECIMAL:
> @@ -96,36 +96,36 @@
>               else:
>                       ret = float(strval)
>               return ret
> -     
> -     
> +
> +
>       def _adapt_memento(self, mem):
>               """Substitutes a sequence for the memento for storage"""
>               pos = self._mementoSequence
>               self._mementoSequence += 1
>               self._mementoHold[pos] = mem
>               return str(pos)
> -     
> -     
> +
> +
>       def _convert_memento(self, strval):
>               """Replaces the placeholder sequence with the actual memento."""
>               pos = int(strval)
>               return self._mementoHold[pos]
> -     
> -     
> -     def replace(self, field, valOrExpr, scope=None):
> -             """Replaces the value of the specified field with the given 
> value
> -             or expression. All records matching the scope are affected; if
> +
> +
> +     def replace(self, field, Expr, scope=None):
> +             """Replaces the value of the specified field
> +        with the given expression.
> +
> +        All records matching the scope are affected; if
>               no scope is specified, all records are affected.
> -             
> -             'valOrExpr' will be treated as a literal value, unless it is 
> prefixed
> -             with an equals sign. All expressions will therefore be a string 
> -             beginning with '='. Literals can be of any type. 
> -             """
> +
> +             Scope is a boolean expression. """
> +
>               if scope is None:
>                       scope = "True"
>               else:
>                       scope = self._fldReplace(scope, "rec")
> -             
> +
>               literal = True
>               if isinstance(valOrExpr, basestring):
>                       if valOrExpr.strip()[0] == "=":
> @@ -139,28 +139,28 @@
>                               else:
>                                       expr = "rec['%s'] = %s" % (field, 
> valOrExpr)
>                                       exec(expr)
> -             
> -     
> +
> +
>       def sort(self, col, ascdesc=None, caseSensitive=None):
>               if ascdesc is None:
>                       ascdesc = "ASC"
>               casecollate = ""
>               if caseSensitive is False:
>                       # The default of None will be case-sensitive
> -                     casecollate = " COLLATE NOCASE "                
> +                     casecollate = " COLLATE NOCASE "
>               stmnt = "select * from dataset order by %s %s %s"
>               stmnt = stmnt % (col, casecollate, ascdesc)
>               ret = self.execute(stmnt)
>               # Sorting doesn't change the data, so preserve any source 
> dataset.
>               ret._sourceDataSet = self._sourceDataSet
>               return ret
> -     
> -     
> +
> +
>       def filter(self, fld, expr, op="="):
>               """This takes a field name, an expression, and an optional 
> operator,
>               and returns a dataset that is filtered on that field by that 
> expression.
> -             If the operator is specified, it will be used literally in the 
> evaluation 
> -             instead of the equals sign, unless it is one of the following 
> strings, 
> +             If the operator is specified, it will be used literally in the 
> evaluation
> +             instead of the equals sign, unless it is one of the following 
> strings,
>               which will be interpreted as indicated:
>                       eq, equals: =
>                       gt: >
> @@ -183,7 +183,7 @@
>                       "contains": " LIKE ? " }
>               if (expr in opDict) and (op not in opDict):
>                       # They sent the params in reverse order
> -                     op, expr = expr, op             
> +                     op, expr = expr, op
>               clause = opDict.get(op.lower(), " = ?")
>               oplow = op.lower()
>               if oplow in ("startswith", "beginswith"):
> @@ -198,38 +198,38 @@
>               ret = self.execute(stmnt, params=param)
>               ret._sourceDataSet = self
>               return ret
> -     
> -     
> +
> +
>       def filterByExpression(self, expr):
> -             """Allows you to filter by any expression that would make a 
> +             """Allows you to filter by any expression that would make a
>               valid 'where' clause in SQLite.
>               """
>               stmnt = "select * from dataset where %s" % (expr)
>               ret = self.execute(stmnt)
>               ret._sourceDataSet = self
>               return ret
> -     
> -     
> +
> +
>       def removeFilter(self):
>               """Remove the most recently applied filter."""
>               ret = self
>               if ret._sourceDataSet:
>                       ret = ret._sourceDataSet
>               return ret
> -     
> -     
> +
> +
>       def removeFilters(self):
>               """Remove all applied filters, going back to the original data 
> set."""
>               ret = self
>               while ret._sourceDataSet:
>                       ret = ret._sourceDataSet
>               return ret
> -             
>  
> +
>       def _fldReplace(self, expr, dictName=None):
>               """The list comprehensions require the field names be the keys
>               in a dictionary expression. Users, though, should not have to 
> know
> -             about this. This takes a user-defined, SQL-like expressions, 
> and 
> +             about this. This takes a user-defined, SQL-like expressions, and
>               substitutes any field name with the corresponding dict
>               expression.
>               """
> @@ -244,11 +244,11 @@
>                       if mtch:
>                               ret = mtch.groups()[0] + "%s['%s']" % 
> (dictName, kk) + mtch.groups()[1]
>               return ret
> -             
> -     
> +
> +
>       def _makeCreateTable(self, ds, alias=None):
>               """Makes the CREATE TABLE string needed to represent
> -             this data set. There must be at least one record in the 
> +             this data set. There must be at least one record in the
>               data set, or we can't get the necessary column info.
>               """
>               if len(ds) == 0:
> @@ -259,7 +259,7 @@
>               rec = ds[0]
>               keys = rec.keys()
>               retList = []
> -             
> +
>               for key in keys:
>                       if key.startswith("dabo-"):
>                               # This is an internal field
> @@ -273,8 +273,8 @@
>                       except KeyError:
>                               retList.append(safekey)
>               return "create table %s (%s)" % (alias, ", ".join(retList))
> -             
> -     
> +
> +
>       def _populate(self, ds, alias=None):
>               """This is the method that converts a Python dataset
>               into a SQLite table with the name specified by 'alias'.
> @@ -284,22 +284,22 @@
>                       alias = "dataset"
>               if len(ds) == 0:
>                       # Can't create and populate a table without a structure
> -                     dabo.errorLog.write(_("Cannot populate without data for 
> alias %s") 
> +                     dabo.errorLog.write(_("Cannot populate without data for 
> alias %s")
>                                       % alias)
>                       return None
>               if ds._populated:
>                       # Data's already there; no need to re-load it
>                       return
>               self._cursor.execute(self._makeCreateTable(ds, alias))
> -             
> +
>               flds, vals = ds[0].keys(), ds[0].values()
>               # Fields may contain illegal names. This will correct them
>               flds = [fld.replace("dabo-", "dabo_") for fld in flds]
>               fldParams = [":%s" % fld for fld in flds]
>               fldCnt = len(flds)
> -             insStmnt = "insert into %s (%s) values (%s)" % (alias, 
> +             insStmnt = "insert into %s (%s) values (%s)" % (alias,
>                               ", ".join(flds), ", ".join(fldParams))
> -             
> +
>               def recGenerator(ds):
>                       for rec in ds:
>                               yield rec
> @@ -307,20 +307,20 @@
>               self._cursor.executemany(insStmnt, recGenerator(ds))
>               if ds is self:
>                       self._populated = True
> -                     
>  
> +
>       def execute(self, sqlExpr, params=(), cursorDict=None):
>               """This method allows you to work with a Python data set
>               (i.e., a tuple of dictionaries) as if it were a SQL database. 
> You
>               can run any sort of statement that you can in a normal SQL
>               database. It requires that SQLite and pysqlite2 are installed;
>               if they aren't, this will return None.
> -             
> +
>               The SQL expression can be any standard SQL expression; however,
> -             the FROM clause should always be: 'from dataset', since these 
> +             the FROM clause should always be: 'from dataset', since these
>               datasets do not have table names.
> -             
> -             If you want to do multi-dataset joins, you need to pass the 
> +
> +             If you want to do multi-dataset joins, you need to pass the
>               additional DataSet objects in a dictionary, where the value is 
> the
>               DataSet, and the key is the alias used to reference that DataSet
>               in your join statement.
> @@ -337,47 +337,47 @@
>                               self.row_factory = dict_factory
>  
>               if self._connection is None:
> -                     self._connection = sqlite.connect(":memory:", 
> -                                     
> detect_types=(sqlite.PARSE_DECLTYPES|sqlite.PARSE_COLNAMES), 
> +                     self._connection = sqlite.connect(":memory:",
> +                                     
> detect_types=(sqlite.PARSE_DECLTYPES|sqlite.PARSE_COLNAMES),
>                                       isolation_level="EXCLUSIVE")
>                       # Set to default encoding
>                       self.Encoding = self._encoding
>               if self._cursor is None:
>                       self._cursor = 
> self._connection.cursor(factory=DictCursor)
> -             
> +
>  #            import time
>  #            st = time.clock()
>  #            print "starting"
>  
>               # Create the table for this dDataSet
>               self._populate(self, "dataset")
> -             
> +
>  #            pt = time.clock()
>  #            print "POPULATED", pt-st
>               # Now create any of the tables for the join dDataSets
>               if cursorDict is not None:
>                       for alias, ds in cursorDict.items():
>                               self._populate(ds, alias)
> -                             
> +
>               # We have a table now with the necessary data. Run the query!
>               if params and not isinstance(params, tuple):
>                       params = (params, )
>               self._cursor.execute(sqlExpr, params)
> -             
> +
>  #            et = time.clock()
>  #            print "EXECUTED", et - pt
>               # We need to know what sort of statement was run. Only a 
> 'select'
>               # will return results. The rest ('update', 'delete', 'insert') 
> return
> -             # nothing. In those cases, we need to run a 'select *' to get 
> the 
> +             # nothing. In those cases, we need to run a 'select *' to get 
> the
>               # modified data set.
>               if not sqlExpr.lower().strip().startswith("select "):
>                       self._cursor.execute("select * from dataset")
>               tmpres = self._cursor.fetchall()
> -             
> +
>  #            ft = time.clock()
>  #            print "FETCH", ft-et
>               return dDataSet(tmpres)
> -             
> +
>  #            res = []
>  #            if tmpres:
>  #                    # There will be no description if there are no records.
> @@ -390,7 +390,7 @@
>  #                                            fld = self.fieldAliases[fld]
>  #                                    rec[fld] = val
>  #                            res.append(rec)
> -#            
> +#
>  #            dt = time.clock()
>  #            print "CONVERTED", dt-ft
>  
> @@ -411,7 +411,7 @@
>  
>       def _getEncoding(self):
>               return self._encoding
> -     
> +
>       def _setEncoding(self, encoding):
>               self._encoding = encoding
>               try:
> @@ -420,14 +420,14 @@
>                       # The connection has not yet been set, but it will once
>                       # queries is executed
>                       pass
> -     
> -     
> +
> +
>       Bizobj = property(_getBizobj, _setBizobj, None,
>                       _("Reference to the bizobj that 'owns' this data set. 
> Default=None  (bizobj)"))
> -     
> +
>       Cursor = property(_getCursor, _setCursor, None,
>                       _("Reference to the bizobj that 'owns' this data set. 
> Default=None  (dCursorMixin)"))
> -     
> +
>       Encoding = property(_getEncoding, _setEncoding, None,
>                       _(" The encoding used for data in the dataset.  (str)"))
>  
> @@ -439,15 +439,15 @@
>  #    rather tuples consisting of dicts, where the dict keys are field names.
>  #    This is the data structure returned by the dCursorMixin class.
>  #    """
> -#    # List comprehensions used in this class require a non-conflicting 
> +#    # List comprehensions used in this class require a non-conflicting
>  #    # name. This is unlikely to be used anywhere else.
>  #    _dictSubName = "_dataSet_rec"
> -#    
> -#    
> +#
> +#
>  #    def _fldReplace(self, expr, dictName=None):
>  #            """The list comprehensions require the field names be the keys
>  #            in a dictionary expression. Users, though, should not have to 
> know
> -#            about this. This takes a user-defined, SQL-like expressions, 
> and 
> +#            about this. This takes a user-defined, SQL-like expressions, and
>  #            substitutes any field name with the corresponding dict
>  #            expression.
>  #            """
> @@ -462,8 +462,8 @@
>  #                    if mtch:
>  #                            ret = mtch.groups()[0] + "%s['%s']" % 
> (dictName, kk) + mtch.groups()[1]
>  #            return ret
> -#            
> -#    
> +#
> +#
>  #    def processFields(self, fields, aliasDict):
>  #            if isinstance(fields, basestring):
>  #                    fields = fields.split(",")
> @@ -476,8 +476,8 @@
>  #                            aliasDict[fld] = fa[1].strip()
>  #                    fields[num] = fld
>  #            return fields, aliasDict
> -# 
> -# 
> +#
> +#
>  #    def select(self, fields=None, where=None, orderBy=None):
>  #            fldList = []
>  #            fldAliases = {}
> @@ -492,7 +492,7 @@
>  #                    fldList.append("'%s' : %s" % (fld, 
> self._fldReplace(fld)))
>  #            fieldsToReturn = ", ".join(fldList)
>  #            fieldsToReturn = "{%s}" % fieldsToReturn
> -#            
> +#
>  #            # Where list elements
>  #            if where is None:
>  #                    whereClause = ""
> @@ -503,12 +503,12 @@
>  #                            whereList.append(self._fldReplace(wh))
>  #                    whereClause = " and ".join(whereList)
>  #            if whereClause:
> -#                    whereClause = " if %s" % whereClause            
> +#                    whereClause = " if %s" % whereClause
>  #            stmnt = "[%s for %s in self %s]" % (fieldsToReturn, 
> self._dictSubName, whereClause)
>  #            resultSet = eval(stmnt)
> -#            
> +#
>  #            if fldAliases:
> -#                    # We need to replace the keys for the field names with 
> the 
> +#                    # We need to replace the keys for the field names with 
> the
>  #                    # appropriate alias names
>  #                    for rec in resultSet:
>  #                            for key, val in fldAliases.items():
> @@ -516,17 +516,17 @@
>  #                                    if orig:
>  #                                            rec[val] = orig
>  #                                            del rec[key]
> -#                                    
> +#
>  #            if orderBy:
>  #                    # This should be a comma separated string in the format:
>  #                    #               fld1, fld2 desc, fld3 asc
> -#                    # After the field name is an optional direction, either 
> 'asc' 
> +#                    # After the field name is an optional direction, either 
> 'asc'
>  #                    # (ascending, default) or 'desc' (descending).
> -#                    # IMPORTANT! Fields referenced in 'orderBy' MUST be in 
> +#                    # IMPORTANT! Fields referenced in 'orderBy' MUST be in
>  #                    # the result data set!
>  #                    orderByList = orderBy.split(",")
>  #                    sortList = []
> -#                    
> +#
>  #                    def orderBySort(val1, val2):
>  #                            ret = 0
>  #                            compList = orderByList[:]
> @@ -551,25 +551,25 @@
>  #                                    elif eval1 < eval2:
>  #                                            ret = compVals[1]
>  #                                    else:
> -#                                            # They are equal. Continue 
> comparing using the 
> +#                                            # They are equal. Continue 
> comparing using the
>  #                                            # remaining terms in compList, 
> if any.
>  #                                            if not compList:
>  #                                                    break
>  #                            return ret
> -#            
> +#
>  #                    resultSet.sort(orderBySort)
> -#            
> +#
>  #            return DataSet(resultSet)
> -# 
> -# 
> +#
> +#
>  #    def join(self, target, sourceAlias, targetAlias, condition,
> -#                    sourceFields=None, targetFields=None, where=None, 
> +#                    sourceFields=None, targetFields=None, where=None,
>  #                    orderBy=None, joinType=None):
> -#            """This method joins the current DataSet and the target 
> +#            """This method joins the current DataSet and the target
>  #            DataSet, based on the specified condition. The 'joinType'
>  #            parameter will determine the type of join (inner, left, right, 
> full).
>  #            Where and orderBy will affect the result of the join, and so 
> they
> -#            should reference fields in the result set without alias 
> qualifiers.             
> +#            should reference fields in the result set without alias 
> qualifiers.
>  #            """
>  #            if joinType is None:
>  #                    joinType = "inner"
> @@ -581,7 +581,7 @@
>  #                    tmp = joinType.split()
>  #                    tmp.remove("outer")
>  #                    joinType = tmp[0]
> -#            
> +#
>  #            leftDS = self
>  #            rightDS = target
>  #            leftAlias = sourceAlias
> @@ -592,27 +592,27 @@
>  #            rightFldAliases = {}
>  #            if joinType == "right":
>  #                    # Same as left; we just need to reverse things
> -#                    (leftDS, rightDS, leftAlias, rightAlias, leftFields, 
> -#                                    rightFields) = (rightDS, leftDS, 
> rightAlias, leftAlias, 
> +#                    (leftDS, rightDS, leftAlias, rightAlias, leftFields,
> +#                                    rightFields) = (rightDS, leftDS, 
> rightAlias, leftAlias,
>  #                                    rightFields, leftFields)
> -#            
> -#            
> +#
> +#
>  #            leftFields, leftFldAliases = self.processFields(leftFields, 
> leftFldAliases)
>  #            rightFields, rightFldAliases = self.processFields(rightFields, 
> rightFldAliases)
> -# 
> -#            # Parse the condition. It should have an '==' in it. If not, 
> +#
> +#            # Parse the condition. It should have an '==' in it. If not,
>  #            # raise an error.
>  #            condList = condition.split("==")
>  #            if len(condList) == 1:
>  #                    # No equality specified
>  #                    errMsg = _("Bad join: no '==' in join condition: %s") % 
> condition
>  #                    raise dException.QueryException, errMsg
> -#            
> +#
>  #            leftCond = None
>  #            rightCond = None
>  #            leftPat = "(.*)(\\b%s\\b)(.*)" % leftAlias
>  #            rightPat = "(.*)(\\b%s\\b)(.*)" % rightAlias
> -#            
> +#
>  #            mtch = re.match(leftPat, condList[0])
>  #            if mtch:
>  #                    leftCond = condList[0].strip()
> @@ -636,7 +636,7 @@
>  #                    condError += _("No join condition specified for alias 
> '%s'") % rightAlias
>  #            if condError:
>  #                    raise dException.QueryException, condError
> -#            
> +#
>  #            # OK, we now know how to do the join. The plan is this:
>  #            #       create an empty result list
>  #            #       scan through all the left records
> @@ -650,9 +650,9 @@
>  #            #                       else:
>  #            #                               get dict.fromkeys() for right 
> select
>  #            #                               update left with fromkeys and 
> add to result
> -#            #       
> +#            #
>  #            #       We'll worry about full joins later.
> -# 
> +#
>  #            resultSet = []
>  #            for leftRec in leftDS:
>  #                    if leftFields:
> @@ -662,12 +662,12 @@
>  #                    tmpLeftCond = leftCond.replace(leftAlias, "leftRec")
>  #                    tmpLeftCond = "%s['%s']" % tuple(tmpLeftCond.split("."))
>  #                    leftVal = eval(tmpLeftCond)
> -#                    
> +#
>  #                    if isinstance(leftVal, basestring):
>  #                            leftVal = "'%s'" % leftVal
>  #                    rightWhere = rightCond.replace(rightAlias + ".", "") + 
> "== %s" % leftVal
>  #                    rightRecs = rightDS.select(fields=rightFields, 
> where=rightWhere)
> -# 
> +#
>  #                    if rightRecs:
>  #                            for rightRec in rightRecs:
>  #                                    rightRec.update(leftSelect)
> @@ -677,7 +677,7 @@
>  #                                    rightKeys = 
> rightDS.select(fields=rightFields)[0].keys()
>  #                                    
> leftSelect.update(dict.fromkeys(rightKeys))
>  #                                    resultSet.append(leftSelect)
> -#            
> +#
>  #            resultSet = DataSet(resultSet)
>  #            if where or orderBy:
>  #                    resultSet = resultSet.select(where=where, 
> orderBy=orderBy)
> @@ -690,7 +690,7 @@
>                               {"name" : "Dan Leafe", "age" : 14, "coder" :  
> False, "color": "green"},
>                               {"name" : "Paul McNett", "age" : 38, "coder" :  
> True, "color": "red"}]
>       ds = dDataSet(data)
> -     
> +
>       newDS = ds.execute("select name, age from dataset where age > 30")
>       print "Over 30:"
>       for rec in newDS:
> 
> Modified: trunk/dabo/db/dbMySQL.py
> ===================================================================
> --- trunk/dabo/db/dbMySQL.py  2006-12-17 22:24:33 UTC (rev 2559)
> +++ trunk/dabo/db/dbMySQL.py  2006-12-17 22:51:05 UTC (rev 2560)
> @@ -160,7 +160,8 @@
>                       if i[0] != "_":
>                               v = getattr(ftypes, i)
>                               typeMapping[v] = i
> -             
> +             # typeMapping[16]='BIT'
> +
>               daboMapping = {"BIT": "I",
>                               "BLOB": "M",
>                               "CHAR": "C",
> 
> 
> 
> 
> _______________________________________________
> Post Messages to: [email protected]
> Subscription Maintenance: http://leafe.com/mailman/listinfo/dabo-dev
> 
> 

_______________________________________________
Post Messages to: [email protected]
Subscription Maintenance: http://leafe.com/mailman/listinfo/dabo-dev

Reply via email to