Hardik Sanchawat (OpenERP) has proposed merging 
lp:~openerp-commiter/openobject-addons/trunk-exception-warning-imp-dbr-document-hsa
 into lp:~openerp-dev/openobject-addons/trunk-exception-warning-imp-dbr.

Requested reviews:
  Kuldeep Joshi(OpenERP) (kjo-openerp)

For more details, see:
https://code.launchpad.net/~openerp-commiter/openobject-addons/trunk-exception-warning-imp-dbr-document-hsa/+merge/114811

Hello,

I updated the warning messages in the following document modules:
document
document_ftp
document_webdav

Thanks
-hsa
-- 
https://code.launchpad.net/~openerp-commiter/openobject-addons/trunk-exception-warning-imp-dbr-document-hsa/+merge/114811
Your team OpenERP R&D Team is subscribed to branch 
lp:~openerp-dev/openobject-addons/trunk-exception-warning-imp-dbr.
=== modified file 'document/content_index.py'
--- document/content_index.py	2012-06-22 06:48:39 +0000
+++ document/content_index.py	2012-07-13 09:00:10 +0000
@@ -93,13 +93,13 @@
             except NhException:
                 pass
 
-        raise NhException('No appropriate method to index file')
+        raise NhException('No appropriate method to index file!')
 
     def _doIndexContent(self,content):
-        raise NhException("Content not handled here")
+        raise NhException("Content not handled here!")
 
     def _doIndexFile(self,fpath):
-        raise NhException("Content not handled here")
+        raise NhException("Content not handled here!")
 
     def __repr__(self):
         return "<indexer %s.%s>" %(self.__module__, self.__class__.__name__)
@@ -134,7 +134,7 @@
         if f:
             _logger.debug('Register content indexer: %r', obj)
         if not f:
-            raise Exception("Your indexer should at least suport a mimetype or extension")
+            raise Exception("Your indexer should at least support a mimetype or extension.")
 
     def doIndex(self, content, filename=None, content_type=None, realfname = None, debug=False):
         fobj = None

=== modified file 'document/document.py'
--- document/document.py	2012-06-26 20:42:50 +0000
+++ document/document.py	2012-07-13 09:00:10 +0000
@@ -61,7 +61,7 @@
             return False
 
         if ids is not None:
-            raise NotImplementedError("Ids is just there by convention! Don't use it yet, please.")
+            raise NotImplementedError("Ids is just there by convention, please do not use it yet.")
 
         cr.execute("UPDATE ir_attachment " \
                     "SET parent_id = %s, db_datas = decode(encode(db_datas,'escape'), 'base64') " \

=== modified file 'document/document_storage.py'
--- document/document_storage.py	2012-06-25 09:06:19 +0000
+++ document/document_storage.py	2012-07-13 09:00:10 +0000
@@ -189,7 +189,7 @@
             StringIO.__init__(self, None)
         else:
             _logger.error("Incorrect mode %s specified", mode)
-            raise IOError(errno.EINVAL, "Invalid file mode")
+            raise IOError(errno.EINVAL, "Invalid file mode!")
         self.mode = mode
 
     def size(self):
@@ -269,7 +269,7 @@
             StringIO.__init__(self, None)
         else:
             _logger.error("Incorrect mode %s specified", mode)
-            raise IOError(errno.EINVAL, "Invalid file mode")
+            raise IOError(errno.EINVAL, "Invalid file mode!")
         self.mode = mode
 
     def size(self):
@@ -317,7 +317,7 @@
                     (base64.encodestring(data), len(data), par.file_id))
             cr.commit()
         except Exception:
-            _logger.exception('Cannot update db file #%d for close:', par.file_id)
+            _logger.exception('Cannot update db file #%d for close!', par.file_id)
             raise
         finally:
             cr.close()
@@ -401,10 +401,10 @@
         #     self._logger.debug('Npath: %s', npath)
         for n in npath:
             if n == '..':
-                raise ValueError("Invalid '..' element in path")
+                raise ValueError("Invalid '..' element in path!")
             for ch in ('*', '|', "\\", '/', ':', '"', '<', '>', '?',):
                 if ch in n:
-                    raise ValueError("Invalid char %s in path %s" %(ch, n))
+                    raise ValueError("Invalid char %s in path %s!" %(ch, n))
         dpath = [store_path,]
         dpath += npath[:-1]
         path = os.path.join(*dpath)
@@ -420,7 +420,7 @@
         """
         boo = self.browse(cr, uid, id, context=context)
         if not boo.online:
-            raise IOError(errno.EREMOTE, 'medium offline')
+            raise IOError(errno.EREMOTE, 'Medium offline!')
         
         if fil_obj:
             ira = fil_obj
@@ -435,10 +435,10 @@
             context = {}
         boo = self.browse(cr, uid, id, context=context)
         if not boo.online:
-            raise IOError(errno.EREMOTE, 'medium offline')
+            raise IOError(errno.EREMOTE, 'Medium offline!')
         
         if boo.readonly and mode not in ('r', 'rb'):
-            raise IOError(errno.EPERM, "Readonly medium")
+            raise IOError(errno.EPERM, "Readonly medium!")
         
         ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
         if boo.type == 'filestore':
@@ -448,7 +448,7 @@
                 if mode in ('r','r+'):
                     if ira.file_size:
                         _logger.warning( "ir.attachment #%d does not have a filename, but is at filestore, fix it!" % ira.id)
-                    raise IOError(errno.ENOENT, 'No file can be located')
+                    raise IOError(errno.ENOENT, 'No file can be located!')
                 else:
                     store_fname = self.__get_random_fname(boo.path)
                     cr.execute('UPDATE ir_attachment SET store_fname = %s WHERE id = %s',
@@ -478,10 +478,10 @@
             return nodefd_file(file_node, path=fpath, mode=mode)
 
         elif boo.type == 'virtual':
-            raise ValueError('Virtual storage does not support static files')
+            raise ValueError('Virtual storage does not support static file(s).')
         
         else:
-            raise TypeError("No %s storage" % boo.type)
+            raise TypeError("No %s storage!" % boo.type)
 
     def __get_data_3(self, cr, uid, boo, ira, context):
         if boo.type == 'filestore':
@@ -524,10 +524,10 @@
                 raise IOError(errno.ENOENT, "File not found: %s" % fpath)
 
         elif boo.type == 'virtual':
-            raise ValueError('Virtual storage does not support static files')
+            raise ValueError('Virtual storage does not support static file(s).')
 
         else:
-            raise TypeError("No %s storage" % boo.type)
+            raise TypeError("No %s storage!" % boo.type)
 
     def set_data(self, cr, uid, id, file_node, data, context=None, fil_obj=None):
         """ store the data.
@@ -541,10 +541,10 @@
             ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
 
         if not boo.online:
-            raise IOError(errno.EREMOTE, 'medium offline')
+            raise IOError(errno.EREMOTE, 'Medium offline!')
         
         if boo.readonly:
-            raise IOError(errno.EPERM, "Readonly medium")
+            raise IOError(errno.EPERM, "Readonly medium!")
 
         _logger.debug( "Store data for ir.attachment #%d" % ira.id)
         store_fname = None
@@ -565,7 +565,7 @@
                 # TODO Here, an old file would be left hanging.
 
             except Exception, e:
-                _logger.warning( "Couldn't save data to %s", path, exc_info=True)
+                _logger.warning( "Cannot save data to %s.", path, exc_info=True)
                 raise except_orm(_('Error!'), str(e))
         elif boo.type == 'db':
             filesize = len(data)
@@ -593,14 +593,14 @@
                 store_fname = os.path.join(*npath)
                 # TODO Here, an old file would be left hanging.
             except Exception,e :
-                _logger.warning("Couldn't save data:", exc_info=True)
+                _logger.warning("Cannot save data:", exc_info=True)
                 raise except_orm(_('Error!'), str(e))
 
         elif boo.type == 'virtual':
-            raise ValueError('Virtual storage does not support static files')
+            raise ValueError('Virtual storage does not support static file(s).')
 
         else:
-            raise TypeError("No %s storage" % boo.type)
+            raise TypeError("No %s storage!" % boo.type)
 
         # 2nd phase: store the metadata
         try:
@@ -629,7 +629,7 @@
             file_node.content_type = mime
             return True
         except Exception, e :
-            self._logger.warning("Couldn't save data:", exc_info=True)
+            self._logger.warning("Cannot save data:", exc_info=True)
             # should we really rollback once we have written the actual data?
             # at the db case (only), that rollback would be safe
             raise except_orm(_('Error at doc write!'), str(e))
@@ -639,10 +639,10 @@
         files that have to be removed, too. """
 
         if not storage_bo.online:
-            raise IOError(errno.EREMOTE, 'medium offline')
+            raise IOError(errno.EREMOTE, 'Medium offline!')
         
         if storage_bo.readonly:
-            raise IOError(errno.EPERM, "Readonly medium")
+            raise IOError(errno.EPERM, "Readonly medium!")
 
         if storage_bo.type == 'filestore':
             fname = fil_bo.store_fname
@@ -659,7 +659,7 @@
             path = storage_bo.path
             return ( storage_bo.id, 'file', os.path.join(path, fname))
         else:
-            raise TypeError("No %s storage" % storage_bo.type)
+            raise TypeError("No %s storage!" % storage_bo.type)
 
     def do_unlink(self, cr, uid, unres):
         for id, ktype, fname in unres:
@@ -667,9 +667,9 @@
                 try:
                     os.unlink(fname)
                 except Exception:
-                    _logger.warning("Could not remove file %s, please remove manually.", fname, exc_info=True)
+                    _logger.warning("Cannot remove file %s, please remove manually.", fname, exc_info=True)
             else:
-                _logger.warning("Unknown unlink key %s" % ktype)
+                _logger.warning("Unknown unlink key %s." % ktype)
 
         return True
 
@@ -699,9 +699,9 @@
             fname = ira.store_fname
 
             if not fname:
-                _logger.warning("Trying to rename a non-stored file")
+                _logger.warning("Trying to rename a non-stored file.")
             if fname != os.path.join(*npath):
-                _logger.warning("inconsistency in realstore: %s != %s" , fname, repr(npath))
+                _logger.warning("Inconsistency in realstore: %s != %s." , fname, repr(npath))
 
             oldpath = os.path.join(path, npath[-1])
             newpath = os.path.join(path, new_name)
@@ -711,7 +711,7 @@
             store_fname = os.path.join(*store_path)
             return { 'name': new_name, 'datas_fname': new_name, 'store_fname': store_fname }
         else:
-            raise TypeError("No %s storage" % sbro.type)
+            raise TypeError("No %s storage!" % sbro.type)
 
     def simple_move(self, cr, uid, file_node, ndir_bro, context=None):
         """ A preparation for a file move.
@@ -739,8 +739,8 @@
                 break
             par = par.parent_id
         if file_node.storage_id != psto:
-            _logger.debug('Cannot move file %r from %r to %r', file_node, file_node.parent, ndir_bro.name)
-            raise NotImplementedError('Cannot move files between storage media')
+            _logger.debug('Cannot move file %r from %r to %r.', file_node, file_node.parent, ndir_bro.name)
+            raise NotImplementedError('Cannot move file(s) between storage media.')
 
         if sbro.type in ('filestore', 'db', 'db64'):
             # nothing to do for a rename, allow to change the db field
@@ -752,9 +752,9 @@
             fname = ira.store_fname
 
             if not fname:
-                _logger.warning("Trying to rename a non-stored file")
+                _logger.warning("Trying to rename a non-stored file.")
             if fname != os.path.join(*opath):
-                _logger.warning("inconsistency in realstore: %s != %s" , fname, repr(opath))
+                _logger.warning("Inconsistency in realstore: %s != %s." , fname, repr(opath))
 
             oldpath = os.path.join(path, opath[-1])
             
@@ -762,12 +762,12 @@
             npath = filter(lambda x: x is not None, npath)
             newdir = os.path.join(*npath)
             if not os.path.isdir(newdir):
-                _logger.debug("Must create dir %s", newdir)
+                _logger.debug("Must create dir %s.", newdir)
                 os.makedirs(newdir)
             npath.append(opath[-1])
             newpath = os.path.join(*npath)
             
-            _logger.debug("Going to move %s from %s to %s", opath[-1], oldpath, newpath)
+            _logger.debug("Going to move %s from %s to %s.", opath[-1], oldpath, newpath)
             shutil.move(oldpath, newpath)
             
             store_path = npath[1:] + [opath[-1],]
@@ -775,7 +775,7 @@
             
             return { 'store_fname': store_fname }
         else:
-            raise TypeError("No %s storage" % sbro.type)
+            raise TypeError("No %s storage!" % sbro.type)
 
 
 document_storage()

=== modified file 'document/nodes.py'
--- document/nodes.py	2012-06-22 07:25:45 +0000
+++ document/nodes.py	2012-07-13 09:00:10 +0000
@@ -271,7 +271,7 @@
         return False
 
     def get_data(self,cr):
-        raise TypeError('no data for %s'% self.type)
+        raise TypeError('No data for %s.'% self.type)
 
     def open_data(self, cr, mode):
         """ Open a node_descriptor object for this node.
@@ -285,10 +285,10 @@
         For this class, there is no data, so no implementation. Each
         child class that has data should override this.
         """
-        raise TypeError('no data for %s' % self.type)
+        raise TypeError('No data for %s.' % self.type)
 
     def _get_storage(self,cr):
-        raise RuntimeError("no storage for base class")
+        raise RuntimeError("No storage for base class.")
 
     def get_etag(self,cr):
         """ Get a tag, unique per object + modification.
@@ -327,7 +327,7 @@
         if self.DAV_M_NS.has_key(ns):
             prefix = self.DAV_M_NS[ns]
         else:
-            _logger.debug('No namespace: %s ("%s")',ns, prop)
+            _logger.debug('No namespace: %s ("%s").',ns, prop)
             return None
 
         mname = prefix + "_" + prop.replace('-','_')
@@ -340,7 +340,7 @@
             r = m(cr)
             return r
         except AttributeError:
-            _logger.debug('Property %s not supported' % prop, exc_info=True)
+            _logger.debug('Property %s not supported.' % prop, exc_info=True)
         return None
 
     def get_dav_resourcetype(self, cr):
@@ -384,13 +384,13 @@
         """ Create a regular file under this node
         """
         _logger.warning("Attempted to create a file under %r, not possible.", self)
-        raise IOError(errno.EPERM, "Not allowed to create files here")
+        raise IOError(errno.EPERM, "Not allowed to create file(s) here.")
     
     def create_child_collection(self, cr, objname):
         """ Create a child collection (directory) under self
         """
         _logger.warning("Attempted to create a collection under %r, not possible.", self)
-        raise IOError(errno.EPERM, "Not allowed to create folders here")
+        raise IOError(errno.EPERM, "Not allowed to create folder(s) here.")
 
     def rm(self, cr):
         raise NotImplementedError(repr(self))
@@ -423,9 +423,9 @@
             perms = pe2
         elif isinstance(perms, int):
             if perms < 0 or perms > 15:
-                raise ValueError("Invalid permission bits")
+                raise ValueError("Invalid permission bits.")
         else:
-            raise ValueError("Invalid permission attribute")
+            raise ValueError("Invalid permission attribute.")
         
         return ((self.uidperms & perms) == perms)
 
@@ -465,7 +465,7 @@
             is_allowed = self.check_perms(5)
         
         if not is_allowed:
-            raise IOError(errno.EPERM, "Permission into directory denied")
+            raise IOError(errno.EPERM, "Permission into directory denied.")
 
         if domain:
             where = where + domain
@@ -569,7 +569,7 @@
         
         is_allowed = self.check_perms(nodename and 1 or 5)
         if not is_allowed:
-            raise IOError(errno.EPERM, "Permission into directory denied")
+            raise IOError(errno.EPERM, "Permission into directory denied.")
 
         cntobj = self.context._dirobj.pool.get('document.directory.content')
         uid = self.context.uid
@@ -597,7 +597,7 @@
             is_allowed = self.check_perms(5)
         
         if not is_allowed:
-            raise IOError(errno.EPERM, "Permission into directory denied")
+            raise IOError(errno.EPERM, "Permission into directory denied.")
 
         if not domain:
             domain = []
@@ -633,7 +633,7 @@
         if not directory:
             raise OSError(2, 'Not such file or directory.')
         if not self.check_perms('u'):
-            raise IOError(errno.EPERM,"Permission denied")
+            raise IOError(errno.EPERM,"Permission denied.")
 
         if directory._table_name=='document.directory':
             if self.children(cr):
@@ -646,7 +646,7 @@
     def create_child_collection(self, cr, objname):
         object2 = False
         if not self.check_perms(2):
-            raise IOError(errno.EPERM,"Permission denied")
+            raise IOError(errno.EPERM,"Permission denied.")
 
         dirobj = self.context._dirobj
         uid = self.context.uid
@@ -672,7 +672,7 @@
             Return the node_* created
         """
         if not self.check_perms(2):
-            raise IOError(errno.EPERM,"Permission denied")
+            raise IOError(errno.EPERM,"Permission denied.")
 
         dirobj = self.context._dirobj
         uid = self.context.uid
@@ -702,10 +702,10 @@
             Note /may/ be called with ndir_node = None, to rename the document root.
         """
         if ndir_node and (ndir_node.context != self.context):
-            raise NotImplementedError("Cannot move directories between contexts")
+            raise NotImplementedError("Cannot move directories between contexts.")
 
         if (not self.check_perms('u')) or (not ndir_node.check_perms('w')):
-            raise IOError(errno.EPERM,"Permission denied")
+            raise IOError(errno.EPERM,"Permission denied.")
 
         dir_obj = self.context._dirobj
         if not fil_obj:
@@ -725,12 +725,12 @@
 
         if self.parent != ndir_node:
             _logger.debug('Cannot move dir %r from %r to %r', self, self.parent, ndir_node)
-            raise NotImplementedError('Cannot move dir to another dir')
+            raise NotImplementedError('Cannot move dir to another dir.')
 
         ret = {}
         if new_name and (new_name != dbro.name):
             if ndir_node.child(cr, new_name):
-                raise IOError(errno.EEXIST, "Destination path already exists")
+                raise IOError(errno.EEXIST, "Destination path already exists!")
             ret['name'] = new_name
 
         del dbro
@@ -845,7 +845,7 @@
             is_allowed = self.check_perms(5)
 
         if not is_allowed:
-            raise IOError(errno.EPERM,"Permission denied")
+            raise IOError(errno.EPERM,"Permission denied.")
 
         # print "Where clause for %s" % self.res_model, where
         if self.ressource_tree:
@@ -962,7 +962,7 @@
         res = []
         is_allowed = self.check_perms((nodename and 1) or 5)
         if not is_allowed:
-            raise IOError(errno.EPERM,"Permission denied")
+            raise IOError(errno.EPERM,"Permission denied.")
 
         cntobj = self.context._dirobj.pool.get('document.directory.content')
         uid = self.context.uid
@@ -1016,7 +1016,7 @@
 
         is_allowed = self.check_perms((name and 1) or 5)
         if not is_allowed:
-            raise IOError(errno.EPERM,"Permission denied")
+            raise IOError(errno.EPERM,"Permission denied.")
 
         uid = self.context.uid
         ctx = self.context.context.copy()
@@ -1103,7 +1103,7 @@
         dirobj = self.context._dirobj
         is_allowed = self.check_perms(2)
         if not is_allowed:
-            raise IOError(errno.EPERM,"Permission denied")
+            raise IOError(errno.EPERM,"Permission denied.")
 
         uid = self.context.uid
         ctx = self.context.context.copy()
@@ -1135,7 +1135,7 @@
         """
         is_allowed = self.check_perms(2)
         if not is_allowed:
-            raise IOError(errno.EPERM,"Permission denied")
+            raise IOError(errno.EPERM,"Permission denied.")
 
         dirobj = self.context._dirobj
         uid = self.context.uid
@@ -1215,7 +1215,7 @@
         stor = self.storage_id
         assert stor, "No storage for file #%s" % self.file_id
         if not self.check_perms(4):
-            raise IOError(errno.EPERM, "Permission denied")
+            raise IOError(errno.EPERM, "Permission denied.")
 
         # If storage is not set properly, we are just screwed here, don't
         # try to get it from default.
@@ -1225,7 +1225,7 @@
     def rm(self, cr):
         uid = self.context.uid
         if not self.check_perms(8):
-            raise IOError(errno.EPERM, "Permission denied")
+            raise IOError(errno.EPERM, "Permission denied.")
         document_obj = self.context._dirobj.pool.get('ir.attachment')
         if self.type in ('collection','database'):
             return False
@@ -1271,7 +1271,7 @@
         stor = self.storage_id
         assert stor, "No storage for file #%s" % self.file_id
         if not self.check_perms(4):
-            raise IOError(errno.EPERM, "Permission denied")
+            raise IOError(errno.EPERM, "Permission denied.")
 
         # If storage is not set properly, we are just screwed here, don't
         # try to get it from default.
@@ -1294,7 +1294,7 @@
         stor = self.storage_id
         assert stor, "No storage for file #%s" % self.file_id
         if not self.check_perms(2):
-            raise IOError(errno.EPERM, "Permission denied")
+            raise IOError(errno.EPERM, "Permission denied.")
 
         stobj = self.context._dirobj.pool.get('document.storage')
         return stobj.set_data(cr, self.context.uid,stor, self, data, self.context.context, fil_obj)
@@ -1304,10 +1304,10 @@
 
     def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
         if ndir_node and ndir_node.context != self.context:
-            raise NotImplementedError("Cannot move files between contexts")
+            raise NotImplementedError("Cannot move files between contexts.")
 
         if (not self.check_perms(8)) and ndir_node.check_perms(2):
-            raise IOError(errno.EPERM, "Permission denied")
+            raise IOError(errno.EPERM, "Permission denied.")
 
         doc_obj = self.context._dirobj.pool.get('ir.attachment')
         if not fil_obj:
@@ -1343,7 +1343,7 @@
 
         if new_name and (new_name != dbro.name):
             if len(ret):
-                raise NotImplementedError("Cannot rename and move") # TODO
+                raise NotImplementedError("Cannot rename and move.") # TODO
             stobj = self.context._dirobj.pool.get('document.storage')
             r2 = stobj.simple_rename(cr, self.context.uid, self, new_name, self.context.context)
             ret.update(r2)
@@ -1399,7 +1399,7 @@
     def get_data(self, cr, fil_obj = None):
         cntobj = self.context._dirobj.pool.get('document.directory.content')
         if not self.check_perms(4):
-            raise IOError(errno.EPERM, "Permission denied")
+            raise IOError(errno.EPERM, "Permission denied.")
 
         ctx = self.context.context.copy()
         ctx.update(self.dctx)
@@ -1419,7 +1419,7 @@
             raise IOError(errno.EINVAL, "Cannot open at mode %s" % mode)
         
         if not self.check_perms(cperms):
-            raise IOError(errno.EPERM, "Permission denied")
+            raise IOError(errno.EPERM, "Permission denied.")
 
         ctx = self.context.context.copy()
         ctx.update(self.dctx)
@@ -1438,7 +1438,7 @@
     def set_data(self, cr, data, fil_obj = None):
         cntobj = self.context._dirobj.pool.get('document.directory.content')
         if not self.check_perms(2):
-            raise IOError(errno.EPERM, "Permission denied")
+            raise IOError(errno.EPERM, "Permission denied.")
 
         ctx = self.context.context.copy()
         ctx.update(self.dctx)
@@ -1474,7 +1474,7 @@
             StringIO.__init__(self, None)
         else:
             _logger.error("Incorrect mode %s specified", mode)
-            raise IOError(errno.EINVAL, "Invalid file mode")
+            raise IOError(errno.EINVAL, "Invalid file mode!")
         self.mode = mode
 
     def size(self):
@@ -1528,7 +1528,7 @@
             StringIO.__init__(self, None)
         else:
             _logger.error("Incorrect mode %s specified", mode)
-            raise IOError(errno.EINVAL, "Invalid file mode")
+            raise IOError(errno.EINVAL, "Invalid file mode!")
         self.mode = mode
 
     def size(self):

=== modified file 'document_ftp/ftpserver/abstracted_fs.py'
--- document_ftp/ftpserver/abstracted_fs.py	2012-06-22 06:48:39 +0000
+++ document_ftp/ftpserver/abstracted_fs.py	2012-07-13 09:00:10 +0000
@@ -176,7 +176,7 @@
             res = node.open_data(cr, mode)
             cr.commit()
         except TypeError:
-            raise IOError(errno.EINVAL, "No data")
+            raise IOError(errno.EINVAL, "No data.")
         return res
 
     # ok, but need test more
@@ -211,9 +211,9 @@
             self.cwd_node = None
             return None
         if not datacr[1]:
-            raise OSError(1, 'Operation not permitted')
+            raise OSError(1, 'Operation not permitted.')
         if datacr[1].type not in  ('collection','database'):
-            raise OSError(2, 'Path is not a directory')
+            raise OSError(2, 'Path is not a directory.')
         self.cwd = '/'+datacr[1].context.dbname + '/'
         self.cwd += '/'.join(datacr[1].full_path())
         self.cwd_node = datacr[1]
@@ -287,7 +287,7 @@
                 p_parts = p_parts[1:]
             # self._log.debug("Path parts: %r ", p_parts)
             if not p_parts:
-                raise IOError(errno.EPERM, 'Cannot perform operation at root dir')
+                raise IOError(errno.EPERM, 'Cannot perform operation at root directory.')
             dbname = p_parts[0]
             if dbname not in self.db_list():
                 raise IOError(errno.ENOENT,'Invalid database path: %s' % dbname)
@@ -318,7 +318,7 @@
                 node = self.cwd_node
             if node is False and mode not in ('???'):
                 cr.close()
-                raise IOError(errno.ENOENT, 'Path does not exist')
+                raise IOError(errno.ENOENT, 'Path does not exist!')
             return (cr, node, rem_path)
 
     def get_node_cr_uid(self, node):
@@ -429,7 +429,7 @@
     def getsize(self, datacr):
         """Return the size of the specified file in bytes."""
         if not (datacr and datacr[1]):
-            raise IOError(errno.ENOENT, "No such file or directory")
+            raise IOError(errno.ENOENT, "No such file or directory.")
         if datacr[1].type in ('file', 'content'):
             return datacr[1].get_data_len(datacr[0]) or 0L
         return 0L

=== modified file 'document_ftp/ftpserver/ftpserver.py'
--- document_ftp/ftpserver/ftpserver.py	2011-09-22 09:55:31 +0000
+++ document_ftp/ftpserver/ftpserver.py	2012-07-13 09:00:10 +0000
@@ -308,7 +308,7 @@
             raise AuthorizerError('No such directory: "%s"' %homedir)
         for p in perm:
             if p not in 'elradfmw':
-                raise AuthorizerError('No such permission "%s"' %p)
+                raise AuthorizerError('No such permission: "%s"' %p)
         for p in perm:
             if (p in self.write_perms) and (username == 'anonymous'):
                 warnings.warn("write permissions assigned to anonymous user.",
@@ -638,7 +638,7 @@
         elif type == 'i':
             self.data_wrapper = lambda x: x
         else:
-            raise TypeError, "Unsupported type"
+            raise TypeError, "Unsupported type!"
         self.receive = True
 
     def get_transmitted_bytes(self):
@@ -823,7 +823,7 @@
         elif type == 'i':
             self.data_wrapper = lambda x: x
         else:
-            raise TypeError, "Unsupported type"
+            raise TypeError, "Unsupported type!"
 
     def more(self):
         """Attempt a chunk of data of size self.buffer_size."""
@@ -2554,7 +2554,7 @@
             else:
                 datacr = self.get_crdata2(line)
                 if not datacr:
-                    raise IOError(errno.ENOENT, "%s is not retrievable" %line)
+                    raise IOError(errno.ENOENT, "%s is not retrievable." %line)
 
                 lmt = self.try_as_current_user(self.fs.getmtime, (datacr,), line=line)
             lmt = time.strftime("%Y%m%d%H%M%S", time.localtime(lmt))

=== modified file 'document_ftp/test_easyftp.py'
--- document_ftp/test_easyftp.py	2011-12-19 16:54:40 +0000
+++ document_ftp/test_easyftp.py	2012-07-13 09:00:10 +0000
@@ -62,7 +62,7 @@
     data = []
     def ffp(data, ndata):
         if len(data)+ len(ndata) > limit:
-            raise IndexError('Data over the limit')
+            raise IndexError('Data over the limit.')
         data.append(ndata)
     ftp.retrbinary('RETR %s' % fname, partial(ffp,data))
     return ''.join(data)

=== modified file 'document_webdav/dav_fs.py'
--- document_webdav/dav_fs.py	2011-12-19 16:54:40 +0000
+++ document_webdav/dav_fs.py	2012-07-13 09:00:10 +0000
@@ -98,7 +98,7 @@
 
     def read(self, size=-1):
         if not self._stream:
-            raise IOError(errno.EBADF, "read() without stream")
+            raise IOError(errno.EBADF, "read() without stream.")
         
         if self._rem_length == 0:
             return ''
@@ -136,25 +136,25 @@
         """
         if whence == os.SEEK_SET:
             if pos < 0 or pos > self._length:
-                raise IOError(errno.EINVAL,"Cannot seek")
+                raise IOError(errno.EINVAL,"Cannot seek!")
             self._stream.seek(pos - self._offset)
             self._rem_length = self._length - pos
         elif whence == os.SEEK_CUR:
             if pos > 0:
                 if pos > self._rem_length:
-                    raise IOError(errno.EINVAL,"Cannot seek past end")
+                    raise IOError(errno.EINVAL,"Cannot seek past end!")
                 elif pos < 0:
                     oldpos = self.tell()
                     if oldpos + pos < 0:
-                        raise IOError(errno.EINVAL,"Cannot seek before start")
+                        raise IOError(errno.EINVAL,"Cannot seek before start!")
                 self._stream.seek(pos, os.SEEK_CUR)
                 self._rem_length -= pos
         elif whence == os.SEEK_END:
             if pos > 0:
-                raise IOError(errno.EINVAL,"Cannot seek past end")
+                raise IOError(errno.EINVAL,"Cannot seek past end!")
             else:
                 if self._length + pos < 0:
-                    raise IOError(errno.EINVAL,"Cannot seek before start")
+                    raise IOError(errno.EINVAL,"Cannot seek before start!")
             newpos = self._offset + self._length + pos
             self._stream.seek(newpos, os.SEEK_SET)
             self._rem_length = 0 - pos
@@ -206,7 +206,7 @@
             self.parent.log_error("Cannot %s: %s", opname, str(e))
             self.parent.log_message("Exc: %s",traceback.format_exc())
             # see par 9.3.1 of rfc
-            raise DAV_Error(403, str(e) or 'Not supported at this path')
+            raise DAV_Error(403, str(e) or 'Not supported at this path.')
         except EnvironmentError, err:
             if cr: cr.close()
             import traceback
@@ -218,7 +218,7 @@
             if cr: cr.close()
             self.parent.log_error("Cannot %s: %s", opname, str(e))
             self.parent.log_message("Exc: %s",traceback.format_exc())
-            raise default_exc("Operation failed")
+            raise default_exc("Operation failed.")
 
     def _get_dav_lockdiscovery(self, uri):
         """ We raise that so that the node API is used """
@@ -434,7 +434,7 @@
         except DAV_Error:
             raise
         except Exception, e:
-            self.parent.log_error("cannot get_children: "+ str(e))
+            self.parent.log_error("Cannot get_children: "+ str(e))
             raise
         finally:
             if cr: cr.close()
@@ -500,10 +500,10 @@
                     assert start >= 0
                     if end and end < start:
                         self.parent.log_error("Invalid range for data: %s-%s" %(start, end))
-                        raise DAV_Error(416, "Invalid range for data")
+                        raise DAV_Error(416, "Invalid range for data.")
                     if end:
                         if end >= res.size():
-                            raise DAV_Error(416, "Requested data exceeds available size")
+                            raise DAV_Error(416, "Requested data exceeds available size.")
                         length = (end + 1) - start
                     else:
                         length = res.size() - start
@@ -661,7 +661,7 @@
         cr, uid, pool, dbname, uri2 = self.get_cr(uri)
         if not uri2[-1]:
             if cr: cr.close()
-            raise DAV_Error(409, "Cannot create nameless collection")
+            raise DAV_Error(409, "Cannot create nameless collection.")
         if not dbname:
             if cr: cr.close()
             raise DAV_Error, 409
@@ -672,7 +672,7 @@
         nc = node.child(cr, uri2[-1])
         if nc:
             cr.close()
-            raise DAV_Error(405, "Path already exists")
+            raise DAV_Error(405, "Path already exists.")
         self._try_function(node.create_child_collection, (cr, uri2[-1]),
                     "create col %s" % uri2[-1], cr=cr)
         cr.commit()
@@ -698,14 +698,14 @@
             dir_node = self.uri2object(cr, uid, pool, uri2[:-1])
             if not dir_node:
                 cr.close()
-                raise DAV_NotFound('Parent folder not found')
+                raise DAV_NotFound('Parent folder not found.')
 
             newchild = self._try_function(dir_node.create_child, (cr, objname, data),
                     "create %s" % objname, cr=cr)
             if not newchild:
                 cr.commit()
                 cr.close()
-                raise DAV_Error(400, "Failed to create resource")
+                raise DAV_Error(400, "Failed to create resource.")
             
             uparts=urlparse.urlparse(uri)
             fileloc = '/'.join(newchild.full_path())
@@ -937,7 +937,7 @@
         except AttributeError:
             # perhaps the node doesn't support locks
             cr.close()
-            raise DAV_Error(400, 'No locks for this resource')
+            raise DAV_Error(400, 'No locks for this resource.')
 
         res = self._try_function(node_fn, (cr, token), "unlock %s" % uri, cr=cr)
         cr.commit()
@@ -966,7 +966,7 @@
             dir_node = self.uri2object(cr, uid, pool, uri2[:-1])
             if not dir_node:
                 cr.close()
-                raise DAV_NotFound('Parent folder not found')
+                raise DAV_NotFound('Parent folder not found.')
 
             # We create a new node (file) but with empty data=None,
             # as in RFC4918 p. 9.10.4
@@ -975,7 +975,7 @@
             if not node:
                 cr.commit()
                 cr.close()
-                raise DAV_Error(400, "Failed to create resource")
+                raise DAV_Error(400, "Failed to create resource.")
             
             created = True
 
@@ -984,7 +984,7 @@
         except AttributeError:
             # perhaps the node doesn't support locks
             cr.close()
-            raise DAV_Error(400, 'No locks for this resource')
+            raise DAV_Error(400, 'No locks for this resource.')
 
         # Obtain the lock on the node
         lres, pid, token = self._try_function(node_fn, (cr, lock_data), "lock %s" % objname, cr=cr)
@@ -992,7 +992,7 @@
         if not lres:
             cr.commit()
             cr.close()
-            raise DAV_Error(423, "Resource already locked")
+            raise DAV_Error(423, "Resource already locked.")
         
         assert isinstance(lres, list), 'lres: %s' % repr(lres)
         

=== modified file 'document_webdav/document_webdav.py'
--- document_webdav/document_webdav.py	2011-12-19 16:54:40 +0000
+++ document_webdav/document_webdav.py	2012-07-13 09:00:10 +0000
@@ -43,7 +43,7 @@
         elif dbro.type == 'ressource':
             return nodes.node_res_dir
         else:
-            raise ValueError("dir node for %s type", dbro.type)
+            raise ValueError("Directory node for %s type" % dbro.type)
 
     def _prepare_context(self, cr, uid, nctx, context=None):
         nctx.node_file_class = nodes.node_file

=== modified file 'document_webdav/test_davclient.py'
--- document_webdav/test_davclient.py	2012-06-25 09:06:19 +0000
+++ document_webdav/test_davclient.py	2012-07-13 09:00:10 +0000
@@ -278,7 +278,7 @@
     
             return self._parse_response(h.getfile(), sock, resp)
 
-        raise ProtocolError(host+handler, 403, "No authentication",'')
+        raise ProtocolError(host+handler, 403, "No authentication.",'')
 
 class PersistentAuthTransport(addAuthTransport,PersistentTransport):
     pass
@@ -402,7 +402,7 @@
                 r1 = conn.getresponse()
         except httplib.BadStatusLine, bsl:
                 log.warning("Bad status line: %s", bsl.line)
-                raise Exception('Bad status line')
+                raise Exception('Bad status line.')
         if r1.status == 401: # and r1.headers:
                 if 'www-authenticate' in r1.msg:
                         (atype,realm) = r1.msg.getheader('www-authenticate').split(' ',1)
@@ -437,7 +437,7 @@
                     doc = xml.dom.minidom.parseString(data1)
                     _logger.debug("XML Body:\n %s", doc.toprettyxml(indent="\t"))
             except Exception:
-                _logger.warning("could not print xml", exc_info=True)
+                _logger.warning("Cannot print xml", exc_info=True)
                 pass
         conn.close()
         return r1.status, r1.msg, data1
@@ -651,7 +651,7 @@
             if isinstance(crange, tuple):
                 crange = [crange,]
             if not isinstance(crange, list):
-                raise TypeError("Range must be a tuple or list of tuples")
+                raise TypeError("Range must be a tuple or list of tuples.")
             rs = []
             for r in crange:
                 rs.append('%d-%d' % r)
@@ -689,7 +689,7 @@
         """
         hdrs = { }
         if not (body or srcpath):
-            raise ValueError("PUT must have something to send")
+            raise ValueError("PUT must have something to send.")
         if (not body) and srcpath:
             fd = open(srcpath, 'rb')
             body = fd.read()

=== modified file 'document_webdav/webdav.py'
--- document_webdav/webdav.py	2011-12-19 16:54:40 +0000
+++ document_webdav/webdav.py	2012-07-13 09:00:10 +0000
@@ -65,7 +65,7 @@
 
     def createText2Node(self, data):
         if not isinstance(data, StringTypes):
-            raise TypeError, "node contents must be a string"
+            raise TypeError, "Node contents must be a string."
         t = Text2()
         t.data = data
         t.ownerDocument = self.doc

=== modified file 'document_webdav/webdav_server.py'
--- document_webdav/webdav_server.py	2012-06-22 07:25:45 +0000
+++ document_webdav/webdav_server.py	2012-07-13 09:00:10 +0000
@@ -119,7 +119,7 @@
         if up.path.startswith(self.davpath):
             self.headers['Destination'] = up.path[len(self.davpath):]
         else:
-            raise DAV_Forbidden("Not allowed to copy/move outside webdav path")
+            raise DAV_Forbidden("Not allowed to copy/move outside webdav path.")
         # TODO: locks
         DAVRequestHandler.copymove(self, CLASS)
 
@@ -338,7 +338,7 @@
             if isinstance(ldif, list):
                 if len(ldif) !=1 or (not isinstance(ldif[0], TagList)) \
                         or len(ldif[0].list) != 1:
-                    raise DAV_Error(400, "Cannot accept multiple tokens")
+                    raise DAV_Error(400, "Cannot accept multiple tokens.")
                 ldif = ldif[0].list[0]
                 if ldif[0] == '<' and ldif[-1] == '>':
                     ldif = ldif[1:-1]
@@ -352,7 +352,7 @@
             lock_data.update(self._lock_unlock_parse(body))
 
         if lock_data['refresh'] and not lock_data.get('token', False):
-            raise DAV_Error(400, 'Lock refresh must specify token')
+            raise DAV_Error(400, 'Lock refresh must specify token.')
 
         lock_data['depth'] = depth
 

_______________________________________________
Mailing list: https://launchpad.net/~openerp-dev-gtk
Post to     : [email protected]
Unsubscribe : https://launchpad.net/~openerp-dev-gtk
More help   : https://help.launchpad.net/ListHelp

Reply via email to