Hello community,

here is the log from the commit of package python-xlrd for openSUSE:Factory checked in at 2015-11-04 15:32:53
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-xlrd (Old)
 and      /work/SRC/openSUSE:Factory/.python-xlrd.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-xlrd"

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-xlrd/python-xlrd.changes  2013-05-02 11:45:27.000000000 +0200
+++ /work/SRC/openSUSE:Factory/.python-xlrd.new/python-xlrd.changes     2015-11-04 15:32:54.000000000 +0100
@@ -1,0 +2,6 @@
+Thu Oct 29 17:54:20 UTC 2015 - [email protected]
+
+- Update to 0.9.4
+  * No changelog available 
+
+-------------------------------------------------------------------

Old:
----
  xlrd-0.9.2.tar.gz

New:
----
  xlrd-0.9.4.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-xlrd.spec ++++++
--- /var/tmp/diff_new_pack.4sdQlS/_old  2015-11-04 15:32:55.000000000 +0100
+++ /var/tmp/diff_new_pack.4sdQlS/_new  2015-11-04 15:32:55.000000000 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package python-xlrd
 #
-# Copyright (c) 2013 SUSE LINUX Products GmbH, Nuernberg, Germany.
+# Copyright (c) 2015 SUSE LINUX GmbH, Nuernberg, Germany.
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -17,7 +17,7 @@
 
 
 Name:           python-xlrd
-Version:        0.9.2
+Version:        0.9.4
 Release:        0
 Url:            http://www.lexicon.net/sjmachin/xlrd.htm
 Summary:        Library for Developers to Extract Data From Microsoft Excel Spreadsheet Files


++++++ xlrd-0.9.2.tar.gz -> xlrd-0.9.4.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/PKG-INFO new/xlrd-0.9.4/PKG-INFO
--- old/xlrd-0.9.2/PKG-INFO     2013-04-09 21:36:10.000000000 +0200
+++ new/xlrd-0.9.4/PKG-INFO     2015-07-15 08:21:41.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: xlrd
-Version: 0.9.2
+Version: 0.9.4
 Summary: Library for developers to extract data from Microsoft Excel (tm) spreadsheet files
 Home-page: http://www.python-excel.org/
 Author: John Machin
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/scripts/runxlrd.py new/xlrd-0.9.4/scripts/runxlrd.py
--- old/xlrd-0.9.2/scripts/runxlrd.py   2013-04-05 00:36:57.000000000 +0200
+++ new/xlrd-0.9.4/scripts/runxlrd.py   2015-07-15 08:12:19.000000000 +0200
@@ -89,9 +89,8 @@
             if cty == xlrd.XL_CELL_DATE:
                 try:
                     showval = xlrd.xldate_as_tuple(cval, dmode)
-                except xlrd.XLDateError:
-                    e1, e2 = sys.exc_info()[:2]
-                    showval = "%s:%s" % (e1.__name__, e2)
+                except xlrd.XLDateError as e:
+                    showval = "%s:%s" % (type(e).__name__, e)
                     cty = xlrd.XL_CELL_ERROR
             elif cty == xlrd.XL_CELL_ERROR:
                 showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
@@ -335,17 +334,15 @@
                     t1 = time.time()
                     if not options.suppress_timing:
                         print("Open took %.2f seconds" % (t1-t0,))
-                except xlrd.XLRDError:
-                    e0, e1 = sys.exc_info()[:2]
-                    print("*** Open failed: %s: %s" % (e0.__name__, e1))
+                except xlrd.XLRDError as e:
+                    print("*** Open failed: %s: %s" % (type(e).__name__, e))
                     continue
                 except KeyboardInterrupt:
                     print("*** KeyboardInterrupt ***")
                     traceback.print_exc(file=sys.stdout)
                     sys.exit(1)
-                except:
-                    e0, e1 = sys.exc_info()[:2]
-                    print("*** Open failed: %s: %s" % (e0.__name__, e1))
+                except BaseException as e:
+                    print("*** Open failed: %s: %s" % (type(e).__name__, e))
                     traceback.print_exc(file=sys.stdout)
                     continue
                 t0 = time.time()
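
The hunks above replace the old sys.exc_info()[:2] unpacking with the "except ... as e" syntax, which works on Python 2.6+ as well as Python 3. A minimal, self-contained sketch of the same idiom (the cell value passed in is only illustrative):

    import xlrd

    def describe_date_cell(cval, datemode):
        # Convert an Excel serial date, reporting conversion errors inline
        # instead of letting them propagate, the same pattern runxlrd.py now uses.
        try:
            return xlrd.xldate_as_tuple(cval, datemode)
        except xlrd.XLDateError as e:
            # type(e).__name__ replaces the old sys.exc_info() lookup.
            return "%s:%s" % (type(e).__name__, e)

    print(describe_date_cell(-1.0, 0))  # e.g. "XLDateNegative:-1.0"
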
Files old/xlrd-0.9.2/tests/apachepoi_49609.xlsx and new/xlrd-0.9.4/tests/apachepoi_49609.xlsx differ
Files old/xlrd-0.9.2/tests/merged_cells.xlsx and new/xlrd-0.9.4/tests/merged_cells.xlsx differ
Files old/xlrd-0.9.2/tests/reveng1.xlsx and new/xlrd-0.9.4/tests/reveng1.xlsx differ
Files old/xlrd-0.9.2/tests/self_evaluation_report_2014-05-19.xlsx and new/xlrd-0.9.4/tests/self_evaluation_report_2014-05-19.xlsx differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/tests/test_cell.py new/xlrd-0.9.4/tests/test_cell.py
--- old/xlrd-0.9.2/tests/test_cell.py   2013-04-05 00:36:57.000000000 +0200
+++ new/xlrd-0.9.4/tests/test_cell.py   2014-01-25 10:49:12.000000000 +0100
@@ -39,3 +39,26 @@
         row_lo, row_hi, col_lo, col_hi = sheet3.merged_cells[0]
         self.assertEqual(sheet3.cell(row_lo, col_lo).value, 'MERGED')
         self.assertEqual((row_lo, row_hi, col_lo, col_hi), (3, 7, 2, 5))
+
+    def test_merged_cells_xlsx(self):
+        book = xlrd.open_workbook(from_this_dir('merged_cells.xlsx'))
+
+        sheet1 = book.sheet_by_name('Sheet1')
+        expected = []
+        got = sheet1.merged_cells
+        self.assertEqual(expected, got)
+
+        sheet2 = book.sheet_by_name('Sheet2')
+        expected = [(0, 1, 0, 2)]
+        got = sheet2.merged_cells
+        self.assertEqual(expected, got)
+
+        sheet3 = book.sheet_by_name('Sheet3')
+        expected = [(0, 1, 0, 2), (0, 1, 2, 4), (1, 4, 0, 2), (1, 9, 2, 4)]
+        got = sheet3.merged_cells
+        self.assertEqual(expected, got)
+
+        sheet4 = book.sheet_by_name('Sheet4')
+        expected = [(0, 1, 0, 2), (2, 20, 0, 1), (1, 6, 2, 5)]
+        got = sheet4.merged_cells
+        self.assertEqual(expected, got)
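
The new test exercises Sheet.merged_cells for .xlsx input. A short usage sketch, assuming a workbook that actually contains merged ranges (the file name is illustrative):

    import xlrd

    book = xlrd.open_workbook('merged_cells.xlsx')
    sheet = book.sheet_by_index(0)

    # Each entry is (row_lo, row_hi, col_lo, col_hi); the "hi" bounds are
    # exclusive, and the merged range's value lives in its top-left cell.
    for row_lo, row_hi, col_lo, col_hi in sheet.merged_cells:
        print((row_lo, row_hi, col_lo, col_hi), sheet.cell(row_lo, col_lo).value)
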
Files old/xlrd-0.9.2/tests/test_comments_excel.xlsx and new/xlrd-0.9.4/tests/test_comments_excel.xlsx differ
Files old/xlrd-0.9.2/tests/test_comments_gdocs.xlsx and new/xlrd-0.9.4/tests/test_comments_gdocs.xlsx differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/tests/test_sheet.py new/xlrd-0.9.4/tests/test_sheet.py
--- old/xlrd-0.9.2/tests/test_sheet.py  2013-04-05 00:36:57.000000000 +0200
+++ new/xlrd-0.9.4/tests/test_sheet.py  2015-07-15 08:11:03.000000000 +0200
@@ -2,8 +2,9 @@
 
 from unittest import TestCase
 
-import sys
 import os
+import sys
+import types
 import unittest
 
 import xlrd
@@ -92,6 +93,12 @@
         row = sheet.row(0)
         self.assertEqual(len(row), NCOLS)
 
+    def test_get_rows(self):
+        sheet = self.book.sheet_by_index(SHEETINDEX)
+        rows = sheet.get_rows()
+        self.assertTrue(isinstance(rows, types.GeneratorType), True)
+        self.assertEqual(len(list(rows)), sheet.nrows)
+
     def test_col_slice(self):
         sheet = self.book.sheet_by_index(SHEETINDEX)
         self.check_col_slice(sheet.col_slice)
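
Sheet.get_rows(), tested above, is new in this update and returns a generator over the sheet's rows. A minimal sketch of iterating a sheet with it (the file name is illustrative):

    import xlrd

    book = xlrd.open_workbook('example.xlsx')
    sheet = book.sheet_by_index(0)

    # get_rows() yields one list of Cell objects per row, lazily, so large
    # sheets can be scanned without materialising every row up front.
    for row in sheet.get_rows():
        print([cell.value for cell in row])
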
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/tests/test_xldate_to_datetime.py new/xlrd-0.9.4/tests/test_xldate_to_datetime.py
--- old/xlrd-0.9.2/tests/test_xldate_to_datetime.py     1970-01-01 01:00:00.000000000 +0100
+++ new/xlrd-0.9.4/tests/test_xldate_to_datetime.py     2014-04-07 16:40:04.000000000 +0200
@@ -0,0 +1,164 @@
+###############################################################################
+#
+# Tests for the xlrd xldate.xldate_as_datetime() function.
+#
+
+import unittest
+from datetime import datetime
+from xlrd import xldate
+
+not_1904 = False
+is_1904 = True
+
+
+class TestConvertToDateTime(unittest.TestCase):
+    """
+    Testcases to test the _xldate_to_datetime() function against dates
+    extracted from Excel files, with 1900/1904 epochs.
+
+    """
+
+    def test_dates_and_times_1900_epoch(self):
+        """
+        Test the _xldate_to_datetime() function for dates and times in
+        the Excel standard 1900 epoch.
+
+        """
+        # Test Excel dates strings and corresponding serial date numbers taken
+        # from an Excel file.
+        excel_dates = [
+            # Excel's 0.0 date in the 1900 epoch is 1 day before 1900.
+            ('1899-12-31T00:00:00.000', 0),
+
+            # Date/time before the false Excel 1900 leapday.
+            ('1900-02-28T02:11:11.986', 59.09111094906),
+
+            # Date/time after the false Excel 1900 leapday.
+            ('1900-03-01T05:46:44.068', 61.24078782403),
+
+            # Random date/times in Excel's 0-9999.9999+ range.
+            ('1982-08-25T00:15:20.213', 30188.010650613425),
+            ('2065-04-19T00:16:48.290', 60376.011670023145),
+            ('3222-06-11T03:08:08.251', 483014.13065105322),
+            ('4379-08-03T06:14:48.580', 905652.26028449077),
+            ('5949-12-30T12:59:54.263', 1479232.5416002662),
+
+            # End of Excel's date range.
+            ('9999-12-31T23:59:59.000', 2958465.999988426),
+        ]
+
+        # Convert the Excel date strings to datetime objects and compare
+        # against the dateitme return value of xldate.xldate_as_datetime().
+        for excel_date in excel_dates:
+            exp = datetime.strptime(excel_date[0], "%Y-%m-%dT%H:%M:%S.%f")
+            got = xldate.xldate_as_datetime(excel_date[1], not_1904)
+
+            self.assertEqual(got, exp)
+
+    def test_dates_only_1900_epoch(self):
+        """
+        Test the _xldate_to_datetime() function for dates in the Excel
+        standard 1900 epoch.
+
+        """
+        # Test Excel dates strings and corresponding serial date numbers taken
+        # from an Excel file.
+        excel_dates = [
+            # Excel's day 0 in the 1900 epoch is 1 day before 1900.
+            ('1899-12-31', 0),
+
+            # Excel's day 1 in the 1900 epoch.
+            ('1900-01-01', 1),
+
+            # Date/time before the false Excel 1900 leapday.
+            ('1900-02-28', 59),
+
+            # Date/time after the false Excel 1900 leapday.
+            ('1900-03-01', 61),
+
+            # Random date/times in Excel's 0-9999.9999+ range.
+            ('1902-09-27', 1001),
+            ('1999-12-31', 36525),
+            ('2000-01-01', 36526),
+            ('4000-12-31', 767376),
+            ('4321-01-01', 884254),
+            ('9999-01-01', 2958101),
+
+            # End of Excel's date range.
+            ('9999-12-31', 2958465),
+        ]
+
+        # Convert the Excel date strings to datetime objects and compare
+        # against the dateitme return value of xldate.xldate_as_datetime().
+        for excel_date in excel_dates:
+            exp = datetime.strptime(excel_date[0], "%Y-%m-%d")
+            got = xldate.xldate_as_datetime(excel_date[1], not_1904)
+
+            self.assertEqual(got, exp)
+
+    def test_dates_only_1904_epoch(self):
+        """
+        Test the _xldate_to_datetime() function for dates in the Excel
+        Mac/1904 epoch.
+
+        """
+        # Test Excel dates strings and corresponding serial date numbers taken
+        # from an Excel file.
+        excel_dates = [
+            # Excel's day 0 in the 1904 epoch.
+            ('1904-01-01', 0),
+
+            # Random date/times in Excel's 0-9999.9999+ range.
+            ('1904-01-31', 30),
+            ('1904-08-31', 243),
+            ('1999-02-28', 34757),
+            ('1999-12-31', 35063),
+            ('2000-01-01', 35064),
+            ('2400-12-31', 181526),
+            ('4000-01-01', 765549),
+            ('9999-01-01', 2956639),
+
+            # End of Excel's date range.
+            ('9999-12-31', 2957003),
+        ]
+
+        # Convert the Excel date strings to datetime objects and compare
+        # against the dateitme return value of xldate.xldate_as_datetime().
+        for excel_date in excel_dates:
+            exp = datetime.strptime(excel_date[0], "%Y-%m-%d")
+            got = xldate.xldate_as_datetime(excel_date[1], is_1904)
+
+            self.assertEqual(got, exp)
+
+    def test_times_only(self):
+        """
+        Test the _xldate_to_datetime() function for times only, i.e, the
+        fractional part of the Excel date when the serial date is 0.
+
+        """
+        # Test Excel dates strings and corresponding serial date numbers taken
+        # from an Excel file. The 1899-12-31 date is Excel's day 0.
+        excel_dates = [
+            # Random times in Excel's 0-0.9999+ range for 1 day.
+            ('1899-12-31T00:00:00.000', 0),
+            ('1899-12-31T00:15:20.213', 1.0650613425925924E-2),
+            ('1899-12-31T02:24:37.095', 0.10042934027777778),
+            ('1899-12-31T04:56:35.792', 0.2059698148148148),
+            ('1899-12-31T07:31:20.407', 0.31343063657407405),
+            ('1899-12-31T09:37:23.945', 0.40097158564814817),
+            ('1899-12-31T12:09:48.602', 0.50681252314814818),
+            ('1899-12-31T14:37:57.451', 0.60969271990740748),
+            ('1899-12-31T17:04:02.415', 0.71113906250000003),
+            ('1899-12-31T19:14:24.673', 0.80167445601851861),
+            ('1899-12-31T21:39:05.944', 0.90215212962962965),
+            ('1899-12-31T23:17:12.632', 0.97028509259259266),
+            ('1899-12-31T23:59:59.999', 0.99999998842592586),
+        ]
+
+        # Convert the Excel date strings to datetime objects and compare
+        # against the dateitme return value of xldate.xldate_as_datetime().
+        for excel_date in excel_dates:
+            exp = datetime.strptime(excel_date[0], "%Y-%m-%dT%H:%M:%S.%f")
+            got = xldate.xldate_as_datetime(excel_date[1], not_1904)
+
+            self.assertEqual(got, exp)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/tests/test_xlsx_comments.py new/xlrd-0.9.4/tests/test_xlsx_comments.py
--- old/xlrd-0.9.2/tests/test_xlsx_comments.py  1970-01-01 01:00:00.000000000 +0100
+++ new/xlrd-0.9.4/tests/test_xlsx_comments.py  2014-01-25 10:49:12.000000000 +0100
@@ -0,0 +1,46 @@
+from unittest import TestCase
+
+import os
+
+from xlrd import open_workbook
+
+from .base import from_this_dir
+
+class TestXlsxComments(TestCase):
+
+    def test_excel_comments(self):
+        book = open_workbook(from_this_dir('test_comments_excel.xlsx'))
+        sheet = book.sheet_by_index(0)
+
+        note_map = sheet.cell_note_map
+        self.assertEqual(len(note_map), 1)
+        self.assertEqual(note_map[(0, 1)].text, 'hello')
+
+    def test_excel_comments_multiline(self):
+        book = open_workbook(from_this_dir('test_comments_excel.xlsx'))
+        sheet = book.sheet_by_index(1)
+
+        note_map = sheet.cell_note_map
+        self.assertEqual(note_map[(1, 2)].text, '1st line\n2nd line')
+
+    def test_excel_comments_two_t_elements(self):
+        book = open_workbook(from_this_dir('test_comments_excel.xlsx'))
+        sheet = book.sheet_by_index(2)
+
+        note_map = sheet.cell_note_map
+        self.assertEqual(note_map[(0, 0)].text, 'Author:\nTwo t elements')
+
+    def test_excel_comments_no_t_elements(self):
+        book = open_workbook(from_this_dir('test_comments_excel.xlsx'))
+        sheet = book.sheet_by_index(3)
+
+        note_map = sheet.cell_note_map
+        self.assertEqual(note_map[(0,0)].text, '')
+
+    def test_gdocs_comments(self):
+        book = open_workbook(from_this_dir('test_comments_gdocs.xlsx'))
+        sheet = book.sheet_by_index(0)
+
+        note_map = sheet.cell_note_map
+        self.assertEqual(len(note_map), 1)
+        self.assertEqual(note_map[(0, 1)].text, 'Just a test')
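
These tests cover the cell comment support for .xlsx files, exposed through Sheet.cell_note_map. A short sketch of reading comments (the file name is illustrative):

    import xlrd

    book = xlrd.open_workbook('test_comments_excel.xlsx')
    sheet = book.sheet_by_index(0)

    # cell_note_map maps (rowx, colx) to a Note object carrying the comment.
    for (rowx, colx), note in sheet.cell_note_map.items():
        print("comment at (%d, %d) by %s: %r" % (rowx, colx, note.author, note.text))
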
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/tests/test_xlsx_parse.py new/xlrd-0.9.4/tests/test_xlsx_parse.py
--- old/xlrd-0.9.2/tests/test_xlsx_parse.py     1970-01-01 01:00:00.000000000 +0100
+++ new/xlrd-0.9.4/tests/test_xlsx_parse.py     2015-07-15 08:11:03.000000000 +0200
@@ -0,0 +1,39 @@
+###############################################################################
+#
+# Test the parsing of problematic xlsx files from bug reports.
+#
+
+import unittest
+import xlrd
+from .base import from_this_dir
+
+
+class TestXlsxParse(unittest.TestCase):
+    # Test parsing of problematic xlsx files. These are usually submitted
+    # as part of bug reports as noted below.
+
+    def test_for_github_issue_96(self):
+        # Test for non-Excel file with forward slash file separator and
+        # lowercase names. https://github.com/python-excel/xlrd/issues/96
+        workbook = xlrd.open_workbook(from_this_dir('apachepoi_49609.xlsx'))
+        worksheet = workbook.sheet_by_index(0)
+
+        # Test reading sample data from the worksheet.
+        cell = worksheet.cell(0, 1)
+        self.assertEqual(cell.value, 'Cycle')
+        self.assertEqual(cell.ctype, xlrd.book.XL_CELL_TEXT)
+
+        cell = worksheet.cell(1, 1)
+        self.assertEqual(cell.value, 1)
+        self.assertEqual(cell.ctype, xlrd.book.XL_CELL_NUMBER)
+
+    def test_for_github_issue_101(self):
+        # Test for non-Excel file with forward slash file separator
+        # https://github.com/python-excel/xlrd/issues/101
+        workbook = xlrd.open_workbook(from_this_dir('self_evaluation_report_2014-05-19.xlsx'))
+        worksheet = workbook.sheet_by_index(0)
+
+        # Test reading sample data from the worksheet.
+        cell = worksheet.cell(0, 0)
+        self.assertEqual(cell.value, 'one')
+        self.assertEqual(cell.ctype, xlrd.book.XL_CELL_TEXT)
Files old/xlrd-0.9.2/tests/text_bar.xlsx and new/xlrd-0.9.4/tests/text_bar.xlsx differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/xlrd/__init__.py new/xlrd-0.9.4/xlrd/__init__.py
--- old/xlrd-0.9.2/xlrd/__init__.py     2013-04-05 00:36:57.000000000 +0200
+++ new/xlrd-0.9.4/xlrd/__init__.py     2015-07-15 08:11:03.000000000 +0200
@@ -399,7 +399,13 @@
             zf = zipfile.ZipFile(timemachine.BYTES_IO(file_contents))
         else:
             zf = zipfile.ZipFile(filename)
-        component_names = zf.namelist()
+
+        # Workaround for some third party files that use forward slashes and
+        # lower case names. We map the expected name in lowercase to the
+        # actual filename in the zip container.
+        component_names = dict([(name.replace('\\', '/').lower(), name)
+                                for name in zf.namelist()])
+
         if verbosity:
             logfile.write('ZIP component_names:\n')
             pprint.pprint(component_names, logfile)
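
With this workaround, component_names is a dict mapping the normalised (forward-slash, lowercase) member name to the real name inside the ZIP container, so later lookups tolerate files written with backslashes or different case. A standalone sketch of the same lookup, assuming a hypothetical workbook path:

    import zipfile

    zf = zipfile.ZipFile('workbook.xlsx')

    # Normalised name -> actual member name in the archive.
    component_names = dict((name.replace('\\', '/').lower(), name)
                           for name in zf.namelist())

    # Resolve the member case-insensitively, then open it by its real name.
    stream = zf.open(component_names['xl/workbook.xml'])
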
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/xlrd/book.py new/xlrd-0.9.4/xlrd/book.py
--- old/xlrd-0.9.2/xlrd/book.py 2013-04-05 00:36:57.000000000 +0200
+++ new/xlrd-0.9.4/xlrd/book.py 2014-01-25 10:49:12.000000000 +0100
@@ -452,22 +452,20 @@
     # @return true if sheet is loaded, false otherwise
     # <br />  -- New in version 0.7.1
     def sheet_loaded(self, sheet_name_or_index):
-        # using type(1) because int won't work with Python 2.1
-        if isinstance(sheet_name_or_index, type(1)):
+        if isinstance(sheet_name_or_index, int):
             sheetx = sheet_name_or_index
         else:
             try:
                 sheetx = self._sheet_names.index(sheet_name_or_index)
             except ValueError:
                 raise XLRDError('No sheet named <%r>' % sheet_name_or_index)
-        return self._sheet_list[sheetx] and True or False # Python 2.1 again
+        return bool(self._sheet_list[sheetx])
 
     ##
     # @param sheet_name_or_index Name or index of sheet to be unloaded.
     # <br />  -- New in version 0.7.1
     def unload_sheet(self, sheet_name_or_index):
-        # using type(1) because int won't work with Python 2.1
-        if isinstance(sheet_name_or_index, type(1)):
+        if isinstance(sheet_name_or_index, int):
             sheetx = sheet_name_or_index
         else:
             try:
@@ -566,43 +564,18 @@
         self.ragged_rows = ragged_rows
 
         if not file_contents:
-            if python_version < (2, 2) and self.use_mmap:
-                # need to open for update
-                open_mode = "r+b"
-            else:
-                open_mode = "rb"
-            retry = False
-            f = None
-            try:
-                try:
-                    f = open(filename, open_mode)
-                except IOError:
-                    e, v = sys.exc_info()[:2]
-                    if open_mode == "r+b" \
-                    and (v.errno == 13 or v.strerror == "Permission denied"):
-                        # Maybe the file is read-only
-                        retry = True
-                        self.use_mmap = False
-                    else:
-                        raise
-                if retry:
-                    f = open(filename, "rb")
+            with open(filename, "rb") as f:
                 f.seek(0, 2) # EOF
                 size = f.tell()
                 f.seek(0, 0) # BOF
                 if size == 0:
                     raise XLRDError("File size is 0 bytes")
                 if self.use_mmap:
-                    if python_version < (2, 2):
-                        self.filestr = mmap.mmap(f.fileno(), size)
-                    else:
-                        self.filestr = mmap.mmap(f.fileno(), size, access=mmap.ACCESS_READ)
+                    self.filestr = mmap.mmap(f.fileno(), size, access=mmap.ACCESS_READ)
                     self.stream_len = size
                 else:
                     self.filestr = f.read()
                     self.stream_len = len(self.filestr)
-            finally:
-                if f: f.close()
         else:
             self.filestr = file_contents
             self.stream_len = len(file_contents)
@@ -800,11 +773,10 @@
             # we're well & truly stuffed -- let the punter know ASAP.
             try:
                 _unused = unicode(b'trial', self.encoding)
-            except:
-                ei = sys.exc_info()[:2]
+            except BaseException as e:
                 fprintf(self.logfile,
                     "ERROR *** codepage %r -> encoding %r -> %s: %s\n",
-                    self.codepage, self.encoding, ei[0].__name__.split(".")[-1], ei[1])
+                    self.codepage, self.encoding, type(e).__name__.split(".")[-1], e)
                 raise
         if self.raw_user_name:
             strg = unpack_string(self.user_name, 0, self.encoding, lenlen=1)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/xlrd/compdoc.py new/xlrd-0.9.4/xlrd/compdoc.py
--- old/xlrd-0.9.2/xlrd/compdoc.py      2013-04-05 00:37:01.000000000 +0200
+++ new/xlrd-0.9.4/xlrd/compdoc.py      2014-01-25 10:49:12.000000000 +0100
@@ -15,7 +15,7 @@
 # 2007-05-07 SJM Meaningful exception instead of IndexError if a SAT (sector allocation table) is corrupted.
 # 2007-04-22 SJM Missing "<" in a struct.unpack call => can't open files on bigendian platforms.
 
-from __future__ import nested_scopes, print_function
+from __future__ import print_function
 import sys
 from struct import unpack
 from .timemachine import *
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/xlrd/examples/xlrdnameAPIdemo.py new/xlrd-0.9.4/xlrd/examples/xlrdnameAPIdemo.py
--- old/xlrd-0.9.2/xlrd/examples/xlrdnameAPIdemo.py     2013-04-05 00:36:57.000000000 +0200
+++ new/xlrd-0.9.4/xlrd/examples/xlrdnameAPIdemo.py     2014-01-25 10:49:12.000000000 +0100
@@ -77,9 +77,8 @@
     if celltype == xlrd.XL_CELL_DATE:
         try:
             showval = xlrd.xldate_as_tuple(cellvalue, datemode)
-        except xlrd.XLDateError:
-            e1, e2 = sys.exc_info()[:2]
-            showval = "%s:%s" % (e1.__name__, e2)
+        except xlrd.XLDateError as e:
+            showval = "%s:%s" % (type(e).__name__, e)
     elif celltype == xlrd.XL_CELL_ERROR:
         showval = xlrd.error_text_from_code.get(
             cellvalue, '<Unknown error code 0x%02x>' % cellvalue)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/xlrd/formatting.py new/xlrd-0.9.4/xlrd/formatting.py
--- old/xlrd-0.9.2/xlrd/formatting.py   2013-04-05 00:36:57.000000000 +0200
+++ new/xlrd-0.9.4/xlrd/formatting.py   2014-04-07 16:40:09.000000000 +0200
@@ -10,22 +10,10 @@
 
 # No part of the content of this file was derived from the works of David Giffin.
 
-# 2010-10-30 SJM Added space after colon in "# coding" line to work around IBM iSeries Python bug
-# 2009-05-31 SJM Fixed problem with non-zero reserved bits in some STYLE records in Mac Excel files
-# 2008-08-03 SJM Ignore PALETTE record when Book.formatting_info is false
-# 2008-08-03 SJM Tolerate up to 4 bytes trailing junk on PALETTE record
-# 2008-05-10 SJM Do some XF checks only when Book.formatting_info is true
-# 2008-02-08 SJM Preparation for Excel 2.0 support
-# 2008-02-03 SJM Another tweak to is_date_format_string()
-# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files.
-# 2007-10-13 SJM Warning: style XF whose parent XF index != 0xFFF
-# 2007-09-08 SJM Work around corrupt STYLE record
-# 2007-07-11 SJM Allow for BIFF2/3-style FORMAT record in BIFF4/8 file
-
 from __future__ import print_function
 
 DEBUG = 0
-import copy, re
+import re
 from struct import unpack
 from .timemachine import *
 from .biffh import BaseObject, unpack_unicode, unpack_string, \
@@ -279,7 +267,7 @@
     if bv >= 50:
         (
             f.height, option_flags, f.colour_index, f.weight,
-            f.escapement_type, f.underline_type, f.family,
+            f.escapement, f.underline_type, f.family,
             f.character_set,
         ) = unpack('<HHHHHBBB', data[0:13])
         f.bold = option_flags & 1
@@ -303,7 +291,7 @@
         f.name = unpack_string(data, 6, book.encoding, lenlen=1)
         # Now cook up the remaining attributes ...
         f.weight = [400, 700][f.bold]
-        f.escapement_type = 0 # None
+        f.escapement = 0 # None
         f.underline_type = f.underlined # None or Single
         f.family = 0 # Unknown / don't care
         f.character_set = 1 # System default (0 means "ANSI Latin")
@@ -319,7 +307,7 @@
         f.name = unpack_string(data, 4, book.encoding, lenlen=1)
         # Now cook up the remaining attributes ...
         f.weight = [400, 700][f.bold]
-        f.escapement_type = 0 # None
+        f.escapement = 0 # None
         f.underline_type = f.underlined # None or Single
         f.family = 0 # Unknown / don't care
         f.character_set = 1 # System default (0 means "ANSI Latin")
@@ -622,7 +610,7 @@
             book.colour_indexes_used[cx] = 1
         elif book.verbosity:
             print("Size of colour table:", len(book.colour_map), file=book.logfile)
-            fprintf(self.logfile, "*** Font #%d (%r): colour index 0x%04x is unknown\n",
+            fprintf(book.logfile, "*** Font #%d (%r): colour index 0x%04x is unknown\n",
                 font.font_index, font.name, cx)
     if book.verbosity >= 1:
         used = sorted(book.colour_indexes_used.keys())
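
The hunks above rename the Font attribute from escapement_type to escapement in the BIFF >= 5.0 branch and in the cooked-up defaults, so all code paths now set the same field. A short sketch of inspecting font records from a .xls file, assuming it is opened with formatting_info=True (the file name is illustrative):

    import xlrd

    book = xlrd.open_workbook('example.xls', formatting_info=True)

    # font_list holds one Font object per FONT record; escapement is
    # 0 = none, 1 = superscript, 2 = subscript.
    for font in book.font_list:
        print(font.name, font.bold, font.escapement)
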
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/xlrd/info.py new/xlrd-0.9.4/xlrd/info.py
--- old/xlrd-0.9.2/xlrd/info.py 2013-04-09 21:29:41.000000000 +0200
+++ new/xlrd-0.9.4/xlrd/info.py 2015-07-15 08:20:47.000000000 +0200
@@ -1 +1 @@
-__VERSION__ = "0.9.2"
+__VERSION__ = "0.9.4"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/xlrd/sheet.py new/xlrd-0.9.4/xlrd/sheet.py
--- old/xlrd-0.9.2/xlrd/sheet.py        2013-04-05 00:37:29.000000000 +0200
+++ new/xlrd-0.9.4/xlrd/sheet.py        2015-07-15 08:11:03.000000000 +0200
@@ -458,6 +458,11 @@
             ]
 
     ##
+    # Returns a generator for iterating through each row.
+    def get_rows(self):
+        return (self.row(index) for index in range(self.nrows))
+
+    ##
     # Returns a slice of the types
     # of the cells in the given row.
     def row_types(self, rowx, start_colx=0, end_colx=None):
@@ -1046,6 +1051,9 @@
                     self_put_cell(rowx, colx, XL_CELL_BLANK, '', result[pos])
                     pos += 1
             elif rc == XL_DIMENSION or rc == XL_DIMENSION2:
+                if data_len == 0:
+                    # Four zero bytes after some other record. See github issue 64.
+                    continue
                 # if data_len == 10:
                 # Was crashing on BIFF 4.0 file w/o the two trailing unused bytes.
                 # Reported by Ralph Heimburger.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/xlrd/xldate.py new/xlrd-0.9.4/xlrd/xldate.py
--- old/xlrd-0.9.2/xlrd/xldate.py       2013-04-05 00:37:01.000000000 +0200
+++ new/xlrd-0.9.4/xlrd/xldate.py       2014-04-07 16:40:04.000000000 +0200
@@ -17,10 +17,16 @@
 # More importantly:
 #    Noon on Gregorian 1900-03-01 (day 61 in the 1900-based system) is JDN 2415080.0
 #    Noon on Gregorian 1904-01-02 (day  1 in the 1904-based system) is JDN 2416482.0
+import datetime
 
 _JDN_delta = (2415080 - 61, 2416482 - 1)
 assert _JDN_delta[1] - _JDN_delta[0] == 1462
 
+# Pre-calculate the datetime epochs for efficiency.
+epoch_1904 = datetime.datetime(1904, 1, 1)
+epoch_1900 = datetime.datetime(1899, 12, 31)
+epoch_1900_minus_1 = datetime.datetime(1899, 12, 30)
+
 class XLDateError(ValueError): pass
 
 class XLDateNegative(XLDateError): pass
@@ -90,6 +96,40 @@
     else:
         return ((yreg // 1461) - 4716, mp + 3, d, hour, minute, second)
 
+
+##
+# Convert an Excel date/time number into a datetime.datetime object.
+#
+# @param xldate The Excel number
+# @param datemode 0: 1900-based, 1: 1904-based.
+#
+# @return a datetime.datetime() object.
+#
+def xldate_as_datetime(xldate, datemode):
+    """Convert an Excel date/time number into a datetime.datetime object."""
+
+    # Set the epoch based on the 1900/1904 datemode.
+    if datemode:
+        epoch = epoch_1904
+    else:
+        if xldate < 60:
+            epoch = epoch_1900
+        else:
+            # Workaround Excel 1900 leap year bug by adjusting the epoch.
+            epoch = epoch_1900_minus_1
+
+    # The integer part of the Excel date stores the number of days since
+    # the epoch and the fractional part stores the percentage of the day.
+    days = int(xldate)
+    fraction = xldate - days
+
+    # Get the the integer and decimal seconds in Excel's millisecond resolution.
+    seconds = int(round(fraction * 86400000.0))
+    seconds, milliseconds = divmod(seconds, 1000)
+
+    return epoch + datetime.timedelta(days, seconds, 0, milliseconds)
+
+
 # === conversions from date/time to xl numbers
 
 def _leap(y):
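
The integer part of an Excel serial date counts days from the epoch and the fractional part encodes the time of day; the new xldate_as_datetime() helper turns that directly into a datetime.datetime. A quick worked example using values from the test file above:

    from xlrd import xldate

    # 30188 whole days after the 1900 epoch, plus 0.010650613425 of a day
    # (roughly 15 minutes 20 seconds).
    print(xldate.xldate_as_datetime(30188.010650613425, 0))
    # -> 1982-08-25 00:15:20.213000 (datemode 0 selects the 1900 epoch)

    # datemode 1 selects the Mac/1904 epoch; serial 0 is its day zero.
    print(xldate.xldate_as_datetime(0, 1))
    # -> 1904-01-01 00:00:00
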
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/xlrd-0.9.2/xlrd/xlsx.py new/xlrd-0.9.4/xlrd/xlsx.py
--- old/xlrd-0.9.2/xlrd/xlsx.py 2013-04-09 20:58:44.000000000 +0200
+++ new/xlrd-0.9.4/xlrd/xlsx.py 2015-07-15 08:11:03.000000000 +0200
@@ -19,9 +19,10 @@
 
 ET = None
 ET_has_iterparse = False
+Element_has_iter = False
 
 def ensure_elementtree_imported(verbosity, logfile):
-    global ET, ET_has_iterparse
+    global ET, ET_has_iterparse, Element_has_iter
     if ET is not None:
         return
     if "IronPython" in sys.version:
@@ -47,6 +48,7 @@
             ET_has_iterparse = True
         except NotImplementedError:
             pass
+    Element_has_iter = hasattr(ET.ElementTree, 'iter')
     if verbosity:
         etree_version = repr([
             (item, getattr(ET, item))
@@ -225,14 +227,15 @@
                 if bk.verbosity:
                     print(msg, file=bk.logfile)
         name_and_scope_map[key] = nobj
+        sort_data = (nobj.scope, namex, nobj)
         if name_lcase in name_map:
-            name_map[name_lcase].append((nobj.scope, nobj))
+            name_map[name_lcase].append(sort_data)
         else:
-            name_map[name_lcase] = [(nobj.scope, nobj)]
+            name_map[name_lcase] = [sort_data]
     for key in name_map.keys():
         alist = name_map[key]
         alist.sort()
-        name_map[key] = [x[1] for x in alist]
+        name_map[key] = [x[2] for x in alist]
     bk.name_and_scope_map = name_and_scope_map
     bk.name_map = name_map
 
@@ -243,7 +246,7 @@
             fprintf(self.logfile, "\n=== %s ===\n", heading)
         self.tree = ET.parse(stream)
         getmethod = self.tag2meth.get
-        for elem in self.tree.getiterator():
+        for elem in self.tree.iter() if Element_has_iter else self.tree.getiterator():
             if self.verbosity >= 3:
                 self.dump_elem(elem)
             meth = getmethod(elem.tag)
@@ -289,7 +292,7 @@
         self.tree = ET.parse(stream)
         getmenu = self.core_props_menu.get
         props = {}
-        for elem in self.tree.getiterator():
+        for elem in self.tree.iter() if Element_has_iter else self.tree.getiterator():
             if self.verbosity >= 3:
                 self.dump_elem(elem)
             menu = getmenu(elem.tag)
@@ -363,7 +366,14 @@
             if self.verbosity >= 2:
                 self.dumpout('Ignoring sheet of type %r (name=%r)', reltype, name)
             return
-        bk._sheet_visibility.append(True)
+        state = elem.get('state')
+        visibility_map = {
+            None: 0,
+            'visible': 0,
+            'hidden': 1,
+            'veryHidden': 2
+            }
+        bk._sheet_visibility.append(visibility_map[state])
         sheet = Sheet(bk, position=None, name=name, number=sheetx)
         sheet.utter_max_rows = X12_MAX_ROWS
         sheet.utter_max_cols = X12_MAX_COLS
@@ -504,6 +514,7 @@
         self.rowx = -1 # We may need to count them.
         self.bk = sheet.book
         self.sst = self.bk._sharedstrings
+        self.merged_cells = sheet.merged_cells
         self.warned_no_cell_name = 0
         self.warned_no_row_num = 0
         if ET_has_iterparse:
@@ -521,8 +532,34 @@
                 elem.clear() # destroy all child elements (cells)
             elif elem.tag == U_SSML12 + "dimension":
                 self.do_dimension(elem)
+            elif elem.tag == U_SSML12 + "mergeCell":
+                self.do_merge_cell(elem)
         self.finish_off()
-        
+
+    def process_comments_stream(self, stream):
+        root = ET.parse(stream).getroot()
+        author_list = root[0]
+        assert author_list.tag == U_SSML12 + 'authors'
+        authors = [elem.text for elem in author_list]
+        comment_list = root[1]
+        assert comment_list.tag == U_SSML12 + 'commentList'
+        cell_note_map = self.sheet.cell_note_map
+        from .sheet import Note
+        text_tag = U_SSML12 + 'text'
+        r_tag = U_SSML12 + 'r'
+        t_tag = U_SSML12 + 't'
+        for elem in comment_list.findall(U_SSML12 + 'comment'):
+            ts = elem.findall('./' + text_tag + '/' + t_tag)
+            ts += elem.findall('./' + text_tag + '/' + r_tag + '/' + t_tag)
+            ref = elem.get('ref')
+            note = Note()
+            note.author = authors[int(elem.get('authorId'))]
+            note.rowx, note.colx = coords = cell_name_to_rowx_colx(ref)
+            note.text = ''
+            for t in ts:
+                note.text += cooked_text(self, t)
+            cell_note_map[coords] = note
+
     def do_dimension(self, elem):
         ref = elem.get('ref') # example: "A1:Z99" or just "A1"
         if ref:
@@ -532,6 +569,16 @@
             self.sheet._dimnrows = rowx + 1
             self.sheet._dimncols = colx + 1
 
+    def do_merge_cell(self, elem):
+        # The ref attribute should be a cell range like "B1:D5".
+        ref = elem.get('ref')
+        if ref:
+            first_cell_ref, last_cell_ref = ref.split(':')
+            first_rowx, first_colx = cell_name_to_rowx_colx(first_cell_ref)
+            last_rowx, last_colx = cell_name_to_rowx_colx(last_cell_ref)
+            self.merged_cells.append((first_rowx, last_rowx + 1,
+                                      first_colx, last_colx + 1))
+
     def do_row(self, row_elem):
     
         def bad_child_tag(child_tag):
@@ -569,6 +616,8 @@
                 try:
                     for c in cell_name:
                         charx += 1
+                        if c == '$':
+                            continue
                         lv = letter_value[c]
                         if lv:
                             colx = colx * 26 + lv
@@ -683,15 +732,6 @@
         }
     augment_keys(tag2meth, U_SSML12)
 
-def getzflo(zipfile, member_path):
-    # GET a Zipfile File-Like Object for passing to
-    # an XML parser
-    try:
-        return zipfile.open(member_path) # CPython 2.6 onwards
-    except AttributeError:
-        # old way
-        return BYTES_IO(zipfile.read(member_path))
-
 def open_workbook_2007_xml(
     zf,
     component_names,
@@ -718,41 +758,47 @@
     bk.ragged_rows = ragged_rows
 
     x12book = X12Book(bk, logfile, verbosity)
-    zflo = getzflo(zf, 'xl/_rels/workbook.xml.rels')
+    zflo = zf.open(component_names['xl/_rels/workbook.xml.rels'])
     x12book.process_rels(zflo)
     del zflo
-    zflo = getzflo(zf, 'xl/workbook.xml')
+    zflo = zf.open(component_names['xl/workbook.xml'])
     x12book.process_stream(zflo, 'Workbook')
     del zflo
-    props_name = 'docProps/core.xml'
+    props_name = 'docprops/core.xml'
     if props_name in component_names:
-        zflo = getzflo(zf, props_name)
+        zflo = zf.open(component_names[props_name])
         x12book.process_coreprops(zflo)
 
     x12sty = X12Styles(bk, logfile, verbosity)
     if 'xl/styles.xml' in component_names:
-        zflo = getzflo(zf, 'xl/styles.xml')
+        zflo = zf.open(component_names['xl/styles.xml'])
         x12sty.process_stream(zflo, 'styles')
         del zflo
     else:
         # seen in MS sample file MergedCells.xlsx
         pass
 
-    sst_fname = 'xl/sharedStrings.xml'
+    sst_fname = 'xl/sharedstrings.xml'
     x12sst = X12SST(bk, logfile, verbosity)
     if sst_fname in component_names:
-        zflo = getzflo(zf, sst_fname)
+        zflo = zf.open(component_names[sst_fname])
         x12sst.process_stream(zflo, 'SST')
         del zflo
 
     for sheetx in range(bk.nsheets):
         fname = x12book.sheet_targets[sheetx]
-        zflo = getzflo(zf, fname)
+        zflo = zf.open(component_names[fname])
         sheet = bk._sheet_list[sheetx]
         x12sheet = X12Sheet(sheet, logfile, verbosity)
         heading = "Sheet %r (sheetx=%d) from %r" % (sheet.name, sheetx, fname)
         x12sheet.process_stream(zflo, heading)
         del zflo
+        comments_fname = 'xl/comments%d.xml' % (sheetx + 1)
+        if comments_fname in component_names:
+            comments_stream = zf.open(component_names[comments_fname])
+            x12sheet.process_comments_stream(comments_stream)
+            del comments_stream
+
         sheet.tidy_dimensions()
 
     return bk

