Hello community,

here is the log from the commit of package python-pydocumentdb for openSUSE:Factory checked in at 2018-09-26 16:16:16
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-pydocumentdb (Old)
 and      /work/SRC/openSUSE:Factory/.python-pydocumentdb.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-pydocumentdb"

Wed Sep 26 16:16:16 2018 rev:2 rq:638009 version:2.3.2

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-pydocumentdb/python-pydocumentdb.changes 2018-05-13 16:05:16.625867742 +0200
+++ /work/SRC/openSUSE:Factory/.python-pydocumentdb.new/python-pydocumentdb.changes 2018-09-26 16:16:17.491001770 +0200
@@ -1,0 +2,7 @@
+Thu Sep  6 12:57:59 UTC 2018 - John Paul Adrian Glaubitz <adrian.glaub...@suse.com>
+
+- New upstream release
+  + Version 2.3.2
+  + No upstream changelog provided
+
+-------------------------------------------------------------------

Old:
----
  pydocumentdb-2.3.1.tar.gz

New:
----
  pydocumentdb-2.3.2.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-pydocumentdb.spec ++++++
--- /var/tmp/diff_new_pack.JjqgS5/_old  2018-09-26 16:16:18.051000843 +0200
+++ /var/tmp/diff_new_pack.JjqgS5/_new  2018-09-26 16:16:18.051000843 +0200
@@ -18,7 +18,7 @@
 
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-pydocumentdb
-Version:        2.3.1
+Version:        2.3.2
 Release:        0
 Summary:        Azure DocumentDB Python SDK
 License:        MIT

++++++ pydocumentdb-2.3.1.tar.gz -> pydocumentdb-2.3.2.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/PKG-INFO new/pydocumentdb-2.3.2/PKG-INFO
--- old/pydocumentdb-2.3.1/PKG-INFO     2017-12-22 01:45:45.000000000 +0100
+++ new/pydocumentdb-2.3.2/PKG-INFO     2018-05-08 22:39:48.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: pydocumentdb
-Version: 2.3.1
+Version: 2.3.2
 Summary: Azure DocumentDB Python SDK
 Home-page: https://github.com/Azure/azure-documentdb-python
 Author: Microsoft
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/README.md new/pydocumentdb-2.3.2/README.md
--- old/pydocumentdb-2.3.1/README.md    2017-12-22 00:52:28.000000000 +0100
+++ new/pydocumentdb-2.3.2/README.md    2018-05-08 00:09:38.000000000 +0200
@@ -1,19 +1,21 @@
-This is the README of the Python driver for Microsoft Azure DocumentDB database service.
+# Microsoft Azure Cosmos DB Python SDK
 
-Welcome to DocumentDB.
+Welcome to the repo containing all things Python for the Azure Cosmos DB API which is published with name [pydocumentdb](https://pypi.python.org/pypi/pydocumentdb/). For documentation please see the Microsoft Azure [link](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-python).
 
 
-0) Pre-requirements:
+## Pre-requirements
 
-    Python 2.7, Python 3.3, Python 3.4, or Python 3.5
-    https://www.python.org/downloads/
+Python 2.7, Python 3.3, Python 3.4, or Python 3.5
+https://www.python.org/downloads/
 
-    If you use Microsoft Visual Studio as IDE (we use 2015), please install the
-    following extension for Python.
-    http://microsoft.github.io/PTVS/
+If you use Microsoft Visual Studio as IDE (we use 2015), please install the
+following extension for Python.
+http://microsoft.github.io/PTVS/
 
+Install Cosmos DB emulator
+Follow instruction at https://docs.microsoft.com/en-us/azure/cosmos-db/local-emulator
 
-1) Installation:
+## Installation:
 
     $ python setup.py install
 
@@ -22,7 +24,12 @@
     $ pip install pydocumentdb
 
 
-2) Testing:
+## Running Testing
+Clone the repo 
+```bash
+git clone https://github.com/Azure/azure-documentdb-python.git
+cd azure-documentdb-python
+```
 
 Most of the test files under test sub-folder require you to enter your Azure DocumentDB master key and host endpoint:
     
@@ -40,7 +47,7 @@
 Most of the test cases create collections in your DocumentDB account. Collections are billing entities. By running these test cases, you may incur monetary costs on your account.
   
 
-3) To generate documentations:
+## Documentation generation
 
     Install Sphinx: http://sphinx-doc.org/install.html
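
(Editor's note: for orientation, a minimal usage sketch of the SDK this README ships. The endpoint and key below are placeholders for your own account, or the local emulator mentioned above; this is not part of the packaged sources.)

```python
import pydocumentdb.document_client as document_client

# Placeholders -- substitute your Cosmos DB account (or emulator) endpoint and key.
HOST = 'https://localhost:8081'
MASTER_KEY = '<your-master-key>'

client = document_client.DocumentClient(HOST, {'masterKey': MASTER_KEY})
db = client.CreateDatabase({'id': 'sample database'})
print(db['_self'])
```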
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/doc/conf.py new/pydocumentdb-2.3.2/doc/conf.py
--- old/pydocumentdb-2.3.1/doc/conf.py  2017-12-22 00:52:28.000000000 +0100
+++ new/pydocumentdb-2.3.2/doc/conf.py  2018-05-08 00:13:56.000000000 +0200
@@ -52,9 +52,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '2.3.1'
+version = '2.3.2'
 # The full version, including alpha/beta/rc tags.
-release = '2.3.1'
+release = '2.3.2'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/pydocumentdb/base.py new/pydocumentdb-2.3.2/pydocumentdb/base.py
--- old/pydocumentdb-2.3.1/pydocumentdb/base.py 2017-12-22 00:52:28.000000000 +0100
+++ new/pydocumentdb-2.3.2/pydocumentdb/base.py 2018-05-08 00:09:38.000000000 +0200
@@ -150,6 +150,9 @@
     if options.get('enableCrossPartitionQuery'):
         headers[http_constants.HttpHeaders.EnableCrossPartitionQuery] = options['enableCrossPartitionQuery']
 
+    if options.get('populateQueryMetrics'):
+        headers[http_constants.HttpHeaders.PopulateQueryMetrics] = options['populateQueryMetrics']
+
     if document_client.master_key:
         headers[http_constants.HttpHeaders.XDate] = (
             datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT'))
@@ -577,4 +580,4 @@
 
     return tokens
 
-    
\ No newline at end of file
+    
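
(Editor's note: the hunk above copies a per-request option into the new x-ms-documentdb-populatequerymetrics header. A sketch of requesting it from calling code, using the client from the README sketch earlier; the collection link and query are hypothetical:)

```python
# Hypothetical links/names; 'populateQueryMetrics' matches the option read above.
options = {'partitionKey': 'pk', 'populateQueryMetrics': True}
docs = list(client.QueryDocuments('/dbs/mydb/colls/mycoll', 'SELECT * FROM c', options))
metrics = client.last_response_headers.get('x-ms-documentdb-query-metrics')
```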
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/pydocumentdb/default_retry_policy.py new/pydocumentdb-2.3.2/pydocumentdb/default_retry_policy.py
--- old/pydocumentdb-2.3.1/pydocumentdb/default_retry_policy.py 1970-01-01 01:00:00.000000000 +0100
+++ new/pydocumentdb-2.3.2/pydocumentdb/default_retry_policy.py 2018-05-08 00:09:38.000000000 +0200
@@ -0,0 +1,74 @@
+#The MIT License (MIT)
+#Copyright (c) 2017 Microsoft Corporation
+
+#Permission is hereby granted, free of charge, to any person obtaining a copy
+#of this software and associated documentation files (the "Software"), to deal
+#in the Software without restriction, including without limitation the rights
+#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+#copies of the Software, and to permit persons to whom the Software is
+#furnished to do so, subject to the following conditions:
+
+#The above copyright notice and this permission notice shall be included in all
+#copies or substantial portions of the Software.
+
+#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+#SOFTWARE.
+
+"""Internal class for connection reset retry policy implementation in the 
Azure Cosmos DB database service.
+"""
+import pydocumentdb.http_constants as http_constants
+
+class _DefaultRetryPolicy(object):
+
+    error_codes = http_constants._ErrorCodes;
+    CONNECTION_ERROR_CODES = [
+            error_codes.WindowsInterruptedFunctionCall,
+            error_codes.WindowsFileHandleNotValid,
+            error_codes.WindowsPermissionDenied,
+            error_codes.WindowsBadAddress,
+            error_codes.WindowsInvalidArgumnet,
+            error_codes.WindowsResourceTemporarilyUnavailable,
+            error_codes.WindowsOperationNowInProgress,
+            error_codes.WindowsAddressAlreadyInUse,
+            error_codes.WindowsConnectionResetByPeer,
+            error_codes.WindowsCannotSendAfterSocketShutdown,
+            error_codes.WindowsConnectionTimedOut,
+            error_codes.WindowsConnectionRefused,
+            error_codes.WindowsNameTooLong,
+            error_codes.WindowsHostIsDown,
+            error_codes.WindowsNoRouteTohost,
+            error_codes.LinuxConnectionReset
+        ]
+
+    def __init__(self, *args):
+        self._max_retry_attempt_count = 10
+        self.current_retry_attempt_count = 0
+        self.retry_after_in_milliseconds = 1000
+        self.args = args
+
+    def needsRetry(self, error_code):
+        if error_code in _DefaultRetryPolicy.CONNECTION_ERROR_CODES:
+            if (len(self.args) > 0):
+                if (self.args[3]['method'] == 'GET') or (http_constants.HttpHeaders.IsQuery in self.args[3]['headers']):
+                    return True
+                return False
+            return True
+
+    def ShouldRetry(self, exception):
+        """Returns true if should retry based on the passed-in exception.
+
+        :param (errors.HTTPFailure instance) exception:
+
+        :rtype:
+            boolean
+
+        """
+        if (self.current_retry_attempt_count < self._max_retry_attempt_count) and self.needsRetry(exception.status_code):
+            self.current_retry_attempt_count += 1
+            return True
+        return False
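
(Editor's note: a short sketch of how this new policy behaves in isolation, using a stub in place of errors.HTTPFailure; not part of the packaged sources:)

```python
import pydocumentdb.default_retry_policy as default_retry_policy

class _FakeFailure(object):
    # Stand-in for errors.HTTPFailure; only status_code is consulted here.
    def __init__(self, status_code):
        self.status_code = status_code

policy = default_retry_policy._DefaultRetryPolicy()
print(policy.ShouldRetry(_FakeFailure(10054)))  # True: WSAECONNRESET is a connection error code
print(policy.ShouldRetry(_FakeFailure(404)))    # False: not a connection error code
```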
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/pydocumentdb/endpoint_discovery_retry_policy.py new/pydocumentdb-2.3.2/pydocumentdb/endpoint_discovery_retry_policy.py
--- old/pydocumentdb-2.3.1/pydocumentdb/endpoint_discovery_retry_policy.py 2017-12-22 00:52:28.000000000 +0100
+++ new/pydocumentdb-2.3.2/pydocumentdb/endpoint_discovery_retry_policy.py 2018-05-08 00:09:38.000000000 +0200
@@ -24,6 +24,14 @@
 
 import logging
 
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+log_formatter = logging.Formatter('%(levelname)s:%(message)s')
+log_handler = logging.StreamHandler()
+log_handler.setFormatter(log_formatter)
+logger.addHandler(log_handler)
+
+
 class _EndpointDiscoveryRetryPolicy(object):
     """The endpoint discovery retry policy class used for geo-replicated 
database accounts
        to handle the write forbidden exceptions due to writable/readable 
location changes
@@ -32,15 +40,12 @@
 
     Max_retry_attempt_count = 120
     Retry_after_in_milliseconds = 1000
-    FORBIDDEN_STATUS_CODE = 403
-    WRITE_FORBIDDEN_SUB_STATUS_CODE = 3
 
     def __init__(self, global_endpoint_manager):
         self.global_endpoint_manager = global_endpoint_manager
         self._max_retry_attempt_count = _EndpointDiscoveryRetryPolicy.Max_retry_attempt_count
         self.current_retry_attempt_count = 0
         self.retry_after_in_milliseconds = _EndpointDiscoveryRetryPolicy.Retry_after_in_milliseconds
-        logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
 
     def ShouldRetry(self, exception):
         """Returns true if should retry based on the passed-in exception.
@@ -53,7 +58,7 @@
         """
         if self.current_retry_attempt_count < self._max_retry_attempt_count and self.global_endpoint_manager.EnableEndpointDiscovery:
             self.current_retry_attempt_count += 1
-            logging.info('Write location was changed, refreshing the locations list from database account and will retry the request.')
+            logger.info('Write location was changed, refreshing the locations list from database account and will retry the request.')
 
             # Refresh the endpoint list to refresh the new writable and readable locations
             self.global_endpoint_manager.RefreshEndpointList()
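
(Editor's note: replacing logging.basicConfig() with a named module logger stops the SDK from reconfiguring the application's root logger, and applications can now tune the SDK's logger by name. A sketch, assuming the module path stays as shipped:)

```python
import logging

# Quiet the endpoint-discovery retry messages without touching the root logger.
logging.getLogger('pydocumentdb.endpoint_discovery_retry_policy').setLevel(logging.WARNING)
```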
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/pydocumentdb/execution_context/execution_dispatcher.py new/pydocumentdb-2.3.2/pydocumentdb/execution_context/execution_dispatcher.py
--- old/pydocumentdb-2.3.1/pydocumentdb/execution_context/execution_dispatcher.py 2017-12-22 00:52:28.000000000 +0100
+++ new/pydocumentdb-2.3.2/pydocumentdb/execution_context/execution_dispatcher.py 2018-05-08 00:09:38.000000000 +0200
@@ -30,6 +30,7 @@
 from pydocumentdb.execution_context.query_execution_info import _PartitionedQueryExecutionInfo
 from pydocumentdb.execution_context import endpoint_component
 from pydocumentdb.execution_context import multi_execution_aggregator
+from pydocumentdb.http_constants import StatusCodes, SubStatusCodes
 
 class _ProxyQueryExecutionContext(_QueryExecutionContextBase):
     '''
@@ -92,7 +93,7 @@
         return self._execution_context.fetch_next_block()        
         
     def _is_partitioned_execution_info(self, e):    
-        return e.status_code == 400 and e.sub_status == 1004
+        return e.status_code == StatusCodes.BAD_REQUEST and e.sub_status == SubStatusCodes.CROSS_PARTITION_QUERY_NOT_SERVABLE
     
     def _get_partitioned_execution_info(self, e):
         error_msg = json.loads(e._http_error_message)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/pydocumentdb/http_constants.py new/pydocumentdb-2.3.2/pydocumentdb/http_constants.py
--- old/pydocumentdb-2.3.1/pydocumentdb/http_constants.py 2017-12-22 00:52:28.000000000 +0100
+++ new/pydocumentdb-2.3.2/pydocumentdb/http_constants.py 2018-05-08 00:14:30.000000000 +0200
@@ -112,6 +112,7 @@
     SubStatus = 'x-ms-substatus'
     AlternateContentPath = 'x-ms-alt-content-path'
     IsContinuationExpected = "x-ms-documentdb-query-iscontinuationexpected"
+    PopulateQueryMetrics = "x-ms-documentdb-populatequerymetrics"
 
     # Quota Info
     MaxEntityCount = 'x-ms-root-entity-max-count'
@@ -245,7 +246,7 @@
     """
     CurrentVersion = '2017-11-15'
     SDKName = 'documentdb-python-sdk'
-    SDKVersion = '2.3.1'
+    SDKVersion = '2.3.2'
 
 
 class Delimiters:
@@ -266,3 +267,95 @@
     """Constants of http context properties.
     """
     SubscriptionId = 'SubscriptionId'
+
+
+class _ErrorCodes:
+    """Windows Socket Error Codes
+    """
+    WindowsInterruptedFunctionCall = 10004;
+    WindowsFileHandleNotValid = 10009;
+    WindowsPermissionDenied = 10013;
+    WindowsBadAddress = 10014;
+    WindowsInvalidArgumnet = 10022;
+    WindowsResourceTemporarilyUnavailable = 10035;
+    WindowsOperationNowInProgress = 10036;
+    WindowsAddressAlreadyInUse = 10048;
+    WindowsConnectionResetByPeer = 10054;
+    WindowsCannotSendAfterSocketShutdown = 10058;
+    WindowsConnectionTimedOut = 10060;
+    WindowsConnectionRefused = 10061;
+    WindowsNameTooLong = 10063;
+    WindowsHostIsDown = 10064;
+    WindowsNoRouteTohost = 10065;
+
+    """Linux Error Codes
+    """
+    LinuxConnectionReset = 131;
+
+
+class StatusCodes:
+    """HTTP status codes returned by the REST operations
+    """
+    # Success
+    OK = 200
+    CREATED = 201
+    ACCEPTED = 202
+    NO_CONTENT = 204
+
+    NOT_MODIFIED = 304
+
+    # Client Error
+    BAD_REQUEST = 400
+    UNAUTHORIZED = 401
+    FORBIDDEN = 403
+    NOT_FOUND = 404
+    METHOD_NOT_ALLOWED = 405
+    REQUEST_TIMEOUT = 408
+    CONFLICT = 409
+    GONE = 410
+    PRECONDITION_FAILED = 412
+    REQUEST_ENTITY_TOO_LARGE = 413
+    TOO_MANY_REQUESTS = 429
+    RETRY_WITH = 449
+
+    INTERNAL_SERVER_ERROR = 500
+    SERVICE_UNAVAILABLE = 503
+
+    # Operation pause and cancel. These are FAKE status codes for QOS logging purpose only.
+    OPERATION_PAUSED = 1200
+    OPERATION_CANCELLED = 1201
+
+
+class SubStatusCodes:
+    """Sub status codes returned by the REST operations specifying the details of the operation
+    """
+    UNKNOWN = 0
+
+    # 400: Bad Request Substatus 
+    PARTITION_KEY_MISMATCH = 1001
+    CROSS_PARTITION_QUERY_NOT_SERVABLE = 1004
+
+    # 410: StatusCodeType_Gone: substatus 
+    NAME_CACHE_IS_STALE = 1000
+    PARTITION_KEY_RANGE_GONE = 1002
+    COMPLETING_SPLIT = 1007
+    COMPLETING_PARTITION_MIGRATION = 1008
+        
+    # 403: Forbidden Substatus.
+    WRITE_FORBIDDEN = 3
+    PROVISION_LIMIT_REACHED = 1005
+    DATABASE_ACCOUNT_NOT_FOUND = 1008
+    REDUNDANT_COLLECTION_PUT = 1009
+    SHARED_THROUGHPUT_DATABASE_QUOTA_EXCEEDED = 1010
+    SHARED_THROUGHPUT_OFFER_GROW_NOT_NEEDED = 1011
+
+    # 404: LSN in session token is higher
+    READ_SESSION_NOTAVAILABLE = 1002
+    OWNER_RESOURCE_NOT_FOUND = 1003
+
+    # 409: Conflict exception
+    CONFLICT_WITH_CONTROL_PLANE = 1006
+
+    # 503: Service Unavailable due to region being out of capacity for bindable partitions
+    INSUFFICIENT_BINDABLE_PARTITIONS = 1007
+
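
(Editor's note: with the new classes in place, calling code can branch on named constants instead of numeric literals. A sketch, assuming an errors.HTTPFailure carrying status_code and sub_status, as used elsewhere in this diff; describe() is a hypothetical helper:)

```python
from pydocumentdb.http_constants import StatusCodes, SubStatusCodes

def describe(e):
    # e: errors.HTTPFailure with status_code and sub_status attributes.
    if e.status_code == StatusCodes.FORBIDDEN and e.sub_status == SubStatusCodes.WRITE_FORBIDDEN:
        return 'write sent to a read-only region'
    if e.status_code == StatusCodes.TOO_MANY_REQUESTS:
        return 'request throttled'
    return 'unhandled HTTP failure %d' % e.status_code
```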
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/pydocumentdb/resource_throttle_retry_policy.py new/pydocumentdb-2.3.2/pydocumentdb/resource_throttle_retry_policy.py
--- old/pydocumentdb-2.3.1/pydocumentdb/resource_throttle_retry_policy.py 2017-12-22 00:52:28.000000000 +0100
+++ new/pydocumentdb-2.3.2/pydocumentdb/resource_throttle_retry_policy.py 2018-05-08 00:09:38.000000000 +0200
@@ -25,7 +25,6 @@
 import pydocumentdb.http_constants as http_constants
 
 class _ResourceThrottleRetryPolicy(object):
-    THROTTLE_STATUS_CODE = 429
 
     def __init__(self, max_retry_attempt_count, fixed_retry_interval_in_milliseconds, max_wait_time_in_seconds):
         self._max_retry_attempt_count = max_retry_attempt_count
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/pydocumentdb/retry_options.py new/pydocumentdb-2.3.2/pydocumentdb/retry_options.py
--- old/pydocumentdb-2.3.1/pydocumentdb/retry_options.py 2017-12-22 00:52:28.000000000 +0100
+++ new/pydocumentdb-2.3.2/pydocumentdb/retry_options.py 2018-05-08 00:09:38.000000000 +0200
@@ -29,7 +29,7 @@
         Max number of retries to be performed for a request. Default value 9.
     :ivar int FixedRetryIntervalInMilliseconds:
         Fixed retry interval in milliseconds to wait between each retry ignoring the retryAfter returned as part of the response.
-    :ivar int MaxRetryAttemptCount:
+    :ivar int MaxWaitTimeInSeconds:
         Max wait time in seconds to wait for a request while the retries are happening. Default value 30 seconds.
     """
     def __init__(self, max_retry_attempt_count = 9, fixed_retry_interval_in_milliseconds = None, max_wait_time_in_seconds = 30):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/pydocumentdb/retry_utility.py new/pydocumentdb-2.3.2/pydocumentdb/retry_utility.py
--- old/pydocumentdb-2.3.1/pydocumentdb/retry_utility.py 2017-12-22 00:52:29.000000000 +0100
+++ new/pydocumentdb-2.3.2/pydocumentdb/retry_utility.py 2018-05-08 00:09:38.000000000 +0200
@@ -27,7 +27,8 @@
 import pydocumentdb.errors as errors
 import pydocumentdb.endpoint_discovery_retry_policy as endpoint_discovery_retry_policy
 import pydocumentdb.resource_throttle_retry_policy as resource_throttle_retry_policy
-import pydocumentdb.http_constants as http_constants
+import pydocumentdb.default_retry_policy as default_retry_policy
+from pydocumentdb.http_constants import HttpHeaders, StatusCodes, SubStatusCodes
 
 def _Execute(client, global_endpoint_manager, function, *args, **kwargs):
     """Exectutes the function with passed parameters applying all retry 
policies
@@ -48,6 +49,7 @@
     resourceThrottle_retry_policy = resource_throttle_retry_policy._ResourceThrottleRetryPolicy(client.connection_policy.RetryOptions.MaxRetryAttemptCount,
                                                                                                 client.connection_policy.RetryOptions.FixedRetryIntervalInMilliseconds,
                                                                                                 client.connection_policy.RetryOptions.MaxWaitTimeInSeconds)
+    defaultRetry_policy = default_retry_policy._DefaultRetryPolicy(*args)
 
     while True:
         try:
@@ -57,26 +59,28 @@
                 client.last_response_headers = {}
             
             # setting the throttle related response headers before returning the result
-            client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryCount] = resourceThrottle_retry_policy.current_retry_attempt_count
-            client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryWaitTimeInMs] = resourceThrottle_retry_policy.cummulative_wait_time_in_milliseconds
+            client.last_response_headers[HttpHeaders.ThrottleRetryCount] = resourceThrottle_retry_policy.current_retry_attempt_count
+            client.last_response_headers[HttpHeaders.ThrottleRetryWaitTimeInMs] = resourceThrottle_retry_policy.cummulative_wait_time_in_milliseconds
             
             return result
         except errors.HTTPFailure as e:
             retry_policy = None
 
-            if (e.status_code == endpoint_discovery_retry_policy._EndpointDiscoveryRetryPolicy.FORBIDDEN_STATUS_CODE
-                    and e.sub_status == endpoint_discovery_retry_policy._EndpointDiscoveryRetryPolicy.WRITE_FORBIDDEN_SUB_STATUS_CODE):
+            if (e.status_code == StatusCodes.FORBIDDEN
+                    and e.sub_status == SubStatusCodes.WRITE_FORBIDDEN):
                 retry_policy = endpointDiscovery_retry_policy
-            elif e.status_code == resource_throttle_retry_policy._ResourceThrottleRetryPolicy.THROTTLE_STATUS_CODE:
+            elif e.status_code == StatusCodes.TOO_MANY_REQUESTS:
                 retry_policy = resourceThrottle_retry_policy
+            else:
+                retry_policy = defaultRetry_policy
 
             # If none of the retry policies applies or there is no retry needed, set the throttle related response hedaers and 
             # re-throw the exception back
-            if not (retry_policy and retry_policy.ShouldRetry(e)):
+            if not (retry_policy.ShouldRetry(e)):
                 if not client.last_response_headers:
                     client.last_response_headers = {}
-                client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryCount] = resourceThrottle_retry_policy.current_retry_attempt_count
-                client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryWaitTimeInMs] = resourceThrottle_retry_policy.cummulative_wait_time_in_milliseconds
+                client.last_response_headers[HttpHeaders.ThrottleRetryCount] = resourceThrottle_retry_policy.current_retry_attempt_count
+                client.last_response_headers[HttpHeaders.ThrottleRetryWaitTimeInMs] = resourceThrottle_retry_policy.cummulative_wait_time_in_milliseconds
                 raise
             else:
                 # Wait for retry_after_in_milliseconds time before the next retry
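
(Editor's note: net effect of the hunk above is that throttling keeps its dedicated policy, write-forbidden keeps endpoint discovery, and everything else now falls through to the new connection-reset policy, so ShouldRetry is always consulted. A condensed sketch of that selection order; the helper name is hypothetical, not the shipped code:)

```python
from pydocumentdb.http_constants import StatusCodes, SubStatusCodes

def _pick_retry_policy(e, endpoint_policy, throttle_policy, default_policy):
    # Mirrors the selection order in retry_utility._Execute (sketch only).
    if e.status_code == StatusCodes.FORBIDDEN and e.sub_status == SubStatusCodes.WRITE_FORBIDDEN:
        return endpoint_policy
    if e.status_code == StatusCodes.TOO_MANY_REQUESTS:
        return throttle_policy
    return default_policy
```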
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/pydocumentdb.egg-info/PKG-INFO new/pydocumentdb-2.3.2/pydocumentdb.egg-info/PKG-INFO
--- old/pydocumentdb-2.3.1/pydocumentdb.egg-info/PKG-INFO 2017-12-22 01:45:43.000000000 +0100
+++ new/pydocumentdb-2.3.2/pydocumentdb.egg-info/PKG-INFO 2018-05-08 22:39:46.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: pydocumentdb
-Version: 2.3.1
+Version: 2.3.2
 Summary: Azure DocumentDB Python SDK
 Home-page: https://github.com/Azure/azure-documentdb-python
 Author: Microsoft
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/pydocumentdb.egg-info/SOURCES.txt new/pydocumentdb-2.3.2/pydocumentdb.egg-info/SOURCES.txt
--- old/pydocumentdb-2.3.1/pydocumentdb.egg-info/SOURCES.txt 2017-12-22 01:45:43.000000000 +0100
+++ new/pydocumentdb-2.3.2/pydocumentdb.egg-info/SOURCES.txt 2018-05-08 22:39:46.000000000 +0200
@@ -12,6 +12,7 @@
 pydocumentdb/base.py
 pydocumentdb/consistent_hash_ring.py
 pydocumentdb/constants.py
+pydocumentdb/default_retry_policy.py
 pydocumentdb/document_client.py
 pydocumentdb/documents.py
 pydocumentdb/endpoint_discovery_retry_policy.py
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/setup.py new/pydocumentdb-2.3.2/setup.py
--- old/pydocumentdb-2.3.1/setup.py     2017-12-22 00:52:29.000000000 +0100
+++ new/pydocumentdb-2.3.2/setup.py     2018-05-08 00:14:45.000000000 +0200
@@ -4,7 +4,7 @@
 import setuptools
 
 setup(name='pydocumentdb',
-      version='2.3.1',
+      version='2.3.2',
       description='Azure DocumentDB Python SDK',
       author="Microsoft",
       author_email="askdo...@microsoft.com",
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/test/crud_tests.py new/pydocumentdb-2.3.2/test/crud_tests.py
--- old/pydocumentdb-2.3.1/test/crud_tests.py 2017-12-22 00:52:29.000000000 +0100
+++ new/pydocumentdb-2.3.2/test/crud_tests.py 2018-05-08 00:09:38.000000000 +0200
@@ -38,7 +38,7 @@
 import pydocumentdb.document_client as document_client
 import pydocumentdb.errors as errors
 import pydocumentdb.hash_partition_resolver as hash_partition_resolver
-import pydocumentdb.http_constants as http_constants
+from pydocumentdb.http_constants import HttpHeaders, StatusCodes, SubStatusCodes
 import pydocumentdb.murmur_hash as murmur_hash
 import pydocumentdb.range_partition_resolver as range_partition_resolver
 import pydocumentdb.range as partition_range
@@ -140,7 +140,7 @@
         # delete database.
         client.DeleteDatabase(self.GetDatabaseLink(created_db, is_name_based))
         # read database after deletion
-        self.__AssertHTTPFailureWithStatus(404,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
                                            client.ReadDatabase,
                                            self.GetDatabaseLink(created_db, is_name_based))
 
@@ -213,7 +213,7 @@
         # Replacing collection Id should fail.
         change_collection = created_collection.copy()
         change_collection['id'] = 'try_change_id'
-        self.__AssertHTTPFailureWithStatus(400,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.BAD_REQUEST,
                                            client.ReplaceCollection,
                                            self.GetDocumentCollectionLink(created_db, created_collection, is_name_based),
                                            change_collection)
@@ -222,7 +222,7 @@
         # delete collection
         client.DeleteCollection(self.GetDocumentCollectionLink(created_db, created_collection, is_name_based))
         # read collection after deletion
-        self.__AssertHTTPFailureWithStatus(404,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
                                            client.ReadCollection,
                                            self.GetDocumentCollectionLink(created_db, created_collection, is_name_based))
 
@@ -295,7 +295,7 @@
         options = { 'partitionKey': 'NY' }
 
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateDocument,
             self.GetDocumentCollectionLink(created_db, created_collection),
             document_definition,
@@ -441,7 +441,7 @@
 
         # For ReadDocument, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.ReadDocument,
             self.GetDocumentLink(created_db, created_collection, created_document))
 
@@ -484,7 +484,7 @@
 
         # For DeleteDocument, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.DeleteDocument,
             self.GetDocumentLink(created_db, created_collection, upserted_document))
 
@@ -596,7 +596,7 @@
 
         # Create document in read_collection should fail since it has only read permissions for this collection
         self.__AssertHTTPFailureWithStatus(
-            403,
+            StatusCodes.FORBIDDEN,
             restricted_client.CreateDocument,
             self.GetDocumentCollectionLink(created_db, read_collection, False),
             document_definition)
@@ -613,7 +613,7 @@
         options = { 'partitionKey': document_definition.get('key') }
         # Create document should fail since the partitionKey is 2 which is different that what is specified as resourcePartitionKey in permission object
         self.__AssertHTTPFailureWithStatus(
-            403,
+            StatusCodes.FORBIDDEN,
             restricted_client.CreateDocument,
             self.GetDocumentCollectionLink(created_db, all_collection, False),
             document_definition,
@@ -628,7 +628,7 @@
 
         # Delete document in read_collection should fail since it has only read permissions for this collection
         self.__AssertHTTPFailureWithStatus(
-            403,
+            StatusCodes.FORBIDDEN,
             restricted_client.DeleteDocument,
             self.GetDocumentCollectionLink(created_db, read_collection, False),
             options)
@@ -673,7 +673,7 @@
 
         # Partiton Key value different than what is specified in the stored procedure body will cause a bad request(400) error
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.ExecuteStoredProcedure,
             self.GetStoredProcedureLink(created_db, created_collection, 
created_sproc),
             None,
@@ -746,7 +746,7 @@
 
         # Currently, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateAttachmentAndUploadMedia,
             self.GetDocumentLink(db, collection, document),
             content_stream,
@@ -782,7 +782,7 @@
                     'contentType': 'application/text' }
         # Currently, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.UpsertAttachmentAndUploadMedia,
             self.GetDocumentLink(db, collection, document),
             content_stream,
@@ -817,7 +817,7 @@
 
         # Currently, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateAttachment,
             self.GetDocumentLink(db, collection, document),
             dynamic_attachment)
@@ -838,7 +838,7 @@
         
         # Currently, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.ReadAttachment,
             self.GetAttachmentLink(db, collection, document, attachment))
 
@@ -853,7 +853,7 @@
         
         # Currently, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.ReplaceAttachment,
             self.GetAttachmentLink(db, collection, document, attachment),
             attachment)
@@ -870,7 +870,7 @@
 
         # Currently, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.UpsertAttachment,
             self.GetDocumentLink(db, collection, document),
             attachment)
@@ -904,7 +904,7 @@
 
         # Currently, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.DeleteAttachment,
             self.GetAttachmentLink(db, collection, document, attachment))
 
@@ -1009,14 +1009,14 @@
 
         # Currently, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.ReadConflict,
             self.GetConflictLink(created_db, created_collection, conflict_definition))
 
         # read conflict here will return resource not found(404) since there is no conflict here
         options = { 'partitionKey': conflict_definition.get('id') }
         self.__AssertHTTPFailureWithStatus(
-            404,
+            StatusCodes.NOT_FOUND,
             client.ReadConflict,
             self.GetConflictLink(created_db, created_collection, conflict_definition),
             options)
@@ -1027,14 +1027,14 @@
 
         # Currently, we require to have the partitionKey to be specified as part of options otherwise we get BadRequest(status code 400)
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.DeleteConflict,
             self.GetConflictLink(created_db, created_collection, conflict_definition))
 
         # delete conflict here will return resource not found(404) since there is no conflict here
         options = { 'partitionKey': conflict_definition.get('id') }
         self.__AssertHTTPFailureWithStatus(
-            404,
+            StatusCodes.NOT_FOUND,
             client.DeleteConflict,
             self.GetConflictLink(created_db, created_collection, conflict_definition),
             options)
@@ -1096,7 +1096,7 @@
                                'key': 'value'}
         # Should throw an error because automatic id generation is disabled.
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateDocument,
             self.GetDocumentCollectionLink(created_db, created_collection, is_name_based),
             document_definition,
@@ -1120,7 +1120,7 @@
         # duplicated documents are not allowed when 'id' is provided.
         duplicated_definition_with_id = document_definition.copy()
         duplicated_definition_with_id['id'] = created_document['id']
-        self.__AssertHTTPFailureWithStatus(409,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.CONFLICT,
                                            client.CreateDocument,
                                            
self.GetDocumentCollectionLink(created_db, created_collection, is_name_based),
                                            duplicated_definition_with_id)
@@ -1174,7 +1174,7 @@
         # delete document
         client.DeleteDocument(self.GetDocumentLink(created_db, created_collection, replaced_document, is_name_based))
         # read documents after deletion
-        self.__AssertHTTPFailureWithStatus(404,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
                                            client.ReadDocument,
                                            self.GetDocumentLink(created_db, created_collection, replaced_document, is_name_based))
     
@@ -1833,7 +1833,7 @@
         # create attachment with invalid content-type
         content_stream = ReadableStream()
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateAttachmentAndUploadMedia,
             self.GetDocumentLink(db, collection, document, is_name_based),
             content_stream,
@@ -1849,7 +1849,7 @@
         content_stream = ReadableStream()
         # create colliding attachment
         self.__AssertHTTPFailureWithStatus(
-            409,
+            StatusCodes.CONFLICT,
             client.CreateAttachmentAndUploadMedia,
             self.GetDocumentLink(db, collection, document, is_name_based),
             content_stream,
@@ -2154,7 +2154,7 @@
         # delete user
         client.DeleteUser(self.GetUserLink(db, user, is_name_based))
         # read user after deletion
-        self.__AssertHTTPFailureWithStatus(404,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
                                            client.ReadUser,
                                            self.GetUserLink(db, user, is_name_based))
 
@@ -2279,7 +2279,7 @@
         # delete permission
         client.DeletePermission(self.GetPermissionLink(db, user, replaced_permission, is_name_based))
         # read permission after deletion
-        self.__AssertHTTPFailureWithStatus(404,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
                                            client.ReadPermission,
                                            self.GetPermissionLink(db, user, permission, is_name_based))
 
@@ -2462,7 +2462,7 @@
 
         # Client without any authorization will fail.
         client = document_client.DocumentClient(CRUDTests.host, {})
-        self.__AssertHTTPFailureWithStatus(401,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.UNAUTHORIZED,
                                            list,
                                            client.ReadDatabases())
         # Client with master key.
@@ -2481,7 +2481,7 @@
         success_coll1 = col1_client.ReadCollection(
             entities['coll1']['_self'])
         # 2. Failure-- Use Col1 Permission to delete
-        self.__AssertHTTPFailureWithStatus(403,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.FORBIDDEN,
                                            col1_client.DeleteCollection,
                                            success_coll1['_self'])
         # 3. Success-- Use Col1 Permission to Read All Docs
@@ -2588,7 +2588,7 @@
         # delete trigger
         res = client.DeleteTrigger(self.GetTriggerLink(db, collection, replaced_trigger, is_name_based))
         # read triggers after deletion
-        self.__AssertHTTPFailureWithStatus(404,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
                                            client.ReadTrigger,
                                            self.GetTriggerLink(db, collection, replaced_trigger, is_name_based))
 
@@ -2746,7 +2746,7 @@
         # delete udf
         res = client.DeleteUserDefinedFunction(self.GetUserDefinedFunctionLink(db, collection, replaced_udf, is_name_based))
         # read udfs after deletion
-        self.__AssertHTTPFailureWithStatus(404,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
                                            client.ReadUserDefinedFunction,
                                            self.GetUserDefinedFunctionLink(db, collection, replaced_udf, is_name_based))
 
@@ -2910,7 +2910,7 @@
         # delete sproc
         res = client.DeleteStoredProcedure(self.GetStoredProcedureLink(db, collection, replaced_sproc, is_name_based))
         # read sprocs after deletion
-        self.__AssertHTTPFailureWithStatus(404,
+        self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
                                            client.ReadStoredProcedure,
                                            self.GetStoredProcedureLink(db, collection, replaced_sproc, is_name_based))
 
@@ -3036,19 +3036,19 @@
         result = client.ExecuteStoredProcedure(self.GetStoredProcedureLink(created_db, created_collection, created_sproc), None)
 
         self.assertEqual(result, 'Success!')
-        self.assertFalse(http_constants.HttpHeaders.ScriptLogResults in client.last_response_headers)
+        self.assertFalse(HttpHeaders.ScriptLogResults in client.last_response_headers)
 
         options = { 'enableScriptLogging': True }
         result = client.ExecuteStoredProcedure(self.GetStoredProcedureLink(created_db, created_collection, created_sproc), None, options)
 
         self.assertEqual(result, 'Success!')
-        self.assertEqual('The value of x is 1.', client.last_response_headers.get(http_constants.HttpHeaders.ScriptLogResults))
+        self.assertEqual('The value of x is 1.', client.last_response_headers.get(HttpHeaders.ScriptLogResults))
 
         options = { 'enableScriptLogging': False }
         result = client.ExecuteStoredProcedure(self.GetStoredProcedureLink(created_db, created_collection, created_sproc), None, options)
 
         self.assertEqual(result, 'Success!')
-        self.assertFalse(http_constants.HttpHeaders.ScriptLogResults in client.last_response_headers)
+        self.assertFalse(HttpHeaders.ScriptLogResults in client.last_response_headers)
         
         client.DeleteCollection(self.GetDocumentCollectionLink(created_db, created_collection))
         
@@ -3588,11 +3588,11 @@
         self.assertEqual(expected_offer.get('_self'), query_one_offer.get('_self'))
         self.assertEqual(expected_offer.get('resource'), query_one_offer.get('resource'))
         # Expects an exception when reading offer with bad offer link.
-        self.__AssertHTTPFailureWithStatus(400, client.ReadOffer, expected_offer.get('_self')[:-1] + 'x')
+        self.__AssertHTTPFailureWithStatus(StatusCodes.BAD_REQUEST, client.ReadOffer, expected_offer.get('_self')[:-1] + 'x')
         # Now delete the collection.
         client.DeleteCollection(collection.get('_self'))
         # Reading fails.
-        self.__AssertHTTPFailureWithStatus(404, client.ReadOffer, expected_offer.get('_self'))
+        self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND, client.ReadOffer, expected_offer.get('_self'))
         # Read feed now returns 0 results.
         offers = list(client.ReadOffers())
         self.assertEqual(initial_count, len(offers))
@@ -3622,18 +3622,18 @@
         offer_to_replace_bad_id = dict(offer_to_replace)
         offer_to_replace_bad_id['_rid'] = 'NotAllowed'
         self.__AssertHTTPFailureWithStatus(
-            400, client.ReplaceOffer, offer_to_replace_bad_id['_self'], offer_to_replace_bad_id)
+            StatusCodes.BAD_REQUEST, client.ReplaceOffer, offer_to_replace_bad_id['_self'], offer_to_replace_bad_id)
         # Expects an exception when replacing an offer with bad rid.
         offer_to_replace_bad_rid = dict(offer_to_replace)
         offer_to_replace_bad_rid['_rid'] = 'InvalidRid'
         self.__AssertHTTPFailureWithStatus(
-            400, client.ReplaceOffer, offer_to_replace_bad_rid['_self'], offer_to_replace_bad_rid)
+            StatusCodes.BAD_REQUEST, client.ReplaceOffer, offer_to_replace_bad_rid['_self'], offer_to_replace_bad_rid)
         # Expects an exception when replaceing an offer with null id and rid.
         offer_to_replace_null_ids = dict(offer_to_replace)
         offer_to_replace_null_ids['id'] = None
         offer_to_replace_null_ids['_rid'] = None
         self.__AssertHTTPFailureWithStatus(
-            400, client.ReplaceOffer, offer_to_replace_null_ids['_self'], offer_to_replace_null_ids)
+            StatusCodes.BAD_REQUEST, client.ReplaceOffer, offer_to_replace_null_ids['_self'], offer_to_replace_null_ids)
 
     def test_collection_with_offer_type(self):
         client = document_client.DocumentClient(CRUDTests.host,
@@ -3672,18 +3672,18 @@
         database_account = client.GetDatabaseAccount()
         self.assertEqual(database_account.DatabasesLink, '/dbs/')
         self.assertEqual(database_account.MediaLink, '/media/')
-        if (http_constants.HttpHeaders.MaxMediaStorageUsageInMB in
+        if (HttpHeaders.MaxMediaStorageUsageInMB in
             client.last_response_headers):
             self.assertEqual(
                 database_account.MaxMediaStorageUsageInMB,
                 client.last_response_headers[
-                    http_constants.HttpHeaders.MaxMediaStorageUsageInMB])
-        if (http_constants.HttpHeaders.CurrentMediaStorageUsageInMB in
+                    HttpHeaders.MaxMediaStorageUsageInMB])
+        if (HttpHeaders.CurrentMediaStorageUsageInMB in
             client.last_response_headers):
             self.assertEqual(
                 database_account.CurrentMediaStorageUsageInMB,
                 client.last_response_headers[
-                    http_constants.HttpHeaders.
+                    HttpHeaders.
                     CurrentMediaStorageUsageInMB])
         self.assertTrue(
             database_account.ConsistencyPolicy['defaultConsistencyLevel']
@@ -3700,24 +3700,24 @@
         created_db = client.CreateDatabase({ 'id': CRUDTests.testDbName })
         consistent_coll = client.CreateCollection(self.GetDatabaseLink(created_db, is_name_based), { 'id': 'consistent_coll' })
         client.ReadCollection(self.GetDocumentCollectionLink(created_db, consistent_coll, is_name_based))
-        self.assertFalse(http_constants.HttpHeaders.LazyIndexingProgress in client.last_response_headers)
-        self.assertTrue(http_constants.HttpHeaders.IndexTransformationProgress in client.last_response_headers)
+        self.assertFalse(HttpHeaders.LazyIndexingProgress in client.last_response_headers)
+        self.assertTrue(HttpHeaders.IndexTransformationProgress in client.last_response_headers)
         lazy_coll = client.CreateCollection(self.GetDatabaseLink(created_db, is_name_based),
             {
                 'id': 'lazy_coll',
                 'indexingPolicy': { 'indexingMode' : documents.IndexingMode.Lazy }
             })
         client.ReadCollection(self.GetDocumentCollectionLink(created_db, lazy_coll, is_name_based))
-        self.assertTrue(http_constants.HttpHeaders.LazyIndexingProgress in client.last_response_headers)
-        self.assertTrue(http_constants.HttpHeaders.IndexTransformationProgress in client.last_response_headers)
+        self.assertTrue(HttpHeaders.LazyIndexingProgress in client.last_response_headers)
+        self.assertTrue(HttpHeaders.IndexTransformationProgress in client.last_response_headers)
         none_coll = client.CreateCollection(self.GetDatabaseLink(created_db, is_name_based),
             {
                 'id': 'none_coll',
                 'indexingPolicy': { 'indexingMode': documents.IndexingMode.NoIndex, 'automatic': False }
             })
         client.ReadCollection(self.GetDocumentCollectionLink(created_db, none_coll, is_name_based))
-        self.assertFalse(http_constants.HttpHeaders.LazyIndexingProgress in client.last_response_headers)
-        self.assertTrue(http_constants.HttpHeaders.IndexTransformationProgress in client.last_response_headers)
+        self.assertFalse(HttpHeaders.LazyIndexingProgress in client.last_response_headers)
+        self.assertTrue(HttpHeaders.IndexTransformationProgress in client.last_response_headers)
 
     # To run this test, please provide your own CA certs file or download one from
     #     http://curl.haxx.se/docs/caextract.html
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/test/globaldb_mock_tests.py new/pydocumentdb-2.3.2/test/globaldb_mock_tests.py
--- old/pydocumentdb-2.3.1/test/globaldb_mock_tests.py 2017-12-22 00:52:29.000000000 +0100
+++ new/pydocumentdb-2.3.2/test/globaldb_mock_tests.py 2018-05-08 00:09:38.000000000 +0200
@@ -26,6 +26,7 @@
 import pydocumentdb.documents as documents
 import pydocumentdb.errors as errors
 import pydocumentdb.constants as constants
+from pydocumentdb.http_constants import StatusCodes
 import pydocumentdb.global_endpoint_manager as global_endpoint_manager
 import pydocumentdb.retry_utility as retry_utility
 import test.test_config as test_config
@@ -149,10 +150,10 @@
         else:
             self.endpoint_discovery_retry_count += 1
             location_changed = True
-            raise errors.HTTPFailure(403, "Forbidden", {'x-ms-substatus' : 3})
+            raise errors.HTTPFailure(StatusCodes.FORBIDDEN, "Forbidden", {'x-ms-substatus' : 3})
 
     def MockGetDatabaseAccountStub(self, endpoint):
-        raise errors.HTTPFailure(503, "Service unavailable")
+        raise errors.HTTPFailure(StatusCodes.SERVICE_UNAVAILABLE, "Service unavailable")
     
     def MockCreateDatabase(self, client, database):
         self.OriginalExecuteFunction = retry_utility._ExecuteFunction
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/test/globaldb_tests.py new/pydocumentdb-2.3.2/test/globaldb_tests.py
--- old/pydocumentdb-2.3.1/test/globaldb_tests.py 2017-12-22 00:52:29.000000000 +0100
+++ new/pydocumentdb-2.3.2/test/globaldb_tests.py 2018-05-08 00:09:38.000000000 +0200
@@ -31,7 +31,7 @@
 import pydocumentdb.global_endpoint_manager as global_endpoint_manager
 import pydocumentdb.endpoint_discovery_retry_policy as endpoint_discovery_retry_policy
 import pydocumentdb.retry_utility as retry_utility
-import pydocumentdb.http_constants as http_constants
+from pydocumentdb.http_constants import HttpHeaders, StatusCodes, SubStatusCodes
 import test.test_config as test_config
 
 #IMPORTANT NOTES: 
@@ -124,7 +124,7 @@
         time.sleep(5)
 
         client.ReadDocument(created_document['_self'])
-        content_location = str(client.last_response_headers[http_constants.HttpHeaders.ContentLocation])
+        content_location = str(client.last_response_headers[HttpHeaders.ContentLocation])
 
         content_location_url = urlparse(content_location)
         host_url = urlparse(Test_globaldb_tests.host)
@@ -146,7 +146,7 @@
         time.sleep(5)
 
         client.ReadDocument(created_document['_self'])
-        content_location = str(client.last_response_headers[http_constants.HttpHeaders.ContentLocation])
+        content_location = str(client.last_response_headers[HttpHeaders.ContentLocation])
         
         content_location_url = urlparse(content_location)
         write_location_url = urlparse(Test_globaldb_tests.write_location_host)
@@ -168,8 +168,8 @@
         # Create Document will fail for the read location client since it has EnableEndpointDiscovery set to false, and hence the request will directly go to 
         # the endpoint that was used to create the client instance(which happens to be a read endpoint)
         self.__AssertHTTPFailureWithStatus(
-            403,
-            3,
+            StatusCodes.FORBIDDEN,
+            SubStatusCodes.WRITE_FORBIDDEN,
             read_location_client.CreateDocument,
             self.test_coll['_self'],
             document_definition)
@@ -206,7 +206,7 @@
         time.sleep(5)
 
         client.ReadDocument(created_document['_self'])
-        content_location = str(client.last_response_headers[http_constants.HttpHeaders.ContentLocation])
+        content_location = str(client.last_response_headers[HttpHeaders.ContentLocation])
 
         content_location_url = urlparse(content_location)
         write_location_url = urlparse(Test_globaldb_tests.write_location_host)
@@ -225,7 +225,7 @@
         time.sleep(5)
 
         client.ReadDocument(created_document['_self'])
-        content_location = str(client.last_response_headers[http_constants.HttpHeaders.ContentLocation])
+        content_location = str(client.last_response_headers[HttpHeaders.ContentLocation])
 
         content_location_url = urlparse(content_location)
         read_location2_url = urlparse(Test_globaldb_tests.read_location2_host)
@@ -372,8 +372,8 @@
                                 'key': 'value'} 
 
         self.__AssertHTTPFailureWithStatus(
-            403,
-            3,
+            StatusCodes.FORBIDDEN,
+            SubStatusCodes.WRITE_FORBIDDEN,
             client.CreateDocument,
             self.test_coll['_self'],
             document_definition)
@@ -381,7 +381,7 @@
         retry_utility._ExecuteFunction = self.OriginalExecuteFunction
 
     def _MockExecuteFunction(self, function, *args, **kwargs):
-        raise errors.HTTPFailure(403, "Write Forbidden", {'x-ms-substatus' : 3})
+        raise errors.HTTPFailure(StatusCodes.FORBIDDEN, "Write Forbidden", {'x-ms-substatus' : SubStatusCodes.WRITE_FORBIDDEN})
             
     def _MockGetDatabaseAccount(self, url_conection):
         database_account = documents.DatabaseAccount()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/test/query_tests.py new/pydocumentdb-2.3.2/test/query_tests.py
--- old/pydocumentdb-2.3.1/test/query_tests.py 2017-12-22 00:52:29.000000000 +0100
+++ new/pydocumentdb-2.3.2/test/query_tests.py 2018-05-08 00:09:38.000000000 +0200
@@ -21,7 +21,7 @@
 
         test_db = next(it, None)
         if test_db is not None:
-            client.DeleteDatabase("/dbs/" + cls.testDbName + "/") 
+            client.DeleteDatabase("/dbs/" + cls.testDbName + "/")
             """ change """
 
     @classmethod
@@ -38,10 +38,10 @@
         collection_options = { 'offerThroughput': 10100 }
         created_collection = client.CreateCollection(created_db['_self'], collection_definition, collection_options)
 
-    def test_first_and_last_slashes_trimmed_for_query_string (self):
         document_definition = {'pk': 'pk', 'id':'myId'}
         created_doc = client.CreateDocument(created_collection['_self'], document_definition)
 
+    def test_first_and_last_slashes_trimmed_for_query_string (self):
         query_options = {'partitionKey': 'pk'}
         collectionLink = '/dbs/' + self.testDbName + '/colls/' + self.testCollectionName + '/'
         query = 'SELECT * from ' + self.testCollectionName
@@ -50,3 +50,20 @@
         iter_list = list(query_iterable)
         self.assertEqual(iter_list[0]['id'], 'myId')
 
+    def test_populate_query_metrics (self):
+        query_options = {'partitionKey': 'pk',
+                         'populateQueryMetrics': True}
+        collectionLink = '/dbs/' + self.testDbName + '/colls/' + self.testCollectionName + '/'
+        query = 'SELECT * from ' + self.testCollectionName
+        query_iterable = client.QueryDocuments(collectionLink, query, query_options)
+
+        iter_list = list(query_iterable)
+        self.assertEqual(iter_list[0]['id'], 'myId')
+
+        METRICS_HEADER_NAME = 'x-ms-documentdb-query-metrics'
+        self.assertTrue(METRICS_HEADER_NAME in client.last_response_headers)
+        metrics_header = client.last_response_headers[METRICS_HEADER_NAME]
+        # Validate header is well-formed: "key1=value1;key2=value2;etc"
+        metrics = metrics_header.split(';')
+        self.assertTrue(len(metrics) > 1)
+        self.assertTrue(all(['=' in x for x in metrics]))
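
(Editor's note: the header validated above is a semicolon-separated key=value string. A sketch of turning it into a dict; the example values in the comment are illustrative, not recorded output:)

```python
def parse_query_metrics(header_value):
    # e.g. 'totalExecutionTimeInMs=0.33;queryCompileTimeInMs=0.05;...' -> dict of strings
    return dict(item.split('=', 1) for item in header_value.split(';') if '=' in item)
```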
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/test/retry_policy_tests.py new/pydocumentdb-2.3.2/test/retry_policy_tests.py
--- old/pydocumentdb-2.3.1/test/retry_policy_tests.py 2017-12-22 00:52:29.000000000 +0100
+++ new/pydocumentdb-2.3.2/test/retry_policy_tests.py 2018-05-08 00:09:38.000000000 +0200
@@ -25,7 +25,7 @@
 import pydocumentdb.documents as documents
 import pydocumentdb.errors as errors
 import pydocumentdb.retry_options as retry_options
-import pydocumentdb.http_constants as http_constants
+from pydocumentdb.http_constants import HttpHeaders, StatusCodes, SubStatusCodes
 import pydocumentdb.retry_utility as retry_utility
 import test.test_config as test_config
 
@@ -44,6 +44,7 @@
     masterKey = test_config._test_config.masterKey
     test_db_name = 'sample database' 
     test_coll_name = 'sample collection'
+    counter = 0;
 
     def __AssertHTTPFailureWithStatus(self, status_code, func, *args, **kwargs):
         """Assert HTTP failure with status.
@@ -115,9 +116,9 @@
         try:
             client.CreateDocument(self.created_collection['_self'], document_definition)
         except errors.HTTPFailure as e:
-            self.assertEqual(e.status_code, 429)
-            self.assertEqual(connection_policy.RetryOptions.MaxRetryAttemptCount, client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryCount])
-            self.assertGreaterEqual(client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryWaitTimeInMs], connection_policy.RetryOptions.MaxRetryAttemptCount * self.retry_after_in_milliseconds)
+            self.assertEqual(e.status_code, StatusCodes.TOO_MANY_REQUESTS)
+            self.assertEqual(connection_policy.RetryOptions.MaxRetryAttemptCount, client.last_response_headers[HttpHeaders.ThrottleRetryCount])
+            self.assertGreaterEqual(client.last_response_headers[HttpHeaders.ThrottleRetryWaitTimeInMs], connection_policy.RetryOptions.MaxRetryAttemptCount * self.retry_after_in_milliseconds)
 
         retry_utility._ExecuteFunction = self.OriginalExecuteFunction
 
@@ -138,9 +139,9 @@
         try:
             client.CreateDocument(self.created_collection['_self'], document_definition)
         except errors.HTTPFailure as e:
-            self.assertEqual(e.status_code, 429)
-            self.assertEqual(connection_policy.RetryOptions.MaxRetryAttemptCount, client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryCount])
-            self.assertGreaterEqual(client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryWaitTimeInMs], connection_policy.RetryOptions.MaxRetryAttemptCount * connection_policy.RetryOptions.FixedRetryIntervalInMilliseconds)
+            self.assertEqual(e.status_code, StatusCodes.TOO_MANY_REQUESTS)
+            self.assertEqual(connection_policy.RetryOptions.MaxRetryAttemptCount, client.last_response_headers[HttpHeaders.ThrottleRetryCount])
+            self.assertGreaterEqual(client.last_response_headers[HttpHeaders.ThrottleRetryWaitTimeInMs], connection_policy.RetryOptions.MaxRetryAttemptCount * connection_policy.RetryOptions.FixedRetryIntervalInMilliseconds)
 
         retry_utility._ExecuteFunction = self.OriginalExecuteFunction
 
@@ -161,8 +162,8 @@
         try:
             client.CreateDocument(self.created_collection['_self'], document_definition)
         except errors.HTTPFailure as e:
-            self.assertEqual(e.status_code, 429)
-            self.assertGreaterEqual(client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryWaitTimeInMs], connection_policy.RetryOptions.MaxWaitTimeInSeconds * 1000)
+            self.assertEqual(e.status_code, StatusCodes.TOO_MANY_REQUESTS)
+            self.assertGreaterEqual(client.last_response_headers[HttpHeaders.ThrottleRetryWaitTimeInMs], connection_policy.RetryOptions.MaxWaitTimeInSeconds * 1000)
 
         retry_utility._ExecuteFunction = self.OriginalExecuteFunction
 
@@ -191,14 +192,99 @@
                 ]
             }))
         except errors.HTTPFailure as e:
-            self.assertEqual(e.status_code, 429)
-            self.assertEqual(connection_policy.RetryOptions.MaxRetryAttemptCount, client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryCount])
-            self.assertGreaterEqual(client.last_response_headers[http_constants.HttpHeaders.ThrottleRetryWaitTimeInMs], connection_policy.RetryOptions.MaxRetryAttemptCount * self.retry_after_in_milliseconds)
+            self.assertEqual(e.status_code, StatusCodes.TOO_MANY_REQUESTS)
+            self.assertEqual(connection_policy.RetryOptions.MaxRetryAttemptCount, client.last_response_headers[HttpHeaders.ThrottleRetryCount])
+            self.assertGreaterEqual(client.last_response_headers[HttpHeaders.ThrottleRetryWaitTimeInMs], connection_policy.RetryOptions.MaxRetryAttemptCount * self.retry_after_in_milliseconds)
+
+        retry_utility._ExecuteFunction = self.OriginalExecuteFunction
+
+    def test_default_retry_policy_for_query(self):
+        connection_policy = documents.ConnectionPolicy()
+
+        client = document_client.DocumentClient(Test_retry_policy_tests.host, {'masterKey': Test_retry_policy_tests.masterKey}, connection_policy)
+
+        document_definition_1 = { 'id': 'doc1',
+                                  'name': 'sample document',
+                                  'key': 'value'} 
+        document_definition_2 = { 'id': 'doc2',
+                                  'name': 'sample document',
+                                  'key': 'value'} 
+
+        client.CreateDocument(self.created_collection['_self'], document_definition_1)
+        client.CreateDocument(self.created_collection['_self'], document_definition_2)
+
+        self.OriginalExecuteFunction = retry_utility._ExecuteFunction
+        retry_utility._ExecuteFunction = self._MockExecuteFunctionConnectionReset
+
+        docs = client.QueryDocuments(self.created_collection['_self'], "Select * from c", {'maxItemCount':1})
+        
+        result_docs = list(docs)
+        self.assertEqual(result_docs[0]['id'], 'doc1')
+        self.assertEqual(result_docs[1]['id'], 'doc2')
+        self.assertEqual(self.counter, 12)
+
+        self.counter = 0;
+        retry_utility._ExecuteFunction = self.OriginalExecuteFunction
+
+        client.DeleteDocument(result_docs[0]['_self'])
+        client.DeleteDocument(result_docs[1]['_self'])
+
+    def test_default_retry_policy_for_read(self):
+        connection_policy = documents.ConnectionPolicy()
+
+        client = document_client.DocumentClient(Test_retry_policy_tests.host, {'masterKey': Test_retry_policy_tests.masterKey}, connection_policy)
+        
+        document_definition = { 'id': 'doc',
+                                'name': 'sample document',
+                                'key': 'value'} 
+
+        created_document = client.CreateDocument(self.created_collection['_self'], document_definition)
+
+        self.OriginalExecuteFunction = retry_utility._ExecuteFunction
+        retry_utility._ExecuteFunction = self._MockExecuteFunctionConnectionReset
+
+        doc = client.ReadDocument(created_document['_self'], {})
+        self.assertEqual(doc['id'], 'doc')
+        self.assertEqual(self.counter, 3)
+        
+        self.counter = 0;
+        retry_utility._ExecuteFunction = self.OriginalExecuteFunction
+                
+        client.DeleteDocument(doc['_self'])
+    
+    def test_default_retry_policy_for_create(self):
+        connection_policy = documents.ConnectionPolicy()
+
+        client = document_client.DocumentClient(Test_retry_policy_tests.host, {'masterKey': Test_retry_policy_tests.masterKey}, connection_policy)
+        
+        document_definition = { 'id': 'doc',
+                                'name': 'sample document',
+                                'key': 'value'} 
+
+        self.OriginalExecuteFunction = retry_utility._ExecuteFunction
+        retry_utility._ExecuteFunction = self._MockExecuteFunctionConnectionReset
+
+        created_document = {}
+        try :
+            created_document = client.CreateDocument(self.created_collection['_self'], document_definition)
+        except Exception as err:
+            self.assertEqual(err.status_code, 10054)
+
+        self.assertDictEqual(created_document, {})
+        self.assertEqual(self.counter, 7)
 
         retry_utility._ExecuteFunction = self.OriginalExecuteFunction
 
     def _MockExecuteFunction(self, function, *args, **kwargs):
-        raise errors.HTTPFailure(429, "Request rate is too large", {http_constants.HttpHeaders.RetryAfterInMilliseconds: self.retry_after_in_milliseconds})
+        raise errors.HTTPFailure(StatusCodes.TOO_MANY_REQUESTS, "Request rate is too large", {HttpHeaders.RetryAfterInMilliseconds: self.retry_after_in_milliseconds})
+
+    def _MockExecuteFunctionConnectionReset(self, function, *args, **kwargs):
+        self.counter += 1;
+
+        if self.counter % 3 == 0:
+            return self.OriginalExecuteFunction(function, *args, **kwargs)
+        else:
+            raise errors.HTTPFailure(10054, "Connection was reset", {})
 
 if __name__ == '__main__':
     unittest.main()
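
All three new default-retry-policy tests rely on the same trick: retry_utility._ExecuteFunction is swapped for _MockExecuteFunctionConnectionReset, which raises error 10054 ("Connection was reset") on two out of every three calls, so an operation only completes once the SDK's built-in retries have fired. A condensed sketch of that monkey-patching pattern (the body of the try block is a placeholder for real client calls):

    import pydocumentdb.errors as errors
    import pydocumentdb.retry_utility as retry_utility

    class ConnectionResetStub(object):
        """Fail two of every three transport calls with a reset error."""
        def __init__(self, original):
            self.original = original
            self.counter = 0

        def __call__(self, function, *args, **kwargs):
            self.counter += 1
            if self.counter % 3 == 0:
                return self.original(function, *args, **kwargs)
            raise errors.HTTPFailure(10054, "Connection was reset", {})

    original = retry_utility._ExecuteFunction
    retry_utility._ExecuteFunction = ConnectionResetStub(original)
    try:
        pass  # issue reads/queries here; they succeed only via retries
    finally:
        retry_utility._ExecuteFunction = original  # always restore the hook

The counter values the tests expect (3 for a single read, 12 for the paged query, 7 for the create that ultimately surfaces the 10054 failure) simply count how many transport calls each operation makes under this two-in-three failure scheme.
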
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pydocumentdb-2.3.1/test/ttl_tests.py new/pydocumentdb-2.3.2/test/ttl_tests.py
--- old/pydocumentdb-2.3.1/test/ttl_tests.py    2017-12-22 00:52:29.000000000 +0100
+++ new/pydocumentdb-2.3.2/test/ttl_tests.py    2018-05-08 00:09:38.000000000 +0200
@@ -24,6 +24,7 @@
 
 import pydocumentdb.document_client as document_client
 import pydocumentdb.errors as errors
+from pydocumentdb.http_constants import StatusCodes
 import test.test_config as test_config
 
 
@@ -92,7 +93,7 @@
 
         # None is an unsupported value for defaultTtl. Valid values are -1 or a non-zero positive 32-bit integer value
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateCollection,
             created_db['_self'],
             collection_definition)
@@ -102,7 +103,7 @@
 
         # 0 is an unsupported value for defaultTtl. Valid values are -1 or a non-zero positive 32-bit integer value
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateCollection,
             created_db['_self'],
             collection_definition)
@@ -112,7 +113,7 @@
 
         # -10 is an unsupported value for defaultTtl. Valid values are -1 or a non-zero positive 32-bit integer value
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateCollection,
             created_db['_self'],
             collection_definition)
@@ -124,7 +125,7 @@
 
         # 0 is an unsupported value for ttl. Valid values are -1 or a non-zero positive 32-bit integer value
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateDocument,
             created_collection['_self'],
             document_definition)
@@ -134,7 +135,7 @@
 
         # None is an unsupported value for ttl. Valid values are -1 or a non-zero positive 32-bit integer value
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateDocument,
             created_collection['_self'],
             document_definition)
@@ -144,7 +145,7 @@
         
         # -10 is an unsupported value for ttl. Valid values are -1 or a non-zero positive 32-bit integer value
         self.__AssertHTTPFailureWithStatus(
-            400,
+            StatusCodes.BAD_REQUEST,
             client.CreateDocument,
             created_collection['_self'],
             document_definition)
@@ -170,7 +171,7 @@
         
         # the created document should be gone now as it's ttl value would be same as defaultTtl value of the collection
         self.__AssertHTTPFailureWithStatus(
-            404,
+            StatusCodes.NOT_FOUND,
             client.ReadDocument,
             created_document['_self'])
 
@@ -192,7 +193,7 @@
 
         # the created document should be gone now as it's ttl value is set to 2 which overrides the collections's defaultTtl value(5)
         self.__AssertHTTPFailureWithStatus(
-            404,
+            StatusCodes.NOT_FOUND,
             client.ReadDocument,
             created_document['_self'])
 
@@ -210,7 +211,7 @@
 
         # the created document should be gone now as we have waited for (6+4) secs which is greater than documents's ttl value of 8
         self.__AssertHTTPFailureWithStatus(
-            404,
+            StatusCodes.NOT_FOUND,
             client.ReadDocument,
             created_document['_self'])
 
@@ -245,7 +246,7 @@
 
         # the created document should be gone now as it's ttl value is set to 2 which overrides the collections's defaultTtl value(-1)
         self.__AssertHTTPFailureWithStatus(
-            404,
+            StatusCodes.NOT_FOUND,
             client.ReadDocument,
             created_document3['_self'])
 
@@ -299,7 +300,7 @@
 
         # the created document cannot be deleted since it should already be gone now
         self.__AssertHTTPFailureWithStatus(
-            404,
+            StatusCodes.NOT_FOUND,
             client.DeleteDocument,
             created_document['_self'])
 
@@ -323,7 +324,7 @@
 
         # the upserted document should be gone now after 10 secs from the last write(upsert) of the document
         self.__AssertHTTPFailureWithStatus(
-            404,
+            StatusCodes.NOT_FOUND,
             client.ReadDocument,
             upserted_docment['_self'])
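
The ttl_tests.py changes are mechanical: bare status integers (400, 404) become the named StatusCodes constants, with no behavioral change. For reference, the private __AssertHTTPFailureWithStatus helper that all of these call sites feed is roughly equivalent to this sketch (reconstructed here for illustration, not copied from the package):

    import pydocumentdb.errors as errors
    from pydocumentdb.http_constants import StatusCodes

    def assert_http_failure_with_status(status_code, func, *args, **kwargs):
        """Invoke func and verify it fails with the expected HTTP status."""
        try:
            func(*args, **kwargs)
            raise AssertionError('expected an HTTPFailure, none was raised')
        except errors.HTTPFailure as e:
            assert e.status_code == status_code

    # e.g. a document whose TTL has elapsed should read back as NOT_FOUND:
    # assert_http_failure_with_status(StatusCodes.NOT_FOUND,
    #                                 client.ReadDocument, doc['_self'])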
 

