Revision: 313
Author: bslatkin
Date: Thu Jan 21 19:42:07 2010
Log: increase hub feed fetch timeout to 10 seconds
http://code.google.com/p/pubsubhubbub/source/detail?r=313
Modified:
/trunk/hub/async_apiproxy.py
/trunk/hub/main.py
/trunk/hub/urlfetch_async.py
=======================================
--- /trunk/hub/async_apiproxy.py Mon Sep 21 15:18:21 2009
+++ /trunk/hub/async_apiproxy.py Thu Jan 21 19:42:07 2010
@@ -59,13 +59,15 @@
# unclear event ordering dependencies.
self.enqueued = collections.deque()
-  def start_call(self, package, call, pbrequest, pbresponse,
-                 user_callback):
+ def start_call(self, package, call, pbrequest, pbresponse, user_callback,
+ deadline=None):
"""user_callback is a callback that takes (response, exception)"""
if not callable(user_callback):
raise TypeError('%r not callable' % user_callback)
rpc = AsyncRPC(package, call, pbrequest, pbresponse,
- lambda: user_callback(pbresponse, None))
+ lambda: user_callback(pbresponse, None),
+ deadline=deadline)
setattr(rpc, 'user_callback', user_callback) # TODO make this pretty
self.enqueued.append(rpc)
show_request = '...'
=======================================
--- /trunk/hub/main.py Mon Oct 26 13:36:06 2009
+++ /trunk/hub/main.py Thu Jan 21 19:42:07 2010
@@ -182,6 +182,9 @@
# Maximum number of redirects to follow when feed fetching.
MAX_REDIRECTS = 7
+# Maximum time to wait for fetching a feed in seconds.
+MAX_FETCH_SECONDS = 10
+
# Number of times to try to split FeedEntryRecord, EventToDeliver, and
# FeedRecord entities when putting them and their size is too large.
PUT_SPLITTING_ATTEMPTS = 10
@@ -1609,7 +1612,8 @@
try:
response = urlfetch.fetch(adjusted_url, method='get',
- follow_redirects=False)
+ follow_redirects=False,
+ deadline=MAX_FETCH_SECONDS)
except urlfetch_errors.Error:
logging.exception('Error encountered while confirming subscription')
return False
@@ -2117,7 +2121,11 @@
apiproxy_errors.Error if any RPC errors are encountered.
      urlfetch.Error if there are any fetching API errors.
"""
-    response = urlfetch.fetch(fetch_url, headers=headers,
-                              follow_redirects=False)
+ response = urlfetch.fetch(
+ fetch_url,
+ headers=headers,
+ follow_redirects=False,
+ deadline=MAX_FETCH_SECONDS)
return response.status_code, response.headers, response.content
@@ -2341,7 +2349,8 @@
headers=headers,
payload=payload,
async_proxy=async_proxy,
- callback=callback)
+ callback=callback,
+ deadline=MAX_FETCH_SECONDS)
class PushEventHandler(webapp.RequestHandler):
=======================================
--- /trunk/hub/urlfetch_async.py Fri Jun 26 10:10:31 2009
+++ /trunk/hub/urlfetch_async.py Thu Jan 21 19:42:07 2010
@@ -22,7 +22,8 @@
def fetch(url, payload=None, method=urlfetch.GET, headers={},
- allow_truncated=False, callback=None, async_proxy=None):
+ allow_truncated=False, callback=None, async_proxy=None,
+ deadline=5):
"""Fetches the given HTTP URL, blocking until the result is returned.
Other optional parameters are:
@@ -37,6 +38,8 @@
not None.
      async_proxy: If not None, instance of AsyncAPIProxy to use for executing
          asynchronous API calls.
+ deadline: How long to allow the request to wait, in seconds. Defaults
+ to 5 seconds.
We use a HTTP/1.1 compliant proxy to fetch the result.
@@ -85,7 +88,7 @@
allow_truncated)
callback(result, user_exception)
async_proxy.start_call('urlfetch', 'Fetch', request, response,
- completion_callback)
+ completion_callback, deadline=deadline)
return
user_exception = None