[python-urlgrabber/f17] update to latest HEAD
Zdeněk Pavlas
zpavlas at fedoraproject.org
Thu Jun 14 12:52:00 UTC 2012
commit 64a00bdbd5acf31933486137ac1f3c74281e69df
Author: Zdeněk Pavlas <zpavlas at redhat.com>
Date: Thu Jun 14 14:51:27 2012 +0200
update to latest HEAD
python-urlgrabber.spec | 7 +++-
urlgrabber-HEAD.patch | 87 +++++++++++++++++++++++++----------------------
2 files changed, 52 insertions(+), 42 deletions(-)
---
diff --git a/python-urlgrabber.spec b/python-urlgrabber.spec
index 5872e6b..f57e573 100644
--- a/python-urlgrabber.spec
+++ b/python-urlgrabber.spec
@@ -3,7 +3,7 @@
Summary: A high-level cross-protocol url-grabber
Name: python-urlgrabber
Version: 3.9.1
-Release: 12%{?dist}
+Release: 13%{?dist}
Source0: urlgrabber-%{version}.tar.gz
Patch1: urlgrabber-HEAD.patch
@@ -44,6 +44,11 @@ rm -rf $RPM_BUILD_ROOT
%attr(0755,root,root) %{_libexecdir}/urlgrabber-ext-down
%changelog
+* Thu Jun 14 2012 Zdenek Pavlas <zpavlas at redhat.com> - 3.9.1-13
+- update to latest HEAD
+- Start meters immediately, and only when asked to. BZ 831904, 831291.
+- Better CTRL-C handling when downloading metalink/mirrorlist.
+
* Thu Jun 7 2012 Zdenek Pavlas <zpavlas at redhat.com> - 3.9.1-12
- update to latest HEAD
- Abort download when terminal got closed. BZ 806632.
diff --git a/urlgrabber-HEAD.patch b/urlgrabber-HEAD.patch
index 534b571..c77c6c5 100644
--- a/urlgrabber-HEAD.patch
+++ b/urlgrabber-HEAD.patch
@@ -73,7 +73,7 @@ index 518e512..09cd896 100644
print __doc__
diff --git a/scripts/urlgrabber-ext-down b/scripts/urlgrabber-ext-down
new file mode 100755
-index 0000000..c37e6a8
+index 0000000..670750c
--- /dev/null
+++ b/scripts/urlgrabber-ext-down
@@ -0,0 +1,55 @@
@@ -88,7 +88,7 @@ index 0000000..c37e6a8
+def write(fmt, *arg):
+ try: os.write(1, fmt % arg)
+ except OSError, e:
-+ if e.arg[0] != errno.EPIPE: raise
++ if e.args[0] != errno.EPIPE: raise
+ sys.exit(1)
+
+class ProxyProgress:
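The one-character fix above matters more than it looks: OSError keeps its errno in e.args, not e.arg, so the old except branch would itself crash with AttributeError instead of tolerating a closed pipe. Assembled from the hunk for readability, the corrected helper in urlgrabber-ext-down reads:

    import errno, os, sys

    def write(fmt, *arg):
        try:
            os.write(1, fmt % arg)
        except OSError, e:
            # anything other than a broken pipe is a real error
            if e.args[0] != errno.EPIPE:
                raise
            # EPIPE means the parent went away; exit quietly
            sys.exit(1)
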
@@ -216,7 +216,7 @@ index 3e5f3b7..8eeaeda 100644
return (fb,lb)
diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py
-index e090e90..c5470b1 100644
+index e090e90..071146c 100644
--- a/urlgrabber/grabber.py
+++ b/urlgrabber/grabber.py
@@ -49,7 +49,7 @@ GENERAL ARGUMENTS (kwargs)
@@ -558,7 +558,13 @@ index e090e90..c5470b1 100644
"""Provides easy opening of URLs with a variety of options.
All options are specified as kwargs. Options may be specified when
-@@ -891,10 +1032,9 @@ class URLGrabber:
+@@ -887,14 +1028,15 @@ class URLGrabber:
+ except KeyboardInterrupt, e:
+ exception = e
+ callback = opts.interrupt_callback
++ if not callback:
++ raise
+
if DEBUG: DEBUG.info('exception: %s', exception)
if callback:
if DEBUG: DEBUG.info('calling callback: %s', callback)
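This hunk is the "Better CTRL-C handling" item from the changelog: when no interrupt_callback is registered, a KeyboardInterrupt now propagates out of the retry loop at once instead of being routed through the generic callback/retry machinery, which is what made CTRL-C sluggish while the metalink/mirrorlist code issued many grabs in a row. A simplified sketch of the resulting flow inside URLGrabber._retry, with the surrounding retry loop elided:

    try:
        return func(opts, *args)          # one download attempt
    except KeyboardInterrupt, e:
        exception = e
        callback = opts.interrupt_callback
        if not callback:
            raise    # no handler registered: stop immediately
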
@@ -570,7 +576,7 @@ index e090e90..c5470b1 100644
if (opts.retry is None) or (tries == opts.retry):
if DEBUG: DEBUG.info('retries exceeded, re-raising')
-@@ -912,9 +1052,11 @@ class URLGrabber:
+@@ -912,9 +1054,11 @@ class URLGrabber:
returned that supports them. The file object can be treated
like any other file object.
"""
@@ -582,7 +588,7 @@ index e090e90..c5470b1 100644
def retryfunc(opts, url):
return PyCurlFileObject(url, filename=None, opts=opts)
return self._retry(opts, retryfunc, url)
-@@ -925,12 +1067,17 @@ class URLGrabber:
+@@ -925,12 +1069,17 @@ class URLGrabber:
urlgrab returns the filename of the local file, which may be
different from the passed-in filename if copy_local == 0.
"""
@@ -600,7 +606,7 @@ index e090e90..c5470b1 100644
if scheme == 'file' and not opts.copy_local:
# just return the name of the local file - don't make a
# copy currently
-@@ -950,30 +1097,36 @@ class URLGrabber:
+@@ -950,30 +1099,36 @@ class URLGrabber:
elif not opts.range:
if not opts.checkfunc is None:
@@ -650,7 +656,7 @@ index e090e90..c5470b1 100644
def urlread(self, url, limit=None, **kwargs):
"""read the url into a string, up to 'limit' bytes
-@@ -982,9 +1135,11 @@ class URLGrabber:
+@@ -982,9 +1137,11 @@ class URLGrabber:
"I want the first N bytes" but rather 'read the whole file
into memory, but don't use too much'
"""
@@ -662,7 +668,7 @@ index e090e90..c5470b1 100644
if limit is not None:
limit = limit + 1
-@@ -1000,12 +1155,8 @@ class URLGrabber:
+@@ -1000,12 +1157,8 @@ class URLGrabber:
else: s = fo.read(limit)
if not opts.checkfunc is None:
@@ -677,7 +683,7 @@ index e090e90..c5470b1 100644
finally:
fo.close()
return s
-@@ -1020,6 +1171,7 @@ class URLGrabber:
+@@ -1020,6 +1173,7 @@ class URLGrabber:
return s
def _make_callback(self, callback_obj):
@@ -685,7 +691,7 @@ index e090e90..c5470b1 100644
if callable(callback_obj):
return callback_obj, (), {}
else:
-@@ -1030,7 +1182,7 @@ class URLGrabber:
+@@ -1030,7 +1184,7 @@ class URLGrabber:
default_grabber = URLGrabber()
@@ -694,7 +700,7 @@ index e090e90..c5470b1 100644
def __init__(self, url, filename, opts):
self.fo = None
self._hdr_dump = ''
-@@ -1052,10 +1204,11 @@ class PyCurlFileObject():
+@@ -1052,10 +1206,11 @@ class PyCurlFileObject():
self._reget_length = 0
self._prog_running = False
self._error = (None, None)
@@ -708,7 +714,7 @@ index e090e90..c5470b1 100644
def __getattr__(self, name):
"""This effectively allows us to wrap at the instance level.
Any attribute not found in _this_ object will be searched for
-@@ -1085,9 +1238,14 @@ class PyCurlFileObject():
+@@ -1085,9 +1240,14 @@ class PyCurlFileObject():
return -1
def _hdr_retrieve(self, buf):
@@ -724,7 +730,7 @@ index e090e90..c5470b1 100644
try:
self._hdr_dump += buf
# we have to get the size before we do the progress obj start
-@@ -1104,7 +1262,17 @@ class PyCurlFileObject():
+@@ -1104,7 +1264,17 @@ class PyCurlFileObject():
s = parse150(buf)
if s:
self.size = int(s)
@@ -743,7 +749,7 @@ index e090e90..c5470b1 100644
return len(buf)
except KeyboardInterrupt:
return pycurl.READFUNC_ABORT
-@@ -1113,8 +1281,10 @@ class PyCurlFileObject():
+@@ -1113,8 +1283,10 @@ class PyCurlFileObject():
if self._parsed_hdr:
return self._parsed_hdr
statusend = self._hdr_dump.find('\n')
@@ -754,7 +760,7 @@ index e090e90..c5470b1 100644
self._parsed_hdr = mimetools.Message(hdrfp)
return self._parsed_hdr
-@@ -1127,6 +1297,9 @@ class PyCurlFileObject():
+@@ -1127,6 +1299,9 @@ class PyCurlFileObject():
if not opts:
opts = self.opts
@@ -764,7 +770,7 @@ index e090e90..c5470b1 100644
# defaults we're always going to set
self.curl_obj.setopt(pycurl.NOPROGRESS, False)
-@@ -1136,11 +1309,21 @@ class PyCurlFileObject():
+@@ -1136,11 +1311,21 @@ class PyCurlFileObject():
self.curl_obj.setopt(pycurl.PROGRESSFUNCTION, self._progress_update)
self.curl_obj.setopt(pycurl.FAILONERROR, True)
self.curl_obj.setopt(pycurl.OPT_FILETIME, True)
@@ -786,7 +792,7 @@ index e090e90..c5470b1 100644
# maybe to be options later
self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True)
-@@ -1148,9 +1331,11 @@ class PyCurlFileObject():
+@@ -1148,9 +1333,11 @@ class PyCurlFileObject():
# timeouts
timeout = 300
@@ -801,7 +807,7 @@ index e090e90..c5470b1 100644
# ssl options
if self.scheme == 'https':
-@@ -1158,13 +1343,16 @@ class PyCurlFileObject():
+@@ -1158,13 +1345,16 @@ class PyCurlFileObject():
self.curl_obj.setopt(pycurl.CAPATH, opts.ssl_ca_cert)
self.curl_obj.setopt(pycurl.CAINFO, opts.ssl_ca_cert)
self.curl_obj.setopt(pycurl.SSL_VERIFYPEER, opts.ssl_verify_peer)
@@ -819,7 +825,7 @@ index e090e90..c5470b1 100644
if opts.ssl_cert_type:
self.curl_obj.setopt(pycurl.SSLCERTTYPE, opts.ssl_cert_type)
if opts.ssl_key_pass:
-@@ -1187,28 +1375,24 @@ class PyCurlFileObject():
+@@ -1187,28 +1377,24 @@ class PyCurlFileObject():
if hasattr(opts, 'raw_throttle') and opts.raw_throttle():
self.curl_obj.setopt(pycurl.MAX_RECV_SPEED_LARGE, int(opts.raw_throttle()))
@@ -862,7 +868,7 @@ index e090e90..c5470b1 100644
# our url
self.curl_obj.setopt(pycurl.URL, self.url)
-@@ -1228,12 +1412,14 @@ class PyCurlFileObject():
+@@ -1228,12 +1414,14 @@ class PyCurlFileObject():
code = self.http_code
errcode = e.args[0]
@@ -879,7 +885,7 @@ index e090e90..c5470b1 100644
# this is probably wrong but ultimately this is what happens
# we have a legit http code and a pycurl 'writer failed' code
-@@ -1244,23 +1430,23 @@ class PyCurlFileObject():
+@@ -1244,23 +1432,23 @@ class PyCurlFileObject():
raise KeyboardInterrupt
elif errcode == 28:
@@ -910,7 +916,7 @@ index e090e90..c5470b1 100644
# this is probably wrong but ultimately this is what happens
# we have a legit http code and a pycurl 'writer failed' code
# which almost always means something aborted it from outside
-@@ -1272,33 +1458,94 @@ class PyCurlFileObject():
+@@ -1272,33 +1460,94 @@ class PyCurlFileObject():
elif errcode == 58:
msg = _("problem with the local client certificate")
err = URLGrabError(14, msg)
@@ -1012,7 +1018,7 @@ index e090e90..c5470b1 100644
def _do_open(self):
self.curl_obj = _curl_cache
-@@ -1333,7 +1580,11 @@ class PyCurlFileObject():
+@@ -1333,7 +1582,11 @@ class PyCurlFileObject():
if self.opts.range:
rt = self.opts.range
@@ -1025,7 +1031,7 @@ index e090e90..c5470b1 100644
if rt:
header = range_tuple_to_header(rt)
-@@ -1434,21 +1685,46 @@ class PyCurlFileObject():
+@@ -1434,21 +1687,46 @@ class PyCurlFileObject():
#fh, self._temp_name = mkstemp()
#self.fo = open(self._temp_name, 'wb')
@@ -1079,7 +1085,7 @@ index e090e90..c5470b1 100644
else:
#self.fo = open(self._temp_name, 'r')
self.fo.seek(0)
-@@ -1526,17 +1802,20 @@ class PyCurlFileObject():
+@@ -1526,17 +1804,20 @@ class PyCurlFileObject():
if self._prog_running:
downloaded += self._reget_length
self.opts.progress_obj.update(downloaded)
@@ -1105,7 +1111,7 @@ index e090e90..c5470b1 100644
msg = _("Downloaded more than max size for %s: %s > %s") \
% (self.url, cur, max_size)
-@@ -1544,13 +1823,6 @@ class PyCurlFileObject():
+@@ -1544,13 +1825,6 @@ class PyCurlFileObject():
return True
return False
@@ -1119,7 +1125,7 @@ index e090e90..c5470b1 100644
def read(self, amt=None):
self._fill_buffer(amt)
if amt is None:
-@@ -1582,9 +1854,21 @@ class PyCurlFileObject():
+@@ -1582,9 +1856,21 @@ class PyCurlFileObject():
self.opts.progress_obj.end(self._amount_read)
self.fo.close()
@@ -1142,7 +1148,7 @@ index e090e90..c5470b1 100644
#####################################################################
# DEPRECATED FUNCTIONS
-@@ -1621,6 +1905,434 @@ def retrygrab(url, filename=None, copy_local=0, close_connection=0,
+@@ -1621,6 +1907,433 @@ def retrygrab(url, filename=None, copy_local=0, close_connection=0,
#####################################################################
@@ -1286,12 +1292,7 @@ index e090e90..c5470b1 100644
+ line = line.split(' ', 5)
+ _id, size = map(int, line[:2])
+ if len(line) == 2:
-+ opts = self.running[_id]
-+ m = opts.progress_obj
-+ if m:
-+ if not m.last_update_time:
-+ m.start(text = opts.text)
-+ m.update(size)
++ self.running[_id].progress_obj.update(size)
+ continue
+ # job done
+ opts = self.running.pop(_id)
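Each status line from the urlgrabber-ext-down helper is space-separated and starts with the job id and byte count; a two-field line is a pure progress tick, anything longer marks completion. Because meters are now started when the job is queued (see the parallel_wait hunk below), the lazy m.start() dance is gone and the progress branch collapses to a single update(). A sketch of the dispatch, with the completion fields (not fully shown in this diff) elided:

    parts = line.split(' ', 5)
    _id, size = map(int, parts[:2])
    if len(parts) == 2:
        # progress tick; the meter was started at queue time, and jobs
        # queued without a meter are assumed not to report progress
        self.running[_id].progress_obj.update(size)
    else:
        opts = self.running.pop(_id)   # longer line: job finished
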
@@ -1364,8 +1365,9 @@ index e090e90..c5470b1 100644
+ if meter:
+ count = total = 0
+ for opts in _async_queue:
-+ count += 1
-+ total += opts.size
++ if opts.progress_obj:
++ count += 1
++ total += opts.size
+ if meter == 'text':
+ from progress import TextMultiFileMeter
+ meter = TextMultiFileMeter()
@@ -1378,7 +1380,11 @@ index e090e90..c5470b1 100644
+ key, limit = opts.async
+ host_con[key] = host_con.get(key, 0) + 1
+ opts.tries = tries
-+ opts.progress_obj = meter and meter.newMeter()
++ if meter and opts.progress_obj:
++ opts.progress_obj = meter.newMeter()
++ opts.progress_obj.start(text=opts.text, basename=os.path.basename(opts.filename))
++ else:
++ opts.progress_obj = None
+ if DEBUG: DEBUG.info('attempt %i/%s: %s', opts.tries, opts.retry, opts.url)
+ dl.start(opts)
+
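This covers both halves of "Start meters immediately, and only when asked to": the multi-file totals now count only queue entries that actually carry a progress_obj (BZ 831291), and each per-file meter is started at dispatch time, so the filename appears as soon as the request goes out rather than on the first progress callback (BZ 831904). Per the hunk, the per-job setup amounts to:

    if meter and opts.progress_obj:
        m = meter.newMeter()          # child of the multi-file meter
        m.start(text=opts.text,
                basename=os.path.basename(opts.filename))
        opts.progress_obj = m
    else:
        opts.progress_obj = None      # no meter requested: stay silent
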
@@ -1386,9 +1392,8 @@ index e090e90..c5470b1 100644
+ for opts, size, ug_err in dl.perform():
+ key, limit = opts.async
+ host_con[key] -= 1
-+ if meter:
-+ m = opts.progress_obj
-+ m.basename = os.path.basename(opts.filename)
++ m = opts.progress_obj
++ if m:
+ if ug_err:
+ m.failure(ug_err.args[1])
+ else: