Do not assume Content-Length header is always available
The Content-Length header is not always available, for example when
the transfer encoding is chunked.
Recently, downloading the gzip'd pgo_training-1.ii has been failing
locally because of this.
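
For illustration, a minimal sketch of the defensive pattern the patch
adopts, with a placeholder URL (not part of the change):

    import urllib.request

    # Hypothetical URL, for illustration only.
    request = urllib.request.Request('https://example.com/file.gz')
    request.add_header('Accept-Encoding', 'gzip')
    response = urllib.request.urlopen(request)

    # A chunked response carries no Content-Length, so treat it as optional.
    length = response.headers.get('Content-Length')
    total_size = int(length.strip()) if length is not None else None

    bytes_done = 0
    while True:
        chunk = response.read(65536)
        if not chunk:
            break
        bytes_done += len(chunk)
        # Only report progress when the total size is actually known.
        if total_size is not None:
            print('%d/%d bytes' % (bytes_done, total_size))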
Bug: 1324691
Change-Id: I37a1ae5c1ce4727b547d4da9544bb4d037d33835
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3646072
Reviewed-by: Hans Wennborg <hans@chromium.org>
Reviewed-by: Nico Weber <thakis@chromium.org>
Commit-Queue: Arthur Eubanks <aeubanks@google.com>
Cr-Commit-Position: refs/heads/main@{#1003356}
NOKEYCHECK=True
GitOrigin-RevId: 3501c0afd415c16d549bf4ebc5a1c4f5953a1a88
diff --git a/scripts/update.py b/scripts/update.py
index fb0d759..cb2a5a3 100755
--- a/scripts/update.py
+++ b/scripts/update.py
@@ -100,9 +100,12 @@
request = urllib.request.Request(url)
request.add_header('Accept-Encoding', 'gzip')
response = urllib.request.urlopen(request)
- total_size = int(response.info().get('Content-Length').strip())
+ total_size = None
+ if 'Content-Length' in response.headers:
+ total_size = int(response.headers['Content-Length'].strip())
- is_gzipped = response.info().get('Content-Encoding', '').strip() == 'gzip'
+ is_gzipped = response.headers.get('Content-Encoding',
+ '').strip() == 'gzip'
if is_gzipped:
gzip_decode = zlib.decompressobj(zlib.MAX_WBITS + 16)
@@ -118,11 +121,12 @@
chunk = gzip_decode.decompress(chunk)
output_file.write(chunk)
- num_dots = TOTAL_DOTS * bytes_done // total_size
- sys.stdout.write('.' * (num_dots - dots_printed))
- sys.stdout.flush()
- dots_printed = num_dots
- if bytes_done != total_size:
+ if total_size is not None:
+ num_dots = TOTAL_DOTS * bytes_done // total_size
+ sys.stdout.write('.' * (num_dots - dots_printed))
+ sys.stdout.flush()
+ dots_printed = num_dots
+ if total_size is not None and bytes_done != total_size:
raise urllib.error.URLError("only got %d of %d bytes" %
(bytes_done, total_size))
if is_gzipped: