Trees | Indices | Help |
|
---|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NOTE(review): reconstructed from a garbled Epydoc HTML export. The
# `class` / `def` header lines were dropped by the text extraction and
# have been restored to the standard Django middleware interface.
import re

from django.utils.text import compress_string
from django.utils.cache import patch_vary_headers

# Matches "gzip" as a whole word inside the Accept-Encoding header value.
re_accepts_gzip = re.compile(r'\bgzip\b')


class GZipMiddleware(object):
    """
    This middleware compresses content if the browser allows gzip compression.
    It sets the Vary header accordingly, so that caches will base their storage
    on the Accept-Encoding header.
    """

    def process_response(self, request, response):
        """Gzip-compress ``response.content`` when worthwhile and allowed.

        The response is returned unchanged unless ALL of the following hold:

        * status code is 200 and the body is at least 8192 bytes
          (omeroweb: compressing shorter bodies is usually not worth it);
        * no ``Content-Encoding`` header is already set;
        * the ``Content-Type`` is text or javascript (omeroweb opt-in);
        * the client is not an old MSIE requesting javascript or PDF
          (those IE versions mishandle such gzipped content);
        * the client's ``Accept-Encoding`` contains the token ``gzip``.

        On success the body is replaced with its gzipped form and the
        ``Content-Encoding`` / ``Content-Length`` headers updated.
        """
        # It's not worth compressing non-OK or really short responses.
        # omeroweb: the tradeoff for less than 8192 bytes of uncompressed
        # text is not worth it most of the times.
        if response.status_code != 200 or len(response.content) < 8192:
            return response

        # Avoid gzipping if we've already got a content-encoding.
        # (The original repeated this check after patch_vary_headers();
        # nothing in between can set the header, so one check suffices.)
        if response.has_header('Content-Encoding'):
            return response

        # omeroweb: we don't want to compress everything, so doing an
        # opt-in approach.
        ctype = response.get('Content-Type', '').lower()
        if "javascript" not in ctype and "text" not in ctype:
            return response

        # Tell caches that the stored representation varies by the
        # client's Accept-Encoding header.
        patch_vary_headers(response, ('Accept-Encoding',))

        # Older versions of IE have issues with gzipped pages containing
        # either Javascript and PDF.
        if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
            if "javascript" in ctype or ctype == "application/pdf":
                return response

        ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(ae):
            return response

        response.content = compress_string(response.content)
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(response.content))
        return response
Trees | Indices | Help |
|
---|
Generated by Epydoc 3.0.1 on Tue Sep 23 15:04:59 2014 | http://epydoc.sourceforge.net |