Vendored in h2 and its dependencies
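
This change removes the pip install of h2 from tools/ci/install.sh and instead vendors h2 together with its dependencies hpack, hyperframe, and certifi under tools/third_party/. tools/localpaths.py now puts each of those directories on sys.path, and on Python 2 it additionally adds the vendored enum backport. A short usage sketch follows the localpaths.py hunk below.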
diff --git a/tools/ci/install.sh b/tools/ci/install.sh
index 9474198..d1d4368 100755
--- a/tools/ci/install.sh
+++ b/tools/ci/install.sh
@@ -8,5 +8,4 @@
 if [[ $RUN_JOB -eq 1 ]]; then
     pip install -U setuptools
     pip install -U requests
-    pip install -U h2
 fi
diff --git a/tools/localpaths.py b/tools/localpaths.py
index 7eb487e..6758107 100644
--- a/tools/localpaths.py
+++ b/tools/localpaths.py
@@ -18,5 +18,12 @@
 sys.path.insert(0, os.path.join(here, "third_party", "pytest", "src"))
 sys.path.insert(0, os.path.join(here, "third_party", "six"))
 sys.path.insert(0, os.path.join(here, "third_party", "webencodings"))
+sys.path.insert(0, os.path.join(here, "third_party", "h2"))
+sys.path.insert(0, os.path.join(here, "third_party", "hpack"))
+sys.path.insert(0, os.path.join(here, "third_party", "hyperframe"))
+sys.path.insert(0, os.path.join(here, "third_party", "certifi"))
 sys.path.insert(0, os.path.join(here, "webdriver"))
 sys.path.insert(0, os.path.join(here, "wptrunner"))
+
+if sys.version_info[0] == 2:
+    sys.path.insert(0, os.path.join(here, "third_party", "enum"))
diff --git a/tools/third_party/certifi/LICENSE b/tools/third_party/certifi/LICENSE
new file mode 100644
index 0000000..802b53f
--- /dev/null
+++ b/tools/third_party/certifi/LICENSE
@@ -0,0 +1,21 @@
+This packge contains a modified version of ca-bundle.crt:
+
+ca-bundle.crt -- Bundle of CA Root Certificates
+
+Certificate data from Mozilla as of: Thu Nov  3 19:04:19 2011#
+This is a bundle of X.509 certificates of public Certificate Authorities
+(CA). These were automatically extracted from Mozilla's root certificates
+file (certdata.txt).  This file can be found in the mozilla source tree:
+http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1#
+It contains the certificates in PEM format and therefore
+can be directly used with curl / libcurl / php_curl, or with
+an Apache+mod_ssl webserver for SSL client authentication.
+Just configure this file as the SSLCACertificateFile.#
+
+***** BEGIN LICENSE BLOCK *****
+This Source Code Form is subject to the terms of the Mozilla Public License,
+v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
+one at http://mozilla.org/MPL/2.0/.
+
+***** END LICENSE BLOCK *****
+@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
diff --git a/tools/third_party/certifi/MANIFEST.in b/tools/third_party/certifi/MANIFEST.in
new file mode 100644
index 0000000..6077b5f
--- /dev/null
+++ b/tools/third_party/certifi/MANIFEST.in
@@ -0,0 +1 @@
+include MANIFEST.in README.rst LICENSE certifi/cacert.pem
diff --git a/tools/third_party/certifi/PKG-INFO b/tools/third_party/certifi/PKG-INFO
new file mode 100644
index 0000000..73f3643
--- /dev/null
+++ b/tools/third_party/certifi/PKG-INFO
@@ -0,0 +1,69 @@
+Metadata-Version: 1.1
+Name: certifi
+Version: 2018.4.16
+Summary: Python package for providing Mozilla's CA Bundle.
+Home-page: http://certifi.io/
+Author: Kenneth Reitz
+Author-email: me@kennethreitz.com
+License: MPL-2.0
+Description: Certifi: Python SSL Certificates
+        ================================
+        
+        `Certifi`_ is a carefully curated collection of Root Certificates for
+        validating the trustworthiness of SSL certificates while verifying the identity
+        of TLS hosts. It has been extracted from the `Requests`_ project.
+        
+        Installation
+        ------------
+        
+        ``certifi`` is available on PyPI. Simply install it with ``pip``::
+        
+            $ pip install certifi
+        
+        Usage
+        -----
+        
+        To reference the installed certificate authority (CA) bundle, you can use the
+        built-in function::
+        
+            >>> import certifi
+        
+            >>> certifi.where()
+            '/usr/local/lib/python2.7/site-packages/certifi/cacert.pem'
+        
+        Enjoy!
+        
+        1024-bit Root Certificates
+        ~~~~~~~~~~~~~~~~~~~~~~~~~~
+        
+        Browsers and certificate authorities have concluded that 1024-bit keys are
+        unacceptably weak for certificates, particularly root certificates. For this
+        reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
+        bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
+        certificate from the same CA. Because Mozilla removed these certificates from
+        its bundle, ``certifi`` removed them as well.
+        
+        In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
+        to intentionally re-add the 1024-bit roots back into your bundle. This was not
+        recommended in production and therefore was removed. To assist in migrating old
+        code, the function ``certifi.old_where()`` continues to exist as an alias of
+        ``certifi.where()``. Please update your code to use ``certifi.where()``
+        instead. ``certifi.old_where()`` will be removed in 2018.
+        
+        .. _`Certifi`: http://certifi.io/en/latest/
+        .. _`Requests`: http://docs.python-requests.org/en/latest/
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
diff --git a/tools/third_party/certifi/README.rst b/tools/third_party/certifi/README.rst
new file mode 100644
index 0000000..64b3e38
--- /dev/null
+++ b/tools/third_party/certifi/README.rst
@@ -0,0 +1,46 @@
+Certifi: Python SSL Certificates
+================================
+
+`Certifi`_ is a carefully curated collection of Root Certificates for
+validating the trustworthiness of SSL certificates while verifying the identity
+of TLS hosts. It has been extracted from the `Requests`_ project.
+
+Installation
+------------
+
+``certifi`` is available on PyPI. Simply install it with ``pip``::
+
+    $ pip install certifi
+
+Usage
+-----
+
+To reference the installed certificate authority (CA) bundle, you can use the
+built-in function::
+
+    >>> import certifi
+
+    >>> certifi.where()
+    '/usr/local/lib/python2.7/site-packages/certifi/cacert.pem'
+
+Enjoy!
+
+1024-bit Root Certificates
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Browsers and certificate authorities have concluded that 1024-bit keys are
+unacceptably weak for certificates, particularly root certificates. For this
+reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
+bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
+certificate from the same CA. Because Mozilla removed these certificates from
+its bundle, ``certifi`` removed them as well.
+
+In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
+to intentionally re-add the 1024-bit roots back into your bundle. This was not
+recommended in production and therefore was removed. To assist in migrating old
+code, the function ``certifi.old_where()`` continues to exist as an alias of
+``certifi.where()``. Please update your code to use ``certifi.where()``
+instead. ``certifi.old_where()`` will be removed in 2018.
+
+.. _`Certifi`: http://certifi.io/en/latest/
+.. _`Requests`: http://docs.python-requests.org/en/latest/
diff --git a/tools/third_party/certifi/certifi.egg-info/PKG-INFO b/tools/third_party/certifi/certifi.egg-info/PKG-INFO
new file mode 100644
index 0000000..73f3643
--- /dev/null
+++ b/tools/third_party/certifi/certifi.egg-info/PKG-INFO
@@ -0,0 +1,69 @@
+Metadata-Version: 1.1
+Name: certifi
+Version: 2018.4.16
+Summary: Python package for providing Mozilla's CA Bundle.
+Home-page: http://certifi.io/
+Author: Kenneth Reitz
+Author-email: me@kennethreitz.com
+License: MPL-2.0
+Description: Certifi: Python SSL Certificates
+        ================================
+        
+        `Certifi`_ is a carefully curated collection of Root Certificates for
+        validating the trustworthiness of SSL certificates while verifying the identity
+        of TLS hosts. It has been extracted from the `Requests`_ project.
+        
+        Installation
+        ------------
+        
+        ``certifi`` is available on PyPI. Simply install it with ``pip``::
+        
+            $ pip install certifi
+        
+        Usage
+        -----
+        
+        To reference the installed certificate authority (CA) bundle, you can use the
+        built-in function::
+        
+            >>> import certifi
+        
+            >>> certifi.where()
+            '/usr/local/lib/python2.7/site-packages/certifi/cacert.pem'
+        
+        Enjoy!
+        
+        1024-bit Root Certificates
+        ~~~~~~~~~~~~~~~~~~~~~~~~~~
+        
+        Browsers and certificate authorities have concluded that 1024-bit keys are
+        unacceptably weak for certificates, particularly root certificates. For this
+        reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
+        bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
+        certificate from the same CA. Because Mozilla removed these certificates from
+        its bundle, ``certifi`` removed them as well.
+        
+        In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
+        to intentionally re-add the 1024-bit roots back into your bundle. This was not
+        recommended in production and therefore was removed. To assist in migrating old
+        code, the function ``certifi.old_where()`` continues to exist as an alias of
+        ``certifi.where()``. Please update your code to use ``certifi.where()``
+        instead. ``certifi.old_where()`` will be removed in 2018.
+        
+        .. _`Certifi`: http://certifi.io/en/latest/
+        .. _`Requests`: http://docs.python-requests.org/en/latest/
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
diff --git a/tools/third_party/certifi/certifi.egg-info/SOURCES.txt b/tools/third_party/certifi/certifi.egg-info/SOURCES.txt
new file mode 100644
index 0000000..04773ea
--- /dev/null
+++ b/tools/third_party/certifi/certifi.egg-info/SOURCES.txt
@@ -0,0 +1,14 @@
+LICENSE
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+certifi/__init__.py
+certifi/__main__.py
+certifi/cacert.pem
+certifi/core.py
+certifi.egg-info/PKG-INFO
+certifi.egg-info/SOURCES.txt
+certifi.egg-info/dependency_links.txt
+certifi.egg-info/not-zip-safe
+certifi.egg-info/top_level.txt
\ No newline at end of file
diff --git a/tools/third_party/certifi/certifi.egg-info/dependency_links.txt b/tools/third_party/certifi/certifi.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/tools/third_party/certifi/certifi.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/tools/third_party/certifi/certifi.egg-info/not-zip-safe b/tools/third_party/certifi/certifi.egg-info/not-zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/tools/third_party/certifi/certifi.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/tools/third_party/certifi/certifi.egg-info/top_level.txt b/tools/third_party/certifi/certifi.egg-info/top_level.txt
new file mode 100644
index 0000000..963eac5
--- /dev/null
+++ b/tools/third_party/certifi/certifi.egg-info/top_level.txt
@@ -0,0 +1 @@
+certifi
diff --git a/tools/third_party/certifi/certifi/__init__.py b/tools/third_party/certifi/certifi/__init__.py
new file mode 100644
index 0000000..0c4963e
--- /dev/null
+++ b/tools/third_party/certifi/certifi/__init__.py
@@ -0,0 +1,3 @@
+from .core import where, old_where
+
+__version__ = "2018.04.16"
diff --git a/tools/third_party/certifi/certifi/__main__.py b/tools/third_party/certifi/certifi/__main__.py
new file mode 100644
index 0000000..5f1da0d
--- /dev/null
+++ b/tools/third_party/certifi/certifi/__main__.py
@@ -0,0 +1,2 @@
+from certifi import where
+print(where())
diff --git a/tools/third_party/certifi/certifi/cacert.pem b/tools/third_party/certifi/certifi/cacert.pem
new file mode 100644
index 0000000..2713f54
--- /dev/null
+++ b/tools/third_party/certifi/certifi/cacert.pem
@@ -0,0 +1,4400 @@
+
+# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Label: "GlobalSign Root CA"
+# Serial: 4835703278459707669005204
+# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
+# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
+# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Label: "GlobalSign Root CA - R2"
+# Serial: 4835703278459682885658125
+# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30
+# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe
+# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e
+-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
+MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
+v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
+eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
+tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
+C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
+zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
+mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
+V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
+bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
+3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
+J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
+291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
+ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
+AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
+TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Label: "Verisign Class 3 Public Primary Certification Authority - G3"
+# Serial: 206684696279472310254277870180966723415
+# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09
+# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6
+# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
+cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
+LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
+aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
+VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
+aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
+bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
+IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b
+N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t
+KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu
+kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm
+CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ
+Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu
+imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te
+2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe
+DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
+/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p
+F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
+TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Premium 2048 Secure Server CA"
+# Serial: 946069240
+# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90
+# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31
+# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
+MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
+j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
+U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
+zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
+u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
+bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
+fF6adulZkMV8gzURZVE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Label: "Baltimore CyberTrust Root"
+# Serial: 33554617
+# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
+# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
+# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
+# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
+# Label: "AddTrust External Root"
+# Serial: 1
+# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f
+# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68
+# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2
+-----BEGIN CERTIFICATE-----
+MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
+IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
+MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
+FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
+bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
+dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
+H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
+uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
+mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
+a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
+E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
+WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
+VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
+Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
+cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
+IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
+AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
+YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
+6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
+Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
+c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
+mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Label: "Entrust Root Certification Authority"
+# Serial: 1164660820
+# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
+# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
+# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc.
+# Subject: CN=GeoTrust Global CA O=GeoTrust Inc.
+# Label: "GeoTrust Global CA"
+# Serial: 144470
+# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5
+# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12
+# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a
+-----BEGIN CERTIFICATE-----
+MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i
+YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg
+R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9
+9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq
+fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv
+iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU
+1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+
+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW
+MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA
+ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l
+uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn
+Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS
+tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF
+PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un
+hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV
+5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc.
+# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc.
+# Label: "GeoTrust Universal CA"
+# Serial: 1
+# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48
+# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79
+# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12
+-----BEGIN CERTIFICATE-----
+MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0
+IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV
+VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8
+cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT
+QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh
+F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v
+c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w
+mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd
+VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX
+teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ
+f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe
+Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+
+nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB
+/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY
+MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG
+9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc
+aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX
+IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn
+ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z
+uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN
+Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja
+QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW
+koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9
+ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt
+DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm
+bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
+# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
+# Label: "GeoTrust Universal CA 2"
+# Serial: 1
+# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7
+# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79
+# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b
+-----BEGIN CERTIFICATE-----
+MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD
+VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1
+c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81
+WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG
+FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq
+XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL
+se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb
+KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd
+IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73
+y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt
+hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc
+QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4
+Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV
+HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ
+KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z
+dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ
+L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr
+Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo
+ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY
+T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz
+GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m
+1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV
+OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH
+6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX
+QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
+-----END CERTIFICATE-----
+
+# Issuer: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
+# Subject: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
+# Label: "Visa eCommerce Root"
+# Serial: 25952180776285836048024890241505565794
+# MD5 Fingerprint: fc:11:b8:d8:08:93:30:00:6d:23:f9:7e:eb:52:1e:02
+# SHA1 Fingerprint: 70:17:9b:86:8c:00:a4:fa:60:91:52:22:3f:9f:3e:32:bd:e0:05:62
+# SHA256 Fingerprint: 69:fa:c9:bd:55:fb:0a:c7:8d:53:bb:ee:5c:f1:d5:97:98:9f:d0:aa:ab:20:a2:51:51:bd:f1:73:3e:e7:d1:22
+-----BEGIN CERTIFICATE-----
+MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr
+MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl
+cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv
+bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw
+CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h
+dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l
+cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h
+2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E
+lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV
+ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq
+299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t
+vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL
+dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF
+AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR
+zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3
+LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd
+7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw
+++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt
+398znM/jra6O1I7mT1GvFpLgXPYHDw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
+# Subject: CN=AAA Certificate Services O=Comodo CA Limited
+# Label: "Comodo AAA Services root"
+# Serial: 1
+# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
+# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
+# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
+# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
+# Label: "QuoVadis Root CA"
+# Serial: 985026699
+# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24
+# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9
+# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73
+-----BEGIN CERTIFICATE-----
+MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz
+MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw
+IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR
+dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp
+li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D
+rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ
+WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug
+F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU
+xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC
+Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv
+dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw
+ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl
+IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh
+c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy
+ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh
+Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI
+KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T
+KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq
+y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p
+dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD
+VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL
+MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk
+fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8
+7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R
+cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y
+mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW
+xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK
+SnQ2+Q==
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2"
+# Serial: 1289
+# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
+# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
+# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86
+-----BEGIN CERTIFICATE-----
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
+GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
+Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
+WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
+rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
+ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
+Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
+PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
+/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
+oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
+yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
+A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
+MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
+BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
+g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
+fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
+WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
+B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
+hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
+TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
+mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
+ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
+4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
+8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3"
+# Serial: 1478
+# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf
+# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85
+# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35
+-----BEGIN CERTIFICATE-----
+MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
+V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
+4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
+H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
+8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
+vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
+mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
+btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
+T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
+WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
+c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
+4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
+VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
+CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
+aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
+aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
+dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
+czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
+A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
+Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
+7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
+d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
+4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
+t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
+DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
+k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
+Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
+mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
+4SVhM7JZG+Ju1zdXtg2pEto=
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1
+# Subject: O=SECOM Trust.net OU=Security Communication RootCA1
+# Label: "Security Communication Root CA"
+# Serial: 0
+# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a
+# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7
+# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c
+-----BEGIN CERTIFICATE-----
+MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY
+MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t
+dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5
+WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD
+VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8
+9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ
+DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9
+Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N
+QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ
+xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G
+A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG
+kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr
+Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5
+Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU
+JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
+RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Sonera Class2 CA O=Sonera
+# Subject: CN=Sonera Class2 CA O=Sonera
+# Label: "Sonera Class 2 Root CA"
+# Serial: 29
+# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb
+# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27
+# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP
+MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx
+MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV
+BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o
+Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt
+5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s
+3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej
+vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu
+8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw
+DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG
+MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil
+zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/
+3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD
+FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6
+Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2
+ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M
+-----END CERTIFICATE-----
+
+# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Label: "XRamp Global CA Root"
+# Serial: 107108908803651509692980124233745014957
+# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
+# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
+# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Label: "Go Daddy Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
+# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
+# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+
+# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Label: "Starfield Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
+# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
+# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+# Issuer: O=Government Root Certification Authority
+# Subject: O=Government Root Certification Authority
+# Label: "Taiwan GRCA"
+# Serial: 42023070807708724159991140556527066870
+# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e
+# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9
+# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3
+-----BEGIN CERTIFICATE-----
+MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/
+MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow
+PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
+AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR
+IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q
+gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy
+yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts
+F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2
+jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx
+ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC
+VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK
+YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH
+EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN
+Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud
+DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE
+MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK
+UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ
+TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf
+qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK
+ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE
+JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7
+hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1
+EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm
+nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX
+udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz
+ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe
+LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl
+pYYsfPQS
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root CA"
+# Serial: 17154717934120587862167794914071425081
+# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
+# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
+# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root CA"
+# Serial: 10944719598952040374951832963794454346
+# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
+# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
+# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert High Assurance EV Root CA"
+# Serial: 3553400076410547919724730734378100087
+# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
+# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
+# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+
+# Issuer: CN=Class 2 Primary CA O=Certplus
+# Subject: CN=Class 2 Primary CA O=Certplus
+# Label: "Certplus Class 2 Primary CA"
+# Serial: 177770208045934040241468760488327595043
+# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b
+# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb
+# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb
+-----BEGIN CERTIFICATE-----
+MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw
+PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz
+cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9
+MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz
+IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ
+ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR
+VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL
+kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd
+EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas
+H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0
+HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud
+DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4
+QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu
+Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/
+AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8
+yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR
+FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA
+ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB
+kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7
+l7+ijrRU
+-----END CERTIFICATE-----
+
+# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co.
+# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co.
+# Label: "DST Root CA X3"
+# Serial: 91299735575339953335919266965803778155
+# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5
+# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13
+# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39
+-----BEGIN CERTIFICATE-----
+MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/
+MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
+DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow
+PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD
+Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O
+rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq
+OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b
+xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw
+7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD
+aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG
+SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69
+ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr
+AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz
+R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5
+JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo
+Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Label: "SwissSign Gold CA - G2"
+# Serial: 13492815561806991280
+# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93
+# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61
+# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95
+-----BEGIN CERTIFICATE-----
+MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
+biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
+d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
+76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
+bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
+6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
+emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
+MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
+MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
+FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
+aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
+gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
+qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
+lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
+8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
+45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
+O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
+77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
+hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
+Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
+ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
+Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Label: "SwissSign Silver CA - G2"
+# Serial: 5700383053117599563
+# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13
+# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb
+# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5
+-----BEGIN CERTIFICATE-----
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
+IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
+RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
+U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
+Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
+YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
+nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
+6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
+c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
+MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
+HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
+jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
+5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
+rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
+AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
+WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
+2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
+IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
+aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
+em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
+dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
+OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
+tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Label: "GeoTrust Primary Certification Authority"
+# Serial: 32798226551256963324313806436981982369
+# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf
+# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96
+# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY
+MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo
+R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx
+MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
+Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9
+AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA
+ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0
+7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W
+kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI
+mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ
+KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1
+6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl
+4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K
+oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj
+UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU
+AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA"
+# Serial: 69529181992039203566298953787712940909
+# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12
+# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81
+# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
+qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
+BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
+NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
+LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
+A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
+W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
+3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
+6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
+Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
+NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
+r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
+DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
+YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
+/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
+LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
+jVaMaA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G5"
+# Serial: 33037644167568058970164719475676101450
+# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c
+# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5
+# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df
+-----BEGIN CERTIFICATE-----
+MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB
+yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW
+ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1
+nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex
+t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz
+SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG
+BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+
+rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/
+NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
+BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH
+BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
+aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv
+MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE
+p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y
+5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK
+WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ
+4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N
+hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
+# Subject: CN=SecureTrust CA O=SecureTrust Corporation
+# Label: "SecureTrust CA"
+# Serial: 17199774589125277788362757014266862032
+# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
+# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
+# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Secure Global CA O=SecureTrust Corporation
+# Subject: CN=Secure Global CA O=SecureTrust Corporation
+# Label: "Secure Global CA"
+# Serial: 9751836167731051554232119481456978597
+# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
+# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
+# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
+MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
+Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
+iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
+/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
+jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
+HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
+sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
+gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
+KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
+AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
+URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
+H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
+I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
+# Label: "COMODO Certification Authority"
+# Serial: 104350513648249232941998508985834464573
+# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
+# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
+# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+# Label: "Network Solutions Certificate Authority"
+# Serial: 116697915152937497490437556386812487904
+# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e
+# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce
+# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c
+-----BEGIN CERTIFICATE-----
+MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi
+MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
+MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp
+dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV
+UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO
+ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
+c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP
+OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl
+mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF
+BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4
+qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw
+gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu
+bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp
+dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8
+6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/
+h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH
+/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
+wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN
+pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Label: "COMODO ECC Certification Authority"
+# Serial: 41578283867086692638256921589707938090
+# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
+# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
+# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GA CA"
+# Serial: 86718877871133159090080555911823548314
+# MD5 Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93
+# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9
+# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5
+-----BEGIN CERTIFICATE-----
+MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB
+ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly
+aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl
+ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w
+NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G
+A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD
+VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX
+SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR
+VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2
+w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF
+mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg
+4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9
+4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw
+EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx
+SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2
+ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8
+vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa
+hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi
+Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ
+/L7fCg0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna O=Dhimyotis
+# Subject: CN=Certigna O=Dhimyotis
+# Label: "Certigna"
+# Serial: 18364802974209362175
+# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
+# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
+# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
+DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
+BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
+zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
+130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
+JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
+ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
+AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
+9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
+bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
+fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
+HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
+t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
+# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
+# Label: "Deutsche Telekom Root CA 2"
+# Serial: 38
+# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08
+# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf
+# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3
+-----BEGIN CERTIFICATE-----
+MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc
+MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj
+IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB
+IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE
+RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl
+U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290
+IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU
+ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC
+QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr
+rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S
+NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc
+QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH
+txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP
+BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC
+AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp
+tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa
+IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl
+6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+
+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU
+Cm26OWMohpLzGITY+9HPBVZkVw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Label: "Cybertrust Global Root"
+# Serial: 4835703278459682877484360
+# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1
+# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6
+# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3
+-----BEGIN CERTIFICATE-----
+MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG
+A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh
+bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS
+b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5
+7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS
+J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y
+HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP
+t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz
+FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY
+XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/
+MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw
+hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js
+MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA
+A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj
+Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx
+XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o
+omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc
+A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
+WL1WMRJOEcgh4LMRkWXbtKaIOM5V
+-----END CERTIFICATE-----
+
+# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Label: "ePKI Root Certification Authority"
+# Serial: 28956088682735189655030529057352760477
+# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
+# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
+# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
+IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
+SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
+SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
+DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
+TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
+fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
+sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
+WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
+nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
+dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
+NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
+AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
+MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
+uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
+PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
+JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
+j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
+5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
+o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
+/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G3"
+# Serial: 28809105769928564313984085209975885599
+# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05
+# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd
+# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
+mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
+cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
+BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
+BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
+hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
+5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
+DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
+huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
+AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
+kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
+SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
+spki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G2"
+# Serial: 71758320672825410020661621085256472406
+# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f
+# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12
+# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
+MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
+YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
+dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
+BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
+papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
+KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
+XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G3"
+# Serial: 127614157056681299805556476275995414779
+# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31
+# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2
+# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
+rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
+BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
+Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
+LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
+MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
+gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
+YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
+b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
+9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
+zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
+OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
+oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
+KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
+MdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G2"
+# Serial: 80682863203381065782177908751794619243
+# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a
+# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0
+# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
+MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
+KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
+MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
+NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
+BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
+MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
+So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
+tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
+CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
+qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
+rD6ogRLQy7rQkgu2npaqBA+K
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Universal Root Certification Authority"
+# Serial: 85209574734084581917763752644031726877
+# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19
+# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54
+# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
+vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
+ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
+IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
+bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
+9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
+H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
+/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
+rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
+WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
+sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
+lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
+7M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G4"
+# Serial: 63143484348153506665311985501458640051
+# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41
+# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a
+# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
+U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
+SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
+biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
+fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
+aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
+kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
+4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
+FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny"
+# Serial: 80544274841616
+# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
+# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
+# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G2"
+# Serial: 10000012
+# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a
+# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16
+# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f
+-----BEGIN CERTIFICATE-----
+MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
+DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
+qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
+uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
+Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
+pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
+5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
+UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
+GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
+5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
+6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
+eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
+B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
+BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov
+L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG
+SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS
+CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen
+5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897
+IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK
+gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL
++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL
+vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm
+bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk
+N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC
+Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z
+ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Label: "Hongkong Post Root CA 1"
+# Serial: 1000
+# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca
+# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58
+# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2
+-----BEGIN CERTIFICATE-----
+MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx
+FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg
+Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG
+A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr
+b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ
+jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn
+PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh
+ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9
+nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h
+q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED
+MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC
+mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3
+7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB
+oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs
+EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO
+fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi
+AmvZWg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Label: "SecureSign RootCA11"
+# Serial: 1
+# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26
+# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3
+# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12
+-----BEGIN CERTIFICATE-----
+MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr
+MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG
+A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0
+MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp
+Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD
+QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz
+i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8
+h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV
+MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9
+UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni
+8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC
+h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD
+VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm
+KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ
+X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr
+QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5
+pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN
+QSdJQO7e5iNEOdyhIta6A/I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Label: "Microsec e-Szigno Root CA 2009"
+# Serial: 14014712776195784473
+# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1
+# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e
+# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78
+-----BEGIN CERTIFICATE-----
+MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
+ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
+CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
+OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
+FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
+Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
+dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
+kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
+fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
+N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
+xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
+Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
+SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
+mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
+ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
+tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
+2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
+HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Label: "GlobalSign Root CA - R3"
+# Serial: 4835703278459759426209954
+# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
+# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
+# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 6047274297262753887
+# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3
+# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa
+# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy
+MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD
+VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
+cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv
+ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl
+AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF
+661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9
+am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1
+ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481
+PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS
+3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k
+SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF
+3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM
+ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g
+StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz
+Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB
+jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
+-----END CERTIFICATE-----
+
+# Issuer: CN=Izenpe.com O=IZENPE S.A.
+# Subject: CN=Izenpe.com O=IZENPE S.A.
+# Label: "Izenpe.com"
+# Serial: 917563065490389241595536686991402621
+# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73
+# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19
+# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f
+-----BEGIN CERTIFICATE-----
+MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
+MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
+ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
+VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
+b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
+scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
+xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
+LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
+yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
+rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
+BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
+hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
+QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
+HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
+Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
+QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
+BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
+MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
+A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
+laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
+awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
+JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
+LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
+VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
+LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
+UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
+QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
+QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
+# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
+# Label: "Chambers of Commerce Root - 2008"
+# Serial: 11806822484801597146
+# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7
+# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c
+# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0
+-----BEGIN CERTIFICATE-----
+MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz
+IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz
+MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj
+dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw
+EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp
+MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9
+28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq
+VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q
+DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR
+5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL
+ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a
+Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl
+UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s
++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5
+Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj
+ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx
+hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV
+HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1
++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN
+YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t
+L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy
+ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt
+IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV
+HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w
+DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW
+PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF
+5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1
+glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH
+FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2
+pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD
+xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG
+tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq
+jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De
+fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg
+OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ
+d0jQ
+-----END CERTIFICATE-----
+
+# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
+# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
+# Label: "Global Chambersign Root - 2008"
+# Serial: 14541511773111788494
+# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3
+# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c
+# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca
+-----BEGIN CERTIFICATE-----
+MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD
+aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx
+MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy
+cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG
+A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl
+BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI
+hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed
+KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7
+G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2
+zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4
+ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG
+HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2
+Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V
+yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e
+beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r
+6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh
+wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog
+zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW
+BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr
+ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp
+ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk
+cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt
+YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC
+CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow
+KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI
+hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ
+UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz
+X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x
+fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz
+a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd
+Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd
+SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O
+AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso
+M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge
+v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z
+09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B
+-----END CERTIFICATE-----
+
+# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Label: "Go Daddy Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01
+# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b
+# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96
+# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e
+# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Services Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
+# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
+# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
+# Subject: CN=AffirmTrust Commercial O=AffirmTrust
+# Label: "AffirmTrust Commercial"
+# Serial: 8608355977964138876
+# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
+# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
+# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Networking O=AffirmTrust
+# Subject: CN=AffirmTrust Networking O=AffirmTrust
+# Label: "AffirmTrust Networking"
+# Serial: 8957382827206547757
+# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
+# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
+# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium O=AffirmTrust
+# Subject: CN=AffirmTrust Premium O=AffirmTrust
+# Label: "AffirmTrust Premium"
+# Serial: 7893706540734352110
+# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
+# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
+# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Label: "AffirmTrust Premium ECC"
+# Serial: 8401224907861490260
+# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
+# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
+# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA"
+# Serial: 279744
+# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
+# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
+# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Root Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
+# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
+# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
+MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
+V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
+WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
+AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
+K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
+RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
+rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
+3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
+hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
+MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
+XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
+aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
+YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Label: "Security Communication RootCA2"
+# Serial: 0
+# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
+# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
+# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
+DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
+dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
+OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
+zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
+VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
+hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
+ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
+awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
+OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
+coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
+okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
+t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
+1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
+SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2011"
+# Serial: 0
+# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9
+# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d
+# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71
+-----BEGIN CERTIFICATE-----
+MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix
+RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p
+YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw
+NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK
+EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl
+cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz
+dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ
+fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns
+bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD
+75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP
+FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV
+HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp
+5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu
+b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA
+A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p
+6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
+TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7
+dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys
+Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI
+l7WdmplNsDz4SgCbZN2fOUvRJ9e4
+-----END CERTIFICATE-----
+
+# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Label: "Actalis Authentication Root CA"
+# Serial: 6271844772424770508
+# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
+# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
+# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
+SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
+ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
+UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
+4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
+KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
+gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
+rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
+51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
+be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
+KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
+v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
+fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
+ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
+e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
+WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
+SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
+pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
+X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
+fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
+K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
+ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
+LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
+LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
+-----END CERTIFICATE-----
+
+# Issuer: O=Trustis Limited OU=Trustis FPS Root CA
+# Subject: O=Trustis Limited OU=Trustis FPS Root CA
+# Label: "Trustis FPS Root CA"
+# Serial: 36053640375399034304724988975563710553
+# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d
+# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04
+# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d
+-----BEGIN CERTIFICATE-----
+MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF
+MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL
+ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx
+MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc
+MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+
+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH
+iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj
+vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA
+0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB
+OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/
+BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E
+FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01
+GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW
+zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4
+1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE
+f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F
+jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN
+ZetX2fNXlrtIzYE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 2 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29
+# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99
+# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV
+L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91
+1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx
+MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ
+QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB
+arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr
+Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi
+FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS
+P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN
+9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz
+uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h
+9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
+A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t
+OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo
++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7
+KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2
+DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us
+H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ
+I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7
+5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h
+3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz
+Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 3 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec
+# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57
+# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y
+ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E
+N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9
+tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX
+0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c
+/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X
+KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY
+zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS
+O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D
+34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP
+K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv
+Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj
+QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
+cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS
+IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2
+HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa
+O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u
+dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE
+kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41
+3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD
+u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq
+4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 3"
+# Serial: 1
+# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef
+# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1
+# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
+8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
+RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
+hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
+ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
+EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
+A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
+WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
+6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
+91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
+e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
+TpPDpFQUWw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
+# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
+# Label: "EE Certification Centre Root CA"
+# Serial: 112324828676200291871926431888494945866
+# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f
+# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7
+# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76
+-----BEGIN CERTIFICATE-----
+MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1
+MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1
+czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG
+CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy
+MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl
+ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS
+b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy
+euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO
+bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw
+WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d
+MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE
+1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/
+zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB
+BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF
+BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV
+v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG
+E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u
+uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW
+iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v
+GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 2009"
+# Serial: 623603
+# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f
+# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0
+# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1
+-----BEGIN CERTIFICATE-----
+MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha
+ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM
+HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03
+UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42
+tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R
+ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM
+lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp
+/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G
+A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G
+A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj
+dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy
+MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl
+cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js
+L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL
+BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni
+acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
+o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K
+zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8
+PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y
+Johw1+qRzT65ysCQblrGXnRl11z+o+I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 EV 2009"
+# Serial: 623604
+# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6
+# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83
+# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw
+NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn
+ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0
+3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z
+qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR
+p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8
+HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
+HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
+Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
+c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
+RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
+dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
+Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
+3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
+nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
+xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
+KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA Disig Root R2 O=Disig a.s.
+# Subject: CN=CA Disig Root R2 O=Disig a.s.
+# Label: "CA Disig Root R2"
+# Serial: 10572350602393338211
+# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03
+# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71
+# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
+MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
+NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
+PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
+QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
+yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
+QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
+H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
+QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
+i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
+nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
+rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
+hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
+tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
+GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
+lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
+TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
+nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
+gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
+G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
+L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
+-----END CERTIFICATE-----
+
+# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Label: "ACCVRAIZ1"
+# Serial: 6828503384748696800
+# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02
+# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17
+# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13
+-----BEGIN CERTIFICATE-----
+MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
+AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
+CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
+BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
+VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
+qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
+HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
+G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
+lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
+IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
+0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
+k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
+4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
+m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
+cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
+uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
+KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
+ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
+AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
+VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
+VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
+CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
+cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
+QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
+7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
+cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
+QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
+czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
+aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
+aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
+DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
+BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
+D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
+JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
+AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
+vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
+tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
+7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
+I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
+h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
+d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
+pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Global Root CA"
+# Serial: 3262
+# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96
+# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65
+# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
+EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5
+NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT
+B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF
+10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz
+0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh
+MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH
+zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc
+46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2
+yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi
+laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP
+oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA
+BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE
+qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm
+4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL
+1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
+LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF
+H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo
+RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+
+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh
+15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW
+6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW
+nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j
+wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz
+aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy
+KwbQBM0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Label: "TeliaSonera Root CA v1"
+# Serial: 199041966741090107964904287217786801558
+# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c
+# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37
+# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89
+-----BEGIN CERTIFICATE-----
+MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
+NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
+b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
+VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
+VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
+7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
+Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
+/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
+81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
+dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
+Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
+sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
+slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
+arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
+VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
+9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
+dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
+TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
+Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
+Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
+OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
+vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
+t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
+HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
+SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+# Label: "E-Tugra Certification Authority"
+# Serial: 7667447206703254355
+# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49
+# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39
+# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV
+BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC
+aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV
+BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1
+Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz
+MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+
+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp
+em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
+ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY
+B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH
+D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF
+Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo
+q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D
+k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH
+fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut
+dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM
+ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8
+zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
+rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX
+U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6
+Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5
+XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF
+Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR
+HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY
+GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c
+77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3
++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK
+vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6
+FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl
+yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P
+AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD
+y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d
+NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 2"
+# Serial: 1
+# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a
+# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9
+# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
+AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
+FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
+1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
+jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
+wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
+WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
+NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
+uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
+IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
+g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
+9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
+BSeOE6Fuwg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Atos TrustedRoot 2011 O=Atos
+# Subject: CN=Atos TrustedRoot 2011 O=Atos
+# Label: "Atos TrustedRoot 2011"
+# Serial: 6643877497813316402
+# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56
+# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21
+# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE
+AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG
+EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM
+FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC
+REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp
+Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM
+VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+
+SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
+cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
+eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
+A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
+DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
+vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
+DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
+maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
+lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
+KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 1 G3"
+# Serial: 687049649626669250736271037606554624078720034195
+# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab
+# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67
+# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
+MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
+rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
+68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
+4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
+UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
+abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
+3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
+KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
+hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
+Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
+zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
+ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
+MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
+cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
+qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
+YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
+b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
+8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
+NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
+ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
+q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
+nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2 G3"
+# Serial: 390156079458959257446133169266079962026824725800
+# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06
+# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36
+# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3 G3"
+# Serial: 268090761170461462463995952157327242137089239581
+# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7
+# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d
+# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
+MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
+/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
+FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
+U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
+ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
+FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
+A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
+eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
+sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
+VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+
+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
+KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI
+FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv
+oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg
+u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP
+0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf
+3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl
+8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+
+DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN
+PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/
+ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G2"
+# Serial: 15385348160840213938643033620894905419
+# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d
+# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f
+# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85
+-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA
+n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc
+biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA
+bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu
+YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB
+AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW
+BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI
+QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I
+0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni
+lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9
+B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv
+ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
+IhNzbM8m9Yop5w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G3"
+# Serial: 15459312981008553731928384953135426796
+# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb
+# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89
+# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2
+-----BEGIN CERTIFICATE-----
+MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
+Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
+RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
+AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
+JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
+6pZjamVFkpUBtA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G2"
+# Serial: 4293743540046975378534879503202253541
+# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44
+# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4
+# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f
+-----BEGIN CERTIFICATE-----
+MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH
+MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI
+2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx
+1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ
+q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
+tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
+vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
+5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
+1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
+NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
+Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
+8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
+pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
+MrY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G3"
+# Serial: 7089244469030293291760083333884364146
+# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca
+# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e
+# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0
+-----BEGIN CERTIFICATE-----
+MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe
+Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw
+EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x
+IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG
+fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd
+BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx
+AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/
+oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8
+sycX
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Trusted Root G4"
+# Serial: 7451500558977370777930084869016614236
+# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49
+# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4
+# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88
+-----BEGIN CERTIFICATE-----
+MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
+RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
+ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
+xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
+ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
+DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
+jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
+CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
+EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
+fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
+uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
+chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
+9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
+ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
+SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
+fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
+sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
+cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
+0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
+4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
+r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
+/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
+gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Label: "COMODO RSA Certification Authority"
+# Serial: 101909084537582093308941363524873193117
+# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18
+# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4
+# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34
+-----BEGIN CERTIFICATE-----
+MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB
+hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
+BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
+Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR
+6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X
+pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC
+9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV
+/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf
+Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z
++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w
+qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah
+SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC
+u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf
+Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq
+crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
+FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB
+/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl
+wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM
+4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV
+2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna
+FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ
+CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK
+boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke
+jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL
+S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb
+QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl
+0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB
+NVOFBkpdn627G190
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Label: "USERTrust RSA Certification Authority"
+# Serial: 2645093764781058787591871645665788717
+# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5
+# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e
+# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
+cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
+BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
+MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
+BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
+3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
+tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
+Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
+VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
+79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
+c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
+c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
+UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
+BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
+Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
+VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
+ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
+8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
+iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
+Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
+XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
+qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
+VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
+L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
+jjxDah2nGN59PRbxYvnKkKj9
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Label: "USERTrust ECC Certification Authority"
+# Serial: 123013823720199481456569720443997572134
+# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1
+# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0
+# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a
+-----BEGIN CERTIFICATE-----
+MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL
+MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl
+eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT
+JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT
+Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg
+VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo
+I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng
+o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G
+A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB
+zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
+RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Label: "GlobalSign ECC Root CA - R4"
+# Serial: 14367148294922964480859022125800977897474
+# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e
+# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb
+# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c
+-----BEGIN CERTIFICATE-----
+MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ
+FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F
+uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX
+kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs
+ewv4n4Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Label: "GlobalSign ECC Root CA - R5"
+# Serial: 32785792099990507226680698011560947931244
+# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08
+# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa
+# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24
+-----BEGIN CERTIFICATE-----
+MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke
+hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI
+KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
+515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO
+xwy8p2Fp8fc74SrL+SvzZpA3
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G3"
+# Serial: 10003001
+# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37
+# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc
+# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28
+-----BEGIN CERTIFICATE-----
+MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX
+DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP
+cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW
+IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX
+xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy
+KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR
+9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az
+5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8
+6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7
+Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP
+bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt
+BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt
+XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF
+MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd
+INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD
+U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp
+LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8
+Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp
+gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh
+/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw
+0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A
+fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq
+4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR
+1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/
+QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM
+94B7IWcnMFk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
+# Label: "Staat der Nederlanden EV Root CA"
+# Serial: 10000013
+# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba
+# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb
+# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a
+-----BEGIN CERTIFICATE-----
+MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y
+MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg
+TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS
+b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS
+M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC
+UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d
+Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p
+rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l
+pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb
+j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC
+KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS
+/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X
+cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH
+1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP
+px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7
+MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI
+eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u
+2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS
+v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC
+wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy
+CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e
+vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6
+Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa
+Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL
+eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8
+FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc
+7uzXLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Label: "IdenTrust Commercial Root CA 1"
+# Serial: 13298821034946342390520003877796839426
+# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7
+# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25
+# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu
+VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw
+MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw
+JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT
+3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU
++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp
+S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1
+bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi
+T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL
+vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK
+Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK
+dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT
+c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv
+l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N
+iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD
+ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
+6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt
+LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93
+nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3
++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK
+W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT
+AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq
+l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG
+4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ
+mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A
+7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Label: "IdenTrust Public Sector Root CA 1"
+# Serial: 13298821034946342390521976156843933698
+# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba
+# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd
+# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu
+VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
+MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
+ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
+RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
+bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
+/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
+3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
+9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
+WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G2"
+# Serial: 1246989352
+# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
+# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
+# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - EC1"
+# Serial: 51543124481930649114116133369
+# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
+# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
+# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Label: "CFCA EV ROOT"
+# Serial: 407555286
+# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
+# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
+# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19
+xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d
+Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN
+5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe
+/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z
+AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
+5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
+# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
+# Label: "Certinomis - Root CA"
+# Serial: 1
+# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f
+# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8
+# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58
+-----BEGIN CERTIFICATE-----
+MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET
+MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb
+BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz
+MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx
+FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g
+Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2
+fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl
+LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV
+WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF
+TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb
+5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc
+CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri
+wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ
+wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG
+m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4
+F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng
+WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0
+2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF
+AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/
+0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw
+F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS
+g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj
+qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN
+h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/
+ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V
+btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj
+Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ
+8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW
+gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GB CA"
+# Serial: 157768595616588414422159278966750757568
+# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d
+# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed
+# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
+MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
+Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
+YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
+CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
+b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
+bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
+HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
+WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
+1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
+u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
+99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
+M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
+BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
+cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
+gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
+ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
+aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
+Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Label: "SZAFIR ROOT CA2"
+# Serial: 357043034767186914217277344587386743377558296292
+# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99
+# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de
+# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe
+-----BEGIN CERTIFICATE-----
+MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL
+BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6
+ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw
+NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
+cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg
+Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN
+QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT
+3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw
+3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6
+3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5
+BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN
+XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF
+AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw
+8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG
+nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP
+oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy
+d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg
+LvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA 2"
+# Serial: 44979900017204383099463764357512596969
+# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2
+# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92
+# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04
+-----BEGIN CERTIFICATE-----
+MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB
+gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu
+QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG
+A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz
+OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ
+VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3
+b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA
+DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn
+0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB
+OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE
+fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E
+Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m
+o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i
+sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW
+OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez
+Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS
+adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n
+3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC
+AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ
+F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf
+CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29
+XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm
+djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/
+WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb
+AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq
+P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko
+b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj
+XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P
+5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi
+DrW5viSP
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce
+# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6
+# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36
+-----BEGIN CERTIFICATE-----
+MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix
+DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k
+IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT
+N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v
+dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG
+A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh
+ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx
+QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA
+4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0
+AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10
+4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C
+ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV
+9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD
+gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6
+Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq
+NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko
+LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
+Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd
+ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I
+XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI
+M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot
+9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V
+Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea
+j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh
+X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ
+l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf
+bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4
+pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK
+e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0
+vm9qp/UsQu0yrbYhnr68
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef
+# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66
+# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33
+-----BEGIN CERTIFICATE-----
+MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN
+BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl
+bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv
+b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ
+BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj
+YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5
+MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0
+dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg
+QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa
+jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi
+C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep
+lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof
+TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certplus Root CA G1 O=Certplus
+# Subject: CN=Certplus Root CA G1 O=Certplus
+# Label: "Certplus Root CA G1"
+# Serial: 1491911565779898356709731176965615564637713
+# MD5 Fingerprint: 7f:09:9c:f7:d9:b9:5c:69:69:56:d5:37:3e:14:0d:42
+# SHA1 Fingerprint: 22:fd:d0:b7:fd:a2:4e:0d:ac:49:2c:a0:ac:a6:7b:6a:1f:e3:f7:66
+# SHA256 Fingerprint: 15:2a:40:2b:fc:df:2c:d5:48:05:4d:22:75:b3:9c:7f:ca:3e:c0:97:80:78:b0:f0:ea:76:e5:61:a6:c7:43:3e
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgISESBVg+QtPlRWhS2DN7cs3EYRMA0GCSqGSIb3DQEBDQUA
+MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy
+dHBsdXMgUm9vdCBDQSBHMTAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBa
+MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy
+dHBsdXMgUm9vdCBDQSBHMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
+ANpQh7bauKk+nWT6VjOaVj0W5QOVsjQcmm1iBdTYj+eJZJ+622SLZOZ5KmHNr49a
+iZFluVj8tANfkT8tEBXgfs+8/H9DZ6itXjYj2JizTfNDnjl8KvzsiNWI7nC9hRYt
+6kuJPKNxQv4c/dMcLRC4hlTqQ7jbxofaqK6AJc96Jh2qkbBIb6613p7Y1/oA/caP
+0FG7Yn2ksYyy/yARujVjBYZHYEMzkPZHogNPlk2dT8Hq6pyi/jQu3rfKG3akt62f
+6ajUeD94/vI4CTYd0hYCyOwqaK/1jpTvLRN6HkJKHRUxrgwEV/xhc/MxVoYxgKDE
+EW4wduOU8F8ExKyHcomYxZ3MVwia9Az8fXoFOvpHgDm2z4QTd28n6v+WZxcIbekN
+1iNQMLAVdBM+5S//Ds3EC0pd8NgAM0lm66EYfFkuPSi5YXHLtaW6uOrc4nBvCGrc
+h2c0798wct3zyT8j/zXhviEpIDCB5BmlIOklynMxdCm+4kLV87ImZsdo/Rmz5yCT
+mehd4F6H50boJZwKKSTUzViGUkAksnsPmBIgJPaQbEfIDbsYIC7Z/fyL8inqh3SV
+4EJQeIQEQWGw9CEjjy3LKCHyamz0GqbFFLQ3ZU+V/YDI+HLlJWvEYLF7bY5KinPO
+WftwenMGE9nTdDckQQoRb5fc5+R+ob0V8rqHDz1oihYHAgMBAAGjYzBhMA4GA1Ud
+DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSowcCbkahDFXxd
+Bie0KlHYlwuBsTAfBgNVHSMEGDAWgBSowcCbkahDFXxdBie0KlHYlwuBsTANBgkq
+hkiG9w0BAQ0FAAOCAgEAnFZvAX7RvUz1isbwJh/k4DgYzDLDKTudQSk0YcbX8ACh
+66Ryj5QXvBMsdbRX7gp8CXrc1cqh0DQT+Hern+X+2B50ioUHj3/MeXrKls3N/U/7
+/SMNkPX0XtPGYX2eEeAC7gkE2Qfdpoq3DIMku4NQkv5gdRE+2J2winq14J2by5BS
+S7CTKtQ+FjPlnsZlFT5kOwQ/2wyPX1wdaR+v8+khjPPvl/aatxm2hHSco1S1cE5j
+2FddUyGbQJJD+tZ3VTNPZNX70Cxqjm0lpu+F6ALEUz65noe8zDUa3qHpimOHZR4R
+Kttjd5cUvpoUmRGywO6wT/gUITJDT5+rosuoD6o7BlXGEilXCNQ314cnrUlZp5Gr
+RHpejXDbl85IULFzk/bwg2D5zfHhMf1bfHEhYxQUqq/F3pN+aLHsIqKqkHWetUNy
+6mSjhEv9DKgma3GX7lZjZuhCVPnHHd/Qj1vfyDBviP4NxDMcU6ij/UgQ8uQKTuEV
+V/xuZDDCVRHc6qnNSlSsKWNEz0pAoNZoWRsz+e86i9sgktxChL8Bq4fA1SCC28a5
+g4VCXA9DO2pJNdWY9BW/+mGBDAkgGNLQFwzLSABQ6XaCjGTXOqAHVcweMcDvOrRl
+++O/QmueD6i9a5jc2NvLi6Td11n0bt3+qsOR0C5CB8AMTVPNJLFMWx5R9N/pkvo=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certplus Root CA G2 O=Certplus
+# Subject: CN=Certplus Root CA G2 O=Certplus
+# Label: "Certplus Root CA G2"
+# Serial: 1492087096131536844209563509228951875861589
+# MD5 Fingerprint: a7:ee:c4:78:2d:1b:ee:2d:b9:29:ce:d6:a7:96:32:31
+# SHA1 Fingerprint: 4f:65:8e:1f:e9:06:d8:28:02:e9:54:47:41:c9:54:25:5d:69:cc:1a
+# SHA256 Fingerprint: 6c:c0:50:41:e6:44:5e:74:69:6c:4c:fb:c9:f8:0f:54:3b:7e:ab:bb:44:b4:ce:6f:78:7c:6a:99:71:c4:2f:17
+-----BEGIN CERTIFICATE-----
+MIICHDCCAaKgAwIBAgISESDZkc6uo+jF5//pAq/Pc7xVMAoGCCqGSM49BAMDMD4x
+CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs
+dXMgUm9vdCBDQSBHMjAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4x
+CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs
+dXMgUm9vdCBDQSBHMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABM0PW1aC3/BFGtat
+93nwHcmsltaeTpwftEIRyoa/bfuFo8XlGVzX7qY/aWfYeOKmycTbLXku54uNAm8x
+Ik0G42ByRZ0OQneezs/lf4WbGOT8zC5y0xaTTsqZY1yhBSpsBqNjMGEwDgYDVR0P
+AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNqDYwJ5jtpMxjwj
+FNiPwyCrKGBZMB8GA1UdIwQYMBaAFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMAoGCCqG
+SM49BAMDA2gAMGUCMHD+sAvZ94OX7PNVHdTcswYO/jOYnYs5kGuUIe22113WTNch
+p+e/IQ8rzfcq3IUHnQIxAIYUFuXcsGXCwI4Un78kFmjlvPl5adytRSv3tjFzzAal
+U5ORGpOucGpnutee5WEaXw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=OpenTrust Root CA G1 O=OpenTrust
+# Subject: CN=OpenTrust Root CA G1 O=OpenTrust
+# Label: "OpenTrust Root CA G1"
+# Serial: 1492036577811947013770400127034825178844775
+# MD5 Fingerprint: 76:00:cc:81:29:cd:55:5e:88:6a:7a:2e:f7:4d:39:da
+# SHA1 Fingerprint: 79:91:e8:34:f7:e2:ee:dd:08:95:01:52:e9:55:2d:14:e9:58:d5:7e
+# SHA256 Fingerprint: 56:c7:71:28:d9:8c:18:d9:1b:4c:fd:ff:bc:25:ee:91:03:d4:75:8e:a2:ab:ad:82:6a:90:f3:45:7d:46:0e:b4
+-----BEGIN CERTIFICATE-----
+MIIFbzCCA1egAwIBAgISESCzkFU5fX82bWTCp59rY45nMA0GCSqGSIb3DQEBCwUA
+MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w
+ZW5UcnVzdCBSb290IENBIEcxMB4XDTE0MDUyNjA4NDU1MFoXDTM4MDExNTAwMDAw
+MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU
+T3BlblRydXN0IFJvb3QgQ0EgRzEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQD4eUbalsUwXopxAy1wpLuwxQjczeY1wICkES3d5oeuXT2R0odsN7faYp6b
+wiTXj/HbpqbfRm9RpnHLPhsxZ2L3EVs0J9V5ToybWL0iEA1cJwzdMOWo010hOHQX
+/uMftk87ay3bfWAfjH1MBcLrARYVmBSO0ZB3Ij/swjm4eTrwSSTilZHcYTSSjFR0
+77F9jAHiOH3BX2pfJLKOYheteSCtqx234LSWSE9mQxAGFiQD4eCcjsZGT44ameGP
+uY4zbGneWK2gDqdkVBFpRGZPTBKnjix9xNRbxQA0MMHZmf4yzgeEtE7NCv82TWLx
+p2NX5Ntqp66/K7nJ5rInieV+mhxNaMbBGN4zK1FGSxyO9z0M+Yo0FMT7MzUj8czx
+Kselu7Cizv5Ta01BG2Yospb6p64KTrk5M0ScdMGTHPjgniQlQ/GbI4Kq3ywgsNw2
+TgOzfALU5nsaqocTvz6hdLubDuHAk5/XpGbKuxs74zD0M1mKB3IDVedzagMxbm+W
+G+Oin6+Sx+31QrclTDsTBM8clq8cIqPQqwWyTBIjUtz9GVsnnB47ev1CI9sjgBPw
+vFEVVJSmdz7QdFG9URQIOTfLHzSpMJ1ShC5VkLG631UAC9hWLbFJSXKAqWLXwPYY
+EQRVzXR7z2FwefR7LFxckvzluFqrTJOVoSfupb7PcSNCupt2LQIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUl0YhVyE1
+2jZVx/PxN3DlCPaTKbYwHwYDVR0jBBgwFoAUl0YhVyE12jZVx/PxN3DlCPaTKbYw
+DQYJKoZIhvcNAQELBQADggIBAB3dAmB84DWn5ph76kTOZ0BP8pNuZtQ5iSas000E
+PLuHIT839HEl2ku6q5aCgZG27dmxpGWX4m9kWaSW7mDKHyP7Rbr/jyTwyqkxf3kf
+gLMtMrpkZ2CvuVnN35pJ06iCsfmYlIrM4LvgBBuZYLFGZdwIorJGnkSI6pN+VxbS
+FXJfLkur1J1juONI5f6ELlgKn0Md/rcYkoZDSw6cMoYsYPXpSOqV7XAp8dUv/TW0
+V8/bhUiZucJvbI/NeJWsZCj9VrDDb8O+WVLhX4SPgPL0DTatdrOjteFkdjpY3H1P
+XlZs5VVZV6Xf8YpmMIzUUmI4d7S+KNfKNsSbBfD4Fdvb8e80nR14SohWZ25g/4/I
+i+GOvUKpMwpZQhISKvqxnUOOBZuZ2mKtVzazHbYNeS2WuOvyDEsMpZTGMKcmGS3t
+TAZQMPH9WD25SxdfGbRqhFS0OE85og2WaMMolP3tLR9Ka0OWLpABEPs4poEL0L91
+09S5zvE/bw4cHjdx5RiHdRk/ULlepEU0rbDK5uUTdg8xFKmOLZTW1YVNcxVPS/Ky
+Pu1svf0OnWZzsD2097+o4BGkxK51CUpjAEggpsadCwmKtODmzj7HPiY46SvepghJ
+AwSQiumPv+i2tCqjI40cHLI5kqiPAlxAOXXUc0ECd97N4EOH1uS6SsNsEn/+KuYj
+1oxx
+-----END CERTIFICATE-----
+
+# Issuer: CN=OpenTrust Root CA G2 O=OpenTrust
+# Subject: CN=OpenTrust Root CA G2 O=OpenTrust
+# Label: "OpenTrust Root CA G2"
+# Serial: 1492012448042702096986875987676935573415441
+# MD5 Fingerprint: 57:24:b6:59:24:6b:ae:c8:fe:1c:0c:20:f2:c0:4e:eb
+# SHA1 Fingerprint: 79:5f:88:60:c5:ab:7c:3d:92:e6:cb:f4:8d:e1:45:cd:11:ef:60:0b
+# SHA256 Fingerprint: 27:99:58:29:fe:6a:75:15:c1:bf:e8:48:f9:c4:76:1d:b1:6c:22:59:29:25:7b:f4:0d:08:94:f2:9e:a8:ba:f2
+-----BEGIN CERTIFICATE-----
+MIIFbzCCA1egAwIBAgISESChaRu/vbm9UpaPI+hIvyYRMA0GCSqGSIb3DQEBDQUA
+MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w
+ZW5UcnVzdCBSb290IENBIEcyMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAw
+MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU
+T3BlblRydXN0IFJvb3QgQ0EgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQDMtlelM5QQgTJT32F+D3Y5z1zCU3UdSXqWON2ic2rxb95eolq5cSG+Ntmh
+/LzubKh8NBpxGuga2F8ORAbtp+Dz0mEL4DKiltE48MLaARf85KxP6O6JHnSrT78e
+CbY2albz4e6WiWYkBuTNQjpK3eCasMSCRbP+yatcfD7J6xcvDH1urqWPyKwlCm/6
+1UWY0jUJ9gNDlP7ZvyCVeYCYitmJNbtRG6Q3ffyZO6v/v6wNj0OxmXsWEH4db0fE
+FY8ElggGQgT4hNYdvJGmQr5J1WqIP7wtUdGejeBSzFfdNTVY27SPJIjki9/ca1TS
+gSuyzpJLHB9G+h3Ykst2Z7UJmQnlrBcUVXDGPKBWCgOz3GIZ38i1MH/1PCZ1Eb3X
+G7OHngevZXHloM8apwkQHZOJZlvoPGIytbU6bumFAYueQ4xncyhZW+vj3CzMpSZy
+YhK05pyDRPZRpOLAeiRXyg6lPzq1O4vldu5w5pLeFlwoW5cZJ5L+epJUzpM5ChaH
+vGOz9bGTXOBut9Dq+WIyiET7vycotjCVXRIouZW+j1MY5aIYFuJWpLIsEPUdN6b4
+t/bQWVyJ98LVtZR00dX+G7bw5tYee9I8y6jj9RjzIR9u701oBnstXW5DiabA+aC/
+gh7PU3+06yzbXfZqfUAkBXKJOAGTy3HCOV0GEfZvePg3DTmEJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUajn6QiL3
+5okATV59M4PLuG53hq8wHwYDVR0jBBgwFoAUajn6QiL35okATV59M4PLuG53hq8w
+DQYJKoZIhvcNAQENBQADggIBAJjLq0A85TMCl38th6aP1F5Kr7ge57tx+4BkJamz
+Gj5oXScmp7oq4fBXgwpkTx4idBvpkF/wrM//T2h6OKQQbA2xx6R3gBi2oihEdqc0
+nXGEL8pZ0keImUEiyTCYYW49qKgFbdEfwFFEVn8nNQLdXpgKQuswv42hm1GqO+qT
+RmTFAHneIWv2V6CG1wZy7HBGS4tz3aAhdT7cHcCP009zHIXZ/n9iyJVvttN7jLpT
+wm+bREx50B1ws9efAvSyB7DH5fitIw6mVskpEndI2S9G/Tvw/HRwkqWOOAgfZDC2
+t0v7NqwQjqBSM2OdAzVWxWm9xiNaJ5T2pBL4LTM8oValX9YZ6e18CL13zSdkzJTa
+TkZQh+D5wVOAHrut+0dSixv9ovneDiK3PTNZbNTe9ZUGMg1RGUFcPk8G97krgCf2
+o6p6fAbhQ8MTOWIaNr3gKC6UAuQpLmBVrkA9sHSSXvAgZJY/X0VdiLWK2gKgW0VU
+3jg9CcCoSmVGFvyqv1ROTVu+OEO3KMqLM6oaJbolXCkvW0pujOotnCr2BXbgd5eA
+iN1nE28daCSLT7d0geX0YJ96Vdc+N9oWaz53rK4YcJUIeSkDiv7BO7M/Gg+kO14f
+WKGVyasvc0rQLW6aWQ9VGHgtPFGml4vmu7JwqkwR3v98KzfUetF3NI/n+UL3PIEM
+S1IK
+-----END CERTIFICATE-----
+
+# Issuer: CN=OpenTrust Root CA G3 O=OpenTrust
+# Subject: CN=OpenTrust Root CA G3 O=OpenTrust
+# Label: "OpenTrust Root CA G3"
+# Serial: 1492104908271485653071219941864171170455615
+# MD5 Fingerprint: 21:37:b4:17:16:92:7b:67:46:70:a9:96:d7:a8:13:24
+# SHA1 Fingerprint: 6e:26:64:f3:56:bf:34:55:bf:d1:93:3f:7c:01:de:d8:13:da:8a:a6
+# SHA256 Fingerprint: b7:c3:62:31:70:6e:81:07:8c:36:7c:b8:96:19:8f:1e:32:08:dd:92:69:49:dd:8f:57:09:a4:10:f7:5b:62:92
+-----BEGIN CERTIFICATE-----
+MIICITCCAaagAwIBAgISESDm+Ez8JLC+BUCs2oMbNGA/MAoGCCqGSM49BAMDMEAx
+CzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5U
+cnVzdCBSb290IENBIEczMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFow
+QDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwUT3Bl
+blRydXN0IFJvb3QgQ0EgRzMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARK7liuTcpm
+3gY6oxH84Bjwbhy6LTAMidnW7ptzg6kjFYwvWYpa3RTqnVkrQ7cG7DK2uu5Bta1d
+oYXM6h0UZqNnfkbilPPntlahFVmhTzeXuSIevRHr9LIfXsMUmuXZl5mjYzBhMA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRHd8MUi2I5
+DMlv4VBN0BBY3JWIbTAfBgNVHSMEGDAWgBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAK
+BggqhkjOPQQDAwNpADBmAjEAj6jcnboMBBf6Fek9LykBl7+BFjNAk2z8+e2AcG+q
+j9uEwov1NcoG3GRvaBbhj5G5AjEA2Euly8LQCGzpGPta3U1fJAuwACEl74+nBCZx
+4nxp5V2a+EEfOzmTk51V6s2N8fvB
+-----END CERTIFICATE-----
+
+# Issuer: CN=ISRG Root X1 O=Internet Security Research Group
+# Subject: CN=ISRG Root X1 O=Internet Security Research Group
+# Label: "ISRG Root X1"
+# Serial: 172886928669790476064670243504169061120
+# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e
+# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8
+# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
+
+# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Label: "AC RAIZ FNMT-RCM"
+# Serial: 485876308206448804701554682760554759
+# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d
+# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20
+# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx
+CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ
+WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ
+BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG
+Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/
+yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf
+BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz
+WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF
+tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z
+374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC
+IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL
+mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7
+wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS
+MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2
+ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet
+UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H
+YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3
+LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD
+nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1
+RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM
+LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf
+77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N
+JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm
+fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp
+6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp
+1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B
+9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok
+RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv
+uu8wd+RU4riEmViAqhOLUTpPSPaLtrM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 1 O=Amazon
+# Subject: CN=Amazon Root CA 1 O=Amazon
+# Label: "Amazon Root CA 1"
+# Serial: 143266978916655856878034712317230054538369994
+# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6
+# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16
+# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 2 O=Amazon
+# Subject: CN=Amazon Root CA 2 O=Amazon
+# Label: "Amazon Root CA 2"
+# Serial: 143266982885963551818349160658925006970653239
+# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66
+# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a
+# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK
+gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ
+W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg
+1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K
+8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r
+2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me
+z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR
+8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj
+mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz
+7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6
++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI
+0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB
+Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm
+UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2
+LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS
+k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl
+7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm
+btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl
+urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+
+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63
+n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE
+76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H
+9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT
+4PsJYGw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 3 O=Amazon
+# Subject: CN=Amazon Root CA 3 O=Amazon
+# Label: "Amazon Root CA 3"
+# Serial: 143266986699090766294700635381230934788665930
+# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87
+# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e
+# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4
+-----BEGIN CERTIFICATE-----
+MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl
+ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr
+ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr
+BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM
+YyRIHN8wfdVoOw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 4 O=Amazon
+# Subject: CN=Amazon Root CA 4 O=Amazon
+# Label: "Amazon Root CA 4"
+# Serial: 143266989758080763974105200630763877849284878
+# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd
+# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be
+# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92
+-----BEGIN CERTIFICATE-----
+MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi
+9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk
+M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB
+MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw
+CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
+1KyLa2tJElMzrdfkviT8tQp21KW8EA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
+# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
+# Label: "LuxTrust Global Root 2"
+# Serial: 59914338225734147123941058376788110305822489521
+# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c
+# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f
+# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5
+-----BEGIN CERTIFICATE-----
+MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL
+BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV
+BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw
+MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B
+LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F
+ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem
+hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1
+EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn
+Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4
+zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ
+96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m
+j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g
+DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+
+8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j
+X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH
+hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB
+KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0
+Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT
++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL
+BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9
+BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO
+jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9
+loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c
+qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+
+2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/
+JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre
+zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf
+LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+
+x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6
+oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr
+-----END CERTIFICATE-----
+
+# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
+# Serial: 1
+# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49
+# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca
+# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16
+-----BEGIN CERTIFICATE-----
+MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx
+GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp
+bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w
+KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0
+BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy
+dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG
+EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll
+IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU
+QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT
+TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg
+LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7
+a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr
+LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X
+YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/
+iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f
+AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH
+V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
+AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf
+IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4
+lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c
+8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf
+lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Label: "GDCA TrustAUTH R5 ROOT"
+# Serial: 9009899650740120186
+# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4
+# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4
+# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93
+-----BEGIN CERTIFICATE-----
+MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE
+BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ
+IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0
+MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV
+BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w
+HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj
+Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj
+TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u
+KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj
+qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm
+MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12
+ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP
+zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk
+L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC
+jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA
+HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC
+AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg
+p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm
+DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5
+COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry
+L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf
+JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg
+IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io
+2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV
+09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ
+XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq
+T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe
+MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Label: "TrustCor RootCert CA-1"
+# Serial: 15752444095811006489
+# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45
+# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a
+# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD
+VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk
+MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
+cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y
+IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB
+pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h
+IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG
+A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU
+cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid
+RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V
+seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme
+9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV
+EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW
+hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/
+DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD
+ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I
+/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf
+ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ
+yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts
+L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN
+zl/HHk484IkzlQsPpTLWPFp5LBk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Label: "TrustCor RootCert CA-2"
+# Serial: 2711694510199101698
+# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64
+# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0
+# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65
+-----BEGIN CERTIFICATE-----
+MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV
+BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw
+IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy
+dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig
+Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk
+MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg
+Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD
+VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy
+dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+
+QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq
+1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp
+2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK
+DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape
+az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF
+3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88
+oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM
+g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3
+mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh
+8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd
+BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U
+nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw
+DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX
+dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+
+MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL
+/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX
+CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa
+ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW
+2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7
+N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3
+Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB
+As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp
+5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu
+1uwJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Label: "TrustCor ECA-1"
+# Serial: 9548242946988625984
+# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c
+# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd
+# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD
+VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk
+MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
+cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y
+IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV
+BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw
+IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy
+dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig
+RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb
+3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA
+BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5
+3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou
+owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/
+wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF
+ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf
+BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/
+MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv
+civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2
+AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F
+hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50
+soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI
+WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi
+tJ/X5g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Label: "SSL.com Root Certification Authority RSA"
+# Serial: 8875640296558310041
+# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29
+# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb
+# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69
+-----BEGIN CERTIFICATE-----
+MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE
+BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK
+DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz
+OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv
+bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R
+xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX
+qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC
+C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3
+6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh
+/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF
+YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E
+JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc
+US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8
+ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi
+M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G
+A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV
+cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc
+Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs
+PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/
+q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0
+cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr
+a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I
+H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y
+K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu
+nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf
+oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY
+Ic2wBlX7Jz9TkHCpBB5XJ7k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com Root Certification Authority ECC"
+# Serial: 8495723813297216424
+# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e
+# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a
+# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65
+-----BEGIN CERTIFICATE-----
+MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz
+WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0
+b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS
+b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI
+7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg
+CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud
+EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD
+VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T
+kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+
+gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority RSA R2"
+# Serial: 6248227494352943350
+# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95
+# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a
+# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c
+-----BEGIN CERTIFICATE-----
+MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV
+BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE
+CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy
+MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G
+A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD
+DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf
+OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa
+4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9
+HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR
+aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA
+b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ
+Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV
+PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO
+pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu
+UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY
+MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV
+HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4
+9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW
+s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5
+Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg
+cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM
+79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz
+/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt
+ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm
+Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK
+QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ
+w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi
+S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07
+mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority ECC"
+# Serial: 3182246526754555285
+# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90
+# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d
+# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8
+-----BEGIN CERTIFICATE-----
+MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx
+NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv
+bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA
+VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku
+WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP
+MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX
+5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ
+ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg
+h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg==
+-----END CERTIFICATE-----
diff --git a/tools/third_party/certifi/certifi/core.py b/tools/third_party/certifi/certifi/core.py
new file mode 100644
index 0000000..eab9d1d
--- /dev/null
+++ b/tools/third_party/certifi/certifi/core.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+certifi.py
+~~~~~~~~~~
+
+This module returns the installation location of cacert.pem.
+"""
+import os
+import warnings
+
+
+class DeprecatedBundleWarning(DeprecationWarning):
+    """
+    The weak security bundle is being deprecated. Please bother your service
+    provider to get them to stop using cross-signed roots.
+    """
+
+
+def where():
+    f = os.path.dirname(__file__)
+
+    return os.path.join(f, 'cacert.pem')
+
+
+def old_where():
+    warnings.warn(
+        "The weak security bundle has been removed. certifi.old_where() is now an alias "
+        "of certifi.where(). Please update your code to use certifi.where() instead. "
+        "certifi.old_where() will be removed in 2018.",
+        DeprecatedBundleWarning
+    )
+    return where()
+
+if __name__ == '__main__':
+    print(where())
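+
+# Usage sketch (illustrative, not part of upstream certifi): the path returned
+# by where() can be passed straight to the standard library's ssl module.
+#
+#     import ssl
+#     import certifi
+#     ctx = ssl.create_default_context(cafile=certifi.where())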
diff --git a/tools/third_party/certifi/setup.cfg b/tools/third_party/certifi/setup.cfg
new file mode 100644
index 0000000..163eba3
--- /dev/null
+++ b/tools/third_party/certifi/setup.cfg
@@ -0,0 +1,11 @@
+[bdist_wheel]
+universal = 1
+
+[metadata]
+license_file = LICENSE
+
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/tools/third_party/certifi/setup.py b/tools/third_party/certifi/setup.py
new file mode 100755
index 0000000..2c20c26
--- /dev/null
+++ b/tools/third_party/certifi/setup.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from __future__ import with_statement
+import re
+import os
+import sys
+
+# While I generally consider it an antipattern to try and support both
+# setuptools and distutils with a single setup.py, in this specific instance
+# where certifi is a dependency of setuptools, it can create a circular
+# dependency when projects attempt to unbundle stuff from setuptools and pip.
+# Though we don't really support that, it makes things easier if we do this and
+# should hopefully cause fewer issues for end users.
+try:
+    from setuptools import setup
+except ImportError:
+    from distutils.core import setup
+
+
+version_regex = r'__version__ = ["\']([^"\']*)["\']'
+with open('certifi/__init__.py', 'r') as f:
+    text = f.read()
+    match = re.search(version_regex, text)
+
+    if match:
+        VERSION = match.group(1)
+    else:
+        raise RuntimeError("No version number found!")
+
+if sys.argv[-1] == 'publish':
+    os.system('python setup.py sdist bdist_wheel upload')
+    sys.exit()
+
+required = []
+setup(
+    name='certifi',
+    version=VERSION,
+    description='Python package for providing Mozilla\'s CA Bundle.',
+    long_description=open('README.rst').read(),
+    author='Kenneth Reitz',
+    author_email='me@kennethreitz.com',
+    url='http://certifi.io/',
+    packages=[
+        'certifi',
+    ],
+    package_dir={'certifi': 'certifi'},
+    package_data={'certifi': ['*.pem']},
+    # data_files=[('certifi', ['certifi/cacert.pem'])],
+    include_package_data=True,
+    zip_safe=False,
+    license='MPL-2.0',
+    classifiers=(
+        'Development Status :: 5 - Production/Stable',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
+        'Natural Language :: English',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.6',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+    ),
+)
diff --git a/tools/third_party/enum/MANIFEST.in b/tools/third_party/enum/MANIFEST.in
new file mode 100644
index 0000000..98fe77f
--- /dev/null
+++ b/tools/third_party/enum/MANIFEST.in
@@ -0,0 +1,9 @@
+exclude enum/*
+include setup.py
+include README
+include enum/__init__.py
+include enum/test.py
+include enum/LICENSE
+include enum/README
+include enum/doc/enum.pdf
+include enum/doc/enum.rst
diff --git a/tools/third_party/enum/PKG-INFO b/tools/third_party/enum/PKG-INFO
new file mode 100644
index 0000000..98927c4
--- /dev/null
+++ b/tools/third_party/enum/PKG-INFO
@@ -0,0 +1,62 @@
+Metadata-Version: 1.1
+Name: enum34
+Version: 1.1.6
+Summary: Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4
+Home-page: https://bitbucket.org/stoneleaf/enum34
+Author: Ethan Furman
+Author-email: ethan@stoneleaf.us
+License: BSD License
+Description: enum --- support for enumerations
+        ========================================
+        
+        An enumeration is a set of symbolic names (members) bound to unique, constant
+        values.  Within an enumeration, the members can be compared by identity, and
+        the enumeration itself can be iterated over.
+        
+            from enum import Enum
+        
+            class Fruit(Enum):
+                apple = 1
+                banana = 2
+                orange = 3
+        
+            list(Fruit)
+            # [<Fruit.apple: 1>, <Fruit.banana: 2>, <Fruit.orange: 3>]
+        
+            len(Fruit)
+            # 3
+        
+            Fruit.banana
+            # <Fruit.banana: 2>
+        
+            Fruit['banana']
+            # <Fruit.banana: 2>
+        
+            Fruit(2)
+            # <Fruit.banana: 2>
+        
+            Fruit.banana is Fruit['banana'] is Fruit(2)
+            # True
+        
+            Fruit.banana.name
+            # 'banana'
+        
+            Fruit.banana.value
+            # 2
+        
+        Repository and Issue Tracker at https://bitbucket.org/stoneleaf/enum34.
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Provides: enum
diff --git a/tools/third_party/enum/README b/tools/third_party/enum/README
new file mode 100644
index 0000000..aa2333d
--- /dev/null
+++ b/tools/third_party/enum/README
@@ -0,0 +1,3 @@
+enum34 is the new Python stdlib enum module available in Python 3.4
+backported for previous versions of Python from 2.4 to 3.3.
+tested on 2.6, 2.7, and 3.3+
diff --git a/tools/third_party/enum/enum/LICENSE b/tools/third_party/enum/enum/LICENSE
new file mode 100644
index 0000000..9003b88
--- /dev/null
+++ b/tools/third_party/enum/enum/LICENSE
@@ -0,0 +1,32 @@
+Copyright (c) 2013, Ethan Furman.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+    Redistributions of source code must retain the above
+    copyright notice, this list of conditions and the
+    following disclaimer.
+
+    Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following
+    disclaimer in the documentation and/or other materials
+    provided with the distribution.
+
+    Neither the name Ethan Furman nor the names of any
+    contributors may be used to endorse or promote products
+    derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/tools/third_party/enum/enum/README b/tools/third_party/enum/enum/README
new file mode 100644
index 0000000..aa2333d
--- /dev/null
+++ b/tools/third_party/enum/enum/README
@@ -0,0 +1,3 @@
+enum34 is the new Python stdlib enum module available in Python 3.4
+backported for previous versions of Python from 2.4 to 3.3.
+tested on 2.6, 2.7, and 3.3+
diff --git a/tools/third_party/enum/enum/__init__.py b/tools/third_party/enum/enum/__init__.py
new file mode 100644
index 0000000..d6ffb3a
--- /dev/null
+++ b/tools/third_party/enum/enum/__init__.py
@@ -0,0 +1,837 @@
+"""Python Enumerations"""
+
+import sys as _sys
+
+__all__ = ['Enum', 'IntEnum', 'unique']
+
+version = 1, 1, 6
+
+pyver = float('%s.%s' % _sys.version_info[:2])
+
+try:
+    any
+except NameError:
+    def any(iterable):
+        for element in iterable:
+            if element:
+                return True
+        return False
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    OrderedDict = None
+
+try:
+    basestring
+except NameError:
+    # In Python 2 basestring is the ancestor of both str and unicode
+    # in Python 3 it's just str, but was missing in 3.1
+    basestring = str
+
+try:
+    unicode
+except NameError:
+    # In Python 3 unicode no longer exists (it's just str)
+    unicode = str
+
+class _RouteClassAttributeToGetattr(object):
+    """Route attribute access on a class to __getattr__.
+
+    This is a descriptor, used to define attributes that act differently when
+    accessed through an instance and through a class.  Instance access remains
+    normal, but access to an attribute through a class will be routed to the
+    class's __getattr__ method; this is done by raising AttributeError.
+
+    """
+    def __init__(self, fget=None):
+        self.fget = fget
+
+    def __get__(self, instance, ownerclass=None):
+        if instance is None:
+            raise AttributeError()
+        return self.fget(instance)
+
+    def __set__(self, instance, value):
+        raise AttributeError("can't set attribute")
+
+    def __delete__(self, instance):
+        raise AttributeError("can't delete attribute")
+
+
+def _is_descriptor(obj):
+    """Returns True if obj is a descriptor, False otherwise."""
+    return (
+            hasattr(obj, '__get__') or
+            hasattr(obj, '__set__') or
+            hasattr(obj, '__delete__'))
+
+
+def _is_dunder(name):
+    """Returns True if a __dunder__ name, False otherwise."""
+    return (name[:2] == name[-2:] == '__' and
+            name[2:3] != '_' and
+            name[-3:-2] != '_' and
+            len(name) > 4)
+
+
+def _is_sunder(name):
+    """Returns True if a _sunder_ name, False otherwise."""
+    return (name[0] == name[-1] == '_' and
+            name[1:2] != '_' and
+            name[-2:-1] != '_' and
+            len(name) > 2)
+
+
+def _make_class_unpicklable(cls):
+    """Make the given class un-picklable."""
+    def _break_on_call_reduce(self, protocol=None):
+        raise TypeError('%r cannot be pickled' % self)
+    cls.__reduce_ex__ = _break_on_call_reduce
+    cls.__module__ = '<unknown>'
+
+
+class _EnumDict(dict):
+    """Track enum member order and ensure member names are not reused.
+
+    EnumMeta will use the names found in self._member_names as the
+    enumeration member names.
+
+    """
+    def __init__(self):
+        super(_EnumDict, self).__init__()
+        self._member_names = []
+
+    def __setitem__(self, key, value):
+        """Changes anything not dundered or not a descriptor.
+
+        If a descriptor is added with the same name as an enum member, the name
+        is removed from _member_names (this may leave a hole in the numerical
+        sequence of values).
+
+        If an enum member name is used twice, an error is raised; duplicate
+        values are not checked for.
+
+        Single underscore (sunder) names are reserved.
+
+        Note:   in 3.x __order__ is simply discarded as an unnecessary
+                leftover from 2.x
+
+        """
+        if pyver >= 3.0 and key in ('_order_', '__order__'):
+            return
+        elif key == '__order__':
+            key = '_order_'
+        if _is_sunder(key):
+            if key != '_order_':
+                raise ValueError('_names_ are reserved for future Enum use')
+        elif _is_dunder(key):
+            pass
+        elif key in self._member_names:
+            # descriptor overwriting an enum?
+            raise TypeError('Attempted to reuse key: %r' % key)
+        elif not _is_descriptor(value):
+            if key in self:
+                # enum overwriting a descriptor?
+                raise TypeError('Key already defined as: %r' % self[key])
+            self._member_names.append(key)
+        super(_EnumDict, self).__setitem__(key, value)
+
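+# Illustrative example of the reuse rule above (enforced on Python 3, where
+# EnumMeta.__prepare__ supplies this dict as the class namespace):
+#
+#     class Color(Enum):
+#         red = 1
+#         red = 2    # -> TypeError: Attempted to reuse key: 'red'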
+
+# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
+# EnumMeta finishes running the first time the Enum class doesn't exist.  This
+# is also why there are checks in EnumMeta like `if Enum is not None`
+Enum = None
+
+
+class EnumMeta(type):
+    """Metaclass for Enum"""
+    @classmethod
+    def __prepare__(metacls, cls, bases):
+        return _EnumDict()
+
+    def __new__(metacls, cls, bases, classdict):
+        # an Enum class is final once enumeration items have been defined; it
+        # cannot be mixed with other types (int, float, etc.) if it has an
+        # inherited __new__ unless a new __new__ is defined (or the resulting
+        # class will fail).
+        if type(classdict) is dict:
+            original_dict = classdict
+            classdict = _EnumDict()
+            for k, v in original_dict.items():
+                classdict[k] = v
+
+        member_type, first_enum = metacls._get_mixins_(bases)
+        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
+                                                        first_enum)
+        # save enum items into separate mapping so they don't get baked into
+        # the new class
+        members = dict((k, classdict[k]) for k in classdict._member_names)
+        for name in classdict._member_names:
+            del classdict[name]
+
+        # py2 support for definition order
+        _order_ = classdict.get('_order_')
+        if _order_ is None:
+            if pyver < 3.0:
+                try:
+                    _order_ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
+                except TypeError:
+                    _order_ = [name for name in sorted(members.keys())]
+            else:
+                _order_ = classdict._member_names
+        else:
+            del classdict['_order_']
+            if pyver < 3.0:
+                _order_ = _order_.replace(',', ' ').split()
+                aliases = [name for name in members if name not in _order_]
+                _order_ += aliases
+
+        # check for illegal enum names (any others?)
+        invalid_names = set(members) & set(['mro'])
+        if invalid_names:
+            raise ValueError('Invalid enum member name(s): %s' % (
+                ', '.join(invalid_names), ))
+
+        # save attributes from super classes so we know if we can take
+        # the shortcut of storing members in the class dict
+        base_attributes = set([a for b in bases for a in b.__dict__])
+        # create our new Enum type
+        enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
+        enum_class._member_names_ = []               # names in random order
+        if OrderedDict is not None:
+            enum_class._member_map_ = OrderedDict()
+        else:
+            enum_class._member_map_ = {}             # name->value map
+        enum_class._member_type_ = member_type
+
+        # Reverse value->name map for hashable values.
+        enum_class._value2member_map_ = {}
+
+        # instantiate them, checking for duplicates as we go
+        # we instantiate first instead of checking for duplicates first in case
+        # a custom __new__ is doing something funky with the values -- such as
+        # auto-numbering ;)
+        if __new__ is None:
+            __new__ = enum_class.__new__
+        for member_name in _order_:
+            value = members[member_name]
+            if not isinstance(value, tuple):
+                args = (value, )
+            else:
+                args = value
+            if member_type is tuple:   # special case for tuple enums
+                args = (args, )     # wrap it one more time
+            if not use_args or not args:
+                enum_member = __new__(enum_class)
+                if not hasattr(enum_member, '_value_'):
+                    enum_member._value_ = value
+            else:
+                enum_member = __new__(enum_class, *args)
+                if not hasattr(enum_member, '_value_'):
+                    enum_member._value_ = member_type(*args)
+            value = enum_member._value_
+            enum_member._name_ = member_name
+            enum_member.__objclass__ = enum_class
+            enum_member.__init__(*args)
+            # If another member with the same value was already defined, the
+            # new member becomes an alias to the existing one.
+            for name, canonical_member in enum_class._member_map_.items():
+                if canonical_member.value == enum_member._value_:
+                    enum_member = canonical_member
+                    break
+            else:
+                # Aliases don't appear in member names (only in __members__).
+                enum_class._member_names_.append(member_name)
+            # performance boost for any member that would not shadow
+            # a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
+            if member_name not in base_attributes:
+                setattr(enum_class, member_name, enum_member)
+            # now add to _member_map_
+            enum_class._member_map_[member_name] = enum_member
+            try:
+                # This may fail if value is not hashable. We can't add the value
+                # to the map, and by-value lookups for this value will be
+                # linear.
+                enum_class._value2member_map_[value] = enum_member
+            except TypeError:
+                pass
+
+
+        # If a custom type is mixed into the Enum, and it does not know how
+        # to pickle itself, pickle.dumps will succeed but pickle.loads will
+        # fail.  Rather than have the error show up later and possibly far
+        # from the source, sabotage the pickle protocol for this class so
+        # that pickle.dumps also fails.
+        #
+        # However, if the new class implements its own __reduce_ex__, do not
+        # sabotage -- it's on them to make sure it works correctly.  We use
+        # __reduce_ex__ instead of any of the others as it is preferred by
+        # pickle over __reduce__, and it handles all pickle protocols.
+        unpicklable = False
+        if '__reduce_ex__' not in classdict:
+            if member_type is not object:
+                methods = ('__getnewargs_ex__', '__getnewargs__',
+                        '__reduce_ex__', '__reduce__')
+                if not any(m in member_type.__dict__ for m in methods):
+                    _make_class_unpicklable(enum_class)
+                    unpicklable = True
+
+
+        # double check that repr and friends are not the mixin's or various
+        # things break (such as pickle)
+        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
+            class_method = getattr(enum_class, name)
+            obj_method = getattr(member_type, name, None)
+            enum_method = getattr(first_enum, name, None)
+            if name not in classdict and class_method is not enum_method:
+                if name == '__reduce_ex__' and unpicklable:
+                    continue
+                setattr(enum_class, name, enum_method)
+
+        # method resolution and ints are not playing nicely;
+        # Python versions less than 2.6 use __cmp__
+
+        if pyver < 2.6:
+
+            if issubclass(enum_class, int):
+                setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
+
+        elif pyver < 3.0:
+
+            if issubclass(enum_class, int):
+                for method in (
+                        '__le__',
+                        '__lt__',
+                        '__gt__',
+                        '__ge__',
+                        '__eq__',
+                        '__ne__',
+                        '__hash__',
+                        ):
+                    setattr(enum_class, method, getattr(int, method))
+
+        # replace any other __new__ with our own (as long as Enum is not None,
+        # anyway) -- again, this is to support pickle
+        if Enum is not None:
+            # if the user defined their own __new__, save it before it gets
+            # clobbered in case they subclass later
+            if save_new:
+                setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
+            setattr(enum_class, '__new__', Enum.__dict__['__new__'])
+        return enum_class
+
+    def __bool__(cls):
+        """
+        classes/types should always be True.
+        """
+        return True
+
+    def __call__(cls, value, names=None, module=None, type=None, start=1):
+        """Either returns an existing member, or creates a new enum class.
+
+        This method is used both when an enum class is given a value to match
+        to an enumeration member (i.e. Color(3)) and for the functional API
+        (i.e. Color = Enum('Color', names='red green blue')).
+
+        When used for the functional API: `module`, if set, will be stored in
+        the new class' __module__ attribute; `type`, if set, will be mixed in
+        as the first base class.
+
+        Note: if `module` is not set this routine will attempt to discover the
+        calling module by walking the frame stack; if this is unsuccessful
+        the resulting class will not be pickleable.
+
+        """
+        if names is None:  # simple value lookup
+            return cls.__new__(cls, value)
+        # otherwise, functional API: we're creating a new Enum type
+        return cls._create_(value, names, module=module, type=type, start=start)
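+    # Both call forms, illustratively:
+    #     Color(3)                                  # member lookup by value
+    #     Color = Enum('Color', 'red green blue')   # functional API, values 1..3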
+
+    def __contains__(cls, member):
+        return isinstance(member, cls) and member.name in cls._member_map_
+
+    def __delattr__(cls, attr):
+        # nicer error message when someone tries to delete an attribute
+        # (see issue19025).
+        if attr in cls._member_map_:
+            raise AttributeError(
+                    "%s: cannot delete Enum member." % cls.__name__)
+        super(EnumMeta, cls).__delattr__(attr)
+
+    def __dir__(self):
+        return (['__class__', '__doc__', '__members__', '__module__'] +
+                self._member_names_)
+
+    @property
+    def __members__(cls):
+        """Returns a mapping of member name->value.
+
+        This mapping lists all enum members, including aliases. Note that this
+        is a copy of the internal mapping.
+
+        """
+        return cls._member_map_.copy()
+
+    def __getattr__(cls, name):
+        """Return the enum member matching `name`
+
+        We use __getattr__ instead of descriptors or inserting into the enum
+        class' __dict__ in order to support `name` and `value` being both
+        properties for enum members (which live in the class' __dict__) and
+        enum members themselves.
+
+        """
+        if _is_dunder(name):
+            raise AttributeError(name)
+        try:
+            return cls._member_map_[name]
+        except KeyError:
+            raise AttributeError(name)
+
+    def __getitem__(cls, name):
+        return cls._member_map_[name]
+
+    def __iter__(cls):
+        return (cls._member_map_[name] for name in cls._member_names_)
+
+    def __reversed__(cls):
+        return (cls._member_map_[name] for name in reversed(cls._member_names_))
+
+    def __len__(cls):
+        return len(cls._member_names_)
+
+    __nonzero__ = __bool__
+
+    def __repr__(cls):
+        return "<enum %r>" % cls.__name__
+
+    def __setattr__(cls, name, value):
+        """Block attempts to reassign Enum members.
+
+        A simple assignment to the class namespace only changes one of the
+        several possible ways to get an Enum member from the Enum class,
+        resulting in an inconsistent Enumeration.
+
+        """
+        member_map = cls.__dict__.get('_member_map_', {})
+        if name in member_map:
+            raise AttributeError('Cannot reassign members.')
+        super(EnumMeta, cls).__setattr__(name, value)
+
+    def _create_(cls, class_name, names=None, module=None, type=None, start=1):
+        """Convenience method to create a new Enum class.
+
+        `names` can be:
+
+        * A string containing member names, separated either with spaces or
+          commas.  Values are auto-numbered from 1.
+        * An iterable of member names.  Values are auto-numbered from 1.
+        * An iterable of (member name, value) pairs.
+        * A mapping of member name -> value.
+
+        """
+        if pyver < 3.0:
+            # if class_name is unicode, attempt a conversion to ASCII
+            if isinstance(class_name, unicode):
+                try:
+                    class_name = class_name.encode('ascii')
+                except UnicodeEncodeError:
+                    raise TypeError('%r is not representable in ASCII' % class_name)
+        metacls = cls.__class__
+        if type is None:
+            bases = (cls, )
+        else:
+            bases = (type, cls)
+        classdict = metacls.__prepare__(class_name, bases)
+        _order_ = []
+
+        # special processing needed for names?
+        if isinstance(names, basestring):
+            names = names.replace(',', ' ').split()
+        if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
+            names = [(e, i+start) for (i, e) in enumerate(names)]
+
+        # Here, names is either an iterable of (name, value) or a mapping.
+        item = None  # in case names is empty
+        for item in names:
+            if isinstance(item, basestring):
+                member_name, member_value = item, names[item]
+            else:
+                member_name, member_value = item
+            classdict[member_name] = member_value
+            _order_.append(member_name)
+        # only set _order_ in classdict if name/value was not from a mapping
+        if not isinstance(item, basestring):
+            classdict['_order_'] = ' '.join(_order_)
+        enum_class = metacls.__new__(metacls, class_name, bases, classdict)
+
+        # TODO: replace the frame hack if a blessed way to know the calling
+        # module is ever developed
+        if module is None:
+            try:
+                module = _sys._getframe(2).f_globals['__name__']
+            except (AttributeError, ValueError):
+                pass
+        if module is None:
+            _make_class_unpicklable(enum_class)
+        else:
+            enum_class.__module__ = module
+
+        return enum_class
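+    # Illustratively, each of these `names` forms builds the same four members
+    # (member order for the mapping form follows dict ordering):
+    #     Enum('Season', 'spring summer autumn winter')
+    #     Enum('Season', ['spring', 'summer', 'autumn', 'winter'])
+    #     Enum('Season', [('spring', 1), ('summer', 2), ('autumn', 3), ('winter', 4)])
+    #     Enum('Season', {'spring': 1, 'summer': 2, 'autumn': 3, 'winter': 4})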
+
+    @staticmethod
+    def _get_mixins_(bases):
+        """Returns the type for creating enum members, and the first inherited
+        enum class.
+
+        bases: the tuple of bases that was given to __new__
+
+        """
+        if not bases or Enum is None:
+            return object, Enum
+
+
+        # double check that we are not subclassing a class with existing
+        # enumeration members; while we're at it, see if any other data
+        # type has been mixed in so we can use the correct __new__
+        member_type = first_enum = None
+        for base in bases:
+            if  (base is not Enum and
+                    issubclass(base, Enum) and
+                    base._member_names_):
+                raise TypeError("Cannot extend enumerations")
+        # base is now the last base in bases
+        if not issubclass(base, Enum):
+            raise TypeError("new enumerations must be created as "
+                    "`ClassName([mixin_type,] enum_type)`")
+
+        # get correct mix-in type (either mix-in type of Enum subclass, or
+        # first base if last base is Enum)
+        if not issubclass(bases[0], Enum):
+            member_type = bases[0]     # first data type
+            first_enum = bases[-1]  # enum type
+        else:
+            for base in bases[0].__mro__:
+                # most common: (IntEnum, int, Enum, object)
+                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
+                #               <class 'int'>, <Enum 'Enum'>,
+                #               <class 'object'>)
+                if issubclass(base, Enum):
+                    if first_enum is None:
+                        first_enum = base
+                else:
+                    if member_type is None:
+                        member_type = base
+
+        return member_type, first_enum
+
+    if pyver < 3.0:
+        @staticmethod
+        def _find_new_(classdict, member_type, first_enum):
+            """Returns the __new__ to be used for creating the enum members.
+
+            classdict: the class dictionary given to __new__
+            member_type: the data type whose __new__ will be used by default
+            first_enum: enumeration to check for an overriding __new__
+
+            """
+            # now find the correct __new__, checking to see if one was defined
+            # by the user; also check earlier enum classes in case a __new__ was
+            # saved as __member_new__
+            __new__ = classdict.get('__new__', None)
+            if __new__:
+                return None, True, True      # __new__, save_new, use_args
+
+            N__new__ = getattr(None, '__new__')
+            O__new__ = getattr(object, '__new__')
+            if Enum is None:
+                E__new__ = N__new__
+            else:
+                E__new__ = Enum.__dict__['__new__']
+            # check all possibles for __member_new__ before falling back to
+            # __new__
+            for method in ('__member_new__', '__new__'):
+                for possible in (member_type, first_enum):
+                    try:
+                        target = possible.__dict__[method]
+                    except (AttributeError, KeyError):
+                        target = getattr(possible, method, None)
+                    if target not in [
+                            None,
+                            N__new__,
+                            O__new__,
+                            E__new__,
+                            ]:
+                        if method == '__member_new__':
+                            classdict['__new__'] = target
+                            return None, False, True
+                        if isinstance(target, staticmethod):
+                            target = target.__get__(member_type)
+                        __new__ = target
+                        break
+                if __new__ is not None:
+                    break
+            else:
+                __new__ = object.__new__
+
+            # if a non-object.__new__ is used then whatever value/tuple was
+            # assigned to the enum member name will be passed to __new__ and to the
+            # new enum member's __init__
+            if __new__ is object.__new__:
+                use_args = False
+            else:
+                use_args = True
+
+            return __new__, False, use_args
+    else:
+        @staticmethod
+        def _find_new_(classdict, member_type, first_enum):
+            """Returns the __new__ to be used for creating the enum members.
+
+            classdict: the class dictionary given to __new__
+            member_type: the data type whose __new__ will be used by default
+            first_enum: enumeration to check for an overriding __new__
+
+            """
+            # now find the correct __new__, checking to see if one was defined
+            # by the user; also check earlier enum classes in case a __new__ was
+            # saved as __member_new__
+            __new__ = classdict.get('__new__', None)
+
+            # should __new__ be saved as __member_new__ later?
+            save_new = __new__ is not None
+
+            if __new__ is None:
+                # check all possibles for __member_new__ before falling back to
+                # __new__
+                for method in ('__member_new__', '__new__'):
+                    for possible in (member_type, first_enum):
+                        target = getattr(possible, method, None)
+                        if target not in (
+                                None,
+                                None.__new__,
+                                object.__new__,
+                                Enum.__new__,
+                                ):
+                            __new__ = target
+                            break
+                    if __new__ is not None:
+                        break
+                else:
+                    __new__ = object.__new__
+
+            # if a non-object.__new__ is used then whatever value/tuple was
+            # assigned to the enum member name will be passed to __new__ and to the
+            # new enum member's __init__
+            if __new__ is object.__new__:
+                use_args = False
+            else:
+                use_args = True
+
+            return __new__, save_new, use_args
+
+
+########################################################
+# In order to support Python 2 and 3 with a single
+# codebase we have to create the Enum methods separately
+# and then use the `type(name, bases, dict)` method to
+# create the class.
+########################################################
+temp_enum_dict = {}
+temp_enum_dict['__doc__'] = "Generic enumeration.\n\n    Derive from this class to define new enumerations.\n\n"
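+
+# For reference, the three-argument form used at the bottom of this module is
+# the ordinary metaclass call; an illustrative standalone example:
+#     Greeting = type('Greeting', (object, ), {'hello': lambda self: 'hi'})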
+
+def __new__(cls, value):
+    # all enum instances are actually created during class construction
+    # without calling this method; this method is called by the metaclass'
+    # __call__ (i.e. Color(3) ), and by pickle
+    if type(value) is cls:
+        # For lookups like Color(Color.red)
+        value = value.value
+        #return value
+    # by-value search for a matching enum member
+    # see if it's in the reverse mapping (for hashable values)
+    try:
+        if value in cls._value2member_map_:
+            return cls._value2member_map_[value]
+    except TypeError:
+        # not there, now do long search -- O(n) behavior
+        for member in cls._member_map_.values():
+            if member.value == value:
+                return member
+    raise ValueError("%s is not a valid %s" % (value, cls.__name__))
+temp_enum_dict['__new__'] = __new__
+del __new__
+
+def __repr__(self):
+    return "<%s.%s: %r>" % (
+            self.__class__.__name__, self._name_, self._value_)
+temp_enum_dict['__repr__'] = __repr__
+del __repr__
+
+def __str__(self):
+    return "%s.%s" % (self.__class__.__name__, self._name_)
+temp_enum_dict['__str__'] = __str__
+del __str__
+
+if pyver >= 3.0:
+    def __dir__(self):
+        added_behavior = [
+                m
+                for cls in self.__class__.mro()
+                for m in cls.__dict__
+                if m[0] != '_' and m not in self._member_map_
+                ]
+        return (['__class__', '__doc__', '__module__', ] + added_behavior)
+    temp_enum_dict['__dir__'] = __dir__
+    del __dir__
+
+def __format__(self, format_spec):
+    # mixed-in Enums should use the mixed-in type's __format__, otherwise
+    # we can get strange results with the Enum name showing up instead of
+    # the value
+
+    # pure Enum branch
+    if self._member_type_ is object:
+        cls = str
+        val = str(self)
+    # mix-in branch
+    else:
+        cls = self._member_type_
+        val = self.value
+    return cls.__format__(val, format_spec)
+temp_enum_dict['__format__'] = __format__
+del __format__
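+# Illustratively (Color a plain Enum, Shape an IntEnum, both assumed):
+#     '{}'.format(Color.red)        # -> 'Color.red'  (pure Enum branch)
+#     '{:d}'.format(Shape.circle)   # -> '1' when Shape.circle has value 1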
+
+
+####################################
+# Python versions less than 2.6 use __cmp__
+
+if pyver < 2.6:
+
+    def __cmp__(self, other):
+        if type(other) is self.__class__:
+            if self is other:
+                return 0
+            return -1
+        return NotImplemented
+        raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
+    temp_enum_dict['__cmp__'] = __cmp__
+    del __cmp__
+
+else:
+
+    def __le__(self, other):
+        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
+    temp_enum_dict['__le__'] = __le__
+    del __le__
+
+    def __lt__(self, other):
+        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
+    temp_enum_dict['__lt__'] = __lt__
+    del __lt__
+
+    def __ge__(self, other):
+        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
+    temp_enum_dict['__ge__'] = __ge__
+    del __ge__
+
+    def __gt__(self, other):
+        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
+    temp_enum_dict['__gt__'] = __gt__
+    del __gt__
+
+
+def __eq__(self, other):
+    if type(other) is self.__class__:
+        return self is other
+    return NotImplemented
+temp_enum_dict['__eq__'] = __eq__
+del __eq__
+
+def __ne__(self, other):
+    if type(other) is self.__class__:
+        return self is not other
+    return NotImplemented
+temp_enum_dict['__ne__'] = __ne__
+del __ne__
+
+def __hash__(self):
+    return hash(self._name_)
+temp_enum_dict['__hash__'] = __hash__
+del __hash__
+
+def __reduce_ex__(self, proto):
+    return self.__class__, (self._value_, )
+temp_enum_dict['__reduce_ex__'] = __reduce_ex__
+del __reduce_ex__
+
+# _RouteClassAttributeToGetattr is used to provide access to the `name`
+# and `value` properties of enum members while keeping some measure of
+# protection from modification, while still allowing for an enumeration
+# to have members named `name` and `value`.  This works because enumeration
+# members are not set directly on the enum class -- __getattr__ is
+# used to look them up.
+
+@_RouteClassAttributeToGetattr
+def name(self):
+    return self._name_
+temp_enum_dict['name'] = name
+del name
+
+@_RouteClassAttributeToGetattr
+def value(self):
+    return self._value_
+temp_enum_dict['value'] = value
+del value
+
+@classmethod
+def _convert(cls, name, module, filter, source=None):
+    """
+    Create a new Enum subclass that replaces a collection of global constants
+    """
+    # convert all constants from source (or module) that pass filter() to
+    # a new Enum called name, and export the enum and its members back to
+    # module;
+    # also, replace the __reduce_ex__ method so unpickling works in
+    # previous Python versions
+    module_globals = vars(_sys.modules[module])
+    if source:
+        source = vars(source)
+    else:
+        source = module_globals
+    members = dict((name, value) for name, value in source.items() if filter(name))
+    cls = cls(name, members, module=module)
+    cls.__reduce_ex__ = _reduce_ex_by_name
+    module_globals.update(cls.__members__)
+    module_globals[name] = cls
+    return cls
+temp_enum_dict['_convert'] = _convert
+del _convert
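+# Illustrative sketch only (mirrors how CPython's socket module uses the
+# stdlib equivalent of this helper); 'AddressFamily' and the filter are
+# assumptions, not something this package sets up:
+#     IntEnum._convert('AddressFamily', 'socket',
+#                      lambda name: name.isupper() and name.startswith('AF_'))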
+
+Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
+del temp_enum_dict
+
+# Enum has now been created
+###########################
+
+class IntEnum(int, Enum):
+    """Enum where members are also (and must be) ints"""
+
+def _reduce_ex_by_name(self, proto):
+    return self.name
+
+def unique(enumeration):
+    """Class decorator that ensures only unique members exist in an enumeration."""
+    duplicates = []
+    for name, member in enumeration.__members__.items():
+        if name != member.name:
+            duplicates.append((name, member.name))
+    if duplicates:
+        duplicate_names = ', '.join(
+                ["%s -> %s" % (alias, name) for (alias, name) in duplicates]
+                )
+        raise ValueError('duplicate names found in %r: %s' %
+                (enumeration, duplicate_names)
+                )
+    return enumeration
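+
+# Illustrative use of @unique: binding a second name to an existing value
+# creates an alias, which makes class creation fail.
+#
+#     @unique
+#     class Mistake(Enum):
+#         one = 1
+#         two = 2
+#         three = 3
+#         four = 3    # alias of `three` -> ValueError: duplicate names found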
diff --git a/tools/third_party/enum/enum/doc/enum.pdf b/tools/third_party/enum/enum/doc/enum.pdf
new file mode 100644
index 0000000..8c1383a
Binary files /dev/null and b/tools/third_party/enum/enum/doc/enum.pdf differ

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 529.4236 cm

+q

+BT 1 0 0 1 0 14 Tm .464985 Tw 12 TL /F1 10 Tf 0 0 0 rg (Enumerations support iteration. In Python 3.x definition order is used; in Python 2.x the definition order is) Tj T* 0 Tw (not available, but class attribute ) Tj /F4 10 Tf 0 0 0 rg (__order__ ) Tj /F1 10 Tf 0 0 0 rg (is supported; otherwise, value order is used:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 340.2236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 180 re B*

+Q

+q

+BT 1 0 0 1 0 158 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class Shake\(Enum\):) Tj T* (...   __order__ = 'vanilla chocolate cookies mint'  # only needed in 2.x) Tj T* (...   vanilla = 7) Tj T* (...   chocolate = 4) Tj T* (...   cookies = 9) Tj T* (...   mint = 3) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( for shake in Shake:) Tj T* (...   print\(shake\)) Tj T* (...) Tj T* (Shake.vanilla) Tj T* (Shake.chocolate) Tj T* (Shake.cookies) Tj T* (Shake.mint) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 308.2236 cm

+q

+BT 1 0 0 1 0 14 Tm 1.893735 Tw 12 TL /F1 10 Tf 0 0 0 rg (The ) Tj /F4 10 Tf 0 0 0 rg (__order__ ) Tj /F1 10 Tf 0 0 0 rg (attribute is always removed, and in 3.x it is also ignored \(order is definition order\);) Tj T* 0 Tw (however, in the stdlib version it will be ignored but not removed.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 290.2236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Enumeration members are hashable, so they can be used in dictionaries and sets:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 209.0236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 72 re B*

+Q

+q

+BT 1 0 0 1 0 50 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( apples = {}) Tj T* (>) Tj (>) Tj (>) Tj ( apples[Color.red] = 'red delicious') Tj T* (>) Tj (>) Tj (>) Tj ( apples[Color.green] = 'granny smith') Tj T* (>) Tj (>) Tj (>) Tj ( apples == {Color.red: 'red delicious', Color.green: 'granny smith'}) Tj T* (True) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 155.0236 cm

+q

+BT 1 0 0 1 0 24.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (Programmatic access to enumeration members and) Tj T* (their attributes) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 113.0236 cm

+q

+BT 1 0 0 1 0 26 Tm 3.541797 Tw 12 TL /F1 10 Tf 0 0 0 rg (Sometimes it's useful to access members in enumerations programmatically \(i.e. situations where) Tj T* 0 Tw .922651 Tw /F4 10 Tf 0 0 0 rg (Color.red ) Tj /F1 10 Tf 0 0 0 rg (won't do because the exact color is not known at program-writing time\). ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (allows such) Tj T* 0 Tw (access:) Tj T* ET

+Q

+Q

+ 

+endstream

+endobj

+58 0 obj

+<< /Length 3791 >>

+stream

+1 0 0 1 0 0 cm  BT /F1 12 Tf 14.4 TL ET

+q

+1 0 0 1 62.69291 703.8236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 60 re B*

+Q

+q

+BT 1 0 0 1 0 38 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( Color\(1\)) Tj T* (<) Tj (Color.red: 1) Tj (>) Tj  T* (>) Tj (>) Tj (>) Tj ( Color\(3\)) Tj T* (<) Tj (Color.blue: 3) Tj (>) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 683.8236 cm

+q

+BT 1 0 0 1 0 2 Tm 12 TL /F1 10 Tf 0 0 0 rg (If you want to access enum members by ) Tj /F5 10 Tf (name) Tj /F1 10 Tf (, use item access:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 614.6236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 60 re B*

+Q

+q

+BT 1 0 0 1 0 38 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( Color['red']) Tj T* (<) Tj (Color.red: 1) Tj (>) Tj  T* (>) Tj (>) Tj (>) Tj ( Color['green']) Tj T* (<) Tj (Color.green: 2) Tj (>) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 594.6236 cm

+q

+BT 1 0 0 1 0 2 Tm 12 TL /F1 10 Tf 0 0 0 rg (If have an enum member and need its ) Tj /F4 10 Tf 0 0 0 rg (name ) Tj /F1 10 Tf 0 0 0 rg (or ) Tj /F4 10 Tf 0 0 0 rg (value) Tj /F1 10 Tf 0 0 0 rg (:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 513.4236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 72 re B*

+Q

+q

+BT 1 0 0 1 0 50 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( member = Color.red) Tj T* (>) Tj (>) Tj (>) Tj ( member.name) Tj T* ('red') Tj T* (>) Tj (>) Tj (>) Tj ( member.value) Tj T* (1) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 480.4236 cm

+q

+BT 1 0 0 1 0 3.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (Duplicating enum members and values) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 450.4236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 14 Tm /F1 10 Tf 12 TL .13832 Tw (Having two enum members \(or any other attribute\) with the same name is invalid; in Python 3.x this would) Tj T* 0 Tw (raise an error, but in Python 2.x the second member simply overwrites the first:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 249.2236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 192 re B*

+Q

+q

+BT 1 0 0 1 0 170 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( # python 2.x) Tj T* (>) Tj (>) Tj (>) Tj ( class Shape\(Enum\):) Tj T* (...   square = 2) Tj T* (...   square = 3) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( Shape.square) Tj T* (<) Tj (Shape.square: 3) Tj (>) Tj  T*  T* (>) Tj (>) Tj (>) Tj ( # python 3.x) Tj T* (>) Tj (>) Tj (>) Tj ( class Shape\(Enum\):) Tj T* (...   square = 2) Tj T* (...   square = 3) Tj T* (Traceback \(most recent call last\):) Tj T* (...) Tj T* (TypeError: Attempted to reuse key: 'square') Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 205.2236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 26 Tm /F1 10 Tf 12 TL .384987 Tw (However, two enum members are allowed to have the same value. Given two members A and B with the) Tj T* 0 Tw .444772 Tw (same value \(and A defined first\), B is an alias to A. By-value lookup of the value of A and B will return A.) Tj T* 0 Tw (By-name lookup of B will also return A:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 88.02362 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 492 108 re B*

+Q

+q

+BT 1 0 0 1 0 86 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class Shape\(Enum\):) Tj T* (...   __order__ = 'square diamond circle alias_for_square'  # only needed in 2.x) Tj T* (...   square = 2) Tj T* (...   diamond = 1) Tj T* (...   circle = 3) Tj T* (...   alias_for_square = 2) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( Shape.square) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+ 

+endstream

+endobj

+59 0 obj

+<< /Length 4406 >>

+stream

+1 0 0 1 0 0 cm  BT /F1 12 Tf 14.4 TL ET

+q

+1 0 0 1 62.69291 691.8236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 72 re B*

+Q

+q

+BT 1 0 0 1 0 50 Tm 12 TL /F4 10 Tf 0 0 0 rg (<) Tj (Shape.square: 2) Tj (>) Tj  T* (>) Tj (>) Tj (>) Tj ( Shape.alias_for_square) Tj T* (<) Tj (Shape.square: 2) Tj (>) Tj  T* (>) Tj (>) Tj (>) Tj ( Shape\(2\)) Tj T* (<) Tj (Shape.square: 2) Tj (>) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 659.8236 cm

+q

+BT 1 0 0 1 0 14 Tm 1.074104 Tw 12 TL /F1 10 Tf 0 0 0 rg (Allowing aliases is not always desirable. ) Tj /F4 10 Tf 0 0 0 rg (unique ) Tj /F1 10 Tf 0 0 0 rg (can be used to ensure that none exist in a particular) Tj T* 0 Tw (enumeration:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 506.6236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 144 re B*

+Q

+q

+BT 1 0 0 1 0 122 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( from enum import unique) Tj T* (>) Tj (>) Tj (>) Tj ( @unique) Tj T* (... class Mistake\(Enum\):) Tj T* (...   __order__ = 'one two three four'  # only needed in 2.x) Tj T* (...   one = 1) Tj T* (...   two = 2) Tj T* (...   three = 3) Tj T* (...   four = 3) Tj T* (Traceback \(most recent call last\):) Tj T* (...) Tj T* (ValueError: duplicate names found in ) Tj (<) Tj (enum 'Mistake') Tj (>) Tj (: four -) Tj (>) Tj ( three) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 486.6236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Iterating over the members of an enum does not provide the aliases:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 441.4236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 36 re B*

+Q

+q

+BT 1 0 0 1 0 14 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( list\(Shape\)) Tj T* ([) Tj (<) Tj (Shape.square: 2) Tj (>) Tj (, ) Tj (<) Tj (Shape.diamond: 1) Tj (>) Tj (, ) Tj (<) Tj (Shape.circle: 3) Tj (>) Tj (]) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 409.4236 cm

+q

+BT 1 0 0 1 0 14 Tm 1.307126 Tw 12 TL /F1 10 Tf 0 0 0 rg (The special attribute ) Tj /F4 10 Tf 0 0 0 rg (__members__ ) Tj /F1 10 Tf 0 0 0 rg (is a dictionary mapping names to members. It includes all names) Tj T* 0 Tw (defined in the enumeration, including the aliases:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 304.2236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 96 re B*

+Q

+q

+BT 1 0 0 1 0 74 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( for name, member in sorted\(Shape.__members__.items\(\)\):) Tj T* (...   name, member) Tj T* (...) Tj T* (\('alias_for_square', ) Tj (<) Tj (Shape.square: 2) Tj (>) Tj (\)) Tj T* (\('circle', ) Tj (<) Tj (Shape.circle: 3) Tj (>) Tj (\)) Tj T* (\('diamond', ) Tj (<) Tj (Shape.diamond: 1) Tj (>) Tj (\)) Tj T* (\('square', ) Tj (<) Tj (Shape.square: 2) Tj (>) Tj (\)) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 272.2236 cm

+q

+BT 1 0 0 1 0 14 Tm .080751 Tw 12 TL /F1 10 Tf 0 0 0 rg (The ) Tj /F4 10 Tf 0 0 0 rg (__members__ ) Tj /F1 10 Tf 0 0 0 rg (attribute can be used for detailed programmatic access to the enumeration members.) Tj T* 0 Tw (For example, finding all the aliases:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 227.0236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 486 36 re B*

+Q

+q

+BT 1 0 0 1 0 14 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( [name for name, member in Shape.__members__.items\(\) if member.name != name]) Tj T* (['alias_for_square']) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 194.0236 cm

+q

+BT 1 0 0 1 0 3.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (Comparisons) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 176.0236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Enumeration members are compared by identity:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 82.82362 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 84 re B*

+Q

+q

+BT 1 0 0 1 0 62 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( Color.red is Color.red) Tj T* (True) Tj T* (>) Tj (>) Tj (>) Tj ( Color.red is Color.blue) Tj T* (False) Tj T* (>) Tj (>) Tj (>) Tj ( Color.red is not Color.blue) Tj T* (True) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+ 

+endstream

+endobj

+60 0 obj

+<< /Length 4521 >>

+stream

+1 0 0 1 0 0 cm  BT /F1 12 Tf 14.4 TL ET

+q

+1 0 0 1 62.69291 741.0236 cm

+q

+BT 1 0 0 1 0 14 Tm 1.131647 Tw 12 TL /F1 10 Tf 0 0 0 rg (Ordered comparisons between enumeration values are ) Tj /F5 10 Tf (not ) Tj /F1 10 Tf (supported. Enum members are not integers) Tj T* 0 Tw (\(but see ) Tj 0 0 .501961 rg (IntEnum ) Tj 0 0 0 rg (below\):) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 671.8236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 60 re B*

+Q

+q

+BT 1 0 0 1 0 38 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( Color.red ) Tj (<) Tj ( Color.blue) Tj T* (Traceback \(most recent call last\):) Tj T* (  File ") Tj (<) Tj (stdin) Tj (>) Tj (", line 1, in ) Tj (<) Tj (module) Tj (>) Tj  T* (TypeError: unorderable types: Color\(\) ) Tj (<) Tj ( Color\(\)) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 651.8236 cm

+Q

+q

+1 0 0 1 62.69291 568.8236 cm

+.960784 .960784 .862745 rg

+n 0 83 469.8898 -83 re f*

+0 0 0 rg

+BT /F1 10 Tf 12 TL ET

+BT 1 0 0 1 6 57 Tm  T* ET

+q

+1 0 0 1 16 52 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2.5 Tm /F6 12.5 Tf 15 TL (Warning) Tj T* ET

+Q

+Q

+q

+1 0 0 1 16 16 cm

+q

+BT 1 0 0 1 0 14 Tm .189398 Tw 12 TL /F1 10 Tf 0 0 0 rg (In Python 2 ) Tj /F5 10 Tf (everything ) Tj /F1 10 Tf (is ordered, even though the ordering may not make sense. If you want your) Tj T* 0 Tw (enumerations to have a sensible ordering check out the ) Tj 0 0 .501961 rg (OrderedEnum ) Tj 0 0 0 rg (recipe below.) Tj T* ET

+Q

+Q

+q

+1 J

+1 j

+.662745 .662745 .662745 RG

+.5 w

+n 0 83 m 469.8898 83 l S

+n 0 0 m 469.8898 0 l S

+n 0 0 m 0 83 l S

+n 469.8898 0 m 469.8898 83 l S

+Q

+Q

+q

+1 0 0 1 62.69291 562.8236 cm

+Q

+q

+1 0 0 1 62.69291 544.8236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Equality comparisons are defined though:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 451.6236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 84 re B*

+Q

+q

+BT 1 0 0 1 0 62 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( Color.blue == Color.red) Tj T* (False) Tj T* (>) Tj (>) Tj (>) Tj ( Color.blue != Color.red) Tj T* (True) Tj T* (>) Tj (>) Tj (>) Tj ( Color.blue == Color.blue) Tj T* (True) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 419.6236 cm

+q

+BT 1 0 0 1 0 14 Tm 2.582706 Tw 12 TL /F1 10 Tf 0 0 0 rg (Comparisons against non-enumeration values will always compare not equal \(again, ) Tj /F4 10 Tf 0 0 0 rg (IntEnum ) Tj /F1 10 Tf 0 0 0 rg (was) Tj T* 0 Tw (explicitly designed to behave differently, see below\):) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 374.4236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 36 re B*

+Q

+q

+BT 1 0 0 1 0 14 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( Color.blue == 2) Tj T* (False) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 341.4236 cm

+q

+BT 1 0 0 1 0 3.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (Allowed members and attributes of enumerations) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 287.4236 cm

+q

+BT 1 0 0 1 0 38 Tm 2.755697 Tw 12 TL /F1 10 Tf 0 0 0 rg (The examples above use integers for enumeration values. Using integers is short and handy \(and) Tj T* 0 Tw .241751 Tw (provided by default by the ) Tj 0 0 .501961 rg (Functional API) Tj 0 0 0 rg (\), but not strictly enforced. In the vast majority of use-cases, one) Tj T* 0 Tw .848221 Tw (doesn't care what the actual value of an enumeration is. But if the value ) Tj /F5 10 Tf (is ) Tj /F1 10 Tf (important, enumerations can) Tj T* 0 Tw (have arbitrary values.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 257.4236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 14 Tm /F1 10 Tf 12 TL .638735 Tw (Enumerations are Python classes, and can have methods and special methods as usual. If we have this) Tj T* 0 Tw (enumeration:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 80.22362 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 168 re B*

+Q

+q

+BT 1 0 0 1 0 146 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class Mood\(Enum\):) Tj T* (...   funky = 1) Tj T* (...   happy = 3) Tj T* (...) Tj T* (...   def describe\(self\):) Tj T* (...     # self is the member here) Tj T* (...     return self.name, self.value) Tj T* (...) Tj T* (...   def __str__\(self\):) Tj T* (...     return 'my custom str! {0}'.format\(self.value\)) Tj T* (...) Tj T* (...   @classmethod) Tj T* (...   def favorite_mood\(cls\):) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+ 

+endstream

+endobj

+61 0 obj

+<< /Length 4627 >>

+stream

+1 0 0 1 0 0 cm  BT /F1 12 Tf 14.4 TL ET

+q

+1 0 0 1 62.69291 727.8236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 36 re B*

+Q

+q

+BT 1 0 0 1 0 14 Tm 12 TL /F4 10 Tf 0 0 0 rg (...     # cls here is the enumeration) Tj T* (...     return cls.happy) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 707.8236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Then:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 614.6236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 84 re B*

+Q

+q

+BT 1 0 0 1 0 62 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( Mood.favorite_mood\(\)) Tj T* (<) Tj (Mood.happy: 3) Tj (>) Tj  T* (>) Tj (>) Tj (>) Tj ( Mood.happy.describe\(\)) Tj T* (\('happy', 3\)) Tj T* (>) Tj (>) Tj (>) Tj ( str\(Mood.funky\)) Tj T* ('my custom str! 1') Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 558.6236 cm

+q

+BT 1 0 0 1 0 38 Tm 3.14186 Tw 12 TL /F1 10 Tf 0 0 0 rg (The rules for what is allowed are as follows: _sunder_ names \(starting and ending with a single) Tj T* 0 Tw .310651 Tw (underscore\) are reserved by enum and cannot be used; all other attributes defined within an enumeration) Tj T* 0 Tw 2.199213 Tw (will become members of this enumeration, with the exception of ) Tj /F5 10 Tf (__dunder__ ) Tj /F1 10 Tf (names and descriptors) Tj T* 0 Tw (\(methods are also descriptors\).) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 540.6236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Note:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 534.6236 cm

+Q

+q

+1 0 0 1 62.69291 510.6236 cm

+0 0 0 rg

+BT /F1 10 Tf 12 TL ET

+BT 1 0 0 1 0 2 Tm  T* ET

+q

+1 0 0 1 20 0 cm

+q

+BT 1 0 0 1 0 14 Tm .979213 Tw 12 TL /F1 10 Tf 0 0 0 rg (If your enumeration defines ) Tj /F4 10 Tf 0 0 0 rg (__new__ ) Tj /F1 10 Tf 0 0 0 rg (and/or ) Tj /F4 10 Tf 0 0 0 rg (__init__ ) Tj /F1 10 Tf 0 0 0 rg (then whatever value\(s\) were given to the) Tj T* 0 Tw (enum member will be passed into those methods. See ) Tj 0 0 .501961 rg (Planet ) Tj 0 0 0 rg (for an example.) Tj T* ET

+Q

+Q

+q

+Q

+Q

+q

+1 0 0 1 62.69291 510.6236 cm

+Q

+q

+1 0 0 1 62.69291 477.6236 cm

+q

+BT 1 0 0 1 0 3.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (Restricted subclassing of enumerations) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 447.6236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 14 Tm /F1 10 Tf 12 TL .778735 Tw (Subclassing an enumeration is allowed only if the enumeration does not define any members. So this is) Tj T* 0 Tw (forbidden:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 366.4236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 72 re B*

+Q

+q

+BT 1 0 0 1 0 50 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class MoreColor\(Color\):) Tj T* (...   pink = 17) Tj T* (Traceback \(most recent call last\):) Tj T* (...) Tj T* (TypeError: Cannot extend enumerations) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 346.4236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (But this is allowed:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 229.2236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 108 re B*

+Q

+q

+BT 1 0 0 1 0 86 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class Foo\(Enum\):) Tj T* (...   def some_behavior\(self\):) Tj T* (...     pass) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( class Bar\(Foo\):) Tj T* (...   happy = 1) Tj T* (...   sad = 2) Tj T* (...) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 185.2236 cm

+q

+BT 1 0 0 1 0 26 Tm .127984 Tw 12 TL /F1 10 Tf 0 0 0 rg (Allowing subclassing of enums that define members would lead to a violation of some important invariants) Tj T* 0 Tw 1.889985 Tw (of types and instances. On the other hand, it makes sense to allow sharing some common behavior) Tj T* 0 Tw (between a group of enumerations. \(See ) Tj 0 0 .501961 rg (OrderedEnum ) Tj 0 0 0 rg (for an example.\)) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 152.2236 cm

+q

+BT 1 0 0 1 0 3.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (Pickling) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 134.2236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Enumerations can be pickled and unpickled:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 89.02362 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 36 re B*

+Q

+q

+BT 1 0 0 1 0 14 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( from enum.test_enum import Fruit) Tj T* (>) Tj (>) Tj (>) Tj ( from pickle import dumps, loads) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+ 

+endstream

+endobj

+62 0 obj

+<< /Length 5372 >>

+stream

+1 0 0 1 0 0 cm  BT /F1 12 Tf 14.4 TL ET

+q

+1 0 0 1 62.69291 727.8236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 36 re B*

+Q

+q

+BT 1 0 0 1 0 14 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( Fruit.tomato is loads\(dumps\(Fruit.tomato, 2\)\)) Tj T* (True) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 695.8236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 14 Tm /F1 10 Tf 12 TL 1.256457 Tw (The usual restrictions for pickling apply: picklable enums must be defined in the top level of a module,) Tj T* 0 Tw (since unpickling requires them to be importable from that module.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 677.8236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Note:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 671.8236 cm

+Q

+q

+1 0 0 1 62.69291 647.8236 cm

+0 0 0 rg

+BT /F1 10 Tf 12 TL ET

+BT 1 0 0 1 0 2 Tm  T* ET

+q

+1 0 0 1 20 0 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 14 Tm /F1 10 Tf 12 TL .081163 Tw (With pickle protocol version 4 \(introduced in Python 3.4\) it is possible to easily pickle enums nested in) Tj T* 0 Tw (other classes.) Tj T* ET

+Q

+Q

+q

+Q

+Q

+q

+1 0 0 1 62.69291 647.8236 cm

+Q

+q

+1 0 0 1 62.69291 614.8236 cm

+q

+BT 1 0 0 1 0 3.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (Functional API) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 596.8236 cm

+q

+BT 1 0 0 1 0 2 Tm 12 TL /F1 10 Tf 0 0 0 rg (The ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (class is callable, providing the following functional API:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 467.6236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 120 re B*

+Q

+q

+BT 1 0 0 1 0 98 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( Animal = Enum\('Animal', 'ant bee cat dog'\)) Tj T* (>) Tj (>) Tj (>) Tj ( Animal) Tj T* (<) Tj (enum 'Animal') Tj (>) Tj  T* (>) Tj (>) Tj (>) Tj ( Animal.ant) Tj T* (<) Tj (Animal.ant: 1) Tj (>) Tj  T* (>) Tj (>) Tj (>) Tj ( Animal.ant.value) Tj T* (1) Tj T* (>) Tj (>) Tj (>) Tj ( list\(Animal\)) Tj T* ([) Tj (<) Tj (Animal.ant: 1) Tj (>) Tj (, ) Tj (<) Tj (Animal.bee: 2) Tj (>) Tj (, ) Tj (<) Tj (Animal.cat: 3) Tj (>) Tj (, ) Tj (<) Tj (Animal.dog: 4) Tj (>) Tj (]) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 435.6236 cm

+q

+BT 1 0 0 1 0 14 Tm .602209 Tw 12 TL /F1 10 Tf 0 0 0 rg (The semantics of this API resemble ) Tj /F4 10 Tf 0 0 0 rg (namedtuple) Tj /F1 10 Tf 0 0 0 rg (. The first argument of the call to ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (is the name of) Tj T* 0 Tw (the enumeration.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 369.6236 cm

+q

+BT 1 0 0 1 0 50 Tm 1.326412 Tw 12 TL /F1 10 Tf 0 0 0 rg (The second argument is the ) Tj /F5 10 Tf (source ) Tj /F1 10 Tf (of enumeration member names. It can be a whitespace-separated) Tj T* 0 Tw .993516 Tw (string of names, a sequence of names, a sequence of 2-tuples with key/value pairs, or a mapping \(e.g.) Tj T* 0 Tw 1.168555 Tw (dictionary\) of names to values. The last two options enable assigning arbitrary values to enumerations;) Tj T* 0 Tw .537485 Tw (the others auto-assign increasing integers starting with 1. A new class derived from ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (is returned. In) Tj T* 0 Tw (other words, the above assignment to ) Tj /F4 10 Tf 0 0 0 rg (Animal ) Tj /F1 10 Tf 0 0 0 rg (is equivalent to:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 288.4236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 72 re B*

+Q

+q

+BT 1 0 0 1 0 50 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class Animals\(Enum\):) Tj T* (...   ant = 1) Tj T* (...   bee = 2) Tj T* (...   cat = 3) Tj T* (...   dog = 4) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 232.4236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 38 Tm /F1 10 Tf 12 TL 1.239984 Tw (Pickling enums created with the functional API can be tricky as frame stack implementation details are) Tj T* 0 Tw .937132 Tw (used to try and figure out which module the enumeration is being created in \(e.g. it will fail if you use a) Tj T* 0 Tw 1.321163 Tw (utility function in separate module, and also may not work on IronPython or Jython\). The solution is to) Tj T* 0 Tw (specify the module name explicitly as follows:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 199.2236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 24 re B*

+Q

+q

+BT 1 0 0 1 0 2 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( Animals = Enum\('Animals', 'ant bee cat dog', module=__name__\)) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 166.2236 cm

+q

+BT 1 0 0 1 0 3.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (Derived Enumerations) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 136.2236 cm

+q

+BT 1 0 0 1 0 3 Tm 18 TL /F3 15 Tf 0 0 0 rg (IntEnum) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 94.22362 cm

+q

+BT 1 0 0 1 0 26 Tm 1.99832 Tw 12 TL /F1 10 Tf 0 0 0 rg (A variation of ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (is provided which is also a subclass of ) Tj /F4 10 Tf 0 0 0 rg (int) Tj /F1 10 Tf 0 0 0 rg (. Members of an ) Tj /F4 10 Tf 0 0 0 rg (IntEnum ) Tj /F1 10 Tf 0 0 0 rg (can be) Tj T* 0 Tw .087984 Tw (compared to integers; by extension, integer enumerations of different types can also be compared to each) Tj T* 0 Tw (other:) Tj T* ET

+Q

+Q

+ 

+endstream

+endobj

+63 0 obj

+<< /Length 4141 >>

+stream

+1 0 0 1 0 0 cm  BT /F1 12 Tf 14.4 TL ET

+q

+1 0 0 1 62.69291 571.8236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 192 re B*

+Q

+q

+BT 1 0 0 1 0 170 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( from enum import IntEnum) Tj T* (>) Tj (>) Tj (>) Tj ( class Shape\(IntEnum\):) Tj T* (...   circle = 1) Tj T* (...   square = 2) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( class Request\(IntEnum\):) Tj T* (...   post = 1) Tj T* (...   get = 2) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( Shape == 1) Tj T* (False) Tj T* (>) Tj (>) Tj (>) Tj ( Shape.circle == 1) Tj T* (True) Tj T* (>) Tj (>) Tj (>) Tj ( Shape.circle == Request.post) Tj T* (True) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 551.8236 cm

+q

+BT 1 0 0 1 0 2 Tm 12 TL /F1 10 Tf 0 0 0 rg (However, they still can't be compared to standard ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (enumerations:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 410.6236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 132 re B*

+Q

+q

+BT 1 0 0 1 0 110 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class Shape\(IntEnum\):) Tj T* (...   circle = 1) Tj T* (...   square = 2) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( class Color\(Enum\):) Tj T* (...   red = 1) Tj T* (...   green = 2) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( Shape.circle == Color.red) Tj T* (False) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 390.6236 cm

+q

+BT 1 0 0 1 0 2 Tm 12 TL /F4 10 Tf 0 0 0 rg (IntEnum ) Tj /F1 10 Tf 0 0 0 rg (values behave like integers in other ways you'd expect:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 297.4236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 84 re B*

+Q

+q

+BT 1 0 0 1 0 62 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( int\(Shape.circle\)) Tj T* (1) Tj T* (>) Tj (>) Tj (>) Tj ( ['a', 'b', 'c'][Shape.circle]) Tj T* ('b') Tj T* (>) Tj (>) Tj (>) Tj ( [i for i in range\(Shape.square\)]) Tj T* ([0, 1]) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 229.4236 cm

+q

+BT 1 0 0 1 0 50 Tm 1.197126 Tw 12 TL /F1 10 Tf 0 0 0 rg (For the vast majority of code, ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (is strongly recommended, since ) Tj /F4 10 Tf 0 0 0 rg (IntEnum ) Tj /F1 10 Tf 0 0 0 rg (breaks some semantic) Tj T* 0 Tw .793318 Tw (promises of an enumeration \(by being comparable to integers, and thus by transitivity to other unrelated) Tj T* 0 Tw .554985 Tw (enumerations\). It should be used only in special cases where there's no other choice; for example, when) Tj T* 0 Tw .746136 Tw (integer constants are replaced with enumerations and backwards compatibility is required with code that) Tj T* 0 Tw (still expects integers.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 199.4236 cm

+q

+BT 1 0 0 1 0 3 Tm 18 TL /F3 15 Tf 0 0 0 rg (Others) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 181.4236 cm

+q

+BT 1 0 0 1 0 2 Tm 12 TL /F1 10 Tf 0 0 0 rg (While ) Tj /F4 10 Tf 0 0 0 rg (IntEnum ) Tj /F1 10 Tf 0 0 0 rg (is part of the ) Tj /F4 10 Tf 0 0 0 rg (enum ) Tj /F1 10 Tf 0 0 0 rg (module, it would be very simple to implement independently:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 136.2236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 36 re B*

+Q

+q

+0 0 0 rg

+BT 1 0 0 1 0 14 Tm /F4 10 Tf 12 TL (class IntEnum\(int, Enum\):) Tj T* (    pass) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 104.2236 cm

+q

+BT 1 0 0 1 0 14 Tm .361412 Tw 12 TL /F1 10 Tf 0 0 0 rg (This demonstrates how similar derived enumerations can be defined; for example a ) Tj /F4 10 Tf 0 0 0 rg (StrEnum ) Tj /F1 10 Tf 0 0 0 rg (that mixes) Tj T* 0 Tw (in ) Tj /F4 10 Tf 0 0 0 rg (str ) Tj /F1 10 Tf 0 0 0 rg (instead of ) Tj /F4 10 Tf 0 0 0 rg (int) Tj /F1 10 Tf 0 0 0 rg (.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 86.22362 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Some rules:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 80.22362 cm

+Q

+q

+1 0 0 1 62.69291 80.22362 cm

+Q

+ 

+endstream

+endobj

+64 0 obj

+<< /Length 7108 >>

+stream

+1 0 0 1 0 0 cm  BT /F1 12 Tf 14.4 TL ET

+q

+1 0 0 1 62.69291 741.0236 cm

+0 0 0 rg

+BT /F1 10 Tf 12 TL ET

+q

+1 0 0 1 6 9 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL 5.66 0 Td (1.) Tj T* -5.66 0 Td ET

+Q

+Q

+q

+1 0 0 1 23 -3 cm

+q

+BT 1 0 0 1 0 14 Tm .477318 Tw 12 TL /F1 10 Tf 0 0 0 rg (When subclassing ) Tj /F4 10 Tf 0 0 0 rg (Enum) Tj /F1 10 Tf 0 0 0 rg (, mix-in types must appear before ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (itself in the sequence of bases, as) Tj T* 0 Tw (in the ) Tj /F4 10 Tf 0 0 0 rg (IntEnum ) Tj /F1 10 Tf 0 0 0 rg (example above.) Tj T* ET

+Q

+Q

+q

+Q

+Q

+q

+1 0 0 1 62.69291 735.0236 cm

+Q

+q

+1 0 0 1 62.69291 699.0236 cm

+0 0 0 rg

+BT /F1 10 Tf 12 TL ET

+q

+1 0 0 1 6 21 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL 5.66 0 Td (2.) Tj T* -5.66 0 Td ET

+Q

+Q

+q

+1 0 0 1 23 -3 cm

+q

+BT 1 0 0 1 0 26 Tm 1.147045 Tw 12 TL /F1 10 Tf 0 0 0 rg (While ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (can have members of any type, once you mix in an additional type, all the members) Tj T* 0 Tw .420574 Tw (must have values of that type, e.g. ) Tj /F4 10 Tf 0 0 0 rg (int ) Tj /F1 10 Tf 0 0 0 rg (above. This restriction does not apply to mix-ins which only) Tj T* 0 Tw (add methods and don't specify another data type such as ) Tj /F4 10 Tf 0 0 0 rg (int ) Tj /F1 10 Tf 0 0 0 rg (or ) Tj /F4 10 Tf 0 0 0 rg (str) Tj /F1 10 Tf 0 0 0 rg (.) Tj T* ET

+Q

+Q

+q

+Q

+Q

+q

+1 0 0 1 62.69291 693.0236 cm

+Q

+q

+1 0 0 1 62.69291 669.0236 cm

+0 0 0 rg

+BT /F1 10 Tf 12 TL ET

+q

+1 0 0 1 6 9 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL 5.66 0 Td (3.) Tj T* -5.66 0 Td ET

+Q

+Q

+q

+1 0 0 1 23 -3 cm

+q

+BT 1 0 0 1 0 14 Tm .100542 Tw 12 TL /F1 10 Tf 0 0 0 rg (When another data type is mixed in, the ) Tj /F4 10 Tf 0 0 0 rg (value ) Tj /F1 10 Tf 0 0 0 rg (attribute is ) Tj /F5 10 Tf (not the same ) Tj /F1 10 Tf (as the enum member itself,) Tj T* 0 Tw (although it is equivalant and will compare equal.) Tj T* ET

+Q

+Q

+q

+Q

+Q

+q

+1 0 0 1 62.69291 663.0236 cm

+Q

+q

+1 0 0 1 62.69291 609.0236 cm

+0 0 0 rg

+BT /F1 10 Tf 12 TL ET

+q

+1 0 0 1 6 39 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL 5.66 0 Td (4.) Tj T* -5.66 0 Td ET

+Q

+Q

+q

+1 0 0 1 23 27 cm

+q

+BT 1 0 0 1 0 14 Tm 1.85998 Tw 12 TL /F1 10 Tf 0 0 0 rg (%-style formatting: ) Tj /F4 10 Tf 0 0 0 rg (%s ) Tj /F1 10 Tf 0 0 0 rg (and ) Tj /F4 10 Tf 0 0 0 rg (%r ) Tj /F1 10 Tf 0 0 0 rg (call ) Tj /F4 10 Tf 0 0 0 rg (Enum) Tj /F1 10 Tf 0 0 0 rg ('s ) Tj /F4 10 Tf 0 0 0 rg (__str__ ) Tj /F1 10 Tf 0 0 0 rg (and ) Tj /F4 10 Tf 0 0 0 rg (__repr__ ) Tj /F1 10 Tf 0 0 0 rg (respectively; other codes) Tj T* 0 Tw (\(such as ) Tj /F4 10 Tf 0 0 0 rg (%i ) Tj /F1 10 Tf 0 0 0 rg (or ) Tj /F4 10 Tf 0 0 0 rg (%h ) Tj /F1 10 Tf 0 0 0 rg (for IntEnum\) treat the enum member as its mixed-in type.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 23 -3 cm

+q

+BT 1 0 0 1 0 14 Tm .067045 Tw 12 TL /F1 10 Tf 0 0 0 rg (Note: Prior to Python 3.4 there is a bug in ) Tj /F4 10 Tf 0 0 0 rg (str) Tj /F1 10 Tf 0 0 0 rg ('s %-formatting: ) Tj /F4 10 Tf 0 0 0 rg (int ) Tj /F1 10 Tf 0 0 0 rg (subclasses are printed as strings) Tj T* 0 Tw (and not numbers when the ) Tj /F4 10 Tf 0 0 0 rg (%d) Tj /F1 10 Tf 0 0 0 rg (, ) Tj /F4 10 Tf 0 0 0 rg (%i) Tj /F1 10 Tf 0 0 0 rg (, or ) Tj /F4 10 Tf 0 0 0 rg (%u ) Tj /F1 10 Tf 0 0 0 rg (codes are used.) Tj T* ET

+Q

+Q

+q

+Q

+Q

+q

+1 0 0 1 62.69291 603.0236 cm

+Q

+q

+1 0 0 1 62.69291 579.0236 cm

+0 0 0 rg

+BT /F1 10 Tf 12 TL ET

+q

+1 0 0 1 6 9 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL 5.66 0 Td (5.) Tj T* -5.66 0 Td ET

+Q

+Q

+q

+1 0 0 1 23 -3 cm

+q

+BT 1 0 0 1 0 14 Tm 1.880751 Tw 12 TL /F4 10 Tf 0 0 0 rg (str.__format__ ) Tj /F1 10 Tf 0 0 0 rg (\(or ) Tj /F4 10 Tf 0 0 0 rg (format) Tj /F1 10 Tf 0 0 0 rg (\) will use the mixed-in type's ) Tj /F4 10 Tf 0 0 0 rg (__format__) Tj /F1 10 Tf 0 0 0 rg (. If the ) Tj /F4 10 Tf 0 0 0 rg (Enum) Tj /F1 10 Tf 0 0 0 rg ('s ) Tj /F4 10 Tf 0 0 0 rg (str ) Tj /F1 10 Tf 0 0 0 rg (or) Tj T* 0 Tw /F4 10 Tf 0 0 0 rg (repr ) Tj /F1 10 Tf 0 0 0 rg (is desired use the ) Tj /F4 10 Tf 0 0 0 rg (!s ) Tj /F1 10 Tf 0 0 0 rg (or ) Tj /F4 10 Tf 0 0 0 rg (!r) Tj /F1 10 Tf 0 0 0 rg ( ) Tj /F4 10 Tf 0 0 0 rg (str ) Tj /F1 10 Tf 0 0 0 rg (format codes.) Tj T* ET

+Q

+Q

+q

+Q

+Q

+q

+1 0 0 1 62.69291 579.0236 cm

+Q

+q

+1 0 0 1 62.69291 546.0236 cm

+q

+BT 1 0 0 1 0 3.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (Decorators) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 516.0236 cm

+q

+BT 1 0 0 1 0 3 Tm 18 TL /F3 15 Tf 0 0 0 rg (unique) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 486.0236 cm

+q

+BT 1 0 0 1 0 14 Tm .287251 Tw 12 TL /F1 10 Tf 0 0 0 rg (A ) Tj /F4 10 Tf 0 0 0 rg (class ) Tj /F1 10 Tf 0 0 0 rg (decorator specifically for enumerations. It searches an enumeration's ) Tj /F4 10 Tf 0 0 0 rg (__members__ ) Tj /F1 10 Tf 0 0 0 rg (gathering) Tj T* 0 Tw (any aliases it finds; if any are found ) Tj /F4 10 Tf 0 0 0 rg (ValueError ) Tj /F1 10 Tf 0 0 0 rg (is raised with the details:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 368.8236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 108 re B*

+Q

+q

+BT 1 0 0 1 0 86 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( @unique) Tj T* (... class NoDupes\(Enum\):) Tj T* (...    first = 'one') Tj T* (...    second = 'two') Tj T* (...    third = 'two') Tj T* (Traceback \(most recent call last\):) Tj T* (...) Tj T* (ValueError: duplicate names found in ) Tj (<) Tj (enum 'NoDupes') Tj (>) Tj (: third -) Tj (>) Tj ( second) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 335.8236 cm

+q

+BT 1 0 0 1 0 3.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (Interesting examples) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 293.8236 cm

+q

+BT 1 0 0 1 0 26 Tm .593735 Tw 12 TL /F1 10 Tf 0 0 0 rg (While ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (and ) Tj /F4 10 Tf 0 0 0 rg (IntEnum ) Tj /F1 10 Tf 0 0 0 rg (are expected to cover the majority of use-cases, they cannot cover them all.) Tj T* 0 Tw .897045 Tw (Here are recipes for some different types of enumerations that can be used directly, or as examples for) Tj T* 0 Tw (creating one's own.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 263.8236 cm

+q

+BT 1 0 0 1 0 3 Tm 18 TL /F3 15 Tf 0 0 0 rg (AutoNumber) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 245.8236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Avoids having to specify the value for each enumeration member:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 80.62362 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 156 re B*

+Q

+q

+BT 1 0 0 1 0 134 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class AutoNumber\(Enum\):) Tj T* (...     def __new__\(cls\):) Tj T* (...         value = len\(cls.__members__\) + 1) Tj T* (...         obj = object.__new__\(cls\)) Tj T* (...         obj._value_ = value) Tj T* (...         return obj) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( class Color\(AutoNumber\):) Tj T* (...     __order__ = "red green blue"  # only needed in 2.x) Tj T* (...     red = \(\)) Tj T* (...     green = \(\)) Tj T* (...     blue = \(\)) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+ 

+endstream

+endobj

+65 0 obj

+<< /Length 4158 >>

+stream

+1 0 0 1 0 0 cm  BT /F1 12 Tf 14.4 TL ET

+q

+1 0 0 1 62.69291 715.8236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 48 re B*

+Q

+q

+BT 1 0 0 1 0 26 Tm 12 TL /F4 10 Tf 0 0 0 rg (...) Tj T* (>) Tj (>) Tj (>) Tj ( Color.green.value == 2) Tj T* (True) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 695.8236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Note:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 689.8236 cm

+Q

+q

+1 0 0 1 62.69291 653.8236 cm

+0 0 0 rg

+BT /F1 10 Tf 12 TL ET

+BT 1 0 0 1 0 2 Tm  T* ET

+q

+1 0 0 1 20 0 cm

+q

+BT 1 0 0 1 0 26 Tm .144104 Tw 12 TL /F1 10 Tf 0 0 0 rg (The ) Tj /F5 10 Tf 0 0 0 rg (__new__ ) Tj /F1 10 Tf 0 0 0 rg (method, if defined, is used during creation of the Enum members; it is then replaced by) Tj T* 0 Tw .799985 Tw (Enum's ) Tj /F5 10 Tf 0 0 0 rg (__new__ ) Tj /F1 10 Tf 0 0 0 rg (which is used after class creation for lookup of existing members. Due to the way) Tj T* 0 Tw (Enums are supposed to behave, there is no way to customize Enum's ) Tj /F5 10 Tf 0 0 0 rg (__new__) Tj /F1 10 Tf 0 0 0 rg (.) Tj T* ET

+Q

+Q

+q

+Q

+Q

+q

+1 0 0 1 62.69291 653.8236 cm

+Q

+q

+1 0 0 1 62.69291 623.8236 cm

+q

+BT 1 0 0 1 0 3 Tm 18 TL /F3 15 Tf 0 0 0 rg (UniqueEnum) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 605.8236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2 Tm /F1 10 Tf 12 TL (Raises an error if a duplicate member name is found instead of creating an alias:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 368.6236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 228 re B*

+Q

+q

+BT 1 0 0 1 0 206 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class UniqueEnum\(Enum\):) Tj T* (...     def __init__\(self, *args\):) Tj T* (...         cls = self.__class__) Tj T* (...         if any\(self.value == e.value for e in cls\):) Tj T* (...             a = self.name) Tj T* (...             e = cls\(self.value\).name) Tj T* (...             raise ValueError\() Tj T* (...                     "aliases not allowed in UniqueEnum:  %r --) Tj (>) Tj ( %r") Tj T* (...                     % \(a, e\)\)) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( class Color\(UniqueEnum\):) Tj T* (...     red = 1) Tj T* (...     green = 2) Tj T* (...     blue = 3) Tj T* (...     grene = 2) Tj T* (Traceback \(most recent call last\):) Tj T* (...) Tj T* (ValueError: aliases not allowed in UniqueEnum:  'grene' --) Tj (>) Tj ( 'green') Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 338.6236 cm

+q

+BT 1 0 0 1 0 3 Tm 18 TL /F3 15 Tf 0 0 0 rg (OrderedEnum) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 308.6236 cm

+q

+BT 1 0 0 1 0 14 Tm 1.335984 Tw 12 TL /F1 10 Tf 0 0 0 rg (An ordered enumeration that is not based on ) Tj /F4 10 Tf 0 0 0 rg (IntEnum ) Tj /F1 10 Tf 0 0 0 rg (and so maintains the normal ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (invariants) Tj T* 0 Tw (\(such as not being comparable to other enumerations\):) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 83.42362 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 216 re B*

+Q

+q

+BT 1 0 0 1 0 194 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class OrderedEnum\(Enum\):) Tj T* (...     def __ge__\(self, other\):) Tj T* (...         if self.__class__ is other.__class__:) Tj T* (...             return self._value_ ) Tj (>) Tj (= other._value_) Tj T* (...         return NotImplemented) Tj T* (...     def __gt__\(self, other\):) Tj T* (...         if self.__class__ is other.__class__:) Tj T* (...             return self._value_ ) Tj (>) Tj ( other._value_) Tj T* (...         return NotImplemented) Tj T* (...     def __le__\(self, other\):) Tj T* (...         if self.__class__ is other.__class__:) Tj T* (...             return self._value_ ) Tj (<) Tj (= other._value_) Tj T* (...         return NotImplemented) Tj T* (...     def __lt__\(self, other\):) Tj T* (...         if self.__class__ is other.__class__:) Tj T* (...             return self._value_ ) Tj (<) Tj ( other._value_) Tj T* (...         return NotImplemented) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+ 

+endstream

+endobj

+66 0 obj

+<< /Length 4039 >>

+stream

+1 0 0 1 0 0 cm  BT /F1 12 Tf 14.4 TL ET

+q

+1 0 0 1 62.69291 619.8236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 144 re B*

+Q

+q

+BT 1 0 0 1 0 122 Tm 12 TL /F4 10 Tf 0 0 0 rg (...) Tj T* (>) Tj (>) Tj (>) Tj ( class Grade\(OrderedEnum\):) Tj T* (...     __ordered__ = 'A B C D F') Tj T* (...     A = 5) Tj T* (...     B = 4) Tj T* (...     C = 3) Tj T* (...     D = 2) Tj T* (...     F = 1) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( Grade.C ) Tj (<) Tj ( Grade.A) Tj T* (True) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 589.8236 cm

+q

+BT 1 0 0 1 0 3 Tm 18 TL /F3 15 Tf 0 0 0 rg (Planet) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 571.8236 cm

+q

+BT 1 0 0 1 0 2 Tm 12 TL /F1 10 Tf 0 0 0 rg (If ) Tj /F4 10 Tf 0 0 0 rg (__new__ ) Tj /F1 10 Tf 0 0 0 rg (or ) Tj /F4 10 Tf 0 0 0 rg (__init__ ) Tj /F1 10 Tf 0 0 0 rg (is defined the value of the enum member will be passed to those methods:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 286.6236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 276 re B*

+Q

+q

+BT 1 0 0 1 0 254 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class Planet\(Enum\):) Tj T* (...     MERCURY = \(3.303e+23, 2.4397e6\)) Tj T* (...     VENUS   = \(4.869e+24, 6.0518e6\)) Tj T* (...     EARTH   = \(5.976e+24, 6.37814e6\)) Tj T* (...     MARS    = \(6.421e+23, 3.3972e6\)) Tj T* (...     JUPITER = \(1.9e+27,   7.1492e7\)) Tj T* (...     SATURN  = \(5.688e+26, 6.0268e7\)) Tj T* (...     URANUS  = \(8.686e+25, 2.5559e7\)) Tj T* (...     NEPTUNE = \(1.024e+26, 2.4746e7\)) Tj T* (...     def __init__\(self, mass, radius\):) Tj T* (...         self.mass = mass       # in kilograms) Tj T* (...         self.radius = radius   # in meters) Tj T* (...     @property) Tj T* (...     def surface_gravity\(self\):) Tj T* (...         # universal gravitational constant  \(m3 kg-1 s-2\)) Tj T* (...         G = 6.67300E-11) Tj T* (...         return G * self.mass / \(self.radius * self.radius\)) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( Planet.EARTH.value) Tj T* (\(5.976e+24, 6378140.0\)) Tj T* (>) Tj (>) Tj (>) Tj ( Planet.EARTH.surface_gravity) Tj T* (9.802652743337129) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 253.6236 cm

+q

+BT 1 0 0 1 0 3.5 Tm 21 TL /F3 17.5 Tf 0 0 0 rg (How are Enums different?) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 223.6236 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 14 Tm /F1 10 Tf 12 TL 2.090651 Tw (Enums have a custom metaclass that affects many aspects of both derived Enum classes and their) Tj T* 0 Tw (instances \(members\).) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 193.6236 cm

+q

+BT 1 0 0 1 0 3 Tm 18 TL /F3 15 Tf 0 0 0 rg (Enum Classes) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 127.6236 cm

+q

+BT 1 0 0 1 0 50 Tm 1.263615 Tw 12 TL /F1 10 Tf 0 0 0 rg (The ) Tj /F4 10 Tf 0 0 0 rg (EnumMeta ) Tj /F1 10 Tf 0 0 0 rg (metaclass is responsible for providing the ) Tj /F4 10 Tf 0 0 0 rg (__contains__) Tj /F1 10 Tf 0 0 0 rg (, ) Tj /F4 10 Tf 0 0 0 rg (__dir__) Tj /F1 10 Tf 0 0 0 rg (, ) Tj /F4 10 Tf 0 0 0 rg (__iter__ ) Tj /F1 10 Tf 0 0 0 rg (and) Tj T* 0 Tw 2.264724 Tw (other methods that allow one to do things with an ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (class that fail on a typical class, such as) Tj T* 0 Tw 2.594147 Tw /F4 10 Tf 0 0 0 rg (list\(Color\) ) Tj /F1 10 Tf 0 0 0 rg (or ) Tj /F4 10 Tf 0 0 0 rg (some_var) Tj ( ) Tj (in) Tj ( ) Tj (Color) Tj /F1 10 Tf 0 0 0 rg (. ) Tj /F4 10 Tf 0 0 0 rg (EnumMeta ) Tj /F1 10 Tf 0 0 0 rg (is responsible for ensuring that various other) Tj T* 0 Tw 2.196905 Tw (methods on the final ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (class are correct \(such as ) Tj /F4 10 Tf 0 0 0 rg (__new__) Tj /F1 10 Tf 0 0 0 rg (, ) Tj /F4 10 Tf 0 0 0 rg (__getnewargs__) Tj /F1 10 Tf 0 0 0 rg (, ) Tj /F4 10 Tf 0 0 0 rg (__str__ ) Tj /F1 10 Tf 0 0 0 rg (and) Tj T* 0 Tw /F4 10 Tf 0 0 0 rg (__repr__) Tj /F1 10 Tf 0 0 0 rg (\).) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 115.6236 cm

+Q

+ 

+endstream

+endobj

+67 0 obj

+<< /Length 5453 >>

+stream

+1 0 0 1 0 0 cm  BT /F1 12 Tf 14.4 TL ET

+q

+1 0 0 1 62.69291 682.0236 cm

+.960784 .960784 .862745 rg

+n 0 83 469.8898 -83 re f*

+0 0 0 rg

+BT /F1 10 Tf 12 TL ET

+BT 1 0 0 1 6 57 Tm  T* ET

+q

+1 0 0 1 16 52 cm

+q

+0 0 0 rg

+BT 1 0 0 1 0 2.5 Tm /F6 12.5 Tf 15 TL (Note) Tj T* ET

+Q

+Q

+q

+1 0 0 1 16 16 cm

+q

+BT 1 0 0 1 0 14 Tm .686654 Tw 12 TL /F4 10 Tf 0 0 0 rg (__dir__ ) Tj /F1 10 Tf 0 0 0 rg (is not changed in the Python 2 line as it messes up some of the decorators included in) Tj T* 0 Tw (the stdlib.) Tj T* ET

+Q

+Q

+q

+1 J

+1 j

+.662745 .662745 .662745 RG

+.5 w

+n 0 83 m 469.8898 83 l S

+n 0 0 m 469.8898 0 l S

+n 0 0 m 0 83 l S

+n 469.8898 0 m 469.8898 83 l S

+Q

+Q

+q

+1 0 0 1 62.69291 676.0236 cm

+Q

+q

+1 0 0 1 62.69291 646.0236 cm

+q

+BT 1 0 0 1 0 3 Tm 18 TL /F3 15 Tf 0 0 0 rg (Enum Members \(aka instances\)) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 604.0236 cm

+q

+BT 1 0 0 1 0 26 Tm .491984 Tw 12 TL /F1 10 Tf 0 0 0 rg (The most interesting thing about Enum members is that they are singletons. ) Tj /F4 10 Tf 0 0 0 rg (EnumMeta ) Tj /F1 10 Tf 0 0 0 rg (creates them all) Tj T* 0 Tw .084988 Tw (while it is creating the ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (class itself, and then puts a custom ) Tj /F4 10 Tf 0 0 0 rg (__new__ ) Tj /F1 10 Tf 0 0 0 rg (in place to ensure that no new) Tj T* 0 Tw (ones are ever instantiated by returning only the existing member instances.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 574.0236 cm

+q

+BT 1 0 0 1 0 3 Tm 18 TL /F3 15 Tf 0 0 0 rg (Finer Points) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 520.0236 cm

+q

+BT 1 0 0 1 0 38 Tm 5.488555 Tw 12 TL /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (members are instances of an ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (class, and even though they are accessible as) Tj T* 0 Tw 1.504147 Tw /F5 10 Tf 0 0 0 rg (EnumClass.member1.member2) Tj /F1 10 Tf 0 0 0 rg (, they should not be accessed directly from the member as that lookup) Tj T* 0 Tw .329985 Tw (may fail or, worse, return something besides the ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (member you were looking for \(changed in version) Tj T* 0 Tw (1.1.1\):) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 390.8236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 120 re B*

+Q

+q

+BT 1 0 0 1 0 98 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( class FieldTypes\(Enum\):) Tj T* (...     name = 1) Tj T* (...     value = 2) Tj T* (...     size = 3) Tj T* (...) Tj T* (>) Tj (>) Tj (>) Tj ( FieldTypes.value.size) Tj T* (<) Tj (FieldTypes.size: 3) Tj (>) Tj  T* (>) Tj (>) Tj (>) Tj ( FieldTypes.size.value) Tj T* (3) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 370.8236 cm

+q

+BT 1 0 0 1 0 2 Tm 12 TL /F1 10 Tf 0 0 0 rg (The ) Tj /F4 10 Tf 0 0 0 rg (__members__ ) Tj /F1 10 Tf 0 0 0 rg (attribute is only available on the class.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 316.8236 cm

+q

+BT 1 0 0 1 0 38 Tm 1.374651 Tw 12 TL /F1 10 Tf 0 0 0 rg (In Python 3.x ) Tj /F4 10 Tf 0 0 0 rg (__members__ ) Tj /F1 10 Tf 0 0 0 rg (is always an ) Tj /F4 10 Tf 0 0 0 rg (OrderedDict) Tj /F1 10 Tf 0 0 0 rg (, with the order being the definition order. In) Tj T* 0 Tw 3.009213 Tw (Python 2.7 ) Tj /F4 10 Tf 0 0 0 rg (__members__ ) Tj /F1 10 Tf 0 0 0 rg (is an ) Tj /F4 10 Tf 0 0 0 rg (OrderedDict ) Tj /F1 10 Tf 0 0 0 rg (if ) Tj /F4 10 Tf 0 0 0 rg (__order__ ) Tj /F1 10 Tf 0 0 0 rg (was specified, and a plain ) Tj /F4 10 Tf 0 0 0 rg (dict) Tj T* 0 Tw 1.851318 Tw /F1 10 Tf 0 0 0 rg (otherwise. In all other Python 2.x versions ) Tj /F4 10 Tf 0 0 0 rg (__members__ ) Tj /F1 10 Tf 0 0 0 rg (is a plain ) Tj /F4 10 Tf 0 0 0 rg (dict ) Tj /F1 10 Tf 0 0 0 rg (even if ) Tj /F4 10 Tf 0 0 0 rg (__order__ ) Tj /F1 10 Tf 0 0 0 rg (was) Tj T* 0 Tw (specified as the ) Tj /F4 10 Tf 0 0 0 rg (OrderedDict ) Tj /F1 10 Tf 0 0 0 rg (type didn't exist yet.) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 286.8236 cm

+q

+BT 1 0 0 1 0 14 Tm .106654 Tw 12 TL /F1 10 Tf 0 0 0 rg (If you give your ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (subclass extra methods, like the ) Tj 0 0 .501961 rg (Planet ) Tj 0 0 0 rg (class above, those methods will show up in) Tj T* 0 Tw (a ) Tj /F5 10 Tf 0 0 0 rg (dir ) Tj /F1 10 Tf 0 0 0 rg (of the member, but not of the class:) Tj T* ET

+Q

+Q

+q

+1 0 0 1 62.69291 205.6236 cm

+q

+q

+1 0 0 1 0 0 cm

+q

+1 0 0 1 6.6 6.6 cm

+q

+.662745 .662745 .662745 RG

+.5 w

+.960784 .960784 .862745 rg

+n -6 -6 468.6898 72 re B*

+Q

+q

+BT 1 0 0 1 0 50 Tm 12 TL /F4 10 Tf 0 0 0 rg (>) Tj (>) Tj (>) Tj ( dir\(Planet\)) Tj T* (['EARTH', 'JUPITER', 'MARS', 'MERCURY', 'NEPTUNE', 'SATURN', 'URANUS',) Tj T* ('VENUS', '__class__', '__doc__', '__members__', '__module__']) Tj T* (>) Tj (>) Tj (>) Tj ( dir\(Planet.EARTH\)) Tj T* (['__class__', '__doc__', '__module__', 'name', 'surface_gravity', 'value']) Tj T* ET

+Q

+Q

+Q

+Q

+Q

+q

+1 0 0 1 62.69291 161.6236 cm

+q

+BT 1 0 0 1 0 26 Tm .938935 Tw 12 TL /F1 10 Tf 0 0 0 rg (A ) Tj /F4 10 Tf 0 0 0 rg (__new__ ) Tj /F1 10 Tf 0 0 0 rg (method will only be used for the creation of the ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (members -- after that it is replaced.) Tj T* 0 Tw .949461 Tw (This means if you wish to change how ) Tj /F4 10 Tf 0 0 0 rg (Enum ) Tj /F1 10 Tf 0 0 0 rg (members are looked up you either have to write a helper) Tj T* 0 Tw (function or a ) Tj /F4 10 Tf 0 0 0 rg (classmethod) Tj /F1 10 Tf 0 0 0 rg (.) Tj T* ET

+Q

+Q

+ 

+endstream

+endobj

+68 0 obj

+<< /Nums [ 0 69 0 R 1 70 0 R 2 71 0 R 3 72 0 R 4 73 0 R 

+  5 74 0 R 6 75 0 R 7 76 0 R 8 77 0 R 9 78 0 R 

+  10 79 0 R 11 80 0 R ] >>

+endobj

+69 0 obj

+<< /S /D /St 1 >>

+endobj

+70 0 obj

+<< /S /D /St 2 >>

+endobj

+71 0 obj

+<< /S /D /St 3 >>

+endobj

+72 0 obj

+<< /S /D /St 4 >>

+endobj

+73 0 obj

+<< /S /D /St 5 >>

+endobj

+74 0 obj

+<< /S /D /St 6 >>

+endobj

+75 0 obj

+<< /S /D /St 7 >>

+endobj

+76 0 obj

+<< /S /D /St 8 >>

+endobj

+77 0 obj

+<< /S /D /St 9 >>

+endobj

+78 0 obj

+<< /S /D /St 10 >>

+endobj

+79 0 obj

+<< /S /D /St 11 >>

+endobj

+80 0 obj

+<< /S /D /St 12 >>

+endobj


diff --git a/tools/third_party/enum/enum/doc/enum.rst b/tools/third_party/enum/enum/doc/enum.rst
new file mode 100644
index 0000000..3afc238
--- /dev/null
+++ b/tools/third_party/enum/enum/doc/enum.rst
@@ -0,0 +1,735 @@
+``enum`` --- support for enumerations
+========================================
+
+.. :synopsis: enumerations are sets of symbolic names bound to unique, constant
+  values.
+.. :moduleauthor:: Ethan Furman <ethan@stoneleaf.us>
+.. :sectionauthor:: Barry Warsaw <barry@python.org>,
+.. :sectionauthor:: Eli Bendersky <eliben@gmail.com>,
+.. :sectionauthor:: Ethan Furman <ethan@stoneleaf.us>
+
+----------------
+
+An enumeration is a set of symbolic names (members) bound to unique, constant
+values.  Within an enumeration, the members can be compared by identity, and
+the enumeration itself can be iterated over.
+
+
+Module Contents
+---------------
+
+This module defines two enumeration classes that can be used to define unique
+sets of names and values: ``Enum`` and ``IntEnum``.  It also defines
+one decorator, ``unique``.
+
+``Enum``
+
+Base class for creating enumerated constants.  See section `Functional API`_
+for an alternate construction syntax.
+
+``IntEnum``
+
+Base class for creating enumerated constants that are also subclasses of ``int``.
+
+``unique``
+
+Enum class decorator that ensures only one name is bound to any one value.
+
+
+Creating an Enum
+----------------
+
+Enumerations are created using the ``class`` syntax, which makes them
+easy to read and write.  An alternative creation method is described in
+`Functional API`_.  To define an enumeration, subclass ``Enum`` as
+follows::
+
+    >>> from enum import Enum
+    >>> class Color(Enum):
+    ...     red = 1
+    ...     green = 2
+    ...     blue = 3
+
+Note: Nomenclature
+
+  - The class ``Color`` is an *enumeration* (or *enum*)
+  - The attributes ``Color.red``, ``Color.green``, etc., are
+    *enumeration members* (or *enum members*).
+  - The enum members have *names* and *values* (the name of
+    ``Color.red`` is ``red``, the value of ``Color.blue`` is
+    ``3``, etc.)
+    
+Note:
+
+    Even though we use the ``class`` syntax to create Enums, Enums
+    are not normal Python classes.  See `How are Enums different?`_ for
+    more details.
+
+Enumeration members have human readable string representations::
+
+    >>> print(Color.red)
+    Color.red
+
+...while their ``repr`` has more information::
+
+    >>> print(repr(Color.red))
+    <Color.red: 1>
+
+The *type* of an enumeration member is the enumeration it belongs to::
+
+    >>> type(Color.red)
+    <enum 'Color'>
+    >>> isinstance(Color.green, Color)
+    True
+    >>>
+
+Enum members also have a property that contains just their item name::
+
+    >>> print(Color.red.name)
+    red
+
+Enumerations support iteration.  In Python 3.x definition order is used; in
+Python 2.x the definition order is not available, so the class attribute
+``__order__`` can be used to supply it; otherwise, value order is used::
+
+    >>> class Shake(Enum):
+    ...   __order__ = 'vanilla chocolate cookies mint'  # only needed in 2.x
+    ...   vanilla = 7
+    ...   chocolate = 4
+    ...   cookies = 9
+    ...   mint = 3
+    ...
+    >>> for shake in Shake:
+    ...   print(shake)
+    ...
+    Shake.vanilla
+    Shake.chocolate
+    Shake.cookies
+    Shake.mint
+
+The ``__order__`` attribute is always removed, and in 3.x it is also ignored
+(order is definition order); however, in the stdlib version it will be ignored
+but not removed.
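+
+Either way, ``__order__`` itself never becomes a member.  A quick doctest-style
+check using the ``Shake`` class above (a sketch added for illustration)::
+
+    >>> list(Shake.__members__)
+    ['vanilla', 'chocolate', 'cookies', 'mint']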
+
+Enumeration members are hashable, so they can be used in dictionaries and sets::
+
+    >>> apples = {}
+    >>> apples[Color.red] = 'red delicious'
+    >>> apples[Color.green] = 'granny smith'
+    >>> apples == {Color.red: 'red delicious', Color.green: 'granny smith'}
+    True
+
+
+Programmatic access to enumeration members and their attributes
+---------------------------------------------------------------
+
+Sometimes it's useful to access members in enumerations programmatically (i.e.
+situations where ``Color.red`` won't do because the exact color is not known
+at program-writing time).  ``Enum`` allows such access::
+
+    >>> Color(1)
+    <Color.red: 1>
+    >>> Color(3)
+    <Color.blue: 3>
+
+If you want to access enum members by *name*, use item access::
+
+    >>> Color['red']
+    <Color.red: 1>
+    >>> Color['green']
+    <Color.green: 2>
+
+If you have an enum member and need its ``name`` or ``value``::
+
+    >>> member = Color.red
+    >>> member.name
+    'red'
+    >>> member.value
+    1
+
+
+Duplicating enum members and values
+-----------------------------------
+
+Having two enum members (or any other attribute) with the same name is invalid;
+in Python 3.x this would raise an error, but in Python 2.x the second member
+simply overwrites the first::
+
+    >>> # python 2.x
+    >>> class Shape(Enum):
+    ...   square = 2
+    ...   square = 3
+    ...
+    >>> Shape.square
+    <Shape.square: 3>
+
+    >>> # python 3.x
+    >>> class Shape(Enum):
+    ...   square = 2
+    ...   square = 3
+    Traceback (most recent call last):
+    ...
+    TypeError: Attempted to reuse key: 'square'
+
+However, two enum members are allowed to have the same value.  Given two members
+A and B with the same value (and A defined first), B is an alias to A.  By-value
+lookup of the value of A and B will return A.  By-name lookup of B will also
+return A::
+
+    >>> class Shape(Enum):
+    ...   __order__ = 'square diamond circle alias_for_square'  # only needed in 2.x
+    ...   square = 2
+    ...   diamond = 1
+    ...   circle = 3
+    ...   alias_for_square = 2
+    ...
+    >>> Shape.square
+    <Shape.square: 2>
+    >>> Shape.alias_for_square
+    <Shape.square: 2>
+    >>> Shape(2)
+    <Shape.square: 2>
+
+
+Allowing aliases is not always desirable.  ``unique`` can be used to ensure
+that none exist in a particular enumeration::
+
+    >>> from enum import unique
+    >>> @unique
+    ... class Mistake(Enum):
+    ...   __order__ = 'one two three four'  # only needed in 2.x
+    ...   one = 1
+    ...   two = 2
+    ...   three = 3
+    ...   four = 3
+    Traceback (most recent call last):
+    ...
+    ValueError: duplicate names found in <enum 'Mistake'>: four -> three
+
+Iterating over the members of an enum does not provide the aliases::
+
+    >>> list(Shape)
+    [<Shape.square: 2>, <Shape.diamond: 1>, <Shape.circle: 3>]
+
+The special attribute ``__members__`` is a dictionary mapping names to members.
+It includes all names defined in the enumeration, including the aliases::
+
+    >>> for name, member in sorted(Shape.__members__.items()):
+    ...   name, member
+    ...
+    ('alias_for_square', <Shape.square: 2>)
+    ('circle', <Shape.circle: 3>)
+    ('diamond', <Shape.diamond: 1>)
+    ('square', <Shape.square: 2>)
+
+The ``__members__`` attribute can be used for detailed programmatic access to
+the enumeration members.  For example, finding all the aliases::
+
+    >>> [name for name, member in Shape.__members__.items() if member.name != name]
+    ['alias_for_square']
+
+Comparisons
+-----------
+
+Enumeration members are compared by identity::
+
+    >>> Color.red is Color.red
+    True
+    >>> Color.red is Color.blue
+    False
+    >>> Color.red is not Color.blue
+    True
+
+Ordered comparisons between enumeration values are *not* supported.  Enum
+members are not integers (but see `IntEnum`_ below)::
+
+    >>> Color.red < Color.blue
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    TypeError: unorderable types: Color() < Color()
+
+.. warning::
+
+    In Python 2 *everything* is ordered, even though the ordering may not
+    make sense.  If you want your enumerations to have a sensible ordering
+    check out the `OrderedEnum`_ recipe below.
+
+
+Equality comparisons are defined though::
+
+    >>> Color.blue == Color.red
+    False
+    >>> Color.blue != Color.red
+    True
+    >>> Color.blue == Color.blue
+    True
+
+Comparisons against non-enumeration values will always compare not equal
+(again, ``IntEnum`` was explicitly designed to behave differently, see
+below)::
+
+    >>> Color.blue == 2
+    False
+
+
+Allowed members and attributes of enumerations
+----------------------------------------------
+
+The examples above use integers for enumeration values.  Using integers is
+short and handy (and provided by default by the `Functional API`_), but not
+strictly enforced.  In the vast majority of use-cases, one doesn't care what
+the actual value of an enumeration is.  But if the value *is* important,
+enumerations can have arbitrary values.
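+
+For example, a minimal sketch using string values (the ``Signal`` class and its
+values are illustrative, not part of the module)::
+
+    >>> class Signal(Enum):   # illustrative
+    ...   go = 'green'
+    ...   stop = 'red'
+    ...
+    >>> Signal.stop.value
+    'red'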
+
+Enumerations are Python classes, and can have methods and special methods as
+usual.  If we have this enumeration::
+
+    >>> class Mood(Enum):
+    ...   funky = 1
+    ...   happy = 3
+    ... 
+    ...   def describe(self):
+    ...     # self is the member here
+    ...     return self.name, self.value
+    ... 
+    ...   def __str__(self):
+    ...     return 'my custom str! {0}'.format(self.value)
+    ... 
+    ...   @classmethod
+    ...   def favorite_mood(cls):
+    ...     # cls here is the enumeration
+    ...     return cls.happy
+
+Then::
+
+    >>> Mood.favorite_mood()
+    <Mood.happy: 3>
+    >>> Mood.happy.describe()
+    ('happy', 3)
+    >>> str(Mood.funky)
+    'my custom str! 1'
+
+The rules for what is allowed are as follows: _sunder_ names (starting and
+ending with a single underscore) are reserved by enum and cannot be used;
+all other attributes defined within an enumeration will become members of this
+enumeration, with the exception of *__dunder__* names and descriptors (methods
+are also descriptors).
+
+Note:
+
+    If your enumeration defines ``__new__`` and/or ``__init__`` then
+    whatever value(s) were given to the enum member will be passed into
+    those methods.  See `Planet`_ for an example.
+
+
+Restricted subclassing of enumerations
+--------------------------------------
+
+Subclassing an enumeration is allowed only if the enumeration does not define
+any members.  So this is forbidden::
+
+    >>> class MoreColor(Color):
+    ...   pink = 17
+    Traceback (most recent call last):
+    ...
+    TypeError: Cannot extend enumerations
+
+But this is allowed::
+
+    >>> class Foo(Enum):
+    ...   def some_behavior(self):
+    ...     pass
+    ...
+    >>> class Bar(Foo):
+    ...   happy = 1
+    ...   sad = 2
+    ...
+
+Allowing subclassing of enums that define members would lead to a violation of
+some important invariants of types and instances.  On the other hand, it makes
+sense to allow sharing some common behavior between a group of enumerations.
+(See `OrderedEnum`_ for an example.)
+
+
+Pickling
+--------
+
+Enumerations can be pickled and unpickled::
+
+    >>> from enum.test_enum import Fruit
+    >>> from pickle import dumps, loads
+    >>> Fruit.tomato is loads(dumps(Fruit.tomato, 2))
+    True
+
+The usual restrictions for pickling apply: picklable enums must be defined in
+the top level of a module, since unpickling requires them to be importable
+from that module.
+
+Note:
+
+    With pickle protocol version 4 (introduced in Python 3.4) it is possible
+    to easily pickle enums nested in other classes.
+
+
+
+Functional API
+--------------
+
+The ``Enum`` class is callable, providing the following functional API::
+
+    >>> Animal = Enum('Animal', 'ant bee cat dog')
+    >>> Animal
+    <enum 'Animal'>
+    >>> Animal.ant
+    <Animal.ant: 1>
+    >>> Animal.ant.value
+    1
+    >>> list(Animal)
+    [<Animal.ant: 1>, <Animal.bee: 2>, <Animal.cat: 3>, <Animal.dog: 4>]
+
+The semantics of this API resemble ``namedtuple``. The first argument
+of the call to ``Enum`` is the name of the enumeration. 
+
+The second argument is the *source* of enumeration member names.  It can be a
+whitespace-separated string of names, a sequence of names, a sequence of
+2-tuples with key/value pairs, or a mapping (e.g. dictionary) of names to
+values.  The last two options enable assigning arbitrary values to
+enumerations; the others auto-assign increasing integers starting with 1.  A
+new class derived from ``Enum`` is returned.  In other words, the above
+assignment to ``Animal`` is equivalent to::
+
+    >>> class Animals(Enum):
+    ...   ant = 1
+    ...   bee = 2
+    ...   cat = 3
+    ...   dog = 4
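+
+The 2-tuple and mapping forms mentioned above let you choose the values as
+well; a small sketch (the ``Creature`` name and values are illustrative)::
+
+    >>> Creature = Enum('Creature', [('spider', 8), ('beetle', 6)])  # illustrative
+    >>> Creature.spider.value
+    8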
+
+Pickling enums created with the functional API can be tricky as frame stack
+implementation details are used to try and figure out which module the
+enumeration is being created in (e.g. it will fail if you use a utility
+function in a separate module, and also may not work on IronPython or Jython).
+The solution is to specify the module name explicitly as follows::
+
+    >>> Animals = Enum('Animals', 'ant bee cat dog', module=__name__)
+
+Derived Enumerations
+--------------------
+
+IntEnum
+^^^^^^^
+
+A variation of ``Enum`` is provided which is also a subclass of
+``int``.  Members of an ``IntEnum`` can be compared to integers;
+by extension, integer enumerations of different types can also be compared
+to each other::
+
+    >>> from enum import IntEnum
+    >>> class Shape(IntEnum):
+    ...   circle = 1
+    ...   square = 2
+    ...
+    >>> class Request(IntEnum):
+    ...   post = 1
+    ...   get = 2
+    ...
+    >>> Shape == 1
+    False
+    >>> Shape.circle == 1
+    True
+    >>> Shape.circle == Request.post
+    True
+
+However, they still can't be compared to standard ``Enum`` enumerations::
+
+    >>> class Shape(IntEnum):
+    ...   circle = 1
+    ...   square = 2
+    ...
+    >>> class Color(Enum):
+    ...   red = 1
+    ...   green = 2
+    ...
+    >>> Shape.circle == Color.red
+    False
+
+``IntEnum`` values behave like integers in other ways you'd expect::
+
+    >>> int(Shape.circle)
+    1
+    >>> ['a', 'b', 'c'][Shape.circle]
+    'b'
+    >>> [i for i in range(Shape.square)]
+    [0, 1]
+
+For the vast majority of code, ``Enum`` is strongly recommended,
+since ``IntEnum`` breaks some semantic promises of an enumeration (by
+being comparable to integers, and thus by transitivity to other
+unrelated enumerations).  It should be used only in special cases where
+there's no other choice; for example, when integer constants are
+replaced with enumerations and backwards compatibility is required with code
+that still expects integers.
+
+
+Others
+^^^^^^
+
+While ``IntEnum`` is part of the ``enum`` module, it would be very
+simple to implement independently::
+
+    class IntEnum(int, Enum):
+        pass
+
+This demonstrates how similar derived enumerations can be defined; for example
+a ``StrEnum`` that mixes in ``str`` instead of ``int``.
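+
+As a sketch of that idea (the ``Direction`` class and its members are
+illustrative, not part of the module)::
+
+    >>> class StrEnum(str, Enum):   # illustrative
+    ...   pass
+    ...
+    >>> class Direction(StrEnum):   # illustrative
+    ...   north = 'N'
+    ...   south = 'S'
+    ...
+    >>> Direction.north == 'N'
+    True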
+
+Some rules:
+
+1. When subclassing ``Enum``, mix-in types must appear before
+   ``Enum`` itself in the sequence of bases, as in the ``IntEnum``
+   example above.
+2. While ``Enum`` can have members of any type, once you mix in an
+   additional type, all the members must have values of that type, e.g.
+   ``int`` above.  This restriction does not apply to mix-ins which only
+   add methods and don't specify another data type such as ``int`` or
+   ``str``.
+3. When another data type is mixed in, the ``value`` attribute is *not the
+   same* as the enum member itself, although it is equivalent and will compare
+   equal.
+4. %-style formatting:  ``%s`` and ``%r`` call ``Enum``'s ``__str__`` and
+   ``__repr__`` respectively; other codes (such as ``%i`` or ``%h`` for
+   IntEnum) treat the enum member as its mixed-in type.
+
+   Note: Prior to Python 3.4 there is a bug in ``str``'s %-formatting: ``int``
+   subclasses are printed as strings and not numbers when the ``%d``, ``%i``,
+   or ``%u`` codes are used.
+5. ``str.__format__`` (or ``format``) will use the mixed-in
+   type's ``__format__``.  If the ``Enum``'s ``str`` or
+   ``repr`` is desired, use the ``!s`` or ``!r`` ``str`` format codes, as the
+   sketch below shows.
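+
+A short sketch of rules 4 and 5, using the ``Shape`` ``IntEnum`` from above
+(output as documented for Python 3.4 and later)::
+
+    >>> '%s is worth %d' % (Shape.circle, Shape.circle)
+    'Shape.circle is worth 1'
+    >>> '{0}'.format(Shape.circle)
+    '1'
+    >>> '{0!s}'.format(Shape.circle)
+    'Shape.circle'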
+
+
+Decorators
+----------
+
+unique
+^^^^^^
+
+A ``class`` decorator specifically for enumerations.  It searches an
+enumeration's ``__members__``, gathering any aliases it finds; if any are
+found, ``ValueError`` is raised with the details::
+
+    >>> @unique
+    ... class NoDupes(Enum):
+    ...    first = 'one'
+    ...    second = 'two'
+    ...    third = 'two'
+    Traceback (most recent call last):
+    ...
+    ValueError: duplicate names found in <enum 'NoDupes'>: third -> second
+
+
+Interesting examples
+--------------------
+
+While ``Enum`` and ``IntEnum`` are expected to cover the majority of
+use-cases, they cannot cover them all.  Here are recipes for some different
+types of enumerations that can be used directly, or as examples for creating
+one's own.
+
+
+AutoNumber
+^^^^^^^^^^
+
+Avoids having to specify the value for each enumeration member::
+
+    >>> class AutoNumber(Enum):
+    ...     def __new__(cls):
+    ...         value = len(cls.__members__) + 1
+    ...         obj = object.__new__(cls)
+    ...         obj._value_ = value
+    ...         return obj
+    ...
+    >>> class Color(AutoNumber):
+    ...     __order__ = "red green blue"  # only needed in 2.x
+    ...     red = ()
+    ...     green = ()
+    ...     blue = ()
+    ...
+    >>> Color.green.value == 2
+    True
+
+Note:
+
+    The `__new__` method, if defined, is used during creation of the Enum
+    members; it is then replaced by Enum's `__new__` which is used after
+    class creation for lookup of existing members.  Due to the way Enums are
+    supposed to behave, there is no way to customize Enum's `__new__`.
+
+
+UniqueEnum
+^^^^^^^^^^
+
+Raises an error if a duplicate member name is found instead of creating an
+alias::
+
+    >>> class UniqueEnum(Enum):
+    ...     def __init__(self, *args):
+    ...         cls = self.__class__
+    ...         if any(self.value == e.value for e in cls):
+    ...             a = self.name
+    ...             e = cls(self.value).name
+    ...             raise ValueError(
+    ...                     "aliases not allowed in UniqueEnum:  %r --> %r"
+    ...                     % (a, e))
+    ... 
+    >>> class Color(UniqueEnum):
+    ...     red = 1
+    ...     green = 2
+    ...     blue = 3
+    ...     grene = 2
+    Traceback (most recent call last):
+    ...
+    ValueError: aliases not allowed in UniqueEnum:  'grene' --> 'green'
+    
+
+OrderedEnum
+^^^^^^^^^^^
+
+An ordered enumeration that is not based on ``IntEnum`` and so maintains
+the normal ``Enum`` invariants (such as not being comparable to other
+enumerations)::
+
+    >>> class OrderedEnum(Enum):
+    ...     def __ge__(self, other):
+    ...         if self.__class__ is other.__class__:
+    ...             return self._value_ >= other._value_
+    ...         return NotImplemented
+    ...     def __gt__(self, other):
+    ...         if self.__class__ is other.__class__:
+    ...             return self._value_ > other._value_
+    ...         return NotImplemented
+    ...     def __le__(self, other):
+    ...         if self.__class__ is other.__class__:
+    ...             return self._value_ <= other._value_
+    ...         return NotImplemented
+    ...     def __lt__(self, other):
+    ...         if self.__class__ is other.__class__:
+    ...             return self._value_ < other._value_
+    ...         return NotImplemented
+    ...
+    >>> class Grade(OrderedEnum):
+    ...     __ordered__ = 'A B C D F'
+    ...     A = 5
+    ...     B = 4
+    ...     C = 3
+    ...     D = 2
+    ...     F = 1
+    ...
+    >>> Grade.C < Grade.A
+    True
+
+
+Planet
+^^^^^^
+
+If ``__new__`` or ``__init__`` is defined, the value of the enum member
+will be passed to those methods::
+
+    >>> class Planet(Enum):
+    ...     MERCURY = (3.303e+23, 2.4397e6)
+    ...     VENUS   = (4.869e+24, 6.0518e6)
+    ...     EARTH   = (5.976e+24, 6.37814e6)
+    ...     MARS    = (6.421e+23, 3.3972e6)
+    ...     JUPITER = (1.9e+27,   7.1492e7)
+    ...     SATURN  = (5.688e+26, 6.0268e7)
+    ...     URANUS  = (8.686e+25, 2.5559e7)
+    ...     NEPTUNE = (1.024e+26, 2.4746e7)
+    ...     def __init__(self, mass, radius):
+    ...         self.mass = mass       # in kilograms
+    ...         self.radius = radius   # in meters
+    ...     @property
+    ...     def surface_gravity(self):
+    ...         # universal gravitational constant  (m3 kg-1 s-2)
+    ...         G = 6.67300E-11
+    ...         return G * self.mass / (self.radius * self.radius)
+    ... 
+    >>> Planet.EARTH.value
+    (5.976e+24, 6378140.0)
+    >>> Planet.EARTH.surface_gravity
+    9.802652743337129
+
+
+How are Enums different?
+------------------------
+
+Enums have a custom metaclass that affects many aspects of both derived Enum
+classes and their instances (members).
+
+
+Enum Classes
+^^^^^^^^^^^^
+
+The ``EnumMeta`` metaclass is responsible for providing the
+``__contains__``, ``__dir__``, ``__iter__`` and other methods that
+allow one to do things with an ``Enum`` class that fail on a typical
+class, such as ``list(Color)`` or ``some_var in Color``.  ``EnumMeta`` is
+responsible for ensuring that various other methods on the final ``Enum``
+class are correct (such as ``__new__``, ``__getnewargs__``,
+``__str__`` and ``__repr__``).
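+
+A minimal sketch (the ``Flag`` class is illustrative)::
+
+    >>> class Flag(Enum):   # illustrative
+    ...   on = 1
+    ...   off = 2
+    ...
+    >>> list(Flag)
+    [<Flag.on: 1>, <Flag.off: 2>]
+    >>> Flag.on in Flag
+    True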
+
+.. note::
+
+    ``__dir__`` is not changed in the Python 2 line as it messes up some
+    of the decorators included in the stdlib.
+
+
+Enum Members (aka instances)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The most interesting thing about Enum members is that they are singletons.
+``EnumMeta`` creates them all while it is creating the ``Enum``
+class itself, and then puts a custom ``__new__`` in place to ensure
+that no new ones are ever instantiated by returning only the existing
+member instances.
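+
+For instance (the ``Token`` class is illustrative)::
+
+    >>> class Token(Enum):   # illustrative
+    ...   alpha = 1
+    ...
+    >>> Token(1) is Token.alpha is Token['alpha']
+    True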
+
+
+Finer Points
+^^^^^^^^^^^^
+
+``Enum`` members are instances of an ``Enum`` class, and even though they
+are accessible as `EnumClass.member1.member2`, they should not be
+accessed directly from the member as that lookup may fail or, worse,
+return something besides the ``Enum`` member you were looking for
+(changed in version 1.1.1)::
+
+    >>> class FieldTypes(Enum):
+    ...     name = 1
+    ...     value = 2
+    ...     size = 3
+    ...
+    >>> FieldTypes.value.size
+    <FieldTypes.size: 3>
+    >>> FieldTypes.size.value
+    3
+
+The ``__members__`` attribute is only available on the class.
+
+In Python 3.x ``__members__`` is always an ``OrderedDict``, with the order being
+the definition order.  In Python 2.7 ``__members__`` is an ``OrderedDict`` if
+``__order__`` was specified, and a plain ``dict`` otherwise.  In all other Python
+2.x versions ``__members__`` is a plain ``dict`` even if ``__order__`` was specified
+as the ``OrderedDict`` type didn't exist yet.
+
+If you give your ``Enum`` subclass extra methods, like the `Planet`_
+class above, those methods will show up in a `dir` of the member,
+but not of the class::
+
+    >>> dir(Planet)
+    ['EARTH', 'JUPITER', 'MARS', 'MERCURY', 'NEPTUNE', 'SATURN', 'URANUS',
+    'VENUS', '__class__', '__doc__', '__members__', '__module__']
+    >>> dir(Planet.EARTH)
+    ['__class__', '__doc__', '__module__', 'name', 'surface_gravity', 'value']
+
+A ``__new__`` method will only be used for the creation of the
+``Enum`` members -- after that it is replaced.  This means if you wish to
+change how ``Enum`` members are looked up you either have to write a
+helper function or a ``classmethod``.
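+
+For example, a case-insensitive lookup could be sketched as a ``classmethod``
+(the ``from_name`` helper is hypothetical, not part of the module)::
+
+    >>> class Color(Enum):
+    ...   red = 1
+    ...   green = 2
+    ...   @classmethod
+    ...   def from_name(cls, name):   # hypothetical helper
+    ...     return cls[name.lower()]
+    ...
+    >>> Color.from_name('RED')
+    <Color.red: 1>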
diff --git a/tools/third_party/enum/enum/test.py b/tools/third_party/enum/enum/test.py
new file mode 100644
index 0000000..d9edfae
--- /dev/null
+++ b/tools/third_party/enum/enum/test.py
@@ -0,0 +1,1820 @@
+from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
+import sys
+import unittest
+pyver = float('%s.%s' % sys.version_info[:2])
+if pyver < 2.5:
+    sys.path.insert(0, '.')
+import enum
+from enum import Enum, IntEnum, unique, EnumMeta
+
+if pyver < 2.6:
+    from __builtin__ import enumerate as bltin_enumerate
+    def enumerate(thing, start=0):
+        result = []
+        for i, item in bltin_enumerate(thing):
+            i = i + start
+            result.append((i, item))
+        return result
+
+try:
+    any
+except NameError:
+    def any(iterable):
+        for element in iterable:
+            if element:
+                return True
+        return False
+
+try:
+    unicode
+except NameError:
+    unicode = str
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    OrderedDict = None
+
+# for pickle tests
+try:
+    class Stooges(Enum):
+        LARRY = 1
+        CURLY = 2
+        MOE = 3
+except Exception:
+    Stooges = sys.exc_info()[1]
+
+try:
+    class IntStooges(int, Enum):
+        LARRY = 1
+        CURLY = 2
+        MOE = 3
+except Exception:
+    IntStooges = sys.exc_info()[1]
+
+try:
+    class FloatStooges(float, Enum):
+        LARRY = 1.39
+        CURLY = 2.72
+        MOE = 3.142596
+except Exception:
+    FloatStooges = sys.exc_info()[1]
+
+# for pickle test and subclass tests
+try:
+    class StrEnum(str, Enum):
+        'accepts only string values'
+    class Name(StrEnum):
+        BDFL = 'Guido van Rossum'
+        FLUFL = 'Barry Warsaw'
+except Exception:
+    Name = sys.exc_info()[1]
+
+try:
+    Question = Enum('Question', 'who what when where why', module=__name__)
+except Exception:
+    Question = sys.exc_info()[1]
+
+try:
+    Answer = Enum('Answer', 'him this then there because')
+except Exception:
+    Answer = sys.exc_info()[1]
+
+try:
+    Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
+except Exception:
+    Theory = sys.exc_info()[1]
+
+# for doctests
+try:
+    class Fruit(Enum):
+        tomato = 1
+        banana = 2
+        cherry = 3
+except Exception:
+    pass
+
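+# Round-trips `source` through each pickle protocol in the given range; the
+# result must be `source` itself when `target` is None, otherwise it is
+# compared against `target`.  Failures are collected per protocol.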
+def test_pickle_dump_load(assertion, source, target=None,
+        protocol=(0, HIGHEST_PROTOCOL)):
+    start, stop = protocol
+    failures = []
+    for protocol in range(start, stop+1):
+        try:
+            if target is None:
+                assertion(loads(dumps(source, protocol=protocol)) is source)
+            else:
+                assertion(loads(dumps(source, protocol=protocol)), target)
+        except Exception:
+            exc, tb = sys.exc_info()[1:]
+            failures.append('%2d: %s' %(protocol, exc))
+    if failures:
+        raise ValueError('Failed with protocols: %s' % ', '.join(failures))
+
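+# Asserts (via `assertion`, e.g. assertRaises) that pickling `obj` raises
+# `exception` under each protocol in the given range.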
+def test_pickle_exception(assertion, exception, obj,
+        protocol=(0, HIGHEST_PROTOCOL)):
+    start, stop = protocol
+    failures = []
+    for protocol in range(start, stop+1):
+        try:
+            assertion(exception, dumps, obj, protocol=protocol)
+        except Exception:
+            exc = sys.exc_info()[1]
+            failures.append('%d: %s %s' % (protocol, exc.__class__.__name__, exc))
+    if failures:
+        raise ValueError('Failed with protocols: %s' % ', '.join(failures))
+
+
+class TestHelpers(unittest.TestCase):
+    # _is_descriptor, _is_sunder, _is_dunder
+
+    def test_is_descriptor(self):
+        class foo:
+            pass
+        for attr in ('__get__','__set__','__delete__'):
+            obj = foo()
+            self.assertFalse(enum._is_descriptor(obj))
+            setattr(obj, attr, 1)
+            self.assertTrue(enum._is_descriptor(obj))
+
+    def test_is_sunder(self):
+        for s in ('_a_', '_aa_'):
+            self.assertTrue(enum._is_sunder(s))
+
+        for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
+                '__', '___', '____', '_____',):
+            self.assertFalse(enum._is_sunder(s))
+
+    def test_is_dunder(self):
+        for s in ('__a__', '__aa__'):
+            self.assertTrue(enum._is_dunder(s))
+        for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
+                '__', '___', '____', '_____',):
+            self.assertFalse(enum._is_dunder(s))
+
+
+class TestEnum(unittest.TestCase):
+    def setUp(self):
+        class Season(Enum):
+            SPRING = 1
+            SUMMER = 2
+            AUTUMN = 3
+            WINTER = 4
+        self.Season = Season
+
+        class Konstants(float, Enum):
+            E = 2.7182818
+            PI = 3.1415926
+            TAU = 2 * PI
+        self.Konstants = Konstants
+
+        class Grades(IntEnum):
+            A = 5
+            B = 4
+            C = 3
+            D = 2
+            F = 0
+        self.Grades = Grades
+
+        class Directional(str, Enum):
+            EAST = 'east'
+            WEST = 'west'
+            NORTH = 'north'
+            SOUTH = 'south'
+        self.Directional = Directional
+
+        from datetime import date
+        class Holiday(date, Enum):
+            NEW_YEAR = 2013, 1, 1
+            IDES_OF_MARCH = 2013, 3, 15
+        self.Holiday = Holiday
+
+    if pyver >= 3.0:     # do not specify custom `dir` on previous versions
+        def test_dir_on_class(self):
+            Season = self.Season
+            self.assertEqual(
+                set(dir(Season)),
+                set(['__class__', '__doc__', '__members__', '__module__',
+                    'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
+                )
+
+        def test_dir_on_item(self):
+            Season = self.Season
+            self.assertEqual(
+                set(dir(Season.WINTER)),
+                set(['__class__', '__doc__', '__module__', 'name', 'value']),
+                )
+
+        def test_dir_with_added_behavior(self):
+            class Test(Enum):
+                this = 'that'
+                these = 'those'
+                def wowser(self):
+                    return ("Wowser! I'm %s!" % self.name)
+            self.assertEqual(
+                    set(dir(Test)),
+                    set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
+                    )
+            self.assertEqual(
+                    set(dir(Test.this)),
+                    set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
+                    )
+
+        def test_dir_on_sub_with_behavior_on_super(self):
+            # see issue22506
+            class SuperEnum(Enum):
+                def invisible(self):
+                    return "did you see me?"
+            class SubEnum(SuperEnum):
+                sample = 5
+            self.assertEqual(
+                    set(dir(SubEnum.sample)),
+                    set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
+                    )
+
+    if pyver >= 2.7:    # OrderedDict first available here
+        def test_members_is_ordereddict_if_ordered(self):
+            class Ordered(Enum):
+                __order__ = 'first second third'
+                first = 'bippity'
+                second = 'boppity'
+                third = 'boo'
+            self.assertTrue(type(Ordered.__members__) is OrderedDict)
+
+        def test_members_is_ordereddict_if_not_ordered(self):
+            class Unordered(Enum):
+                this = 'that'
+                these = 'those'
+            self.assertTrue(type(Unordered.__members__) is OrderedDict)
+
+    if pyver >= 3.0:     # all objects are ordered in Python 2.x
+        def test_members_is_always_ordered(self):
+            class AlwaysOrdered(Enum):
+                first = 1
+                second = 2
+                third = 3
+            self.assertTrue(type(AlwaysOrdered.__members__) is OrderedDict)
+
+        def test_comparisons(self):
+            def bad_compare():
+                Season.SPRING > 4
+            Season = self.Season
+            self.assertNotEqual(Season.SPRING, 1)
+            self.assertRaises(TypeError, bad_compare)
+
+            class Part(Enum):
+                SPRING = 1
+                CLIP = 2
+                BARREL = 3
+
+            self.assertNotEqual(Season.SPRING, Part.SPRING)
+            def bad_compare():
+                Season.SPRING < Part.CLIP
+            self.assertRaises(TypeError, bad_compare)
+
+    def test_enum_in_enum_out(self):
+        Season = self.Season
+        self.assertTrue(Season(Season.WINTER) is Season.WINTER)
+
+    def test_enum_value(self):
+        Season = self.Season
+        self.assertEqual(Season.SPRING.value, 1)
+
+    def test_intenum_value(self):
+        self.assertEqual(IntStooges.CURLY.value, 2)
+
+    def test_enum(self):
+        Season = self.Season
+        lst = list(Season)
+        self.assertEqual(len(lst), len(Season))
+        self.assertEqual(len(Season), 4, Season)
+        self.assertEqual(
+            [Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
+
+        for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split()):
+            i += 1
+            e = Season(i)
+            self.assertEqual(e, getattr(Season, season))
+            self.assertEqual(e.value, i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, season)
+            self.assertTrue(e in Season)
+            self.assertTrue(type(e) is Season)
+            self.assertTrue(isinstance(e, Season))
+            self.assertEqual(str(e), 'Season.' + season)
+            self.assertEqual(
+                    repr(e),
+                    '<Season.%s: %s>' % (season, i),
+                    )
+
+    def test_value_name(self):
+        Season = self.Season
+        self.assertEqual(Season.SPRING.name, 'SPRING')
+        self.assertEqual(Season.SPRING.value, 1)
+        def set_name(obj, new_value):
+            obj.name = new_value
+        def set_value(obj, new_value):
+            obj.value = new_value
+        self.assertRaises(AttributeError, set_name, Season.SPRING, 'invierno', )
+        self.assertRaises(AttributeError, set_value, Season.SPRING, 2)
+
+    def test_attribute_deletion(self):
+        class Season(Enum):
+            SPRING = 1
+            SUMMER = 2
+            AUTUMN = 3
+            WINTER = 4
+
+            def spam(cls):
+                pass
+
+        self.assertTrue(hasattr(Season, 'spam'))
+        del Season.spam
+        self.assertFalse(hasattr(Season, 'spam'))
+
+        self.assertRaises(AttributeError, delattr, Season, 'SPRING')
+        self.assertRaises(AttributeError, delattr, Season, 'DRY')
+        self.assertRaises(AttributeError, delattr, Season.SPRING, 'name')
+
+    def test_bool_of_class(self):
+        class Empty(Enum):
+            pass
+        self.assertTrue(bool(Empty))
+
+    def test_bool_of_member(self):
+        class Count(Enum):
+            zero = 0
+            one = 1
+            two = 2
+        for member in Count:
+            self.assertTrue(bool(member))
+
+    def test_invalid_names(self):
+        def create_bad_class_1():
+            class Wrong(Enum):
+                mro = 9
+        def create_bad_class_2():
+            class Wrong(Enum):
+                _reserved_ = 3
+        self.assertRaises(ValueError, create_bad_class_1)
+        self.assertRaises(ValueError, create_bad_class_2)
+
+    def test_contains(self):
+        Season = self.Season
+        self.assertTrue(Season.AUTUMN in Season)
+        self.assertTrue(3 not in Season)
+
+        val = Season(3)
+        self.assertTrue(val in Season)
+
+        class OtherEnum(Enum):
+            one = 1; two = 2
+        self.assertTrue(OtherEnum.two not in Season)
+
+    if pyver >= 2.6:     # when `format` came into being
+
+        def test_format_enum(self):
+            Season = self.Season
+            self.assertEqual('{0}'.format(Season.SPRING),
+                             '{0}'.format(str(Season.SPRING)))
+            self.assertEqual( '{0:}'.format(Season.SPRING),
+                              '{0:}'.format(str(Season.SPRING)))
+            self.assertEqual('{0:20}'.format(Season.SPRING),
+                             '{0:20}'.format(str(Season.SPRING)))
+            self.assertEqual('{0:^20}'.format(Season.SPRING),
+                             '{0:^20}'.format(str(Season.SPRING)))
+            self.assertEqual('{0:>20}'.format(Season.SPRING),
+                             '{0:>20}'.format(str(Season.SPRING)))
+            self.assertEqual('{0:<20}'.format(Season.SPRING),
+                             '{0:<20}'.format(str(Season.SPRING)))
+
+        def test_format_enum_custom(self):
+            class TestFloat(float, Enum):
+                one = 1.0
+                two = 2.0
+                def __format__(self, spec):
+                    return 'TestFloat success!'
+            self.assertEqual('{0}'.format(TestFloat.one), 'TestFloat success!')
+
+        def assertFormatIsValue(self, spec, member):
+            self.assertEqual(spec.format(member), spec.format(member.value))
+
+        def test_format_enum_date(self):
+            Holiday = self.Holiday
+            self.assertFormatIsValue('{0}', Holiday.IDES_OF_MARCH)
+            self.assertFormatIsValue('{0:}', Holiday.IDES_OF_MARCH)
+            self.assertFormatIsValue('{0:20}', Holiday.IDES_OF_MARCH)
+            self.assertFormatIsValue('{0:^20}', Holiday.IDES_OF_MARCH)
+            self.assertFormatIsValue('{0:>20}', Holiday.IDES_OF_MARCH)
+            self.assertFormatIsValue('{0:<20}', Holiday.IDES_OF_MARCH)
+            self.assertFormatIsValue('{0:%Y %m}', Holiday.IDES_OF_MARCH)
+            self.assertFormatIsValue('{0:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
+
+        def test_format_enum_float(self):
+            Konstants = self.Konstants
+            self.assertFormatIsValue('{0}', Konstants.TAU)
+            self.assertFormatIsValue('{0:}', Konstants.TAU)
+            self.assertFormatIsValue('{0:20}', Konstants.TAU)
+            self.assertFormatIsValue('{0:^20}', Konstants.TAU)
+            self.assertFormatIsValue('{0:>20}', Konstants.TAU)
+            self.assertFormatIsValue('{0:<20}', Konstants.TAU)
+            self.assertFormatIsValue('{0:n}', Konstants.TAU)
+            self.assertFormatIsValue('{0:5.2}', Konstants.TAU)
+            self.assertFormatIsValue('{0:f}', Konstants.TAU)
+
+        def test_format_enum_int(self):
+            Grades = self.Grades
+            self.assertFormatIsValue('{0}', Grades.C)
+            self.assertFormatIsValue('{0:}', Grades.C)
+            self.assertFormatIsValue('{0:20}', Grades.C)
+            self.assertFormatIsValue('{0:^20}', Grades.C)
+            self.assertFormatIsValue('{0:>20}', Grades.C)
+            self.assertFormatIsValue('{0:<20}', Grades.C)
+            self.assertFormatIsValue('{0:+}', Grades.C)
+            self.assertFormatIsValue('{0:08X}', Grades.C)
+            self.assertFormatIsValue('{0:b}', Grades.C)
+
+        def test_format_enum_str(self):
+            Directional = self.Directional
+            self.assertFormatIsValue('{0}', Directional.WEST)
+            self.assertFormatIsValue('{0:}', Directional.WEST)
+            self.assertFormatIsValue('{0:20}', Directional.WEST)
+            self.assertFormatIsValue('{0:^20}', Directional.WEST)
+            self.assertFormatIsValue('{0:>20}', Directional.WEST)
+            self.assertFormatIsValue('{0:<20}', Directional.WEST)
+
+    def test_hash(self):
+        Season = self.Season
+        dates = {}
+        dates[Season.WINTER] = '1225'
+        dates[Season.SPRING] = '0315'
+        dates[Season.SUMMER] = '0704'
+        dates[Season.AUTUMN] = '1031'
+        self.assertEqual(dates[Season.AUTUMN], '1031')
+
+    def test_enum_duplicates(self):
+        _order_ = "SPRING SUMMER AUTUMN WINTER"
+        class Season(Enum):
+            SPRING = 1
+            SUMMER = 2
+            AUTUMN = FALL = 3
+            WINTER = 4
+            ANOTHER_SPRING = 1
+        lst = list(Season)
+        self.assertEqual(
+            lst,
+            [Season.SPRING, Season.SUMMER,
+             Season.AUTUMN, Season.WINTER,
+            ])
+        self.assertTrue(Season.FALL is Season.AUTUMN)
+        self.assertEqual(Season.FALL.value, 3)
+        self.assertEqual(Season.AUTUMN.value, 3)
+        self.assertTrue(Season(3) is Season.AUTUMN)
+        self.assertTrue(Season(1) is Season.SPRING)
+        self.assertEqual(Season.FALL.name, 'AUTUMN')
+        self.assertEqual(
+                set([k for k,v in Season.__members__.items() if v.name != k]),
+                set(['FALL', 'ANOTHER_SPRING']),
+                )
+
+    if pyver >= 3.0:
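+        # `cls` is bound to vars(), the TestEnum class namespace being built;
+        # the exec'd function stored in `result` is added to it below as
+        # test_duplicate_name.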
+        cls = vars()
+        result = {'Enum':Enum}
+        exec("""def test_duplicate_name(self):
+            with self.assertRaises(TypeError):
+                class Color(Enum):
+                    red = 1
+                    green = 2
+                    blue = 3
+                    red = 4
+
+            with self.assertRaises(TypeError):
+                class Color(Enum):
+                    red = 1
+                    green = 2
+                    blue = 3
+                    def red(self):
+                        return 'red'
+
+            with self.assertRaises(TypeError):
+                class Color(Enum):
+                    @property
+
+                    def red(self):
+                        return 'redder'
+                    red = 1
+                    green = 2
+                    blue = 3""",
+            result)
+        cls['test_duplicate_name'] = result['test_duplicate_name']
+
+    def test_enum_with_value_name(self):
+        class Huh(Enum):
+            name = 1
+            value = 2
+        self.assertEqual(
+            list(Huh),
+            [Huh.name, Huh.value],
+            )
+        self.assertTrue(type(Huh.name) is Huh)
+        self.assertEqual(Huh.name.name, 'name')
+        self.assertEqual(Huh.name.value, 1)
+
+    def test_intenum_from_scratch(self):
+        class phy(int, Enum):
+            pi = 3
+            tau = 2 * pi
+        self.assertTrue(phy.pi < phy.tau)
+
+    def test_intenum_inherited(self):
+        class IntEnum(int, Enum):
+            pass
+        class phy(IntEnum):
+            pi = 3
+            tau = 2 * pi
+        self.assertTrue(phy.pi < phy.tau)
+
+    def test_floatenum_from_scratch(self):
+        class phy(float, Enum):
+            pi = 3.1415926
+            tau = 2 * pi
+        self.assertTrue(phy.pi < phy.tau)
+
+    def test_floatenum_inherited(self):
+        class FloatEnum(float, Enum):
+            pass
+        class phy(FloatEnum):
+            pi = 3.1415926
+            tau = 2 * pi
+        self.assertTrue(phy.pi < phy.tau)
+
+    def test_strenum_from_scratch(self):
+        class phy(str, Enum):
+            pi = 'Pi'
+            tau = 'Tau'
+        self.assertTrue(phy.pi < phy.tau)
+
+    def test_strenum_inherited(self):
+        class StrEnum(str, Enum):
+            pass
+        class phy(StrEnum):
+            pi = 'Pi'
+            tau = 'Tau'
+        self.assertTrue(phy.pi < phy.tau)
+
+    def test_intenum(self):
+        class WeekDay(IntEnum):
+            SUNDAY = 1
+            MONDAY = 2
+            TUESDAY = 3
+            WEDNESDAY = 4
+            THURSDAY = 5
+            FRIDAY = 6
+            SATURDAY = 7
+
+        self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
+        self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
+
+        lst = list(WeekDay)
+        self.assertEqual(len(lst), len(WeekDay))
+        self.assertEqual(len(WeekDay), 7)
+        target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
+        target = target.split()
+        for i, weekday in enumerate(target):
+            i += 1
+            e = WeekDay(i)
+            self.assertEqual(e, i)
+            self.assertEqual(int(e), i)
+            self.assertEqual(e.name, weekday)
+            self.assertTrue(e in WeekDay)
+            self.assertEqual(lst.index(e)+1, i)
+            self.assertTrue(0 < e < 8)
+            self.assertTrue(type(e) is WeekDay)
+            self.assertTrue(isinstance(e, int))
+            self.assertTrue(isinstance(e, Enum))
+
+    def test_intenum_duplicates(self):
+        class WeekDay(IntEnum):
+            __order__ = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
+            SUNDAY = 1
+            MONDAY = 2
+            TUESDAY = TEUSDAY = 3
+            WEDNESDAY = 4
+            THURSDAY = 5
+            FRIDAY = 6
+            SATURDAY = 7
+        self.assertTrue(WeekDay.TEUSDAY is WeekDay.TUESDAY)
+        self.assertEqual(WeekDay(3).name, 'TUESDAY')
+        self.assertEqual([k for k,v in WeekDay.__members__.items()
+                if v.name != k], ['TEUSDAY', ])
+
+    def test_pickle_enum(self):
+        if isinstance(Stooges, Exception):
+            raise Stooges
+        test_pickle_dump_load(self.assertTrue, Stooges.CURLY)
+        test_pickle_dump_load(self.assertTrue, Stooges)
+
+    def test_pickle_int(self):
+        if isinstance(IntStooges, Exception):
+            raise IntStooges
+        test_pickle_dump_load(self.assertTrue, IntStooges.CURLY)
+        test_pickle_dump_load(self.assertTrue, IntStooges)
+
+    def test_pickle_float(self):
+        if isinstance(FloatStooges, Exception):
+            raise FloatStooges
+        test_pickle_dump_load(self.assertTrue, FloatStooges.CURLY)
+        test_pickle_dump_load(self.assertTrue, FloatStooges)
+
+    def test_pickle_enum_function(self):
+        if isinstance(Answer, Exception):
+            raise Answer
+        test_pickle_dump_load(self.assertTrue, Answer.him)
+        test_pickle_dump_load(self.assertTrue, Answer)
+
+    def test_pickle_enum_function_with_module(self):
+        if isinstance(Question, Exception):
+            raise Question
+        test_pickle_dump_load(self.assertTrue, Question.who)
+        test_pickle_dump_load(self.assertTrue, Question)
+
+    if pyver == 3.4:
+        def test_class_nested_enum_and_pickle_protocol_four(self):
+            # would normally just have this directly in the class namespace
+            class NestedEnum(Enum):
+                twigs = 'common'
+                shiny = 'rare'
+
+            self.__class__.NestedEnum = NestedEnum
+            self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
+            test_pickle_exception(
+                    self.assertRaises, PicklingError, self.NestedEnum.twigs,
+                    protocol=(0, 3))
+            test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs,
+                    protocol=(4, HIGHEST_PROTOCOL))
+
+    elif pyver == 3.5:
+        def test_class_nested_enum_and_pickle_protocol_four(self):
+            # would normally just have this directly in the class namespace
+            class NestedEnum(Enum):
+                twigs = 'common'
+                shiny = 'rare'
+
+            self.__class__.NestedEnum = NestedEnum
+            self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
+            test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs,
+                    protocol=(0, HIGHEST_PROTOCOL))
+
+    def test_exploding_pickle(self):
+        BadPickle = Enum('BadPickle', 'dill sweet bread-n-butter')
+        enum._make_class_unpicklable(BadPickle)
+        globals()['BadPickle'] = BadPickle
+        test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
+        test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
+
+    def test_string_enum(self):
+        class SkillLevel(str, Enum):
+            master = 'what is the sound of one hand clapping?'
+            journeyman = 'why did the chicken cross the road?'
+            apprentice = 'knock, knock!'
+        self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
+
+    def test_getattr_getitem(self):
+        class Period(Enum):
+            morning = 1
+            noon = 2
+            evening = 3
+            night = 4
+        self.assertTrue(Period(2) is Period.noon)
+        self.assertTrue(getattr(Period, 'night') is Period.night)
+        self.assertTrue(Period['morning'] is Period.morning)
+
+    def test_getattr_dunder(self):
+        Season = self.Season
+        self.assertTrue(getattr(Season, '__hash__'))
+
+    def test_iteration_order(self):
+        class Season(Enum):
+            _order_ = 'SUMMER WINTER AUTUMN SPRING'
+            SUMMER = 2
+            WINTER = 4
+            AUTUMN = 3
+            SPRING = 1
+        self.assertEqual(
+                list(Season),
+                [Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
+                )
+
+    def test_iteration_order_reversed(self):
+        self.assertEqual(
+                list(reversed(self.Season)),
+                [self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
+                 self.Season.SPRING]
+                )
+
+    def test_iteration_order_with_unorderable_values(self):
+        class Complex(Enum):
+            a = complex(7, 9)
+            b = complex(3.14, 2)
+            c = complex(1, -1)
+            d = complex(-77, 32)
+        self.assertEqual(
+                list(Complex),
+                [Complex.a, Complex.b, Complex.c, Complex.d],
+                )
+
+    def test_programatic_function_string(self):
+        SummerMonth = Enum('SummerMonth', 'june july august')
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate('june july august'.split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(int(e.value), i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_string_with_start(self):
+        SummerMonth = Enum('SummerMonth', 'june july august', start=10)
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate('june july august'.split(), 10):
+            e = SummerMonth(i)
+            self.assertEqual(int(e.value), i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_string_list(self):
+        SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate('june july august'.split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(int(e.value), i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_string_list_with_start(self):
+        SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate('june july august'.split(), 20):
+            e = SummerMonth(i)
+            self.assertEqual(int(e.value), i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_iterable(self):
+        SummerMonth = Enum(
+                'SummerMonth',
+                (('june', 1), ('july', 2), ('august', 3))
+                )
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate('june july august'.split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(int(e.value), i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_from_dict(self):
+        SummerMonth = Enum(
+                'SummerMonth',
+                dict((('june', 1), ('july', 2), ('august', 3)))
+                )
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        if pyver < 3.0:
+            self.assertEqual(
+                    [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                    lst,
+                    )
+        for i, month in enumerate('june july august'.split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(int(e.value), i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_type(self):
+        SummerMonth = Enum('SummerMonth', 'june july august', type=int)
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate('june july august'.split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_type_with_start(self):
+        SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate('june july august'.split(), 30):
+            e = SummerMonth(i)
+            self.assertEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_type_from_subclass(self):
+        SummerMonth = IntEnum('SummerMonth', 'june july august')
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate('june july august'.split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_type_from_subclass_with_start(self):
+        SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate('june july august'.split(), 40):
+            e = SummerMonth(i)
+            self.assertEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_unicode(self):
+        SummerMonth = Enum('SummerMonth', unicode('june july august'))
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate(unicode('june july august').split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(int(e.value), i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_unicode_list(self):
+        SummerMonth = Enum('SummerMonth', [unicode('june'), unicode('july'), unicode('august')])
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate(unicode('june july august').split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(int(e.value), i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_unicode_iterable(self):
+        SummerMonth = Enum(
+                'SummerMonth',
+                ((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3))
+                )
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate(unicode('june july august').split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(int(e.value), i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_from_unicode_dict(self):
+        SummerMonth = Enum(
+                'SummerMonth',
+                dict(((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3)))
+                )
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        if pyver < 3.0:
+            self.assertEqual(
+                    [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                    lst,
+                    )
+        for i, month in enumerate(unicode('june july august').split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(int(e.value), i)
+            self.assertNotEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_unicode_type(self):
+        SummerMonth = Enum('SummerMonth', unicode('june july august'), type=int)
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate(unicode('june july august').split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programatic_function_unicode_type_from_subclass(self):
+        SummerMonth = IntEnum('SummerMonth', unicode('june july august'))
+        lst = list(SummerMonth)
+        self.assertEqual(len(lst), len(SummerMonth))
+        self.assertEqual(len(SummerMonth), 3, SummerMonth)
+        self.assertEqual(
+                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                lst,
+                )
+        for i, month in enumerate(unicode('june july august').split()):
+            i += 1
+            e = SummerMonth(i)
+            self.assertEqual(e, i)
+            self.assertEqual(e.name, month)
+            self.assertTrue(e in SummerMonth)
+            self.assertTrue(type(e) is SummerMonth)
+
+    def test_programmatic_function_unicode_class(self):
+        if pyver < 3.0:
+            class_names = unicode('SummerMonth'), 'S\xfcmm\xe9rM\xf6nth'.decode('latin1')
+        else:
+            class_names = 'SummerMonth', 'S\xfcmm\xe9rM\xf6nth'
+        for i, class_name in enumerate(class_names):
+            if pyver < 3.0 and i == 1:
+                self.assertRaises(TypeError, Enum, class_name, unicode('june july august'))
+            else:
+                SummerMonth = Enum(class_name, unicode('june july august'))
+                lst = list(SummerMonth)
+                self.assertEqual(len(lst), len(SummerMonth))
+                self.assertEqual(len(SummerMonth), 3, SummerMonth)
+                self.assertEqual(
+                        [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+                        lst,
+                        )
+                for i, month in enumerate(unicode('june july august').split()):
+                    i += 1
+                    e = SummerMonth(i)
+                    self.assertEqual(e.value, i)
+                    self.assertEqual(e.name, month)
+                    self.assertTrue(e in SummerMonth)
+                    self.assertTrue(type(e) is SummerMonth)
+
+    def test_subclassing(self):
+        if isinstance(Name, Exception):
+            raise Name
+        self.assertEqual(Name.BDFL, 'Guido van Rossum')
+        self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
+        self.assertTrue(Name.BDFL is getattr(Name, 'BDFL'))
+        test_pickle_dump_load(self.assertTrue, Name.BDFL)
+
+    def test_extending(self):
+        def bad_extension():
+            class Color(Enum):
+                red = 1
+                green = 2
+                blue = 3
+            class MoreColor(Color):
+                cyan = 4
+                magenta = 5
+                yellow = 6
+        self.assertRaises(TypeError, bad_extension)
+
+    def test_exclude_methods(self):
+        class whatever(Enum):
+            this = 'that'
+            these = 'those'
+            def really(self):
+                return 'no, not %s' % self.value
+        self.assertFalse(type(whatever.really) is whatever)
+        self.assertEqual(whatever.this.really(), 'no, not that')
+
+    def test_wrong_inheritance_order(self):
+        def wrong_inherit():
+            class Wrong(Enum, str):
+                NotHere = 'error before this point'
+        self.assertRaises(TypeError, wrong_inherit)
+
+    def test_intenum_transitivity(self):
+        class number(IntEnum):
+            one = 1
+            two = 2
+            three = 3
+        class numero(IntEnum):
+            uno = 1
+            dos = 2
+            tres = 3
+        self.assertEqual(number.one, numero.uno)
+        self.assertEqual(number.two, numero.dos)
+        self.assertEqual(number.three, numero.tres)
+
+    def test_introspection(self):
+        class Number(IntEnum):
+            one = 100
+            two = 200
+        self.assertTrue(Number.one._member_type_ is int)
+        self.assertTrue(Number._member_type_ is int)
+        class String(str, Enum):
+            yarn = 'soft'
+            rope = 'rough'
+            wire = 'hard'
+        self.assertTrue(String.yarn._member_type_ is str)
+        self.assertTrue(String._member_type_ is str)
+        class Plain(Enum):
+            vanilla = 'white'
+            one = 1
+        self.assertTrue(Plain.vanilla._member_type_ is object)
+        self.assertTrue(Plain._member_type_ is object)
+
+    def test_wrong_enum_in_call(self):
+        class Monochrome(Enum):
+            black = 0
+            white = 1
+        class Gender(Enum):
+            male = 0
+            female = 1
+        self.assertRaises(ValueError, Monochrome, Gender.male)
+
+    def test_wrong_enum_in_mixed_call(self):
+        class Monochrome(IntEnum):
+            black = 0
+            white = 1
+        class Gender(Enum):
+            male = 0
+            female = 1
+        self.assertRaises(ValueError, Monochrome, Gender.male)
+
+    def test_mixed_enum_in_call_1(self):
+        class Monochrome(IntEnum):
+            black = 0
+            white = 1
+        class Gender(IntEnum):
+            male = 0
+            female = 1
+        self.assertTrue(Monochrome(Gender.female) is Monochrome.white)
+
+    def test_mixed_enum_in_call_2(self):
+        class Monochrome(Enum):
+            black = 0
+            white = 1
+        class Gender(IntEnum):
+            male = 0
+            female = 1
+        self.assertTrue(Monochrome(Gender.male) is Monochrome.black)
+
+    def test_flufl_enum(self):
+        class Fluflnum(Enum):
+            def __int__(self):
+                return int(self.value)
+        class MailManOptions(Fluflnum):
+            option1 = 1
+            option2 = 2
+            option3 = 3
+        self.assertEqual(int(MailManOptions.option1), 1)
+
+    def test_no_such_enum_member(self):
+        class Color(Enum):
+            red = 1
+            green = 2
+            blue = 3
+        self.assertRaises(ValueError, Color, 4)
+        self.assertRaises(KeyError, Color.__getitem__, 'chartreuse')
+
+    def test_new_repr(self):
+        class Color(Enum):
+            red = 1
+            green = 2
+            blue = 3
+            def __repr__(self):
+                return "don't you just love shades of %s?" % self.name
+        self.assertEqual(
+                repr(Color.blue),
+                "don't you just love shades of blue?",
+                )
+
+    def test_inherited_repr(self):
+        class MyEnum(Enum):
+            def __repr__(self):
+                return "My name is %s." % self.name
+        class MyIntEnum(int, MyEnum):
+            this = 1
+            that = 2
+            theother = 3
+        self.assertEqual(repr(MyIntEnum.that), "My name is that.")
+
+    def test_multiple_mixin_mro(self):
+        class auto_enum(EnumMeta):
+            def __new__(metacls, cls, bases, classdict):
+                original_dict = classdict
+                classdict = enum._EnumDict()
+                for k, v in original_dict.items():
+                    classdict[k] = v
+                temp = type(classdict)()
+                names = set(classdict._member_names)
+                i = 0
+                for k in classdict._member_names:
+                    v = classdict[k]
+                    if v == ():
+                        v = i
+                    else:
+                        i = v
+                    i += 1
+                    temp[k] = v
+                for k, v in classdict.items():
+                    if k not in names:
+                        temp[k] = v
+                return super(auto_enum, metacls).__new__(
+                        metacls, cls, bases, temp)
+
+        AutoNumberedEnum = auto_enum('AutoNumberedEnum', (Enum,), {})
+
+        AutoIntEnum = auto_enum('AutoIntEnum', (IntEnum,), {})
+
+        class TestAutoNumber(AutoNumberedEnum):
+            a = ()
+            b = 3
+            c = ()
+
+        class TestAutoInt(AutoIntEnum):
+            a = ()
+            b = 3
+            c = ()
+
+    def test_subclasses_with_getnewargs(self):
+        class NamedInt(int):
+            __qualname__ = 'NamedInt'  # needed for pickle protocol 4
+            def __new__(cls, *args):
+                _args = args
+                if len(args) < 1:
+                    raise TypeError("name and value must be specified")
+                name, args = args[0], args[1:]
+                self = int.__new__(cls, *args)
+                self._intname = name
+                self._args = _args
+                return self
+            def __getnewargs__(self):
+                return self._args
+            @property
+            def __name__(self):
+                return self._intname
+            def __repr__(self):
+                # repr() is updated to include the name and type info
+                return "%s(%r, %s)" % (type(self).__name__,
+                                             self.__name__,
+                                             int.__repr__(self))
+            def __str__(self):
+                # str() is unchanged, even if it relies on the repr() fallback
+                base = int
+                base_str = base.__str__
+                if base_str.__objclass__ is object:
+                    return base.__repr__(self)
+                return base_str(self)
+            # for simplicity, we only define one operator that
+            # propagates expressions
+            def __add__(self, other):
+                temp = int(self) + int( other)
+                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+                    return NamedInt(
+                        '(%s + %s)' % (self.__name__, other.__name__),
+                        temp )
+                else:
+                    return temp
+
+        class NEI(NamedInt, Enum):
+            __qualname__ = 'NEI'  # needed for pickle protocol 4
+            x = ('the-x', 1)
+            y = ('the-y', 2)
+
+        self.assertTrue(NEI.__new__ is Enum.__new__)
+        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+        globals()['NamedInt'] = NamedInt
+        globals()['NEI'] = NEI
+        NI5 = NamedInt('test', 5)
+        self.assertEqual(NI5, 5)
+        test_pickle_dump_load(self.assertTrue, NI5, 5)
+        self.assertEqual(NEI.y.value, 2)
+        test_pickle_dump_load(self.assertTrue, NEI.y)
+
+    if pyver >= 3.4:
+        def test_subclasses_with_getnewargs_ex(self):
+            class NamedInt(int):
+                __qualname__ = 'NamedInt'       # needed for pickle protocol 4
+                def __new__(cls, *args):
+                    _args = args
+                    if len(args) < 2:
+                        raise TypeError("name and value must be specified")
+                    name, args = args[0], args[1:]
+                    self = int.__new__(cls, *args)
+                    self._intname = name
+                    self._args = _args
+                    return self
+                def __getnewargs_ex__(self):
+                    return self._args, {}
+                @property
+                def __name__(self):
+                    return self._intname
+                def __repr__(self):
+                    # repr() is updated to include the name and type info
+                    return "{}({!r}, {})".format(type(self).__name__,
+                                                 self.__name__,
+                                                 int.__repr__(self))
+                def __str__(self):
+                    # str() is unchanged, even if it relies on the repr() fallback
+                    base = int
+                    base_str = base.__str__
+                    if base_str.__objclass__ is object:
+                        return base.__repr__(self)
+                    return base_str(self)
+                # for simplicity, we only define one operator that
+                # propagates expressions
+                def __add__(self, other):
+                    temp = int(self) + int( other)
+                    if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+                        return NamedInt(
+                            '({0} + {1})'.format(self.__name__, other.__name__),
+                            temp )
+                    else:
+                        return temp
+
+            class NEI(NamedInt, Enum):
+                __qualname__ = 'NEI'      # needed for pickle protocol 4
+                x = ('the-x', 1)
+                y = ('the-y', 2)
+
+
+            self.assertIs(NEI.__new__, Enum.__new__)
+            self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+            globals()['NamedInt'] = NamedInt
+            globals()['NEI'] = NEI
+            NI5 = NamedInt('test', 5)
+            self.assertEqual(NI5, 5)
+            test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, HIGHEST_PROTOCOL))
+            self.assertEqual(NEI.y.value, 2)
+            test_pickle_dump_load(self.assertTrue, NEI.y, protocol=(4, HIGHEST_PROTOCOL))
+
+    def test_subclasses_with_reduce(self):
+        class NamedInt(int):
+            __qualname__ = 'NamedInt'       # needed for pickle protocol 4
+            def __new__(cls, *args):
+                _args = args
+                if len(args) < 1:
+                    raise TypeError("name and value must be specified")
+                name, args = args[0], args[1:]
+                self = int.__new__(cls, *args)
+                self._intname = name
+                self._args = _args
+                return self
+            def __reduce__(self):
+                return self.__class__, self._args
+            @property
+            def __name__(self):
+                return self._intname
+            def __repr__(self):
+                # repr() is updated to include the name and type info
+                return "%s(%r, %s)" % (type(self).__name__,
+                                             self.__name__,
+                                             int.__repr__(self))
+            def __str__(self):
+                # str() is unchanged, even if it relies on the repr() fallback
+                base = int
+                base_str = base.__str__
+                if base_str.__objclass__ is object:
+                    return base.__repr__(self)
+                return base_str(self)
+            # for simplicity, we only define one operator that
+            # propagates expressions
+            def __add__(self, other):
+                temp = int(self) + int( other)
+                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+                    return NamedInt(
+                        '(%s + %s)' % (self.__name__, other.__name__),
+                        temp )
+                else:
+                    return temp
+
+        class NEI(NamedInt, Enum):
+            __qualname__ = 'NEI'      # needed for pickle protocol 4
+            x = ('the-x', 1)
+            y = ('the-y', 2)
+
+
+        self.assertTrue(NEI.__new__ is Enum.__new__)
+        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+        globals()['NamedInt'] = NamedInt
+        globals()['NEI'] = NEI
+        NI5 = NamedInt('test', 5)
+        self.assertEqual(NI5, 5)
+        test_pickle_dump_load(self.assertEqual, NI5, 5)
+        self.assertEqual(NEI.y.value, 2)
+        test_pickle_dump_load(self.assertTrue, NEI.y)
+
+    def test_subclasses_with_reduce_ex(self):
+        class NamedInt(int):
+            __qualname__ = 'NamedInt'       # needed for pickle protocol 4
+            def __new__(cls, *args):
+                _args = args
+                if len(args) < 1:
+                    raise TypeError("name and value must be specified")
+                name, args = args[0], args[1:]
+                self = int.__new__(cls, *args)
+                self._intname = name
+                self._args = _args
+                return self
+            def __reduce_ex__(self, proto):
+                return self.__class__, self._args
+            @property
+            def __name__(self):
+                return self._intname
+            def __repr__(self):
+                # repr() is updated to include the name and type info
+                return "%s(%r, %s)" % (type(self).__name__,
+                                             self.__name__,
+                                             int.__repr__(self))
+            def __str__(self):
+                # str() is unchanged, even if it relies on the repr() fallback
+                base = int
+                base_str = base.__str__
+                if base_str.__objclass__ is object:
+                    return base.__repr__(self)
+                return base_str(self)
+            # for simplicity, we only define one operator that
+            # propagates expressions
+            def __add__(self, other):
+                temp = int(self) + int( other)
+                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+                    return NamedInt(
+                        '(%s + %s)' % (self.__name__, other.__name__),
+                        temp )
+                else:
+                    return temp
+
+        class NEI(NamedInt, Enum):
+            __qualname__ = 'NEI'      # needed for pickle protocol 4
+            x = ('the-x', 1)
+            y = ('the-y', 2)
+
+
+        self.assertTrue(NEI.__new__ is Enum.__new__)
+        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+        globals()['NamedInt'] = NamedInt
+        globals()['NEI'] = NEI
+        NI5 = NamedInt('test', 5)
+        self.assertEqual(NI5, 5)
+        test_pickle_dump_load(self.assertEqual, NI5, 5)
+        self.assertEqual(NEI.y.value, 2)
+        test_pickle_dump_load(self.assertTrue, NEI.y)
+
+    def test_subclasses_without_direct_pickle_support(self):
+        class NamedInt(int):
+            __qualname__ = 'NamedInt'
+            def __new__(cls, *args):
+                _args = args
+                name, args = args[0], args[1:]
+                if len(args) == 0:
+                    raise TypeError("name and value must be specified")
+                self = int.__new__(cls, *args)
+                self._intname = name
+                self._args = _args
+                return self
+            @property
+            def __name__(self):
+                return self._intname
+            def __repr__(self):
+                # repr() is updated to include the name and type info
+                return "%s(%r, %s)" % (type(self).__name__,
+                                             self.__name__,
+                                             int.__repr__(self))
+            def __str__(self):
+                # str() is unchanged, even if it relies on the repr() fallback
+                base = int
+                base_str = base.__str__
+                if base_str.__objclass__ is object:
+                    return base.__repr__(self)
+                return base_str(self)
+            # for simplicity, we only define one operator that
+            # propagates expressions
+            def __add__(self, other):
+                temp = int(self) + int( other)
+                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+                    return NamedInt(
+                        '(%s + %s)' % (self.__name__, other.__name__),
+                        temp )
+                else:
+                    return temp
+
+        class NEI(NamedInt, Enum):
+            __qualname__ = 'NEI'
+            x = ('the-x', 1)
+            y = ('the-y', 2)
+
+        self.assertTrue(NEI.__new__ is Enum.__new__)
+        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+        globals()['NamedInt'] = NamedInt
+        globals()['NEI'] = NEI
+        NI5 = NamedInt('test', 5)
+        self.assertEqual(NI5, 5)
+        self.assertEqual(NEI.y.value, 2)
+        test_pickle_exception(self.assertRaises, TypeError, NEI.x)
+        test_pickle_exception(self.assertRaises, PicklingError, NEI)
+
+    def test_subclasses_without_direct_pickle_support_using_name(self):
+        class NamedInt(int):
+            __qualname__ = 'NamedInt'
+            def __new__(cls, *args):
+                _args = args
+                name, args = args[0], args[1:]
+                if len(args) == 0:
+                    raise TypeError("name and value must be specified")
+                self = int.__new__(cls, *args)
+                self._intname = name
+                self._args = _args
+                return self
+            @property
+            def __name__(self):
+                return self._intname
+            def __repr__(self):
+                # repr() is updated to include the name and type info
+                return "%s(%r, %s)" % (type(self).__name__,
+                                             self.__name__,
+                                             int.__repr__(self))
+            def __str__(self):
+                # str() is unchanged, even if it relies on the repr() fallback
+                base = int
+                base_str = base.__str__
+                if base_str.__objclass__ is object:
+                    return base.__repr__(self)
+                return base_str(self)
+            # for simplicity, we only define one operator that
+            # propagates expressions
+            def __add__(self, other):
+                temp = int(self) + int( other)
+                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+                    return NamedInt(
+                        '(%s + %s)' % (self.__name__, other.__name__),
+                        temp )
+                else:
+                    return temp
+
+        class NEI(NamedInt, Enum):
+            __qualname__ = 'NEI'
+            x = ('the-x', 1)
+            y = ('the-y', 2)
+            def __reduce_ex__(self, proto):
+                return getattr, (self.__class__, self._name_)
+
+        self.assertTrue(NEI.__new__ is Enum.__new__)
+        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+        globals()['NamedInt'] = NamedInt
+        globals()['NEI'] = NEI
+        NI5 = NamedInt('test', 5)
+        self.assertEqual(NI5, 5)
+        self.assertEqual(NEI.y.value, 2)
+        test_pickle_dump_load(self.assertTrue, NEI.y)
+        test_pickle_dump_load(self.assertTrue, NEI)
+
+    def test_tuple_subclass(self):
+        class SomeTuple(tuple, Enum):
+            __qualname__ = 'SomeTuple'
+            first = (1, 'for the money')
+            second = (2, 'for the show')
+            third = (3, 'for the music')
+        self.assertTrue(type(SomeTuple.first) is SomeTuple)
+        self.assertTrue(isinstance(SomeTuple.second, tuple))
+        self.assertEqual(SomeTuple.third, (3, 'for the music'))
+        globals()['SomeTuple'] = SomeTuple
+        test_pickle_dump_load(self.assertTrue, SomeTuple.first)
+
+    def test_duplicate_values_give_unique_enum_items(self):
+        class AutoNumber(Enum):
+            __order__ = 'enum_m enum_d enum_y'
+            enum_m = ()
+            enum_d = ()
+            enum_y = ()
+            def __new__(cls):
+                value = len(cls.__members__) + 1
+                obj = object.__new__(cls)
+                obj._value_ = value
+                return obj
+            def __int__(self):
+                return int(self._value_)
+        self.assertEqual(int(AutoNumber.enum_d), 2)
+        self.assertEqual(AutoNumber.enum_y.value, 3)
+        self.assertTrue(AutoNumber(1) is AutoNumber.enum_m)
+        self.assertEqual(
+            list(AutoNumber),
+            [AutoNumber.enum_m, AutoNumber.enum_d, AutoNumber.enum_y],
+            )
+
+    def test_inherited_new_from_enhanced_enum(self):
+        class AutoNumber2(Enum):
+            def __new__(cls):
+                value = len(cls.__members__) + 1
+                obj = object.__new__(cls)
+                obj._value_ = value
+                return obj
+            def __int__(self):
+                return int(self._value_)
+        class Color(AutoNumber2):
+            _order_ = 'red green blue'
+            red = ()
+            green = ()
+            blue = ()
+        self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
+        self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
+        if pyver >= 3.0:
+            self.assertEqual(list(map(int, Color)), [1, 2, 3])
+
+    def test_inherited_new_from_mixed_enum(self):
+        class AutoNumber3(IntEnum):
+            def __new__(cls):
+                value = len(cls.__members__) + 1
+                obj = int.__new__(cls, value)
+                obj._value_ = value
+                return obj
+        class Color(AutoNumber3):
+            red = ()
+            green = ()
+            blue = ()
+        self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
+        Color.red
+        Color.green
+        Color.blue
+
+    def test_equality(self):
+        class AlwaysEqual:
+            def __eq__(self, other):
+                return True
+        class OrdinaryEnum(Enum):
+            a = 1
+        self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
+        self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
+
+    def test_ordered_mixin(self):
+        class OrderedEnum(Enum):
+            def __ge__(self, other):
+                if self.__class__ is other.__class__:
+                    return self._value_ >= other._value_
+                return NotImplemented
+            def __gt__(self, other):
+                if self.__class__ is other.__class__:
+                    return self._value_ > other._value_
+                return NotImplemented
+            def __le__(self, other):
+                if self.__class__ is other.__class__:
+                    return self._value_ <= other._value_
+                return NotImplemented
+            def __lt__(self, other):
+                if self.__class__ is other.__class__:
+                    return self._value_ < other._value_
+                return NotImplemented
+        class Grade(OrderedEnum):
+            __order__ = 'A B C D F'
+            A = 5
+            B = 4
+            C = 3
+            D = 2
+            F = 1
+        self.assertEqual(list(Grade), [Grade.A, Grade.B, Grade.C, Grade.D, Grade.F])
+        self.assertTrue(Grade.A > Grade.B)
+        self.assertTrue(Grade.F <= Grade.C)
+        self.assertTrue(Grade.D < Grade.A)
+        self.assertTrue(Grade.B >= Grade.B)
+
+    def test_extending2(self):
+        def bad_extension():
+            class Shade(Enum):
+                def shade(self):
+                    print(self.name)
+            class Color(Shade):
+                red = 1
+                green = 2
+                blue = 3
+            class MoreColor(Color):
+                cyan = 4
+                magenta = 5
+                yellow = 6
+        self.assertRaises(TypeError, bad_extension)
+
+    def test_extending3(self):
+        class Shade(Enum):
+            def shade(self):
+                return self.name
+        class Color(Shade):
+            def hex(self):
+                return '%s hexlified!' % self.value
+        class MoreColor(Color):
+            cyan = 4
+            magenta = 5
+            yellow = 6
+        self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
+
+    def test_no_duplicates(self):
+        def bad_duplicates():
+            class UniqueEnum(Enum):
+                def __init__(self, *args):
+                    cls = self.__class__
+                    if any(self.value == e.value for e in cls):
+                        a = self.name
+                        e = cls(self.value).name
+                        raise ValueError(
+                                "aliases not allowed in UniqueEnum:  %r --> %r"
+                                % (a, e)
+                                )
+            class Color(UniqueEnum):
+                red = 1
+                green = 2
+                blue = 3
+            class Color(UniqueEnum):
+                red = 1
+                green = 2
+                blue = 3
+                grene = 2
+        self.assertRaises(ValueError, bad_duplicates)
+
+    def test_init(self):
+        class Planet(Enum):
+            MERCURY = (3.303e+23, 2.4397e6)
+            VENUS   = (4.869e+24, 6.0518e6)
+            EARTH   = (5.976e+24, 6.37814e6)
+            MARS    = (6.421e+23, 3.3972e6)
+            JUPITER = (1.9e+27,   7.1492e7)
+            SATURN  = (5.688e+26, 6.0268e7)
+            URANUS  = (8.686e+25, 2.5559e7)
+            NEPTUNE = (1.024e+26, 2.4746e7)
+            def __init__(self, mass, radius):
+                self.mass = mass       # in kilograms
+                self.radius = radius   # in meters
+            @property
+            def surface_gravity(self):
+                # universal gravitational constant  (m3 kg-1 s-2)
+                G = 6.67300E-11
+                return G * self.mass / (self.radius * self.radius)
+        self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
+        self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
+
+    def test_nonhash_value(self):
+        class AutoNumberInAList(Enum):
+            def __new__(cls):
+                value = [len(cls.__members__) + 1]
+                obj = object.__new__(cls)
+                obj._value_ = value
+                return obj
+        class ColorInAList(AutoNumberInAList):
+            _order_ = 'red green blue'
+            red = ()
+            green = ()
+            blue = ()
+        self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
+        self.assertEqual(ColorInAList.red.value, [1])
+        self.assertEqual(ColorInAList([1]), ColorInAList.red)
+
+    def test_conflicting_types_resolved_in_new(self):
+        class LabelledIntEnum(int, Enum):
+            def __new__(cls, *args):
+                value, label = args
+                obj = int.__new__(cls, value)
+                obj.label = label
+                obj._value_ = value
+                return obj
+
+        class LabelledList(LabelledIntEnum):
+            unprocessed = (1, "Unprocessed")
+            payment_complete = (2, "Payment Complete")
+
+        self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
+        self.assertEqual(LabelledList.unprocessed, 1)
+        self.assertEqual(LabelledList(1), LabelledList.unprocessed)
+
+    def test_empty_with_functional_api(self):
+        empty = enum.IntEnum('Foo', {})
+        self.assertEqual(len(empty), 0)
+
+
+class TestUnique(unittest.TestCase):
+    """2.4 doesn't allow class decorators, use function syntax."""
+
+    def test_unique_clean(self):
+        class Clean(Enum):
+            one = 1
+            two = 'dos'
+            tres = 4.0
+        unique(Clean)
+        class Cleaner(IntEnum):
+            single = 1
+            double = 2
+            triple = 3
+        unique(Cleaner)
+
+    def test_unique_dirty(self):
+        try:
+            class Dirty(Enum):
+                __order__ = 'one two tres'
+                one = 1
+                two = 'dos'
+                tres = 1
+            unique(Dirty)
+        except ValueError:
+            exc = sys.exc_info()[1]
+            message = exc.args[0]
+        self.assertTrue('tres -> one' in message)
+
+        try:
+            class Dirtier(IntEnum):
+                _order_ = 'single double triple turkey'
+                single = 1
+                double = 1
+                triple = 3
+                turkey = 3
+            unique(Dirtier)
+        except ValueError:
+            exc = sys.exc_info()[1]
+            message = exc.args[0]
+        self.assertTrue('double -> single' in message)
+        self.assertTrue('turkey -> triple' in message)
+
+
+class TestMe(unittest.TestCase):
+
+    pass
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tools/third_party/enum/enum34.egg-info/PKG-INFO b/tools/third_party/enum/enum34.egg-info/PKG-INFO
new file mode 100644
index 0000000..98927c4
--- /dev/null
+++ b/tools/third_party/enum/enum34.egg-info/PKG-INFO
@@ -0,0 +1,62 @@
+Metadata-Version: 1.1
+Name: enum34
+Version: 1.1.6
+Summary: Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4
+Home-page: https://bitbucket.org/stoneleaf/enum34
+Author: Ethan Furman
+Author-email: ethan@stoneleaf.us
+License: BSD License
+Description: enum --- support for enumerations
+        ========================================
+        
+        An enumeration is a set of symbolic names (members) bound to unique, constant
+        values.  Within an enumeration, the members can be compared by identity, and
+        the enumeration itself can be iterated over.
+        
+            from enum import Enum
+        
+            class Fruit(Enum):
+                apple = 1
+                banana = 2
+                orange = 3
+        
+            list(Fruit)
+            # [<Fruit.apple: 1>, <Fruit.banana: 2>, <Fruit.orange: 3>]
+        
+            len(Fruit)
+            # 3
+        
+            Fruit.banana
+            # <Fruit.banana: 2>
+        
+            Fruit['banana']
+            # <Fruit.banana: 2>
+        
+            Fruit(2)
+            # <Fruit.banana: 2>
+        
+            Fruit.banana is Fruit['banana'] is Fruit(2)
+            # True
+        
+            Fruit.banana.name
+            # 'banana'
+        
+            Fruit.banana.value
+            # 2
+        
+        Repository and Issue Tracker at https://bitbucket.org/stoneleaf/enum34.
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Provides: enum
diff --git a/tools/third_party/enum/enum34.egg-info/SOURCES.txt b/tools/third_party/enum/enum34.egg-info/SOURCES.txt
new file mode 100644
index 0000000..e6855e6
--- /dev/null
+++ b/tools/third_party/enum/enum34.egg-info/SOURCES.txt
@@ -0,0 +1,13 @@
+MANIFEST.in
+README
+setup.py
+enum/LICENSE
+enum/README
+enum/__init__.py
+enum/test.py
+enum/doc/enum.pdf
+enum/doc/enum.rst
+enum34.egg-info/PKG-INFO
+enum34.egg-info/SOURCES.txt
+enum34.egg-info/dependency_links.txt
+enum34.egg-info/top_level.txt
\ No newline at end of file
diff --git a/tools/third_party/enum/enum34.egg-info/dependency_links.txt b/tools/third_party/enum/enum34.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/tools/third_party/enum/enum34.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/tools/third_party/enum/enum34.egg-info/top_level.txt b/tools/third_party/enum/enum34.egg-info/top_level.txt
new file mode 100644
index 0000000..e3caefb
--- /dev/null
+++ b/tools/third_party/enum/enum34.egg-info/top_level.txt
@@ -0,0 +1 @@
+enum
diff --git a/tools/third_party/enum/setup.cfg b/tools/third_party/enum/setup.cfg
new file mode 100644
index 0000000..861a9f5
--- /dev/null
+++ b/tools/third_party/enum/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/tools/third_party/enum/setup.py b/tools/third_party/enum/setup.py
new file mode 100644
index 0000000..8b28400
--- /dev/null
+++ b/tools/third_party/enum/setup.py
@@ -0,0 +1,99 @@
+import os
+import sys
+import setuptools
+from distutils.core import setup
+
+
+if sys.version_info[:2] < (2, 7):
+    required = ['ordereddict']
+else:
+    required = []
+
+long_desc = '''\
+enum --- support for enumerations
+========================================
+
+An enumeration is a set of symbolic names (members) bound to unique, constant
+values.  Within an enumeration, the members can be compared by identity, and
+the enumeration itself can be iterated over.
+
+    from enum import Enum
+
+    class Fruit(Enum):
+        apple = 1
+        banana = 2
+        orange = 3
+
+    list(Fruit)
+    # [<Fruit.apple: 1>, <Fruit.banana: 2>, <Fruit.orange: 3>]
+
+    len(Fruit)
+    # 3
+
+    Fruit.banana
+    # <Fruit.banana: 2>
+
+    Fruit['banana']
+    # <Fruit.banana: 2>
+
+    Fruit(2)
+    # <Fruit.banana: 2>
+
+    Fruit.banana is Fruit['banana'] is Fruit(2)
+    # True
+
+    Fruit.banana.name
+    # 'banana'
+
+    Fruit.banana.value
+    # 2
+
+Repository and Issue Tracker at https://bitbucket.org/stoneleaf/enum34.
+'''
+
+py2_only = ()
+py3_only = ()
+make = [
+        'rst2pdf enum/doc/enum.rst --output=enum/doc/enum.pdf',
+        ]
+
+
+data = dict(
+        name='enum34',
+        version='1.1.6',
+        url='https://bitbucket.org/stoneleaf/enum34',
+        packages=['enum'],
+        package_data={
+            'enum' : [
+                'LICENSE',
+                'README',
+                'doc/enum.rst',
+                'doc/enum.pdf',
+                'test.py',
+                ]
+            },
+        license='BSD License',
+        description='Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4',
+        long_description=long_desc,
+        provides=['enum'],
+        install_requires=required,
+        author='Ethan Furman',
+        author_email='ethan@stoneleaf.us',
+        classifiers=[
+            'Development Status :: 5 - Production/Stable',
+            'Intended Audience :: Developers',
+            'License :: OSI Approved :: BSD License',
+            'Programming Language :: Python',
+            'Topic :: Software Development',
+            'Programming Language :: Python :: 2.4',
+            'Programming Language :: Python :: 2.5',
+            'Programming Language :: Python :: 2.6',
+            'Programming Language :: Python :: 2.7',
+            'Programming Language :: Python :: 3.3',
+            'Programming Language :: Python :: 3.4',
+            'Programming Language :: Python :: 3.5',
+            ],
+        )
+
+if __name__ == '__main__':
+    setup(**data)
diff --git a/tools/third_party/h2/.coveragerc b/tools/third_party/h2/.coveragerc
new file mode 100755
index 0000000..153e38d
--- /dev/null
+++ b/tools/third_party/h2/.coveragerc
@@ -0,0 +1,18 @@
+[run]
+branch = True
+source = h2
+
+[report]
+fail_under = 100
+show_missing = True
+exclude_lines =
+    pragma: no cover
+    .*:.* # Python \d.*
+    assert False, "Should not be reachable"
+    .*:.* # Platform-specific:
+
+[paths]
+source =
+    h2/
+    .tox/*/lib/python*/site-packages/h2
+    .tox/pypy*/site-packages/h2
diff --git a/tools/third_party/h2/.gitignore b/tools/third_party/h2/.gitignore
new file mode 100755
index 0000000..9d19e10
--- /dev/null
+++ b/tools/third_party/h2/.gitignore
@@ -0,0 +1,11 @@
+build/
+env/
+dist/
+*.egg-info/
+*.pyc
+__pycache__
+.coverage
+.tox/
+.hypothesis/
+.cache/
+_trial_temp/
diff --git a/tools/third_party/h2/.travis.yml b/tools/third_party/h2/.travis.yml
new file mode 100755
index 0000000..7b5a468
--- /dev/null
+++ b/tools/third_party/h2/.travis.yml
@@ -0,0 +1,41 @@
+sudo: false
+language: python
+
+cache:
+  directories:
+    - $HOME/.cache/pip
+    - .hypothesis
+
+matrix:
+  include:
+    # Main tests on supported Python versions.
+    - python: "2.7"
+      env: TOXENV=py27
+    - python: "3.3"
+      env: TOXENV=py33
+    - python: "3.4"
+      env: TOXENV=py34
+    - python: "3.5"
+      env: TOXENV=py35
+    - python: "3.6"
+      env: TOXENV=py36
+    - python: "pypy-5.3.1"
+      env: TOXENV=pypy
+
+    # Linting, docs, and other non-test stuff.
+    - python: "3.4"
+      env: TOXENV=lint
+    - python: "3.5"
+      env: TOXENV=docs
+    - python: "2.7"
+      env: TOXENV=packaging
+
+    # Test we haven't broken our major dependencies.
+    - python: "2.7"
+      env: TOXENV=py27-twistedMaster
+
+install:
+  - "pip install -U pip setuptools"
+  - "pip install -U tox"
+script:
+  - tox -- --hypothesis-profile travis
diff --git a/tools/third_party/h2/CONTRIBUTORS.rst b/tools/third_party/h2/CONTRIBUTORS.rst
new file mode 100755
index 0000000..bdc4d06
--- /dev/null
+++ b/tools/third_party/h2/CONTRIBUTORS.rst
@@ -0,0 +1,114 @@
+Hyper-h2 is written and maintained by Cory Benfield and various contributors:
+
+Development Lead
+````````````````
+
+- Cory Benfield <cory@lukasa.co.uk>
+
+Contributors
+````````````
+
+In chronological order:
+
+- Robert Collins (@rbtcollins)
+
+  - Provided invaluable and substantial early input into API design and layout.
+  - Added code preventing ``Proxy-Authorization`` from getting added to HPACK
+    compression contexts.
+
+- Maximilian Hils (@maximilianhils)
+
+  - Added asyncio example.
+
+- Alex Chan (@alexwlchan)
+
+  - Fixed docstring, added URLs to README.
+
+- Glyph Lefkowitz (@glyph)
+
+  - Improved example Twisted server.
+
+- Thomas Kriechbaumer (@Kriechi)
+
+  - Fixed incorrect arguments being passed to ``StreamIDTooLowError``.
+  - Added new arguments to ``close_connection``.
+
+- WeiZheng Xu (@boyxuper)
+
+  - Reported a bug relating to hyper-h2's updating of the connection window in
+    response to SETTINGS_INITIAL_WINDOW_SIZE.
+
+- Evgeny Tataurov (@etataurov)
+
+  - Added the ``additional_data`` field to the ``ConnectionTerminated`` event.
+
+- Brett Cannon (@brettcannon)
+
+  - Changed Travis status icon to SVG.
+  - Documentation improvements.
+
+- Felix Yan (@felixonmars)
+
+  - Widened allowed version numbers of enum34.
+  - Updated test requirements.
+
+- Keith Dart (@kdart)
+
+  - Fixed curio example server flow control problems.
+
+- Gil Gonçalves (@LuRsT)
+
+  - Added code forbidding non-RFC 7540 pseudo-headers.
+
+- Louis Taylor (@kragniz)
+
+  - Cleaned up the README.
+
+- Berker Peksag (@berkerpeksag)
+
+  - Improved the docstring for ``StreamIDTooLowError``.
+
+- Adrian Lewis (@aidylewis)
+
+  - Fixed the broken Twisted HEAD request example.
+  - Added verification logic for ensuring that responses to HEAD requests have
+    no body.
+
+- Lorenzo (@Mec-iS)
+
+  - Changed documentation to stop using dictionaries for header blocks.
+
+- Kracekumar Ramaraj (@kracekumar)
+
+  - Cleaned up Twisted example.
+
+- @mlvnd
+
+  - Cleaned up curio example.
+
+- Tom Offermann (@toffer)
+
+  - Added Tornado example.
+
+- Tarashish Mishra (@sunu)
+
+  - Added code to reject header fields with leading/trailing whitespace.
+  - Added code to remove leading/trailing whitespace from sent header fields.
+
+- Nate Prewitt (@nateprewitt)
+
+  - Added code to validate that trailers do not contain pseudo-header fields.
+
+- Chun-Han, Hsiao (@chhsiao90)
+
+  - Fixed a bug with invalid ``HTTP2-Settings`` header output in plaintext
+    upgrade.
+
+- Bhavishya (@bhavishyagopesh)
+
+  - Added support for equality testing to ``h2.settings.Settings`` objects.
+
+- Fred Thomsen (@fredthomsen)
+
+  - Added logging.
+
diff --git a/tools/third_party/h2/HISTORY.rst b/tools/third_party/h2/HISTORY.rst
new file mode 100755
index 0000000..e9fa745
--- /dev/null
+++ b/tools/third_party/h2/HISTORY.rst
@@ -0,0 +1,701 @@
+Release History
+===============
+
+3.0.1 (2017-04-03)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- CONTINUATION frames sent on closed streams previously caused stream errors
+  of type STREAM_CLOSED. RFC 7540 § 6.10 requires that these be connection
+  errors of type PROTOCOL_ERROR, and so this release changes to match that
+  behaviour.
+- Remote peers incrementing their inbound connection window beyond the maximum
+  allowed value now cause stream-level errors, rather than connection-level
+  errors, allowing connections to stay up longer.
+- h2 now rejects receiving and sending request header blocks that are missing
+  any of the mandatory pseudo-header fields (:path, :scheme, and :method).
+- h2 now rejects receiving and sending request header blocks that have an empty
+  :path pseudo-header value.
+- h2 now rejects receiving and sending request header blocks that contain
+  response-only pseudo-headers, and vice versa.
+- h2 now correctly respects user-initiated changes to the HEADER_TABLE_SIZE
+  local setting, and ensures that if users shrink or increase the header
+  table size it is policed appropriately.
+
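+A minimal, illustrative sketch of a request header block that satisfies the
+pseudo-header checks above (the ``H2Configuration``/``H2Connection`` calls are
+only scaffolding for the example)::
+
+    import h2.config
+    import h2.connection
+
+    config = h2.config.H2Configuration(client_side=True)
+    conn = h2.connection.H2Connection(config=config)
+    conn.initiate_connection()
+
+    # :method, :scheme and :path must all be present and :path must be
+    # non-empty; response-only pseudo-headers such as :status are rejected.
+    conn.send_headers(1, [
+        (':method', 'GET'),
+        (':scheme', 'https'),
+        (':authority', 'example.com'),
+        (':path', '/'),
+    ], end_stream=True)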
+
+2.6.2 (2017-04-03)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- CONTINUATION frames sent on closed streams previously caused stream errors
+  of type STREAM_CLOSED. RFC 7540 § 6.10 requires that these be connection
+  errors of type PROTOCOL_ERROR, and so this release changes to match that
+  behaviour.
+- Remote peers incrementing their inbound connection window beyond the maximum
+  allowed value now cause stream-level errors, rather than connection-level
+  errors, allowing connections to stay up longer.
+- h2 now rejects receiving and sending request header blocks that are missing
+  any of the mandatory pseudo-header fields (:path, :scheme, and :method).
+- h2 now rejects receiving and sending request header blocks that have an empty
+  :path pseudo-header value.
+- h2 now rejects receiving and sending request header blocks that contain
+  response-only pseudo-headers, and vice versa.
+- h2 now correctly respects user-initiated changes to the HEADER_TABLE_SIZE
+  local setting, and ensures that if users shrink or increase the header
+  table size it is policed appropriately.
+
+
+2.5.4 (2017-04-03)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- CONTINUATION frames sent on closed streams previously caused stream errors
+  of type STREAM_CLOSED. RFC 7540 § 6.10 requires that these be connection
+  errors of type PROTOCOL_ERROR, and so this release changes to match that
+  behaviour.
+- Remote peers incrementing their inbound connection window beyond the maximum
+  allowed value now cause stream-level errors, rather than connection-level
+  errors, allowing connections to stay up longer.
+- h2 now correctly respects user-initiated changes to the HEADER_TABLE_SIZE
+  local setting, and ensures that if users shrink or increase the header
+  table size it is policed appropriately.
+
+
+3.0.0 (2017-03-24)
+------------------
+
+API Changes (Backward-Incompatible)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- By default, hyper-h2 now joins together received cookie header fields, per
+  RFC 7540 Section 8.1.2.5.
+- Added a ``normalize_inbound_headers`` flag to the ``H2Configuration`` object
+  that defaults to ``True``. Setting this to ``False`` changes the behaviour
+  from the previous point back to the v2 behaviour.
+- Removed deprecated fields from ``h2.errors`` module.
+- Removed deprecated fields from ``h2.settings`` module.
+- Removed deprecated ``client_side`` and ``header_encoding`` arguments from
+  ``H2Connection``.
+- Removed deprecated ``client_side`` and ``header_encoding`` properties from
+  ``H2Connection``.
+- ``dict`` objects are no longer allowed for user-supplied headers.
+- The default header encoding is now ``None``, not ``utf-8``: this means that
+  all events that carry headers now return those headers as byte strings by
+  default. The header encoding can be set back to ``utf-8`` to restore the old
+  behaviour.
+
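+As a hedged sketch of what the new defaults look like from calling code
+(``normalize_inbound_headers`` and ``header_encoding`` are the keywords
+described above; the rest is scaffolding)::
+
+    import h2.config
+    import h2.connection
+
+    # New defaults: received cookie fields are joined, and headers on events
+    # arrive as byte strings because header_encoding now defaults to None.
+    config = h2.config.H2Configuration(client_side=True)
+
+    # Restoring something closer to the 2.x behaviour:
+    legacy_style = h2.config.H2Configuration(
+        client_side=True,
+        normalize_inbound_headers=False,
+        header_encoding='utf-8',
+    )
+
+    conn = h2.connection.H2Connection(config=config)
+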
+API Changes (Backward-Compatible)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Added new ``UnknownFrameReceived`` event that fires when unknown extension
+  frames have been received. This only fires when using hyperframe 5.0 or
+  later: earlier versions of hyperframe cause us to silently ignore extension
+  frames.
+
+Bugfixes
+~~~~~~~~
+
+None
+
+
+2.6.1 (2017-03-16)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Allowed hyperframe v5 support while continuing to ignore unexpected frames.
+
+
+2.5.3 (2017-03-16)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Allowed hyperframe v5 support while continuing to ignore unexpected frames.
+
+
+2.4.4 (2017-03-16)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Allowed hyperframe v5 support while continuing to ignore unexpected frames.
+
+
+2.6.0 (2017-02-28)
+------------------
+
+API Changes (Backward-Compatible)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Added a new ``h2.events.Event`` class that acts as a base class for all
+  events.
+- Rather than reject outbound Connection-specific headers, h2 will now
+  normalize the header block by removing them.
+- Implemented equality for the ``h2.settings.Settings`` class.
+- Added ``h2.settings.SettingCodes``, an enum that is used to store all the
+  HTTP/2 setting codes. This allows us to use a better printed representation of
+  the setting code in most places that it is used.
+- The ``setting`` field in ``ChangedSetting`` for the ``RemoteSettingsChanged``
+  and ``SettingsAcknowledged`` events has been updated to be instances of
+  ``SettingCodes`` whenever they correspond to a known setting code. When they
+  are an unknown setting code, they are instead ``int``. As ``SettingCodes`` is
+  a subclass of ``int``, this is non-breaking.
+- Deprecated the other fields in ``h2.settings``. These will be removed in
+  3.0.0.
+- Added an optional ``pad_length`` parameter to ``H2Connection.send_data``
+  to allow the user to include padding on a data frame.
+- Added a new parameter to the ``h2.config.H2Configuration`` initializer which
+  takes a logger.  This allows us to log by providing a logger that conforms
+  to the requirements of this module so that it can be used in different
+  environments.
+
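+An illustrative sketch of two of the additions above, ``SettingCodes`` and the
+``pad_length`` parameter (the surrounding connection setup is just
+scaffolding)::
+
+    import h2.config
+    import h2.connection
+    import h2.settings
+
+    conn = h2.connection.H2Connection(
+        config=h2.config.H2Configuration(client_side=True))
+    conn.initiate_connection()
+    conn.send_headers(1, [
+        (':method', 'POST'), (':scheme', 'https'),
+        (':authority', 'example.com'), (':path', '/upload'),
+    ])
+
+    # DATA frames may now carry padding via the optional pad_length argument.
+    conn.send_data(1, b'some data', pad_length=16)
+
+    # Setting codes are exposed as an enum with a readable repr.
+    table_size = h2.settings.SettingCodes.HEADER_TABLE_SIZE
+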
+Bugfixes
+~~~~~~~~
+
+- Correctly reject pushed request header blocks whenever they have malformed
+  request header blocks.
+- Correctly normalize pushed request header blocks whenever they have
+  normalizable header fields.
+- Remote peers are now allowed to send zero or any positive number as a value
+  for ``SETTINGS_MAX_HEADER_LIST_SIZE``, where previously sending zero would
+  raise an ``InvalidSettingsValueError``.
+- Resolved issue where the ``HTTP2-Settings`` header value for plaintext
+  upgrade that was emitted by ``initiate_upgrade_connection`` included the
+  *entire* ``SETTINGS`` frame, instead of just the payload.
+- Resolved issue where the ``HTTP2-Settings`` header value sent by a client for
+  plaintext upgrade would be ignored by ``initiate_upgrade_connection``, rather
+  than have those settings applied appropriately.
+- Resolved an issue whereby certain frames received from a peer in the CLOSED
+  state would trigger connection errors when RFC 7540 says they should have
+  triggered stream errors instead. Added more detailed stream closure tracking
+  to ensure we don't throw away connections unnecessarily.
+
+
+2.5.2 (2017-01-27)
+------------------
+
+- Resolved issue where the ``HTTP2-Settings`` header value for plaintext
+  upgrade that was emitted by ``initiate_upgrade_connection`` included the
+  *entire* ``SETTINGS`` frame, instead of just the payload.
+- Resolved issue where the ``HTTP2-Settings`` header value sent by a client for
+  plaintext upgrade would be ignored by ``initiate_upgrade_connection``, rather
+  than have those settings applied appropriately.
+
+
+2.4.3 (2017-01-27)
+------------------
+
+- Resolved issue where the ``HTTP2-Settings`` header value for plaintext
+  upgrade that was emitted by ``initiate_upgrade_connection`` included the
+  *entire* ``SETTINGS`` frame, instead of just the payload.
+- Resolved issue where the ``HTTP2-Settings`` header value sent by a client for
+  plaintext upgrade would be ignored by ``initiate_upgrade_connection``, rather
+  than have those settings applied appropriately.
+
+
+2.3.4 (2017-01-27)
+------------------
+
+- Resolved issue where the ``HTTP2-Settings`` header value for plaintext
+  upgrade that was emitted by ``initiate_upgrade_connection`` included the
+  *entire* ``SETTINGS`` frame, instead of just the payload.
+- Resolved issue where the ``HTTP2-Settings`` header value sent by a client for
+  plaintext upgrade would be ignored by ``initiate_upgrade_connection``, rather
+  than have those settings applied appropriately.
+
+
+2.5.1 (2016-12-17)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Remote peers are now allowed to send zero or any positive number as a value
+  for ``SETTINGS_MAX_HEADER_LIST_SIZE``, where previously sending zero would
+  raise an ``InvalidSettingsValueError``.
+
+
+2.5.0 (2016-10-25)
+------------------
+
+API Changes (Backward-Compatible)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Added a new ``H2Configuration`` object that allows rich configuration of
+  a ``H2Connection``. This object supersedes the prior keyword arguments to the
+  ``H2Connection`` object, which are now deprecated and will be removed in 3.0.
+- Added support for automated window management via the
+  ``acknowledge_received_data`` method. See the documentation for more details.
+- Added a ``DenialOfServiceError`` that is raised whenever a behaviour that
+  looks like a DoS attempt is encountered: for example, an overly large
+  decompressed header list. This is a subclass of ``ProtocolError``.
+- Added support for setting and managing ``SETTINGS_MAX_HEADER_LIST_SIZE``.
+  This setting is now defaulted to 64kB.
+- Added ``h2.errors.ErrorCodes``, an enum that is used to store all the HTTP/2
+  error codes. This allows us to use a better printed representation of the
+  error code in most places that it is used.
+- The ``error_code`` fields on ``ConnectionTerminated`` and ``StreamReset``
+  events have been updated to be instances of ``ErrorCodes`` whenever they
+  correspond to a known error code. When they are an unknown error code, they
+  are instead ``int``. As ``ErrorCodes`` is a subclass of ``int``, this is
+  non-breaking.
+- Deprecated the other fields in ``h2.errors``. These will be removed in 3.0.0.
+
+Bugfixes
+~~~~~~~~
+
+- Correctly reject request header blocks with neither :authority nor Host
+  headers, or header blocks which contain mismatched :authority and Host
+  headers, per RFC 7540 Section 8.1.2.3.
+- Correctly expect that responses to HEAD requests will have no body regardless
+  of the value of the Content-Length header, and reject those that do.
+- Correctly refuse to send header blocks that contain neither :authority nor
+  Host headers, or header blocks which contain mismatched :authority and Host
+  headers, per RFC 7540 Section 8.1.2.3.
+- Hyper-h2 will now reject header field names and values that contain leading
+  or trailing whitespace.
+- Correctly strip leading/trailing whitespace from header field names and
+  values.
+- Correctly refuse to send header blocks with a TE header whose value is not
+  ``trailers``, per RFC 7540 Section 8.1.2.2.
+- Correctly refuse to send header blocks with connection-specific headers,
+  per RFC 7540 Section 8.1.2.2.
+- Correctly refuse to send header blocks that contain duplicate pseudo-header
+  fields, or with pseudo-header fields that appear after ordinary header fields,
+  per RFC 7540 Section 8.1.2.1.
+
+  This may cause passing a dictionary as the header block to ``send_headers``
+  to throw a ``ProtocolError``, because dictionaries are unordered and so they
+  may trip this check.  Passing dictionaries here is deprecated, and callers
+  should change to using a sequence of 2-tuples as their header blocks.
+- Correctly reject trailers that contain HTTP/2 pseudo-header fields, per RFC
+  7540 Section 8.1.2.1.
+- Correctly refuse to send trailers that contain HTTP/2 pseudo-header fields,
+  per RFC 7540 Section 8.1.2.1.
+- Correctly reject responses that do not contain the ``:status`` header field,
+  per RFC 7540 Section 8.1.2.4.
+- Correctly refuse to send responses that do not contain the ``:status`` header
+  field, per RFC 7540 Section 8.1.2.4.
+- Correctly update the maximum frame size when the user updates the value of
+  that setting. Prior to this release, if the user updated the maximum frame
+  size hyper-h2 would ignore the update, preventing the remote peer from using
+  the higher frame sizes.
+
+2.4.2 (2016-10-25)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Correctly update the maximum frame size when the user updates the value of
+  that setting. Prior to this release, if the user updated the maximum frame
+  size hyper-h2 would ignore the update, preventing the remote peer from using
+  the higher frame sizes.
+
+2.3.3 (2016-10-25)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Correctly update the maximum frame size when the user updates the value of
+  that setting. Prior to this release, if the user updated the maximum frame
+  size hyper-h2 would ignore the update, preventing the remote peer from using
+  the higher frame sizes.
+
+2.2.7 (2016-10-25)
+------------------
+
+*Final 2.2.X release*
+
+Bugfixes
+~~~~~~~~
+
+- Correctly update the maximum frame size when the user updates the value of
+  that setting. Prior to this release, if the user updated the maximum frame
+  size hyper-h2 would ignore the update, preventing the remote peer from using
+  the higher frame sizes.
+
+2.4.1 (2016-08-23)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Correctly expect that responses to HEAD requests will have no body regardless
+  of the value of the Content-Length header, and reject those that do.
+
+2.3.2 (2016-08-23)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Correctly expect that responses to HEAD requests will have no body regardless
+  of the value of the Content-Length header, and reject those that do.
+
+2.4.0 (2016-07-01)
+------------------
+
+API Changes (Backward-Compatible)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Adds ``additional_data`` to ``H2Connection.close_connection``, allowing the
+  user to send additional debug data on the GOAWAY frame.
+- Adds ``last_stream_id`` to ``H2Connection.close_connection``, allowing the
+  user to manually control what the reported last stream ID is.
+- Add new method: ``prioritize``.
+- Add support for emitting stream priority information when sending headers
+  frames using three new keyword arguments: ``priority_weight``,
+  ``priority_depends_on``, and ``priority_exclusive``.
+- Add support for "related events": events that fire simultaneously on a single
+  frame.
+
+
+2.3.1 (2016-05-12)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Resolved ``AttributeError`` encountered when receiving more than one sequence
+  of CONTINUATION frames on a given connection.
+
+
+2.2.5 (2016-05-12)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Resolved ``AttributeError`` encountered when receiving more than one sequence
+  of CONTINUATION frames on a given connection.
+
+
+2.3.0 (2016-04-26)
+------------------
+
+API Changes (Backward-Compatible)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Added a new flag to the ``H2Connection`` constructor: ``header_encoding``,
+  that controls what encoding is used (if any) to decode the headers from bytes
+  to unicode. This defaults to UTF-8 for backward compatibility. To disable the
+  decode and use bytes exclusively, set the field to False, None, or the empty
+  string. This affects all headers, including those pushed by servers.
+- Bumped the minimum version of HPACK allowed from 2.0 to 2.2.
+- Added support for advertising RFC 7838 Alternative services.
+- Allowed users to provide ``hpack.HeaderTuple`` and
+  ``hpack.NeverIndexedHeaderTuple`` objects to all methods that send headers.
+- Changed all events that carry headers to emit ``hpack.HeaderTuple`` and
+  ``hpack.NeverIndexedHeaderTuple`` instead of plain tuples. This allows users
+  to maintain header indexing state.
+- Added support for plaintext upgrade with the ``initiate_upgrade_connection``
+  method.
+
+Bugfixes
+~~~~~~~~
+
+- Automatically ensure that all ``Authorization`` and ``Proxy-Authorization``
+  headers, as well as short ``Cookie`` headers, are prevented from being added
+  to encoding contexts.
+
+2.2.4 (2016-04-25)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Correctly forbid pseudo-headers that were not defined in RFC 7540.
+- Ignore AltSvc frames, rather than exploding when receiving them.
+
+2.1.5 (2016-04-25)
+------------------
+
+*Final 2.1.X release*
+
+Bugfixes
+~~~~~~~~
+
+- Correctly forbid pseudo-headers that were not defined in RFC 7540.
+- Ignore AltSvc frames, rather than exploding when receiving them.
+
+2.2.3 (2016-04-13)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Allowed the 4.X series of hyperframe releases as dependencies.
+
+2.1.4 (2016-04-13)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Allowed the 4.X series of hyperframe releases as dependencies.
+
+
+2.2.2 (2016-04-05)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Fixed issue where informational responses were erroneously not allowed to be
+  sent in the ``HALF_CLOSED_REMOTE`` state.
+- Fixed issue where informational responses were erroneously not allowed to be
+  received in the ``HALF_CLOSED_LOCAL`` state.
+- Fixed issue where we allowed informational responses to be sent or received
+  after final responses.
+
+2.2.1 (2016-03-23)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Fixed issue where users using locales that did not default to UTF-8 were
+  unable to install source distributions of the package.
+
+2.2.0 (2016-03-23)
+------------------
+
+API Changes (Backward-Compatible)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Added support for sending informational responses (responses with 1XX status
+  codes) as part of the standard flow. HTTP/2 allows zero or more informational
+  responses with no upper limit: hyper-h2 does too.
+- Added support for receiving informational responses (responses with 1XX
+  status codes) as part of the standard flow. HTTP/2 allows zero or more
+  informational responses with no upper limit: hyper-h2 does too.
+- Added a new event: ``ReceivedInformationalResponse``. This event is fired
+  when informational responses (those with 1XX status codes) are received.
+- Added an ``additional_data`` field to the ``ConnectionTerminated`` event that
+  carries any additional data sent on the GOAWAY frame. May be ``None`` if no
+  such data was sent.
+- Added the ``initial_values`` optional argument to the ``Settings`` object.
+
+Bugfixes
+~~~~~~~~
+
+- Correctly reject all of the connection-specific headers mentioned in RFC 7540
+  § 8.1.2.2, not just the ``Connection:`` header.
+- Defaulted the value of ``SETTINGS_MAX_CONCURRENT_STREAMS`` to 100, unless
+  explicitly overridden. This is a safe defensive initial value for this
+  setting.
+
+2.1.3 (2016-03-16)
+------------------
+
+Deprecations
+~~~~~~~~~~~~
+
+- Passing dictionaries to ``send_headers`` as the header block is deprecated,
+  and will be removed in 3.0.
+
+2.1.2 (2016-02-17)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Reject attempts to push streams on streams that were themselves pushed:
+  streams can only be pushed on streams that were initiated by the client.
+- Correctly allow CONTINUATION frames to extend the header block started by a
+  PUSH_PROMISE frame.
+- Changed our handling of frames received on streams that were reset by the
+  user.
+
+  Previously these would, at best, cause ProtocolErrors to be raised and the
+  connection to be torn down (rather defeating the point of resetting streams
+  at all) and, at worst, would cause subtle inconsistencies in state between
+  hyper-h2 and the remote peer that could lead to header block decoding errors
+  or flow control blockages.
+
+  Now when the user resets a stream all further frames received on that stream
+  are ignored except where they affect some form of connection-level state,
+  where they have their effect and are then ignored.
+- Fixed a bug whereby receiving a PUSH_PROMISE frame on a stream that was
+  closed would cause a RST_STREAM frame to be emitted on the closed-stream,
+  but not the newly-pushed one. Now this causes a ``ProtocolError``.
+
+2.1.1 (2016-02-05)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Added debug representations for all events.
+- Fixed problems with setup.py that caused trouble on older setuptools/pip
+  installs.
+
+2.1.0 (2016-02-02)
+------------------
+
+API Changes (Backward-Compatible)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Added new field to ``DataReceived``: ``flow_controlled_length``. This is the
+  length of the frame including padded data, allowing users to correctly track
+  changes to the flow control window.
+- Defined new ``UnsupportedFrameError``, thrown when frames that are known to
+  hyperframe but not supported by hyper-h2 are received. For
+  backward-compatibility reasons, this is a ``ProtocolError`` *and* a
+  ``KeyError``.
+
+Bugfixes
+~~~~~~~~
+
+- Hyper-h2 now correctly accounts for padding when maintaining flow control
+  windows.
+- Resolved a bug where hyper-h2 would mistakenly apply
+  SETTINGS_INITIAL_WINDOW_SIZE to the connection flow control window in
+  addition to the stream-level flow control windows.
+- Invalid Content-Length headers now throw ``ProtocolError`` exceptions and
+  correctly tear the connection down, instead of leaving the connection in an
+  indeterminate state.
+- Invalid header blocks now throw ``ProtocolError``, rather than a grab bag of
+  possible other exceptions.
+
+2.0.0 (2016-01-25)
+------------------
+
+API Changes (Breaking)
+~~~~~~~~~~~~~~~~~~~~~~
+
+- Attempts to open streams with invalid stream IDs, either by the remote peer
+  or by the user, are now rejected as a ``ProtocolError``. Previously these
+  were allowed, and would cause remote peers to error.
+- Receiving frames that have invalid padding now causes the connection to be
+  terminated with a ``ProtocolError`` being raised. Previously these passed
+  undetected.
+- Settings values set by both the user and the remote peer are now validated
+  when they're set. If they're invalid, a new ``InvalidSettingsValueError`` is
+  raised and, if set by the remote peer, a connection error is signaled.
+  Previously, it was possible to set invalid values. These would either be
+  caught when building frames, or would be allowed to stand.
+- Settings changes no longer require user action to be acknowledged: hyper-h2
+  acknowledges them automatically. This moves the location where some
+  exceptions may be thrown, and also causes the ``acknowledge_settings`` method
+  to be removed from the public API.
+- Removed a number of methods on the ``H2Connection`` object from the public,
+  semantically versioned API, by renaming them to have leading underscores.
+  Specifically, removed:
+
+    - ``get_stream_by_id``
+    - ``get_or_create_stream``
+    - ``begin_new_stream``
+    - ``receive_frame``
+    - ``acknowledge_settings``
+
+- Added full support for receiving CONTINUATION frames, including policing
+  logic about when and how they are received. Previously, receiving
+  CONTINUATION frames was not supported and would throw exceptions.
+- All public API functions on ``H2Connection`` except for ``receive_data`` no
+  longer return lists of events, because these lists were always empty. Events
+  are now only raised by ``receive_data``.
+- Calls to ``increment_flow_control_window`` with out of range values now raise
+  ``ValueError`` exceptions. Previously they would be allowed, or would cause
+  errors when serializing frames.
+
+API Changes (Backward-Compatible)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Added ``PriorityUpdated`` event for signaling priority changes.
+- Added ``get_next_available_stream_id`` function.
+- Receiving DATA frames on streams not in the OPEN or HALF_CLOSED_LOCAL states
+  now causes a stream reset, rather than a connection reset. The error is now
+  also classified as a ``StreamClosedError``, rather than a more generic
+  ``ProtocolError``.
+- Receiving HEADERS or PUSH_PROMISE frames in the HALF_CLOSED_REMOTE state now
+  causes a stream reset, rather than a connection reset.
+- Receiving frames that violate the max frame size now causes connection errors
+  with error code FRAME_SIZE_ERROR, not a generic PROTOCOL_ERROR. This
+  condition now also raises a ``FrameTooLargeError``, a new subclass of
+  ``ProtocolError``.
+- Made ``NoSuchStreamError`` a subclass of ``ProtocolError``.
+- The ``StreamReset`` event is now also fired whenever a protocol error from
+  the remote peer forces a stream to close early. This is only fired once.
+- The ``StreamReset`` event now carries a flag, ``remote_reset``, that is set
+  to ``True`` in all cases where ``StreamReset`` would previously have fired
+  (e.g. when the remote peer sent a RST_STREAM), and is set to ``False`` when
+  it fires because the remote peer made a protocol error.
+- Hyper-h2 now rejects attempts by peers to increment a flow control window by
+  zero bytes.
+- Hyper-h2 now rejects peers sending header blocks that are ill-formed for a
+  number of reasons as set out in RFC 7540 Section 8.1.2.
+- Attempting to send non-PRIORITY frames on closed streams now raises
+  ``StreamClosedError``.
+- Remote peers attempting to increase the flow control window beyond
+  ``2**31 - 1``, either by window increment or by settings frame, are now
+  rejected as ``ProtocolError``.
+- Local attempts to increase the flow control window beyond ``2**31 - 1`` by
+  window increment are now rejected as ``ProtocolError``.
+- The bytes that represent individual settings are now available in
+  ``h2.settings``, instead of needing users to import them from hyperframe.
+
+Bugfixes
+~~~~~~~~
+
+- RFC 7540 requires that a separate minimum stream ID be used for inbound and
+  outbound streams. Hyper-h2 now obeys this requirement.
+- Hyper-h2 now does a better job of reporting the last stream ID it has
+  partially handled when terminating connections.
+- Fixed an error in the arguments of ``StreamIDTooLowError``.
+- Prevent ``ValueError`` leaking from Hyperframe.
+- Prevent ``struct.error`` and ``InvalidFrameError`` leaking from Hyperframe.
+
+1.1.1 (2015-11-17)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Forcibly lowercase all header names to improve compatibility with
+  implementations that demand lower-case header names.
+
+1.1.0 (2015-10-28)
+------------------
+
+API Changes (Backward-Compatible)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Added a new ``ConnectionTerminated`` event, which fires when GOAWAY frames
+  are received.
+- Added a subclass of ``NoSuchStreamError``, called ``StreamClosedError``, that
+  fires when actions are taken on a stream that is closed and has had its state
+  flushed from the system.
+- Added ``StreamIDTooLowError``, raised when the user or the remote peer
+  attempts to create a stream with an ID lower than one previously used in the
+  dialog. Inherits from ``ValueError`` for backward-compatibility reasons.
+
+Bugfixes
+~~~~~~~~
+
+- Do not throw ``ProtocolError`` when attempting to send multiple GOAWAY
+  frames on one connection.
+- We no longer forcefully change the decoder table size when settings changes
+  are ACKed, instead waiting for remote acknowledgement of the change.
+- Improve the performance of checking whether a stream is open.
+- We now attempt to lazily garbage collect closed streams, to avoid having the
+  state hang around indefinitely, leaking memory.
+- Avoid further per-stream allocations, leading to substantial performance
+  improvements when many short-lived streams are used.
+
+1.0.0 (2015-10-15)
+------------------
+
+- First production release!
diff --git a/tools/third_party/h2/LICENSE b/tools/third_party/h2/LICENSE
new file mode 100755
index 0000000..7bb76c5
--- /dev/null
+++ b/tools/third_party/h2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015-2016 Cory Benfield and contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/tools/third_party/h2/MANIFEST.in b/tools/third_party/h2/MANIFEST.in
new file mode 100755
index 0000000..3c8b988
--- /dev/null
+++ b/tools/third_party/h2/MANIFEST.in
@@ -0,0 +1,7 @@
+include README.rst LICENSE CONTRIBUTORS.rst HISTORY.rst tox.ini test_requirements.txt .coveragerc Makefile
+recursive-include test *.py
+graft docs
+prune docs/build
+graft visualizer
+recursive-include examples *.py *.crt *.key *.pem *.csr
+recursive-include utils *.sh
diff --git a/tools/third_party/h2/Makefile b/tools/third_party/h2/Makefile
new file mode 100755
index 0000000..6890774
--- /dev/null
+++ b/tools/third_party/h2/Makefile
@@ -0,0 +1,9 @@
+.PHONY: publish test
+
+publish:
+	rm -rf dist/
+	python setup.py sdist bdist_wheel
+	twine upload -s dist/*
+
+test:
+	py.test -n 4 --cov h2 test/
diff --git a/tools/third_party/h2/README.rst b/tools/third_party/h2/README.rst
new file mode 100755
index 0000000..7140d37
--- /dev/null
+++ b/tools/third_party/h2/README.rst
@@ -0,0 +1,65 @@
+===============================
+hyper-h2: HTTP/2 Protocol Stack
+===============================
+
+.. image:: https://raw.github.com/Lukasa/hyper/development/docs/source/images/hyper.png
+
+.. image:: https://travis-ci.org/python-hyper/hyper-h2.svg?branch=master
+    :target: https://travis-ci.org/python-hyper/hyper-h2
+
+This repository contains a pure-Python implementation of a HTTP/2 protocol
+stack. It's written from the ground up to be embeddable in whatever program you
+choose to use, ensuring that you can speak HTTP/2 regardless of your
+programming paradigm.
+
+You use it like this:
+
+.. code-block:: python
+
+    import h2.connection
+
+    conn = h2.connection.H2Connection()
+    conn.send_headers(stream_id=stream_id, headers=headers)
+    conn.send_data(stream_id, data)
+    socket.sendall(conn.data_to_send())
+    events = conn.receive_data(socket_data)
+
+This repository does not provide a parsing layer, a network layer, or any rules
+about concurrency. Instead, it's a purely in-memory solution, defined in terms
+of data actions and HTTP/2 frames. This is one building block of a full Python
+HTTP implementation.
+
+To install it, just run:
+
+.. code-block:: console
+
+    $ pip install h2
+
+Documentation
+=============
+
+Documentation is available at http://python-hyper.org/h2/.
+
+Contributing
+============
+
+``hyper-h2`` welcomes contributions from anyone! Unlike many other projects we
+are happy to accept cosmetic contributions and small contributions, in addition
+to large feature requests and changes.
+
+Before you contribute (either by opening an issue or filing a pull request),
+please `read the contribution guidelines`_.
+
+.. _read the contribution guidelines: http://python-hyper.org/en/latest/contributing.html
+
+License
+=======
+
+``hyper-h2`` is made available under the MIT License. For more details, see the
+``LICENSE`` file in the repository.
+
+Authors
+=======
+
+``hyper-h2`` is maintained by Cory Benfield, with contributions from others. For
+more details about the contributors, please see ``CONTRIBUTORS.rst``.
diff --git a/tools/third_party/h2/docs/Makefile b/tools/third_party/h2/docs/Makefile
new file mode 100755
index 0000000..32b233b
--- /dev/null
+++ b/tools/third_party/h2/docs/Makefile
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/hyper-h2.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/hyper-h2.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/hyper-h2"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/hyper-h2"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/tools/third_party/h2/docs/make.bat b/tools/third_party/h2/docs/make.bat
new file mode 100755
index 0000000..537686d
--- /dev/null
+++ b/tools/third_party/h2/docs/make.bat
@@ -0,0 +1,242 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
+set I18NSPHINXOPTS=%SPHINXOPTS% source
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  text       to make text files
+	echo.  man        to make manual pages
+	echo.  texinfo    to make Texinfo files
+	echo.  gettext    to make PO message catalogs
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  xml        to make Docutils-native XML files
+	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\hyper-h2.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\hyper-h2.ghc
+	goto end
+)
+
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished.
+	goto end
+)
+
+if "%1" == "epub" (
+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The epub file is in %BUILDDIR%/epub.
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdf" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdfja" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf-ja
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "text" (
+	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The text files are in %BUILDDIR%/text.
+	goto end
+)
+
+if "%1" == "man" (
+	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The manual pages are in %BUILDDIR%/man.
+	goto end
+)
+
+if "%1" == "texinfo" (
+	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+	goto end
+)
+
+if "%1" == "gettext" (
+	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+if "%1" == "xml" (
+	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The XML files are in %BUILDDIR%/xml.
+	goto end
+)
+
+if "%1" == "pseudoxml" (
+	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+	goto end
+)
+
+:end
diff --git a/tools/third_party/h2/docs/source/_static/.keep b/tools/third_party/h2/docs/source/_static/.keep
new file mode 100755
index 0000000..e69de29
--- /dev/null
+++ b/tools/third_party/h2/docs/source/_static/.keep
diff --git a/tools/third_party/h2/docs/source/_static/h2.connection.H2ConnectionStateMachine.dot.png b/tools/third_party/h2/docs/source/_static/h2.connection.H2ConnectionStateMachine.dot.png
new file mode 100755
index 0000000..f2c814e
--- /dev/null
+++ b/tools/third_party/h2/docs/source/_static/h2.connection.H2ConnectionStateMachine.dot.png
Binary files differ
diff --git a/tools/third_party/h2/docs/source/_static/h2.stream.H2StreamStateMachine.dot.png b/tools/third_party/h2/docs/source/_static/h2.stream.H2StreamStateMachine.dot.png
new file mode 100755
index 0000000..85bcb68
--- /dev/null
+++ b/tools/third_party/h2/docs/source/_static/h2.stream.H2StreamStateMachine.dot.png
Binary files differ
diff --git a/tools/third_party/h2/docs/source/advanced-usage.rst b/tools/third_party/h2/docs/source/advanced-usage.rst
new file mode 100755
index 0000000..40496f0
--- /dev/null
+++ b/tools/third_party/h2/docs/source/advanced-usage.rst
@@ -0,0 +1,325 @@
+Advanced Usage
+==============
+
+Priority
+--------
+
+.. versionadded:: 2.0.0
+
+`RFC 7540`_ has a fairly substantial and complex section describing how to
+build a HTTP/2 priority tree, and the effect that should have on sending data
+from a server.
+
+Hyper-h2 does not enforce any priority logic by default for servers. This is
+because scheduling data sends is outside the scope of this library, as it
+likely requires fairly substantial understanding of the scheduler being used.
+
+However, for servers that *do* want to follow the priority recommendations
+given by clients, the Hyper project provides `an implementation`_ of the
+`RFC 7540`_ priority tree that will be useful to plug into a server. That,
+combined with the :class:`PriorityUpdated <h2.events.PriorityUpdated>` event from
+this library, can be used to build a server that conforms to RFC 7540's
+recommendations for priority handling.
+
+Related Events
+--------------
+
+.. versionadded:: 2.4.0
+
+In the 2.4.0 release hyper-h2 added support for signaling "related events".
+These are a HTTP/2-only construct that exist because certain HTTP/2 events can
+occur simultaneously: that is, one HTTP/2 frame can cause multiple state
+transitions to occur at the same time. One example of this is a HEADERS frame
+that contains priority information and carries the END_STREAM flag: this would
+cause three events to fire (one of the various request/response received
+events, a :class:`PriorityUpdated <h2.events.PriorityUpdated>` event, and a
+:class:`StreamEnded <h2.events.StreamEnded>` event).
+
+Ordinarily hyper-h2's logic will emit those events to you one at a time. This
+means that you may attempt to process, for example, a
+:class:`DataReceived <h2.events.DataReceived>` event, not knowing that the next
+event out will be a :class:`StreamEnded <h2.events.StreamEnded>` event.
+hyper-h2 *does* know this, however, and so will forbid you from taking certain
+actions that are a violation of the HTTP/2 protocol.
+
+To avoid this asymmetry of information, events that can occur simultaneously
+now carry properties for their "related events". These allow users to find the
+events that can have occurred simultaneously with each other before the event
+is emitted by hyper-h2. The following objects have "related events":
+
+- :class:`RequestReceived <h2.events.RequestReceived>`:
+
+    - :data:`stream_ended <h2.events.RequestReceived.stream_ended>`: any
+      :class:`StreamEnded <h2.events.StreamEnded>` event that occurred at the
+      same time as receiving this request.
+
+    - :data:`priority_updated
+      <h2.events.RequestReceived.priority_updated>`: any
+      :class:`PriorityUpdated <h2.events.PriorityUpdated>` event that occurred
+      at the same time as receiving this request.
+
+- :class:`ResponseReceived <h2.events.ResponseReceived>`:
+
+    - :data:`stream_ended <h2.events.ResponseReceived.stream_ended>`: any
+      :class:`StreamEnded <h2.events.StreamEnded>` event that occurred at the
+      same time as receiving this response.
+
+    - :data:`priority_updated
+      <h2.events.ResponseReceived.priority_updated>`: any
+      :class:`PriorityUpdated <h2.events.PriorityUpdated>` event that occurred
+      at the same time as receiving this response.
+
+- :class:`TrailersReceived <h2.events.TrailersReceived>`:
+
+    - :data:`stream_ended <h2.events.TrailersReceived.stream_ended>`: any
+      :class:`StreamEnded <h2.events.StreamEnded>` event that occurred at the
+      same time as receiving this set of trailers. This will **always** be
+      present for trailers, as they must terminate streams.
+
+    - :data:`priority_updated
+      <h2.events.TrailersReceived.priority_updated>`: any
+      :class:`PriorityUpdated <h2.events.PriorityUpdated>` event that occurred
+      at the same time as receiving this response.
+
+- :class:`InformationalResponseReceived
+  <h2.events.InformationalResponseReceived>`:
+
+    - :data:`priority_updated
+      <h2.events.InformationalResponseReceived.priority_updated>`: any
+      :class:`PriorityUpdated <h2.events.PriorityUpdated>` event that occurred
+      at the same time as receiving this informational response.
+
+- :class:`DataReceived <h2.events.DataReceived>`:
+
+    - :data:`stream_ended <h2.events.DataReceived.stream_ended>`: any
+      :class:`StreamEnded <h2.events.StreamEnded>` event that occurred at the
+      same time as receiving this data.
+
+
+.. warning:: hyper-h2 does not know if you are looking for related events or
+             expecting to find events in the event stream. Therefore, it will
+             always emit "related events" in the event stream. If you are using
+             the "related events" event pattern, you will want to be careful to
+             avoid double-processing related events.
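+
+As a rough illustration of the warning above, the sketch below tracks related
+events that have already been handled so that their standalone copies in the
+event stream are not processed twice. The ``process_data`` and
+``finish_stream`` callbacks are hypothetical application hooks, and the sketch
+assumes the related event attribute refers to the same object that later
+appears in the event list.
+
+.. code-block:: python
+
+    import h2.events
+
+    def dispatch(conn, data, process_data, finish_stream):
+        handled = set()  # ids of events already handled via a related-event attribute
+
+        for event in conn.receive_data(data):
+            if id(event) in handled:
+                continue  # already dealt with as a related event
+
+            if isinstance(event, h2.events.DataReceived):
+                process_data(event.stream_id, event.data)
+                if event.stream_ended is not None:
+                    finish_stream(event.stream_ended.stream_id)
+                    handled.add(id(event.stream_ended))
+            elif isinstance(event, h2.events.StreamEnded):
+                finish_stream(event.stream_id)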
+
+.. _h2-connection-advanced:
+
+Connections: Advanced
+---------------------
+
+Thread Safety
+~~~~~~~~~~~~~
+
+``H2Connection`` objects are *not* thread-safe. They cannot safely be accessed
+from multiple threads at once. This is a deliberate design decision: it is not
+trivially possible to design the ``H2Connection`` object in a way that would
+be either lock-free or have the locks at a fine granularity.
+
+Your implementations should bear this in mind, and handle it appropriately. It
+should be simple enough to use locking alongside the ``H2Connection``: simply
+lock around the connection object itself. Because the ``H2Connection`` object
+does no I/O it should be entirely safe to do that. Alternatively, have a single
+thread take ownership of the ``H2Connection`` and use a message-passing
+interface to serialize access to the ``H2Connection``.
+
+If you are using a non-threaded concurrency approach (e.g. Twisted), this
+should not affect you.
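+
+For example, a coarse-grained approach might wrap every use of the connection
+in a single lock, along the lines of the purely illustrative sketch below; the
+wrapper class and its method names are our own, not part of hyper-h2.
+
+.. code-block:: python
+
+    import threading
+
+    import h2.config
+    import h2.connection
+
+    class LockedConnection(object):
+        """Serialize all access to one H2Connection behind a single lock."""
+
+        def __init__(self, sock):
+            self.sock = sock
+            self.lock = threading.Lock()
+            self.conn = h2.connection.H2Connection(
+                config=h2.config.H2Configuration(client_side=True)
+            )
+
+        def initiate(self):
+            with self.lock:
+                self.conn.initiate_connection()
+                self.sock.sendall(self.conn.data_to_send())
+
+        def send_headers(self, stream_id, headers, end_stream=False):
+            with self.lock:
+                self.conn.send_headers(stream_id, headers, end_stream=end_stream)
+                self.sock.sendall(self.conn.data_to_send())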
+
+Internal Buffers
+~~~~~~~~~~~~~~~~
+
+In order to avoid doing I/O, the ``H2Connection`` employs an internal buffer.
+This buffer is *unbounded* in size: it can potentially grow infinitely. This
+means that, if you are not making sure to regularly empty it, you are at risk
+of exceeding the memory limit of a single process and finding your program
+crashes.
+
+It is highly recommended that you send data at regular intervals, ideally as
+soon as possible.
+
+.. _advanced-sending-data:
+
+Sending Data
+~~~~~~~~~~~~
+
+When sending data on the network, it's important to remember that you may not
+be able to send an unbounded amount of data at once. Particularly when using
+TCP, it is often the case that there are limits on how much data may be in
+flight at any one time. These limits can be very low, and your operating system
+will only buffer so much data in memory before it starts to complain.
+
+For this reason, it is possible to consume only a subset of the data available
+when you call :meth:`data_to_send <h2.connection.H2Connection.data_to_send>`.
+However, once you have pulled the data out of the ``H2Connection`` internal
+buffer, it is *not* possible to put it back on again. For that reason, it is
+advisable that you confirm how much space is available in the OS buffer before
+sending.
+
+Alternatively, use tools made available by your framework. For example, the
+Python standard library :mod:`socket <python:socket>` module provides a
+:meth:`sendall <python:socket.socket.sendall>` method that will automatically
+block until all the data has been sent. This will enable you to always use the
+unbounded form of
+:meth:`data_to_send <h2.connection.H2Connection.data_to_send>`, and will help
+you avoid subtle bugs.
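+
+The two approaches might look roughly like the sketch below; ``max_bytes`` is
+a value you would derive from your own knowledge of the available buffer
+space.
+
+.. code-block:: python
+
+    def flush_everything(sock, conn):
+        # Block until the OS has accepted all pending bytes.
+        sock.sendall(conn.data_to_send())
+
+    def flush_bounded(sock, conn, max_bytes):
+        # Pull out only as much as we are prepared to write right now; anything
+        # left over stays in hyper-h2's internal buffer for a later call.
+        chunk = conn.data_to_send(max_bytes)
+        if chunk:
+            sock.sendall(chunk)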
+
+When To Send
+~~~~~~~~~~~~
+
+In addition to knowing how much data to send (see :ref:`advanced-sending-data`)
+it is important to know when to send data. For hyper-h2, this amounts to
+knowing when to call :meth:`data_to_send
+<h2.connection.H2Connection.data_to_send>`.
+
+Hyper-h2 may write data into its send buffer at two times. The first is
+whenever :meth:`receive_data <h2.connection.H2Connection.receive_data>` is
+called. This data is sent in response to some control frames that require no
+user input: for example, responding to PING frames. The second time is in
+response to user action: whenever a user calls a method like
+:meth:`send_headers <h2.connection.H2Connection.send_headers>`, data may be
+written into the buffer.
+
+In a standard design for a hyper-h2 consumer, then, that means there are two
+places where you'll potentially want to send data. The first is in your
+"receive data" loop. This is where you take the data you receive, pass it into
+:meth:`receive_data <h2.connection.H2Connection.receive_data>`, and then
+dispatch events. For this loop, it is usually best to save sending data until
+the loop is complete: that allows you to empty the buffer only once.
+
+The other place you'll want to send the data is when initiating requests or
+taking any other active, unprompted action on the connection. In this instance,
+you'll want to make all the relevant ``send_*`` calls, and *then* call
+:meth:`data_to_send <h2.connection.H2Connection.data_to_send>`.
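+
+Putting those two points together, a skeleton receive loop might look like the
+following; ``handle_event`` stands in for whatever dispatching your
+application does (and may itself call ``send_*`` methods on the connection).
+
+.. code-block:: python
+
+    def receive_loop(sock, conn, handle_event):
+        while True:
+            data = sock.recv(65535)
+            if not data:
+                break
+
+            for event in conn.receive_data(data):
+                handle_event(conn, event)
+
+            # Empty the outbound buffer once per iteration, after all events
+            # from this read have been processed.
+            sock.sendall(conn.data_to_send())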
+
+Headers
+-------
+
+HTTP/2 defines several "special header fields" which are used to encode data
+that was previously sent in either the request or status line of HTTP/1.1.
+These header fields are distinguished from ordinary header fields because their
+field name begins with a ``:`` character. The special header fields defined in
+`RFC 7540`_ are:
+
+- ``:status``
+- ``:path``
+- ``:method``
+- ``:scheme``
+- ``:authority``
+
+`RFC 7540`_ **mandates** that all of these header fields appear *first* in the
+header block, before the ordinary header fields. This could cause difficulty if
+the :meth:`send_headers <h2.connection.H2Connection.send_headers>` method
+accepted a plain ``dict`` for the ``headers`` argument, because ``dict``
+objects are unordered. For this reason, we require that you provide a list of
+two-tuples.
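+
+For example, assuming ``conn`` is an initiated client-side ``H2Connection``, a
+request header block might be sent like this (the authority and header values
+are placeholders):
+
+.. code-block:: python
+
+    conn.send_headers(
+        stream_id=conn.get_next_available_stream_id(),
+        headers=[
+            (':method', 'GET'),
+            (':scheme', 'https'),
+            (':authority', 'example.com'),
+            (':path', '/'),
+            ('user-agent', 'example-client'),
+        ],
+        end_stream=True,
+    )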
+
+.. _RFC 7540: https://tools.ietf.org/html/rfc7540
+.. _an implementation: http://python-hyper.org/projects/priority/en/latest/
+
+Flow Control
+------------
+
+HTTP/2 defines a complex flow control system that uses a sliding window of
+data on both a per-stream and per-connection basis. Essentially, each
+implementation allows its peer to send a specific amount of data at any time
+(the "flow control window") before it must stop. Each stream has a separate
+window, and the connection as a whole has a window. Each window can be opened
+by an implementation by sending a ``WINDOW_UPDATE`` frame, either on a specific
+stream (causing the window for that stream to be opened), or on stream ``0``,
+which causes the window for the entire connection to be opened.
+
+In HTTP/2, only data in ``DATA`` frames is flow controlled. All other frames
+are exempt from flow control. Each ``DATA`` frame consumes both stream and
+connection flow control window bytes. This means that the maximum amount of
+data that can be sent on any one stream before a ``WINDOW_UPDATE`` frame is
+received is the *lower* of the stream and connection windows. The maximum
+amount of data that can be sent on *all* streams before a ``WINDOW_UPDATE``
+frame is received is the size of the connection flow control window.
+
+Working With Flow Control
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The amount of flow control window a ``DATA`` frame consumes is the sum of both
+its contained application data *and* the amount of padding used. hyper-h2 shows
+this to the user in a :class:`DataReceived <h2.events.DataReceived>` event by
+using the :data:`flow_controlled_length
+<h2.events.DataReceived.flow_controlled_length>` field. When working with flow
+control in hyper-h2, users *must* use this field: simply using
+``len(datareceived.data)`` can eventually lead to deadlock.
+
+When data has been received and given to the user in a :class:`DataReceived
+<h2.events.DataReceived>`, it is the responsibility of the user to re-open the
+flow control window when the user is ready for more data. hyper-h2 does not do
+this automatically to avoid flooding the user with data: if we did, the remote
+peer could send unbounded amounts of data that the user would need to buffer
+before processing.
+
+To re-open the flow control window, then, the user must call
+:meth:`increment_flow_control_window
+<h2.connection.H2Connection.increment_flow_control_window>` with the
+:data:`flow_controlled_length <h2.events.DataReceived.flow_controlled_length>`
+of the received data. hyper-h2 requires that you manage both the connection
+and the stream flow control windows separately, so you may need to increment
+both the stream the data was received on and stream ``0``.
+
+When sending data, a HTTP/2 implementation must not send more than the flow
+control window available for that stream. As noted above, the maximum amount of data
+that can be sent on the stream is the minimum of the stream and the connection
+flow control windows. You can find out how much data you can send on a given
+stream by using the :meth:`local_flow_control_window
+<h2.connection.H2Connection.local_flow_control_window>` method, which will do
+all of these calculations for you. If you attempt to send more than this amount
+of data on a stream, hyper-h2 will throw a :class:`ProtocolError
+<h2.exceptions.ProtocolError>` and refuse to send the data.
+
+In hyper-h2, receiving a ``WINDOW_UPDATE`` frame causes a :class:`WindowUpdated
+<h2.events.WindowUpdated>` event to fire. This will notify you that there is
+potentially more room in a flow control window. Note that, just because an
+increment of a given size was received *does not* mean that that much more data
+can be sent: remember that both the connection and stream flow control windows
+constrain how much data can be sent.
+
+As a result, when a :class:`WindowUpdated <h2.events.WindowUpdated>` event
+fires with a non-zero stream ID, and the user has more data to send on that
+stream, the user should call :meth:`local_flow_control_window
+<h2.connection.H2Connection.local_flow_control_window>` to check if there
+really is more room to send data on that stream.
+
+When a :class:`WindowUpdated <h2.events.WindowUpdated>` event fires with a
+stream ID of ``0``, that may have unblocked *all* streams that are currently
+blocked. The user should use :meth:`local_flow_control_window
+<h2.connection.H2Connection.local_flow_control_window>` to check all blocked
+streams to see if more data is available.
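+
+A manual flow control handler might therefore look something like the sketch
+below. The ``blocked_streams`` bookkeeping and the ``resume_sending`` callback
+are hypothetical, and the sketch assumes your application consumes received
+data immediately.
+
+.. code-block:: python
+
+    import h2.events
+
+    def handle_flow_control(conn, event, blocked_streams, resume_sending):
+        if isinstance(event, h2.events.DataReceived):
+            # Re-open both the stream window and the connection window (stream 0)
+            # by the flow-controlled length, never by len(event.data).
+            conn.increment_flow_control_window(
+                event.flow_controlled_length, stream_id=event.stream_id
+            )
+            conn.increment_flow_control_window(event.flow_controlled_length)
+        elif isinstance(event, h2.events.WindowUpdated):
+            # A window update may unblock one stream, or (for stream 0) any of them.
+            for stream_id in list(blocked_streams):
+                if conn.local_flow_control_window(stream_id) > 0:
+                    resume_sending(stream_id)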
+
+Auto Flow Control
+~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 2.5.0
+
+In most cases, there is no advantage for users in managing their own flow
+control strategies. While particular high performance or specific-use-case
+applications may gain value from directly controlling the emission of
+``WINDOW_UPDATE`` frames, the average application can use a
+lowest-common-denominator strategy to emit those frames. As of version 2.5.0,
+hyper-h2 now provides this automatic strategy for users, if they want to use
+it.
+
+This automatic strategy is built around a single method:
+:meth:`acknowledge_received_data
+<h2.connection.H2Connection.acknowledge_received_data>`. This method
+flags to the connection object that your application has dealt with a certain
+number of flow controlled bytes, and that the window should be incremented in
+some way. Whenever your application has "processed" some received bytes, this
+method should be called to signal that they have been processed.
+
+The key difference between this method and :meth:`increment_flow_control_window
+<h2.connection.H2Connection.increment_flow_control_window>` is that the method
+:meth:`acknowledge_received_data
+<h2.connection.H2Connection.acknowledge_received_data>` does not guarantee that
+it will emit a ``WINDOW_UPDATE`` frame, and if it does it will not necessarily
+emit them for *only* the stream or *only* the frame. Instead, the
+``WINDOW_UPDATE`` frames will be *coalesced*: they will be emitted only when
+a certain number of bytes have been freed up.
+
+For most applications, this method should be preferred to the manual flow
+control mechanism.
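+
+In practice that can be as simple as the sketch below; ``consume`` is a
+hypothetical application callback.
+
+.. code-block:: python
+
+    import h2.events
+
+    def on_data(conn, event, consume):
+        assert isinstance(event, h2.events.DataReceived)
+        consume(event.data)
+        # hyper-h2 decides when (and whether) to emit coalesced WINDOW_UPDATE frames.
+        conn.acknowledge_received_data(event.flow_controlled_length, event.stream_id)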
diff --git a/tools/third_party/h2/docs/source/api.rst b/tools/third_party/h2/docs/source/api.rst
new file mode 100755
index 0000000..2c71d38
--- /dev/null
+++ b/tools/third_party/h2/docs/source/api.rst
@@ -0,0 +1,166 @@
+Hyper-h2 API
+============
+
+This document details the API of Hyper-h2.
+
+Semantic Versioning
+-------------------
+
+Hyper-h2 follows semantic versioning for its public API. Please note that the
+guarantees of semantic versioning apply only to the API that is *documented
+here*. Simply because a method or data field is not prefaced by an underscore
+does not make it part of Hyper-h2's public API. Anything not documented here is
+subject to change at any time.
+
+Connection
+----------
+
+.. autoclass:: h2.connection.H2Connection
+   :members:
+   :exclude-members: inbound_flow_control_window
+
+
+Configuration
+-------------
+
+.. autoclass:: h2.config.H2Configuration
+   :members:
+
+
+.. _h2-events-api:
+
+Events
+------
+
+.. autoclass:: h2.events.RequestReceived
+   :members:
+
+.. autoclass:: h2.events.ResponseReceived
+   :members:
+
+.. autoclass:: h2.events.TrailersReceived
+   :members:
+
+.. autoclass:: h2.events.InformationalResponseReceived
+   :members:
+
+.. autoclass:: h2.events.DataReceived
+   :members:
+
+.. autoclass:: h2.events.WindowUpdated
+   :members:
+
+.. autoclass:: h2.events.RemoteSettingsChanged
+   :members:
+
+.. autoclass:: h2.events.PingAcknowledged
+   :members:
+
+.. autoclass:: h2.events.StreamEnded
+   :members:
+
+.. autoclass:: h2.events.StreamReset
+   :members:
+
+.. autoclass:: h2.events.PushedStreamReceived
+   :members:
+
+.. autoclass:: h2.events.SettingsAcknowledged
+   :members:
+
+.. autoclass:: h2.events.PriorityUpdated
+   :members:
+
+.. autoclass:: h2.events.ConnectionTerminated
+   :members:
+
+.. autoclass:: h2.events.AlternativeServiceAvailable
+   :members:
+
+.. autoclass:: h2.events.UnknownFrameReceived
+   :members:
+
+
+Exceptions
+----------
+
+.. autoclass:: h2.exceptions.H2Error
+   :members:
+
+.. autoclass:: h2.exceptions.NoSuchStreamError
+   :show-inheritance:
+   :members:
+
+.. autoclass:: h2.exceptions.StreamClosedError
+   :show-inheritance:
+   :members:
+
+.. autoclass:: h2.exceptions.RFC1122Error
+   :show-inheritance:
+   :members:
+
+
+Protocol Errors
+~~~~~~~~~~~~~~~
+
+.. autoclass:: h2.exceptions.ProtocolError
+   :show-inheritance:
+   :members:
+
+.. autoclass:: h2.exceptions.FrameTooLargeError
+   :show-inheritance:
+   :members:
+
+.. autoclass:: h2.exceptions.FrameDataMissingError
+   :show-inheritance:
+   :members:
+
+.. autoclass:: h2.exceptions.TooManyStreamsError
+   :show-inheritance:
+   :members:
+
+.. autoclass:: h2.exceptions.FlowControlError
+   :show-inheritance:
+   :members:
+
+.. autoclass:: h2.exceptions.StreamIDTooLowError
+   :show-inheritance:
+   :members:
+
+.. autoclass:: h2.exceptions.InvalidSettingsValueError
+   :members:
+
+.. autoclass:: h2.exceptions.NoAvailableStreamIDError
+   :show-inheritance:
+   :members:
+
+.. autoclass:: h2.exceptions.InvalidBodyLengthError
+   :show-inheritance:
+   :members:
+
+.. autoclass:: h2.exceptions.UnsupportedFrameError
+   :members:
+
+.. autoclass:: h2.exceptions.DenialOfServiceError
+   :show-inheritance:
+   :members:
+
+
+HTTP/2 Error Codes
+------------------
+
+.. automodule:: h2.errors
+   :members:
+
+
+Settings
+--------
+
+.. autoclass:: h2.settings.SettingCodes
+   :members:
+
+.. autoclass:: h2.settings.Settings
+   :inherited-members:
+
+.. autoclass:: h2.settings.ChangedSetting
+   :members:
diff --git a/tools/third_party/h2/docs/source/asyncio-example.rst b/tools/third_party/h2/docs/source/asyncio-example.rst
new file mode 100755
index 0000000..d3afbfd
--- /dev/null
+++ b/tools/third_party/h2/docs/source/asyncio-example.rst
@@ -0,0 +1,17 @@
+Asyncio Example Server
+======================
+
+This example is a basic HTTP/2 server written using `asyncio`_, using some
+functionality that was introduced in Python 3.5. This server is essentially
+the same JSON-headers-returning server that was built in the
+:doc:`basic-usage` document.
+
+This example demonstrates some basic asyncio techniques.
+
+.. literalinclude:: ../../examples/asyncio/asyncio-server.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+.. _asyncio: https://docs.python.org/3/library/asyncio.html
diff --git a/tools/third_party/h2/docs/source/basic-usage.rst b/tools/third_party/h2/docs/source/basic-usage.rst
new file mode 100755
index 0000000..2d5743a
--- /dev/null
+++ b/tools/third_party/h2/docs/source/basic-usage.rst
@@ -0,0 +1,735 @@
+Getting Started: Writing Your Own HTTP/2 Server
+===============================================
+
+This document explains how to get started writing fully-fledged HTTP/2
+implementations using Hyper-h2 as the underlying protocol stack. It covers the
+basic concepts you need to understand, and talks you through writing a very
+simple HTTP/2 server.
+
+This document assumes you're moderately familiar with writing Python, and have
+*some* understanding of how computer networks work. If you don't, you'll find
+it a lot easier if you get some understanding of those concepts first and then
+return to this documentation.
+
+
+.. _h2-connection-basic:
+
+Connections
+-----------
+
+Hyper-h2's core object is the
+:class:`H2Connection <h2.connection.H2Connection>` object. This object is an
+abstract representation of the state of a single HTTP/2 connection, and holds
+all the important protocol state. When using Hyper-h2, this object will be the
+first thing you create and the object that does most of the heavy lifting.
+
+The interface to this object is relatively simple. For sending data, you
+call the object with methods indicating what actions you want to perform: for
+example, you may want to send headers (you'd use the
+:meth:`send_headers <h2.connection.H2Connection.send_headers>` method), or
+send data (you'd use the
+:meth:`send_data <h2.connection.H2Connection.send_data>` method). After you've
+decided what actions you want to perform, you get some bytes out of the object
+that represent the HTTP/2-encoded representation of your actions, and send them
+out over the network however you see fit.
+
+When you receive data from the network, you pass that data in to the
+``H2Connection`` object, which returns a list of *events*.
+These events, covered in more detail later in :ref:`h2-events-basic`, define
+the set of actions the remote peer has performed on the connection, as
+represented by the HTTP/2-encoded data you just passed to the object.
+
+Thus, you end up with a simple loop (which you may recognise as a more-specific
+form of an `event loop`_):
+
+    1. First, you perform some actions.
+    2. You send the data created by performing those actions to the network.
+    3. You read data from the network.
+    4. You decode those into events.
+    5. The events cause you to trigger some actions: go back to step 1.
+
+Of course, HTTP/2 is more complex than that, but in the very simplest case you
+can write a fairly effective HTTP/2 tool using just that kind of loop. Later in
+this document, we'll do just that.
+
+Some important subtleties of ``H2Connection`` objects are covered in
+:doc:`advanced-usage`: see :ref:`h2-connection-advanced` for more information.
+However, one subtlety should be covered, and that is this: Hyper-h2's
+``H2Connection`` object doesn't do I/O. Let's talk briefly about why.
+
+I/O
+~~~
+
+Any useful HTTP/2 tool eventually needs to do I/O. This is because it's not
+very useful to be able to speak to other computers using a protocol like HTTP/2
+unless you actually *speak* to them sometimes.
+
+However, doing I/O is not a trivial thing: there are lots of different ways to
+do it, and once you choose a way to do it your code usually won't work well
+with the approaches you *didn't* choose.
+
+While there are lots of different ways to do I/O, when it comes down to it
+all HTTP/2 implementations transform bytes received into events, and events
+into bytes to send. So there's no reason to have lots of different versions of
+this core protocol code: one for Twisted, one for gevent, one for threading,
+and one for synchronous code.
+
+This is why we said at the top that Hyper-h2 is a *HTTP/2 Protocol Stack*, not
+a *fully-fledged implementation*. Hyper-h2 knows how to transform bytes into
+events and back, but that's it. The I/O and smarts might be different, but
+the core HTTP/2 logic is the same: that's what Hyper-h2 provides.
+
+Not doing I/O makes Hyper-h2 general, and also relatively simple. It has an
+easy-to-understand performance envelope, it's easy to test (and as a result
+easy to get correct behaviour out of), and it behaves in a reproducible way.
+These are all great traits to have in a library that is doing something quite
+complex.
+
+This document will talk you through how to build a relatively simple HTTP/2
+implementation using Hyper-h2, to give you an understanding of where it fits in
+your software.
+
+
+.. _h2-events-basic:
+
+Events
+------
+
+When writing a HTTP/2 implementation it's important to know what the remote
+peer is doing: if you didn't care, writing networked programs would be a lot
+easier!
+
+Hyper-h2 encodes the actions of the remote peer in the form of *events*. When
+you receive data from the remote peer and pass it into your ``H2Connection``
+object (see :ref:`h2-connection-basic`), the ``H2Connection`` returns a list
+of objects, each one representing a single event that has occurred. Each
+event refers to a single action the remote peer has taken.
+
+Some events are fairly high-level, referring to things that are more general
+than HTTP/2: for example, the
+:class:`RequestReceived <h2.events.RequestReceived>` event is a general HTTP
+concept, not just a HTTP/2 one. Other events are extremely HTTP/2-specific:
+for example, :class:`PushedStreamReceived <h2.events.PushedStreamReceived>`
+refers to Server Push, a very HTTP/2-specific concept.
+
+The reason these events exist is that Hyper-h2 is intended to be very general.
+This means that, in many cases, Hyper-h2 does not know exactly what to do in
+response to an event. Your code will need to handle these events, and make
+decisions about what to do. That's the major role of any HTTP/2 implementation
+built on top of Hyper-h2.
+
+A full list of events is available in :ref:`h2-events-api`. For the purposes
+of this example, we will handle only a small set of events.
+
+
+Writing Your Server
+-------------------
+
+Armed with the knowledge you just obtained, we're going to write a very simple
+HTTP/2 web server. The goal is to write a server that can handle a HTTP GET,
+and that returns the headers sent by the client, encoded in JSON.
+Basically, something a lot like `httpbin.org/get`_. Nothing fancy, but this is
+a good way to get a handle on how you should interact with Hyper-h2.
+
+For the sake of simplicity, we're going to write this using the Python standard
+library, in Python 3. In reality, you'll probably want to use an asynchronous
+framework of some kind: see the `examples directory`_ in the repository for
+some examples of how you'd do that.
+
+Before we start, create a new file called ``h2server.py``: we'll use that as
+our workspace. Additionally, you should install Hyper-h2: follow the
+instructions in :doc:`installation`.
+
+Step 1: Sockets
+~~~~~~~~~~~~~~~
+
+To begin with, we need to make sure we can listen for incoming data and send it
+back. To do that, we need to use the `standard library's socket module`_. For
+now we're going to skip doing TLS: if you want to reach your server from your
+web browser, though, you'll need to add TLS and some additional functionality.
+Consider looking at our examples in our `examples directory`_ instead.
+
+Let's begin. First, open up ``h2server.py``. We need to import the socket
+module and start listening for connections.
+
+This is not a socket tutorial, so we're not going to dive too deeply into how
+this works. If you want more detail about sockets, there are lots of good
+tutorials on the web that you should investigate.
+
+When you want to listen for incoming connections, you need to *bind* an
+address first. So let's do that. Try setting up your file to look like this:
+
+.. code-block:: python
+
+    import socket
+
+    sock = socket.socket()
+    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    sock.bind(('0.0.0.0', 8080))
+    sock.listen(5)
+
+    while True:
+        print(sock.accept())
+
+In a shell window, execute this program (``python h2server.py``). Then, open
+another shell and run ``curl http://localhost:8080/``. In the first shell, you
+should see something like this:
+
+.. code-block:: console
+
+    $ python h2server.py
+    (<socket.socket fd=4, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 8080), raddr=('127.0.0.1', 58800)>, ('127.0.0.1', 58800))
+
+Run that ``curl`` command a few more times. You should see a few more similar
+lines appear. Note that the ``curl`` command itself will exit with an error.
+That's fine: it happens because we didn't send any data.
+
+Now go ahead and stop the server running by hitting Ctrl+C in the first shell.
+You should see a ``KeyboardInterrupt`` error take the process down.
+
+What's the program above doing? Well, first it creates a
+:func:`socket <python:socket.socket>` object. This socket is then *bound* to
+a specific address: ``('0.0.0.0', 8080)``. This is a special address: it means
+that this socket should be listening for any traffic to TCP port 8080. Don't
+worry about the call to ``setsockopt``: it just makes sure you can run this
+program repeatedly.
+
+We then loop forever calling the :meth:`accept <python:socket.socket.accept>`
+method on the socket. The accept method blocks until someone attempts to
+connect to our TCP port. When they do, it returns a tuple: the first element is
+a new socket object, and the second element is the address the new connection
+is from. You can see this in the output from our ``h2server.py``
+script.
+
+At this point, we have a script that can accept inbound connections. This is a
+good start! Let's start getting HTTP/2 involved.
+
+
+Step 2: Add a H2Connection
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now that we can listen for socket information, we want to prepare our HTTP/2
+connection object and start handing it data. For now, let's just see what
+happens as we feed it data.
+
+To make HTTP/2 connections, we need a tool that knows how to speak HTTP/2.
+Most versions of curl in the wild don't, so let's install a Python tool. In
+your Python environment, run ``pip install hyper``. This will install a Python
+command-line HTTP/2 tool called ``hyper``. To confirm that it works, try
+running this command and verifying that the output looks similar to the one
+shown below:
+
+.. code-block:: console
+
+    $ hyper GET http://http2bin.org/get
+    {'args': {},
+     'headers': {'Connection': 'keep-alive',
+                 'Host': 'http2bin.org',
+                 'Via': '2 http2bin.org'},
+     'origin': '10.0.0.2',
+     'url': 'http://http2bin.org/get'}
+
+Assuming it works, you're now ready to start sending HTTP/2 data.
+
+Back in our ``h2server.py`` script, we're going to want to start handling data.
+Let's add a function that takes a socket returned from ``accept``, and reads
+data from it. Let's call that function ``handle``. That function should create
+a :class:`H2Connection <h2.connection.H2Connection>` object and then loop on
+the socket, reading data and passing it to the connection.
+
+To read data from a socket we need to call ``recv``. The ``recv`` function
+takes a number as its argument, which is the *maximum* amount of data to be
+returned from a single call (note that ``recv`` will return as soon as any data
+is available, even if that amount is vastly less than the number you passed to
+it). For the purposes of writing this kind of software the specific value is
+not enormously useful, but should not be overly large. For that reason, when
+you're unsure, a number like 4096 or 65535 is a good bet. We'll use 65535 for
+this example.
+
+The function should look something like this:
+
+.. code-block:: python
+
+    import h2.connection
+
+    def handle(sock):
+        conn = h2.connection.H2Connection(client_side=False)
+
+        while True:
+            data = sock.recv(65535)
+            print(conn.receive_data(data))
+
+Let's update our main loop so that it passes data on to our new data handling
+function. Your ``h2server.py`` should end up looking a bit like this:
+
+.. code-block:: python
+
+    import socket
+
+    import h2.connection
+
+    def handle(sock):
+        conn = h2.connection.H2Connection(client_side=False)
+
+        while True:
+            data = sock.recv(65535)
+            if not data:
+                break
+
+            print(conn.receive_data(data))
+
+
+    sock = socket.socket()
+    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    sock.bind(('0.0.0.0', 8080))
+    sock.listen(5)
+
+    while True:
+        handle(sock.accept()[0])
+
+Running that in one shell, in your other shell you can run
+``hyper --h2 GET http://localhost:8080/``. That shell should hang, and you
+should then see the following output from your ``h2server.py`` shell:
+
+.. code-block:: console
+
+    $ python h2server.py
+    [<h2.events.RemoteSettingsChanged object at 0x10c4ee390>]
+
+You'll then need to kill ``hyper`` and ``h2server.py`` with Ctrl+C. Feel free
+to do this a few times, to see how things behave.
+
+So, what did we see here? When the connection was opened, we used the
+:meth:`recv <python:socket.socket.recv>` method to read some data from the
+socket, in a loop. We then passed that data to the connection object, which
+returned us a single event object:
+:class:`RemoteSettingsChanged <h2.events.RemoteSettingsChanged>`.
+
+But what we didn't see was anything else. So it seems like all ``hyper`` did
+was change its settings, but nothing else. If you look at the other ``hyper``
+window, you'll notice that it hangs for a while and then eventually fails with
+a socket timeout. It was waiting for something: what?
+
+Well, it turns out that at the start of a connection, both sides need to send
+a bit of data, called "the HTTP/2 preamble". We don't need to get into too much
+detail here, but basically both sides need to send a single block of HTTP/2
+data that tells the other side what their settings are. ``hyper`` did that,
+but we didn't.
+
+Let's do that next.
+
+
+Step 3: Sending the Preamble
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Hyper-h2 makes doing connection setup really easy. All you need to do is call
+the
+:meth:`initiate_connection <h2.connection.H2Connection.initiate_connection>`
+method, and then send the corresponding data. Let's update our ``handle``
+function to do just that:
+
+.. code-block:: python
+
+    def handle(sock):
+        conn = h2.connection.H2Connection(client_side=False)
+        conn.initiate_connection()
+        sock.sendall(conn.data_to_send())
+
+        while True:
+            data = sock.recv(65535)
+            print(conn.receive_data(data))
+
+
+The big change here is the call to ``initiate_connection``, but there's another
+new method in there:
+:meth:`data_to_send <h2.connection.H2Connection.data_to_send>`.
+
+When you make function calls on your ``H2Connection`` object, those calls will
+often generate HTTP/2 data that needs to be written out to the network. But
+Hyper-h2 doesn't do any I/O, so it can't do that itself. Instead, it writes the
+data to an internal buffer. You can retrieve data from this buffer using the
+``data_to_send`` method. There are some subtleties about that method, but we
+don't need to worry about them right now: all we need to do is make sure we're
+sending whatever data is outstanding.
+
+Your ``h2server.py`` script should now look like this:
+
+.. code-block:: python
+
+    import socket
+
+    import h2.connection
+
+    def handle(sock):
+        conn = h2.connection.H2Connection(client_side=False)
+        conn.initiate_connection()
+        sock.sendall(conn.data_to_send())
+
+        while True:
+            data = sock.recv(65535)
+            if not data:
+                break
+
+            print(conn.receive_data(data))
+
+
+    sock = socket.socket()
+    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    sock.bind(('0.0.0.0', 8080))
+    sock.listen(5)
+
+    while True:
+        handle(sock.accept()[0])
+
+
+With this change made, rerun your ``h2server.py`` script and hit it with the
+same ``hyper`` command: ``hyper --h2 GET http://localhost:8080/``. The
+``hyper`` command still hangs, but this time we get a bit more output from our
+``h2server.py`` script:
+
+.. code-block:: console
+
+    $ python h2server.py
+    [<h2.events.RemoteSettingsChanged object at 0x10292d390>]
+    [<h2.events.SettingsAcknowledged object at 0x102b3a160>]
+    [<h2.events.RequestReceived object at 0x102b3a3c8>, <h2.events.StreamEnded object at 0x102b3a400>]
+
+So, what's happening?
+
+The first thing to note is that we're going around our loop more than once now.
+First, we receive some data that triggers a
+:class:`RemoteSettingsChanged <h2.events.RemoteSettingsChanged>` event.
+Then, we get some more data that triggers a
+:class:`SettingsAcknowledged <h2.events.SettingsAcknowledged>` event.
+Finally, even more data that triggers *two* events:
+:class:`RequestReceived <h2.events.RequestReceived>` and
+:class:`StreamEnded <h2.events.StreamEnded>`.
+
+So, what's happening is that ``hyper`` is telling us about its settings,
+acknowledging ours, and then sending us a request. Then it ends a *stream*,
+which is a HTTP/2 communications channel that holds a request and response
+pair.
+
+A stream isn't done until it's either *reset* or both sides *close* it:
+in this sense it's bi-directional. So what the ``StreamEnded`` event tells us
+is that ``hyper`` is closing its half of the stream: it won't send us any more
+data on that stream. That means the request is done.
+
+So why is ``hyper`` hanging? Well, we haven't sent a response yet: let's do
+that.
+
+
+Step 4: Handling Events
+~~~~~~~~~~~~~~~~~~~~~~~
+
+What we want to do is send a response when we receive a request. Happily, we
+get an event when we receive a request, so we can use that to be our signal.
+
+Let's define a new function that sends a response. For now, this response can
+just be a little bit of data that prints "it works!".
+
+The function should take the ``H2Connection`` object, and the event that
+signaled the request. Let's define it.
+
+.. code-block:: python
+
+    def send_response(conn, event):
+        stream_id = event.stream_id
+        conn.send_headers(
+            stream_id=stream_id,
+            headers=[
+                (':status', '200'),
+                ('server', 'basic-h2-server/1.0')
+            ],
+        )
+        conn.send_data(
+            stream_id=stream_id,
+            data=b'it works!',
+            end_stream=True
+        )
+
+So while this is only a short function, there's quite a lot going on here we
+need to unpack. Firstly, what's a stream ID? Earlier we discussed streams
+briefly, to say that they're a bi-directional communications channel that holds
+a request and response pair. Part of what makes HTTP/2 great is that there can
+be lots of streams going on at once, sending and receiving different requests
+and responses. To identify each stream, we use a *stream ID*. These are unique
+across the lifetime of a connection, and they go in ascending order.
+
+Most ``H2Connection`` functions take a stream ID: they require you to actively
+tell the connection which one to use. In this case, as a simple server, we will
+never need to choose a stream ID ourselves: the client will always choose one
+for us. That means we'll always be able to get the one we need off the events
+that fire.
+
+Next, we send some *headers*. In HTTP/2, a response is made up of some set of
+headers, and optionally some data. The headers have to come first: if you're a
+client then you'll be sending *request* headers, but in our case these headers
+are our *response* headers.
+
+Mostly these aren't very exciting, but you'll notice one special header in
+there: ``:status``. This is a HTTP/2-specific header, and it's used to hold the
+HTTP status code that used to go at the top of a HTTP response. Here, we're
+saying the response is ``200 OK``, which is successful.
+
+To send headers in Hyper-h2, you use the
+:meth:`send_headers <h2.connection.H2Connection.send_headers>` function.
+
+Next, we want to send the body data. To do that, we use the
+:meth:`send_data <h2.connection.H2Connection.send_data>` function. This also
+takes a stream ID. Note that the data is binary: Hyper-h2 does not work with
+unicode strings, so you *must* pass bytestrings to the ``H2Connection``. The
+one exception is headers: Hyper-h2 will automatically encode those into UTF-8.
+
+The last thing to note is that on our call to ``send_data``, we set
+``end_stream`` to ``True``. This tells Hyper-h2 (and the remote peer) that
+we're done with sending data: the response is over. Because we know that
+``hyper`` will have ended its side of the stream, when we end ours the stream
+will be totally done with.
+
+We're nearly ready to go with this: we just need to plumb this function in.
+Let's amend our ``handle`` function again:
+
+.. code-block:: python
+
+    import h2.events
+
+    def handle(sock):
+        conn = h2.connection.H2Connection(client_side=False)
+        conn.initiate_connection()
+        sock.sendall(conn.data_to_send())
+
+        while True:
+            data = sock.recv(65535)
+            if not data:
+                break
+
+            events = conn.receive_data(data)
+            for event in events:
+                if isinstance(event, h2.events.RequestReceived):
+                    send_response(conn, event)
+
+            data_to_send = conn.data_to_send()
+            if data_to_send:
+                sock.sendall(data_to_send)
+
+The changes here are all at the end. Now, when we receive some events, we
+look through them for the ``RequestReceived`` event. If we find it, we make
+sure we send a response.
+
+Then, at the bottom of the loop we check whether we have any data to send, and
+if we do, we send it. Then, we repeat again.
+
+With these changes, your ``h2server.py`` file should look like this:
+
+.. code-block:: python
+
+    import socket
+
+    import h2.connection
+    import h2.events
+
+    def send_response(conn, event):
+        stream_id = event.stream_id
+        conn.send_headers(
+            stream_id=stream_id,
+            headers=[
+                (':status', '200'),
+                ('server', 'basic-h2-server/1.0')
+            ],
+        )
+        conn.send_data(
+            stream_id=stream_id,
+            data=b'it works!',
+            end_stream=True
+        )
+
+    def handle(sock):
+        conn = h2.connection.H2Connection(client_side=False)
+        conn.initiate_connection()
+        sock.sendall(conn.data_to_send())
+
+        while True:
+            data = sock.recv(65535)
+            if not data:
+                break
+
+            events = conn.receive_data(data)
+            for event in events:
+                if isinstance(event, h2.events.RequestReceived):
+                    send_response(conn, event)
+
+            data_to_send = conn.data_to_send()
+            if data_to_send:
+                sock.sendall(data_to_send)
+
+
+    sock = socket.socket()
+    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    sock.bind(('0.0.0.0', 8080))
+    sock.listen(5)
+
+    while True:
+        handle(sock.accept()[0])
+
+Alright. Let's run this, and then run our ``hyper`` command again.
+
+This time, nothing is printed from our server, and the ``hyper`` side prints
+``it works!``. Success! Try running it a few more times, and we can see that
+not only does it work the first time, it works the other times too!
+
+We can speak HTTP/2! Let's add the final step: returning the JSON-encoded
+request headers.
+
+Step 5: Returning Headers
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If we want to return the request headers in JSON, the first thing we have to do
+is find them. Handily, if you check the documentation for
+:class:`RequestReceived <h2.events.RequestReceived>` you'll find that this
+event carries, in addition to the stream ID, the request headers.
+
+This means we can make a really simple change to our ``send_response``
+function to take those headers and encode them as a JSON object. Let's do that:
+
+.. code-block:: python
+
+    import json
+
+    def send_response(conn, event):
+        stream_id = event.stream_id
+        response_data = json.dumps(dict(event.headers)).encode('utf-8')
+
+        conn.send_headers(
+            stream_id=stream_id,
+            headers=[
+                (':status', '200'),
+                ('server', 'basic-h2-server/1.0'),
+                ('content-length', str(len(response_data))),
+                ('content-type', 'application/json'),
+            ],
+        )
+        conn.send_data(
+            stream_id=stream_id,
+            data=response_data,
+            end_stream=True
+        )
+
+This is a really simple change, but it's all we need to do: a few extra headers
+and the JSON dump, but that's it.
+
+Section 6: Bringing It All Together
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This should be all we need!
+
+Let's take all the work we just did and throw that into our ``h2server.py``
+file, which should now look like this:
+
+.. code-block:: python
+
+    import json
+    import socket
+
+    import h2.connection
+    import h2.events
+
+    def send_response(conn, event):
+        stream_id = event.stream_id
+        response_data = json.dumps(dict(event.headers)).encode('utf-8')
+
+        conn.send_headers(
+            stream_id=stream_id,
+            headers=[
+                (':status', '200'),
+                ('server', 'basic-h2-server/1.0'),
+                ('content-length', str(len(response_data))),
+                ('content-type', 'application/json'),
+            ],
+        )
+        conn.send_data(
+            stream_id=stream_id,
+            data=response_data,
+            end_stream=True
+        )
+
+    def handle(sock):
+        conn = h2.connection.H2Connection(client_side=False)
+        conn.initiate_connection()
+        sock.sendall(conn.data_to_send())
+
+        while True:
+            data = sock.recv(65535)
+            if not data:
+                break
+
+            events = conn.receive_data(data)
+            for event in events:
+                if isinstance(event, h2.events.RequestReceived):
+                    send_response(conn, event)
+
+            data_to_send = conn.data_to_send()
+            if data_to_send:
+                sock.sendall(data_to_send)
+
+
+    sock = socket.socket()
+    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    sock.bind(('0.0.0.0', 8080))
+    sock.listen(5)
+
+    while True:
+        handle(sock.accept()[0])
+
+Now, execute ``h2server.py`` and then point ``hyper`` at it again. You should
+see something like the following output from ``hyper``:
+
+.. code-block:: console
+
+    $ hyper --h2 GET http://localhost:8080/
+    {":scheme": "http", ":authority": "localhost", ":method": "GET", ":path": "/"}
+
+Here you can see the HTTP/2 request 'special headers' that ``hyper`` sends.
+These are similar to the ``:status`` header we have to send on our response:
+they encode important parts of the HTTP request in a clearly-defined way. If
+you were writing a client stack using Hyper-h2, you'd need to make sure you
+were sending those headers.
+
+Congratulations!
+~~~~~~~~~~~~~~~~
+
+Congratulations! You've written your first HTTP/2 server! If you want to extend
+it, there are a few directions you could investigate:
+
+- We didn't handle a few events that we saw were being raised: you could add
+  some methods to handle those appropriately.
+- Right now our server is single threaded, so it can only handle one client at
+  a time. Consider rewriting this server to use threads, or writing this
+  server again using your favourite asynchronous programming framework.
+
+  If you plan to use threads, you should know that a ``H2Connection`` object is
+  deliberately not thread-safe. As a possible design pattern, consider creating
+  threads and passing the sockets returned by ``accept`` to those threads, and
+  then letting those threads create their own ``H2Connection`` objects (a
+  rough sketch of this pattern follows this list).
+- Take a look at some of our long-form code examples in :doc:`examples`.
+- Alternatively, try playing around with our examples in our repository's
+  `examples directory`_. These examples are a bit more fully-featured, and can
+  be reached from your web browser. Try adjusting what they do, or adding new
+  features to them!
+- You may want to make this server reachable from your web browser. To do that,
+  you'll need to add proper TLS support to your server. This can be tricky, and
+  in many cases requires `PyOpenSSL`_ in addition to the other libraries you
+  have installed. Check the `Eventlet example`_ to see what PyOpenSSL code is
+  required to TLS-ify your server.
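+
+As a rough sketch of the threaded design mentioned above (this reuses the
+``handle`` function defined earlier in this document; each thread owns its
+accepted socket, and ``handle`` creates a fresh ``H2Connection`` for it):
+
+.. code-block:: python
+
+    import socket
+    import threading
+
+    sock = socket.socket()
+    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    sock.bind(('0.0.0.0', 8080))
+    sock.listen(5)
+
+    while True:
+        client_sock, _ = sock.accept()
+        # No H2Connection is shared between threads: each call to handle()
+        # builds its own.
+        threading.Thread(
+            target=handle, args=(client_sock,), daemon=True
+        ).start()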
+
+
+
+.. _event loop: https://en.wikipedia.org/wiki/Event_loop
+.. _httpbin.org/get: https://httpbin.org/get
+.. _examples directory: https://github.com/python-hyper/hyper-h2/tree/master/examples
+.. _standard library's socket module: https://docs.python.org/3.5/library/socket.html
+.. _Application Layer Protocol Negotiation: https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation
+.. _get your certificate here: https://raw.githubusercontent.com/python-hyper/hyper-h2/master/examples/twisted/server.crt
+.. _get your private key here: https://raw.githubusercontent.com/python-hyper/hyper-h2/master/examples/twisted/server.key
+.. _PyOpenSSL: http://pyopenssl.readthedocs.org/
+.. _Eventlet example: https://github.com/python-hyper/hyper-h2/blob/master/examples/eventlet/eventlet-server.py
diff --git a/tools/third_party/h2/docs/source/conf.py b/tools/third_party/h2/docs/source/conf.py
new file mode 100755
index 0000000..a3b3e8a
--- /dev/null
+++ b/tools/third_party/h2/docs/source/conf.py
@@ -0,0 +1,270 @@
+# -*- coding: utf-8 -*-
+#
+# hyper-h2 documentation build configuration file, created by
+# sphinx-quickstart on Thu Sep 17 10:06:02 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../..'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'hyper-h2'
+copyright = u'2015, Cory Benfield'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '3.0.1'
+# The full version, including alpha/beta/rc tags.
+release = '3.0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'hyper-h2doc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  ('index', 'hyper-h2.tex', u'hyper-h2 Documentation',
+   u'Cory Benfield', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'hyper-h2', u'hyper-h2 Documentation',
+     [u'Cory Benfield'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'hyper-h2', u'hyper-h2 Documentation',
+   u'Cory Benfield', 'hyper-h2', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+    'python': ('https://docs.python.org/3.5/', None),
+    'hpack': ('https://python-hyper.org/hpack/en/stable/', None),
+    'pyopenssl': ('https://pyopenssl.readthedocs.org/en/latest/', None),
+}
diff --git a/tools/third_party/h2/docs/source/contributors.rst b/tools/third_party/h2/docs/source/contributors.rst
new file mode 100755
index 0000000..d84c791
--- /dev/null
+++ b/tools/third_party/h2/docs/source/contributors.rst
@@ -0,0 +1,4 @@
+Contributors
+============
+
+.. include:: ../../CONTRIBUTORS.rst
diff --git a/tools/third_party/h2/docs/source/curio-example.rst b/tools/third_party/h2/docs/source/curio-example.rst
new file mode 100755
index 0000000..7cdb616
--- /dev/null
+++ b/tools/third_party/h2/docs/source/curio-example.rst
@@ -0,0 +1,17 @@
+Curio Example Server
+====================
+
+This example is a basic HTTP/2 server written using `curio`_, David Beazley's
+example of how to build a concurrent networking framework using Python 3.5's
+new ``async``/``await`` syntax.
+
+This example is notable for demonstrating the correct use of HTTP/2 flow
+control with Hyper-h2. It is also a good example of the brand new syntax.
+
+.. literalinclude:: ../../examples/curio/curio-server.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+.. _curio: https://curio.readthedocs.org/en/latest/
diff --git a/tools/third_party/h2/docs/source/eventlet-example.rst b/tools/third_party/h2/docs/source/eventlet-example.rst
new file mode 100755
index 0000000..a23b5e2
--- /dev/null
+++ b/tools/third_party/h2/docs/source/eventlet-example.rst
@@ -0,0 +1,19 @@
+Eventlet Example Server
+=======================
+
+This example is a basic HTTP/2 server written using the `eventlet`_ concurrent
+networking framework. This example is notable for demonstrating how to
+configure `PyOpenSSL`_, which `eventlet`_ uses for its TLS layer.
+
+In terms of HTTP/2 functionality, this example is very simple: it returns the
+request headers as a JSON document to the caller. It does not obey HTTP/2 flow
+control, which is a flaw, but it is otherwise functional.
+
+.. literalinclude:: ../../examples/eventlet/eventlet-server.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+.. _eventlet: http://eventlet.net/
+.. _PyOpenSSL: https://pyopenssl.readthedocs.org/en/stable/
diff --git a/tools/third_party/h2/docs/source/examples.rst b/tools/third_party/h2/docs/source/examples.rst
new file mode 100755
index 0000000..ed7c503
--- /dev/null
+++ b/tools/third_party/h2/docs/source/examples.rst
@@ -0,0 +1,28 @@
+Code Examples
+=============
+
+This section of the documentation contains long-form code examples. These are
+intended as references for developers that would like to get an understanding
+of how Hyper-h2 fits in with various Python I/O frameworks.
+
+Example Servers
+---------------
+
+.. toctree::
+   :maxdepth: 2
+
+   asyncio-example
+   twisted-example
+   eventlet-example
+   curio-example
+   tornado-example
+   wsgi-example
+
+Example Clients
+---------------
+
+.. toctree::
+   :maxdepth: 2
+
+   twisted-head-example
+   twisted-post-example
diff --git a/tools/third_party/h2/docs/source/index.rst b/tools/third_party/h2/docs/source/index.rst
new file mode 100755
index 0000000..be85dec
--- /dev/null
+++ b/tools/third_party/h2/docs/source/index.rst
@@ -0,0 +1,41 @@
+.. hyper-h2 documentation master file, created by
+   sphinx-quickstart on Thu Sep 17 10:06:02 2015.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Hyper-h2: A pure-Python HTTP/2 protocol stack
+=============================================
+
+Hyper-h2 is a HTTP/2 protocol stack, written entirely in Python. The goal of
+Hyper-h2 is to be a common HTTP/2 stack for the Python ecosystem,
+usable in all programs regardless of concurrency model or environment.
+
+To achieve this, Hyper-h2 is entirely self-contained: it does no I/O of any
+kind, leaving that up to a wrapper library to control. This ensures that it can
+seamlessly work in all kinds of environments, from single-threaded code to
+Twisted.
+
+Its goal is to be 100% compatible with RFC 7540, implementing a complete HTTP/2
+protocol stack built on a set of finite state machines. Its secondary goals are
+to be fast, clear, and efficient.
+
+For usage examples, see :doc:`basic-usage` or consult the examples in the
+repository.
+
+Contents
+--------
+
+.. toctree::
+   :maxdepth: 2
+
+   installation
+   basic-usage
+   negotiating-http2
+   examples
+   advanced-usage
+   low-level
+   api
+   testimonials
+   release-process
+   release-notes
+   contributors
\ No newline at end of file
diff --git a/tools/third_party/h2/docs/source/installation.rst b/tools/third_party/h2/docs/source/installation.rst
new file mode 100755
index 0000000..683085f
--- /dev/null
+++ b/tools/third_party/h2/docs/source/installation.rst
@@ -0,0 +1,18 @@
+Installation
+============
+
+Hyper-h2 is a pure-python project. This means installing it is extremely
+simple. To get the latest release from PyPI, simply run:
+
+.. code-block:: console
+
+    $ pip install h2
+
+Alternatively, feel free to download one of the release tarballs from
+`our GitHub page`_, extract it to your favourite directory, and then run
+
+.. code-block:: console
+
+    $ python setup.py install
+
+.. _our GitHub page: https://github.com/python-hyper/hyper-h2
diff --git a/tools/third_party/h2/docs/source/low-level.rst b/tools/third_party/h2/docs/source/low-level.rst
new file mode 100755
index 0000000..824ba8e
--- /dev/null
+++ b/tools/third_party/h2/docs/source/low-level.rst
@@ -0,0 +1,159 @@
+Low-Level Details
+=================
+
+.. warning:: This section of the documentation covers low-level implementation
+             details of hyper-h2. This is most likely to be of use to hyper-h2
+             developers and to other HTTP/2 implementers, though it could well
+             be of general interest. Feel free to peruse it, but if you're
+             looking for information about how to *use* hyper-h2 you should
+             consider looking elsewhere.
+
+State Machines
+--------------
+
+hyper-h2 is fundamentally built on top of a pair of interacting Finite State
+Machines. One of these FSMs manages per-connection state, and another manages
+per-stream state. Almost without exception (see :ref:`priority` for more
+details) every single frame is unconditionally translated into events for
+both state machines and those state machines are turned.
+
+The advantage of a system such as this is that the finite state machines can
+very densely encode the kinds of things that are allowed at any particular
+moment in a HTTP/2 connection. More importantly, almost all protocols
+are defined *in terms* of finite state machines: that is, protocol descriptions
+can be reduced to a number of states and inputs. That makes FSMs a very natural
+tool for implementing protocol stacks.
+
+Indeed, most protocol implementations that do not explicitly encode a finite
+state machine almost always *implicitly* encode a finite state machine, by
+using classes with a bunch of variables that amount to state-tracking
+variables, or by using the call-stack as an implicit state tracking mechanism.
+While these methods are not immediately problematic, they tend to lack
+*explicitness*, and can lead to subtle bugs of the form "protocol action X is
+incorrectly allowed in state Y".
+
+For these reasons, we have implemented two *explicit* finite state machines.
+These machines aim to encode most of the protocol-specific state, in particular
+regarding what frame is allowed at what time. This goal is sometimes not
+achieved: in particular, as of this writing the *stream* FSM contains a number
+of other state variables that really ought to be rolled into the state machine
+itself in the form of new states, or in the form of a transformation of the
+FSM to use state *vectors* instead of state *scalars*.
+
+The following sections contain some implementers' notes on these FSMs.
+
+Connection State Machine
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The "outer" state machine, the first one that is encountered when sending or
+receiving data, is the connection state machine. This state machine tracks
+whole-connection state.
+
+This state machine is primarily intended to forbid certain actions on the basis
+of whether the implementation is acting as a client or a server. For example,
+clients are not permitted to send ``PUSH_PROMISE`` frames: this state machine
+forbids that by refusing to define a valid transition from the ``CLIENT_OPEN``
+state for the ``SEND_PUSH_PROMISE`` event.
+
+Otherwise, this particular state machine triggers no side-effects. It has
+very coarse, high-level functionality.
+
+A visual representation of this FSM is shown below:
+
+.. image:: _static/h2.connection.H2ConnectionStateMachine.dot.png
+   :alt: A visual representation of the connection FSM.
+   :target: _static/h2.connection.H2ConnectionStateMachine.dot.png
+
+
+.. _stream-state-machine:
+
+Stream State Machine
+~~~~~~~~~~~~~~~~~~~~
+
+Once the connection state machine has been spun, any frame that belongs to a
+stream is passed to the stream state machine for its given stream. Each stream
+has its own instance of the state machine, but all of them share the transition
+table: this is because the table itself is sufficiently large that having it be
+per-instance would be a ridiculous memory overhead.
+
+Unlike the connection state machine, the stream state machine is quite complex.
+This is because it frequently needs to encode some side-effects. The most
+common side-effect is emitting a ``RST_STREAM`` frame when an error is
+encountered: the need to do this means that far more transitions need to be
+encoded than for the connection state machine.
+
+Many of the side-effect functions in this state machine also raise
+:class:`ProtocolError <h2.exceptions.ProtocolError>` exceptions. This is almost
+always done on the basis of an extra state variable, which is an annoying code
+smell: it should always be possible for the state machine itself to police
+these using explicit state management. A future refactor will hopefully address
+this problem by making these additional state variables part of the state
+definitions in the FSM, which will lead to an expansion of the number of states
+but a greater degree of simplicity in understanding and tracking what is going
+on in the state machine.
+
+The other action taken by the side-effect functions defined here is returning
+:ref:`events <h2-events-basic>`. Most of these events are returned directly to
+the user, and reflect the specific state transition that has taken place, but
+some of the events are purely *internal*: they are used to signal to other
+parts of the hyper-h2 codebase what action has been taken.
+
+The major use of the internal events functionality at this time is for
+validating header blocks: there are different rules for request headers than
+there are for response headers, and different rules again for trailers. The
+internal events are used to determine *exactly what* kind of data the user is
+attempting to send, and using that information to do the correct kind of
+validation. This approach ensures that the final source of truth about what's
+happening at the protocol level lives inside the FSM, which is an extremely
+important design principle we want to continue to enshrine in hyper-h2.
+
+A visual representation of this FSM is shown below:
+
+.. image:: _static/h2.stream.H2StreamStateMachine.dot.png
+   :alt: A visual representation of the stream FSM.
+   :target: _static/h2.stream.H2StreamStateMachine.dot.png
+
+
+.. _priority:
+
+Priority
+~~~~~~~~
+
+In the :ref:`stream-state-machine` section we said that any frame that belongs
+to a stream is passed to the stream state machine. This turns out to be not
+quite true.
+
+Specifically, while ``PRIORITY`` frames are technically sent on a given stream
+(that is, `RFC 7540 Section 6.3`_ defines them as "always identifying a stream"
+and forbids the use of stream ID ``0`` for them), in practice they are almost
+completely exempt from the usual stream FSM behaviour. Specifically, the RFC
+has this to say:
+
+    The ``PRIORITY`` frame can be sent on a stream in any state, though it
+    cannot be sent between consecutive frames that comprise a single
+    header block (Section 4.3).
+
+Given that the consecutive header block requirement is handled outside of the
+FSMs, this section of the RFC essentially means that there is *never* a
+situation where it is invalid to receive a ``PRIORITY`` frame. This means that
+including it in the stream FSM would require that we allow ``SEND_PRIORITY``
+and ``RECV_PRIORITY`` in all states.
+
+This is not a totally onerous task: however, another key note is that hyper-h2
+uses the *absence* of a stream state machine to flag a closed stream. This is
+primarily for memory conservation reasons: if we needed to keep around an FSM
+for every stream we've ever seen, that would cause long-lived HTTP/2
+connections to consume increasingly large amounts of memory. On top of this,
+it would require us to create a stream FSM each time we received a ``PRIORITY``
+frame for a given stream, giving a malicious peer an easy route to force a
+hyper-h2 user to allocate nearly unbounded amounts of memory.
+
+For this reason, hyper-h2 circumvents the stream FSM entirely for ``PRIORITY``
+frames. Instead, these frames are treated as being connection-level frames that
+*just happen* to identify a specific stream. They do not bring streams into
+being, or in any sense interact with hyper-h2's view of streams. Their stream
+details are treated as strictly metadata that hyper-h2 is not interested in
+beyond being able to parse it out.
+
+
+.. _RFC 7540 Section 6.3: https://tools.ietf.org/html/rfc7540#section-6.3
diff --git a/tools/third_party/h2/docs/source/negotiating-http2.rst b/tools/third_party/h2/docs/source/negotiating-http2.rst
new file mode 100755
index 0000000..d4c72b2
--- /dev/null
+++ b/tools/third_party/h2/docs/source/negotiating-http2.rst
@@ -0,0 +1,100 @@
+Negotiating HTTP/2
+==================
+
+`RFC 7540`_ specifies three methods of negotiating HTTP/2 connections. This document outlines how to use Hyper-h2 with each one.
+
+.. _starting-alpn:
+
+HTTPS URLs (ALPN and NPN)
+-------------------------
+
+Starting HTTP/2 for HTTPS URLs is outlined in `RFC 7540 Section 3.3`_. In this case, the client and server use a TLS extension to negotiate HTTP/2: typically either or both of `NPN`_ or `ALPN`_. How to use NPN and ALPN is currently not covered in this document: please consult the documentation for either the :mod:`ssl module <python:ssl>` in the standard library, or the :mod:`PyOpenSSL <pyopenssl:OpenSSL.SSL>` third-party modules, for more on this topic.
+
+This method is the simplest to use once the TLS connection is established. To use it with Hyper-h2, after you've established the connection and confirmed that HTTP/2 has been negotiated with `ALPN`_, create a :class:`H2Connection <h2.connection.H2Connection>` object and call :meth:`H2Connection.initiate_connection <h2.connection.H2Connection.initiate_connection>`. This will ensure that the appropriate preamble data is placed in the data buffer. You should then immediately send the data returned by :meth:`H2Connection.data_to_send <h2.connection.H2Connection.data_to_send>` on your TLS connection.
+
+At this point, you're free to use all the HTTP/2 functionality provided by Hyper-h2.
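+
+A minimal client-side sketch of that sequence is shown below; the ``tls_sock``
+name is an assumption standing in for an already-established ``ssl.SSLSocket``
+with ALPN configured:
+
+.. code-block:: python
+
+    import h2.connection
+
+    # Only proceed if ALPN actually negotiated HTTP/2.
+    if tls_sock.selected_alpn_protocol() == 'h2':
+        conn = h2.connection.H2Connection()
+        conn.initiate_connection()
+        # Send the connection preamble queued up by initiate_connection().
+        tls_sock.sendall(conn.data_to_send())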
+
+Server Setup Example
+~~~~~~~~~~~~~~~~~~~~
+
+This example uses the APIs as defined in Python 3.5. If you are using an older version of Python you may not have access to the APIs used here. As noted above, please consult the documentation for the :mod:`ssl module <python:ssl>` to confirm.
+
+.. literalinclude:: ../../examples/fragments/server_https_setup_fragment.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+Client Setup Example
+~~~~~~~~~~~~~~~~~~~~
+
+The client example is very similar to the server example above. The :class:`SSLContext <python:ssl.SSLContext>` object requires some minor changes, as does the :class:`H2Connection <h2.connection.H2Connection>`, but the bulk of the code is the same.
+
+.. literalinclude:: ../../examples/fragments/client_https_setup_fragment.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+.. _starting-upgrade:
+
+HTTP URLs (Upgrade)
+-------------------
+
+Starting HTTP/2 for HTTP URLs is outlined in `RFC 7540 Section 3.2`_. In this case, the client and server use the HTTP Upgrade mechanism originally described in `RFC 7230 Section 6.7`_. The client sends its initial HTTP/1.1 request with two extra headers. The first is ``Upgrade: h2c``, which requests upgrade to cleartext HTTP/2. The second is an ``HTTP2-Settings`` header, which contains a specially formatted string that encodes an HTTP/2 Settings frame.
+
+To do this with Hyper-h2 you have two slightly different flows: one for clients, one for servers.
+
+Clients
+~~~~~~~
+
+For a client, when sending the first request you should manually add your ``Upgrade`` header. You should then create a :class:`H2Connection <h2.connection.H2Connection>` object and call :meth:`H2Connection.initiate_upgrade_connection <h2.connection.H2Connection.initiate_upgrade_connection>` with no arguments. This method will return a bytestring to use as the value of your ``HTTP2-Settings`` header.
+
+If the server returns a ``101`` status code, it has accepted the upgrade, and you should immediately send the data returned by :meth:`H2Connection.data_to_send <h2.connection.H2Connection.data_to_send>`. Now you should consume the entire ``101`` header block. All data after the ``101`` header block is HTTP/2 data that should be fed directly to :meth:`H2Connection.receive_data <h2.connection.H2Connection.receive_data>` and handled as normal with Hyper-h2.
+
+If the server does not return a ``101`` status code then it is not upgrading. Continue with HTTP/1.1 as normal: you may throw away your :class:`H2Connection <h2.connection.H2Connection>` object, as it is of no further use.
+
+The server will respond to your original request in HTTP/2. Please pay attention to the events received from Hyper-h2, as they will define the server's response.
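+
+To make the first step concrete, a minimal sketch of building the upgrade
+request is shown below (the complete flow appears in the example that
+follows). The ``sock`` and ``host`` names are assumptions, and error handling
+is omitted:
+
+.. code-block:: python
+
+    import h2.connection
+
+    conn = h2.connection.H2Connection()
+    # Returns the bytestring to place in the HTTP2-Settings header.
+    settings_value = conn.initiate_upgrade_connection()
+
+    request = (
+        b"GET / HTTP/1.1\r\n"
+        b"Host: " + host.encode("ascii") + b"\r\n"
+        b"Connection: Upgrade, HTTP2-Settings\r\n"
+        b"Upgrade: h2c\r\n"
+        b"HTTP2-Settings: " + settings_value + b"\r\n"
+        b"\r\n"
+    )
+    sock.sendall(request)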
+
+Client Example
+^^^^^^^^^^^^^^
+
+The code below demonstrates how to handle a plaintext upgrade from the perspective of the client. For the purposes of keeping the example code as simple and generic as possible it uses the synchronous socket API that comes with the Python standard library: if you want to use asynchronous I/O, you will need to translate this code to the appropriate idiom.
+
+.. literalinclude:: ../../examples/fragments/client_upgrade_fragment.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+Servers
+~~~~~~~
+
+If the first request you receive on a connection from the client contains an ``Upgrade`` header with the ``h2c`` token in it, and you're willing to upgrade, you should create a :class:`H2Connection <h2.connection.H2Connection>` object and call :meth:`H2Connection.initiate_upgrade_connection <h2.connection.H2Connection.initiate_upgrade_connection>` with the value of the ``HTTP2-Settings`` header (as a bytestring) as the only argument.
+
+Then, you should send back a ``101`` response that contains ``h2c`` in the ``Upgrade`` header. That response will inform the client that you're switching to HTTP/2. Then, you should immediately send the data that is returned to you by :meth:`H2Connection.data_to_send <h2.connection.H2Connection.data_to_send>` on the connection: this is a necessary part of the HTTP/2 upgrade process.
+
+At this point, you may respond to the original HTTP/1.1 request in HTTP/2 by calling the appropriate methods on the :class:`H2Connection <h2.connection.H2Connection>` object. No further HTTP/1.1 data may be sent on this connection: from this point onward, all data sent by you and the client will be HTTP/2 data.
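+
+A minimal sketch of that exchange is shown below (the complete flow appears in
+the example that follows). The ``settings_header`` and ``sock`` names are
+assumptions: ``settings_header`` holds the value of the client's
+``HTTP2-Settings`` header as a bytestring:
+
+.. code-block:: python
+
+    import h2.config
+    import h2.connection
+
+    config = h2.config.H2Configuration(client_side=False)
+    conn = h2.connection.H2Connection(config=config)
+    conn.initiate_upgrade_connection(settings_header)
+
+    # Tell the client we are switching protocols...
+    sock.sendall(
+        b"HTTP/1.1 101 Switching Protocols\r\n"
+        b"Connection: Upgrade\r\n"
+        b"Upgrade: h2c\r\n"
+        b"\r\n"
+    )
+    # ...and immediately follow with the HTTP/2 data hyper-h2 has queued.
+    sock.sendall(conn.data_to_send())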
+
+Server Example
+^^^^^^^^^^^^^^
+
+The code below demonstrates how to handle a plaintext upgrade from the perspective of the server. For the purposes of keeping the example code as simple and generic as possible it uses the synchronous socket API that comes with the Python standard library: if you want to use asynchronous I/O, you will need to translate this code to the appropriate idiom.
+
+.. literalinclude:: ../../examples/fragments/server_upgrade_fragment.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+Prior Knowledge
+---------------
+
+It's possible that you as a client know that a particular server supports HTTP/2, and that you do not need to perform any of the negotiations described above. In that case, you may follow the steps in :ref:`starting-alpn`, ignoring all references to ALPN and NPN: there's no need to perform the upgrade dance described in :ref:`starting-upgrade`.
+
+.. _RFC 7540: https://tools.ietf.org/html/rfc7540
+.. _RFC 7540 Section 3.2: https://tools.ietf.org/html/rfc7540#section-3.2
+.. _RFC 7540 Section 3.3: https://tools.ietf.org/html/rfc7540#section-3.3
+.. _NPN: https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation
+.. _ALPN: https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation
+.. _RFC 7230 Section 6.7: https://tools.ietf.org/html/rfc7230#section-6.7
diff --git a/tools/third_party/h2/docs/source/release-notes.rst b/tools/third_party/h2/docs/source/release-notes.rst
new file mode 100755
index 0000000..fa425f1
--- /dev/null
+++ b/tools/third_party/h2/docs/source/release-notes.rst
@@ -0,0 +1,101 @@
+Release Notes
+=============
+
+This document contains release notes for Hyper-h2. In addition to the
+:ref:`detailed-release-notes` found at the bottom of this document, it also
+includes a high-level prose overview of each major release after
+1.0.0.
+
+High Level Notes
+----------------
+
+3.0.0: 24 March 2017
+~~~~~~~~~~~~~~~~~~~~
+
+The Hyper-h2 team and the Hyper project are delighted to announce the release
+of Hyper-h2 version 3.0.0! Unlike the really notable 2.0.0 release, this
+release is proportionally quite small: however, it has the effect of removing a
+lot of cruft and complexity that has built up in the codebase over the lifetime
+of the v2 release series.
+
+This release was motivated primarily by discovering that applications that
+attempted to use both HTTP/1.1 and HTTP/2 using hyper-h2 would encounter
+problems with cookies, because hyper-h2 did not join together cookie headers as
+required by RFC 7540. Normally adding such behaviour would be a non-breaking
+change, but we previously had no flags to prevent normalization of received
+HTTP headers.
+
+Because it makes no sense for the cookie to be split *by default*, we needed to
+add a controlling flag and set it to true. The breaking nature of this change
+is very subtle, and it's possible most users would never notice, but
+nevertheless it *is* a breaking change and we need to treat it as such.
+
+Happily, we can take this opportunity to finalise a bunch of deprecations we'd
+made over the past year. The v2 release series was long-lived and successful,
+having had a series of releases across the past year-and-a-bit, and the Hyper
+team are very proud of it. However, it's time to open a new chapter, and remove
+the deprecated code.
+
+The past year has been enormously productive for the Hyper team. A total of 30
+v2 releases were made, an enormous amount of work. A good number of people have
+made their first contribution in this time, more than I can thank reasonably
+without taking up an unreasonable amount of space in this document, so instead
+I invite you to check out `our awesome contributor list`_.
+
+We're looking forward to the next chapter in hyper-h2: it's been a fun ride so
+far, and we hope even more of you come along and join in the fun over the next
+year!
+
+.. _our awesome contributor list: https://github.com/python-hyper/hyper-h2/graphs/contributors
+
+
+2.0.0: 25 January 2016
+~~~~~~~~~~~~~~~~~~~~~~
+
+The Hyper-h2 team and the Hyper project are delighted to announce the release
+of Hyper-h2 version 2.0.0! This is an enormous release that contains a gigantic
+collection of new features and fixes, with the goal of making it easier than
+ever to use Hyper-h2 to build a compliant HTTP/2 server or client.
+
+An enormous chunk of this work has been focused on tighter enforcement of
+restrictions in RFC 7540, ensuring that we correctly police the actions of
+remote peers, and error appropriately when those peers violate the
+specification. Several of these constitute breaking changes, because data that
+was previously received and handled without obvious error now raises
+``ProtocolError`` exceptions and causes the connection to be terminated.
+
+Additionally, the public API was cleaned up: several helper methods that had
+been inadvertently exposed were removed from it. The team wants to
+stress that while Hyper-h2 follows semantic versioning, the guarantees of
+semver apply only to the public API as documented in :doc:`api`. Reducing the
+surface area of these APIs makes it easier for us to continue to ensure that
+the guarantees of semver are respected on our public API.
+
+We also attempted to clear up some of the warts that had appeared in the API,
+and add features that are helpful for implementing HTTP/2 endpoints. For
+example, the :class:`H2Connection <h2.connection.H2Connection>` object now
+exposes a method for generating the next stream ID that your client or server
+can use to initiate a connection (:meth:`get_next_available_stream_id
+<h2.connection.H2Connection.get_next_available_stream_id>`). We also removed
+some needless return values that were guaranteed to return empty lists, which
+were an attempt to make a forward-looking guarantee that was entirely unneeded.
+
+Altogether, this has been an extremely productive period for Hyper-h2, and a
+lot of great work has been done by the community. To that end, we'd also like
+to extend a great thank you to those contributors who made their first contribution
+to the project between release 1.0.0 and 2.0.0. Many thanks to:
+`Thomas Kriechbaumer`_, `Alex Chan`_, `Maximilian Hils`_, and `Glyph`_. For a
+full historical list of contributors, see :doc:`contributors`.
+
+We're looking forward to the next few months of Python HTTP/2 work, and hoping
+that you'll find lots of excellent HTTP/2 applications to build with Hyper-h2!
+
+
+.. _Thomas Kriechbaumer: https://github.com/Kriechi
+.. _Alex Chan: https://github.com/alexwlchan
+.. _Maximilian Hils: https://github.com/mhils
+.. _Glyph: https://github.com/glyph
+
+
+.. _detailed-release-notes:
+.. include:: ../../HISTORY.rst
diff --git a/tools/third_party/h2/docs/source/release-process.rst b/tools/third_party/h2/docs/source/release-process.rst
new file mode 100755
index 0000000..e7b4606
--- /dev/null
+++ b/tools/third_party/h2/docs/source/release-process.rst
@@ -0,0 +1,56 @@
+Release Process
+===============
+
+Because of Hyper-h2's place at the bottom of the dependency tree, it is
+extremely important that the project maintains a diligent release schedule.
+This document outlines our process for managing releases.
+
+Versioning
+----------
+
+Hyper-h2 follows `semantic versioning`_ of its public API when it comes to
+numbering releases. The public API of Hyper-h2 is strictly limited to the
+entities listed in the :doc:`api` documentation: anything not mentioned in that
+document is not considered part of the public API and is not covered by the
+versioning guarantees given by semantic versioning.
+
+Maintenance
+-----------
+
+Hyper-h2 has the notion of a "release series", given by a major and minor
+version number: for example, there is the 2.1 release series. When each minor
+release is made and a release series is born, a branch is made off the release
+tag: for example, for the 2.1 release series, the 2.1.X branch.
+
+All changes merged into the master branch will be evaluated for whether they
+can be considered 'bugfixes' only (that is, they do not affect the public API).
+If they can, they will also be cherry-picked back to all active maintenance
+branches that require the bugfix. If the bugfix is not necessary, because the
+branch in question is unaffected by that bug, the bugfix will not be
+backported.
+
+Supported Release Series
+-------------------------
+
+The developers of Hyper-h2 commit to supporting the following release series:
+
+- The most recent, as identified by the first two numbers in the highest
+  version currently released.
+- The immediately prior release series.
+
+The only exception to this policy is that no release series earlier than the
+2.1 series will be supported. In this context, "supported" means that they will
+continue to receive bugfix releases.
+
+For releases other than the ones identified above, no support is guaranteed.
+The developers may *choose* to support such a release series, but they do not
+promise to.
+
+The exception here is for security vulnerabilities. If a security vulnerability
+is identified in an out-of-support release series, the developers will do their
+best to patch it and issue an emergency release. For more information, see
+`our security documentation`_.
+
+
+.. _semantic versioning: http://semver.org/
+.. _our security documentation: http://python-hyper.org/en/latest/security.html
diff --git a/tools/third_party/h2/docs/source/testimonials.rst b/tools/third_party/h2/docs/source/testimonials.rst
new file mode 100755
index 0000000..ec32fb9
--- /dev/null
+++ b/tools/third_party/h2/docs/source/testimonials.rst
@@ -0,0 +1,9 @@
+Testimonials
+============
+
+Glyph Lefkowitz
+~~~~~~~~~~~~~~~
+
+Frankly, Hyper-h2 is almost SURREAL in how well-factored and decoupled the implementation is from I/O.  If libraries in the Python ecosystem looked like this generally, Twisted would be a much better platform than it is.  (Frankly, most of Twisted's _own_ protocol implementations should aspire to such cleanliness.)
+
+(`Source <https://twistedmatrix.com/pipermail/twisted-python/2015-November/029894.html>`_)
diff --git a/tools/third_party/h2/docs/source/tornado-example.rst b/tools/third_party/h2/docs/source/tornado-example.rst
new file mode 100755
index 0000000..c7a8071
--- /dev/null
+++ b/tools/third_party/h2/docs/source/tornado-example.rst
@@ -0,0 +1,16 @@
+Tornado Example Server
+======================
+
+This example is a basic HTTP/2 server written using the `Tornado`_ asynchronous
+networking library.
+
+The server returns the request headers as a JSON document to the caller, just
+like the example from the :doc:`basic-usage` document.
+
+.. literalinclude:: ../../examples/tornado/tornado-server.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+.. _Tornado: http://www.tornadoweb.org/
diff --git a/tools/third_party/h2/docs/source/twisted-example.rst b/tools/third_party/h2/docs/source/twisted-example.rst
new file mode 100755
index 0000000..10d1116
--- /dev/null
+++ b/tools/third_party/h2/docs/source/twisted-example.rst
@@ -0,0 +1,18 @@
+Twisted Example Server
+======================
+
+This example is a basic HTTP/2 server written for the `Twisted`_ asynchronous
+networking framework. This is a relatively fleshed out example, and in
+particular it makes sure to obey HTTP/2 flow control rules.
+
+This server differs from some of the other example servers by serving files,
+rather than simply sending JSON responses. This makes the example lengthier,
+but also brings it closer to a real-world use-case.
+
+.. literalinclude:: ../../examples/twisted/twisted-server.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+.. _Twisted: https://twistedmatrix.com/
\ No newline at end of file
diff --git a/tools/third_party/h2/docs/source/twisted-head-example.rst b/tools/third_party/h2/docs/source/twisted-head-example.rst
new file mode 100755
index 0000000..2075352
--- /dev/null
+++ b/tools/third_party/h2/docs/source/twisted-head-example.rst
@@ -0,0 +1,17 @@
+Twisted Example Client: Head Requests
+=====================================
+
+This example is a basic HTTP/2 client written for the `Twisted`_ asynchronous
+networking framework.
+
+This client is fairly simple: it makes a hard-coded HEAD request to
+http2bin.org and prints out the response data. Its purpose is to demonstrate
+how to write a very basic HTTP/2 client implementation.
+
+.. literalinclude:: ../../examples/twisted/head_request.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+.. _Twisted: https://twistedmatrix.com/
\ No newline at end of file
diff --git a/tools/third_party/h2/docs/source/twisted-post-example.rst b/tools/third_party/h2/docs/source/twisted-post-example.rst
new file mode 100755
index 0000000..21bc8ac
--- /dev/null
+++ b/tools/third_party/h2/docs/source/twisted-post-example.rst
@@ -0,0 +1,18 @@
+Twisted Example Client: Post Requests
+=====================================
+
+This example is a basic HTTP/2 client written for the `Twisted`_ asynchronous
+networking framework.
+
+This client is fairly simple: it makes a hard-coded POST request to
+http2bin.org and prints out the response data, sending the file provided on
+the command line (or the script itself, if no file is given). Its purpose is
+to demonstrate how to write an HTTP/2 client implementation that handles flow
+control.
+
+.. literalinclude:: ../../examples/twisted/post_request.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+.. _Twisted: https://twistedmatrix.com/
diff --git a/tools/third_party/h2/docs/source/wsgi-example.rst b/tools/third_party/h2/docs/source/wsgi-example.rst
new file mode 100755
index 0000000..8251389
--- /dev/null
+++ b/tools/third_party/h2/docs/source/wsgi-example.rst
@@ -0,0 +1,23 @@
+Example HTTP/2-only WSGI Server
+===============================
+
+This example is a more complex HTTP/2 server that acts as a WSGI server,
+passing data to an arbitrary WSGI application. This example is written using
+`asyncio`_. The server supports most of PEP-3333, and so could in principle be
+used as a production WSGI server: however, that's *not recommended* as certain
+shortcuts have been taken to ensure ease of implementation and understanding.
+
+The main advantages of this example are:
+
+1. It properly demonstrates HTTP/2 flow control management.
+2. It demonstrates how to plug hyper-h2 into a larger, more complex
+   application.
+
+
+.. literalinclude:: ../../examples/asyncio/wsgi-server.py
+   :language: python
+   :linenos:
+   :encoding: utf-8
+
+
+.. _asyncio: https://docs.python.org/3/library/asyncio.html
diff --git a/tools/third_party/h2/examples/asyncio/asyncio-server.py b/tools/third_party/h2/examples/asyncio/asyncio-server.py
new file mode 100755
index 0000000..0e5b912
--- /dev/null
+++ b/tools/third_party/h2/examples/asyncio/asyncio-server.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+"""
+asyncio-server.py
+~~~~~~~~~~~~~~~~~
+
+A fully-functional HTTP/2 server using asyncio. Requires Python 3.5+.
+
+This example demonstrates handling requests with bodies, as well as handling
+those without. In particular, it demonstrates the fact that DataReceived may
+be fired multiple times, and that applications must handle that possibility.
+
+Please note that this example does not handle flow control, and so only works
+properly for relatively small requests. Please see other examples to understand
+how flow control should work.
+"""
+import asyncio
+import io
+import json
+import ssl
+import collections
+from typing import List, Tuple
+
+from h2.config import H2Configuration
+from h2.connection import H2Connection
+from h2.events import (
+    ConnectionTerminated, DataReceived, RequestReceived, StreamEnded
+)
+from h2.errors import ErrorCodes
+from h2.exceptions import ProtocolError
+
+
+RequestData = collections.namedtuple('RequestData', ['headers', 'data'])
+
+
+class H2Protocol(asyncio.Protocol):
+    def __init__(self):
+        config = H2Configuration(client_side=False, header_encoding='utf-8')
+        self.conn = H2Connection(config=config)
+        self.transport = None
+        self.stream_data = {}
+
+    def connection_made(self, transport: asyncio.Transport):
+        self.transport = transport
+        self.conn.initiate_connection()
+        self.transport.write(self.conn.data_to_send())
+
+    def data_received(self, data: bytes):
+        try:
+            events = self.conn.receive_data(data)
+        except ProtocolError:
+            self.transport.write(self.conn.data_to_send())
+            self.transport.close()
+        else:
+            self.transport.write(self.conn.data_to_send())
+            for event in events:
+                if isinstance(event, RequestReceived):
+                    self.request_received(event.headers, event.stream_id)
+                elif isinstance(event, DataReceived):
+                    self.receive_data(event.data, event.stream_id)
+                elif isinstance(event, StreamEnded):
+                    self.stream_complete(event.stream_id)
+                elif isinstance(event, ConnectionTerminated):
+                    self.transport.close()
+
+                self.transport.write(self.conn.data_to_send())
+
+    def request_received(self, headers: List[Tuple[str, str]], stream_id: int):
+        headers = collections.OrderedDict(headers)
+        method = headers[':method']
+
+        # We only support GET and POST.
+        if method not in ('GET', 'POST'):
+            self.return_405(headers, stream_id)
+            return
+
+        # Store off the request data.
+        request_data = RequestData(headers, io.BytesIO())
+        self.stream_data[stream_id] = request_data
+
+    def stream_complete(self, stream_id: int):
+        """
+        When a stream is complete, we can send our response.
+        """
+        try:
+            request_data = self.stream_data[stream_id]
+        except KeyError:
+            # Just return, we probably 405'd this already
+            return
+
+        headers = request_data.headers
+        body = request_data.data.getvalue().decode('utf-8')
+
+        data = json.dumps(
+            {"headers": headers, "body": body}, indent=4
+        ).encode("utf8")
+
+        response_headers = (
+            (':status', '200'),
+            ('content-type', 'application/json'),
+            ('content-length', str(len(data))),
+            ('server', 'asyncio-h2'),
+        )
+        self.conn.send_headers(stream_id, response_headers)
+        self.conn.send_data(stream_id, data, end_stream=True)
+
+    def return_405(self, headers: List[Tuple[str, str]], stream_id: int):
+        """
+        We don't support the given method, so we want to return a 405 response.
+        """
+        response_headers = (
+            (':status', '405'),
+            ('content-length', '0'),
+            ('server', 'asyncio-h2'),
+        )
+        self.conn.send_headers(stream_id, response_headers, end_stream=True)
+
+    def receive_data(self, data: bytes, stream_id: int):
+        """
+        We've received some data on a stream. If that stream is one we're
+        expecting data on, save it off. Otherwise, reset the stream.
+        """
+        try:
+            stream_data = self.stream_data[stream_id]
+        except KeyError:
+            self.conn.reset_stream(
+                stream_id, error_code=ErrorCodes.PROTOCOL_ERROR
+            )
+        else:
+            stream_data.data.write(data)
+
+
+ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ssl_context.options |= (
+    ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
+)
+ssl_context.set_ciphers("ECDHE+AESGCM")
+ssl_context.load_cert_chain(certfile="cert.crt", keyfile="cert.key")
+ssl_context.set_alpn_protocols(["h2"])
+
+loop = asyncio.get_event_loop()
+# Each client connection will create a new protocol instance
+coro = loop.create_server(H2Protocol, '127.0.0.1', 8443, ssl=ssl_context)
+server = loop.run_until_complete(coro)
+
+# Serve requests until Ctrl+C is pressed
+print('Serving on {}'.format(server.sockets[0].getsockname()))
+try:
+    loop.run_forever()
+except KeyboardInterrupt:
+    pass
+
+# Close the server
+server.close()
+loop.run_until_complete(server.wait_closed())
+loop.close()
diff --git a/tools/third_party/h2/examples/asyncio/cert.crt b/tools/third_party/h2/examples/asyncio/cert.crt
new file mode 100755
index 0000000..d6cf7d5
--- /dev/null
+++ b/tools/third_party/h2/examples/asyncio/cert.crt
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDhTCCAm2gAwIBAgIJAOrxh0dOYJLdMA0GCSqGSIb3DQEBCwUAMFkxCzAJBgNV
+BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
+aWRnaXRzIFB0eSBMdGQxEjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0xNTA5MTkxNDE2
+NDRaFw0xNTEwMTkxNDE2NDRaMFkxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21l
+LVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxEjAQBgNV
+BAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMqt
+A1iu8EN00FU0eBcBGlLVmNEgV7Jkbukra+kwS8j/U2y50QPGJc/FiIVDfuBqk5dL
+ACTNc6A/FQcXvWmOc5ixmC3QKKasMpuofqKz0V9C6irZdYXZ9rcsW0gHQIr989yd
+R+N1VbIlEVW/T9FJL3B2UD9GVIkUELzm47CSOWZvAxQUlsx8CUNuUCWqyZJoqTFN
+j0LeJDOWGCsug1Pkj0Q1x+jMVL6l6Zf6vMkLNOMsOsWsxUk+0L3tl/OzcTgUOCsw
+UzY59RIi6Rudrp0oaU8NuHr91yiSqPbKFlX10M9KwEEdnIpcxhND3dacrDycj3ux
+eWlqKync2vOFUkhwiaMCAwEAAaNQME4wHQYDVR0OBBYEFA0PN+PGoofZ+QIys2Jy
+1Zz94vBOMB8GA1UdIwQYMBaAFA0PN+PGoofZ+QIys2Jy1Zz94vBOMAwGA1UdEwQF
+MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEplethBoPpcP3EbR5Rz6snDDIcbtAJu
+Ngd0YZppGT+P0DYnPJva4vRG3bb84ZMSuppz5j67qD6DdWte8UXhK8BzWiHzwmQE
+QmbKyzzTMKQgTNFntpx5cgsSvTtrHpNYoMHzHOmyAOboNeM0DWiRXsYLkWTitLTN
+qbOpstwPubExbT9lPjLclntShT/lCupt+zsbnrR9YiqlYFY/fDzfAybZhrD5GMBY
+XdMPItwAc/sWvH31yztarjkLmld76AGCcO5r8cSR/cX98SicyfjOBbSco8GkjYNY
+582gTPkKGYpStuN7GNT5tZmxvMq935HRa2XZvlAIe8ufp8EHVoYiF3c=
+-----END CERTIFICATE-----
diff --git a/tools/third_party/h2/examples/asyncio/cert.key b/tools/third_party/h2/examples/asyncio/cert.key
new file mode 100755
index 0000000..bda69e8
--- /dev/null
+++ b/tools/third_party/h2/examples/asyncio/cert.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAyq0DWK7wQ3TQVTR4FwEaUtWY0SBXsmRu6Str6TBLyP9TbLnR
+A8Ylz8WIhUN+4GqTl0sAJM1zoD8VBxe9aY5zmLGYLdAopqwym6h+orPRX0LqKtl1
+hdn2tyxbSAdAiv3z3J1H43VVsiURVb9P0UkvcHZQP0ZUiRQQvObjsJI5Zm8DFBSW
+zHwJQ25QJarJkmipMU2PQt4kM5YYKy6DU+SPRDXH6MxUvqXpl/q8yQs04yw6xazF
+ST7Qve2X87NxOBQ4KzBTNjn1EiLpG52unShpTw24ev3XKJKo9soWVfXQz0rAQR2c
+ilzGE0Pd1pysPJyPe7F5aWorKdza84VSSHCJowIDAQABAoIBACp+nh4BB/VMz8Wd
+q7Q/EfLeQB1Q57JKpoqTBRwueSVai3ZXe4CMEi9/HkG6xiZtkiZ9njkZLq4hq9oB
+2z//kzMnwV2RsIRJxI6ohGy+wR51HD4BvEdlTPpY/Yabpqe92VyfSYxidKZWaU0O
+QMED1EODOw4ZQ+4928iPrJu//PMB4e7TFao0b9Fk/XLWtu5/tQZz9jsrlTi1zthh
+7n+oaGNhfTeIJJL4jrhTrKW1CLHXATtr9SJlfZ3wbMxQVeyj2wUlP1V0M6kBuhNj
+tbGbMpixD5iCNJ49Cm2PHg+wBOfS3ADGIpi3PcGw5mb8nB3N9eGBRPhLShAlq5Hi
+Lv4tyykCgYEA8u3b3xJ04pxWYN25ou/Sc8xzgDCK4XvDNdHVTuZDjLVA+VTVPzql
+lw7VvJArsx47MSPvsaX/+4hQXYtfnR7yJpx6QagvQ+z4ludnIZYrQwdUmb9pFL1s
+8UNj+3j9QFRPenIiIQ8qxxNIQ9w2HsVQ8scvc9CjYop/YYAPaQyHaL8CgYEA1ZSz
+CR4NcpfgRSILdhb1dLcyw5Qus1VOSAx3DYkhDkMiB8XZwgMdJjwehJo9yaqRCLE8
+Sw5znMnkfoZpu7+skrjK0FqmMpXMH9gIszHvFG8wSw/6+2HIWS19/wOu8dh95LuC
+0zurMk8rFqxgWMWF20afhgYrUz42cvUTo10FVB0CgYEAt7mW6W3PArfUSCxIwmb4
+VmXREKkl0ATHDYQl/Cb//YHzot467TgQll883QB4XF5HzBFurX9rSzO7/BN1e6I0
+52i+ubtWC9xD4fUetXMaQvZfUGxIL8xXgVxDWKQXfLiG54c8Mp6C7s6xf8kjEUCP
+yR1F0SSA/Pzb+8RbY0p7eocCgYA+1rs+SXtHZev0KyoYGnUpW+Uxqd17ofOgOxqj
+/t6c5Z+TjeCdtnDTGQkZlo/rT6XQWuUUaDIXxUbW+xEMzj4mBPyXBLS1WWFvVQ5q
+OpzO9E/PJeqAH6rkof/aEelc+oc/zvOU1o9uA+D3kMvgEm1psIOq2RHSMhGvDPA0
+NmAk+QKBgQCwd1681GagdIYSZUCBecnLtevXmIsJyDW2yR1NNcIe/ukcVQREMDvy
+5DDkhnGDgnV1D5gYcXb34g9vYvbfTnBMl/JXmMAAG1kIS+3pvHyN6f1poVe3yJV1
+yHVuvymnJxKnyaV0L3ntepVvV0vVNIkA3oauoUTLto6txBI+b/ImDA==
+-----END RSA PRIVATE KEY-----
diff --git a/tools/third_party/h2/examples/asyncio/wsgi-server.py b/tools/third_party/h2/examples/asyncio/wsgi-server.py
new file mode 100755
index 0000000..09ab2ce
--- /dev/null
+++ b/tools/third_party/h2/examples/asyncio/wsgi-server.py
@@ -0,0 +1,760 @@
+# -*- coding: utf-8 -*-
+"""
+wsgi-server.py
+~~~~~~~~~~~~~~
+
+A fully-functional WSGI server, written using hyper-h2. Requires asyncio.
+
+To test it, try installing httpbin from pip (``pip install httpbin``) and then
+running the server (``python asyncio-server.py httpbin:app``).
+
+This server does not support HTTP/1.1: it is an HTTP/2-only WSGI server. The
+purpose of this code is to demonstrate how to integrate hyper-h2 into a more
+complex application, and to demonstrate several principles of concurrent
+programming.
+
+The architecture looks like this:
+
++---------------------------------+
+|     1x HTTP/2 Server Thread     |
+|        (running asyncio)        |
++---------------------------------+
++---------------------------------+
+|    N WSGI Application Threads   |
+|           (no asyncio)          |
++---------------------------------+
+
+Essentially, we spin up an asyncio-based event loop in the main thread. This
+launches one HTTP/2 Protocol instance for each inbound connection, all of which
+will read and write data from within the main thread in an asynchronous manner.
+
+When each HTTP request comes in, the server will build the WSGI environment
+dictionary and create a ``Stream`` object. This object will hold the relevant
+state for the request/response pair and will act as the WSGI side of the logic.
+That object will then be passed to a background thread pool, and when a worker
+is available the WSGI logic will begin to be executed. This model ensures that
+the asyncio web server itself is never blocked by the WSGI application.
+
+The WSGI application and the HTTP/2 server communicate via an asyncio queue,
+together with locks and threading events. The locks themselves are implicit in
+asyncio's "call_soon_threadsafe", which allows for a background thread to
+register an action with the main asyncio thread. When the asyncio thread
+eventually takes the action in question, it sets a threading event, signaling
+to the background thread that it is free to continue its work.
+
+To make the WSGI application work with flow control, there is a very important
+invariant that must be observed. Any WSGI action that would cause data to be
+emitted to the network MUST be accompanied by a threading Event that is not
+set until that data has been written to the transport. This ensures that the
+WSGI application *blocks* until the data is actually sent. The reason we
+require this invariant is that the HTTP/2 server may choose to re-order some
+data chunks for flow control reasons: that is, the application for stream X may
+have actually written its data first, but the server may elect to send the data
+for stream Y first. This means that it's vital that there not be *two* writes
+for stream X active at any one point or they may get reordered, which would be
+particularly terrible.
+
+Thus, the server must cooperate to ensure that each threading event only fires
+when the *complete* data for that event has been written to the asyncio
+transport. Any earlier will cause untold craziness.
+"""
+import asyncio
+import importlib
+import queue
+import ssl
+import sys
+import threading
+
+from h2.config import H2Configuration
+from h2.connection import H2Connection
+from h2.events import (
+    DataReceived, RequestReceived, WindowUpdated, StreamEnded, StreamReset
+)
+
+
+# Used to signal that a request has completed.
+#
+# This is a convenient way to do "in-band" signaling of stream completion
+# without doing anything so heavyweight as using a class. Essentially, we can
+# test identity against this empty object. In fact, this is so convenient that
+# we use this object for all streams, for data in both directions: in and out.
+END_DATA_SENTINEL = object()
+
+# The WSGI callable. Stored here so that the protocol instances can get hold
+# of the data.
+APPLICATION = None
+
+
+class H2Protocol(asyncio.Protocol):
+    def __init__(self):
+        config = H2Configuration(client_side=False, header_encoding='utf-8')
+
+        # Our server-side state machine.
+        self.conn = H2Connection(config=config)
+
+        # The backing transport.
+        self.transport = None
+
+        # A dictionary of ``Stream`` objects, keyed by their stream ID. This
+        # makes it easy to route data to the correct WSGI application instance.
+        self.streams = {}
+
+        # A queue of data emitted by WSGI applications that has not yet been
+        # sent. Each stream may only have one chunk of data in either this
+        # queue or the flow_controlled_data dictionary at any one time.
+        self._stream_data = asyncio.Queue()
+
+        # Data that has been pulled off the queue that is for a stream blocked
+        # behind flow control limitations. This is used to avoid spinning on
+        # _stream_data queue when a stream cannot have its data sent. Data that
+        # cannot be sent on the connection when it is popped off the queue gets
+        # placed here until the stream flow control window opens up again.
+        self._flow_controlled_data = {}
+
+        # A reference to the loop in which this protocol runs. This is needed
+        # to synchronise up with background threads.
+        self._loop = asyncio.get_event_loop()
+
+        # Any streams that have been remotely reset. We keep track of these to
+        # ensure that we don't emit data from a WSGI application whose stream
+        # has been cancelled.
+        self._reset_streams = set()
+
+        # Keep track of the loop sending task so we can kill it when the
+        # connection goes away.
+        self._send_loop_task = None
+
+    def connection_made(self, transport):
+        """
+        The connection has been made. Here we need to save off our transport,
+        do basic HTTP/2 connection setup, and then start our data writing
+        coroutine.
+        """
+        self.transport = transport
+        self.conn.initiate_connection()
+        self.transport.write(self.conn.data_to_send())
+        self._send_loop_task = self._loop.create_task(self.sending_loop())
+
+    def connection_lost(self, exc):
+        """
+        With the end of the connection, we just want to cancel our data sending
+        coroutine.
+        """
+        self._send_loop_task.cancel()
+
+    def data_received(self, data):
+        """
+        Process inbound data.
+        """
+        events = self.conn.receive_data(data)
+
+        for event in events:
+            if isinstance(event, RequestReceived):
+                self.request_received(event)
+            elif isinstance(event, DataReceived):
+                self.data_frame_received(event)
+            elif isinstance(event, WindowUpdated):
+                self.window_opened(event)
+            elif isinstance(event, StreamEnded):
+                self.end_stream(event)
+            elif isinstance(event, StreamReset):
+                self.reset_stream(event)
+
+        outbound_data = self.conn.data_to_send()
+        if outbound_data:
+            self.transport.write(outbound_data)
+
+    def window_opened(self, event):
+        """
+        The flow control window got opened.
+
+        This is important because it's possible that we were unable to send
+        some WSGI data because the flow control window was too small. If that
+        happens, the sending_loop coroutine starts buffering data.
+
+        As the window gets opened, we need to unbuffer the data. We do that by
+        placing the data chunks back on the back of the send queue and letting
+        the sending loop take another shot at sending them.
+
+        This system only works because we require that each stream only have
+        *one* data chunk in the sending queue at any time. The threading events
+        force this invariant to remain true.
+        """
+        if event.stream_id:
+            # This is specific to a single stream.
+            if event.stream_id in self._flow_controlled_data:
+                self._stream_data.put_nowait(
+                    self._flow_controlled_data.pop(event.stream_id)
+                )
+        else:
+            # This event is specific to the connection. Free up *all* the
+            # streams. This is a bit tricky, but we *must not* yield the flow
+            # of control here or it all goes wrong.
+            for data in self._flow_controlled_data.values():
+                self._stream_data.put_nowait(data)
+
+            self._flow_controlled_data = {}
+
+    @asyncio.coroutine
+    def sending_loop(self):
+        """
+        A call that loops forever, attempting to send data. This sending loop
+        contains most of the flow-control smarts of this class: it pulls data
+        off of the asyncio queue and then attempts to send it.
+
+        The difficulties here are all around flow control. Specifically, a
+        chunk of data may be too large to send. In this case, what will happen
+        is that this coroutine will attempt to send what it can and will then
+        store the unsent data locally. When a flow control event comes in that
+        data will be freed up and placed back onto the asyncio queue, causing
+        it to pop back up into the sending logic of this coroutine.
+
+        This method explicitly *does not* handle HTTP/2 priority. That adds an
+        extra layer of complexity to what is already a fairly complex method,
+        and we'll look at how to do it another time.
+
+        This coroutine explicitly *does not end*.
+        """
+        while True:
+            stream_id, data, event = yield from self._stream_data.get()
+
+            # If this stream got reset, just drop the data on the floor. Note
+            # that we still need to set the event here to make sure that the
+            # application doesn't lock up.
+            if stream_id in self._reset_streams:
+                event.set()
+                continue
+
+            # Check if the body is done. If it is, this is really easy! Again,
+            # we *must* set the event here or the application will lock up.
+            if data is END_DATA_SENTINEL:
+                self.conn.end_stream(stream_id)
+                self.transport.write(self.conn.data_to_send())
+                event.set()
+                continue
+
+            # We need to send data, but not to exceed the flow control window.
+            # For that reason, grab only the data that fits: we'll buffer the
+            # rest.
+            window_size = self.conn.local_flow_control_window(stream_id)
+            chunk_size = min(window_size, len(data))
+            data_to_send = data[:chunk_size]
+            data_to_buffer = data[chunk_size:]
+
+            if data_to_send:
+                # There's a maximum frame size we have to respect. Because we
+                # aren't paying any attention to priority here, we can quite
+                # safely just split this string up into chunks of max frame
+                # size and blast them out.
+                #
+                # In a *real* application you'd want to consider priority here.
+                max_size = self.conn.max_outbound_frame_size
+                chunks = (
+                    data_to_send[x:x+max_size]
+                    for x in range(0, len(data_to_send), max_size)
+                )
+                for chunk in chunks:
+                    self.conn.send_data(stream_id, chunk)
+                self.transport.write(self.conn.data_to_send())
+
+            # If there's data left to buffer, we should do that. Put it in a
+            # dictionary and *don't set the event*: the app must not generate
+            # any more data until we got rid of all of this data.
+            if data_to_buffer:
+                self._flow_controlled_data[stream_id] = (
+                    stream_id, data_to_buffer, event
+                )
+            else:
+                # We sent everything. We can let the WSGI app progress.
+                event.set()
+
+    def request_received(self, event):
+        """
+        A HTTP/2 request has been received. We need to invoke the WSGI
+        application in a background thread to handle it.
+        """
+        # First, we are going to want an object to hold all the relevant state
+        # for this request/response. For that, we have a stream object. We
+        # need to store the stream object somewhere reachable for when data
+        # arrives later.
+        s = Stream(event.stream_id, self)
+        self.streams[event.stream_id] = s
+
+        # Next, we need to build the WSGI environ dictionary.
+        environ = _build_environ_dict(event.headers, s)
+
+        # Finally, we want to throw these arguments out to a threadpool and
+        # let it run.
+        self._loop.run_in_executor(
+            None,
+            s.run_in_threadpool,
+            APPLICATION,
+            environ,
+        )
+
+    def data_frame_received(self, event):
+        """
+        Data has been received by the WSGI server and needs to be dispatched
+        to a running application.
+
+        Note that the flow control window is not modified here. That's
+        deliberate: see Stream.__next__ for a longer discussion of why.
+        """
+        # Grab the stream in question from our dictionary and pass it on.
+        stream = self.streams[event.stream_id]
+        stream.receive_data(event.data, event.flow_controlled_length)
+
+    def end_stream(self, event):
+        """
+        The stream data is complete.
+        """
+        stream = self.streams[event.stream_id]
+        stream.request_complete()
+
+    def reset_stream(self, event):
+        """
+        A stream got forcefully reset.
+
+        This is a tricky thing to deal with because WSGI doesn't really have a
+        good notion for it. Essentially, you have to let the application run
+        until completion, but not actually let it send any data.
+
+        We do that by discarding any data we currently have for it, and then
+        marking the stream as reset to allow us to spot when that stream is
+        trying to send data and drop that data on the floor.
+
+        We then *also* signal the WSGI application that no more data is
+        incoming, to ensure that it does not attempt to do further reads of the
+        data.
+        """
+        if event.stream_id in self._flow_controlled_data:
+            del self._flow_controlled_data[event.stream_id]
+
+        self._reset_streams.add(event.stream_id)
+        self.end_stream(event)
+
+    def data_for_stream(self, stream_id, data):
+        """
+        Thread-safe method called from outside the main asyncio thread in order
+        to send data on behalf of a WSGI application.
+
+        Places data being written by a stream on an asyncio queue. Returns a
+        threading event that will fire when that data is sent.
+        """
+        event = threading.Event()
+        self._loop.call_soon_threadsafe(
+            self._stream_data.put_nowait,
+            (stream_id, data, event)
+        )
+        return event
+
+    def send_response(self, stream_id, headers):
+        """
+        Thread-safe method called from outside the main asyncio thread in order
+        to send the HTTP response headers on behalf of a WSGI application.
+
+        Returns a threading event that will fire when the headers have been
+        emitted to the network.
+        """
+        event = threading.Event()
+
+        def _inner_send(stream_id, headers, event):
+            self.conn.send_headers(stream_id, headers, end_stream=False)
+            self.transport.write(self.conn.data_to_send())
+            event.set()
+
+        self._loop.call_soon_threadsafe(
+            _inner_send,
+            stream_id,
+            headers,
+            event
+        )
+        return event
+
+    def open_flow_control_window(self, stream_id, increment):
+        """
+        Opens a flow control window for the given stream by the given amount.
+        Called from a WSGI thread. Does not return an event because there's no
+        need to block on this action, it may take place at any time.
+        """
+        def _inner_open(stream_id, increment):
+            self.conn.increment_flow_control_window(increment, stream_id)
+            self.conn.increment_flow_control_window(increment, None)
+            self.transport.write(self.conn.data_to_send())
+
+        self._loop.call_soon_threadsafe(
+            _inner_open,
+            stream_id,
+            increment,
+        )
+
+
+class Stream:
+    """
+    This class holds all of the state for a single stream. It also provides
+    several of the callables used by the WSGI application. Finally, it provides
+    the logic for actually interfacing with the WSGI application.
+
+    For these reasons, the object has *strict* requirements on thread-safety.
+    While the object can be initialized in the main WSGI thread, the
+    ``run_in_threadpool`` method *must* be called from outside that thread. At
+    that point, the main WSGI thread may only call specific methods.
+    """
+    def __init__(self, stream_id, protocol):
+        self.stream_id = stream_id
+        self._protocol = protocol
+
+        # Queue for data that has been received from the network. This is a
+        # thread-safe queue, to allow both the WSGI application to block on
+        # receiving more data and to allow the asyncio server to keep sending
+        # more data.
+        #
+        # This queue is unbounded in size, but in practice it cannot contain
+        # too much data because the flow control window doesn't get adjusted
+        # unless data is removed from it.
+        self._received_data = queue.Queue()
+
+        # This buffer is used to hold partial chunks of data from
+        # _received_data that were not returned out of ``read`` and friends.
+        self._temp_buffer = b''
+
+        # Temporary variables that allow us to keep hold of the headers and
+        # response status until such time as the application needs us to send
+        # them.
+        self._response_status = b''
+        self._response_headers = []
+        self._headers_emitted = False
+
+        # Whether the application has received all the data from the network
+        # or not. This allows us to short-circuit some reads.
+        self._complete = False
+
+    def receive_data(self, data, flow_controlled_size):
+        """
+        Called by the H2Protocol when more data has been received from the
+        network.
+
+        Places the data directly on the queue in a thread-safe manner without
+        blocking. Does not introspect or process the data.
+        """
+        self._received_data.put_nowait((data, flow_controlled_size))
+
+    def request_complete(self):
+        """
+        Called by the H2Protocol when all the request data has been received.
+
+        This works by placing the ``END_DATA_SENTINEL`` on the queue. The
+        reading code knows, when it sees the ``END_DATA_SENTINEL``, to expect
+        no more data from the network. This ensures that the state of the
+        application only changes when it has finished processing the data from
+        the network, even though the server may have long-since finished
+        receiving all the data for this request.
+        """
+        self._received_data.put_nowait((END_DATA_SENTINEL, None))
+
+    def run_in_threadpool(self, wsgi_application, environ):
+        """
+        This method should be invoked in a threadpool. At the point this method
+        is invoked, the only safe methods to call from the original thread are
+        ``receive_data`` and ``request_complete``: any other method is unsafe.
+
+        This method handles the WSGI logic. It invokes the application callable
+        in this thread, passing control over to the WSGI application. It then
+        ensures that the data makes it back to the HTTP/2 connection via
+        the thread-safe APIs provided below.
+        """
+        result = wsgi_application(environ, self.start_response)
+
+        try:
+            for data in result:
+                self.write(data)
+        finally:
+            # This signals that we're done with data. The server will know that
+            # this allows it to clean up its state: we're done here.
+            self.write(END_DATA_SENTINEL)
+
+    # The next few methods are called by the WSGI application. Firstly, the
+    # three methods provided by the input stream.
+    def read(self, size=None):
+        """
+        Called by the WSGI application to read data.
+
+        This method is one of the two that explicitly pump the input data
+        queue, which means it deals with the ``_complete`` flag and the
+        ``END_DATA_SENTINEL``.
+        """
+        # If we've already seen the END_DATA_SENTINEL, return immediately.
+        if self._complete:
+            return b''
+
+        # If we've been asked to read everything, just iterate over ourselves.
+        if size is None:
+            return b''.join(self)
+
+        # Otherwise, as long as we don't have enough data, spin looking for
+        # another data chunk.
+        data = b''
+        while len(data) < size:
+            try:
+                chunk = next(self)
+            except StopIteration:
+                break
+
+            # Concatenating strings this way is slow, but that's ok, this is
+            # just a demo.
+            data += chunk
+
+        # We have *at least* enough data to return, but we may have too much.
+        # If we do, throw it on a buffer: we'll use it later.
+        to_return = data[:size]
+        self._temp_buffer = data[size:]
+        return to_return
+
+    def readline(self, hint=None):
+        """
+        Called by the WSGI application to read a single line of data.
+
+        This method rigorously observes the ``hint`` parameter: it will only
+        ever read that much data. It then splits the data on a newline
+        character and throws everything it doesn't need into a buffer.
+        """
+        data = self.read(hint)
+        first_newline = data.find(b'\n')
+        if first_newline == -1:
+            # No newline, return all the data
+            return data
+
+        # We want to slice the data so that the head *includes* the first
+        # newline. Then, any data left in this line we don't care about should
+        # be prepended to the internal buffer.
+        head, tail = data[:first_newline + 1], data[first_newline + 1:]
+        self._temp_buffer = tail + self._temp_buffer
+
+        return head
+
+    def readlines(self, hint=None):
+        """
+        Called by the WSGI application to read several lines of data.
+
+        This method is really pretty stupid. It rigorously observes the
+        ``hint`` parameter, and quite happily returns the input split into
+        lines.
+        """
+        # This method is *crazy inefficient*, but it's also a pretty stupid
+        # method to call.
+        data = self.read(hint)
+        lines = data.split(b'\n')
+
+        # Split removes the newline character, but we want it, so put it back.
+        lines = [line + b'\n' for line in lines]
+
+        # Except if the last character was a newline character we now have an
+        # extra line that is just a newline: pull that out.
+        if lines[-1] == b'\n':
+            lines = lines[:-1]
+        return lines
+
+    def start_response(self, status, response_headers, exc_info=None):
+        """
+        This is the PEP-3333 mandated start_response callable.
+
+        All it does is store the headers for later sending, and return our
+        ``write`` callable.
+        """
+        if self._headers_emitted and exc_info is not None:
+            raise exc_info[1].with_traceback(exc_info[2])
+
+        assert not self._response_status or exc_info is not None
+        self._response_status = status
+        self._response_headers = response_headers
+
+        return self.write
+
+    def write(self, data):
+        """
+        Provides some data to write.
+
+        This function *blocks* until such time as the data is allowed by
+        HTTP/2 flow control. This allows a client to slow or pause the response
+        as needed.
+
+        This function is not supposed to be used, according to PEP-3333, but
+        once we have it it becomes quite convenient to use it, so this app
+        actually runs all writes through this function.
+        """
+        if not self._headers_emitted:
+            self._emit_headers()
+        event = self._protocol.data_for_stream(self.stream_id, data)
+        event.wait()
+        return
+
+    def _emit_headers(self):
+        """
+        Sends the response headers.
+
+        This is only called from the write callable and should only ever be
+        called once. It does some minor processing (converts the status line
+        into a status code because reason phrases are evil) and then passes
+        the headers on to the server. This call explicitly blocks until the
+        server notifies us that the headers have reached the network.
+        """
+        assert self._response_status and self._response_headers
+        assert not self._headers_emitted
+        self._headers_emitted = True
+
+        # We only need the status code
+        status = self._response_status.split(" ", 1)[0]
+        headers = [(":status", status)]
+        headers.extend(self._response_headers)
+        event = self._protocol.send_response(self.stream_id, headers)
+        event.wait()
+        return
+
+    # These two methods implement the iterator protocol. This allows a WSGI
+    # application to iterate over this Stream object to get the data.
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        # If the complete request has been read, abort immediately.
+        if self._complete:
+            raise StopIteration()
+
+        # If we have data stored in a temporary buffer for any reason, return
+        # that and clear the buffer.
+        #
+        # This can actually only happen when the application uses one of the
+        # read* callables, but that's fine.
+        if self._temp_buffer:
+            buffered_data = self._temp_buffer
+            self._temp_buffer = b''
+            return buffered_data
+
+        # Otherwise, pull data off the queue (blocking as needed). If this is
+        # the end of the request, we're done here: mark ourselves as complete
+        # and call it time. Otherwise, open the flow control window an
+        # appropriate amount and hand the chunk off.
+        chunk, chunk_size = self._received_data.get()
+        if chunk is END_DATA_SENTINEL:
+            self._complete = True
+            raise StopIteration()
+
+        # Let's talk a little bit about why we're opening the flow control
+        # window *here*, and not in the server thread.
+        #
+        # The purpose of HTTP/2 flow control is to allow for servers and
+        # clients to avoid needing to buffer data indefinitely because their
+        # peer is producing data faster than they can consume it. As a result,
+        # it's important that the flow control window be opened as late in the
+        # processing as possible. In this case, we open the flow control window
+        # exactly when the server hands the data to the application. This means
+        # that the flow control window essentially signals to the remote peer
+        # how much data hasn't even been *seen* by the application yet.
+        #
+        # If you wanted to be really clever you could consider not opening the
+        # flow control window until the application asks for the *next* chunk
+        # of data. That means that any buffers at the application level are now
+        # included in the flow control window processing. In my opinion, the
+        # advantage of that process does not outweigh the extra logical
+        # complexity involved in doing it, so we don't bother here.
+        #
+        # Another note: you'll notice that we don't include the _temp_buffer in
+        # our flow control considerations. This means you could in principle
+        # lead us to buffer slightly more than one connection flow control
+        # window's worth of data. That risk is considered acceptable for the
+        # much simpler logic available here.
+        #
+        # Finally, this is a pretty dumb flow control window management scheme:
+        # it causes us to emit a *lot* of window updates. A smarter server
+        # would want to use the content-length header to determine whether
+        # flow control window updates need to be emitted at all, and then to be
+        # more efficient about emitting them to avoid firing them off really
+        # frequently. For an example like this, there's very little gained by
+        # worrying about that.
+        self._protocol.open_flow_control_window(self.stream_id, chunk_size)
+
+        return chunk
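+
+    # Not part of the original example: a minimal sketch of the "smarter"
+    # window management the comment above alludes to. Rather than opening
+    # the window for every chunk, accumulate consumed bytes and only emit a
+    # WINDOW_UPDATE once a (hypothetical) threshold is crossed.
+    def _open_window_batched(self, chunk_size, threshold=16384):
+        self._unacked_bytes = getattr(self, '_unacked_bytes', 0) + chunk_size
+        if self._unacked_bytes >= threshold:
+            self._protocol.open_flow_control_window(
+                self.stream_id, self._unacked_bytes
+            )
+            self._unacked_bytes = 0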
+
+
+def _build_environ_dict(headers, stream):
+    """
+    Build the WSGI environ dictionary for a given request. To do that, we'll
+    temporarily create a dictionary for the headers. While this isn't actually
+    a valid way to represent headers, we know that the special headers we need
+    can only have one appearance in the block.
+
+    This code is arguably somewhat incautious: the conversion to dictionary
+    should only happen in a way that allows us to correctly join headers that
+    appear multiple times. That's acceptable in a demo app: in a productised
+    version you'd want to fix it.
+    """
+    header_dict = dict(headers)
+    path = header_dict.pop(u':path')
+    try:
+        path, query = path.split(u'?', 1)
+    except ValueError:
+        query = u""
+    server_name = header_dict.pop(u':authority')
+    try:
+        server_name, port = server_name.split(u':', 1)
+    except ValueError:
+        port = "8443"
+
+    environ = {
+        u'REQUEST_METHOD': header_dict.pop(u':method'),
+        u'SCRIPT_NAME': u'',
+        u'PATH_INFO': path,
+        u'QUERY_STRING': query,
+        u'SERVER_NAME': server_name,
+        u'SERVER_PORT': port,
+        u'SERVER_PROTOCOL': u'HTTP/2',
+        u'HTTPS': u"on",
+        u'SSL_PROTOCOL': u'TLSv1.2',
+        u'wsgi.version': (1, 0),
+        u'wsgi.url_scheme': header_dict.pop(u':scheme'),
+        u'wsgi.input': stream,
+        u'wsgi.errors': sys.stderr,
+        u'wsgi.multithread': True,
+        u'wsgi.multiprocess': False,
+        u'wsgi.run_once': False,
+    }
+    if u'content-type' in header_dict:
+        environ[u'CONTENT_TYPE'] = header_dict[u'content-type']
+    if u'content-length' in header_dict:
+        environ[u'CONTENT_LENGTH'] = header_dict[u'content-length']
+    for name, value in header_dict.items():
+        environ[u'HTTP_' + name.upper().replace(u'-', u'_')] = value
+    return environ
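+
+
+# Not part of the original example: a sketch of the more careful conversion
+# the docstring of _build_environ_dict mentions, joining headers that appear
+# more than once with a comma instead of silently keeping only the last one.
+def _join_repeated_headers(headers):
+    joined = {}
+    for name, value in headers:
+        if name in joined:
+            joined[name] = joined[name] + u',' + value
+        else:
+            joined[name] = value
+    return joined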
+
+
+# Set up the WSGI app.
+application_string = sys.argv[1]
+path, func = application_string.split(':', 1)
+module = importlib.import_module(path)
+APPLICATION = getattr(module, func)
+
+# Set up TLS
+ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ssl_context.options |= (
+    ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
+)
+ssl_context.set_ciphers("ECDHE+AESGCM")
+ssl_context.load_cert_chain(certfile="cert.crt", keyfile="cert.key")
+ssl_context.set_alpn_protocols(["h2"])
+
+# Do the asyncio bits
+loop = asyncio.get_event_loop()
+# Each client connection will create a new protocol instance
+coro = loop.create_server(H2Protocol, '127.0.0.1', 8443, ssl=ssl_context)
+server = loop.run_until_complete(coro)
+
+# Serve requests until Ctrl+C is pressed
+print('Serving on {}'.format(server.sockets[0].getsockname()))
+try:
+    loop.run_forever()
+except KeyboardInterrupt:
+    pass
+
+# Close the server
+server.close()
+loop.run_until_complete(server.wait_closed())
+loop.close()
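+
+# Usage sketch (not part of the original example): pass a dotted module path
+# and the name of a WSGI callable as 'module:callable', e.g.
+#
+#     python <this file> my_wsgi_module:application
+#
+# A certificate and key named 'cert.crt' and 'cert.key' must be present in
+# the working directory.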
diff --git a/tools/third_party/h2/examples/curio/curio-server.py b/tools/third_party/h2/examples/curio/curio-server.py
new file mode 100755
index 0000000..01604b4
--- /dev/null
+++ b/tools/third_party/h2/examples/curio/curio-server.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python3.5
+# -*- coding: utf-8 -*-
+"""
+curio-server.py
+~~~~~~~~~~~~~~~
+
+A fully-functional HTTP/2 server written for curio.
+
+Requires Python 3.5+.
+"""
+import mimetypes
+import os
+import sys
+
+from curio import Kernel, Event, spawn, socket, ssl
+
+import h2.config
+import h2.connection
+import h2.events
+
+
+# The maximum amount of a file we'll send in a single DATA frame.
+READ_CHUNK_SIZE = 8192
+
+
+def create_listening_ssl_socket(address, certfile, keyfile):
+    """
+    Create and return a listening TLS socket on a given address.
+    """
+    ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+    ssl_context.options |= (
+        ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
+    )
+    ssl_context.set_ciphers("ECDHE+AESGCM")
+    ssl_context.load_cert_chain(certfile=certfile, keyfile=keyfile)
+    ssl_context.set_alpn_protocols(["h2"])
+
+    sock = socket.socket()
+    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    sock = ssl_context.wrap_socket(sock)
+    sock.bind(address)
+    sock.listen()
+
+    return sock
+
+
+async def h2_server(address, root, certfile, keyfile):
+    """
+    Create an HTTP/2 server at the given address.
+    """
+    sock = create_listening_ssl_socket(address, certfile, keyfile)
+    print("Now listening on %s:%d" % address)
+
+    async with sock:
+        while True:
+            client, _ = await sock.accept()
+            server = H2Server(client, root)
+            await spawn(server.run())
+
+
+class H2Server:
+    """
+    A basic HTTP/2 file server. This is essentially very similar to
+    SimpleHTTPServer from the standard library, but uses HTTP/2 instead of
+    HTTP/1.1.
+    """
+    def __init__(self, sock, root):
+        config = h2.config.H2Configuration(
+            client_side=False, header_encoding='utf-8'
+        )
+        self.sock = sock
+        self.conn = h2.connection.H2Connection(config=config)
+        self.root = root
+        self.flow_control_events = {}
+
+    async def run(self):
+        """
+        Loop over the connection, managing it appropriately.
+        """
+        self.conn.initiate_connection()
+        await self.sock.sendall(self.conn.data_to_send())
+
+        while True:
+            # 65535 is basically arbitrary here: this amounts to "give me
+            # whatever data you have".
+            data = await self.sock.recv(65535)
+            if not data:
+                break
+
+            events = self.conn.receive_data(data)
+            for event in events:
+                if isinstance(event, h2.events.RequestReceived):
+                    await spawn(
+                        self.request_received(event.headers, event.stream_id)
+                    )
+                elif isinstance(event, h2.events.DataReceived):
+                    self.conn.reset_stream(event.stream_id)
+                elif isinstance(event, h2.events.WindowUpdated):
+                    await self.window_updated(event)
+
+            await self.sock.sendall(self.conn.data_to_send())
+
+    async def request_received(self, headers, stream_id):
+        """
+        Handle a request by attempting to serve a suitable file.
+        """
+        headers = dict(headers)
+        assert headers[':method'] == 'GET'
+
+        path = headers[':path'].lstrip('/')
+        full_path = os.path.join(self.root, path)
+
+        if not os.path.exists(full_path):
+            response_headers = (
+                (':status', '404'),
+                ('content-length', '0'),
+                ('server', 'curio-h2'),
+            )
+            self.conn.send_headers(
+                stream_id, response_headers, end_stream=True
+            )
+            await self.sock.sendall(self.conn.data_to_send())
+        else:
+            await self.send_file(full_path, stream_id)
+
+    async def send_file(self, file_path, stream_id):
+        """
+        Send a file, obeying the rules of HTTP/2 flow control.
+        """
+        filesize = os.stat(file_path).st_size
+        content_type, content_encoding = mimetypes.guess_type(file_path)
+        response_headers = [
+            (':status', '200'),
+            ('content-length', str(filesize)),
+            ('server', 'curio-h2'),
+        ]
+        if content_type:
+            response_headers.append(('content-type', content_type))
+        if content_encoding:
+            response_headers.append(('content-encoding', content_encoding))
+
+        self.conn.send_headers(stream_id, response_headers)
+        await self.sock.sendall(self.conn.data_to_send())
+
+        with open(file_path, 'rb', buffering=0) as f:
+            await self._send_file_data(f, stream_id)
+
+    async def _send_file_data(self, fileobj, stream_id):
+        """
+        Send the data portion of a file. Handles flow control rules.
+        """
+        while True:
+            while not self.conn.local_flow_control_window(stream_id):
+                await self.wait_for_flow_control(stream_id)
+
+            chunk_size = min(
+                self.conn.local_flow_control_window(stream_id),
+                READ_CHUNK_SIZE,
+            )
+
+            data = fileobj.read(chunk_size)
+            keep_reading = (len(data) == chunk_size)
+
+            self.conn.send_data(stream_id, data, not keep_reading)
+            await self.sock.sendall(self.conn.data_to_send())
+
+            if not keep_reading:
+                break
+
+    async def wait_for_flow_control(self, stream_id):
+        """
+        Blocks until the flow control window for a given stream is opened.
+        """
+        evt = Event()
+        self.flow_control_events[stream_id] = evt
+        await evt.wait()
+
+    async def window_updated(self, event):
+        """
+        Unblock streams waiting on flow control, if needed.
+        """
+        stream_id = event.stream_id
+
+        if stream_id and stream_id in self.flow_control_events:
+            evt = self.flow_control_events.pop(stream_id)
+            await evt.set()
+        elif not stream_id:
+            # Need to keep a real list here to use only the events present at
+            # this time.
+            blocked_streams = list(self.flow_control_events.keys())
+            for stream_id in blocked_streams:
+                event = self.flow_control_events.pop(stream_id)
+                await event.set()
+        return
+
+
+if __name__ == '__main__':
+    host = sys.argv[2] if len(sys.argv) > 2 else "localhost"
+    kernel = Kernel(with_monitor=True)
+    print("Try GETting:")
+    print("    On OSX after 'brew install curl --with-c-ares --with-libidn --with-nghttp2 --with-openssl':")
+    print("/usr/local/opt/curl/bin/curl --tlsv1.2 --http2 -k https://localhost:5000/bundle.js")
+    print("Or open a browser to: https://localhost:5000/")
+    print("   (Accept all the warnings)")
+    kernel.run(h2_server((host, 5000),
+                         sys.argv[1],
+                         "{}.crt.pem".format(host),
+                         "{}.key".format(host)))
diff --git a/tools/third_party/h2/examples/curio/localhost.crt.pem b/tools/third_party/h2/examples/curio/localhost.crt.pem
new file mode 100755
index 0000000..d6cf7d5
--- /dev/null
+++ b/tools/third_party/h2/examples/curio/localhost.crt.pem
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDhTCCAm2gAwIBAgIJAOrxh0dOYJLdMA0GCSqGSIb3DQEBCwUAMFkxCzAJBgNV
+BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
+aWRnaXRzIFB0eSBMdGQxEjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0xNTA5MTkxNDE2
+NDRaFw0xNTEwMTkxNDE2NDRaMFkxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21l
+LVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxEjAQBgNV
+BAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMqt
+A1iu8EN00FU0eBcBGlLVmNEgV7Jkbukra+kwS8j/U2y50QPGJc/FiIVDfuBqk5dL
+ACTNc6A/FQcXvWmOc5ixmC3QKKasMpuofqKz0V9C6irZdYXZ9rcsW0gHQIr989yd
+R+N1VbIlEVW/T9FJL3B2UD9GVIkUELzm47CSOWZvAxQUlsx8CUNuUCWqyZJoqTFN
+j0LeJDOWGCsug1Pkj0Q1x+jMVL6l6Zf6vMkLNOMsOsWsxUk+0L3tl/OzcTgUOCsw
+UzY59RIi6Rudrp0oaU8NuHr91yiSqPbKFlX10M9KwEEdnIpcxhND3dacrDycj3ux
+eWlqKync2vOFUkhwiaMCAwEAAaNQME4wHQYDVR0OBBYEFA0PN+PGoofZ+QIys2Jy
+1Zz94vBOMB8GA1UdIwQYMBaAFA0PN+PGoofZ+QIys2Jy1Zz94vBOMAwGA1UdEwQF
+MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEplethBoPpcP3EbR5Rz6snDDIcbtAJu
+Ngd0YZppGT+P0DYnPJva4vRG3bb84ZMSuppz5j67qD6DdWte8UXhK8BzWiHzwmQE
+QmbKyzzTMKQgTNFntpx5cgsSvTtrHpNYoMHzHOmyAOboNeM0DWiRXsYLkWTitLTN
+qbOpstwPubExbT9lPjLclntShT/lCupt+zsbnrR9YiqlYFY/fDzfAybZhrD5GMBY
+XdMPItwAc/sWvH31yztarjkLmld76AGCcO5r8cSR/cX98SicyfjOBbSco8GkjYNY
+582gTPkKGYpStuN7GNT5tZmxvMq935HRa2XZvlAIe8ufp8EHVoYiF3c=
+-----END CERTIFICATE-----
diff --git a/tools/third_party/h2/examples/curio/localhost.key b/tools/third_party/h2/examples/curio/localhost.key
new file mode 100755
index 0000000..bda69e8
--- /dev/null
+++ b/tools/third_party/h2/examples/curio/localhost.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAyq0DWK7wQ3TQVTR4FwEaUtWY0SBXsmRu6Str6TBLyP9TbLnR
+A8Ylz8WIhUN+4GqTl0sAJM1zoD8VBxe9aY5zmLGYLdAopqwym6h+orPRX0LqKtl1
+hdn2tyxbSAdAiv3z3J1H43VVsiURVb9P0UkvcHZQP0ZUiRQQvObjsJI5Zm8DFBSW
+zHwJQ25QJarJkmipMU2PQt4kM5YYKy6DU+SPRDXH6MxUvqXpl/q8yQs04yw6xazF
+ST7Qve2X87NxOBQ4KzBTNjn1EiLpG52unShpTw24ev3XKJKo9soWVfXQz0rAQR2c
+ilzGE0Pd1pysPJyPe7F5aWorKdza84VSSHCJowIDAQABAoIBACp+nh4BB/VMz8Wd
+q7Q/EfLeQB1Q57JKpoqTBRwueSVai3ZXe4CMEi9/HkG6xiZtkiZ9njkZLq4hq9oB
+2z//kzMnwV2RsIRJxI6ohGy+wR51HD4BvEdlTPpY/Yabpqe92VyfSYxidKZWaU0O
+QMED1EODOw4ZQ+4928iPrJu//PMB4e7TFao0b9Fk/XLWtu5/tQZz9jsrlTi1zthh
+7n+oaGNhfTeIJJL4jrhTrKW1CLHXATtr9SJlfZ3wbMxQVeyj2wUlP1V0M6kBuhNj
+tbGbMpixD5iCNJ49Cm2PHg+wBOfS3ADGIpi3PcGw5mb8nB3N9eGBRPhLShAlq5Hi
+Lv4tyykCgYEA8u3b3xJ04pxWYN25ou/Sc8xzgDCK4XvDNdHVTuZDjLVA+VTVPzql
+lw7VvJArsx47MSPvsaX/+4hQXYtfnR7yJpx6QagvQ+z4ludnIZYrQwdUmb9pFL1s
+8UNj+3j9QFRPenIiIQ8qxxNIQ9w2HsVQ8scvc9CjYop/YYAPaQyHaL8CgYEA1ZSz
+CR4NcpfgRSILdhb1dLcyw5Qus1VOSAx3DYkhDkMiB8XZwgMdJjwehJo9yaqRCLE8
+Sw5znMnkfoZpu7+skrjK0FqmMpXMH9gIszHvFG8wSw/6+2HIWS19/wOu8dh95LuC
+0zurMk8rFqxgWMWF20afhgYrUz42cvUTo10FVB0CgYEAt7mW6W3PArfUSCxIwmb4
+VmXREKkl0ATHDYQl/Cb//YHzot467TgQll883QB4XF5HzBFurX9rSzO7/BN1e6I0
+52i+ubtWC9xD4fUetXMaQvZfUGxIL8xXgVxDWKQXfLiG54c8Mp6C7s6xf8kjEUCP
+yR1F0SSA/Pzb+8RbY0p7eocCgYA+1rs+SXtHZev0KyoYGnUpW+Uxqd17ofOgOxqj
+/t6c5Z+TjeCdtnDTGQkZlo/rT6XQWuUUaDIXxUbW+xEMzj4mBPyXBLS1WWFvVQ5q
+OpzO9E/PJeqAH6rkof/aEelc+oc/zvOU1o9uA+D3kMvgEm1psIOq2RHSMhGvDPA0
+NmAk+QKBgQCwd1681GagdIYSZUCBecnLtevXmIsJyDW2yR1NNcIe/ukcVQREMDvy
+5DDkhnGDgnV1D5gYcXb34g9vYvbfTnBMl/JXmMAAG1kIS+3pvHyN6f1poVe3yJV1
+yHVuvymnJxKnyaV0L3ntepVvV0vVNIkA3oauoUTLto6txBI+b/ImDA==
+-----END RSA PRIVATE KEY-----
diff --git a/tools/third_party/h2/examples/eventlet/eventlet-server.py b/tools/third_party/h2/examples/eventlet/eventlet-server.py
new file mode 100755
index 0000000..ff34e2f
--- /dev/null
+++ b/tools/third_party/h2/examples/eventlet/eventlet-server.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+"""
+eventlet-server.py
+~~~~~~~~~~~~~~~~~~
+
+A fully-functional HTTP/2 server written for Eventlet.
+"""
+import collections
+import json
+
+import eventlet
+
+from eventlet.green.OpenSSL import SSL, crypto
+from h2.config import H2Configuration
+from h2.connection import H2Connection
+from h2.events import RequestReceived, DataReceived
+
+
+class ConnectionManager(object):
+    """
+    An object that manages a single HTTP/2 connection.
+    """
+    def __init__(self, sock):
+        config = H2Configuration(client_side=False)
+        self.sock = sock
+        self.conn = H2Connection(config=config)
+
+    def run_forever(self):
+        self.conn.initiate_connection()
+        self.sock.sendall(self.conn.data_to_send())
+
+        while True:
+            data = self.sock.recv(65535)
+            if not data:
+                break
+
+            events = self.conn.receive_data(data)
+
+            for event in events:
+                if isinstance(event, RequestReceived):
+                    self.request_received(event.headers, event.stream_id)
+                elif isinstance(event, DataReceived):
+                    self.conn.reset_stream(event.stream_id)
+
+            self.sock.sendall(self.conn.data_to_send())
+
+    def request_received(self, headers, stream_id):
+        headers = collections.OrderedDict(headers)
+        data = json.dumps({'headers': headers}, indent=4).encode('utf-8')
+
+        response_headers = (
+            (':status', '200'),
+            ('content-type', 'application/json'),
+            ('content-length', str(len(data))),
+            ('server', 'eventlet-h2'),
+        )
+        self.conn.send_headers(stream_id, response_headers)
+        self.conn.send_data(stream_id, data, end_stream=True)
+
+
+def alpn_callback(conn, protos):
+    if b'h2' in protos:
+        return b'h2'
+
+    raise RuntimeError("No acceptable protocol offered!")
+
+
+def npn_advertise_cb(conn):
+    return [b'h2']
+
+
+# Let's set up SSL. This is a lot of work in PyOpenSSL.
+options = (
+    SSL.OP_NO_COMPRESSION |
+    SSL.OP_NO_SSLv2 |
+    SSL.OP_NO_SSLv3 |
+    SSL.OP_NO_TLSv1 |
+    SSL.OP_NO_TLSv1_1
+)
+context = SSL.Context(SSL.SSLv23_METHOD)
+context.set_options(options)
+context.set_verify(SSL.VERIFY_NONE, lambda *args: True)
+context.use_privatekey_file('server.key')
+context.use_certificate_file('server.crt')
+context.set_npn_advertise_callback(npn_advertise_cb)
+context.set_alpn_select_callback(alpn_callback)
+context.set_cipher_list(
+    "ECDHE+AESGCM"
+)
+context.set_tmp_ecdh(crypto.get_elliptic_curve(u'prime256v1'))
+
+server = eventlet.listen(('0.0.0.0', 443))
+server = SSL.Connection(context, server)
+pool = eventlet.GreenPool()
+
+while True:
+    try:
+        new_sock, _ = server.accept()
+        manager = ConnectionManager(new_sock)
+        pool.spawn_n(manager.run_forever)
+    except (SystemExit, KeyboardInterrupt):
+        break
diff --git a/tools/third_party/h2/examples/eventlet/server.crt b/tools/third_party/h2/examples/eventlet/server.crt
new file mode 100755
index 0000000..bc8a4c0
--- /dev/null
+++ b/tools/third_party/h2/examples/eventlet/server.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDUjCCAjoCCQCQmNzzpQTCijANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQGEwJH
+QjEPMA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5
+cGVyLWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20w
+HhcNMTUwOTE2MjAyOTA0WhcNMTYwOTE1MjAyOTA0WjBrMQswCQYDVQQGEwJHQjEP
+MA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5cGVy
+LWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC74ZeB4Jdb5cnC9KXXLJuzjwTg
+45q5EeShDYQe0TbKgreiUP6clU3BR0fFAVedN1q/LOuQ1HhvrDk1l4TfGF2bpCIq
+K+U9CnzcQknvdpyyVeOLtSsCjOPk4xydHwkQxwJvHVdtJx4CzDDqGbHNHCF/9gpQ
+lsa3JZW+tIZLK0XMEPFQ4XFXgegxTStO7kBBPaVIgG9Ooqc2MG4rjMNUpxa28WF1
+SyqWTICf2N8T/C+fPzbQLKCWrFrKUP7WQlOaqPNQL9bCDhSTPRTwQOc2/MzVZ9gT
+Xr0Z+JMTXwkSMKO52adE1pmKt00jJ1ecZBiJFyjx0X6hH+/59dLbG/7No+PzAgMB
+AAEwDQYJKoZIhvcNAQEFBQADggEBAG3UhOCa0EemL2iY+C+PR6CwEHQ+n7vkBzNz
+gKOG+Q39spyzqU1qJAzBxLTE81bIQbDg0R8kcLWHVH2y4zViRxZ0jHUFKMgjONW+
+Aj4evic/2Y/LxpLxCajECq/jeMHYrmQONszf9pbc0+exrQpgnwd8asfsM3d/FJS2
+5DIWryCKs/61m9vYL8icWx/9cnfPkBoNv1ER+V1L1TH3ARvABh406SBaeqLTm/kG
+MNuKytKWJsQbNlxzWHVgkKzVsBKvYj0uIEJpClIhbe6XNYRDy8T8mKXVWhJuxH4p
+/agmCG3nxO8aCrUK/EVmbWmVIfCH3t7jlwMX1nJ8MsRE7Ydnk8I=
+-----END CERTIFICATE-----
diff --git a/tools/third_party/h2/examples/eventlet/server.key b/tools/third_party/h2/examples/eventlet/server.key
new file mode 100755
index 0000000..11f9ea0
--- /dev/null
+++ b/tools/third_party/h2/examples/eventlet/server.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAu+GXgeCXW+XJwvSl1yybs48E4OOauRHkoQ2EHtE2yoK3olD+
+nJVNwUdHxQFXnTdavyzrkNR4b6w5NZeE3xhdm6QiKivlPQp83EJJ73acslXji7Ur
+Aozj5OMcnR8JEMcCbx1XbSceAsww6hmxzRwhf/YKUJbGtyWVvrSGSytFzBDxUOFx
+V4HoMU0rTu5AQT2lSIBvTqKnNjBuK4zDVKcWtvFhdUsqlkyAn9jfE/wvnz820Cyg
+lqxaylD+1kJTmqjzUC/Wwg4Ukz0U8EDnNvzM1WfYE169GfiTE18JEjCjudmnRNaZ
+irdNIydXnGQYiRco8dF+oR/v+fXS2xv+zaPj8wIDAQABAoIBAQCsdq278+0c13d4
+tViSh4k5r1w8D9IUdp9XU2/nVgckqA9nOVAvbkJc3FC+P7gsQgbUHKj0XoVbhU1S
+q461t8kduPH/oiGhAcKR8WurHEdE0OC6ewhLJAeCMRQwCrAorXXHh7icIt9ClCuG
+iSWUcXEy5Cidx3oL3r1xvIbV85fzdDtE9RC1I/kMjAy63S47YGiqh5vYmJkCa8rG
+Dsd1sEMDPr63XJpqJj3uHRcPvySgXTa+ssTmUH8WJlPTjvDB5hnPz+lkk2JKVPNu
+8adzftZ6hSun+tsc4ZJp8XhGu/m/7MjxWh8MeupLHlXcOEsnj4uHQQsOM3zHojr3
+aDCZiC1pAoGBAOAhwe1ujoS2VJ5RXJ9KMs7eBER/02MDgWZjo54Jv/jFxPWGslKk
+QQceuTe+PruRm41nzvk3q4iZXt8pG0bvpgigN2epcVx/O2ouRsUWWBT0JrVlEzha
+TIvWjtZ5tSQExXgHL3VlM9+ka40l+NldLSPn25+prizaqhalWuvTpP23AoGBANaY
+VhEI6yhp0BBUSATEv9lRgkwx3EbcnXNXPQjDMOthsyfq7FxbdOBEK1rwSDyuE6Ij
+zQGcTOfdiur5Ttg0OQilTJIXJAlpoeecOQ9yGma08c5FMXVJJvcZUuWRZWg1ocQj
+/hx0WVE9NwOoKwTBERv8HX7vJOFRZyvgkJwFxoulAoGAe4m/1XoZrga9z2GzNs10
+AdgX7BW00x+MhH4pIiPnn1yK+nYa9jg4647Asnv3IfXZEnEEgRNxReKbi0+iDFBt
+aNW+lDGuHTi37AfD1EBDnpEQgO1MUcRb6rwBkTAWatsCaO00+HUmyX9cFLm4Vz7n
+caILyQ6CxZBlLgRIgDHxADMCgYEAtubsJGTHmZBmSCStpXLUWbOBLNQqfTM398DZ
+QoirP1PsUQ+IGUfSG/u+QCogR6fPEBkXeFHxsoY/Cvsm2lvYaKgK1VFn46Xm2vNq
+JuIH4pZCqp6LAv4weddZslT0a5eaowRSZ4o7PmTAaRuCXvD3VjTSJwhJFMo+90TV
+vEWn7gkCgYEAkk+unX9kYmKoUdLh22/tzQekBa8WqMxXDwzBCECTAs2GlpL/f73i
+zD15TnaNfLP6Q5RNb0N9tb0Gz1wSkwI1+jGAQLnh2K9X9cIVIqJn8Mf/KQa/wUDV
+Tb1j7FoGUEgX7vbsyWuTd8P76kNYyGqCss1XmbttcSolqpbIdlSUcO0=
+-----END RSA PRIVATE KEY-----
diff --git a/tools/third_party/h2/examples/fragments/client_https_setup_fragment.py b/tools/third_party/h2/examples/fragments/client_https_setup_fragment.py
new file mode 100755
index 0000000..9958c1b
--- /dev/null
+++ b/tools/third_party/h2/examples/fragments/client_https_setup_fragment.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+"""
+Client HTTPS Setup
+~~~~~~~~~~~~~~~~~~
+
+This example code fragment demonstrates how to set up a HTTP/2 client that
+negotiates HTTP/2 using NPN and ALPN. For the sake of maximum explanatory value
+this code uses the synchronous, low-level sockets API: however, if you're not
+using sockets directly (e.g. because you're using asyncio), you should focus on
+the set up required for the SSLContext object. For other concurrency libraries
+you may need to use other setup (e.g. for Twisted you'll need to use
+IProtocolNegotiationFactory).
+
+This code requires Python 3.5 or later.
+"""
+import h2.connection
+import socket
+import ssl
+
+
+def establish_tcp_connection():
+    """
+    This function establishes a client-side TCP connection. How it works isn't
+    very important to this example. For the purpose of this example we connect
+    to localhost.
+    """
+    return socket.create_connection(('localhost', 443))
+
+
+def get_http2_ssl_context():
+    """
+    This function creates an SSLContext object that is suitably configured for
+    HTTP/2. If you're working with Python TLS directly, you'll want to do the
+    exact same setup as this function does.
+    """
+    # Get the basic context from the standard library.
+    ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
+
+    # RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2
+    # or higher. Disable TLS 1.1 and lower.
+    ctx.options |= (
+        ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
+    )
+
+    # RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable
+    # compression.
+    ctx.options |= ssl.OP_NO_COMPRESSION
+
+    # RFC 7540 Section 9.2.2: "deployments of HTTP/2 that use TLS 1.2 MUST
+    # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256". In practice, the
+    # blacklist defined in this section allows only the AES GCM and ChaCha20
+    # cipher suites with ephemeral key negotiation.
+    ctx.set_ciphers("ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20")
+
+    # We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may
+    # be absent, so allow that. This setup allows for negotiation of HTTP/1.1.
+    ctx.set_alpn_protocols(["h2", "http/1.1"])
+
+    try:
+        ctx.set_npn_protocols(["h2", "http/1.1"])
+    except NotImplementedError:
+        pass
+
+    return ctx
+
+
+def negotiate_tls(tcp_conn, context):
+    """
+    Given an established TCP connection and a HTTP/2-appropriate TLS context,
+    this function:
+
+    1. wraps TLS around the TCP connection.
+    2. confirms that HTTP/2 was negotiated and, if it was not, throws an error.
+    """
+    # Note that SNI is mandatory for HTTP/2, so you *must* pass the
+    # server_hostname argument.
+    tls_conn = context.wrap_socket(tcp_conn, server_hostname='localhost')
+
+    # Always prefer the result from ALPN to that from NPN.
+    # You can only check what protocol was negotiated once the handshake is
+    # complete.
+    negotiated_protocol = tls_conn.selected_alpn_protocol()
+    if negotiated_protocol is None:
+        negotiated_protocol = tls_conn.selected_npn_protocol()
+
+    if negotiated_protocol != "h2":
+        raise RuntimeError("Didn't negotiate HTTP/2!")
+
+    return tls_conn
+
+
+def main():
+    # Step 1: Set up your TLS context.
+    context = get_http2_ssl_context()
+
+    # Step 2: Create a TCP connection.
+    connection = establish_tcp_connection()
+
+    # Step 3: Wrap the connection in TLS and validate that we negotiated HTTP/2
+    tls_connection = negotiate_tls(connection, context)
+
+    # Step 4: Create a client-side H2 connection.
+    http2_connection = h2.connection.H2Connection()
+
+    # Step 5: Initiate the connection
+    http2_connection.initiate_connection()
+    tls_connection.sendall(http2_connection.data_to_send())
+
+    # The TCP, TLS, and HTTP/2 handshakes are now complete. You can enter your
+    # main loop now.
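+
+    # Not part of the original fragment: a minimal sketch of such a main
+    # loop. It sends a single GET for '/' on stream 1 and prints the events
+    # that come back until the peer closes the connection; the header values
+    # here are illustrative.
+    http2_connection.send_headers(1, [
+        (':method', 'GET'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':authority', 'localhost'),
+    ], end_stream=True)
+    tls_connection.sendall(http2_connection.data_to_send())
+    while True:
+        data = tls_connection.recv(65535)
+        if not data:
+            break
+        for event in http2_connection.receive_data(data):
+            print(event)
+        tls_connection.sendall(http2_connection.data_to_send())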
diff --git a/tools/third_party/h2/examples/fragments/client_upgrade_fragment.py b/tools/third_party/h2/examples/fragments/client_upgrade_fragment.py
new file mode 100755
index 0000000..34ff298
--- /dev/null
+++ b/tools/third_party/h2/examples/fragments/client_upgrade_fragment.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+"""
+Client Plaintext Upgrade
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This example code fragment demonstrates how to set up a HTTP/2 client that uses
+the plaintext HTTP Upgrade mechanism to negotiate HTTP/2 connectivity. For
+maximum explanatory value it uses the synchronous socket API that comes with
+the Python standard library. In production code you will want to use an
+actual HTTP/1.1 client if possible.
+
+This code requires Python 3.5 or later.
+"""
+import h2.connection
+import socket
+
+
+def establish_tcp_connection():
+    """
+    This function establishes a client-side TCP connection. How it works isn't
+    very important to this example. For the purpose of this example we connect
+    to localhost.
+    """
+    return socket.create_connection(('localhost', 80))
+
+
+def send_initial_request(connection, settings):
+    """
+    For the sake of this upgrade demonstration, we're going to issue a GET
+    request against the root of the site. In principle the best request to
+    issue for an upgrade is actually ``OPTIONS *``, but this is remarkably
+    poorly supported and can break in weird ways.
+    """
+    # Craft our initial request per RFC 7540 Section 3.2. This requires two
+    # special header fields: the Upgrade header, and the HTTP2-Settings header.
+    # The value of the HTTP2-Settings header field comes from h2.
+    request = (
+        b"GET / HTTP/1.1\r\n" +
+        b"Host: localhost\r\n" +
+        b"Upgrade: h2c\r\n" +
+        b"HTTP2-Settings: " + settings + b"\r\n"
+        b"\r\n"
+    )
+    connection.sendall(request)
+
+
+def get_upgrade_response(connection):
+    """
+    This function reads from the socket until the HTTP/1.1 end-of-headers
+    sequence (CRLFCRLF) is received. It then checks what the status code of the
+    response is.
+
+    This is not a substitute for proper HTTP/1.1 parsing, but it's good enough
+    for example purposes.
+    """
+    data = b''
+    while b'\r\n\r\n' not in data:
+        data += connection.recv(8192)
+
+    headers, rest = data.split(b'\r\n\r\n', 1)
+
+    # An upgrade response begins HTTP/1.1 101 Switching Protocols. Look for the
+    # code. In production code you should also check that the upgrade is to
+    # h2c, but here we know we only offered one upgrade so there's only one
+    # possible upgrade in use.
+    split_headers = headers.split()
+    if split_headers[1] != b'101':
+        raise RuntimeError("Not upgrading!")
+
+    # We don't care about the HTTP/1.1 data anymore, but we do care about
+    # any other data we read from the socket: this is going to be HTTP/2 data
+    # that must be passed to the H2Connection.
+    return rest
+
+
+def main():
+    """
+    The client upgrade flow.
+    """
+    # Step 1: Establish the TCP connection.
+    connection = establish_tcp_connection()
+
+    # Step 2: Create H2 Connection object, put it in upgrade mode, and get the
+    # value of the HTTP2-Settings header we want to use.
+    h2_connection = h2.connection.H2Connection()
+    settings_header_value = h2_connection.initiate_upgrade_connection()
+
+    # Step 3: Send the initial HTTP/1.1 request with the upgrade fields.
+    send_initial_request(connection, settings_header_value)
+
+    # Step 4: Read the HTTP/1.1 response, look for 101 response.
+    extra_data = get_upgrade_response(connection)
+
+    # Step 5: Immediately send the pending HTTP/2 data.
+    connection.sendall(h2_connection.data_to_send())
+
+    # Step 6: Feed the body data to the connection.
+    events = h2_connection.receive_data(extra_data)
+
+    # Now you can enter your main loop, beginning by processing the first set
+    # of events above. These events may include ResponseReceived, which will
+    # contain the response to the request we made in Step 3.
+    main_loop(events)
diff --git a/tools/third_party/h2/examples/fragments/server_https_setup_fragment.py b/tools/third_party/h2/examples/fragments/server_https_setup_fragment.py
new file mode 100755
index 0000000..14ec0ef
--- /dev/null
+++ b/tools/third_party/h2/examples/fragments/server_https_setup_fragment.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+"""
+Server HTTPS Setup
+~~~~~~~~~~~~~~~~~~
+
+This example code fragment demonstrates how to set up a HTTP/2 server that
+negotiates HTTP/2 using NPN and ALPN. For the sake of maximum explanatory value
+this code uses the synchronous, low-level sockets API: however, if you're not
+using sockets directly (e.g. because you're using asyncio), you should focus on
+the set up required for the SSLContext object. For other concurrency libraries
+you may need to use other setup (e.g. for Twisted you'll need to use
+IProtocolNegotiationFactory).
+
+This code requires Python 3.5 or later.
+"""
+import h2.config
+import h2.connection
+import socket
+import ssl
+
+
+def establish_tcp_connection():
+    """
+    This function establishes a server-side TCP connection. How it works isn't
+    very important to this example.
+    """
+    bind_socket = socket.socket()
+    bind_socket.bind(('', 443))
+    bind_socket.listen(5)
+    return bind_socket.accept()[0]
+
+
+def get_http2_ssl_context():
+    """
+    This function creates an SSLContext object that is suitably configured for
+    HTTP/2. If you're working with Python TLS directly, you'll want to do the
+    exact same setup as this function does.
+    """
+    # Get the basic context from the standard library.
+    ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
+
+    # RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2
+    # or higher. Disable TLS 1.1 and lower.
+    ctx.options |= (
+        ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
+    )
+
+    # RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable
+    # compression.
+    ctx.options |= ssl.OP_NO_COMPRESSION
+
+    # RFC 7540 Section 9.2.2: "deployments of HTTP/2 that use TLS 1.2 MUST
+    # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256". In practice, the
+    # blacklist defined in this section allows only the AES GCM and ChaCha20
+    # cipher suites with ephemeral key negotiation.
+    ctx.set_ciphers("ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20")
+
+    # We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may
+    # be absent, so allow that. This setup allows for negotiation of HTTP/1.1.
+    ctx.set_alpn_protocols(["h2", "http/1.1"])
+
+    try:
+        ctx.set_npn_protocols(["h2", "http/1.1"])
+    except NotImplementedError:
+        pass
+
+    return ctx
+
+
+def negotiate_tls(tcp_conn, context):
+    """
+    Given an established TCP connection and a HTTP/2-appropriate TLS context,
+    this function:
+
+    1. wraps TLS around the TCP connection.
+    2. confirms that HTTP/2 was negotiated and, if it was not, throws an error.
+    """
+    tls_conn = context.wrap_socket(tcp_conn, server_side=True)
+
+    # Always prefer the result from ALPN to that from NPN.
+    # You can only check what protocol was negotiated once the handshake is
+    # complete.
+    negotiated_protocol = tls_conn.selected_alpn_protocol()
+    if negotiated_protocol is None:
+        negotiated_protocol = tls_conn.selected_npn_protocol()
+
+    if negotiated_protocol != "h2":
+        raise RuntimeError("Didn't negotiate HTTP/2!")
+
+    return tls_conn
+
+
+def main():
+    # Step 1: Set up your TLS context.
+    context = get_http2_ssl_context()
+
+    # Step 2: Receive a TCP connection.
+    connection = establish_tcp_connection()
+
+    # Step 3: Wrap the connection in TLS and validate that we negotiated HTTP/2
+    tls_connection = negotiate_tls(connection, context)
+
+    # Step 4: Create a server-side H2 connection.
+    config = h2.config.H2Configuration(client_side=False)
+    http2_connection = h2.connection.H2Connection(config=config)
+
+    # Step 5: Initiate the connection
+    http2_connection.initiate_connection()
+    tls_connection.sendall(http2_connection.data_to_send())
+
+    # The TCP, TLS, and HTTP/2 handshakes are now complete. You can enter your
+    # main loop now.
diff --git a/tools/third_party/h2/examples/fragments/server_upgrade_fragment.py b/tools/third_party/h2/examples/fragments/server_upgrade_fragment.py
new file mode 100755
index 0000000..5b77c05
--- /dev/null
+++ b/tools/third_party/h2/examples/fragments/server_upgrade_fragment.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+"""
+Server Plaintext Upgrade
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This example code fragment demonstrates how to set up a HTTP/2 server that uses
+the plaintext HTTP Upgrade mechanism to negotiate HTTP/2 connectivity. For
+maximum explanatory value it uses the synchronous socket API that comes with
+the Python standard library. In production code you will want to use an
+actual HTTP/1.1 server library if possible.
+
+This code requires Python 3.5 or later.
+"""
+import h2.config
+import h2.connection
+import re
+import socket
+
+
+def establish_tcp_connection():
+    """
+    This function establishes a server-side TCP connection. How it works isn't
+    very important to this example.
+    """
+    bind_socket = socket.socket()
+    bind_socket.bind(('', 443))
+    bind_socket.listen(5)
+    return bind_socket.accept()[0]
+
+
+def receive_initial_request(connection):
+    """
+    We're going to receive a request. For the sake of this example, we're going
+    to assume that the first request has no body. If it doesn't have the
+    Upgrade: h2c header field and the HTTP2-Settings header field, we'll throw
+    errors.
+
+    In production code, you should use a proper HTTP/1.1 parser and actually
+    serve HTTP/1.1 requests!
+
+    Returns the value of the HTTP2-Settings header field.
+    """
+    data = b''
+    while not data.endswith(b'\r\n\r\n'):
+        data += connection.recv(8192)
+
+    match = re.search(b'Upgrade: h2c\r\n', data)
+    if match is None:
+        raise RuntimeError("HTTP/2 upgrade not requested!")
+
+    # We need to look for the HTTP2-Settings header field. Again, in production
+    # code you shouldn't use regular expressions for this, but it's good enough
+    # for the example.
+    match = re.search(b'HTTP2-Settings: (\\S+)\r\n', data)
+    if match is None:
+        raise RuntimeError("HTTP2-Settings header field not present!")
+
+    return match.group(1)
+
+
+def send_upgrade_response(connection):
+    """
+    This function writes the 101 Switching Protocols response.
+    """
+    response = (
+        b"HTTP/1.1 101 Switching Protocols\r\n"
+        b"Upgrade: h2c\r\n"
+        b"\r\n"
+    )
+    connection.sendall(response)
+
+
+def main():
+    """
+    The server upgrade flow.
+    """
+    # Step 1: Establish the TCP connection.
+    connection = establish_tcp_connection()
+
+    # Step 2: Read the response. We expect this to request an upgrade.
+    settings_header_value = receive_initial_request(connection)
+
+    # Step 3: Create a H2Connection object in server mode, and pass it the
+    # value of the HTTP2-Settings header field.
+    config = h2.config.H2Configuration(client_side=False)
+    h2_connection = h2.connection.H2Connection(config=config)
+    h2_connection.initiate_upgrade_connection(
+        settings_header=settings_header_value
+    )
+
+    # Step 4: Send the 101 Switching Protocols response.
+    send_upgrade_response(connection)
+
+    # Step 5: Send pending HTTP/2 data.
+    connection.sendall(h2_connection.data_to_send())
+
+    # At this point, you can enter your main loop. The first step has to be to
+    # send the response to the initial HTTP/1.1 request you received on stream
+    # 1.
+    main_loop()
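+
+
+# Not part of the original fragment: a rough sketch of what the main loop
+# could do, assuming it is handed the socket and the H2Connection. The
+# upgraded request lives on stream 1, so the first step is to answer it.
+def example_main_loop(sock, h2_conn):
+    h2_conn.send_headers(1, [
+        (':status', '200'),
+        ('content-length', '0'),
+    ], end_stream=True)
+    sock.sendall(h2_conn.data_to_send())
+    while True:
+        data = sock.recv(8192)
+        if not data:
+            break
+        h2_conn.receive_data(data)
+        sock.sendall(h2_conn.data_to_send())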
diff --git a/tools/third_party/h2/examples/tornado/server.crt b/tools/third_party/h2/examples/tornado/server.crt
new file mode 100755
index 0000000..bc8a4c0
--- /dev/null
+++ b/tools/third_party/h2/examples/tornado/server.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDUjCCAjoCCQCQmNzzpQTCijANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQGEwJH
+QjEPMA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5
+cGVyLWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20w
+HhcNMTUwOTE2MjAyOTA0WhcNMTYwOTE1MjAyOTA0WjBrMQswCQYDVQQGEwJHQjEP
+MA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5cGVy
+LWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC74ZeB4Jdb5cnC9KXXLJuzjwTg
+45q5EeShDYQe0TbKgreiUP6clU3BR0fFAVedN1q/LOuQ1HhvrDk1l4TfGF2bpCIq
+K+U9CnzcQknvdpyyVeOLtSsCjOPk4xydHwkQxwJvHVdtJx4CzDDqGbHNHCF/9gpQ
+lsa3JZW+tIZLK0XMEPFQ4XFXgegxTStO7kBBPaVIgG9Ooqc2MG4rjMNUpxa28WF1
+SyqWTICf2N8T/C+fPzbQLKCWrFrKUP7WQlOaqPNQL9bCDhSTPRTwQOc2/MzVZ9gT
+Xr0Z+JMTXwkSMKO52adE1pmKt00jJ1ecZBiJFyjx0X6hH+/59dLbG/7No+PzAgMB
+AAEwDQYJKoZIhvcNAQEFBQADggEBAG3UhOCa0EemL2iY+C+PR6CwEHQ+n7vkBzNz
+gKOG+Q39spyzqU1qJAzBxLTE81bIQbDg0R8kcLWHVH2y4zViRxZ0jHUFKMgjONW+
+Aj4evic/2Y/LxpLxCajECq/jeMHYrmQONszf9pbc0+exrQpgnwd8asfsM3d/FJS2
+5DIWryCKs/61m9vYL8icWx/9cnfPkBoNv1ER+V1L1TH3ARvABh406SBaeqLTm/kG
+MNuKytKWJsQbNlxzWHVgkKzVsBKvYj0uIEJpClIhbe6XNYRDy8T8mKXVWhJuxH4p
+/agmCG3nxO8aCrUK/EVmbWmVIfCH3t7jlwMX1nJ8MsRE7Ydnk8I=
+-----END CERTIFICATE-----
diff --git a/tools/third_party/h2/examples/tornado/server.key b/tools/third_party/h2/examples/tornado/server.key
new file mode 100755
index 0000000..11f9ea0
--- /dev/null
+++ b/tools/third_party/h2/examples/tornado/server.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAu+GXgeCXW+XJwvSl1yybs48E4OOauRHkoQ2EHtE2yoK3olD+
+nJVNwUdHxQFXnTdavyzrkNR4b6w5NZeE3xhdm6QiKivlPQp83EJJ73acslXji7Ur
+Aozj5OMcnR8JEMcCbx1XbSceAsww6hmxzRwhf/YKUJbGtyWVvrSGSytFzBDxUOFx
+V4HoMU0rTu5AQT2lSIBvTqKnNjBuK4zDVKcWtvFhdUsqlkyAn9jfE/wvnz820Cyg
+lqxaylD+1kJTmqjzUC/Wwg4Ukz0U8EDnNvzM1WfYE169GfiTE18JEjCjudmnRNaZ
+irdNIydXnGQYiRco8dF+oR/v+fXS2xv+zaPj8wIDAQABAoIBAQCsdq278+0c13d4
+tViSh4k5r1w8D9IUdp9XU2/nVgckqA9nOVAvbkJc3FC+P7gsQgbUHKj0XoVbhU1S
+q461t8kduPH/oiGhAcKR8WurHEdE0OC6ewhLJAeCMRQwCrAorXXHh7icIt9ClCuG
+iSWUcXEy5Cidx3oL3r1xvIbV85fzdDtE9RC1I/kMjAy63S47YGiqh5vYmJkCa8rG
+Dsd1sEMDPr63XJpqJj3uHRcPvySgXTa+ssTmUH8WJlPTjvDB5hnPz+lkk2JKVPNu
+8adzftZ6hSun+tsc4ZJp8XhGu/m/7MjxWh8MeupLHlXcOEsnj4uHQQsOM3zHojr3
+aDCZiC1pAoGBAOAhwe1ujoS2VJ5RXJ9KMs7eBER/02MDgWZjo54Jv/jFxPWGslKk
+QQceuTe+PruRm41nzvk3q4iZXt8pG0bvpgigN2epcVx/O2ouRsUWWBT0JrVlEzha
+TIvWjtZ5tSQExXgHL3VlM9+ka40l+NldLSPn25+prizaqhalWuvTpP23AoGBANaY
+VhEI6yhp0BBUSATEv9lRgkwx3EbcnXNXPQjDMOthsyfq7FxbdOBEK1rwSDyuE6Ij
+zQGcTOfdiur5Ttg0OQilTJIXJAlpoeecOQ9yGma08c5FMXVJJvcZUuWRZWg1ocQj
+/hx0WVE9NwOoKwTBERv8HX7vJOFRZyvgkJwFxoulAoGAe4m/1XoZrga9z2GzNs10
+AdgX7BW00x+MhH4pIiPnn1yK+nYa9jg4647Asnv3IfXZEnEEgRNxReKbi0+iDFBt
+aNW+lDGuHTi37AfD1EBDnpEQgO1MUcRb6rwBkTAWatsCaO00+HUmyX9cFLm4Vz7n
+caILyQ6CxZBlLgRIgDHxADMCgYEAtubsJGTHmZBmSCStpXLUWbOBLNQqfTM398DZ
+QoirP1PsUQ+IGUfSG/u+QCogR6fPEBkXeFHxsoY/Cvsm2lvYaKgK1VFn46Xm2vNq
+JuIH4pZCqp6LAv4weddZslT0a5eaowRSZ4o7PmTAaRuCXvD3VjTSJwhJFMo+90TV
+vEWn7gkCgYEAkk+unX9kYmKoUdLh22/tzQekBa8WqMxXDwzBCECTAs2GlpL/f73i
+zD15TnaNfLP6Q5RNb0N9tb0Gz1wSkwI1+jGAQLnh2K9X9cIVIqJn8Mf/KQa/wUDV
+Tb1j7FoGUEgX7vbsyWuTd8P76kNYyGqCss1XmbttcSolqpbIdlSUcO0=
+-----END RSA PRIVATE KEY-----
diff --git a/tools/third_party/h2/examples/tornado/tornado-server.py b/tools/third_party/h2/examples/tornado/tornado-server.py
new file mode 100755
index 0000000..e7d08ab
--- /dev/null
+++ b/tools/third_party/h2/examples/tornado/tornado-server.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+tornado-server.py
+~~~~~~~~~~~~~~~~~
+
+A fully-functional HTTP/2 server written for Tornado.
+"""
+import collections
+import json
+import ssl
+
+import tornado.gen
+import tornado.ioloop
+import tornado.iostream
+import tornado.tcpserver
+
+from h2.config import H2Configuration
+from h2.connection import H2Connection
+from h2.events import RequestReceived, DataReceived
+
+
+def create_ssl_context(certfile, keyfile):
+    ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+    ssl_context.options |= (
+        ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
+    )
+    ssl_context.set_ciphers("ECDHE+AESGCM")
+    ssl_context.load_cert_chain(certfile=certfile, keyfile=keyfile)
+    ssl_context.set_alpn_protocols(["h2"])
+    return ssl_context
+
+
+class H2Server(tornado.tcpserver.TCPServer):
+
+    @tornado.gen.coroutine
+    def handle_stream(self, stream, address):
+        handler = EchoHeadersHandler(stream)
+        yield handler.handle()
+
+
+class EchoHeadersHandler(object):
+
+    def __init__(self, stream):
+        self.stream = stream
+
+        config = H2Configuration(client_side=False)
+        self.conn = H2Connection(config=config)
+
+    @tornado.gen.coroutine
+    def handle(self):
+        self.conn.initiate_connection()
+        yield self.stream.write(self.conn.data_to_send())
+
+        while True:
+            try:
+                data = yield self.stream.read_bytes(65535, partial=True)
+                if not data:
+                    break
+
+                events = self.conn.receive_data(data)
+                for event in events:
+                    if isinstance(event, RequestReceived):
+                        self.request_received(event.headers, event.stream_id)
+                    elif isinstance(event, DataReceived):
+                        self.conn.reset_stream(event.stream_id)
+
+                yield self.stream.write(self.conn.data_to_send())
+
+            except tornado.iostream.StreamClosedError:
+                break
+
+    def request_received(self, headers, stream_id):
+        headers = collections.OrderedDict(headers)
+        data = json.dumps({'headers': headers}, indent=4).encode('utf-8')
+
+        response_headers = (
+            (':status', '200'),
+            ('content-type', 'application/json'),
+            ('content-length', str(len(data))),
+            ('server', 'tornado-h2'),
+        )
+        self.conn.send_headers(stream_id, response_headers)
+        self.conn.send_data(stream_id, data, end_stream=True)
+
+
+if __name__ == '__main__':
+    ssl_context = create_ssl_context('server.crt', 'server.key')
+    server = H2Server(ssl_options=ssl_context)
+    server.listen(8888)
+    io_loop = tornado.ioloop.IOLoop.current()
+    io_loop.start()
diff --git a/tools/third_party/h2/examples/twisted/head_request.py b/tools/third_party/h2/examples/twisted/head_request.py
new file mode 100755
index 0000000..72fe5f8
--- /dev/null
+++ b/tools/third_party/h2/examples/twisted/head_request.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+"""
+head_request.py
+~~~~~~~~~~~~~~~
+
+A short example that demonstrates a client that makes HEAD requests to certain
+websites.
+
+This example is intended as a reproduction of nghttp2 issue 396, for the
+purposes of compatibility testing.
+"""
+from __future__ import print_function
+
+from twisted.internet import reactor
+from twisted.internet.endpoints import connectProtocol, SSL4ClientEndpoint
+from twisted.internet.protocol import Protocol
+from twisted.internet.ssl import optionsForClientTLS
+from hyperframe.frame import SettingsFrame
+from h2.connection import H2Connection
+from h2.events import (
+    ResponseReceived, DataReceived, StreamEnded,
+    StreamReset, SettingsAcknowledged,
+)
+
+
+AUTHORITY = u'http2bin.org'
+PATH = '/'
+SIZE = 4096
+
+
+class H2Protocol(Protocol):
+    def __init__(self):
+        self.conn = H2Connection()
+        self.known_proto = None
+        self.request_made = False
+
+    def connectionMade(self):
+        self.conn.initiate_connection()
+
+        # This reproduces the error in #396, by changing the header table size.
+        self.conn.update_settings({SettingsFrame.HEADER_TABLE_SIZE: SIZE})
+
+        self.transport.write(self.conn.data_to_send())
+
+    def dataReceived(self, data):
+        if not self.known_proto:
+            self.known_proto = self.transport.negotiatedProtocol
+            assert self.known_proto == b'h2'
+
+        events = self.conn.receive_data(data)
+
+        for event in events:
+            if isinstance(event, ResponseReceived):
+                self.handleResponse(event.headers, event.stream_id)
+            elif isinstance(event, DataReceived):
+                self.handleData(event.data, event.stream_id)
+            elif isinstance(event, StreamEnded):
+                self.endStream(event.stream_id)
+            elif isinstance(event, SettingsAcknowledged):
+                self.settingsAcked(event)
+            elif isinstance(event, StreamReset):
+                reactor.stop()
+                raise RuntimeError("Stream reset: %d" % event.error_code)
+            else:
+                print(event)
+
+        data = self.conn.data_to_send()
+        if data:
+            self.transport.write(data)
+
+    def settingsAcked(self, event):
+        # Having received the remote settings change, let's send our request.
+        if not self.request_made:
+            self.sendRequest()
+
+    def handleResponse(self, response_headers, stream_id):
+        for name, value in response_headers:
+            print("%s: %s" % (name.decode('utf-8'), value.decode('utf-8')))
+
+        print("")
+
+    def handleData(self, data, stream_id):
+        print(data, end='')
+
+    def endStream(self, stream_id):
+        self.conn.close_connection()
+        self.transport.write(self.conn.data_to_send())
+        self.transport.loseConnection()
+        reactor.stop()
+
+    def sendRequest(self):
+        request_headers = [
+            (':method', 'HEAD'),
+            (':authority', AUTHORITY),
+            (':scheme', 'https'),
+            (':path', PATH),
+            ('user-agent', 'hyper-h2/1.0.0'),
+        ]
+        self.conn.send_headers(1, request_headers, end_stream=True)
+        self.request_made = True
+
+options = optionsForClientTLS(
+    hostname=AUTHORITY,
+    acceptableProtocols=[b'h2'],
+)
+
+connectProtocol(
+    SSL4ClientEndpoint(reactor, AUTHORITY, 443, options),
+    H2Protocol()
+)
+reactor.run()
diff --git a/tools/third_party/h2/examples/twisted/post_request.py b/tools/third_party/h2/examples/twisted/post_request.py
new file mode 100755
index 0000000..746f132
--- /dev/null
+++ b/tools/third_party/h2/examples/twisted/post_request.py
@@ -0,0 +1,249 @@
+# -*- coding: utf-8 -*-
+"""
+post_request.py
+~~~~~~~~~~~~~~~
+
+A short example that demonstrates a client that makes POST requests to certain
+websites.
+
+This example is intended to demonstrate how to handle uploading request bodies.
+In this instance, a file will be uploaded. In order to handle arbitrary files,
+this example also demonstrates how to obey HTTP/2 flow control rules.
+
+Takes one command-line argument: a path to a file in the filesystem to upload.
+If none is present, uploads this file.
+"""
+from __future__ import print_function
+
+import mimetypes
+import os
+import sys
+
+from twisted.internet import reactor, defer
+from twisted.internet.endpoints import connectProtocol, SSL4ClientEndpoint
+from twisted.internet.protocol import Protocol
+from twisted.internet.ssl import optionsForClientTLS
+from h2.connection import H2Connection
+from h2.events import (
+    ResponseReceived, DataReceived, StreamEnded, StreamReset, WindowUpdated,
+    SettingsAcknowledged,
+)
+
+
+AUTHORITY = u'http2bin.org'
+PATH = '/post'
+
+
+class H2Protocol(Protocol):
+    def __init__(self, file_path):
+        self.conn = H2Connection()
+        self.known_proto = None
+        self.request_made = False
+        self.request_complete = False
+        self.file_path = file_path
+        self.flow_control_deferred = None
+        self.fileobj = None
+        self.file_size = None
+
+    def connectionMade(self):
+        """
+        Called by Twisted when the TCP connection is established. We can start
+        sending some data now: we should open with the connection preamble.
+        """
+        self.conn.initiate_connection()
+        self.transport.write(self.conn.data_to_send())
+
+    def dataReceived(self, data):
+        """
+        Called by Twisted when data is received on the connection.
+
+        We need to check a few things here. Firstly, we want to validate that
+        we actually negotiated HTTP/2: if we didn't, we shouldn't proceed!
+
+        Then, we want to pass the data to the protocol stack and check what
+        events occurred.
+        """
+        if not self.known_proto:
+            self.known_proto = self.transport.negotiatedProtocol
+            assert self.known_proto == b'h2'
+
+        events = self.conn.receive_data(data)
+
+        for event in events:
+            if isinstance(event, ResponseReceived):
+                self.handleResponse(event.headers)
+            elif isinstance(event, DataReceived):
+                self.handleData(event.data)
+            elif isinstance(event, StreamEnded):
+                self.endStream()
+            elif isinstance(event, SettingsAcknowledged):
+                self.settingsAcked(event)
+            elif isinstance(event, StreamReset):
+                reactor.stop()
+                raise RuntimeError("Stream reset: %d" % event.error_code)
+            elif isinstance(event, WindowUpdated):
+                self.windowUpdated(event)
+
+        data = self.conn.data_to_send()
+        if data:
+            self.transport.write(data)
+
+    def settingsAcked(self, event):
+        """
+        Called when the remote party ACKs our settings. We send a SETTINGS
+        frame as part of the preamble, so if we want to be very polite we can
+        wait until the ACK for that frame comes before we start sending our
+        request.
+        """
+        if not self.request_made:
+            self.sendRequest()
+
+    def handleResponse(self, response_headers):
+        """
+        Handle the response by printing the response headers.
+        """
+        for name, value in response_headers:
+            print("%s: %s" % (name.decode('utf-8'), value.decode('utf-8')))
+
+        print("")
+
+    def handleData(self, data):
+        """
+        We handle data that's received by just printing it.
+        """
+        print(data, end='')
+
+    def endStream(self):
+        """
+        We call this when the stream is cleanly ended by the remote peer. That
+        means that the response is complete.
+
+        Because this code only makes a single HTTP/2 request, once we receive
+        the complete response we can safely tear the connection down and stop
+        the reactor. We do that as cleanly as possible.
+        """
+        self.request_complete = True
+        self.conn.close_connection()
+        self.transport.write(self.conn.data_to_send())
+        self.transport.loseConnection()
+
+    def windowUpdated(self, event):
+        """
+        We call this when the flow control window for the connection or the
+        stream has been widened. If there's a flow control deferred present
+        (that is, if we're blocked behind the flow control), we fire it.
+        Otherwise, we do nothing.
+        """
+        if self.flow_control_deferred is None:
+            return
+
+        # Make sure we remove the flow control deferred to avoid firing it
+        # more than once.
+        flow_control_deferred = self.flow_control_deferred
+        self.flow_control_deferred = None
+        flow_control_deferred.callback(None)
+
+    def connectionLost(self, reason=None):
+        """
+        Called by Twisted when the connection is gone. Regardless of whether
+        it was clean or not, we want to stop the reactor.
+        """
+        if self.fileobj is not None:
+            self.fileobj.close()
+
+        if reactor.running:
+            reactor.stop()
+
+    def sendRequest(self):
+        """
+        Send the POST request.
+
+        A POST request is made up of one headers frame, and then 0+ data
+        frames. This method begins by sending the headers, and then starts a
+        series of calls to send data.
+        """
+        # First, we need to work out how large the file is.
+        self.file_size = os.stat(self.file_path).st_size
+
+        # Next, we want to guess a content-type and content-encoding.
+        content_type, content_encoding = mimetypes.guess_type(self.file_path)
+
+        # Now we can build a header block.
+        request_headers = [
+            (':method', 'POST'),
+            (':authority', AUTHORITY),
+            (':scheme', 'https'),
+            (':path', PATH),
+            ('user-agent', 'hyper-h2/1.0.0'),
+            ('content-length', str(self.file_size)),
+        ]
+
+        if content_type is not None:
+            request_headers.append(('content-type', content_type))
+
+            if content_encoding is not None:
+                request_headers.append(('content-encoding', content_encoding))
+
+        self.conn.send_headers(1, request_headers)
+        self.request_made = True
+
+        # We can now open the file.
+        self.fileobj = open(self.file_path, 'rb')
+
+        # We now need to send all the relevant data. We do this by checking
+        # what the acceptable amount of data is to send, and sending it. If we
+        # find ourselves blocked behind flow control, we then place a deferred
+        # and wait until that deferred fires.
+        self.sendFileData()
+
+    def sendFileData(self):
+        """
+        Send some file data on the connection.
+        """
+        # Firstly, check what the flow control window is for stream 1.
+        window_size = self.conn.local_flow_control_window(stream_id=1)
+
+        # Next, check what the maximum frame size is.
+        max_frame_size = self.conn.max_outbound_frame_size
+
+        # We will send no more than the window size or the remaining file size
+        # of data in this call, whichever is smaller.
+        bytes_to_send = min(window_size, self.file_size)
+
+        # We now need to send a number of data frames.
+        while bytes_to_send > 0:
+            chunk_size = min(bytes_to_send, max_frame_size)
+            data_chunk = self.fileobj.read(chunk_size)
+            self.conn.send_data(stream_id=1, data=data_chunk)
+
+            bytes_to_send -= chunk_size
+            self.file_size -= chunk_size
+
+        # We've prepared a whole chunk of data to send. If the file is fully
+        # sent, we also want to end the stream: we're done here.
+        if self.file_size == 0:
+            self.conn.end_stream(stream_id=1)
+        else:
+            # We've still got data left to send but the window is closed. Save
+            # a Deferred that will call us when the window gets opened.
+            self.flow_control_deferred = defer.Deferred()
+            # The deferred fires with None; drop that result before resuming.
+            self.flow_control_deferred.addCallback(lambda _: self.sendFileData())
+
+        self.transport.write(self.conn.data_to_send())
+
+
+try:
+    filename = sys.argv[1]
+except IndexError:
+    filename = __file__
+
+options = optionsForClientTLS(
+    hostname=AUTHORITY,
+    acceptableProtocols=[b'h2'],
+)
+
+connectProtocol(
+    SSL4ClientEndpoint(reactor, AUTHORITY, 443, options),
+    H2Protocol(filename)
+)
+reactor.run()
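+
+
+# A minimal sketch (not invoked by this example) of the send-within-window
+# pattern used by sendFileData above: send no more per call than both the
+# stream's flow control window and the maximum frame size allow, then flush
+# the serialized frames to the transport. `conn` is assumed to be an
+# h2.connection.H2Connection and `transport` a connected Twisted transport.
+def _send_within_window(conn, transport, stream_id, payload):
+    window = conn.local_flow_control_window(stream_id=stream_id)
+    frame_size = conn.max_outbound_frame_size
+    while payload and window > 0:
+        size = min(window, frame_size, len(payload))
+        conn.send_data(stream_id=stream_id, data=payload[:size])
+        payload = payload[size:]
+        window -= size
+    transport.write(conn.data_to_send())
+    # Anything left over must wait for a WindowUpdated event.
+    return payload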
diff --git a/tools/third_party/h2/examples/twisted/server.crt b/tools/third_party/h2/examples/twisted/server.crt
new file mode 100755
index 0000000..bc8a4c0
--- /dev/null
+++ b/tools/third_party/h2/examples/twisted/server.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDUjCCAjoCCQCQmNzzpQTCijANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQGEwJH
+QjEPMA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5
+cGVyLWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20w
+HhcNMTUwOTE2MjAyOTA0WhcNMTYwOTE1MjAyOTA0WjBrMQswCQYDVQQGEwJHQjEP
+MA0GA1UECBMGTG9uZG9uMQ8wDQYDVQQHEwZMb25kb24xETAPBgNVBAoTCGh5cGVy
+LWgyMREwDwYDVQQLEwhoeXBleS1oMjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC74ZeB4Jdb5cnC9KXXLJuzjwTg
+45q5EeShDYQe0TbKgreiUP6clU3BR0fFAVedN1q/LOuQ1HhvrDk1l4TfGF2bpCIq
+K+U9CnzcQknvdpyyVeOLtSsCjOPk4xydHwkQxwJvHVdtJx4CzDDqGbHNHCF/9gpQ
+lsa3JZW+tIZLK0XMEPFQ4XFXgegxTStO7kBBPaVIgG9Ooqc2MG4rjMNUpxa28WF1
+SyqWTICf2N8T/C+fPzbQLKCWrFrKUP7WQlOaqPNQL9bCDhSTPRTwQOc2/MzVZ9gT
+Xr0Z+JMTXwkSMKO52adE1pmKt00jJ1ecZBiJFyjx0X6hH+/59dLbG/7No+PzAgMB
+AAEwDQYJKoZIhvcNAQEFBQADggEBAG3UhOCa0EemL2iY+C+PR6CwEHQ+n7vkBzNz
+gKOG+Q39spyzqU1qJAzBxLTE81bIQbDg0R8kcLWHVH2y4zViRxZ0jHUFKMgjONW+
+Aj4evic/2Y/LxpLxCajECq/jeMHYrmQONszf9pbc0+exrQpgnwd8asfsM3d/FJS2
+5DIWryCKs/61m9vYL8icWx/9cnfPkBoNv1ER+V1L1TH3ARvABh406SBaeqLTm/kG
+MNuKytKWJsQbNlxzWHVgkKzVsBKvYj0uIEJpClIhbe6XNYRDy8T8mKXVWhJuxH4p
+/agmCG3nxO8aCrUK/EVmbWmVIfCH3t7jlwMX1nJ8MsRE7Ydnk8I=
+-----END CERTIFICATE-----
diff --git a/tools/third_party/h2/examples/twisted/server.csr b/tools/third_party/h2/examples/twisted/server.csr
new file mode 100755
index 0000000..cadb53a
--- /dev/null
+++ b/tools/third_party/h2/examples/twisted/server.csr
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICsDCCAZgCAQAwazELMAkGA1UEBhMCR0IxDzANBgNVBAgTBkxvbmRvbjEPMA0G
+A1UEBxMGTG9uZG9uMREwDwYDVQQKEwhoeXBlci1oMjERMA8GA1UECxMIaHlwZXkt
+aDIxFDASBgNVBAMTC2V4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAu+GXgeCXW+XJwvSl1yybs48E4OOauRHkoQ2EHtE2yoK3olD+nJVN
+wUdHxQFXnTdavyzrkNR4b6w5NZeE3xhdm6QiKivlPQp83EJJ73acslXji7UrAozj
+5OMcnR8JEMcCbx1XbSceAsww6hmxzRwhf/YKUJbGtyWVvrSGSytFzBDxUOFxV4Ho
+MU0rTu5AQT2lSIBvTqKnNjBuK4zDVKcWtvFhdUsqlkyAn9jfE/wvnz820Cyglqxa
+ylD+1kJTmqjzUC/Wwg4Ukz0U8EDnNvzM1WfYE169GfiTE18JEjCjudmnRNaZirdN
+IydXnGQYiRco8dF+oR/v+fXS2xv+zaPj8wIDAQABoAAwDQYJKoZIhvcNAQEFBQAD
+ggEBACZpSoZWxHU5uagpM2Vinh2E7CXiMAlBc6NXhQMD/3fycr9sX4d/+y9Gy3bL
+OfEOHBPlQVGrt05aiTh7m5s3HQfsH8l3RfKpfzCfoqd2ESVwgB092bJwY9fBnkw/
+UzIHvSnlaKc78h+POUoATOb4faQ8P04wzJHzckbCDI8zRzBZTMVGuiWUopq7K5Ce
+VSesbqHHnW9ob/apigKNE0k7et/28NOXNEP90tTsz98yN3TP+Nv9puwvT9JZOOoG
+0PZIQKJIaZ1NZoNQHLN9gXz012XWa99cBE0qNiBUugXlNhXjkIIM8FIhDQOREB18
+0KDxEma+A0quyjnDMwPSoZsMca4=
+-----END CERTIFICATE REQUEST-----
diff --git a/tools/third_party/h2/examples/twisted/server.key b/tools/third_party/h2/examples/twisted/server.key
new file mode 100755
index 0000000..11f9ea0
--- /dev/null
+++ b/tools/third_party/h2/examples/twisted/server.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAu+GXgeCXW+XJwvSl1yybs48E4OOauRHkoQ2EHtE2yoK3olD+
+nJVNwUdHxQFXnTdavyzrkNR4b6w5NZeE3xhdm6QiKivlPQp83EJJ73acslXji7Ur
+Aozj5OMcnR8JEMcCbx1XbSceAsww6hmxzRwhf/YKUJbGtyWVvrSGSytFzBDxUOFx
+V4HoMU0rTu5AQT2lSIBvTqKnNjBuK4zDVKcWtvFhdUsqlkyAn9jfE/wvnz820Cyg
+lqxaylD+1kJTmqjzUC/Wwg4Ukz0U8EDnNvzM1WfYE169GfiTE18JEjCjudmnRNaZ
+irdNIydXnGQYiRco8dF+oR/v+fXS2xv+zaPj8wIDAQABAoIBAQCsdq278+0c13d4
+tViSh4k5r1w8D9IUdp9XU2/nVgckqA9nOVAvbkJc3FC+P7gsQgbUHKj0XoVbhU1S
+q461t8kduPH/oiGhAcKR8WurHEdE0OC6ewhLJAeCMRQwCrAorXXHh7icIt9ClCuG
+iSWUcXEy5Cidx3oL3r1xvIbV85fzdDtE9RC1I/kMjAy63S47YGiqh5vYmJkCa8rG
+Dsd1sEMDPr63XJpqJj3uHRcPvySgXTa+ssTmUH8WJlPTjvDB5hnPz+lkk2JKVPNu
+8adzftZ6hSun+tsc4ZJp8XhGu/m/7MjxWh8MeupLHlXcOEsnj4uHQQsOM3zHojr3
+aDCZiC1pAoGBAOAhwe1ujoS2VJ5RXJ9KMs7eBER/02MDgWZjo54Jv/jFxPWGslKk
+QQceuTe+PruRm41nzvk3q4iZXt8pG0bvpgigN2epcVx/O2ouRsUWWBT0JrVlEzha
+TIvWjtZ5tSQExXgHL3VlM9+ka40l+NldLSPn25+prizaqhalWuvTpP23AoGBANaY
+VhEI6yhp0BBUSATEv9lRgkwx3EbcnXNXPQjDMOthsyfq7FxbdOBEK1rwSDyuE6Ij
+zQGcTOfdiur5Ttg0OQilTJIXJAlpoeecOQ9yGma08c5FMXVJJvcZUuWRZWg1ocQj
+/hx0WVE9NwOoKwTBERv8HX7vJOFRZyvgkJwFxoulAoGAe4m/1XoZrga9z2GzNs10
+AdgX7BW00x+MhH4pIiPnn1yK+nYa9jg4647Asnv3IfXZEnEEgRNxReKbi0+iDFBt
+aNW+lDGuHTi37AfD1EBDnpEQgO1MUcRb6rwBkTAWatsCaO00+HUmyX9cFLm4Vz7n
+caILyQ6CxZBlLgRIgDHxADMCgYEAtubsJGTHmZBmSCStpXLUWbOBLNQqfTM398DZ
+QoirP1PsUQ+IGUfSG/u+QCogR6fPEBkXeFHxsoY/Cvsm2lvYaKgK1VFn46Xm2vNq
+JuIH4pZCqp6LAv4weddZslT0a5eaowRSZ4o7PmTAaRuCXvD3VjTSJwhJFMo+90TV
+vEWn7gkCgYEAkk+unX9kYmKoUdLh22/tzQekBa8WqMxXDwzBCECTAs2GlpL/f73i
+zD15TnaNfLP6Q5RNb0N9tb0Gz1wSkwI1+jGAQLnh2K9X9cIVIqJn8Mf/KQa/wUDV
+Tb1j7FoGUEgX7vbsyWuTd8P76kNYyGqCss1XmbttcSolqpbIdlSUcO0=
+-----END RSA PRIVATE KEY-----
diff --git a/tools/third_party/h2/examples/twisted/twisted-server.py b/tools/third_party/h2/examples/twisted/twisted-server.py
new file mode 100755
index 0000000..7cde609
--- /dev/null
+++ b/tools/third_party/h2/examples/twisted/twisted-server.py
@@ -0,0 +1,182 @@
+# -*- coding: utf-8 -*-
+"""
+twisted-server.py
+~~~~~~~~~~~~~~~~~
+
+A fully-functional HTTP/2 server written for Twisted.
+"""
+import functools
+import mimetypes
+import os
+import os.path
+import sys
+
+from OpenSSL import crypto
+from twisted.internet.defer import Deferred, inlineCallbacks
+from twisted.internet.protocol import Protocol, Factory
+from twisted.internet import endpoints, reactor, ssl
+from h2.config import H2Configuration
+from h2.connection import H2Connection
+from h2.events import (
+    RequestReceived, DataReceived, WindowUpdated
+)
+
+
+def close_file(file, d):
+    file.close()
+
+
+READ_CHUNK_SIZE = 8192
+
+
+class H2Protocol(Protocol):
+    def __init__(self, root):
+        config = H2Configuration(client_side=False)
+        self.conn = H2Connection(config=config)
+        self.known_proto = None
+        self.root = root
+
+        self._flow_control_deferreds = {}
+
+    def connectionMade(self):
+        self.conn.initiate_connection()
+        self.transport.write(self.conn.data_to_send())
+
+    def dataReceived(self, data):
+        if not self.known_proto:
+            self.known_proto = True
+
+        events = self.conn.receive_data(data)
+        data_to_send = self.conn.data_to_send()  # must be called, not truth-tested
+        if data_to_send:
+            self.transport.write(data_to_send)
+
+        for event in events:
+            if isinstance(event, RequestReceived):
+                self.requestReceived(event.headers, event.stream_id)
+            elif isinstance(event, DataReceived):
+                self.dataFrameReceived(event.stream_id)
+            elif isinstance(event, WindowUpdated):
+                self.windowUpdated(event)
+
+    def requestReceived(self, headers, stream_id):
+        headers = dict(headers)  # Lossy conversion (drops duplicate header names); fix later.
+        assert headers[b':method'] == b'GET'
+
+        path = headers[b':path'].lstrip(b'/')
+        full_path = os.path.join(self.root, path)
+
+        if not os.path.exists(full_path):
+            response_headers = (
+                (':status', '404'),
+                ('content-length', '0'),
+                ('server', 'twisted-h2'),
+            )
+            self.conn.send_headers(
+                stream_id, response_headers, end_stream=True
+            )
+            self.transport.write(self.conn.data_to_send())
+        else:
+            self.sendFile(full_path, stream_id)
+
+        return
+
+    def dataFrameReceived(self, stream_id):
+        self.conn.reset_stream(stream_id)
+        self.transport.write(self.conn.data_to_send())
+
+    def sendFile(self, file_path, stream_id):
+        filesize = os.stat(file_path).st_size
+        content_type, content_encoding = mimetypes.guess_type(file_path)
+        response_headers = [
+            (':status', '200'),
+            ('content-length', str(filesize)),
+            ('server', 'twisted-h2'),
+        ]
+        if content_type:
+            response_headers.append(('content-type', content_type))
+        if content_encoding:
+            response_headers.append(('content-encoding', content_encoding))
+
+        self.conn.send_headers(stream_id, response_headers)
+        self.transport.write(self.conn.data_to_send())
+
+        f = open(file_path, 'rb')
+        d = self._send_file(f, stream_id)
+        d.addErrback(functools.partial(close_file, f))
+
+    def windowUpdated(self, event):
+        """
+        Handle a WindowUpdated event by firing any waiting data sending
+        callbacks.
+        """
+        stream_id = event.stream_id
+
+        if stream_id and stream_id in self._flow_control_deferreds:
+            d = self._flow_control_deferreds.pop(stream_id)
+            d.callback(event.delta)
+        elif not stream_id:
+            for d in self._flow_control_deferreds.values():
+                d.callback(event.delta)
+
+            self._flow_control_deferreds = {}
+
+        return
+
+    @inlineCallbacks
+    def _send_file(self, file, stream_id):
+        """
+        This callback sends more data for a given file on the stream.
+        """
+        keep_reading = True
+        while keep_reading:
+            while not self.conn.remote_flow_control_window(stream_id):
+                yield self.wait_for_flow_control(stream_id)
+
+            chunk_size = min(
+                self.conn.remote_flow_control_window(stream_id), READ_CHUNK_SIZE
+            )
+            data = file.read(chunk_size)
+            keep_reading = len(data) == chunk_size
+            self.conn.send_data(stream_id, data, not keep_reading)
+            self.transport.write(self.conn.data_to_send())
+
+            if not keep_reading:
+                break
+
+        file.close()
+
+    def wait_for_flow_control(self, stream_id):
+        """
+        Returns a Deferred that fires when the flow control window is opened.
+        """
+        d = Deferred()
+        self._flow_control_deferreds[stream_id] = d
+        return d
+
+
+class H2Factory(Factory):
+    def __init__(self, root):
+        self.root = root
+
+    def buildProtocol(self, addr):
+        return H2Protocol(self.root)
+
+
+root = sys.argv[1]
+
+with open('server.crt', 'r') as f:
+    cert_data = f.read()
+with open('server.key', 'r') as f:
+    key_data = f.read()
+
+cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_data)
+key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_data)
+options = ssl.CertificateOptions(
+    privateKey=key,
+    certificate=cert,
+    acceptableProtocols=[b'h2'],
+)
+
+endpoint = endpoints.SSL4ServerEndpoint(reactor, 8080, options, backlog=128)
+endpoint.listen(H2Factory(root))
+reactor.run()
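+
+# Usage sketch: assuming the server.crt and server.key files above sit next to
+# this script, the server can be started with a document root argument, e.g.
+#
+#     python twisted-server.py /path/to/files
+#
+# It then serves HTTP/2 (ALPN "h2") over TLS on port 8080, as configured by
+# the SSL4ServerEndpoint above.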
diff --git a/tools/third_party/h2/h2/__init__.py b/tools/third_party/h2/h2/__init__.py
new file mode 100755
index 0000000..38805dd
--- /dev/null
+++ b/tools/third_party/h2/h2/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+"""
+h2
+~~
+
+A HTTP/2 implementation.
+"""
+__version__ = '3.0.1'
diff --git a/tools/third_party/h2/h2/config.py b/tools/third_party/h2/h2/config.py
new file mode 100755
index 0000000..08129a4
--- /dev/null
+++ b/tools/third_party/h2/h2/config.py
@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+"""
+h2/config
+~~~~~~~~~
+
+Objects for controlling the configuration of the HTTP/2 stack.
+"""
+
+
+class _BooleanConfigOption(object):
+    """
+    Descriptor for handling a boolean config option.  This will block
+    attempts to set boolean config options to non-bools.
+    """
+    def __init__(self, name):
+        self.name = name
+        self.attr_name = '_%s' % self.name
+
+    def __get__(self, instance, owner):
+        return getattr(instance, self.attr_name)
+
+    def __set__(self, instance, value):
+        if not isinstance(value, bool):
+            raise ValueError("%s must be a bool" % self.name)
+        setattr(instance, self.attr_name, value)
+
+
+class DummyLogger(object):
+    """
+    A Logger object that performs no actual logging, hence a DummyLogger.
+
+    For this class, every log operation is a no-op. The intent is to avoid
+    sprinkling conditionals throughout the hyper-h2 code around calls to
+    logging functions when no logger is passed into the corresponding object.
+    """
+    def __init__(self, *vargs):
+        pass
+
+    def debug(self, *vargs, **kwargs):
+        """
+        No-op logging. Only level needed for now.
+        """
+        pass
+
+
+class H2Configuration(object):
+    """
+    An object that controls the way a single HTTP/2 connection behaves.
+
+    This object allows the users to customize behaviour. In particular, it
+    allows users to enable or disable optional features, or to otherwise handle
+    various unusual behaviours.
+
+    This object has very little behaviour of its own: it mostly just ensures
+    that configuration is self-consistent.
+
+    :param client_side: Whether this object is to be used on the client side of
+        a connection, or on the server side. Affects the logic used by the
+        state machine, the default settings values, the allowable stream IDs,
+        and several other properties. Defaults to ``True``.
+    :type client_side: ``bool``
+
+    :param header_encoding: Controls whether the headers emitted by this object
+        in events are transparently decoded to ``unicode`` strings, and what
+        encoding is used to do that decoding. This defaults to ``None``,
+        meaning that headers will be returned as bytes. To automatically
+        decode headers (that is, to return them as unicode strings), this can
+        be set to the string name of any encoding, e.g. ``'utf-8'``.
+
+        .. versionchanged:: 3.0.0
+           Changed default value from ``'utf-8'`` to ``None``
+
+    :type header_encoding: ``str``, ``False``, or ``None``
+
+    :param validate_outbound_headers: Controls whether the headers emitted
+        by this object are validated against the rules in RFC 7540.
+        Disabling this setting will cause outbound header validation to
+        be skipped, and allow the object to emit headers that may be illegal
+        according to RFC 7540. Defaults to ``True``.
+    :type validate_outbound_headers: ``bool``
+
+    :param normalize_outbound_headers: Controls whether the headers emitted
+        by this object are normalized before sending.  Disabling this setting
+        will cause outbound header normalization to be skipped, and allow
+        the object to emit headers that may be illegal according to
+        RFC 7540. Defaults to ``True``.
+    :type normalize_outbound_headers: ``bool``
+
+    :param validate_inbound_headers: Controls whether the headers received
+        by this object are validated against the rules in RFC 7540.
+        Disabling this setting will cause inbound header validation to
+        be skipped, and allow the object to receive headers that may be illegal
+        according to RFC 7540. Defaults to ``True``.
+    :type validate_inbound_headers: ``bool``
+
+    :param normalize_inbound_headers: Controls whether the headers received by
+        this object are normalized according to the rules of RFC 7540.
+        Disabling this setting may lead to hyper-h2 emitting header blocks that
+        some RFCs forbid, e.g. with multiple cookie fields.
+
+        .. versionadded:: 3.0.0
+
+    :type normalize_inbound_headers: ``bool``
+
+    :param logger: A logger that conforms to the requirements for this module,
+        those being no I/O and no context switches, which is needed in order
+        to run in asynchronous operation.
+
+        .. versionadded:: 2.6.0
+
+    :type logger: ``logging.Logger``
+    """
+    client_side = _BooleanConfigOption('client_side')
+    validate_outbound_headers = _BooleanConfigOption(
+        'validate_outbound_headers'
+    )
+    normalize_outbound_headers = _BooleanConfigOption(
+        'normalize_outbound_headers'
+    )
+    validate_inbound_headers = _BooleanConfigOption(
+        'validate_inbound_headers'
+    )
+    normalize_inbound_headers = _BooleanConfigOption(
+        'normalize_inbound_headers'
+    )
+
+    def __init__(self,
+                 client_side=True,
+                 header_encoding=None,
+                 validate_outbound_headers=True,
+                 normalize_outbound_headers=True,
+                 validate_inbound_headers=True,
+                 normalize_inbound_headers=True,
+                 logger=None):
+        self.client_side = client_side
+        self.header_encoding = header_encoding
+        self.validate_outbound_headers = validate_outbound_headers
+        self.normalize_outbound_headers = normalize_outbound_headers
+        self.validate_inbound_headers = validate_inbound_headers
+        self.normalize_inbound_headers = normalize_inbound_headers
+        self.logger = logger or DummyLogger(__name__)
+
+    @property
+    def header_encoding(self):
+        """
+        Controls whether the headers emitted by this object in events are
+        transparently decoded to ``unicode`` strings, and what encoding is used
+        to do that decoding. This defaults to ``None``, meaning that headers
+        will be returned as bytes. To automatically decode headers (that is, to
+        return them as unicode strings), this can be set to the string name of
+        any encoding, e.g. ``'utf-8'``.
+        """
+        return self._header_encoding
+
+    @header_encoding.setter
+    def header_encoding(self, value):
+        """
+        Enforces constraints on the value of header encoding.
+        """
+        if not isinstance(value, (bool, str, type(None))):
+            raise ValueError("header_encoding must be bool, string, or None")
+        if value is True:
+            raise ValueError("header_encoding cannot be True")
+        self._header_encoding = value
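+
+
+# A minimal usage sketch (comments only, not part of the module): the Twisted
+# server example bundled with this package builds its connection roughly as
+#
+#     config = H2Configuration(client_side=False)
+#     conn = h2.connection.H2Connection(config=config)
+#
+# header_encoding could additionally be set to e.g. 'utf-8' to have inbound
+# headers decoded to text rather than returned as bytes.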
diff --git a/tools/third_party/h2/h2/connection.py b/tools/third_party/h2/h2/connection.py
new file mode 100755
index 0000000..4405183
--- /dev/null
+++ b/tools/third_party/h2/h2/connection.py
@@ -0,0 +1,1998 @@
+# -*- coding: utf-8 -*-
+"""
+h2/connection
+~~~~~~~~~~~~~
+
+An implementation of a HTTP/2 connection.
+"""
+import base64
+
+from enum import Enum, IntEnum
+
+from hyperframe.exceptions import InvalidPaddingError
+from hyperframe.frame import (
+    GoAwayFrame, WindowUpdateFrame, HeadersFrame, DataFrame, PingFrame,
+    PushPromiseFrame, SettingsFrame, RstStreamFrame, PriorityFrame,
+    ContinuationFrame, AltSvcFrame, ExtensionFrame
+)
+from hpack.hpack import Encoder, Decoder
+from hpack.exceptions import HPACKError, OversizedHeaderListError
+
+from .config import H2Configuration
+from .errors import ErrorCodes, _error_code_from_int
+from .events import (
+    WindowUpdated, RemoteSettingsChanged, PingAcknowledged,
+    SettingsAcknowledged, ConnectionTerminated, PriorityUpdated,
+    AlternativeServiceAvailable, UnknownFrameReceived
+)
+from .exceptions import (
+    ProtocolError, NoSuchStreamError, FlowControlError, FrameTooLargeError,
+    TooManyStreamsError, StreamClosedError, StreamIDTooLowError,
+    NoAvailableStreamIDError, RFC1122Error, DenialOfServiceError
+)
+from .frame_buffer import FrameBuffer
+from .settings import Settings, SettingCodes
+from .stream import H2Stream, StreamClosedBy
+from .utilities import guard_increment_window
+from .windows import WindowManager
+
+
+class ConnectionState(Enum):
+    IDLE = 0
+    CLIENT_OPEN = 1
+    SERVER_OPEN = 2
+    CLOSED = 3
+
+
+class ConnectionInputs(Enum):
+    SEND_HEADERS = 0
+    SEND_PUSH_PROMISE = 1
+    SEND_DATA = 2
+    SEND_GOAWAY = 3
+    SEND_WINDOW_UPDATE = 4
+    SEND_PING = 5
+    SEND_SETTINGS = 6
+    SEND_RST_STREAM = 7
+    SEND_PRIORITY = 8
+    RECV_HEADERS = 9
+    RECV_PUSH_PROMISE = 10
+    RECV_DATA = 11
+    RECV_GOAWAY = 12
+    RECV_WINDOW_UPDATE = 13
+    RECV_PING = 14
+    RECV_SETTINGS = 15
+    RECV_RST_STREAM = 16
+    RECV_PRIORITY = 17
+    SEND_ALTERNATIVE_SERVICE = 18  # Added in 2.3.0
+    RECV_ALTERNATIVE_SERVICE = 19  # Added in 2.3.0
+
+
+class AllowedStreamIDs(IntEnum):
+    EVEN = 0
+    ODD = 1
+
+
+class H2ConnectionStateMachine(object):
+    """
+    A single HTTP/2 connection state machine.
+
+    This state machine, while defined in its own class, is logically part of
+    the H2Connection class also defined in this file. The state machine itself
+    maintains very little state directly, instead focusing entirely on managing
+    state transitions.
+    """
+    # For the purposes of this state machine we treat HEADERS and their
+    # associated CONTINUATION frames as a single jumbo frame. The protocol
+    # allows/requires this by preventing other frames from being interleaved in
+    # between HEADERS/CONTINUATION frames.
+    #
+    # The _transitions dictionary contains a mapping of tuples of
+    # (state, input) to tuples of (side_effect_function, end_state). This map
+    # contains all allowed transitions: anything not in this map is invalid
+    # and immediately causes a transition to ``closed``.
+
+    _transitions = {
+        # State: idle
+        (ConnectionState.IDLE, ConnectionInputs.SEND_HEADERS):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.IDLE, ConnectionInputs.RECV_HEADERS):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.IDLE, ConnectionInputs.SEND_SETTINGS):
+            (None, ConnectionState.IDLE),
+        (ConnectionState.IDLE, ConnectionInputs.RECV_SETTINGS):
+            (None, ConnectionState.IDLE),
+        (ConnectionState.IDLE, ConnectionInputs.SEND_WINDOW_UPDATE):
+            (None, ConnectionState.IDLE),
+        (ConnectionState.IDLE, ConnectionInputs.RECV_WINDOW_UPDATE):
+            (None, ConnectionState.IDLE),
+        (ConnectionState.IDLE, ConnectionInputs.SEND_PING):
+            (None, ConnectionState.IDLE),
+        (ConnectionState.IDLE, ConnectionInputs.RECV_PING):
+            (None, ConnectionState.IDLE),
+        (ConnectionState.IDLE, ConnectionInputs.SEND_GOAWAY):
+            (None, ConnectionState.CLOSED),
+        (ConnectionState.IDLE, ConnectionInputs.RECV_GOAWAY):
+            (None, ConnectionState.CLOSED),
+        (ConnectionState.IDLE, ConnectionInputs.SEND_PRIORITY):
+            (None, ConnectionState.IDLE),
+        (ConnectionState.IDLE, ConnectionInputs.RECV_PRIORITY):
+            (None, ConnectionState.IDLE),
+        (ConnectionState.IDLE, ConnectionInputs.SEND_ALTERNATIVE_SERVICE):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.IDLE, ConnectionInputs.RECV_ALTERNATIVE_SERVICE):
+            (None, ConnectionState.CLIENT_OPEN),
+
+        # State: open, client side.
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_HEADERS):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_DATA):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_GOAWAY):
+            (None, ConnectionState.CLOSED),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_WINDOW_UPDATE):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_PING):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_SETTINGS):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_PRIORITY):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_HEADERS):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_PUSH_PROMISE):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_DATA):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_GOAWAY):
+            (None, ConnectionState.CLOSED),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_WINDOW_UPDATE):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_PING):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_SETTINGS):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.SEND_RST_STREAM):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_RST_STREAM):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN, ConnectionInputs.RECV_PRIORITY):
+            (None, ConnectionState.CLIENT_OPEN),
+        (ConnectionState.CLIENT_OPEN,
+            ConnectionInputs.RECV_ALTERNATIVE_SERVICE):
+                (None, ConnectionState.CLIENT_OPEN),
+
+        # State: open, server side.
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_HEADERS):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_PUSH_PROMISE):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_DATA):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_GOAWAY):
+            (None, ConnectionState.CLOSED),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_WINDOW_UPDATE):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_PING):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_SETTINGS):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_PRIORITY):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_HEADERS):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_DATA):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_GOAWAY):
+            (None, ConnectionState.CLOSED),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_WINDOW_UPDATE):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_PING):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_SETTINGS):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_PRIORITY):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.SEND_RST_STREAM):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN, ConnectionInputs.RECV_RST_STREAM):
+            (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN,
+            ConnectionInputs.SEND_ALTERNATIVE_SERVICE):
+                (None, ConnectionState.SERVER_OPEN),
+        (ConnectionState.SERVER_OPEN,
+            ConnectionInputs.RECV_ALTERNATIVE_SERVICE):
+                (None, ConnectionState.SERVER_OPEN),
+
+        # State: closed
+        (ConnectionState.CLOSED, ConnectionInputs.SEND_GOAWAY):
+            (None, ConnectionState.CLOSED),
+        (ConnectionState.CLOSED, ConnectionInputs.RECV_GOAWAY):
+            (None, ConnectionState.CLOSED),
+    }
+
+    def __init__(self):
+        self.state = ConnectionState.IDLE
+
+    def process_input(self, input_):
+        """
+        Process a specific input in the state machine.
+        """
+        if not isinstance(input_, ConnectionInputs):
+            raise ValueError("Input must be an instance of ConnectionInputs")
+
+        try:
+            func, target_state = self._transitions[(self.state, input_)]
+        except KeyError:
+            old_state = self.state
+            self.state = ConnectionState.CLOSED
+            raise ProtocolError(
+                "Invalid input %s in state %s" % (input_, old_state)
+            )
+        else:
+            self.state = target_state
+            if func is not None:  # pragma: no cover
+                return func()
+
+            return []
+
+
+class H2Connection(object):
+    """
+    A low-level HTTP/2 connection object. This handles building and receiving
+    frames and maintains both connection and per-stream state for all streams
+    on this connection.
+
+    This wraps a HTTP/2 Connection state machine implementation, ensuring that
+    frames can only be sent/received when the connection is in a valid state.
+    It also builds stream state machines on demand to ensure that the
+    constraints of those state machines are met as well. Attempts to create
+    frames that cannot be sent will raise a ``ProtocolError``.
+
+    .. versionchanged:: 2.3.0
+       Added the ``header_encoding`` keyword argument.
+
+    .. versionchanged:: 2.5.0
+       Added the ``config`` keyword argument. Deprecated the ``client_side``
+       and ``header_encoding`` parameters.
+
+    .. versionchanged:: 3.0.0
+       Removed deprecated parameters and properties.
+
+    :param config: The configuration for the HTTP/2 connection.
+
+        .. versionadded:: 2.5.0
+
+    :type config: :class:`H2Configuration <h2.config.H2Configuration>`
+    """
+    # The initial maximum outbound frame size. This can be changed by receiving
+    # a settings frame.
+    DEFAULT_MAX_OUTBOUND_FRAME_SIZE = 65535
+
+    # The initial maximum inbound frame size. This is somewhat arbitrarily
+    # chosen.
+    DEFAULT_MAX_INBOUND_FRAME_SIZE = 2**24
+
+    # The highest acceptable stream ID.
+    HIGHEST_ALLOWED_STREAM_ID = 2**31 - 1
+
+    # The largest acceptable window increment.
+    MAX_WINDOW_INCREMENT = 2**31 - 1
+
+    # The initial default value of SETTINGS_MAX_HEADER_LIST_SIZE.
+    DEFAULT_MAX_HEADER_LIST_SIZE = 2**16
+
+    def __init__(self, config=None):
+        self.state_machine = H2ConnectionStateMachine()
+        self.streams = {}
+        self.highest_inbound_stream_id = 0
+        self.highest_outbound_stream_id = 0
+        self.encoder = Encoder()
+        self.decoder = Decoder()
+
+        # This won't always actually do anything: for versions of HPACK older
+        # than 2.3.0 it does nothing. However, we have to try!
+        self.decoder.max_header_list_size = self.DEFAULT_MAX_HEADER_LIST_SIZE
+
+        #: The configuration for this HTTP/2 connection object.
+        #:
+        #: .. versionadded:: 2.5.0
+        self.config = config
+        if self.config is None:
+            self.config = H2Configuration(
+                client_side=True,
+            )
+
+        # Objects that store settings, including defaults.
+        #
+        # We set the MAX_CONCURRENT_STREAMS value to 100 because its default is
+        # unbounded, and that's a dangerous default because it allows
+        # essentially unbounded resources to be allocated regardless of how
+        # they will be used. 100 should be suitable for the average
+        # application. This default obviously does not apply to the remote
+        # peer's settings: the remote peer controls them!
+        #
+        # We also set MAX_HEADER_LIST_SIZE to a reasonable value. This is to
+        # advertise our defence against CVE-2016-6581. However, not all
+        # versions of HPACK will let us do it. That's ok: we should at least
+        # suggest that we're not vulnerable.
+        self.local_settings = Settings(
+            client=self.config.client_side,
+            initial_values={
+                SettingCodes.MAX_CONCURRENT_STREAMS: 100,
+                SettingCodes.MAX_HEADER_LIST_SIZE:
+                    self.DEFAULT_MAX_HEADER_LIST_SIZE,
+            }
+        )
+        self.remote_settings = Settings(client=not self.config.client_side)
+
+        # The current value of the connection flow control window.
+        self.outbound_flow_control_window = (
+            self.remote_settings.initial_window_size
+        )
+
+        #: The maximum size of a frame that can be emitted by this peer, in
+        #: bytes.
+        self.max_outbound_frame_size = self.remote_settings.max_frame_size
+
+        #: The maximum size of a frame that can be received by this peer, in
+        #: bytes.
+        self.max_inbound_frame_size = self.local_settings.max_frame_size
+
+        # Buffer for incoming data.
+        self.incoming_buffer = FrameBuffer(server=not self.config.client_side)
+
+        # A private variable to store a sequence of received header frames
+        # until completion.
+        self._header_frames = []
+
+        # Data that needs to be sent.
+        self._data_to_send = b''
+
+        # Keeps track of how streams are closed.
+        # Used to ensure that we don't blow up in the face of frames that were
+        # in flight when a RST_STREAM was sent.
+        # Also used to determine whether we should consider a frame received
+        # while a stream is closed as either a stream error or a connection
+        # error.
+        self._closed_streams = {}
+
+        # The flow control window manager for the connection.
+        self._inbound_flow_control_window_manager = WindowManager(
+            max_window_size=self.local_settings.initial_window_size
+        )
+
+        # When in doubt use dict-dispatch.
+        self._frame_dispatch_table = {
+            HeadersFrame: self._receive_headers_frame,
+            PushPromiseFrame: self._receive_push_promise_frame,
+            SettingsFrame: self._receive_settings_frame,
+            DataFrame: self._receive_data_frame,
+            WindowUpdateFrame: self._receive_window_update_frame,
+            PingFrame: self._receive_ping_frame,
+            RstStreamFrame: self._receive_rst_stream_frame,
+            PriorityFrame: self._receive_priority_frame,
+            GoAwayFrame: self._receive_goaway_frame,
+            ContinuationFrame: self._receive_naked_continuation,
+            AltSvcFrame: self._receive_alt_svc_frame,
+            ExtensionFrame: self._receive_unknown_frame
+        }
+
+    def _prepare_for_sending(self, frames):
+        if not frames:
+            return
+        self._data_to_send += b''.join(f.serialize() for f in frames)
+        assert all(f.body_len <= self.max_outbound_frame_size for f in frames)
+
+    def _open_streams(self, remainder):
+        """
+        A common method of counting number of open streams. Returns the number
+        of streams that are open *and* that have (stream ID % 2) == remainder.
+        While it iterates, also deletes any closed streams.
+        """
+        count = 0
+        to_delete = []
+
+        for stream_id, stream in self.streams.items():
+            if stream.open and (stream_id % 2 == remainder):
+                count += 1
+            elif stream.closed:
+                to_delete.append(stream_id)
+
+        for stream_id in to_delete:
+            stream = self.streams.pop(stream_id)
+            self._closed_streams[stream_id] = stream.closed_by
+
+        return count
+
+    @property
+    def open_outbound_streams(self):
+        """
+        The current number of open outbound streams.
+        """
+        outbound_numbers = int(self.config.client_side)
+        return self._open_streams(outbound_numbers)
+
+    @property
+    def open_inbound_streams(self):
+        """
+        The current number of open inbound streams.
+        """
+        inbound_numbers = int(not self.config.client_side)
+        return self._open_streams(inbound_numbers)
+
+    @property
+    def inbound_flow_control_window(self):
+        """
+        The size of the inbound flow control window for the connection. This is
+        rarely publicly useful: instead, use :meth:`remote_flow_control_window
+        <h2.connection.H2Connection.remote_flow_control_window>`. This
+        property is present largely as a convenient shortcut to that data.
+        """
+        return self._inbound_flow_control_window_manager.current_window_size
+
+    def _begin_new_stream(self, stream_id, allowed_ids):
+        """
+        Initiate a new stream.
+
+        .. versionchanged:: 2.0.0
+           Removed this function from the public API.
+
+        :param stream_id: The ID of the stream to open.
+        :param allowed_ids: What kind of stream ID is allowed.
+        """
+        self.config.logger.debug(
+            "Attempting to initiate stream ID %d", stream_id
+        )
+        outbound = self._stream_id_is_outbound(stream_id)
+        highest_stream_id = (
+            self.highest_outbound_stream_id if outbound else
+            self.highest_inbound_stream_id
+        )
+
+        if stream_id <= highest_stream_id:
+            raise StreamIDTooLowError(stream_id, highest_stream_id)
+
+        if (stream_id % 2) != int(allowed_ids):
+            raise ProtocolError(
+                "Invalid stream ID for peer."
+            )
+
+        s = H2Stream(
+            stream_id,
+            config=self.config,
+            inbound_window_size=self.local_settings.initial_window_size,
+            outbound_window_size=self.remote_settings.initial_window_size
+        )
+        self.config.logger.debug("Stream ID %d created", stream_id)
+        s.max_inbound_frame_size = self.max_inbound_frame_size
+        s.max_outbound_frame_size = self.max_outbound_frame_size
+
+        self.streams[stream_id] = s
+        self.config.logger.debug("Current streams: %s", self.streams.keys())
+
+        if outbound:
+            self.highest_outbound_stream_id = stream_id
+        else:
+            self.highest_inbound_stream_id = stream_id
+
+        return s
+
+    def initiate_connection(self):
+        """
+        Provides any data that needs to be sent at the start of the connection.
+        Must be called for both clients and servers.
+        """
+        self.config.logger.debug("Initializing connection")
+        self.state_machine.process_input(ConnectionInputs.SEND_SETTINGS)
+        if self.config.client_side:
+            preamble = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'
+        else:
+            preamble = b''
+
+        f = SettingsFrame(0)
+        for setting, value in self.local_settings.items():
+            f.settings[setting] = value
+        self.config.logger.debug(
+            "Send Settings frame: %s", self.local_settings
+        )
+
+        self._data_to_send += preamble + f.serialize()
+
+    def initiate_upgrade_connection(self, settings_header=None):
+        """
+        Call to initialise the connection object for use with an upgraded
+        HTTP/2 connection (i.e. a connection negotiated using the
+        ``Upgrade: h2c`` HTTP header).
+
+        This method differs from :meth:`initiate_connection
+        <h2.connection.H2Connection.initiate_connection>` in several ways.
+        Firstly, it handles the additional SETTINGS frame that is sent in the
+        ``HTTP2-Settings`` header field. When called on a client connection,
+        this method will return a bytestring that the caller can put in the
+        ``HTTP2-Settings`` field they send on their initial request. When
+        called on a server connection, the user **must** provide the value they
+        received from the client in the ``HTTP2-Settings`` header field to the
+        ``settings_header`` argument, which will be used appropriately.
+
+        Additionally, this method sets up stream 1 in a half-closed state
+        appropriate for this side of the connection, to reflect the fact that
+        the request is already complete.
+
+        Finally, this method also prepares the appropriate preamble to be sent
+        after the upgrade.
+
+        .. versionadded:: 2.3.0
+
+        :param settings_header: (optional, server-only): The value of the
+             ``HTTP2-Settings`` header field received from the client.
+        :type settings_header: ``bytes``
+
+        :returns: For clients, a bytestring to put in the ``HTTP2-Settings``.
+            For servers, returns nothing.
+        :rtype: ``bytes`` or ``None``
+        """
+        self.config.logger.debug(
+            "Upgrade connection. Current settings: %s", self.local_settings
+        )
+
+        frame_data = None
+        # Begin by getting the preamble in place.
+        self.initiate_connection()
+
+        if self.config.client_side:
+            f = SettingsFrame(0)
+            for setting, value in self.local_settings.items():
+                f.settings[setting] = value
+
+            frame_data = f.serialize_body()
+            frame_data = base64.urlsafe_b64encode(frame_data)
+        elif settings_header:
+            # We have a settings header from the client. This needs to be
+            # applied, but we want to throw away the ACK. We do this by
+            # inserting the data into a Settings frame and then passing it to
+            # the state machine, but ignoring the return value.
+            settings_header = base64.urlsafe_b64decode(settings_header)
+            f = SettingsFrame(0)
+            f.parse_body(settings_header)
+            self._receive_settings_frame(f)
+
+        # Set up appropriate state. Stream 1 in a half-closed state:
+        # half-closed(local) for clients, half-closed(remote) for servers.
+        # Additionally, we need to set up the Connection state machine.
+        connection_input = (
+            ConnectionInputs.SEND_HEADERS if self.config.client_side
+            else ConnectionInputs.RECV_HEADERS
+        )
+        self.config.logger.debug("Process input %s", connection_input)
+        self.state_machine.process_input(connection_input)
+
+        # Set up stream 1.
+        self._begin_new_stream(stream_id=1, allowed_ids=AllowedStreamIDs.ODD)
+        self.streams[1].upgrade(self.config.client_side)
+        return frame_data
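+
+    # A usage sketch for the client side of the upgrade described above
+    # (comments only, not executed): the returned bytestring is what a client
+    # would place in the HTTP2-Settings header of its HTTP/1.1 upgrade
+    # request, alongside "Upgrade: h2c", e.g.
+    #
+    #     settings_value = conn.initiate_upgrade_connection()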
+
+    def _get_or_create_stream(self, stream_id, allowed_ids):
+        """
+        Gets a stream by its stream ID. Will create one if one does not already
+        exist. Use allowed_ids to circumvent the usual stream ID rules for
+        clients and servers.
+
+        .. versionchanged:: 2.0.0
+           Removed this function from the public API.
+        """
+        try:
+            return self.streams[stream_id]
+        except KeyError:
+            return self._begin_new_stream(stream_id, allowed_ids)
+
+    def _get_stream_by_id(self, stream_id):
+        """
+        Gets a stream by its stream ID. Raises NoSuchStreamError if the stream
+        ID does not correspond to a known stream and is higher than the current
+        maximum; raises StreamClosedError if it is lower than the current
+        maximum.
+
+        .. versionchanged:: 2.0.0
+           Removed this function from the public API.
+        """
+        try:
+            return self.streams[stream_id]
+        except KeyError:
+            outbound = self._stream_id_is_outbound(stream_id)
+            highest_stream_id = (
+                self.highest_outbound_stream_id if outbound else
+                self.highest_inbound_stream_id
+            )
+
+            if stream_id > highest_stream_id:
+                raise NoSuchStreamError(stream_id)
+            else:
+                raise StreamClosedError(stream_id)
+
+    def get_next_available_stream_id(self):
+        """
+        Returns an integer suitable for use as the stream ID for the next
+        stream created by this endpoint. For server endpoints, this stream ID
+        will be even. For client endpoints, this stream ID will be odd. If no
+        stream IDs are available, raises :class:`NoAvailableStreamIDError
+        <h2.exceptions.NoAvailableStreamIDError>`.
+
+        .. warning:: The return value from this function does not change until
+                     the stream ID has actually been used by sending or pushing
+                     headers on that stream. For that reason, it should be
+                     called as close as possible to the actual use of the
+                     stream ID.
+
+        .. versionadded:: 2.0.0
+
+        :raises: :class:`NoAvailableStreamIDError
+            <h2.exceptions.NoAvailableStreamIDError>`
+        :returns: The next free stream ID this peer can use to initiate a
+            stream.
+        :rtype: ``int``
+        """
+        # No streams have been opened yet, so return the lowest allowed stream
+        # ID.
+        if not self.highest_outbound_stream_id:
+            next_stream_id = 1 if self.config.client_side else 2
+        else:
+            next_stream_id = self.highest_outbound_stream_id + 2
+        self.config.logger.debug(
+            "Next available stream ID %d", next_stream_id
+        )
+        if next_stream_id > self.HIGHEST_ALLOWED_STREAM_ID:
+            raise NoAvailableStreamIDError("Exhausted allowed stream IDs")
+
+        return next_stream_id
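+
+    # A common pattern (sketch, comments only): obtain the ID immediately
+    # before opening the stream, as the warning above advises, e.g.
+    #
+    #     stream_id = conn.get_next_available_stream_id()
+    #     conn.send_headers(stream_id, request_headers)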
+
+    def send_headers(self, stream_id, headers, end_stream=False,
+                     priority_weight=None, priority_depends_on=None,
+                     priority_exclusive=None):
+        """
+        Send headers on a given stream.
+
+        This function can be used to send request or response headers: the kind
+        that are sent depends on whether this connection has been opened as a
+        client or server connection, and whether the stream was opened by the
+        remote peer or not.
+
+        If this is a client connection, calling ``send_headers`` will send the
+        headers as a request. It will also implicitly open the stream being
+        used. If this is a client connection and ``send_headers`` has *already*
+        been called, this will send trailers instead.
+
+        If this is a server connection, calling ``send_headers`` will send the
+        headers as a response. It is a protocol error for a server to open a
+        stream by sending headers. If this is a server connection and
+        ``send_headers`` has *already* been called, this will send trailers
+        instead.
+
+        When acting as a server, you may call ``send_headers`` any number of
+        times allowed by the following rules, in this order:
+
+        - zero or more times with ``(':status', '1XX')`` (where ``1XX`` is a
+          placeholder for any 100-level status code).
+        - once with any other status header.
+        - zero or one time for trailers.
+
+        That is, you are allowed to send as many informational responses as you
+        like, followed by one complete response and zero or one HTTP trailer
+        blocks.
+
+        Clients may send one or two header blocks: one request block, and
+        optionally one trailer block.
+
+        If it is important to send HPACK "never indexed" header fields (as
+        defined in `RFC 7541 Section 7.1.3
+        <https://tools.ietf.org/html/rfc7541#section-7.1.3>`_), the user may
+        instead provide headers using the HPACK library's :class:`HeaderTuple
+        <hpack:hpack.HeaderTuple>` and :class:`NeverIndexedHeaderTuple
+        <hpack:hpack.NeverIndexedHeaderTuple>` objects.
+
+        This method also allows users to prioritize the stream immediately,
+        by sending priority information on the HEADERS frame directly. To do
+        this, any one of ``priority_weight``, ``priority_depends_on``, or
+        ``priority_exclusive`` must be set to a value that is not ``None``. For
+        more information on the priority fields, see :meth:`prioritize
+        <h2.connection.H2Connection.prioritize>`.
+
+        .. warning:: In HTTP/2, it is mandatory that all the HTTP/2 special
+            headers (that is, ones whose header keys begin with ``:``) appear
+            at the start of the header block, before any normal headers.
+
+        .. versionchanged:: 2.3.0
+           Added support for using :class:`HeaderTuple
+           <hpack:hpack.HeaderTuple>` objects to store headers.
+
+        .. versionchanged:: 2.4.0
+           Added the ability to provide priority keyword arguments:
+           ``priority_weight``, ``priority_depends_on``, and
+           ``priority_exclusive``.
+
+        :param stream_id: The stream ID to send the headers on. If this stream
+            does not currently exist, it will be created.
+        :type stream_id: ``int``
+
+        :param headers: The request/response headers to send.
+        :type headers: An iterable of two tuples of bytestrings or
+            :class:`HeaderTuple <hpack:hpack.HeaderTuple>` objects.
+
+        :param end_stream: Whether this headers frame should end the stream
+            immediately (that is, whether no more data will be sent after this
+            frame). Defaults to ``False``.
+        :type end_stream: ``bool``
+
+        :param priority_weight: Sets the priority weight of the stream. See
+            :meth:`prioritize <h2.connection.H2Connection.prioritize>` for more
+            about how this field works. Defaults to ``None``, which means that
+            no priority information will be sent.
+        :type priority_weight: ``int`` or ``None``
+
+        :param priority_depends_on: Sets which stream this one depends on for
+            priority purposes. See :meth:`prioritize
+            <h2.connection.H2Connection.prioritize>` for more about how this
+            field works. Defaults to ``None``, which means that no priority
+            information will be sent.
+        :type priority_depends_on: ``int`` or ``None``
+
+        :param priority_exclusive: Sets whether this stream exclusively depends
+            on the stream given in ``priority_depends_on`` for priority
+            purposes. See :meth:`prioritize
+            <h2.connection.H2Connection.prioritize>` for more about how this
+            field works. Defaults to ``None``, which means that no priority
+            information will be sent.
+        :type priority_exclusive: ``bool`` or ``None``
+
+        :returns: Nothing
+        """
+        self.config.logger.debug(
+            "Send headers on stream ID %d", stream_id
+        )
+
+        # Check we can open the stream.
+        if stream_id not in self.streams:
+            max_open_streams = self.remote_settings.max_concurrent_streams
+            if (self.open_outbound_streams + 1) > max_open_streams:
+                raise TooManyStreamsError(
+                    "Max outbound streams is %d, %d open" %
+                    (max_open_streams, self.open_outbound_streams)
+                )
+
+        self.state_machine.process_input(ConnectionInputs.SEND_HEADERS)
+        stream = self._get_or_create_stream(
+            stream_id, AllowedStreamIDs(self.config.client_side)
+        )
+        frames = stream.send_headers(
+            headers, self.encoder, end_stream
+        )
+
+        # We may need to send priority information.
+        priority_present = (
+            (priority_weight is not None) or
+            (priority_depends_on is not None) or
+            (priority_exclusive is not None)
+        )
+
+        if priority_present:
+            if not self.config.client_side:
+                raise RFC1122Error("Servers SHOULD NOT prioritize streams.")
+
+            headers_frame = frames[0]
+            headers_frame.flags.add('PRIORITY')
+            frames[0] = _add_frame_priority(
+                headers_frame,
+                priority_weight,
+                priority_depends_on,
+                priority_exclusive
+            )
+
+        self._prepare_for_sending(frames)
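+
+    # A sketch of a typical client request header block (comments only),
+    # mirroring the Twisted POST example bundled with this package; as the
+    # warning above notes, the pseudo-headers must come first:
+    #
+    #     conn.send_headers(1, [
+    #         (':method', 'GET'), (':authority', 'example.com'),
+    #         (':scheme', 'https'), (':path', '/'),
+    #     ], end_stream=True)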
+
+    def send_data(self, stream_id, data, end_stream=False, pad_length=None):
+        """
+        Send data on a given stream.
+
+        This method does no breaking up of data: if the data is larger than the
+        value returned by :meth:`local_flow_control_window
+        <h2.connection.H2Connection.local_flow_control_window>` for this stream
+        then a :class:`FlowControlError <h2.exceptions.FlowControlError>` will
+        be raised. If the data is larger than :data:`max_outbound_frame_size
+        <h2.connection.H2Connection.max_outbound_frame_size>` then a
+        :class:`FrameTooLargeError <h2.exceptions.FrameTooLargeError>` will be
+        raised.
+
+        Hyper-h2 does this to avoid buffering the data internally. If the user
+        has more data to send than hyper-h2 will allow, consider breaking it up
+        and buffering it externally.
+
+        :param stream_id: The ID of the stream on which to send the data.
+        :type stream_id: ``int``
+        :param data: The data to send on the stream.
+        :type data: ``bytes``
+        :param end_stream: (optional) Whether this is the last data to be sent
+            on the stream. Defaults to ``False``.
+        :type end_stream: ``bool``
+        :param pad_length: (optional) Length of the padding to apply to the
+            data frame. Defaults to ``None`` for no use of padding. Note that
+            a value of ``0`` results in padding of length ``0``
+            (with the "padding" flag set on the frame).
+
+            .. versionadded:: 2.6.0
+
+        :type pad_length: ``int``
+        :returns: Nothing
+        """
+        self.config.logger.debug(
+            "Send data on stream ID %d with len %d", stream_id, len(data)
+        )
+        frame_size = len(data)
+        if pad_length is not None:
+            if not isinstance(pad_length, int):
+                raise TypeError("pad_length must be an int")
+            if pad_length < 0 or pad_length > 255:
+                raise ValueError("pad_length must be within range: [0, 255]")
+            # Account for padding bytes plus the 1-byte padding length field.
+            frame_size += pad_length + 1
+        self.config.logger.debug(
+            "Frame size on stream ID %d is %d", stream_id, frame_size
+        )
+
+        if frame_size > self.local_flow_control_window(stream_id):
+            raise FlowControlError(
+                "Cannot send %d bytes, flow control window is %d." %
+                (frame_size, self.local_flow_control_window(stream_id))
+            )
+        elif frame_size > self.max_outbound_frame_size:
+            raise FrameTooLargeError(
+                "Cannot send frame size %d, max frame size is %d" %
+                (frame_size, self.max_outbound_frame_size)
+            )
+
+        self.state_machine.process_input(ConnectionInputs.SEND_DATA)
+        frames = self.streams[stream_id].send_data(
+            data, end_stream, pad_length=pad_length
+        )
+
+        self._prepare_for_sending(frames)
+
+        self.outbound_flow_control_window -= frame_size
+        self.config.logger.debug(
+            "Outbound flow control window size is %d",
+            self.outbound_flow_control_window
+        )
+        assert self.outbound_flow_control_window >= 0
+
+    def end_stream(self, stream_id):
+        """
+        Cleanly end a given stream.
+
+        This method ends a stream by sending an empty DATA frame on that stream
+        with the ``END_STREAM`` flag set.
+
+        :param stream_id: The ID of the stream to end.
+        :type stream_id: ``int``
+        :returns: Nothing
+        """
+        self.config.logger.debug("End stream ID %d", stream_id)
+        self.state_machine.process_input(ConnectionInputs.SEND_DATA)
+        frames = self.streams[stream_id].end_stream()
+        self._prepare_for_sending(frames)
+
+    def increment_flow_control_window(self, increment, stream_id=None):
+        """
+        Increment a flow control window, optionally for a single stream. Allows
+        the remote peer to send more data.
+
+        .. versionchanged:: 2.0.0
+           Rejects attempts to increment the flow control window by out of
+           range values with a ``ValueError``.
+
+        :param increment: The amount to increment the flow control window by.
+        :type increment: ``int``
+        :param stream_id: (optional) The ID of the stream that should have its
+            flow control window opened. If not present or ``None``, the
+            connection flow control window will be opened instead.
+        :type stream_id: ``int`` or ``None``
+        :returns: Nothing
+        :raises: ``ValueError``
+        """
+        if not (1 <= increment <= self.MAX_WINDOW_INCREMENT):
+            raise ValueError(
+                "Flow control increment must be between 1 and %d" %
+                self.MAX_WINDOW_INCREMENT
+            )
+
+        self.state_machine.process_input(ConnectionInputs.SEND_WINDOW_UPDATE)
+
+        if stream_id is not None:
+            stream = self.streams[stream_id]
+            frames = stream.increase_flow_control_window(
+                increment
+            )
+        else:
+            self._inbound_flow_control_window_manager.window_opened(increment)
+            f = WindowUpdateFrame(0)
+            f.window_increment = increment
+            frames = [f]
+
+        self.config.logger.debug(
+            "Increase stream ID %d flow control window by %d",
+            stream_id, increment
+        )
+        self._prepare_for_sending(frames)
+
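+    # A short sketch of reopening windows with
+    # ``increment_flow_control_window`` after the application has consumed a
+    # received chunk, assuming ``conn`` is an ``H2Connection`` and ``chunk``
+    # is the consumed data (illustrative names). Both the stream window and
+    # the connection window (``stream_id=None``) typically need reopening:
+    #
+    #     conn.increment_flow_control_window(len(chunk), stream_id=stream_id)
+    #     conn.increment_flow_control_window(len(chunk))
+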
+    def push_stream(self, stream_id, promised_stream_id, request_headers):
+        """
+        Push a response to the client by sending a PUSH_PROMISE frame.
+
+        If it is important to send HPACK "never indexed" header fields (as
+        defined in `RFC 7541 Section 7.1.3
+        <https://tools.ietf.org/html/rfc7541#section-7.1.3>`_), the user may
+        instead provide headers using the HPACK library's :class:`HeaderTuple
+        <hpack:hpack.HeaderTuple>` and :class:`NeverIndexedHeaderTuple
+        <hpack:hpack.NeverIndexedHeaderTuple>` objects.
+
+        :param stream_id: The ID of the stream that this push is a response to.
+        :type stream_id: ``int``
+        :param promised_stream_id: The ID of the stream that the pushed
+            response will be sent on.
+        :type promised_stream_id: ``int``
+        :param request_headers: The headers of the request that the pushed
+            response will be responding to.
+        :type request_headers: An iterable of two tuples of bytestrings or
+            :class:`HeaderTuple <hpack:hpack.HeaderTuple>` objects.
+        :returns: Nothing
+        """
+        self.config.logger.debug(
+            "Send Push Promise frame on stream ID %d", stream_id
+        )
+
+        if not self.remote_settings.enable_push:
+            raise ProtocolError("Remote peer has disabled stream push")
+
+        self.state_machine.process_input(ConnectionInputs.SEND_PUSH_PROMISE)
+        stream = self._get_stream_by_id(stream_id)
+
+        # We need to prevent users pushing streams in response to streams that
+        # they themselves have already pushed: see #163 and RFC 7540 § 6.6. The
+        # easiest way to do that is to assert that the stream_id is not even:
+        # this shortcut works because only servers can push and the state
+        # machine will enforce this.
+        if (stream_id % 2) == 0:
+            raise ProtocolError("Cannot recursively push streams.")
+
+        new_stream = self._begin_new_stream(
+            promised_stream_id, AllowedStreamIDs.EVEN
+        )
+        self.streams[promised_stream_id] = new_stream
+
+        frames = stream.push_stream_in_band(
+            promised_stream_id, request_headers, self.encoder
+        )
+        new_frames = new_stream.locally_pushed()
+        self._prepare_for_sending(frames + new_frames)
+
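+    # A server-side sketch of ``push_stream``, assuming ``conn`` is an
+    # ``H2Connection`` and ``stream_id`` identifies the client request being
+    # responded to (illustrative names):
+    #
+    #     promised_id = conn.get_next_available_stream_id()
+    #     conn.push_stream(stream_id, promised_id, [
+    #         (b':method', b'GET'),
+    #         (b':authority', b'example.com'),
+    #         (b':scheme', b'https'),
+    #         (b':path', b'/style.css'),
+    #     ])
+    #     conn.send_headers(promised_id, [(b':status', b'200')])
+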
+    def ping(self, opaque_data):
+        """
+        Send a PING frame.
+
+        :param opaque_data: A bytestring of length 8 that will be sent in the
+                            PING frame.
+        :returns: Nothing
+        """
+        self.config.logger.debug("Send Ping frame")
+
+        if not isinstance(opaque_data, bytes) or len(opaque_data) != 8:
+            raise ValueError("Invalid value for ping data: %r" % opaque_data)
+
+        self.state_machine.process_input(ConnectionInputs.SEND_PING)
+        f = PingFrame(0)
+        f.opaque_data = opaque_data
+        self._prepare_for_sending([f])
+
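+    # A minimal sketch of ``ping``, assuming ``conn`` and ``sock`` as above
+    # (illustrative names); the opaque data must be exactly 8 bytes:
+    #
+    #     conn.ping(b'01234567')
+    #     sock.sendall(conn.data_to_send())
+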
+    def reset_stream(self, stream_id, error_code=0):
+        """
+        Reset a stream.
+
+        This method forcibly closes a stream by sending a RST_STREAM frame for
+        a given stream. This is not a graceful closure. To gracefully end a
+        stream, try the :meth:`end_stream
+        <h2.connection.H2Connection.end_stream>` method.
+
+        :param stream_id: The ID of the stream to reset.
+        :type stream_id: ``int``
+        :param error_code: (optional) The error code to use to reset the
+            stream. Defaults to :data:`ErrorCodes.NO_ERROR
+            <h2.errors.ErrorCodes.NO_ERROR>`.
+        :type error_code: ``int``
+        :returns: Nothing
+        """
+        self.config.logger.debug("Reset stream ID %d", stream_id)
+        self.state_machine.process_input(ConnectionInputs.SEND_RST_STREAM)
+        stream = self._get_stream_by_id(stream_id)
+        frames = stream.reset_stream(error_code)
+        self._prepare_for_sending(frames)
+
+    def close_connection(self, error_code=0, additional_data=None,
+                         last_stream_id=None):
+        """
+        Close a connection, emitting a GOAWAY frame.
+
+        .. versionchanged:: 2.4.0
+           Added ``additional_data`` and ``last_stream_id`` arguments.
+
+        :param error_code: (optional) The error code to send in the GOAWAY
+            frame.
+        :param additional_data: (optional) Additional debug data indicating
+            a reason for closing the connection. Must be a bytestring.
+        :param last_stream_id: (optional) The last stream which was processed
+            by the sender. Defaults to ``highest_inbound_stream_id``.
+        :returns: Nothing
+        """
+        self.config.logger.debug("Close connection")
+        self.state_machine.process_input(ConnectionInputs.SEND_GOAWAY)
+
+        # additional_data must be bytes, if provided.
+        if additional_data is not None:
+            assert isinstance(additional_data, bytes)
+
+        if last_stream_id is None:
+            last_stream_id = self.highest_inbound_stream_id
+
+        f = GoAwayFrame(
+            stream_id=0,
+            last_stream_id=last_stream_id,
+            error_code=error_code,
+            additional_data=(additional_data or b'')
+        )
+        self._prepare_for_sending([f])
+
+    def update_settings(self, new_settings):
+        """
+        Update the local settings. This will prepare and emit the appropriate
+        SETTINGS frame.
+
+        :param new_settings: A dictionary of {setting: new value}
+        """
+        self.config.logger.debug(
+            "Update connection settings to %s", new_settings
+        )
+        self.state_machine.process_input(ConnectionInputs.SEND_SETTINGS)
+        self.local_settings.update(new_settings)
+        s = SettingsFrame(0)
+        s.settings = new_settings
+        self._prepare_for_sending([s])
+
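+    # A short sketch of ``update_settings``, assuming ``conn`` and ``sock``
+    # (illustrative names):
+    #
+    #     from h2.settings import SettingCodes
+    #
+    #     conn.update_settings({
+    #         SettingCodes.MAX_CONCURRENT_STREAMS: 100,
+    #         SettingCodes.INITIAL_WINDOW_SIZE: 4 * 65535,
+    #     })
+    #     sock.sendall(conn.data_to_send())
+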
+    def advertise_alternative_service(self,
+                                      field_value,
+                                      origin=None,
+                                      stream_id=None):
+        """
+        Notify a client about an available Alternative Service.
+
+        An Alternative Service is defined in `RFC 7838
+        <https://tools.ietf.org/html/rfc7838>`_. An Alternative Service
+        notification informs a client that a given origin is also available
+        elsewhere.
+
+        Alternative Services can be advertised in two ways. Firstly, they can
+        be advertised explicitly: that is, a server can say "origin X is also
+        available at Y". To advertise like this, set the ``origin`` argument
+        and not the ``stream_id`` argument. Alternatively, they can be
+        advertised implicitly: that is, a server can say "the origin you're
+        contacting on stream X is also available at Y". To advertise like this,
+        set the ``stream_id`` argument and not the ``origin`` argument.
+
+        The explicit method of advertising can be done as long as the
+        connection is active. The implicit method can only be done after the
+        client has sent the request headers and before the server has sent the
+        response headers: outside of those points, Hyper-h2 will forbid sending
+        the Alternative Service advertisement by raising a ProtocolError.
+
+        The ``field_value`` parameter is specified in RFC 7838. Hyper-h2 does
+        not validate or introspect this argument: the user is required to
+        ensure that it's well-formed. ``field_value`` corresponds to RFC 7838's
+        "Alternative Service Field Value".
+
+        .. note:: It is strongly preferred to use the explicit method of
+                  advertising Alternative Services. The implicit method of
+                  advertising Alternative Services has a number of subtleties
+                  and can lead to inconsistencies between the server and
+                  client. Hyper-h2 allows both mechanisms, but caution is
+                  strongly advised.
+
+        .. versionadded:: 2.3.0
+
+        :param field_value: The RFC 7838 Alternative Service Field Value. This
+            argument is not introspected by Hyper-h2: the user is responsible
+            for ensuring that it is well-formed.
+        :type field_value: ``bytes``
+
+        :param origin: The origin/authority to which the Alternative Service
+            being advertised applies. Must not be provided at the same time as
+            ``stream_id``.
+        :type origin: ``bytes`` or ``None``
+
+        :param stream_id: The ID of the stream which was sent to the authority
+            for which this Alternative Service advertisement applies. Must not
+            be provided at the same time as ``origin``.
+        :type stream_id: ``int`` or ``None``
+
+        :returns: Nothing.
+        """
+        if not isinstance(field_value, bytes):
+            raise ValueError("Field must be bytestring.")
+
+        if origin is not None and stream_id is not None:
+            raise ValueError("Must not provide both origin and stream_id")
+
+        self.state_machine.process_input(
+            ConnectionInputs.SEND_ALTERNATIVE_SERVICE
+        )
+
+        if origin is not None:
+            # This ALTSVC is sent on stream zero.
+            f = AltSvcFrame(stream_id=0)
+            f.origin = origin
+            f.field = field_value
+            frames = [f]
+        else:
+            stream = self._get_stream_by_id(stream_id)
+            frames = stream.advertise_alternative_service(field_value)
+
+        self._prepare_for_sending(frames)
+
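+    # A server-side sketch of the explicit advertisement style described
+    # above, assuming ``conn`` is an ``H2Connection``; the origin and field
+    # value shown are examples only:
+    #
+    #     conn.advertise_alternative_service(
+    #         field_value=b'h2=":8443"; ma=3600',
+    #         origin=b'https://example.com',
+    #     )
+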
+    def prioritize(self, stream_id, weight=None, depends_on=None,
+                   exclusive=None):
+        """
+        Notify a server about the priority of a stream.
+
+        Stream priorities are a form of guidance to a remote server: they
+        inform the server about how important a given response is, so that the
+        server may allocate its resources (e.g. bandwidth, CPU time, etc.)
+        accordingly. This exists to allow clients to ensure that the most
+        important data arrives earlier, while less important data does not
+        starve out the more important data.
+
+        Stream priorities are explained in depth in `RFC 7540 Section 5.3
+        <https://tools.ietf.org/html/rfc7540#section-5.3>`_.
+
+        This method updates the priority information of a single stream. It may
+        be called well before a stream is actively in use, or well after a
+        stream is closed.
+
+        .. warning:: RFC 7540 allows for servers to change the priority of
+                     streams. However, hyper-h2 **does not** allow server
+                     stacks to do this. This is because most clients do not
+                     adequately know how to respond when provided conflicting
+                     priority information, and relatively little utility is
+                     provided by making that functionality available.
+
+        .. note:: hyper-h2 **does not** maintain any information about the
+                  RFC 7540 priority tree. That means that hyper-h2 does not
+                  prevent incautious users from creating invalid priority
+                  trees, particularly by creating priority loops. While some
+                  basic error checking is provided by hyper-h2, users are
+                  strongly recommended to understand their prioritisation
+                  strategies before using the priority tools here.
+
+        .. note:: Priority information is strictly advisory. Servers are
+                  allowed to disregard it entirely. Avoid relying on the idea
+                  that your priority signaling will definitely be obeyed.
+
+        .. versionadded:: 2.4.0
+
+        :param stream_id: The ID of the stream to prioritize.
+        :type stream_id: ``int``
+
+        :param weight: The weight to give the stream. Defaults to ``16``, the
+             default weight of any stream. May be any value between ``1`` and
+             ``256`` inclusive. The relative weight of a stream indicates what
+             proportion of available resources will be allocated to that
+             stream.
+        :type weight: ``int``
+
+        :param depends_on: The ID of the stream on which this stream depends.
+             This stream will only be progressed if it is impossible to
+             progress the parent stream (the one on which this one depends).
+             Passing the value ``0`` means that this stream does not depend on
+             any other. Defaults to ``0``.
+        :type depends_on: ``int``
+
+        :param exclusive: Whether this stream is an exclusive dependency of its
+            "parent" stream (i.e. the stream given by ``depends_on``). If a
+            stream is an exclusive dependency of another, that means that all
+            previously-set children of the parent are moved to become children
+            of the new exclusively-dependent stream. Defaults to ``False``.
+        :type exclusive: ``bool``
+        """
+        if not self.config.client_side:
+            raise RFC1122Error("Servers SHOULD NOT prioritize streams.")
+
+        self.state_machine.process_input(
+            ConnectionInputs.SEND_PRIORITY
+        )
+
+        frame = PriorityFrame(stream_id)
+        frame = _add_frame_priority(frame, weight, depends_on, exclusive)
+
+        self._prepare_for_sending([frame])
+
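+    # A client-side sketch of ``prioritize``, assuming ``conn`` is an
+    # ``H2Connection`` and ``css_stream``/``img_stream`` are existing stream
+    # IDs (illustrative names):
+    #
+    #     conn.prioritize(css_stream, weight=32)
+    #     conn.prioritize(img_stream, weight=8, depends_on=css_stream)
+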
+    def local_flow_control_window(self, stream_id):
+        """
+        Returns the maximum amount of data that can be sent on stream
+        ``stream_id``.
+
+        This value will never be larger than the total data that can be sent on
+        the connection: even if the given stream allows more data, the
+        connection window provides a logical maximum to the amount of data that
+        can be sent.
+
+        The maximum data that can be sent in a single data frame on a stream
+        is either this value, or the maximum frame size, whichever is
+        *smaller*.
+
+        :param stream_id: The ID of the stream whose flow control window is
+            being queried.
+        :type stream_id: ``int``
+        :returns: The amount of data in bytes that can be sent on the stream
+            before the flow control window is exhausted.
+        :rtype: ``int``
+        """
+        stream = self._get_stream_by_id(stream_id)
+        return min(
+            self.outbound_flow_control_window,
+            stream.outbound_flow_control_window
+        )
+
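+    # A sketch of a send loop bounded by ``local_flow_control_window``,
+    # assuming ``conn``, ``sock``, ``stream_id`` and a pending ``payload``
+    # bytestring (illustrative names):
+    #
+    #     while payload:
+    #         window = conn.local_flow_control_window(stream_id)
+    #         chunk = payload[:min(window, conn.max_outbound_frame_size)]
+    #         if not chunk:
+    #             break  # wait for a WindowUpdated event before retrying
+    #         conn.send_data(stream_id, chunk)
+    #         sock.sendall(conn.data_to_send())
+    #         payload = payload[len(chunk):]
+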
+    def remote_flow_control_window(self, stream_id):
+        """
+        Returns the maximum amount of data the remote peer can send on stream
+        ``stream_id``.
+
+        This value will never be larger than the total data that the remote
+        peer can send on the connection: even if the given stream allows more
+        data, the connection window provides a logical maximum to the amount
+        of data that can be received.
+
+        The maximum data the remote peer can send in a single data frame on a
+        stream is either this value or the maximum frame size, whichever is
+        *smaller*.
+
+        :param stream_id: The ID of the stream whose flow control window is
+            being queried.
+        :type stream_id: ``int``
+        :returns: The amount of data in bytes that can be received on the
+            stream before the flow control window is exhausted.
+        :rtype: ``int``
+        """
+        stream = self._get_stream_by_id(stream_id)
+        return min(
+            self.inbound_flow_control_window,
+            stream.inbound_flow_control_window
+        )
+
+    def acknowledge_received_data(self, acknowledged_size, stream_id):
+        """
+        Inform the :class:`H2Connection <h2.connection.H2Connection>` that a
+        certain number of flow-controlled bytes have been processed, and that
+        the space should be handed back to the remote peer at an opportune
+        time.
+
+        .. versionadded:: 2.5.0
+
+        :param acknowledged_size: The total *flow-controlled size* of the data
+            that has been processed. Note that this must include the amount of
+            padding that was sent with that data.
+        :type acknowledged_size: ``int``
+        :param stream_id: The ID of the stream on which this data was received.
+        :type stream_id: ``int``
+        :returns: Nothing
+        :rtype: ``None``
+        """
+        self.config.logger.debug(
+            "Ack received data on stream ID %d with size %d",
+            stream_id, acknowledged_size
+        )
+        if stream_id <= 0:
+            raise ValueError(
+                "Stream ID %d is not valid for acknowledge_received_data" %
+                stream_id
+            )
+        if acknowledged_size < 0:
+            raise ValueError("Cannot acknowledge negative data")
+
+        frames = []
+
+        conn_manager = self._inbound_flow_control_window_manager
+        conn_increment = conn_manager.process_bytes(acknowledged_size)
+        if conn_increment:
+            f = WindowUpdateFrame(0)
+            f.window_increment = conn_increment
+            frames.append(f)
+
+        try:
+            stream = self._get_stream_by_id(stream_id)
+        except StreamClosedError:
+            # The stream is already gone. We're not worried about incrementing
+            # the window in this case.
+            pass
+        else:
+            # No point incrementing the windows of closed streams.
+            if stream.open:
+                frames.extend(
+                    stream.acknowledge_received_data(acknowledged_size)
+                )
+
+        self._prepare_for_sending(frames)
+
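+    # A receive-side sketch of ``acknowledge_received_data``, assuming
+    # ``conn`` is an ``H2Connection`` and ``event`` is a ``DataReceived``
+    # event (illustrative names); note the use of the flow-controlled length
+    # rather than ``len(event.data)``:
+    #
+    #     conn.acknowledge_received_data(
+    #         event.flow_controlled_length, event.stream_id
+    #     )
+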
+    def data_to_send(self, amt=None):
+        """
+        Returns some data for sending out of the internal data buffer.
+
+        This method is analogous to ``read`` on a file-like object, but it
+        doesn't block. Instead, it returns as much data as the user asks for,
+        or less if that much data is not available. It does not perform any
+        I/O, and so uses a different name.
+
+        :param amt: (optional) The maximum amount of data to return. If not
+            set, or set to ``None``, will return as much data as possible.
+        :type amt: ``int``
+        :returns: A bytestring containing the data to send on the wire.
+        :rtype: ``bytes``
+        """
+        if amt is None:
+            data = self._data_to_send
+            self._data_to_send = b''
+            return data
+        else:
+            data = self._data_to_send[:amt]
+            self._data_to_send = self._data_to_send[amt:]
+            return data
+
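+    # A minimal sketch of flushing the outbound buffer with ``data_to_send``,
+    # assuming ``conn`` and a connected socket ``sock`` (illustrative names):
+    #
+    #     outbound = conn.data_to_send()
+    #     if outbound:
+    #         sock.sendall(outbound)
+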
+    def clear_outbound_data_buffer(self):
+        """
+        Clears the outbound data buffer, such that if this call was immediately
+        followed by a call to
+        :meth:`data_to_send <h2.connection.H2Connection.data_to_send>`, that
+        call would return no data.
+
+        This method should not normally be used, but is made available to avoid
+        exposing implementation details.
+        """
+        self._data_to_send = b''
+
+    def _acknowledge_settings(self):
+        """
+        Acknowledge settings that have been received.
+
+        .. versionchanged:: 2.0.0
+           Removed from public API, removed useless ``event`` parameter, made
+           automatic.
+
+        :returns: Nothing
+        """
+        self.state_machine.process_input(ConnectionInputs.SEND_SETTINGS)
+
+        changes = self.remote_settings.acknowledge()
+
+        if SettingCodes.INITIAL_WINDOW_SIZE in changes:
+            setting = changes[SettingCodes.INITIAL_WINDOW_SIZE]
+            self._flow_control_change_from_settings(
+                setting.original_value,
+                setting.new_value,
+            )
+
+        # HEADER_TABLE_SIZE changes by the remote peer affect our encoder: cf.
+        # RFC 7540 Section 6.5.2.
+        if SettingCodes.HEADER_TABLE_SIZE in changes:
+            setting = changes[SettingCodes.HEADER_TABLE_SIZE]
+            self.encoder.header_table_size = setting.new_value
+
+        if SettingCodes.MAX_FRAME_SIZE in changes:
+            setting = changes[SettingCodes.MAX_FRAME_SIZE]
+            self.max_outbound_frame_size = setting.new_value
+            for stream in self.streams.values():
+                stream.max_outbound_frame_size = setting.new_value
+
+        f = SettingsFrame(0)
+        f.flags.add('ACK')
+        return [f]
+
+    def _flow_control_change_from_settings(self, old_value, new_value):
+        """
+        Update flow control windows in response to a change in the value of
+        SETTINGS_INITIAL_WINDOW_SIZE.
+
+        When this setting is changed, it automatically updates all flow control
+        windows by the delta in the settings values. Note that it does not
+        increment the *connection* flow control window, per section 6.9.2 of
+        RFC 7540.
+        """
+        delta = new_value - old_value
+
+        for stream in self.streams.values():
+            stream.outbound_flow_control_window = guard_increment_window(
+                stream.outbound_flow_control_window,
+                delta
+            )
+
+    def _inbound_flow_control_change_from_settings(self, old_value, new_value):
+        """
+        Update remote flow control windows in response to a change in the value
+        of SETTINGS_INITIAL_WINDOW_SIZE.
+
+        When this setting is changed, it automatically updates all remote flow
+        control windows by the delta in the settings values.
+        """
+        delta = new_value - old_value
+
+        for stream in self.streams.values():
+            stream._inbound_flow_control_change_from_settings(delta)
+
+    def receive_data(self, data):
+        """
+        Pass some received HTTP/2 data to the connection for handling.
+
+        :param data: The data received from the remote peer on the network.
+        :type data: ``bytes``
+        :returns: A list of events that the remote peer triggered by sending
+            this data.
+        """
+        self.config.logger.debug(
+            "Process received data on connection. Received data: %r", data
+        )
+
+        events = []
+        self.incoming_buffer.add_data(data)
+        self.incoming_buffer.max_frame_size = self.max_inbound_frame_size
+
+        try:
+            for frame in self.incoming_buffer:
+                events.extend(self._receive_frame(frame))
+        except InvalidPaddingError:
+            self._terminate_connection(ErrorCodes.PROTOCOL_ERROR)
+            raise ProtocolError("Received frame with invalid padding.")
+        except ProtocolError as e:
+            # For whatever reason, receiving the frame caused a protocol error.
+            # We should prepare to emit a GoAway frame before throwing the
+            # exception up further. No need for an event: the exception will
+            # do fine.
+            self._terminate_connection(e.error_code)
+            raise
+
+        return events
+
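+    # A minimal sketch of the read side of an I/O loop built on
+    # ``receive_data``, assuming ``conn`` and ``sock`` (illustrative names):
+    #
+    #     data = sock.recv(65535)
+    #     if data:
+    #         for event in conn.receive_data(data):
+    #             ...  # dispatch on the event types defined in h2/events.py
+    #         sock.sendall(conn.data_to_send())
+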
+    def _receive_frame(self, frame):
+        """
+        Handle a frame received on the connection.
+
+        .. versionchanged:: 2.0.0
+           Removed from the public API.
+        """
+        try:
+            # I don't love using __class__ here, maybe reconsider it.
+            frames, events = self._frame_dispatch_table[frame.__class__](frame)
+        except StreamClosedError as e:
+            # If the stream was closed by RST_STREAM, we just send a RST_STREAM
+            # to the remote peer. Otherwise, this is a connection error, and so
+            # we will re-raise to trigger one.
+            if self._stream_is_closed_by_reset(e.stream_id):
+                f = RstStreamFrame(e.stream_id)
+                f.error_code = e.error_code
+                self._prepare_for_sending([f])
+                events = e._events
+            else:
+                raise
+        except StreamIDTooLowError as e:
+            # The stream ID seems invalid. This may happen when the closed
+            # stream has been cleaned up, or when the remote peer has opened a
+            # new stream with a higher stream ID than this one, forcing it
+            # closed implicitly.
+            #
+            # Check how the stream was closed: depending on the mechanism, it
+            # is either a stream error or a connection error.
+            if self._stream_is_closed_by_reset(e.stream_id):
+                # Closed by RST_STREAM is a stream error.
+                f = RstStreamFrame(e.stream_id)
+                f.error_code = ErrorCodes.STREAM_CLOSED
+                self._prepare_for_sending([f])
+                events = []
+            elif self._stream_is_closed_by_end(e.stream_id):
+                # Closed by END_STREAM is a connection error.
+                raise StreamClosedError(e.stream_id)
+            else:
+                # Closed implicitly, also a connection error, but of type
+                # PROTOCOL_ERROR.
+                raise
+        else:
+            self._prepare_for_sending(frames)
+
+        return events
+
+    def _terminate_connection(self, error_code):
+        """
+        Terminate the connection early. Used in error handling blocks to send
+        GOAWAY frames.
+        """
+        f = GoAwayFrame(0)
+        f.last_stream_id = self.highest_inbound_stream_id
+        f.error_code = error_code
+        self.state_machine.process_input(ConnectionInputs.SEND_GOAWAY)
+        self._prepare_for_sending([f])
+
+    def _receive_headers_frame(self, frame):
+        """
+        Receive a headers frame on the connection.
+        """
+        # If necessary, check we can open the stream. Also validate that the
+        # stream ID is valid.
+        if frame.stream_id not in self.streams:
+            max_open_streams = self.local_settings.max_concurrent_streams
+            if (self.open_inbound_streams + 1) > max_open_streams:
+                raise TooManyStreamsError(
+                    "Max outbound streams is %d, %d open" %
+                    (max_open_streams, self.open_outbound_streams)
+                )
+
+        # Let's decode the headers. We handle headers as bytes internally up
+        # until we hang them off the event, at which point we may optionally
+        # convert them to unicode.
+        headers = _decode_headers(self.decoder, frame.data)
+
+        events = self.state_machine.process_input(
+            ConnectionInputs.RECV_HEADERS
+        )
+        stream = self._get_or_create_stream(
+            frame.stream_id, AllowedStreamIDs(not self.config.client_side)
+        )
+        frames, stream_events = stream.receive_headers(
+            headers,
+            'END_STREAM' in frame.flags,
+            self.config.header_encoding
+        )
+
+        if 'PRIORITY' in frame.flags:
+            p_frames, p_events = self._receive_priority_frame(frame)
+            stream_events[0].priority_updated = p_events[0]
+            stream_events.extend(p_events)
+            assert not p_frames
+
+        return frames, events + stream_events
+
+    def _receive_push_promise_frame(self, frame):
+        """
+        Receive a push-promise frame on the connection.
+        """
+        if not self.local_settings.enable_push:
+            raise ProtocolError("Received pushed stream")
+
+        pushed_headers = _decode_headers(self.decoder, frame.data)
+
+        events = self.state_machine.process_input(
+            ConnectionInputs.RECV_PUSH_PROMISE
+        )
+
+        try:
+            stream = self._get_stream_by_id(frame.stream_id)
+        except NoSuchStreamError:
+            # We need to check if the parent stream was reset by us. If it was
+            # then we presume that the PUSH_PROMISE was in flight when we reset
+            # the parent stream. Rather than accept the new stream, just reset
+            # it.
+            #
+            # If this was closed naturally, however, we should call this a
+            # PROTOCOL_ERROR: pushing a stream on a naturally closed stream is
+            # a real problem because it creates a brand new stream that the
+            # remote peer now believes exists.
+            if (self._stream_closed_by(frame.stream_id) ==
+                    StreamClosedBy.SEND_RST_STREAM):
+                f = RstStreamFrame(frame.promised_stream_id)
+                f.error_code = ErrorCodes.REFUSED_STREAM
+                return [f], events
+
+            raise ProtocolError("Attempted to push on closed stream.")
+
+        # We need to prevent peers pushing streams in response to streams that
+        # they themselves have already pushed: see #163 and RFC 7540 § 6.6. The
+        # easiest way to do that is to assert that the stream_id is not even:
+        # this shortcut works because only servers can push and the state
+        # machine will enforce this.
+        if (frame.stream_id % 2) == 0:
+            raise ProtocolError("Cannot recursively push streams.")
+
+        try:
+            frames, stream_events = stream.receive_push_promise_in_band(
+                frame.promised_stream_id,
+                pushed_headers,
+                self.config.header_encoding,
+            )
+        except StreamClosedError:
+            # The parent stream was reset by us, so we presume that
+            # PUSH_PROMISE was in flight when we reset the parent stream.
+            # So we just reset the new stream.
+            f = RstStreamFrame(frame.promised_stream_id)
+            f.error_code = ErrorCodes.REFUSED_STREAM
+            return [f], events
+
+        new_stream = self._begin_new_stream(
+            frame.promised_stream_id, AllowedStreamIDs.EVEN
+        )
+        self.streams[frame.promised_stream_id] = new_stream
+        new_stream.remotely_pushed(pushed_headers)
+
+        return frames, events + stream_events
+
+    def _receive_data_frame(self, frame):
+        """
+        Receive a data frame on the connection.
+        """
+        flow_controlled_length = frame.flow_controlled_length
+
+        events = self.state_machine.process_input(
+            ConnectionInputs.RECV_DATA
+        )
+        self._inbound_flow_control_window_manager.window_consumed(
+            flow_controlled_length
+        )
+        stream = self._get_stream_by_id(frame.stream_id)
+        frames, stream_events = stream.receive_data(
+            frame.data,
+            'END_STREAM' in frame.flags,
+            flow_controlled_length
+        )
+        return frames, events + stream_events
+
+    def _receive_settings_frame(self, frame):
+        """
+        Receive a SETTINGS frame on the connection.
+        """
+        events = self.state_machine.process_input(
+            ConnectionInputs.RECV_SETTINGS
+        )
+
+        # This is an ack of the local settings.
+        if 'ACK' in frame.flags:
+            changed_settings = self._local_settings_acked()
+            ack_event = SettingsAcknowledged()
+            ack_event.changed_settings = changed_settings
+            events.append(ack_event)
+            return [], events
+
+        # Add the new settings.
+        self.remote_settings.update(frame.settings)
+        events.append(
+            RemoteSettingsChanged.from_settings(
+                self.remote_settings, frame.settings
+            )
+        )
+        frames = self._acknowledge_settings()
+
+        return frames, events
+
+    def _receive_window_update_frame(self, frame):
+        """
+        Receive a WINDOW_UPDATE frame on the connection.
+        """
+        # Validate the frame.
+        if not (1 <= frame.window_increment <= self.MAX_WINDOW_INCREMENT):
+            raise ProtocolError(
+                "Flow control increment must be between 1 and %d, received %d"
+                % (self.MAX_WINDOW_INCREMENT, frame.window_increment)
+            )
+
+        events = self.state_machine.process_input(
+            ConnectionInputs.RECV_WINDOW_UPDATE
+        )
+
+        if frame.stream_id:
+            stream = self._get_stream_by_id(frame.stream_id)
+            frames, stream_events = stream.receive_window_update(
+                frame.window_increment
+            )
+        else:
+            # Increment our local flow control window.
+            self.outbound_flow_control_window = guard_increment_window(
+                self.outbound_flow_control_window,
+                frame.window_increment
+            )
+
+            # FIXME: Should we split this into one event per active stream?
+            window_updated_event = WindowUpdated()
+            window_updated_event.stream_id = 0
+            window_updated_event.delta = frame.window_increment
+            stream_events = [window_updated_event]
+            frames = []
+
+        return frames, events + stream_events
+
+    def _receive_ping_frame(self, frame):
+        """
+        Receive a PING frame on the connection.
+        """
+        events = self.state_machine.process_input(
+            ConnectionInputs.RECV_PING
+        )
+        frames = []
+
+        if 'ACK' in frame.flags:
+            evt = PingAcknowledged()
+            evt.ping_data = frame.opaque_data
+            events.append(evt)
+        else:
+            f = PingFrame(0)
+            f.flags = {'ACK'}
+            f.opaque_data = frame.opaque_data
+            frames.append(f)
+
+        return frames, events
+
+    def _receive_rst_stream_frame(self, frame):
+        """
+        Receive a RST_STREAM frame on the connection.
+        """
+        events = self.state_machine.process_input(
+            ConnectionInputs.RECV_RST_STREAM
+        )
+        try:
+            stream = self._get_stream_by_id(frame.stream_id)
+        except NoSuchStreamError:
+            # The stream is missing. That's ok, we just do nothing here.
+            stream_frames = []
+            stream_events = []
+        else:
+            stream_frames, stream_events = stream.stream_reset(frame)
+
+        return stream_frames, events + stream_events
+
+    def _receive_priority_frame(self, frame):
+        """
+        Receive a PRIORITY frame on the connection.
+        """
+        events = self.state_machine.process_input(
+            ConnectionInputs.RECV_PRIORITY
+        )
+
+        event = PriorityUpdated()
+        event.stream_id = frame.stream_id
+        event.depends_on = frame.depends_on
+        event.exclusive = frame.exclusive
+
+        # Weight is an integer between 1 and 256, but the byte only allows
+        # 0 to 255: add one.
+        event.weight = frame.stream_weight + 1
+
+        # A stream may not depend on itself.
+        if event.depends_on == frame.stream_id:
+            raise ProtocolError(
+                "Stream %d may not depend on itself" % frame.stream_id
+            )
+        events.append(event)
+
+        return [], events
+
+    def _receive_goaway_frame(self, frame):
+        """
+        Receive a GOAWAY frame on the connection.
+        """
+        events = self.state_machine.process_input(
+            ConnectionInputs.RECV_GOAWAY
+        )
+
+        # Clear the outbound data buffer: we cannot send further data now.
+        self.clear_outbound_data_buffer()
+
+        # Fire an appropriate ConnectionTerminated event.
+        new_event = ConnectionTerminated()
+        new_event.error_code = _error_code_from_int(frame.error_code)
+        new_event.last_stream_id = frame.last_stream_id
+        new_event.additional_data = (frame.additional_data
+                                     if frame.additional_data else None)
+        events.append(new_event)
+
+        return [], events
+
+    def _receive_naked_continuation(self, frame):
+        """
+        A naked CONTINUATION frame has been received. This is always an error,
+        but the type of error depends on the state of the stream, and handling
+        the frame must transition that state, so we pass it to the appropriate
+        stream object.
+        """
+        stream = self._get_stream_by_id(frame.stream_id)
+        stream.receive_continuation()
+        assert False, "Should not be reachable"
+
+    def _receive_alt_svc_frame(self, frame):
+        """
+        An ALTSVC frame has been received. This frame, specified in RFC 7838,
+        is used to advertise alternative places where the same service can be
+        reached.
+
+        This frame can optionally be received either on a stream or on stream
+        0, and its semantics are different in each case.
+        """
+        events = self.state_machine.process_input(
+            ConnectionInputs.RECV_ALTERNATIVE_SERVICE
+        )
+        frames = []
+
+        if frame.stream_id:
+            # Given that it makes no sense to receive ALTSVC on a stream
+            # before that stream has been opened with a HEADERS frame, the
+            # ALTSVC frame cannot create a stream. If the stream is not
+            # present, we simply ignore the frame.
+            try:
+                stream = self._get_stream_by_id(frame.stream_id)
+            except (NoSuchStreamError, StreamClosedError):
+                pass
+            else:
+                stream_frames, stream_events = stream.receive_alt_svc(frame)
+                frames.extend(stream_frames)
+                events.extend(stream_events)
+        else:
+            # This frame is sent on stream 0. The origin field on the frame
+            # must be present; annoyingly, if it is missing this is not a
+            # ProtocolError, so we just ignore the frame.
+            if not frame.origin:
+                return frames, events
+
+            # If we're a server, we want to ignore this (RFC 7838 says so).
+            if not self.config.client_side:
+                return frames, events
+
+            event = AlternativeServiceAvailable()
+            event.origin = frame.origin
+            event.field_value = frame.field
+            events.append(event)
+
+        return frames, events
+
+    def _receive_unknown_frame(self, frame):
+        """
+        We have received a frame that we do not understand. This is almost
+        certainly an extension frame, though it's impossible to be entirely
+        sure.
+
+        RFC 7540 § 5.5 says that we MUST ignore unknown frame types: so we
+        do. We do notify the user that we received one, however.
+        """
+        # All we do here is log.
+        self.config.logger.debug(
+            "Received unknown extension frame (ID %d)", frame.stream_id
+        )
+        event = UnknownFrameReceived()
+        event.frame = frame
+        return [], [event]
+
+    def _local_settings_acked(self):
+        """
+        Handle the local settings being ACKed, update internal state.
+        """
+        changes = self.local_settings.acknowledge()
+
+        if SettingCodes.INITIAL_WINDOW_SIZE in changes:
+            setting = changes[SettingCodes.INITIAL_WINDOW_SIZE]
+            self._inbound_flow_control_change_from_settings(
+                setting.original_value,
+                setting.new_value,
+            )
+
+        if SettingCodes.MAX_HEADER_LIST_SIZE in changes:
+            setting = changes[SettingCodes.MAX_HEADER_LIST_SIZE]
+            self.decoder.max_header_list_size = setting.new_value
+
+        if SettingCodes.MAX_FRAME_SIZE in changes:
+            setting = changes[SettingCodes.MAX_FRAME_SIZE]
+            self.max_inbound_frame_size = setting.new_value
+
+        if SettingCodes.HEADER_TABLE_SIZE in changes:
+            setting = changes[SettingCodes.HEADER_TABLE_SIZE]
+            # This is safe across all hpack versions: some versions just won't
+            # respect it.
+            self.decoder.max_allowed_table_size = setting.new_value
+
+        return changes
+
+    def _stream_id_is_outbound(self, stream_id):
+        """
+        Returns ``True`` if the stream ID corresponds to an outbound stream
+        (one initiated by this peer), returns ``False`` otherwise.
+        """
+        return (stream_id % 2 == int(self.config.client_side))
+
+    def _stream_closed_by(self, stream_id):
+        """
+        Returns how the stream was closed.
+
+        The return value will be either a member of
+        ``h2.stream.StreamClosedBy`` or ``None``. If ``None``, the stream was
+        closed implicitly by the peer opening a stream with a higher stream ID
+        before opening this one.
+        """
+        if stream_id in self.streams:
+            return self.streams[stream_id].closed_by
+        if stream_id in self._closed_streams:
+            return self._closed_streams[stream_id]
+        return None
+
+    def _stream_is_closed_by_reset(self, stream_id):
+        """
+        Returns ``True`` if the stream was closed by sending or receiving a
+        RST_STREAM frame. Returns ``False`` otherwise.
+        """
+        return self._stream_closed_by(stream_id) in (
+            StreamClosedBy.RECV_RST_STREAM, StreamClosedBy.SEND_RST_STREAM
+        )
+
+    def _stream_is_closed_by_end(self, stream_id):
+        """
+        Returns ``True`` if the stream was closed by sending or receiving an
+        END_STREAM flag in a HEADERS or DATA frame. Returns ``False``
+        otherwise.
+        """
+        return self._stream_closed_by(stream_id) in (
+            StreamClosedBy.RECV_END_STREAM, StreamClosedBy.SEND_END_STREAM
+        )
+
+
+def _add_frame_priority(frame, weight=None, depends_on=None, exclusive=None):
+    """
+    Adds priority data to a given frame. Does not change any flags set on that
+    frame: if the caller is adding priority information to a HEADERS frame they
+    must set that themselves.
+
+    This function validates the input values and deliberately sets defaults
+    for anything not provided.
+    """
+    # A stream may not depend on itself.
+    if depends_on == frame.stream_id:
+        raise ProtocolError(
+            "Stream %d may not depend on itself" % frame.stream_id
+        )
+
+    # Weight must be between 1 and 256.
+    if weight is not None:
+        if weight > 256 or weight < 1:
+            raise ProtocolError(
+                "Weight must be between 1 and 256, not %d" % weight
+            )
+        else:
+            # Weight is an integer between 1 and 256, but the byte only allows
+            # 0 to 255: subtract one.
+            weight -= 1
+
+    # Set defaults for anything not provided.
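+    # (The default stream weight is 16; 15 is its on-the-wire encoding, since
+    # the weight byte stores weight - 1.)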
+    weight = weight if weight is not None else 15
+    depends_on = depends_on if depends_on is not None else 0
+    exclusive = exclusive if exclusive is not None else False
+
+    frame.stream_weight = weight
+    frame.depends_on = depends_on
+    frame.exclusive = exclusive
+
+    return frame
+
+
+def _decode_headers(decoder, encoded_header_block):
+    """
+    Decode a HPACK-encoded header block, translating HPACK exceptions into
+    sensible hyper-h2 errors.
+
+    This only ever returns bytestring headers: hyper-h2 may emit them as
+    unicode later, but internally it processes them as bytestrings only.
+    """
+    try:
+        return decoder.decode(encoded_header_block, raw=True)
+    except OversizedHeaderListError as e:
+        # This is a symptom of a HPACK bomb attack: the user has
+        # disregarded our requirements on how large a header block we'll
+        # accept.
+        raise DenialOfServiceError("Oversized header block: %s" % e)
+    except (HPACKError, IndexError, TypeError, UnicodeDecodeError) as e:
+        # We should only need HPACKError here, but versions of HPACK older
+        # than 2.1.0 throw all three others as well. For maximum
+        # compatibility, catch all of them.
+        raise ProtocolError("Error decoding header block: %s" % e)
diff --git a/tools/third_party/h2/h2/errors.py b/tools/third_party/h2/h2/errors.py
new file mode 100755
index 0000000..baef200
--- /dev/null
+++ b/tools/third_party/h2/h2/errors.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+"""
+h2/errors
+~~~~~~~~~~~~~~~~~~~
+
+Global error code registry containing the established HTTP/2 error codes.
+
+The current registry is available at:
+https://tools.ietf.org/html/rfc7540#section-11.4
+"""
+import enum
+
+
+class ErrorCodes(enum.IntEnum):
+    """
+    All known HTTP/2 error codes.
+
+    .. versionadded:: 2.5.0
+    """
+    #: Graceful shutdown.
+    NO_ERROR = 0x0
+
+    #: Protocol error detected.
+    PROTOCOL_ERROR = 0x1
+
+    #: Implementation fault.
+    INTERNAL_ERROR = 0x2
+
+    #: Flow-control limits exceeded.
+    FLOW_CONTROL_ERROR = 0x3
+
+    #: Settings not acknowledged.
+    SETTINGS_TIMEOUT = 0x4
+
+    #: Frame received for closed stream.
+    STREAM_CLOSED = 0x5
+
+    #: Frame size incorrect.
+    FRAME_SIZE_ERROR = 0x6
+
+    #: Stream not processed.
+    REFUSED_STREAM = 0x7
+
+    #: Stream cancelled.
+    CANCEL = 0x8
+
+    #: Compression state not updated.
+    COMPRESSION_ERROR = 0x9
+
+    #: TCP connection error for CONNECT method.
+    CONNECT_ERROR = 0xa
+
+    #: Processing capacity exceeded.
+    ENHANCE_YOUR_CALM = 0xb
+
+    #: Negotiated TLS parameters not acceptable.
+    INADEQUATE_SECURITY = 0xc
+
+    #: Use HTTP/1.1 for the request.
+    HTTP_1_1_REQUIRED = 0xd
+
+
+def _error_code_from_int(code):
+    """
+    Given an integer error code, returns either one of :class:`ErrorCodes
+    <h2.errors.ErrorCodes>` or, if not present in the known set of codes,
+    returns the integer directly.
+    """
+    try:
+        return ErrorCodes(code)
+    except ValueError:
+        return code
+
+
+__all__ = ['ErrorCodes']
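+
+# A short sketch of how these codes are typically consumed, e.g. when handling
+# a ``ConnectionTerminated`` event whose ``error_code`` may be either an
+# ``ErrorCodes`` member or a plain ``int`` (``event`` is an illustrative name):
+#
+#     if event.error_code == ErrorCodes.ENHANCE_YOUR_CALM:
+#         ...  # back off before reconnecting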
diff --git a/tools/third_party/h2/h2/events.py b/tools/third_party/h2/h2/events.py
new file mode 100755
index 0000000..ff3ec3d
--- /dev/null
+++ b/tools/third_party/h2/h2/events.py
@@ -0,0 +1,619 @@
+# -*- coding: utf-8 -*-
+"""
+h2/events
+~~~~~~~~~
+
+Defines Event types for HTTP/2.
+
+Events are returned by the H2 state machine to allow implementations to keep
+track of events triggered by receiving data. Each time data is provided to the
+H2 state machine it processes the data and returns a list of Event objects.
+"""
+import binascii
+
+from .settings import ChangedSetting, _setting_code_from_int
+
+
+class Event(object):
+    """
+    Base class for h2 events.
+    """
+    pass
+
+
+class RequestReceived(Event):
+    """
+    The RequestReceived event is fired whenever request headers are received.
+    This event carries the HTTP headers for the given request and the stream ID
+    of the new stream.
+
+    .. versionchanged:: 2.3.0
+       Changed the type of ``headers`` to :class:`HeaderTuple
+       <hpack:hpack.HeaderTuple>`. This has no effect on current users.
+
+    .. versionchanged:: 2.4.0
+       Added ``stream_ended`` and ``priority_updated`` properties.
+    """
+    def __init__(self):
+        #: The Stream ID for the stream this request was made on.
+        self.stream_id = None
+
+        #: The request headers.
+        self.headers = None
+
+        #: If this request also ended the stream, the associated
+        #: :class:`StreamEnded <h2.events.StreamEnded>` event will be available
+        #: here.
+        #:
+        #: .. versionadded:: 2.4.0
+        self.stream_ended = None
+
+        #: If this request also had associated priority information, the
+        #: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
+        #: event will be available here.
+        #:
+        #: .. versionadded:: 2.4.0
+        self.priority_updated = None
+
+    def __repr__(self):
+        return "<RequestReceived stream_id:%s, headers:%s>" % (
+            self.stream_id, self.headers
+        )
+
+
+class ResponseReceived(Event):
+    """
+    The ResponseReceived event is fired whenever response headers are received.
+    This event carries the HTTP headers for the given response and the stream
+    ID of the new stream.
+
+    .. versionchanged:: 2.3.0
+       Changed the type of ``headers`` to :class:`HeaderTuple
+       <hpack:hpack.HeaderTuple>`. This has no effect on current users.
+
+    .. versionchanged:: 2.4.0
+       Added ``stream_ended`` and ``priority_updated`` properties.
+    """
+    def __init__(self):
+        #: The Stream ID for the stream this response was made on.
+        self.stream_id = None
+
+        #: The response headers.
+        self.headers = None
+
+        #: If this response also ended the stream, the associated
+        #: :class:`StreamEnded <h2.events.StreamEnded>` event will be available
+        #: here.
+        #:
+        #: .. versionadded:: 2.4.0
+        self.stream_ended = None
+
+        #: If this response also had associated priority information, the
+        #: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
+        #: event will be available here.
+        #:
+        #: .. versionadded:: 2.4.0
+        self.priority_updated = None
+
+    def __repr__(self):
+        return "<ResponseReceived stream_id:%s, headers:%s>" % (
+            self.stream_id, self.headers
+        )
+
+
+class TrailersReceived(Event):
+    """
+    The TrailersReceived event is fired whenever trailers are received on a
+    stream. Trailers are a set of headers sent after the body of the
+    request/response, and are used to provide information that wasn't known
+    ahead of time (e.g. content-length). This event carries the HTTP header
+    fields that form the trailers and the stream ID of the stream on which they
+    were received.
+
+    .. versionchanged:: 2.3.0
+       Changed the type of ``headers`` to :class:`HeaderTuple
+       <hpack:hpack.HeaderTuple>`. This has no effect on current users.
+
+    .. versionchanged:: 2.4.0
+       Added ``stream_ended`` and ``priority_updated`` properties.
+    """
+    def __init__(self):
+        #: The Stream ID for the stream on which these trailers were received.
+        self.stream_id = None
+
+        #: The trailers themselves.
+        self.headers = None
+
+        #: Trailers always end streams. This property has the associated
+        #: :class:`StreamEnded <h2.events.StreamEnded>` in it.
+        #:
+        #: .. versionadded:: 2.4.0
+        self.stream_ended = None
+
+        #: If the trailers also set associated priority information, the
+        #: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
+        #: event will be available here.
+        #:
+        #: .. versionadded:: 2.4.0
+        self.priority_updated = None
+
+    def __repr__(self):
+        return "<TrailersReceived stream_id:%s, headers:%s>" % (
+            self.stream_id, self.headers
+        )
+
+
+class _HeadersSent(Event):
+    """
+    The _HeadersSent event is fired whenever headers are sent.
+
+    This is an internal event, used to determine validation steps on
+    outgoing header blocks.
+    """
+    pass
+
+
+class _ResponseSent(_HeadersSent):
+    """
+    The _ResponseSent event is fired whenever response headers are sent
+    on a stream.
+
+    This is an internal event, used to determine validation steps on
+    outgoing header blocks.
+    """
+    pass
+
+
+class _RequestSent(_HeadersSent):
+    """
+    The _RequestSent event is fired whenever request headers are sent
+    on a stream.
+
+    This is an internal event, used to determine validation steps on
+    outgoing header blocks.
+    """
+    pass
+
+
+class _TrailersSent(_HeadersSent):
+    """
+    The _TrailersSent event is fired whenever trailers are sent on a
+    stream. Trailers are a set of headers sent after the body of the
+    request/response, and are used to provide information that wasn't known
+    ahead of time (e.g. content-length).
+
+    This is an internal event, used to determine validation steps on
+    outgoing header blocks.
+    """
+    pass
+
+
+class _PushedRequestSent(_HeadersSent):
+    """
+    The _PushedRequestSent event is fired whenever pushed request headers are
+    sent.
+
+    This is an internal event, used to determine validation steps on outgoing
+    header blocks.
+    """
+    pass
+
+
+class InformationalResponseReceived(Event):
+    """
+    The InformationalResponseReceived event is fired when an informational
+    response (that is, one whose status code is a 1XX code) is received from
+    the remote peer.
+
+    The remote peer may send any number of these, from zero upwards. These
+    responses are most commonly sent in response to requests that have the
+    ``expect: 100-continue`` header field present. Most users can safely
+    ignore this event unless they intend to use the ``expect: 100-continue``
+    flow or are, for any reason, expecting a different 1XX status code.
+
+    .. versionadded:: 2.2.0
+
+    .. versionchanged:: 2.3.0
+       Changed the type of ``headers`` to :class:`HeaderTuple
+       <hpack:hpack.HeaderTuple>`. This has no effect on current users.
+
+    .. versionchanged:: 2.4.0
+       Added ``priority_updated`` property.
+    """
+    def __init__(self):
+        #: The Stream ID for the stream this informational response was made
+        #: on.
+        self.stream_id = None
+
+        #: The headers for this informational response.
+        self.headers = None
+
+        #: If this response also had associated priority information, the
+        #: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
+        #: event will be available here.
+        #:
+        #: .. versionadded:: 2.4.0
+        self.priority_updated = None
+
+    def __repr__(self):
+        return "<InformationalResponseReceived stream_id:%s, headers:%s>" % (
+            self.stream_id, self.headers
+        )
+
+
+class DataReceived(Event):
+    """
+    The DataReceived event is fired whenever data is received on a stream from
+    the remote peer. The event carries the data itself, and the stream ID on
+    which the data was received.
+
+    .. versionchanged:: 2.4.0
+       Added ``stream_ended`` property.
+    """
+    def __init__(self):
+        #: The Stream ID for the stream this data was received on.
+        self.stream_id = None
+
+        #: The data itself.
+        self.data = None
+
+        #: The amount of data received that counts against the flow control
+        #: window. Note that padding counts against the flow control window, so
+        #: when adjusting flow control you should always use this field rather
+        #: than ``len(data)``.
+        self.flow_controlled_length = None
+
+        #: If this data chunk also completed the stream, the associated
+        #: :class:`StreamEnded <h2.events.StreamEnded>` event will be available
+        #: here.
+        #:
+        #: .. versionadded:: 2.4.0
+        self.stream_ended = None
+
+    def __repr__(self):
+        return (
+            "<DataReceived stream_id:%s, "
+            "flow_controlled_length:%s, "
+            "data:%s>" % (
+                self.stream_id,
+                self.flow_controlled_length,
+                _bytes_representation(self.data[:20]),
+            )
+        )
+
+
+class WindowUpdated(Event):
+    """
+    The WindowUpdated event is fired whenever a flow control window changes
+    size. HTTP/2 defines flow control windows for connections and streams: this
+    event fires for both connections and streams. The event carries the ID of
+    the stream to which it applies (set to zero if the window update applies to
+    the connection), and the delta in the window size.
+    """
+    def __init__(self):
+        #: The Stream ID of the stream whose flow control window was changed.
+        #: May be ``0`` if the connection window was changed.
+        self.stream_id = None
+
+        #: The window delta.
+        self.delta = None
+
+    def __repr__(self):
+        return "<WindowUpdated stream_id:%s, delta:%s>" % (
+            self.stream_id, self.delta
+        )
+
+
+class RemoteSettingsChanged(Event):
+    """
+    The RemoteSettingsChanged event is fired whenever the remote peer changes
+    its settings. It contains a complete inventory of changed settings,
+    including their previous values.
+
+    In HTTP/2, settings changes need to be acknowledged. hyper-h2 automatically
+    acknowledges settings changes for efficiency. However, it is possible that
+    the caller may not be happy with the changed setting.
+
+    When this event is received, the caller should confirm that the new
+    settings are acceptable. If they are not acceptable, the user should close
+    the connection with the error code :data:`PROTOCOL_ERROR
+    <h2.errors.ErrorCodes.PROTOCOL_ERROR>`.
+
+    .. versionchanged:: 2.0.0
+       Prior to this version the user needed to acknowledge settings changes.
+       This is no longer the case: hyper-h2 now automatically acknowledges
+       them.
+    """
+    def __init__(self):
+        #: A dictionary of setting byte to
+        #: :class:`ChangedSetting <h2.settings.ChangedSetting>`, representing
+        #: the changed settings.
+        self.changed_settings = {}
+
+    @classmethod
+    def from_settings(cls, old_settings, new_settings):
+        """
+        Build a RemoteSettingsChanged event from a set of changed settings.
+
+        :param old_settings: A complete collection of old settings, in the form
+                             of a dictionary of ``{setting: value}``.
+        :param new_settings: All the changed settings and their new values, in
+                             the form of a dictionary of ``{setting: value}``.
+        """
+        e = cls()
+        for setting, new_value in new_settings.items():
+            setting = _setting_code_from_int(setting)
+            original_value = old_settings.get(setting)
+            change = ChangedSetting(setting, original_value, new_value)
+            e.changed_settings[setting] = change
+
+        return e
+
+    def __repr__(self):
+        return "<RemoteSettingsChanged changed_settings:{%s}>" % (
+            ", ".join(repr(cs) for cs in self.changed_settings.values()),
+        )
+
+
+class PingAcknowledged(Event):
+    """
+    The PingAcknowledged event is fired whenever a user-emitted PING is
+    acknowledged. This contains the data in the ACK'ed PING, allowing the
+    user to correlate PINGs and calculate RTT.
+    """
+    def __init__(self):
+        #: The data included on the ping.
+        self.ping_data = None
+
+    def __repr__(self):
+        return "<PingAcknowledged ping_data:%s>" % (
+            _bytes_representation(self.ping_data),
+        )
+
+
+class StreamEnded(Event):
+    """
+    The StreamEnded event is fired whenever a stream is ended by a remote
+    party. The stream may not be fully closed if it has not been closed
+    locally, but no further data or headers should be expected on that stream.
+    """
+    def __init__(self):
+        #: The Stream ID of the stream that was closed.
+        self.stream_id = None
+
+    def __repr__(self):
+        return "<StreamEnded stream_id:%s>" % self.stream_id
+
+
+class StreamReset(Event):
+    """
+    The StreamReset event is fired in two situations. The first is when the
+    remote party forcefully resets the stream. The second is when the remote
+    party has made a protocol error which only affects a single stream. In this
+    case, Hyper-h2 will terminate the stream early and return this event.
+
+    .. versionchanged:: 2.0.0
+       This event is now fired when Hyper-h2 automatically resets a stream.
+    """
+    def __init__(self):
+        #: The Stream ID of the stream that was reset.
+        self.stream_id = None
+
+        #: The error code given. Either one of :class:`ErrorCodes
+        #: <h2.errors.ErrorCodes>` or ``int``
+        self.error_code = None
+
+        #: Whether the remote peer sent a RST_STREAM or we did.
+        self.remote_reset = True
+
+    def __repr__(self):
+        return "<StreamReset stream_id:%s, error_code:%s, remote_reset:%s>" % (
+            self.stream_id, self.error_code, self.remote_reset
+        )
+
+
+class PushedStreamReceived(Event):
+    """
+    The PushedStreamReceived event is fired whenever a pushed stream has been
+    received from a remote peer. The event carries on it the new stream ID, the
+    ID of the parent stream, and the request headers pushed by the remote peer.
+    """
+    def __init__(self):
+        #: The Stream ID of the stream created by the push.
+        self.pushed_stream_id = None
+
+        #: The Stream ID of the stream that the push is related to.
+        self.parent_stream_id = None
+
+        #: The request headers, sent by the remote party in the push.
+        self.headers = None
+
+    def __repr__(self):
+        return (
+            "<PushedStreamReceived pushed_stream_id:%s, parent_stream_id:%s, "
+            "headers:%s>" % (
+                self.pushed_stream_id,
+                self.parent_stream_id,
+                self.headers,
+            )
+        )
+
+
+class SettingsAcknowledged(Event):
+    """
+    The SettingsAcknowledged event is fired whenever a settings ACK is received
+    from the remote peer. The event carries on it the settings that were
+    acknowledged, in the same format as
+    :class:`h2.events.RemoteSettingsChanged`.
+    """
+    def __init__(self):
+        #: A dictionary of setting byte to
+        #: :class:`ChangedSetting <h2.settings.ChangedSetting>`, representing
+        #: the changed settings.
+        self.changed_settings = {}
+
+    def __repr__(self):
+        return "<SettingsAcknowledged changed_settings:{%s}>" % (
+            ", ".join(repr(cs) for cs in self.changed_settings.values()),
+        )
+
+
+class PriorityUpdated(Event):
+    """
+    The PriorityUpdated event is fired whenever a stream sends updated priority
+    information. This can occur when the stream is opened, or at any time
+    during the stream lifetime.
+
+    This event is purely advisory, and does not need to be acted on.
+
+    .. versionadded:: 2.0.0
+    """
+    def __init__(self):
+        #: The ID of the stream whose priority information is being updated.
+        self.stream_id = None
+
+        #: The new stream weight. May be the same as the original stream
+        #: weight. An integer between 1 and 256.
+        self.weight = None
+
+        #: The stream ID this stream now depends on. May be ``0``.
+        self.depends_on = None
+
+        #: Whether the stream *exclusively* depends on the parent stream. If it
+        #: does, this stream should inherit the current children of its new
+        #: parent.
+        self.exclusive = None
+
+    def __repr__(self):
+        return (
+            "<PriorityUpdated stream_id:%s, weight:%s, depends_on:%s, "
+            "exclusive:%s>" % (
+                self.stream_id,
+                self.weight,
+                self.depends_on,
+                self.exclusive
+            )
+        )
+
+
+class ConnectionTerminated(Event):
+    """
+    The ConnectionTerminated event is fired when a connection is torn down by
+    the remote peer using a GOAWAY frame. Once received, no further action may
+    be taken on the connection: a new connection must be established.
+    """
+    def __init__(self):
+        #: The error code cited when tearing down the connection. Should be
+        #: one of :class:`ErrorCodes <h2.errors.ErrorCodes>`, but may not be if
+        #: unknown HTTP/2 extensions are being used.
+        self.error_code = None
+
+        #: The stream ID of the last stream the remote peer saw. This can
+        #: provide an indication of what data, if any, never reached the remote
+        #: peer and so can safely be resent.
+        self.last_stream_id = None
+
+        #: Additional debug data that can be appended to the GOAWAY frame.
+        self.additional_data = None
+
+    def __repr__(self):
+        return (
+            "<ConnectionTerminated error_code:%s, last_stream_id:%s, "
+            "additional_data:%s>" % (
+                self.error_code,
+                self.last_stream_id,
+                _bytes_representation(
+                    self.additional_data[:20]
+                    if self.additional_data else None)
+            )
+        )
+
+
+class AlternativeServiceAvailable(Event):
+    """
+    The AlternativeServiceAvailable event is fired when the remote peer
+    advertises an `RFC 7838 <https://tools.ietf.org/html/rfc7838>`_ Alternative
+    Service using an ALTSVC frame.
+
+    This event always carries the origin to which the ALTSVC information
+    applies. That origin is either supplied by the server directly, or inferred
+    by hyper-h2 from the ``:authority`` pseudo-header field that was sent by
+    the user when initiating a given stream.
+
+    This event also carries what RFC 7838 calls the "Alternative Service Field
+    Value", which is formatted like an HTTP header field and contains the
+    relevant alternative service information. Hyper-h2 does not parse or in any
+    way modify that information: the user is required to do that.
+
+    This event can only be fired on the client end of a connection.
+
+    .. versionadded:: 2.3.0
+    """
+    def __init__(self):
+        #: The origin to which the alternative service field value applies.
+        #: This field is either supplied by the server directly, or inferred by
+        #: hyper-h2 from the ``:authority`` pseudo-header field that was sent
+        #: by the user when initiating the stream on which the frame was
+        #: received.
+        self.origin = None
+
+        #: The ALTSVC field value. This contains information about the HTTP
+        #: alternative service being advertised by the server. Hyper-h2 does
+        #: not parse this field: it is left exactly as sent by the server. The
+        #: structure of the data in this field is given by `RFC 7838 Section 3
+        #: <https://tools.ietf.org/html/rfc7838#section-3>`_.
+        self.field_value = None
+
+    def __repr__(self):
+        return (
+            "<AlternativeServiceAvailable origin:%s, field_value:%s>" % (
+                self.origin.decode('utf-8', 'ignore'),
+                self.field_value.decode('utf-8', 'ignore'),
+            )
+        )
+
+
+class UnknownFrameReceived(Event):
+    """
+    The UnknownFrameReceived event is fired when the remote peer sends a frame
+    that hyper-h2 does not understand. This occurs primarily when the remote
+    peer is employing HTTP/2 extensions that hyper-h2 doesn't know anything
+    about.
+
+    RFC 7540 requires that HTTP/2 implementations ignore these frames. hyper-h2
+    does so. However, this event is fired to allow implementations to perform
+    special processing on those frames if needed (e.g. if the implementation
+    is capable of handling the frame itself).
+
+    .. versionadded:: 2.7.0
+    """
+    def __init__(self):
+        #: The hyperframe Frame object that encapsulates the received frame.
+        self.frame = None
+
+    def __repr__(self):
+        return "<UnknownFrameReceived>"
+
+
+def _bytes_representation(data):
+    """
+    Converts a bytestring into something that is safe to print on all Python
+    platforms.
+
+    This function is relatively expensive, so it should not be called on the
+    mainline of the code. It's safe to use in things like object repr methods
+    though.
+    """
+    if data is None:
+        return None
+
+    hex = binascii.hexlify(data)
+
+    # This is moderately clever: on all Python versions hexlify returns a byte
+    # string. On Python 3 we want an actual string, so we just check whether
+    # that's what we have.
+    if not isinstance(hex, str):  # pragma: no cover
+        hex = hex.decode('ascii')
+
+    return hex
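
The classes above are plain data carriers: they are created by h2's connection
object and handed back from ``receive_data()``. A minimal sketch of the
consumption pattern, showing why the ``DataReceived`` docstring tells callers
to acknowledge ``flow_controlled_length`` rather than ``len(data)``. The
``H2Connection``/``H2Configuration`` API comes from h2's connection and config
modules, and ``sock`` is assumed to be an already-connected, ALPN-"h2" TLS
socket; none of that is part of this events module:

    import h2.config
    import h2.connection
    import h2.events

    def fetch_root(sock, authority):
        # Client-side connection; the preamble and SETTINGS go out first.
        conn = h2.connection.H2Connection(
            config=h2.config.H2Configuration(client_side=True)
        )
        conn.initiate_connection()
        conn.send_headers(1, [
            (":method", "GET"), (":path", "/"),
            (":scheme", "https"), (":authority", authority),
        ], end_stream=True)
        sock.sendall(conn.data_to_send())

        body = b""
        while True:
            for event in conn.receive_data(sock.recv(65535)):
                if isinstance(event, h2.events.DataReceived):
                    body += event.data
                    # Padding counts against the window, so replenish it by
                    # the flow-controlled length, not by len(event.data).
                    conn.acknowledge_received_data(
                        event.flow_controlled_length, event.stream_id
                    )
                elif isinstance(event, h2.events.StreamEnded):
                    sock.sendall(conn.data_to_send())
                    return body
            sock.sendall(conn.data_to_send())
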
diff --git a/tools/third_party/h2/h2/exceptions.py b/tools/third_party/h2/h2/exceptions.py
new file mode 100755
index 0000000..388f9e9
--- /dev/null
+++ b/tools/third_party/h2/h2/exceptions.py
@@ -0,0 +1,186 @@
+# -*- coding: utf-8 -*-
+"""
+h2/exceptions
+~~~~~~~~~~~~~
+
+Exceptions for the HTTP/2 module.
+"""
+import h2.errors
+
+
+class H2Error(Exception):
+    """
+    The base class for all exceptions for the HTTP/2 module.
+    """
+
+
+class ProtocolError(H2Error):
+    """
+    An action was attempted in violation of the HTTP/2 protocol.
+    """
+    #: The error code corresponds to this kind of Protocol Error.
+    error_code = h2.errors.ErrorCodes.PROTOCOL_ERROR
+
+
+class FrameTooLargeError(ProtocolError):
+    """
+    The frame that we tried to send or that we received was too large.
+    """
+    #: The error code that corresponds to this kind of Protocol Error.
+    error_code = h2.errors.ErrorCodes.FRAME_SIZE_ERROR
+
+
+class FrameDataMissingError(ProtocolError):
+    """
+    The frame that we received is missing some data.
+
+    .. versionadded:: 2.0.0
+    """
+    #: The error code that corresponds to this kind of Protocol Error
+    error_code = h2.errors.ErrorCodes.FRAME_SIZE_ERROR
+
+
+class TooManyStreamsError(ProtocolError):
+    """
+    An attempt was made to open a stream that would lead to too many concurrent
+    streams.
+    """
+    pass
+
+
+class FlowControlError(ProtocolError):
+    """
+    An attempted action violates flow control constraints.
+    """
+    #: The error code that corresponds to this kind of
+    #: :class:`ProtocolError <h2.exceptions.ProtocolError>`
+    error_code = h2.errors.ErrorCodes.FLOW_CONTROL_ERROR
+
+
+class StreamIDTooLowError(ProtocolError):
+    """
+    An attempt was made to open a stream that had an ID that is lower than the
+    highest ID we have seen on this connection.
+    """
+    def __init__(self, stream_id, max_stream_id):
+        #: The ID of the stream that we attempted to open.
+        self.stream_id = stream_id
+
+        #: The current highest-seen stream ID.
+        self.max_stream_id = max_stream_id
+
+    def __str__(self):
+        return "StreamIDTooLowError: %d is lower than %d" % (
+            self.stream_id, self.max_stream_id
+        )
+
+
+class NoAvailableStreamIDError(ProtocolError):
+    """
+    There are no available stream IDs left on the connection. All stream IDs
+    have been exhausted.
+
+    .. versionadded:: 2.0.0
+    """
+    pass
+
+
+class NoSuchStreamError(ProtocolError):
+    """
+    A stream-specific action referenced a stream that does not exist.
+
+    .. versionchanged:: 2.0.0
+       Became a subclass of :class:`ProtocolError
+       <h2.exceptions.ProtocolError>`
+    """
+    def __init__(self, stream_id):
+        #: The stream ID that corresponds to the non-existent stream.
+        self.stream_id = stream_id
+
+
+class StreamClosedError(NoSuchStreamError):
+    """
+    A more specific form of
+    :class:`NoSuchStreamError <h2.exceptions.NoSuchStreamError>`. Indicates
+    that the stream has since been closed, and that all state relating to that
+    stream has been removed.
+    """
+    def __init__(self, stream_id):
+        #: The stream ID that corresponds to the nonexistent stream.
+        self.stream_id = stream_id
+
+        #: The relevant HTTP/2 error code.
+        self.error_code = h2.errors.ErrorCodes.STREAM_CLOSED
+
+        # Any events that internal code may need to fire. Not relevant to
+        # external users that may receive a StreamClosedError.
+        self._events = []
+
+
+class InvalidSettingsValueError(ProtocolError, ValueError):
+    """
+    An attempt was made to set an invalid Settings value.
+
+    .. versionadded:: 2.0.0
+    """
+    def __init__(self, msg, error_code):
+        super(InvalidSettingsValueError, self).__init__(msg)
+        self.error_code = error_code
+
+
+class InvalidBodyLengthError(ProtocolError):
+    """
+    The remote peer sent more or less data than the Content-Length header
+    indicated.
+
+    .. versionadded:: 2.0.0
+    """
+    def __init__(self, expected, actual):
+        self.expected_length = expected
+        self.actual_length = actual
+
+    def __str__(self):
+        return "InvalidBodyLengthError: Expected %d bytes, received %d" % (
+            self.expected_length, self.actual_length
+        )
+
+
+class UnsupportedFrameError(ProtocolError, KeyError):
+    """
+    The remote peer sent a frame that is unsupported in this context.
+
+    .. versionadded:: 2.1.0
+    """
+    # TODO: Remove the KeyError in 3.0.0
+    pass
+
+
+class RFC1122Error(H2Error):
+    """
+    Emitted when users attempt to do something that is literally allowed by the
+    relevant RFC, but is sufficiently ill-defined that it's unwise to allow
+    users to actually do it.
+
+    While there is some disagreement about whether or not we should be liberal
+    in what we accept, it is a truth universally acknowledged that we should be
+    conservative in what we emit.
+
+    .. versionadded:: 2.4.0
+    """
+    # shazow says I'm going to regret naming the exception this way. If that
+    # turns out to be true, TELL HIM NOTHING.
+    pass
+
+
+class DenialOfServiceError(ProtocolError):
+    """
+    Emitted when the remote peer exhibits a behaviour that is likely to be an
+    attempt to perform a Denial of Service attack on the implementation. This
+    is a form of ProtocolError that carries a different error code, and allows
+    easier detection of this kind of behaviour.
+
+    .. versionadded:: 2.5.0
+    """
+    #: The error code that corresponds to this kind of
+    #: :class:`ProtocolError <h2.exceptions.ProtocolError>`
+    error_code = h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
diff --git a/tools/third_party/h2/h2/frame_buffer.py b/tools/third_party/h2/h2/frame_buffer.py
new file mode 100755
index 0000000..e79f6ec
--- /dev/null
+++ b/tools/third_party/h2/h2/frame_buffer.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+"""
+h2/frame_buffer
+~~~~~~~~~~~~~~~
+
+A data structure that provides a way to iterate over a byte buffer in terms of
+frames.
+"""
+from hyperframe.exceptions import InvalidFrameError
+from hyperframe.frame import (
+    Frame, HeadersFrame, ContinuationFrame, PushPromiseFrame
+)
+
+from .exceptions import (
+    ProtocolError, FrameTooLargeError, FrameDataMissingError
+)
+
+# To avoid a DOS attack based on sending loads of continuation frames, we limit
+# the maximum number we're prepared to receive. In this case, we'll set the
+# limit to 64, which means the largest encoded header block we can receive by
+# default is 262144 bytes long, and the largest possible *at all* is 1073741760
+# bytes long.
+#
+# This value seems reasonable for now, but in future we may want to evaluate
+# making it configurable.
+CONTINUATION_BACKLOG = 64
+
+
+class FrameBuffer(object):
+    """
+    This is a data structure that expects to act as a buffer for HTTP/2 data
+    that allows iteration in terms of H2 frames.
+    """
+    def __init__(self, server=False):
+        self.data = b''
+        self.max_frame_size = 0
+        self._preamble = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n' if server else b''
+        self._preamble_len = len(self._preamble)
+        self._headers_buffer = []
+
+    def add_data(self, data):
+        """
+        Add more data to the frame buffer.
+
+        :param data: A bytestring containing the byte buffer.
+        """
+        if self._preamble_len:
+            data_len = len(data)
+            of_which_preamble = min(self._preamble_len, data_len)
+
+            if self._preamble[:of_which_preamble] != data[:of_which_preamble]:
+                raise ProtocolError("Invalid HTTP/2 preamble.")
+
+            data = data[of_which_preamble:]
+            self._preamble_len -= of_which_preamble
+            self._preamble = self._preamble[of_which_preamble:]
+
+        self.data += data
+
+    def _parse_frame_header(self, data):
+        """
+        Parses the frame header from the data. Either returns a tuple of
+        (frame, length), or throws an exception. The returned frame may be None
+        if the frame is of unknown type.
+        """
+        try:
+            frame, length = Frame.parse_frame_header(data[:9])
+        except ValueError as e:
+            # The frame header is invalid. This is a ProtocolError
+            raise ProtocolError("Invalid frame header received: %s" % str(e))
+
+        return frame, length
+
+    def _validate_frame_length(self, length):
+        """
+        Confirm that the frame is an appropriate length.
+        """
+        if length > self.max_frame_size:
+            raise FrameTooLargeError(
+                "Received overlong frame: length %d, max %d" %
+                (length, self.max_frame_size)
+            )
+
+    def _update_header_buffer(self, f):
+        """
+        Updates the internal header buffer. Returns a frame that should replace
+        the current one. May throw exceptions if this frame is invalid.
+        """
+        # Check if we're in the middle of a headers block. If we are, this
+        # frame *must* be a CONTINUATION frame with the same stream ID as the
+        # leading HEADERS or PUSH_PROMISE frame. Anything else is a
+        # ProtocolError. If the frame *is* valid, append it to the header
+        # buffer.
+        if self._headers_buffer:
+            stream_id = self._headers_buffer[0].stream_id
+            valid_frame = (
+                f is not None and
+                isinstance(f, ContinuationFrame) and
+                f.stream_id == stream_id
+            )
+            if not valid_frame:
+                raise ProtocolError("Invalid frame during header block.")
+
+            # Append the frame to the buffer.
+            self._headers_buffer.append(f)
+            if len(self._headers_buffer) > CONTINUATION_BACKLOG:
+                raise ProtocolError("Too many continuation frames received.")
+
+            # If this is the end of the header block, then we want to build a
+            # mutant HEADERS frame that's massive. Use the original one we got,
+            # then set END_HEADERS and set its data appropriately. If it's not
+            # the end of the block, lose the current frame: we can't yield it.
+            if 'END_HEADERS' in f.flags:
+                f = self._headers_buffer[0]
+                f.flags.add('END_HEADERS')
+                f.data = b''.join(x.data for x in self._headers_buffer)
+                self._headers_buffer = []
+            else:
+                f = None
+        elif (isinstance(f, (HeadersFrame, PushPromiseFrame)) and
+                'END_HEADERS' not in f.flags):
+            # This is the start of a headers block! Save the frame off and then
+            # act like we didn't receive one.
+            self._headers_buffer.append(f)
+            f = None
+
+        return f
+
+    # The methods below support the iterator protocol.
+    def __iter__(self):
+        return self
+
+    def next(self):  # Python 2
+        # First, check that we have enough data to successfully parse the
+        # next frame header. If not, bail. Otherwise, parse it.
+        if len(self.data) < 9:
+            raise StopIteration()
+
+        try:
+            f, length = self._parse_frame_header(self.data)
+        except InvalidFrameError:  # pragma: no cover
+            raise ProtocolError("Received frame with invalid frame header.")
+
+        # Next, check that we have enough length to parse the frame body. If
+        # not, bail, leaving the frame header data in the buffer for next time.
+        if len(self.data) < length + 9:
+            raise StopIteration()
+
+        # Confirm the frame has an appropriate length.
+        self._validate_frame_length(length)
+
+        # Don't try to parse the body if we didn't get a frame we know about:
+        # there's nothing we can do with it anyway.
+        if f is not None:
+            try:
+                f.parse_body(memoryview(self.data[9:9+length]))
+            except InvalidFrameError:
+                raise FrameDataMissingError("Frame data missing or invalid")
+
+        # At this point, as we know we'll use or discard the entire frame, we
+        # can update the data.
+        self.data = self.data[9+length:]
+
+        # Pass the frame through the header buffer.
+        f = self._update_header_buffer(f)
+
+        # If we got a frame we didn't understand or shouldn't yield, rather
+        # than return None it'd be better if we just tried to get the next
+        # frame in the sequence instead. Recurse back into ourselves to do
+        # that. This is safe because the amount of work we have to do here is
+        # strictly bounded by the length of the buffer.
+        return f if f is not None else self.next()
+
+    def __next__(self):  # Python 3
+        return self.next()
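
``FrameBuffer`` is internal plumbing (the connection object owns one), but its
contract is easy to exercise on its own: bytes go in through ``add_data()``,
complete frames come out through iteration, and an incomplete frame simply
stays buffered until more bytes arrive. A small sketch against the class as
vendored above (the ``PingFrame`` helper comes from hyperframe):

    from hyperframe.frame import PingFrame

    from h2.frame_buffer import FrameBuffer

    buf = FrameBuffer(server=False)
    buf.max_frame_size = 16384   # normally mirrors SETTINGS_MAX_FRAME_SIZE

    # A PING frame serialised by hyperframe, split across two "reads".
    wire = PingFrame(0, opaque_data=b"12345678").serialize()
    buf.add_data(wire[:5])
    print(list(buf))             # [] (not even a full 9-byte header yet)
    buf.add_data(wire[5:])
    print(list(buf))             # one complete PingFrame
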
diff --git a/tools/third_party/h2/h2/settings.py b/tools/third_party/h2/h2/settings.py
new file mode 100755
index 0000000..3da7203
--- /dev/null
+++ b/tools/third_party/h2/h2/settings.py
@@ -0,0 +1,313 @@
+# -*- coding: utf-8 -*-
+"""
+h2/settings
+~~~~~~~~~~~
+
+This module contains an HTTP/2 settings object. This object provides a simple
+API for manipulating HTTP/2 settings, keeping track of both the current active
+state of the settings and the unacknowledged future values of the settings.
+"""
+import collections
+import enum
+
+from hyperframe.frame import SettingsFrame
+
+from h2.errors import ErrorCodes
+from h2.exceptions import InvalidSettingsValueError
+
+
+class SettingCodes(enum.IntEnum):
+    """
+    All known HTTP/2 setting codes.
+
+    .. versionadded:: 2.6.0
+    """
+
+    #: Allows the sender to inform the remote endpoint of the maximum size of
+    #: the header compression table used to decode header blocks, in octets.
+    HEADER_TABLE_SIZE = SettingsFrame.HEADER_TABLE_SIZE
+
+    #: This setting can be used to disable server push. To disable server push
+    #: on a client, set this to 0.
+    ENABLE_PUSH = SettingsFrame.ENABLE_PUSH
+
+    #: Indicates the maximum number of concurrent streams that the sender will
+    #: allow.
+    MAX_CONCURRENT_STREAMS = SettingsFrame.MAX_CONCURRENT_STREAMS
+
+    #: Indicates the sender's initial window size (in octets) for stream-level
+    #: flow control.
+    INITIAL_WINDOW_SIZE = SettingsFrame.INITIAL_WINDOW_SIZE
+
+    #: Indicates the size of the largest frame payload that the sender is
+    #: willing to receive, in octets.
+    MAX_FRAME_SIZE = SettingsFrame.MAX_FRAME_SIZE
+
+    #: This advisory setting informs a peer of the maximum size of header list
+    #: that the sender is prepared to accept, in octets.  The value is based on
+    #: the uncompressed size of header fields, including the length of the name
+    #: and value in octets plus an overhead of 32 octets for each header field.
+    MAX_HEADER_LIST_SIZE = SettingsFrame.MAX_HEADER_LIST_SIZE
+
+
+def _setting_code_from_int(code):
+    """
+    Given an integer setting code, returns either one of :class:`SettingCodes
+    <h2.settings.SettingCodes>` or, if not present in the known set of codes,
+    returns the integer directly.
+    """
+    try:
+        return SettingCodes(code)
+    except ValueError:
+        return code
+
+
+class ChangedSetting:
+
+    def __init__(self, setting, original_value, new_value):
+        #: The setting code given. Either one of :class:`SettingCodes
+        #: <h2.settings.SettingCodes>` or ``int``
+        #:
+        #: .. versionchanged:: 2.6.0
+        self.setting = setting
+
+        #: The original value before being changed.
+        self.original_value = original_value
+
+        #: The new value after being changed.
+        self.new_value = new_value
+
+    def __repr__(self):
+        return (
+            "ChangedSetting(setting=%s, original_value=%s, "
+            "new_value=%s)"
+        ) % (
+            self.setting,
+            self.original_value,
+            self.new_value
+        )
+
+
+class Settings(collections.MutableMapping):
+    """
+    An object that encapsulates HTTP/2 settings state.
+
+    HTTP/2 Settings are a complex beast. Each party, remote and local, has its
+    own settings and a view of the other party's settings. When a settings
+    frame is emitted by a peer it cannot assume that the new settings values
+    are in place until the remote peer acknowledges the setting. In principle,
+    multiple settings changes can be "in flight" at the same time, all with
+    different values.
+
+    This object encapsulates this mess. It provides a dict-like interface to
+    settings, which return the *current* values of the settings in question.
+    Additionally, it keeps track of the stack of proposed values: each time an
+    acknowledgement is sent/received, it updates the current values with the
+    stack of proposed values. On top of all that, it validates the values to
+    make sure they're allowed, and raises :class:`InvalidSettingsValueError
+    <h2.exceptions.InvalidSettingsValueError>` if they are not.
+
+    Finally, this object understands what the default values of the HTTP/2
+    settings are, and sets those defaults appropriately.
+
+    .. versionchanged:: 2.2.0
+       Added the ``initial_values`` parameter.
+
+    .. versionchanged:: 2.5.0
+       Added the ``max_header_list_size`` property.
+
+    :param client: (optional) Whether these settings should be defaulted for a
+        client implementation or a server implementation. Defaults to ``True``.
+    :type client: ``bool``
+    :param initial_values: (optional) Any initial values the user would like
+        set, rather than RFC 7540's defaults.
+    :type initial_values: ``MutableMapping``
+    """
+    def __init__(self, client=True, initial_values=None):
+        # Backing object for the settings. This is a dictionary of
+        # (setting: [list of values]), where the first value in the list is the
+        # current value of the setting. Strictly this doesn't use lists but
+        # instead uses collections.deque to avoid repeated memory allocations.
+        #
+        # This contains the default values for HTTP/2.
+        self._settings = {
+            SettingCodes.HEADER_TABLE_SIZE: collections.deque([4096]),
+            SettingCodes.ENABLE_PUSH: collections.deque([int(client)]),
+            SettingCodes.INITIAL_WINDOW_SIZE: collections.deque([65535]),
+            SettingCodes.MAX_FRAME_SIZE: collections.deque([16384]),
+        }
+        if initial_values is not None:
+            for key, value in initial_values.items():
+                invalid = _validate_setting(key, value)
+                if invalid:
+                    raise InvalidSettingsValueError(
+                        "Setting %d has invalid value %d" % (key, value),
+                        error_code=invalid
+                    )
+                self._settings[key] = collections.deque([value])
+
+    def acknowledge(self):
+        """
+        The settings have been acknowledged, either by the user (remote
+        settings) or by the remote peer (local settings).
+
+        :returns: A dict of {setting: ChangedSetting} that were applied.
+        """
+        changed_settings = {}
+
+        # If there is more than one setting in the list, we have a setting
+        # value outstanding. Update them.
+        for k, v in self._settings.items():
+            if len(v) > 1:
+                old_setting = v.popleft()
+                new_setting = v[0]
+                changed_settings[k] = ChangedSetting(
+                    k, old_setting, new_setting
+                )
+
+        return changed_settings
+
+    # Provide easy-access to well known settings.
+    @property
+    def header_table_size(self):
+        """
+        The current value of the :data:`HEADER_TABLE_SIZE
+        <h2.settings.SettingCodes.HEADER_TABLE_SIZE>` setting.
+        """
+        return self[SettingCodes.HEADER_TABLE_SIZE]
+
+    @header_table_size.setter
+    def header_table_size(self, value):
+        self[SettingCodes.HEADER_TABLE_SIZE] = value
+
+    @property
+    def enable_push(self):
+        """
+        The current value of the :data:`ENABLE_PUSH
+        <h2.settings.SettingCodes.ENABLE_PUSH>` setting.
+        """
+        return self[SettingCodes.ENABLE_PUSH]
+
+    @enable_push.setter
+    def enable_push(self, value):
+        self[SettingCodes.ENABLE_PUSH] = value
+
+    @property
+    def initial_window_size(self):
+        """
+        The current value of the :data:`INITIAL_WINDOW_SIZE
+        <h2.settings.SettingCodes.INITIAL_WINDOW_SIZE>` setting.
+        """
+        return self[SettingCodes.INITIAL_WINDOW_SIZE]
+
+    @initial_window_size.setter
+    def initial_window_size(self, value):
+        self[SettingCodes.INITIAL_WINDOW_SIZE] = value
+
+    @property
+    def max_frame_size(self):
+        """
+        The current value of the :data:`MAX_FRAME_SIZE
+        <h2.settings.SettingCodes.MAX_FRAME_SIZE>` setting.
+        """
+        return self[SettingCodes.MAX_FRAME_SIZE]
+
+    @max_frame_size.setter
+    def max_frame_size(self, value):
+        self[SettingCodes.MAX_FRAME_SIZE] = value
+
+    @property
+    def max_concurrent_streams(self):
+        """
+        The current value of the :data:`MAX_CONCURRENT_STREAMS
+        <h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS>` setting.
+        """
+        return self.get(SettingCodes.MAX_CONCURRENT_STREAMS, 2**32+1)
+
+    @max_concurrent_streams.setter
+    def max_concurrent_streams(self, value):
+        self[SettingCodes.MAX_CONCURRENT_STREAMS] = value
+
+    @property
+    def max_header_list_size(self):
+        """
+        The current value of the :data:`MAX_HEADER_LIST_SIZE
+        <h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE>` setting. If not set,
+        returns ``None``, which means unlimited.
+
+        .. versionadded:: 2.5.0
+        """
+        return self.get(SettingCodes.MAX_HEADER_LIST_SIZE, None)
+
+    @max_header_list_size.setter
+    def max_header_list_size(self, value):
+        self[SettingCodes.MAX_HEADER_LIST_SIZE] = value
+
+    # Implement the MutableMapping API.
+    def __getitem__(self, key):
+        val = self._settings[key][0]
+
+        # Things that were created when a setting was received should stay
+        # KeyError'd.
+        if val is None:
+            raise KeyError
+
+        return val
+
+    def __setitem__(self, key, value):
+        invalid = _validate_setting(key, value)
+        if invalid:
+            raise InvalidSettingsValueError(
+                "Setting %d has invalid value %d" % (key, value),
+                error_code=invalid
+            )
+
+        try:
+            items = self._settings[key]
+        except KeyError:
+            items = collections.deque([None])
+            self._settings[key] = items
+
+        items.append(value)
+
+    def __delitem__(self, key):
+        del self._settings[key]
+
+    def __iter__(self):
+        return self._settings.__iter__()
+
+    def __len__(self):
+        return len(self._settings)
+
+    def __eq__(self, other):
+        if isinstance(other, Settings):
+            return self._settings == other._settings
+        else:
+            return NotImplemented
+
+    def __ne__(self, other):
+        if isinstance(other, Settings):
+            return not self == other
+        else:
+            return NotImplemented
+
+
+def _validate_setting(setting, value):
+    """
+    Confirms that a specific setting has a well-formed value. If the setting is
+    invalid, returns an error code. Otherwise, returns 0 (NO_ERROR).
+    """
+    if setting == SettingCodes.ENABLE_PUSH:
+        if value not in (0, 1):
+            return ErrorCodes.PROTOCOL_ERROR
+    elif setting == SettingCodes.INITIAL_WINDOW_SIZE:
+        if not 0 <= value <= 2147483647:  # 2^31 - 1
+            return ErrorCodes.FLOW_CONTROL_ERROR
+    elif setting == SettingCodes.MAX_FRAME_SIZE:
+        if not 16384 <= value <= 16777215:  # 2^14 and 2^24 - 1
+            return ErrorCodes.PROTOCOL_ERROR
+    elif setting == SettingCodes.MAX_HEADER_LIST_SIZE:
+        if value < 0:
+            return ErrorCodes.PROTOCOL_ERROR
+
+    return 0
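
The deque-per-setting layout is what makes acknowledgement cheap: a newly set
value is appended as "pending" and only becomes current once ``acknowledge()``
pops the old one. A quick illustration driving the class directly (in normal
use ``H2Connection`` does this bookkeeping for you):

    from h2.exceptions import InvalidSettingsValueError
    from h2.settings import SettingCodes, Settings

    s = Settings(client=True,
                 initial_values={SettingCodes.MAX_FRAME_SIZE: 32768})

    s.initial_window_size = 1048576   # queued as a pending value...
    print(s.initial_window_size)      # ...so 65535 is still the current one

    changed = s.acknowledge()         # {INITIAL_WINDOW_SIZE: ChangedSetting(...)}
    print(s.initial_window_size)      # 1048576

    try:
        s.enable_push = 2             # only 0 or 1 are valid
    except InvalidSettingsValueError as exc:
        print(exc.error_code)         # ErrorCodes.PROTOCOL_ERROR
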
diff --git a/tools/third_party/h2/h2/stream.py b/tools/third_party/h2/h2/stream.py
new file mode 100755
index 0000000..489ea4d
--- /dev/null
+++ b/tools/third_party/h2/h2/stream.py
@@ -0,0 +1,1403 @@
+# -*- coding: utf-8 -*-
+"""
+h2/stream
+~~~~~~~~~
+
+An implementation of an HTTP/2 stream.
+"""
+from enum import Enum, IntEnum
+from hpack import HeaderTuple
+from hyperframe.frame import (
+    HeadersFrame, ContinuationFrame, DataFrame, WindowUpdateFrame,
+    RstStreamFrame, PushPromiseFrame, AltSvcFrame
+)
+
+from .errors import ErrorCodes, _error_code_from_int
+from .events import (
+    RequestReceived, ResponseReceived, DataReceived, WindowUpdated,
+    StreamEnded, PushedStreamReceived, StreamReset, TrailersReceived,
+    InformationalResponseReceived, AlternativeServiceAvailable,
+    _ResponseSent, _RequestSent, _TrailersSent, _PushedRequestSent
+)
+from .exceptions import (
+    ProtocolError, StreamClosedError, InvalidBodyLengthError, FlowControlError
+)
+from .utilities import (
+    guard_increment_window, is_informational_response, authority_from_headers,
+    validate_headers, validate_outbound_headers, normalize_outbound_headers,
+    HeaderValidationFlags, extract_method_header, normalize_inbound_headers
+)
+from .windows import WindowManager
+
+
+class StreamState(IntEnum):
+    IDLE = 0
+    RESERVED_REMOTE = 1
+    RESERVED_LOCAL = 2
+    OPEN = 3
+    HALF_CLOSED_REMOTE = 4
+    HALF_CLOSED_LOCAL = 5
+    CLOSED = 6
+
+
+class StreamInputs(Enum):
+    SEND_HEADERS = 0
+    SEND_PUSH_PROMISE = 1
+    SEND_RST_STREAM = 2
+    SEND_DATA = 3
+    SEND_WINDOW_UPDATE = 4
+    SEND_END_STREAM = 5
+    RECV_HEADERS = 6
+    RECV_PUSH_PROMISE = 7
+    RECV_RST_STREAM = 8
+    RECV_DATA = 9
+    RECV_WINDOW_UPDATE = 10
+    RECV_END_STREAM = 11
+    RECV_CONTINUATION = 12  # Added in 2.0.0
+    SEND_INFORMATIONAL_HEADERS = 13  # Added in 2.2.0
+    RECV_INFORMATIONAL_HEADERS = 14  # Added in 2.2.0
+    SEND_ALTERNATIVE_SERVICE = 15  # Added in 2.3.0
+    RECV_ALTERNATIVE_SERVICE = 16  # Added in 2.3.0
+    UPGRADE_CLIENT = 17  # Added 2.3.0
+    UPGRADE_SERVER = 18  # Added 2.3.0
+
+
+class StreamClosedBy(Enum):
+    SEND_END_STREAM = 0
+    RECV_END_STREAM = 1
+    SEND_RST_STREAM = 2
+    RECV_RST_STREAM = 3
+
+
+# This array is initialized once, and is indexed by the stream states above.
+# It indicates whether a stream in the given state is open. The reason we do
+# this is that we potentially check whether a stream in a given state is open
+# quite frequently: given that we check so often, we should do so in the
+# fastest and most performant way possible.
+STREAM_OPEN = [False for _ in range(0, len(StreamState))]
+STREAM_OPEN[StreamState.OPEN] = True
+STREAM_OPEN[StreamState.HALF_CLOSED_LOCAL] = True
+STREAM_OPEN[StreamState.HALF_CLOSED_REMOTE] = True
+
+
+class H2StreamStateMachine(object):
+    """
+    A single HTTP/2 stream state machine.
+
+    This stream object implements basically the state machine described in
+    RFC 7540 section 5.1.
+
+    :param stream_id: The stream ID of this stream. This is stored primarily
+        for logging purposes.
+    """
+    def __init__(self, stream_id):
+        self.state = StreamState.IDLE
+        self.stream_id = stream_id
+
+        #: Whether this peer is the client side of this stream.
+        self.client = None
+
+        # Whether trailers have been sent/received on this stream or not.
+        self.headers_sent = None
+        self.trailers_sent = None
+        self.headers_received = None
+        self.trailers_received = None
+
+        # How the stream was closed. One of StreamClosedBy.
+        self.stream_closed_by = None
+
+    def process_input(self, input_):
+        """
+        Process a specific input in the state machine.
+        """
+        if not isinstance(input_, StreamInputs):
+            raise ValueError("Input must be an instance of StreamInputs")
+
+        try:
+            func, target_state = _transitions[(self.state, input_)]
+        except KeyError:
+            old_state = self.state
+            self.state = StreamState.CLOSED
+            raise ProtocolError(
+                "Invalid input %s in state %s" % (input_, old_state)
+            )
+        else:
+            previous_state = self.state
+            self.state = target_state
+            if func is not None:
+                try:
+                    return func(self, previous_state)
+                except ProtocolError:
+                    self.state = StreamState.CLOSED
+                    raise
+                except AssertionError as e:  # pragma: no cover
+                    self.state = StreamState.CLOSED
+                    raise ProtocolError(e)
+
+            return []
+
+    def request_sent(self, previous_state):
+        """
+        Fires when a request is sent.
+        """
+        self.client = True
+        self.headers_sent = True
+        event = _RequestSent()
+
+        return [event]
+
+    def response_sent(self, previous_state):
+        """
+        Fires when something that should be a response is sent. This 'response'
+        may actually be trailers.
+        """
+        if not self.headers_sent:
+            if self.client is True or self.client is None:
+                raise ProtocolError("Client cannot send responses.")
+            self.headers_sent = True
+            event = _ResponseSent()
+        else:
+            assert not self.trailers_sent
+            self.trailers_sent = True
+            event = _TrailersSent()
+
+        return [event]
+
+    def request_received(self, previous_state):
+        """
+        Fires when a request is received.
+        """
+        assert not self.headers_received
+        assert not self.trailers_received
+
+        self.client = False
+        self.headers_received = True
+        event = RequestReceived()
+
+        event.stream_id = self.stream_id
+        return [event]
+
+    def response_received(self, previous_state):
+        """
+        Fires when a response is received. Also disambiguates between responses
+        and trailers.
+        """
+        if not self.headers_received:
+            assert self.client is True
+            self.headers_received = True
+            event = ResponseReceived()
+        else:
+            assert not self.trailers_received
+            self.trailers_received = True
+            event = TrailersReceived()
+
+        event.stream_id = self.stream_id
+        return [event]
+
+    def data_received(self, previous_state):
+        """
+        Fires when data is received.
+        """
+        event = DataReceived()
+        event.stream_id = self.stream_id
+        return [event]
+
+    def window_updated(self, previous_state):
+        """
+        Fires when a window update frame is received.
+        """
+        event = WindowUpdated()
+        event.stream_id = self.stream_id
+        return [event]
+
+    def stream_half_closed(self, previous_state):
+        """
+        Fires when an END_STREAM flag is received in the OPEN state,
+        transitioning this stream to a HALF_CLOSED_REMOTE state.
+        """
+        event = StreamEnded()
+        event.stream_id = self.stream_id
+        return [event]
+
+    def stream_ended(self, previous_state):
+        """
+        Fires when a stream is cleanly ended.
+        """
+        self.stream_closed_by = StreamClosedBy.RECV_END_STREAM
+        event = StreamEnded()
+        event.stream_id = self.stream_id
+        return [event]
+
+    def stream_reset(self, previous_state):
+        """
+        Fired when a stream is forcefully reset.
+        """
+        self.stream_closed_by = StreamClosedBy.RECV_RST_STREAM
+        event = StreamReset()
+        event.stream_id = self.stream_id
+        return [event]
+
+    def send_new_pushed_stream(self, previous_state):
+        """
+        Fires on the newly pushed stream, when pushed by the local peer.
+
+        No event here, but definitionally this peer must be a server.
+        """
+        assert self.client is None
+        self.client = False
+        self.headers_received = True
+        return []
+
+    def recv_new_pushed_stream(self, previous_state):
+        """
+        Fires on the newly pushed stream, when pushed by the remote peer.
+
+        No event here, but definitionally this peer must be a client.
+        """
+        assert self.client is None
+        self.client = True
+        self.headers_sent = True
+        return []
+
+    def send_push_promise(self, previous_state):
+        """
+        Fires on the already-existing stream when a PUSH_PROMISE frame is sent.
+        We may only send PUSH_PROMISE frames if we're a server.
+        """
+        if self.client is True:
+            raise ProtocolError("Cannot push streams from client peers.")
+
+        event = _PushedRequestSent()
+        return [event]
+
+    def recv_push_promise(self, previous_state):
+        """
+        Fires on the already-existing stream when a PUSH_PROMISE frame is
+        received. We may only receive PUSH_PROMISE frames if we're a client.
+
+        Fires a PushedStreamReceived event.
+        """
+        if not self.client:
+            if self.client is None:  # pragma: no cover
+                msg = "Idle streams cannot receive pushes"
+            else:  # pragma: no cover
+                msg = "Cannot receive pushed streams as a server"
+            raise ProtocolError(msg)
+
+        event = PushedStreamReceived()
+        event.parent_stream_id = self.stream_id
+        return [event]
+
+    def send_end_stream(self, previous_state):
+        """
+        Called when an attempt is made to send END_STREAM in the
+        HALF_CLOSED_REMOTE state.
+        """
+        self.stream_closed_by = StreamClosedBy.SEND_END_STREAM
+
+    def send_reset_stream(self, previous_state):
+        """
+        Called when an attempt is made to send RST_STREAM in a non-closed
+        stream state.
+        """
+        self.stream_closed_by = StreamClosedBy.SEND_RST_STREAM
+
+    def reset_stream_on_error(self, previous_state):
+        """
+        Called when we need to forcefully emit another RST_STREAM frame on
+        behalf of the state machine.
+
+        If this is the first time we've done this, we should also hang an event
+        off the StreamClosedError so that the user can be informed. We know
+        it's the first time we've done this if the stream is currently in a
+        state other than CLOSED.
+        """
+        self.stream_closed_by = StreamClosedBy.SEND_RST_STREAM
+
+        error = StreamClosedError(self.stream_id)
+
+        event = StreamReset()
+        event.stream_id = self.stream_id
+        event.error_code = ErrorCodes.STREAM_CLOSED
+        event.remote_reset = False
+        error._events = [event]
+        raise error
+
+    def recv_on_closed_stream(self, previous_state):
+        """
+        Called when an unexpected frame is received on an already-closed
+        stream.
+
+        An endpoint that receives an unexpected frame should treat it as
+        a stream error or connection error with type STREAM_CLOSED, depending
+        on the specific frame. The error handling is done at a higher level:
+        this just raises the appropriate error.
+        """
+        raise StreamClosedError(self.stream_id)
+
+    def send_on_closed_stream(self, previous_state):
+        """
+        Called when an attempt is made to send data on an already-closed
+        stream.
+
+        This essentially overrides the standard logic by throwing a
+        more-specific error: StreamClosedError. This is a ProtocolError, so it
+        matches the standard API of the state machine, but provides more detail
+        to the user.
+        """
+        raise StreamClosedError(self.stream_id)
+
+    def recv_push_on_closed_stream(self, previous_state):
+        """
+        Called when a PUSH_PROMISE frame is received on an already-closed
+        stream.
+
+        If the stream was closed by us sending a RST_STREAM frame, then we
+        presume that the PUSH_PROMISE was in flight when we reset the parent
+        stream. Rather than accept the new stream, we just reset it.
+        Otherwise, we should call this a PROTOCOL_ERROR: pushing a stream on a
+        naturally closed stream is a real problem because it creates a brand
+        new stream that the remote peer now believes exists.
+        """
+        assert self.stream_closed_by is not None
+
+        if self.stream_closed_by == StreamClosedBy.SEND_RST_STREAM:
+            raise StreamClosedError(self.stream_id)
+        else:
+            raise ProtocolError("Attempted to push on closed stream.")
+
+    def send_push_on_closed_stream(self, previous_state):
+        """
+        Called when an attempt is made to push on an already-closed stream.
+
+        This essentially overrides the standard logic by providing a more
+        useful error message. It's necessary because simply indicating that the
+        stream is closed is not enough: there is now a new stream that is not
+        allowed to be there. The only recourse is to tear the whole connection
+        down.
+        """
+        raise ProtocolError("Attempted to push on closed stream.")
+
+    def window_on_closed_stream(self, previous_state):
+        """
+        Called when a WINDOW_UPDATE frame is received on an already-closed
+        stream.
+
+        If we sent an END_STREAM frame, we just ignore the frame, as instructed
+        in RFC 7540 Section 5.1. Technically we should eventually consider
+        WINDOW_UPDATE in this state an error, but we don't have access to a
+        clock so we just always allow it. If we closed the stream for any other
+        reason, we behave as we do for receiving any other frame on a closed
+        stream.
+        """
+        assert self.stream_closed_by is not None
+
+        if self.stream_closed_by == StreamClosedBy.SEND_END_STREAM:
+            return []
+        return self.recv_on_closed_stream(previous_state)
+
+    def reset_on_closed_stream(self, previous_state):
+        """
+        Called when a RST_STREAM frame is received on an already-closed stream.
+
+        If we sent an END_STREAM frame, we just ignore the frame, as instructed
+        in RFC 7540 Section 5.1. Technically we should eventually consider
+        RST_STREAM in this state an error, but we don't have access to a clock
+        so we just always allow it. If we closed the stream for any other
+        reason, we behave as we do for receiving any other frame on a closed
+        stream.
+        """
+        assert self.stream_closed_by is not None
+
+        if self.stream_closed_by is StreamClosedBy.SEND_END_STREAM:
+            return []
+        return self.recv_on_closed_stream(previous_state)
+
+    def send_informational_response(self, previous_state):
+        """
+        Called when an informational header block is sent (that is, a block
+        where the :status header has a 1XX value).
+
+        Only enforces that these are sent *before* final headers are sent.
+        """
+        if self.headers_sent:
+            raise ProtocolError("Informational response after final response")
+
+        event = _ResponseSent()
+        return [event]
+
+    def recv_informational_response(self, previous_state):
+        """
+        Called when an informational header block is received (that is, a block
+        where the :status header has a 1XX value).
+        """
+        if self.headers_received:
+            raise ProtocolError("Informational response after final response")
+
+        event = InformationalResponseReceived()
+        event.stream_id = self.stream_id
+        return [event]
+
+    def recv_alt_svc(self, previous_state):
+        """
+        Called when receiving an ALTSVC frame.
+
+        RFC 7838 allows us to receive ALTSVC frames at any stream state, which
+        is really absurdly overzealous. For that reason, we want to limit the
+        states in which we can actually receive it. It's really only sensible
+        to receive it after we've sent our own headers and before the server
+        has sent its header block: the server can't guarantee that we have any
+        state around after it completes its header block, and the server
+        doesn't know what origin we're talking about before we've sent ours.
+
+        For that reason, this function applies a few extra checks on both state
+        and some of the little state variables we keep around. If those suggest
+        an unreasonable situation for the ALTSVC frame to have been sent in,
+        we quietly ignore it (as RFC 7838 suggests).
+
+        This function is also *not* always called by the state machine. In some
+        states (IDLE, RESERVED_LOCAL, CLOSED) we don't bother to call it,
+        because we know the frame cannot be valid in that state (IDLE because
+        the server cannot know what origin the stream applies to, CLOSED
+        because the server cannot assume we still have state around,
+        RESERVED_LOCAL because by definition if we're in the RESERVED_LOCAL
+        state then *we* are the server).
+        """
+        # Servers can't receive ALTSVC frames, but RFC 7838 tells us to ignore
+        # them.
+        if self.client is False:
+            return []
+
+        # If we've received the response headers from the server they can't
+        # guarantee we still have any state around. Other implementations
+        # (like nghttp2) ignore ALTSVC in this state, so we will too.
+        if self.headers_received:
+            return []
+
+        # Otherwise, this is a sensible enough frame to have received. Return
+        # the event and let it get populated.
+        return [AlternativeServiceAvailable()]
+
+    def send_alt_svc(self, previous_state):
+        """
+        Called when sending an ALTSVC frame on this stream.
+
+        For consistency with the restrictions we apply on receiving ALTSVC
+        frames in ``recv_alt_svc``, we want to restrict when users can send
+        ALTSVC frames to the situations when we ourselves would accept them.
+
+        That means: when we are a server, when we have received the request
+        headers, and when we have not yet sent our own response headers.
+        """
+        # We should not send ALTSVC after we've sent response headers, as the
+        # client may have disposed of its state.
+        if self.headers_sent:
+            raise ProtocolError(
+                "Cannot send ALTSVC after sending response headers."
+            )
+
+        return
+
+
+# STATE MACHINE
+#
+# The stream state machine is defined here to avoid the need to allocate it
+# repeatedly for each stream. It cannot be defined in the stream class because
+# it needs to be able to reference the callbacks defined on the class, but
+# because Python's scoping rules are weird the class object is not actually in
+# scope during the body of the class object.
+#
+# For the sake of clarity, we reproduce the RFC 7540 state machine here:
+#
+#                          +--------+
+#                  send PP |        | recv PP
+#                 ,--------|  idle  |--------.
+#                /         |        |         \
+#               v          +--------+          v
+#        +----------+          |           +----------+
+#        |          |          | send H /  |          |
+# ,------| reserved |          | recv H    | reserved |------.
+# |      | (local)  |          |           | (remote) |      |
+# |      +----------+          v           +----------+      |
+# |          |             +--------+             |          |
+# |          |     recv ES |        | send ES     |          |
+# |   send H |     ,-------|  open  |-------.     | recv H   |
+# |          |    /        |        |        \    |          |
+# |          v   v         +--------+         v   v          |
+# |      +----------+          |           +----------+      |
+# |      |   half   |          |           |   half   |      |
+# |      |  closed  |          | send R /  |  closed  |      |
+# |      | (remote) |          | recv R    | (local)  |      |
+# |      +----------+          |           +----------+      |
+# |           |                |                 |           |
+# |           | send ES /      |       recv ES / |           |
+# |           | send R /       v        send R / |           |
+# |           | recv R     +--------+   recv R   |           |
+# | send R /  `----------->|        |<-----------'  send R / |
+# | recv R                 | closed |               recv R   |
+# `----------------------->|        |<----------------------'
+#                          +--------+
+#
+#    send:   endpoint sends this frame
+#    recv:   endpoint receives this frame
+#
+#    H:  HEADERS frame (with implied CONTINUATIONs)
+#    PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
+#    ES: END_STREAM flag
+#    R:  RST_STREAM frame
+#
+# For the purposes of this state machine we treat HEADERS and their
+# associated CONTINUATION frames as a single jumbo frame. The protocol
+# allows/requires this by preventing other frames from being interleaved in
+# between HEADERS/CONTINUATION frames. However, if a CONTINUATION frame is
+# received without a prior HEADERS frame, it *will* be passed to this state
+# machine. The state machine should always reject that frame, either as an
+# invalid transition or because the stream is closed.
+#
+# There is a confusing relationship around PUSH_PROMISE frames. The state
+# machine above considers them to be frames belonging to the new stream,
+# which is *somewhat* true. However, they are sent with the stream ID of
+# their related stream, and are only sendable in some cases.
+# For this reason, our state machine implementation below allows for
+# PUSH_PROMISE frames not only in the IDLE state (as in the diagram), but also
+# in the OPEN, HALF_CLOSED_LOCAL, and HALF_CLOSED_REMOTE states.
+# Essentially, for hyper-h2, PUSH_PROMISE frames are effectively sent on
+# two streams.
+#
+# The _transitions dictionary contains a mapping of tuples of
+# (state, input) to tuples of (side_effect_function, end_state). This
+# map contains all allowed transitions: anything not in this map is
+# invalid and immediately causes a transition to ``closed``.
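+#
+# As a rough illustration (not part of the vendored logic), looking up a
+# transition amounts to a dictionary access on this map:
+#
+#     key = (StreamState.IDLE, StreamInputs.SEND_HEADERS)
+#     side_effect, target_state = _transitions[key]
+#     # side_effect is H2StreamStateMachine.request_sent and target_state is
+#     # StreamState.OPEN; a KeyError means the input is not allowed in the
+#     # current state.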
+_transitions = {
+    # State: idle
+    (StreamState.IDLE, StreamInputs.SEND_HEADERS):
+        (H2StreamStateMachine.request_sent, StreamState.OPEN),
+    (StreamState.IDLE, StreamInputs.RECV_HEADERS):
+        (H2StreamStateMachine.request_received, StreamState.OPEN),
+    (StreamState.IDLE, StreamInputs.RECV_DATA):
+        (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+    (StreamState.IDLE, StreamInputs.SEND_PUSH_PROMISE):
+        (H2StreamStateMachine.send_new_pushed_stream,
+            StreamState.RESERVED_LOCAL),
+    (StreamState.IDLE, StreamInputs.RECV_PUSH_PROMISE):
+        (H2StreamStateMachine.recv_new_pushed_stream,
+            StreamState.RESERVED_REMOTE),
+    (StreamState.IDLE, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+        (None, StreamState.IDLE),
+    (StreamState.IDLE, StreamInputs.UPGRADE_CLIENT):
+        (H2StreamStateMachine.request_sent, StreamState.HALF_CLOSED_LOCAL),
+    (StreamState.IDLE, StreamInputs.UPGRADE_SERVER):
+        (H2StreamStateMachine.request_received,
+            StreamState.HALF_CLOSED_REMOTE),
+
+    # State: reserved local
+    (StreamState.RESERVED_LOCAL, StreamInputs.SEND_HEADERS):
+        (H2StreamStateMachine.response_sent, StreamState.HALF_CLOSED_REMOTE),
+    (StreamState.RESERVED_LOCAL, StreamInputs.RECV_DATA):
+        (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+    (StreamState.RESERVED_LOCAL, StreamInputs.SEND_WINDOW_UPDATE):
+        (None, StreamState.RESERVED_LOCAL),
+    (StreamState.RESERVED_LOCAL, StreamInputs.RECV_WINDOW_UPDATE):
+        (H2StreamStateMachine.window_updated, StreamState.RESERVED_LOCAL),
+    (StreamState.RESERVED_LOCAL, StreamInputs.SEND_RST_STREAM):
+        (H2StreamStateMachine.send_reset_stream, StreamState.CLOSED),
+    (StreamState.RESERVED_LOCAL, StreamInputs.RECV_RST_STREAM):
+        (H2StreamStateMachine.stream_reset, StreamState.CLOSED),
+    (StreamState.RESERVED_LOCAL, StreamInputs.SEND_ALTERNATIVE_SERVICE):
+        (H2StreamStateMachine.send_alt_svc, StreamState.RESERVED_LOCAL),
+    (StreamState.RESERVED_LOCAL, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+        (None, StreamState.RESERVED_LOCAL),
+
+    # State: reserved remote
+    (StreamState.RESERVED_REMOTE, StreamInputs.RECV_HEADERS):
+        (H2StreamStateMachine.response_received,
+            StreamState.HALF_CLOSED_LOCAL),
+    (StreamState.RESERVED_REMOTE, StreamInputs.RECV_DATA):
+        (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+    (StreamState.RESERVED_REMOTE, StreamInputs.SEND_WINDOW_UPDATE):
+        (None, StreamState.RESERVED_REMOTE),
+    (StreamState.RESERVED_REMOTE, StreamInputs.RECV_WINDOW_UPDATE):
+        (H2StreamStateMachine.window_updated, StreamState.RESERVED_REMOTE),
+    (StreamState.RESERVED_REMOTE, StreamInputs.SEND_RST_STREAM):
+        (H2StreamStateMachine.send_reset_stream, StreamState.CLOSED),
+    (StreamState.RESERVED_REMOTE, StreamInputs.RECV_RST_STREAM):
+        (H2StreamStateMachine.stream_reset, StreamState.CLOSED),
+    (StreamState.RESERVED_REMOTE, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+        (H2StreamStateMachine.recv_alt_svc, StreamState.RESERVED_REMOTE),
+
+    # State: open
+    (StreamState.OPEN, StreamInputs.SEND_HEADERS):
+        (H2StreamStateMachine.response_sent, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.RECV_HEADERS):
+        (H2StreamStateMachine.response_received, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.SEND_DATA):
+        (None, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.RECV_DATA):
+        (H2StreamStateMachine.data_received, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.SEND_END_STREAM):
+        (None, StreamState.HALF_CLOSED_LOCAL),
+    (StreamState.OPEN, StreamInputs.RECV_END_STREAM):
+        (H2StreamStateMachine.stream_half_closed,
+         StreamState.HALF_CLOSED_REMOTE),
+    (StreamState.OPEN, StreamInputs.SEND_WINDOW_UPDATE):
+        (None, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.RECV_WINDOW_UPDATE):
+        (H2StreamStateMachine.window_updated, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.SEND_RST_STREAM):
+        (H2StreamStateMachine.send_reset_stream, StreamState.CLOSED),
+    (StreamState.OPEN, StreamInputs.RECV_RST_STREAM):
+        (H2StreamStateMachine.stream_reset, StreamState.CLOSED),
+    (StreamState.OPEN, StreamInputs.SEND_PUSH_PROMISE):
+        (H2StreamStateMachine.send_push_promise, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.RECV_PUSH_PROMISE):
+        (H2StreamStateMachine.recv_push_promise, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.SEND_INFORMATIONAL_HEADERS):
+        (H2StreamStateMachine.send_informational_response, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.RECV_INFORMATIONAL_HEADERS):
+        (H2StreamStateMachine.recv_informational_response, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.SEND_ALTERNATIVE_SERVICE):
+        (H2StreamStateMachine.send_alt_svc, StreamState.OPEN),
+    (StreamState.OPEN, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+        (H2StreamStateMachine.recv_alt_svc, StreamState.OPEN),
+
+    # State: half-closed remote
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_HEADERS):
+        (H2StreamStateMachine.response_sent, StreamState.HALF_CLOSED_REMOTE),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_HEADERS):
+        (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_DATA):
+        (None, StreamState.HALF_CLOSED_REMOTE),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_DATA):
+        (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_END_STREAM):
+        (H2StreamStateMachine.send_end_stream, StreamState.CLOSED),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_WINDOW_UPDATE):
+        (None, StreamState.HALF_CLOSED_REMOTE),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_WINDOW_UPDATE):
+        (H2StreamStateMachine.window_updated, StreamState.HALF_CLOSED_REMOTE),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_RST_STREAM):
+        (H2StreamStateMachine.send_reset_stream, StreamState.CLOSED),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_RST_STREAM):
+        (H2StreamStateMachine.stream_reset, StreamState.CLOSED),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_PUSH_PROMISE):
+        (H2StreamStateMachine.send_push_promise,
+            StreamState.HALF_CLOSED_REMOTE),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_PUSH_PROMISE):
+        (H2StreamStateMachine.reset_stream_on_error, StreamState.CLOSED),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_INFORMATIONAL_HEADERS):
+        (H2StreamStateMachine.send_informational_response,
+            StreamState.HALF_CLOSED_REMOTE),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.SEND_ALTERNATIVE_SERVICE):
+        (H2StreamStateMachine.send_alt_svc, StreamState.HALF_CLOSED_REMOTE),
+    (StreamState.HALF_CLOSED_REMOTE, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+        (H2StreamStateMachine.recv_alt_svc, StreamState.HALF_CLOSED_REMOTE),
+
+    # State: half-closed local
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_HEADERS):
+        (H2StreamStateMachine.response_received,
+            StreamState.HALF_CLOSED_LOCAL),
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_DATA):
+        (H2StreamStateMachine.data_received, StreamState.HALF_CLOSED_LOCAL),
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_END_STREAM):
+        (H2StreamStateMachine.stream_ended, StreamState.CLOSED),
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.SEND_WINDOW_UPDATE):
+        (None, StreamState.HALF_CLOSED_LOCAL),
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_WINDOW_UPDATE):
+        (H2StreamStateMachine.window_updated, StreamState.HALF_CLOSED_LOCAL),
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.SEND_RST_STREAM):
+        (H2StreamStateMachine.send_reset_stream, StreamState.CLOSED),
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_RST_STREAM):
+        (H2StreamStateMachine.stream_reset, StreamState.CLOSED),
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_PUSH_PROMISE):
+        (H2StreamStateMachine.recv_push_promise,
+            StreamState.HALF_CLOSED_LOCAL),
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_INFORMATIONAL_HEADERS):
+        (H2StreamStateMachine.recv_informational_response,
+            StreamState.HALF_CLOSED_LOCAL),
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.SEND_ALTERNATIVE_SERVICE):
+        (H2StreamStateMachine.send_alt_svc, StreamState.HALF_CLOSED_LOCAL),
+    (StreamState.HALF_CLOSED_LOCAL, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+        (H2StreamStateMachine.recv_alt_svc, StreamState.HALF_CLOSED_LOCAL),
+
+    # State: closed
+    (StreamState.CLOSED, StreamInputs.RECV_END_STREAM):
+        (None, StreamState.CLOSED),
+    (StreamState.CLOSED, StreamInputs.RECV_ALTERNATIVE_SERVICE):
+        (None, StreamState.CLOSED),
+
+    # RFC 7540 Section 5.1 defines how the end point should react when
+    # receiving a frame on a closed stream with the following statements:
+    #
+    # > An endpoint that receives any frame other than PRIORITY after receiving
+    # > a RST_STREAM MUST treat that as a stream error of type STREAM_CLOSED.
+    # > An endpoint that receives any frames after receiving a frame with the
+    # > END_STREAM flag set MUST treat that as a connection error of type
+    # > STREAM_CLOSED.
+    (StreamState.CLOSED, StreamInputs.RECV_HEADERS):
+        (H2StreamStateMachine.recv_on_closed_stream, StreamState.CLOSED),
+    (StreamState.CLOSED, StreamInputs.RECV_DATA):
+        (H2StreamStateMachine.recv_on_closed_stream, StreamState.CLOSED),
+
+    # > WINDOW_UPDATE or RST_STREAM frames can be received in this state
+    # > for a short period after a DATA or HEADERS frame containing an
+    # > END_STREAM flag is sent.
+    (StreamState.CLOSED, StreamInputs.RECV_WINDOW_UPDATE):
+        (H2StreamStateMachine.window_on_closed_stream, StreamState.CLOSED),
+    (StreamState.CLOSED, StreamInputs.RECV_RST_STREAM):
+        (H2StreamStateMachine.reset_on_closed_stream, StreamState.CLOSED),
+
+    # > A receiver MUST treat the receipt of a PUSH_PROMISE on a stream that is
+    # > neither "open" nor "half-closed (local)" as a connection error of type
+    # > PROTOCOL_ERROR.
+    (StreamState.CLOSED, StreamInputs.RECV_PUSH_PROMISE):
+        (H2StreamStateMachine.recv_push_on_closed_stream, StreamState.CLOSED),
+
+    # Also, users should be forbidden from sending on closed streams.
+    (StreamState.CLOSED, StreamInputs.SEND_HEADERS):
+        (H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
+    (StreamState.CLOSED, StreamInputs.SEND_PUSH_PROMISE):
+        (H2StreamStateMachine.send_push_on_closed_stream, StreamState.CLOSED),
+    (StreamState.CLOSED, StreamInputs.SEND_RST_STREAM):
+        (H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
+    (StreamState.CLOSED, StreamInputs.SEND_DATA):
+        (H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
+    (StreamState.CLOSED, StreamInputs.SEND_WINDOW_UPDATE):
+        (H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
+    (StreamState.CLOSED, StreamInputs.SEND_END_STREAM):
+        (H2StreamStateMachine.send_on_closed_stream, StreamState.CLOSED),
+}
+
+
+class H2Stream(object):
+    """
+    A low-level HTTP/2 stream object. This handles building and receiving
+    frames and maintains per-stream state.
+
+    This wraps a HTTP/2 Stream state machine implementation, ensuring that
+    frames can only be sent/received when the stream is in a valid state.
+    Attempts to create frames that cannot be sent will raise a
+    ``ProtocolError``.
+    """
+    def __init__(self,
+                 stream_id,
+                 config,
+                 inbound_window_size,
+                 outbound_window_size):
+        self.state_machine = H2StreamStateMachine(stream_id)
+        self.stream_id = stream_id
+        self.max_outbound_frame_size = None
+        self.request_method = None
+
+        # The current value of the outbound stream flow control window
+        self.outbound_flow_control_window = outbound_window_size
+
+        # The flow control manager.
+        self._inbound_window_manager = WindowManager(inbound_window_size)
+
+        # The expected content length, if any.
+        self._expected_content_length = None
+
+        # The actual received content length. Always tracked.
+        self._actual_content_length = 0
+
+        # The authority we believe this stream belongs to.
+        self._authority = None
+
+        # The configuration for this stream.
+        self.config = config
+
+    def __repr__(self):
+        return "<%s id:%d state:%r>" % (
+            type(self).__name__,
+            self.stream_id,
+            self.state_machine.state
+        )
+
+    @property
+    def inbound_flow_control_window(self):
+        """
+        The size of the inbound flow control window for the stream. This is
+        rarely publicly useful: instead, use :meth:`remote_flow_control_window
+        <h2.stream.H2Stream.remote_flow_control_window>`. This property is
+        largely present as a convenient shortcut to this data.
+        """
+        return self._inbound_window_manager.current_window_size
+
+    @property
+    def open(self):
+        """
+        Whether the stream is 'open' in any sense: that is, whether it counts
+        against the number of concurrent streams.
+        """
+        # RFC 7540 Section 5.1.2 defines 'open' for this purpose to mean
+        # the OPEN state or either of the HALF_CLOSED states. Perplexingly,
+        # this excludes the reserved states.
+        # For more detail on why we're doing this in this slightly weird way,
+        # see the comment on ``STREAM_OPEN`` at the top of the file.
+        return STREAM_OPEN[self.state_machine.state]
+
+    @property
+    def closed(self):
+        """
+        Whether the stream is closed.
+        """
+        return self.state_machine.state == StreamState.CLOSED
+
+    @property
+    def closed_by(self):
+        """
+        Returns how the stream was closed, as one of StreamClosedBy.
+        """
+        return self.state_machine.stream_closed_by
+
+    def upgrade(self, client_side):
+        """
+        Called by the connection to indicate that this stream is the initial
+        request/response of an upgraded connection. Places the stream into an
+        appropriate state.
+        """
+        self.config.logger.debug("Upgrading %r", self)
+
+        assert self.stream_id == 1
+        input_ = (
+            StreamInputs.UPGRADE_CLIENT if client_side
+            else StreamInputs.UPGRADE_SERVER
+        )
+
+        # This may return events, we deliberately don't want them.
+        self.state_machine.process_input(input_)
+        return
+
+    def send_headers(self, headers, encoder, end_stream=False):
+        """
+        Returns a list of HEADERS/CONTINUATION frames to emit as either headers
+        or trailers.
+        """
+        self.config.logger.debug("Send headers %s on %r", headers, self)
+
+        # Because encoding headers makes an irreversible change to the header
+        # compression context, we make the state transition before we encode
+        # them.
+
+        # First, check if we're a client. If we are, no problem: if we aren't,
+        # we need to scan the header block to see if this is an informational
+        # response.
+        input_ = StreamInputs.SEND_HEADERS
+        if ((not self.state_machine.client) and
+                is_informational_response(headers)):
+            if end_stream:
+                raise ProtocolError(
+                    "Cannot set END_STREAM on informational responses."
+                )
+
+            input_ = StreamInputs.SEND_INFORMATIONAL_HEADERS
+
+        events = self.state_machine.process_input(input_)
+
+        hf = HeadersFrame(self.stream_id)
+        hdr_validation_flags = self._build_hdr_validation_flags(events)
+        frames = self._build_headers_frames(
+            headers, encoder, hf, hdr_validation_flags
+        )
+
+        if end_stream:
+            # Not a bug: the END_STREAM flag is valid on the initial HEADERS
+            # frame, not the CONTINUATION frames that follow.
+            self.state_machine.process_input(StreamInputs.SEND_END_STREAM)
+            frames[0].flags.add('END_STREAM')
+
+        if self.state_machine.trailers_sent and not end_stream:
+            raise ProtocolError("Trailers must have END_STREAM set.")
+
+        if self.state_machine.client and self._authority is None:
+            self._authority = authority_from_headers(headers)
+
+        # store request method for _initialize_content_length
+        self.request_method = extract_method_header(headers)
+
+        return frames
+
+    def push_stream_in_band(self, related_stream_id, headers, encoder):
+        """
+        Returns a list of PUSH_PROMISE/CONTINUATION frames to emit as a pushed
+        stream header. Called on the stream that has the PUSH_PROMISE frame
+        sent on it.
+        """
+        self.config.logger.debug("Push stream %r", self)
+
+        # Because encoding headers makes an irreversible change to the header
+        # compression context, we make the state transition *first*.
+
+        events = self.state_machine.process_input(
+            StreamInputs.SEND_PUSH_PROMISE
+        )
+
+        ppf = PushPromiseFrame(self.stream_id)
+        ppf.promised_stream_id = related_stream_id
+        hdr_validation_flags = self._build_hdr_validation_flags(events)
+        frames = self._build_headers_frames(
+            headers, encoder, ppf, hdr_validation_flags
+        )
+
+        return frames
+
+    def locally_pushed(self):
+        """
+        Mark this stream as one that was pushed by this peer. Must be called
+        immediately after initialization. Sends no frames, simply updates the
+        state machine.
+        """
+        # This does not trigger any events.
+        events = self.state_machine.process_input(
+            StreamInputs.SEND_PUSH_PROMISE
+        )
+        assert not events
+        return []
+
+    def send_data(self, data, end_stream=False, pad_length=None):
+        """
+        Prepare some data frames. Optionally end the stream.
+
+        .. warning:: Does not perform flow control checks.
+        """
+        self.config.logger.debug(
+            "Send data on %r with end stream set to %s", self, end_stream
+        )
+
+        self.state_machine.process_input(StreamInputs.SEND_DATA)
+
+        df = DataFrame(self.stream_id)
+        df.data = data
+        if end_stream:
+            self.state_machine.process_input(StreamInputs.SEND_END_STREAM)
+            df.flags.add('END_STREAM')
+        if pad_length is not None:
+            df.flags.add('PADDED')
+            df.pad_length = pad_length
+
+        # Subtract flow_controlled_length to account for possible padding
+        self.outbound_flow_control_window -= df.flow_controlled_length
+        assert self.outbound_flow_control_window >= 0
+
+        return [df]
+
+    def end_stream(self):
+        """
+        End a stream without sending data.
+        """
+        self.config.logger.debug("End stream %r", self)
+
+        self.state_machine.process_input(StreamInputs.SEND_END_STREAM)
+        df = DataFrame(self.stream_id)
+        df.flags.add('END_STREAM')
+        return [df]
+
+    def advertise_alternative_service(self, field_value):
+        """
+        Advertise an RFC 7838 alternative service. The semantics of this are
+        better documented in the ``H2Connection`` class.
+        """
+        self.config.logger.debug(
+            "Advertise alternative service of %r for %r", field_value, self
+        )
+        self.state_machine.process_input(StreamInputs.SEND_ALTERNATIVE_SERVICE)
+        asf = AltSvcFrame(self.stream_id)
+        asf.field = field_value
+        return [asf]
+
+    def increase_flow_control_window(self, increment):
+        """
+        Increase the size of the flow control window for the remote side.
+        """
+        self.config.logger.debug(
+            "Increase flow control window for %r by %d",
+            self, increment
+        )
+        self.state_machine.process_input(StreamInputs.SEND_WINDOW_UPDATE)
+        self._inbound_window_manager.window_opened(increment)
+
+        wuf = WindowUpdateFrame(self.stream_id)
+        wuf.window_increment = increment
+        return [wuf]
+
+    def receive_push_promise_in_band(self,
+                                     promised_stream_id,
+                                     headers,
+                                     header_encoding):
+        """
+        Receives a push promise frame sent on this stream, pushing a remote
+        stream. This is called on the stream that has the PUSH_PROMISE sent
+        on it.
+        """
+        self.config.logger.debug(
+            "Receive Push Promise on %r for remote stream %d",
+            self, promised_stream_id
+        )
+        events = self.state_machine.process_input(
+            StreamInputs.RECV_PUSH_PROMISE
+        )
+        events[0].pushed_stream_id = promised_stream_id
+
+        hdr_validation_flags = self._build_hdr_validation_flags(events)
+        events[0].headers = self._process_received_headers(
+            headers, hdr_validation_flags, header_encoding
+        )
+        return [], events
+
+    def remotely_pushed(self, pushed_headers):
+        """
+        Mark this stream as one that was pushed by the remote peer. Must be
+        called immediately after initialization. Sends no frames, simply
+        updates the state machine.
+        """
+        self.config.logger.debug("%r pushed by remote peer", self)
+        events = self.state_machine.process_input(
+            StreamInputs.RECV_PUSH_PROMISE
+        )
+        self._authority = authority_from_headers(pushed_headers)
+        return [], events
+
+    def receive_headers(self, headers, end_stream, header_encoding):
+        """
+        Receive a set of headers (or trailers).
+        """
+        if is_informational_response(headers):
+            if end_stream:
+                raise ProtocolError(
+                    "Cannot set END_STREAM on informational responses"
+                )
+            input_ = StreamInputs.RECV_INFORMATIONAL_HEADERS
+        else:
+            input_ = StreamInputs.RECV_HEADERS
+
+        events = self.state_machine.process_input(input_)
+
+        if end_stream:
+            es_events = self.state_machine.process_input(
+                StreamInputs.RECV_END_STREAM
+            )
+            events[0].stream_ended = es_events[0]
+            events += es_events
+
+        self._initialize_content_length(headers)
+
+        if isinstance(events[0], TrailersReceived):
+            if not end_stream:
+                raise ProtocolError("Trailers must have END_STREAM set")
+
+        hdr_validation_flags = self._build_hdr_validation_flags(events)
+        events[0].headers = self._process_received_headers(
+            headers, hdr_validation_flags, header_encoding
+        )
+        return [], events
+
+    def receive_data(self, data, end_stream, flow_control_len):
+        """
+        Receive some data.
+        """
+        self.config.logger.debug(
+            "Receive data on %r with end stream %s and flow control length "
+            "set to %d", self, end_stream, flow_control_len
+        )
+        events = self.state_machine.process_input(StreamInputs.RECV_DATA)
+        self._inbound_window_manager.window_consumed(flow_control_len)
+        self._track_content_length(len(data), end_stream)
+
+        if end_stream:
+            es_events = self.state_machine.process_input(
+                StreamInputs.RECV_END_STREAM
+            )
+            events[0].stream_ended = es_events[0]
+            events.extend(es_events)
+
+        events[0].data = data
+        events[0].flow_controlled_length = flow_control_len
+        return [], events
+
+    def receive_window_update(self, increment):
+        """
+        Handle a WINDOW_UPDATE increment.
+        """
+        self.config.logger.debug(
+            "Receive Window Update on %r for increment of %d",
+            self, increment
+        )
+        events = self.state_machine.process_input(
+            StreamInputs.RECV_WINDOW_UPDATE
+        )
+        frames = []
+
+        # If we encounter a problem with incrementing the flow control window,
+        # this should be treated as a *stream* error, not a *connection* error.
+        # That means we need to catch the error and forcibly close the stream.
+        if events:
+            events[0].delta = increment
+            try:
+                self.outbound_flow_control_window = guard_increment_window(
+                    self.outbound_flow_control_window,
+                    increment
+                )
+            except FlowControlError:
+                # Ok, this is bad. We're going to need to perform a local
+                # reset.
+                event = StreamReset()
+                event.stream_id = self.stream_id
+                event.error_code = ErrorCodes.FLOW_CONTROL_ERROR
+                event.remote_reset = False
+
+                events = [event]
+                frames = self.reset_stream(event.error_code)
+
+        return frames, events
+
+    def receive_continuation(self):
+        """
+        A naked CONTINUATION frame has been received. This is always an error,
+        but the type of error depends on the state of the stream, so we pass
+        the input through the state machine and let it raise the right one.
+        """
+        self.config.logger.debug("Receive Continuation frame on %r", self)
+        self.state_machine.process_input(
+            StreamInputs.RECV_CONTINUATION
+        )
+        assert False, "Should not be reachable"
+
+    def receive_alt_svc(self, frame):
+        """
+        An Alternative Service frame was received on the stream. This frame
+        inherits the origin associated with this stream.
+        """
+        self.config.logger.debug(
+            "Receive Alternative Service frame on stream %r", self
+        )
+
+        # If the origin is present, RFC 7838 says we have to ignore it.
+        if frame.origin:
+            return [], []
+
+        events = self.state_machine.process_input(
+            StreamInputs.RECV_ALTERNATIVE_SERVICE
+        )
+
+        # There are lots of situations where we want to ignore the ALTSVC
+        # frame. If we need to pay attention, we'll have an event and should
+        # fill it out.
+        if events:
+            assert isinstance(events[0], AlternativeServiceAvailable)
+            events[0].origin = self._authority
+            events[0].field_value = frame.field
+
+        return [], events
+
+    def reset_stream(self, error_code=0):
+        """
+        Close the stream locally. Reset the stream with an error code.
+        """
+        self.config.logger.debug(
+            "Local reset %r with error code: %d", self, error_code
+        )
+        self.state_machine.process_input(StreamInputs.SEND_RST_STREAM)
+
+        rsf = RstStreamFrame(self.stream_id)
+        rsf.error_code = error_code
+        return [rsf]
+
+    def stream_reset(self, frame):
+        """
+        Handle a stream being reset remotely.
+        """
+        self.config.logger.debug(
+            "Remote reset %r with error code: %d", self, frame.error_code
+        )
+        events = self.state_machine.process_input(StreamInputs.RECV_RST_STREAM)
+
+        if events:
+            # We don't fire an event if this stream is already closed.
+            events[0].error_code = _error_code_from_int(frame.error_code)
+
+        return [], events
+
+    def acknowledge_received_data(self, acknowledged_size):
+        """
+        The user has informed us that they've processed some amount of data
+        that was received on this stream. Pass that to the window manager and
+        potentially return some WindowUpdate frames.
+        """
+        self.config.logger.debug(
+            "Acknowledge received data with size %d on %r",
+            acknowledged_size, self
+        )
+        increment = self._inbound_window_manager.process_bytes(
+            acknowledged_size
+        )
+        if increment:
+            f = WindowUpdateFrame(self.stream_id)
+            f.window_increment = increment
+            return [f]
+
+        return []
+
+    def _build_hdr_validation_flags(self, events):
+        """
+        Constructs a set of header validation flags for use when normalizing
+        and validating header blocks.
+        """
+        is_trailer = isinstance(
+            events[0], (_TrailersSent, TrailersReceived)
+        )
+        is_response_header = isinstance(
+            events[0],
+            (
+                _ResponseSent,
+                ResponseReceived,
+                InformationalResponseReceived
+            )
+        )
+        is_push_promise = isinstance(
+            events[0], (PushedStreamReceived, _PushedRequestSent)
+        )
+
+        return HeaderValidationFlags(
+            is_client=self.state_machine.client,
+            is_trailer=is_trailer,
+            is_response_header=is_response_header,
+            is_push_promise=is_push_promise,
+        )
+
+    def _build_headers_frames(self,
+                              headers,
+                              encoder,
+                              first_frame,
+                              hdr_validation_flags):
+        """
+        Helper method to build headers or push promise frames.
+        """
+        # We need to lowercase the header names, and to ensure that secure
+        # header fields are kept out of compression contexts.
+        if self.config.normalize_outbound_headers:
+            headers = normalize_outbound_headers(
+                headers, hdr_validation_flags
+            )
+        if self.config.validate_outbound_headers:
+            headers = validate_outbound_headers(
+                headers, hdr_validation_flags
+            )
+
+        encoded_headers = encoder.encode(headers)
+
+        # Slice into blocks of max_outbound_frame_size. Be careful with this:
+        # it only works right because we never send padded frames or priority
+        # information on the frames. Revisit this if we do.
+        header_blocks = [
+            encoded_headers[i:i+self.max_outbound_frame_size]
+            for i in range(
+                0, len(encoded_headers), self.max_outbound_frame_size
+            )
+        ]
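+        # Illustrative example (assumed sizes): with max_outbound_frame_size
+        # of 16384 and a 40000-byte encoded header block, this slicing yields
+        # chunks of 16384, 16384 and 7232 bytes: the first frame plus two
+        # CONTINUATION frames below.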
+
+        frames = []
+        first_frame.data = header_blocks[0]
+        frames.append(first_frame)
+
+        for block in header_blocks[1:]:
+            cf = ContinuationFrame(self.stream_id)
+            cf.data = block
+            frames.append(cf)
+
+        frames[-1].flags.add('END_HEADERS')
+        return frames
+
+    def _process_received_headers(self,
+                                  headers,
+                                  header_validation_flags,
+                                  header_encoding):
+        """
+        When headers have been received from the remote peer, run a processing
+        pipeline on them to transform them into the appropriate form for
+        attaching to an event.
+        """
+        if self.config.normalize_inbound_headers:
+            headers = normalize_inbound_headers(
+                headers, header_validation_flags
+            )
+
+        if self.config.validate_inbound_headers:
+            headers = validate_headers(headers, header_validation_flags)
+
+        if header_encoding:
+            headers = _decode_headers(headers, header_encoding)
+
+        # The above steps are all generators, so we need to concretize the
+        # headers now.
+        return list(headers)
+
+    def _initialize_content_length(self, headers):
+        """
+        Checks the headers for a content-length header and initializes the
+        _expected_content_length field from it. It is not an error if no
+        content-length header is present.
+        """
+        if self.request_method == b'HEAD':
+            self._expected_content_length = 0
+            return
+
+        for n, v in headers:
+            if n == b'content-length':
+                try:
+                    self._expected_content_length = int(v, 10)
+                except ValueError:
+                    raise ProtocolError(
+                        "Invalid content-length header: %s" % v
+                    )
+
+                return
+
+    def _track_content_length(self, length, end_stream):
+        """
+        Update the running count of received body data and validate it.
+        Always updates the received total, but only validates the length
+        against the content-length header if one was received.
+
+        :param length: The length of the body chunk received.
+        :param end_stream: If this is the last body chunk received.
+        """
+        self._actual_content_length += length
+        actual = self._actual_content_length
+        expected = self._expected_content_length
+
+        if expected is not None:
+            if expected < actual:
+                raise InvalidBodyLengthError(expected, actual)
+
+            if end_stream and expected != actual:
+                raise InvalidBodyLengthError(expected, actual)
+
+    def _inbound_flow_control_change_from_settings(self, delta):
+        """
+        We changed SETTINGS_INITIAL_WINDOW_SIZE, which means we need to
+        update the target window size for flow control. For our flow control
+        strategy, this means we need to do two things: we need to adjust the
+        current window size, but we also need to set the target maximum window
+        size to the new value.
+        """
+        new_max_size = self._inbound_window_manager.max_window_size + delta
+        self._inbound_window_manager.window_opened(delta)
+        self._inbound_window_manager.max_window_size = new_max_size
+
+
+def _decode_headers(headers, encoding):
+    """
+    Given an iterable of header two-tuples and an encoding, decodes those
+    headers using that encoding while preserving the type of each header
+    tuple, so that ``HeaderTuple`` subclasses survive decoding intact.
+    """
+    for header in headers:
+        # This function expects to work on decoded headers, which are always
+        # HeaderTuple objects.
+        assert isinstance(header, HeaderTuple)
+
+        name, value = header
+        name = name.decode(encoding)
+        value = value.decode(encoding)
+        yield header.__class__(name, value)
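+
+# For example (illustrative values): decoding the pair (b'x-name', b'value')
+# with encoding 'utf-8' yields ('x-name', 'value') with the same HeaderTuple
+# class as the input pair.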
diff --git a/tools/third_party/h2/h2/utilities.py b/tools/third_party/h2/h2/utilities.py
new file mode 100755
index 0000000..0cff0fa
--- /dev/null
+++ b/tools/third_party/h2/h2/utilities.py
@@ -0,0 +1,619 @@
+# -*- coding: utf-8 -*-
+"""
+h2/utilities
+~~~~~~~~~~~~
+
+Utility functions that do not belong in a separate module.
+"""
+import collections
+import re
+from string import whitespace
+import sys
+
+from hpack import HeaderTuple, NeverIndexedHeaderTuple
+
+from .exceptions import ProtocolError, FlowControlError
+
+UPPER_RE = re.compile(b"[A-Z]")
+
+# A set of headers that are hop-by-hop or connection-specific and thus
+# forbidden in HTTP/2. This list comes from RFC 7540 § 8.1.2.2.
+CONNECTION_HEADERS = frozenset([
+    b'connection', u'connection',
+    b'proxy-connection', u'proxy-connection',
+    b'keep-alive', u'keep-alive',
+    b'transfer-encoding', u'transfer-encoding',
+    b'upgrade', u'upgrade',
+])
+
+
+_ALLOWED_PSEUDO_HEADER_FIELDS = frozenset([
+    b':method', u':method',
+    b':scheme', u':scheme',
+    b':authority', u':authority',
+    b':path', u':path',
+    b':status', u':status',
+])
+
+
+_SECURE_HEADERS = frozenset([
+    # May have basic credentials which are vulnerable to dictionary attacks.
+    b'authorization', u'authorization',
+    b'proxy-authorization', u'proxy-authorization',
+])
+
+
+_REQUEST_ONLY_HEADERS = frozenset([
+    b':scheme', u':scheme',
+    b':path', u':path',
+    b':authority', u':authority',
+    b':method', u':method'
+])
+
+
+_RESPONSE_ONLY_HEADERS = frozenset([b':status', u':status'])
+
+
+if sys.version_info[0] == 2:  # Python 2.X
+    _WHITESPACE = frozenset(whitespace)
+else:  # Python 3.3+
+    _WHITESPACE = frozenset(map(ord, whitespace))
+
+
+def _secure_headers(headers, hdr_validation_flags):
+    """
+    Certain headers are at risk of being attacked during the header compression
+    phase, and so need to be kept out of header compression contexts. This
+    function automatically transforms certain specific headers into HPACK
+    never-indexed fields to ensure they don't get added to header compression
+    contexts.
+
+    This function currently implements two rules:
+
+    - 'authorization' and 'proxy-authorization' fields are automatically made
+      never-indexed.
+    - Any 'cookie' header field shorter than 20 bytes long is made
+      never-indexed.
+
+    These fields are the most at-risk. These rules are inspired by Firefox
+    and nghttp2.
+    """
+    for header in headers:
+        if header[0] in _SECURE_HEADERS:
+            yield NeverIndexedHeaderTuple(*header)
+        elif header[0] in (b'cookie', u'cookie') and len(header[1]) < 20:
+            yield NeverIndexedHeaderTuple(*header)
+        else:
+            yield header
+
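+# For example (illustrative values): the pair ('cookie', 'k=v') has a value
+# shorter than 20 bytes and is re-emitted as a NeverIndexedHeaderTuple, while
+# a cookie with a longer value passes through unchanged.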
+
+def extract_method_header(headers):
+    """
+    Extracts the request method from the headers list.
+    """
+    for k, v in headers:
+        if k in (b':method', u':method'):
+            if not isinstance(v, bytes):
+                return v.encode('utf-8')
+            else:
+                return v
+
+
+def is_informational_response(headers):
+    """
+    Searches a header block for a :status header to confirm that a given
+    collection of headers are an informational response. Assumes the header
+    block is well formed: that is, that the HTTP/2 special headers are first
+    in the block, and so that it can stop looking when it finds the first
+    header field whose name does not begin with a colon.
+
+    :param headers: The HTTP/2 header block.
+    :returns: A boolean indicating if this is an informational response.
+    """
+    for n, v in headers:
+        if isinstance(n, bytes):
+            sigil = b':'
+            status = b':status'
+            informational_start = b'1'
+        else:
+            sigil = u':'
+            status = u':status'
+            informational_start = u'1'
+
+        # If we find a non-special header, we're done here: stop looping.
+        if not n.startswith(sigil):
+            return False
+
+        # This isn't the status header, bail.
+        if n != status:
+            continue
+
+        # If the first digit is a 1, we've got informational headers.
+        return v.startswith(informational_start)
+
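+# Illustrative examples (not executed here): a block starting with
+# (':status', '103') is informational, while one starting with
+# (':status', '200') is not, and a block with no leading pseudo-headers
+# returns False at the first ordinary header field.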
+
+def guard_increment_window(current, increment):
+    """
+    Increments a flow control window, guarding against that window becoming too
+    large.
+
+    :param current: The current value of the flow control window.
+    :param increment: The increment to apply to that window.
+    :returns: The new value of the window.
+    :raises: ``FlowControlError``
+    """
+    # The largest value the flow control window may take.
+    LARGEST_FLOW_CONTROL_WINDOW = 2**31 - 1
+
+    new_size = current + increment
+
+    if new_size > LARGEST_FLOW_CONTROL_WINDOW:
+        raise FlowControlError(
+            "May not increment flow control window past %d" %
+            LARGEST_FLOW_CONTROL_WINDOW
+        )
+
+    return new_size
+
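+# For instance (illustrative numbers): guard_increment_window(1000, 100)
+# returns 1100, while guard_increment_window(2**31 - 10, 100) raises
+# FlowControlError because the new window would exceed 2**31 - 1.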
+
+def authority_from_headers(headers):
+    """
+    Given a header set, searches for the authority header and returns the
+    value.
+
+    Note that this does not stop at the end of the pseudo-header block, so if
+    no :authority header is present it will loop over the entire header set;
+    it should therefore only be called when the headers are for a client
+    request.
+
+    :param headers: The HTTP header set.
+    :returns: The value of the authority header, or ``None``.
+    :rtype: ``bytes`` or ``None``.
+    """
+    for n, v in headers:
+        # This gets run against headers that come both from HPACK and from the
+        # user, so we may have unicode floating around in here. We only want
+        # bytes.
+        if n in (b':authority', u':authority'):
+            return v.encode('utf-8') if not isinstance(v, bytes) else v
+
+    return None
+
+
+# Flags used by the validate_headers pipeline to determine which checks
+# should be applied to a given set of headers.
+HeaderValidationFlags = collections.namedtuple(
+    'HeaderValidationFlags',
+    ['is_client', 'is_trailer', 'is_response_header', 'is_push_promise']
+)
+
+
+def validate_headers(headers, hdr_validation_flags):
+    """
+    Validates a header sequence against a set of constraints from RFC 7540.
+
+    :param headers: The HTTP header set.
+    :param hdr_validation_flags: An instance of HeaderValidationFlags.
+    """
+    # This validation logic is built on a sequence of generators that are
+    # iterated over to provide the final header list. This reduces some of the
+    # overhead of doing this checking. However, it's worth noting that this
+    # checking remains somewhat expensive, and attempts should be made wherever
+    # possible to reduce the time spent doing them.
+    #
+    # For example, we avoid tuple unpacking in loops because it represents a
+    # fixed cost that we don't want to spend, instead indexing into the header
+    # tuples.
+    headers = _reject_uppercase_header_fields(
+        headers, hdr_validation_flags
+    )
+    headers = _reject_surrounding_whitespace(
+        headers, hdr_validation_flags
+    )
+    headers = _reject_te(
+        headers, hdr_validation_flags
+    )
+    headers = _reject_connection_header(
+        headers, hdr_validation_flags
+    )
+    headers = _reject_pseudo_header_fields(
+        headers, hdr_validation_flags
+    )
+    headers = _check_host_authority_header(
+        headers, hdr_validation_flags
+    )
+    headers = _check_path_header(headers, hdr_validation_flags)
+
+    return headers
+
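+# Note that the value returned above is a lazy generator pipeline: the
+# individual checks only run when the caller iterates it, which is why
+# callers such as H2Stream._process_received_headers concretize the result
+# with list().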
+
+def _reject_uppercase_header_fields(headers, hdr_validation_flags):
+    """
+    Raises a ProtocolError if any uppercase character is found in a header
+    block.
+    """
+    for header in headers:
+        if UPPER_RE.search(header[0]):
+            raise ProtocolError(
+                "Received uppercase header name %s." % header[0])
+        yield header
+
+
+def _reject_surrounding_whitespace(headers, hdr_validation_flags):
+    """
+    Raises a ProtocolError if any header name or value is surrounded by
+    whitespace characters.
+    """
+    # For compatibility with RFC 7230 header fields, we need to allow the field
+    # value to be an empty string. This is ludicrous, but technically allowed.
+    # The field name may not be empty, though, so we can safely assume that it
+    # must have at least one character in it and throw exceptions if it
+    # doesn't.
+    for header in headers:
+        if header[0][0] in _WHITESPACE or header[0][-1] in _WHITESPACE:
+            raise ProtocolError(
+                "Received header name surrounded by whitespace %r" % header[0])
+        if header[1] and ((header[1][0] in _WHITESPACE) or
+           (header[1][-1] in _WHITESPACE)):
+            raise ProtocolError(
+                "Received header value surrounded by whitespace %r" % header[1]
+            )
+        yield header
+
+
+def _reject_te(headers, hdr_validation_flags):
+    """
+    Raises a ProtocolError if the TE header is present in a header block and
+    its value is anything other than "trailers".
+    """
+    for header in headers:
+        if header[0] in (b'te', u'te'):
+            if header[1].lower() not in (b'trailers', u'trailers'):
+                raise ProtocolError(
+                    "Invalid value for TE header: %s" %
+                    header[1]
+                )
+
+        yield header
+
+
+def _reject_connection_header(headers, hdr_validation_flags):
+    """
+    Raises a ProtocolError if the Connection header is present in a header
+    block.
+    """
+    for header in headers:
+        if header[0] in CONNECTION_HEADERS:
+            raise ProtocolError(
+                "Connection-specific header field present: %s." % header[0]
+            )
+
+        yield header
+
+
+def _custom_startswith(test_string, bytes_prefix, unicode_prefix):
+    """
+    Given a string that might be a bytestring or a Unicode string,
+    return True if it starts with the appropriate prefix.
+    """
+    if isinstance(test_string, bytes):
+        return test_string.startswith(bytes_prefix)
+    else:
+        return test_string.startswith(unicode_prefix)
+
+
+def _assert_header_in_set(string_header, bytes_header, header_set):
+    """
+    Given a set of header names, checks whether the string or byte version of
+    the header name is present. Raises a ProtocolError with an appropriate
+    message if it's missing.
+    """
+    if not (string_header in header_set or bytes_header in header_set):
+        raise ProtocolError(
+            "Header block missing mandatory %s header" % string_header
+        )
+
+
+def _reject_pseudo_header_fields(headers, hdr_validation_flags):
+    """
+    Raises a ProtocolError if duplicate pseudo-header fields are found in a
+    header block or if a pseudo-header field appears in a block after an
+    ordinary header field.
+
+    Raises a ProtocolError if pseudo-header fields are found in trailers.
+    """
+    seen_pseudo_header_fields = set()
+    seen_regular_header = False
+
+    for header in headers:
+        if _custom_startswith(header[0], b':', u':'):
+            if header[0] in seen_pseudo_header_fields:
+                raise ProtocolError(
+                    "Received duplicate pseudo-header field %s" % header[0]
+                )
+
+            seen_pseudo_header_fields.add(header[0])
+
+            if seen_regular_header:
+                raise ProtocolError(
+                    "Received pseudo-header field out of sequence: %s" %
+                    header[0]
+                )
+
+            if header[0] not in _ALLOWED_PSEUDO_HEADER_FIELDS:
+                raise ProtocolError(
+                    "Received custom pseudo-header field %s" % header[0]
+                )
+
+        else:
+            seen_regular_header = True
+
+        yield header
+
+    # Check the pseudo-headers we got to confirm they're acceptable.
+    _check_pseudo_header_field_acceptability(
+        seen_pseudo_header_fields, hdr_validation_flags
+    )
+
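+# For example (illustrative): a request block of
+# [(':path', '/'), ('accept', '*/*'), (':method', 'GET')] is rejected here
+# because a pseudo-header field appears after an ordinary header field.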
+
+def _check_pseudo_header_field_acceptability(pseudo_headers,
+                                             hdr_validation_flags):
+    """
+    Given the set of pseudo-headers present in a header block and the
+    validation flags, confirms that RFC 7540 allows them.
+    """
+    # Pseudo-header fields MUST NOT appear in trailers - RFC 7540 § 8.1.2.1
+    if hdr_validation_flags.is_trailer and pseudo_headers:
+        raise ProtocolError(
+            "Received pseudo-header in trailer %s" % pseudo_headers
+        )
+
+    # If ':status' pseudo-header is not there in a response header, reject it.
+    # Similarly, if ':path', ':method', or ':scheme' are not there in a request
+    # header, reject it. Additionally, if a response contains any request-only
+    # headers or vice-versa, reject it.
+    # Relevant RFC section: RFC 7540 § 8.1.2.4
+    # https://tools.ietf.org/html/rfc7540#section-8.1.2.4
+    if hdr_validation_flags.is_response_header:
+        _assert_header_in_set(u':status', b':status', pseudo_headers)
+        invalid_response_headers = pseudo_headers & _REQUEST_ONLY_HEADERS
+        if invalid_response_headers:
+            raise ProtocolError(
+                "Encountered request-only headers %s" %
+                invalid_response_headers
+            )
+    elif (not hdr_validation_flags.is_response_header and
+          not hdr_validation_flags.is_trailer):
+        # This is a request, so we need to have seen :path, :method, and
+        # :scheme.
+        _assert_header_in_set(u':path', b':path', pseudo_headers)
+        _assert_header_in_set(u':method', b':method', pseudo_headers)
+        _assert_header_in_set(u':scheme', b':scheme', pseudo_headers)
+        invalid_request_headers = pseudo_headers & _RESPONSE_ONLY_HEADERS
+        if invalid_request_headers:
+            raise ProtocolError(
+                "Encountered response-only headers %s" %
+                invalid_request_headers
+            )
+
+
+def _validate_host_authority_header(headers):
+    """
+    Given the :authority and Host headers from a request block that isn't
+    a trailer, check that:
+     1. At least one of these headers is set.
+     2. If both headers are set, they match.
+
+    :param headers: The HTTP header set.
+    :raises: ``ProtocolError``
+    """
+    # We use None as a sentinel value.  Iterate over the list of headers,
+    # and record the value of these headers (if present).  We don't need
+    # to worry about receiving duplicate :authority headers, as this is
+    # enforced by the _reject_pseudo_header_fields() pipeline.
+    #
+    # TODO: We should also guard against receiving duplicate Host headers,
+    # and against sending duplicate headers.
+    authority_header_val = None
+    host_header_val = None
+
+    for header in headers:
+        if header[0] in (b':authority', u':authority'):
+            authority_header_val = header[1]
+        elif header[0] in (b'host', u'host'):
+            host_header_val = header[1]
+
+        yield header
+
+    # If we have not-None values for these variables, then we know we saw
+    # the corresponding header.
+    authority_present = (authority_header_val is not None)
+    host_present = (host_header_val is not None)
+
+    # It is an error for a request header block to contain neither
+    # an :authority header nor a Host header.
+    if not authority_present and not host_present:
+        raise ProtocolError(
+            "Request header block does not have an :authority or Host header."
+        )
+
+    # If we receive both headers, they should definitely match.
+    if authority_present and host_present:
+        if authority_header_val != host_header_val:
+            raise ProtocolError(
+                "Request header block has mismatched :authority and "
+                "Host headers: %r / %r"
+                % (authority_header_val, host_header_val)
+            )
+
+
+def _check_host_authority_header(headers, hdr_validation_flags):
+    """
+    Raises a ProtocolError if a header block arrives that does not contain an
+    :authority or a Host header, or if a header block contains both fields,
+    but their values do not match.
+    """
+    # We only expect to see :authority and Host headers on request header
+    # blocks that aren't trailers, so skip this validation if this is a
+    # response header or we're looking at trailer blocks.
+    skip_validation = (
+        hdr_validation_flags.is_response_header or
+        hdr_validation_flags.is_trailer
+    )
+    if skip_validation:
+        return headers
+
+    return _validate_host_authority_header(headers)
+
+
+def _check_path_header(headers, hdr_validation_flags):
+    """
+    Raise a ProtocolError if a header block arrives or is sent that contains an
+    empty :path header.
+    """
+    def inner():
+        for header in headers:
+            if header[0] in (b':path', u':path'):
+                if not header[1]:
+                    raise ProtocolError("An empty :path header is forbidden")
+
+            yield header
+
+    # We only expect to see :authority and Host headers on request header
+    # blocks that aren't trailers, so skip this validation if this is a
+    # response header or we're looking at trailer blocks.
+    skip_validation = (
+        hdr_validation_flags.is_response_header or
+        hdr_validation_flags.is_trailer
+    )
+    if skip_validation:
+        return headers
+    else:
+        return inner()
+
+
+def _lowercase_header_names(headers, hdr_validation_flags):
+    """
+    Given an iterable of header two-tuples, rebuilds that iterable with the
+    header names lowercased. This generator preserves the type of each header
+    tuple: plain tuples stay plain tuples, and ``HeaderTuple`` subclasses
+    keep their class.
+    """
+    for header in headers:
+        if isinstance(header, HeaderTuple):
+            yield header.__class__(header[0].lower(), header[1])
+        else:
+            yield (header[0].lower(), header[1])
+
+
+def _strip_surrounding_whitespace(headers, hdr_validation_flags):
+    """
+    Given an iterable of header two-tuples, strip both leading and trailing
+    whitespace from both header names and header values. This generator
+    preserves the type of each header tuple: plain tuples stay plain tuples,
+    and ``HeaderTuple`` subclasses keep their class.
+    """
+    for header in headers:
+        if isinstance(header, HeaderTuple):
+            yield header.__class__(header[0].strip(), header[1].strip())
+        else:
+            yield (header[0].strip(), header[1].strip())
+
+
+def _strip_connection_headers(headers, hdr_validation_flags):
+    """
+    Strip any connection headers as per RFC7540 § 8.1.2.2.
+    """
+    for header in headers:
+        if header[0] not in CONNECTION_HEADERS:
+            yield header
+
+
+def _check_sent_host_authority_header(headers, hdr_validation_flags):
+    """
+    Raises an InvalidHeaderBlockError if we try to send a header block
+    that does not contain an :authority or a Host header, or if
+    the header block contains both fields, but their values do not match.
+    """
+    # We only expect to see :authority and Host headers on request header
+    # blocks that aren't trailers, so skip this validation if this is a
+    # response header or we're looking at trailer blocks.
+    skip_validation = (
+        hdr_validation_flags.is_response_header or
+        hdr_validation_flags.is_trailer
+    )
+    if skip_validation:
+        return headers
+
+    return _validate_host_authority_header(headers)
+
+
+def _combine_cookie_fields(headers, hdr_validation_flags):
+    """
+    RFC 7540 § 8.1.2.5 allows HTTP/2 clients to split the Cookie header field,
+    which must normally appear only once, into multiple fields for better
+    compression. However, they MUST be joined back up again when received.
+    This normalization step applies that transform. The side-effect is that
+    all cookie fields now appear *last* in the header block.
+    """
+    # There is a problem here about header indexing. Specifically, it's
+    # possible that all these cookies are sent with different header indexing
+    # values. At this point it shouldn't matter too much, so we apply our own
+    # logic and make them never-indexed.
+    cookies = []
+    for header in headers:
+        if header[0] == b'cookie':
+            cookies.append(header[1])
+        else:
+            yield header
+    if cookies:
+        cookie_val = b'; '.join(cookies)
+        yield NeverIndexedHeaderTuple(b'cookie', cookie_val)
+
+
+def normalize_outbound_headers(headers, hdr_validation_flags):
+    """
+    Normalizes a header sequence that we are about to send.
+
+    :param headers: The HTTP header set.
+    :param hdr_validation_flags: An instance of HeaderValidationFlags.
+    """
+    headers = _lowercase_header_names(headers, hdr_validation_flags)
+    headers = _strip_surrounding_whitespace(headers, hdr_validation_flags)
+    headers = _strip_connection_headers(headers, hdr_validation_flags)
+    headers = _secure_headers(headers, hdr_validation_flags)
+
+    return headers
+
+
+def normalize_inbound_headers(headers, hdr_validation_flags):
+    """
+    Normalizes a header sequence that we have received.
+
+    :param headers: The HTTP header set.
+    :param hdr_validation_flags: An instance of HeaderValidationFlags
+    """
+    headers = _combine_cookie_fields(headers, hdr_validation_flags)
+    return headers
+
+
+def validate_outbound_headers(headers, hdr_validation_flags):
+    """
+    Validates and normalizes a header sequence that we are about to send.
+
+    :param headers: The HTTP header set.
+    :param hdr_validation_flags: An instance of HeaderValidationFlags.
+    """
+    headers = _reject_te(
+        headers, hdr_validation_flags
+    )
+    headers = _reject_connection_header(
+        headers, hdr_validation_flags
+    )
+    headers = _reject_pseudo_header_fields(
+        headers, hdr_validation_flags
+    )
+    headers = _check_sent_host_authority_header(
+        headers, hdr_validation_flags
+    )
+    headers = _check_path_header(headers, hdr_validation_flags)
+
+    return headers
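The normalization and validation helpers above are all generator-based filters that get chained lazily over the header iterable, so no work happens until the headers are actually encoded. Below is a minimal, standalone sketch of that composition style; the filter names and the sample headers are illustrative only and are not part of the h2 API.

# Sketch: chaining generator-based header filters, in the style of the
# normalize/validate functions above. All names below are illustrative.

def lowercase_names(headers):
    # Lower-case every header name, leaving values untouched.
    for name, value in headers:
        yield (name.lower(), value)

def strip_whitespace(headers):
    # Strip surrounding whitespace from names and values.
    for name, value in headers:
        yield (name.strip(), value.strip())

def reject_empty_path(headers):
    # Raise if an empty :path header is seen; otherwise pass headers through.
    for name, value in headers:
        if name == ':path' and not value:
            raise ValueError("An empty :path header is forbidden")
        yield (name, value)

def prepare(headers):
    # Each stage wraps the previous one; nothing runs until iteration.
    headers = lowercase_names(headers)
    headers = strip_whitespace(headers)
    headers = reject_empty_path(headers)
    return headers

if __name__ == '__main__':
    raw = [(':method', 'GET'), (':path', '/'), ('Accept ', ' text/html')]
    print(list(prepare(raw)))
    # [(':method', 'GET'), (':path', '/'), ('accept', 'text/html')]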
diff --git a/tools/third_party/h2/h2/windows.py b/tools/third_party/h2/h2/windows.py
new file mode 100755
index 0000000..6656975
--- /dev/null
+++ b/tools/third_party/h2/h2/windows.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+"""
+h2/windows
+~~~~~~~~~~
+
+Defines tools for managing HTTP/2 flow control windows.
+
+The objects defined in this module are used to automatically manage HTTP/2
+flow control windows. Specifically, they keep track of the size of the window,
+how much data has been consumed from that window, and how much of that data
+the application has already processed. They then implement a basic algorithm
+that attempts to manage the flow control window without user input, while
+trying not to emit too many WINDOW_UPDATE frames.
+"""
+from __future__ import division
+
+from .exceptions import FlowControlError
+
+
+# The largest acceptable value for an HTTP/2 flow control window.
+LARGEST_FLOW_CONTROL_WINDOW = 2**31 - 1
+
+
+class WindowManager(object):
+    """
+    A basic HTTP/2 window manager.
+
+    :param max_window_size: The maximum size of the flow control window.
+    :type max_window_size: ``int``
+    """
+    def __init__(self, max_window_size):
+        assert max_window_size <= LARGEST_FLOW_CONTROL_WINDOW
+        self.max_window_size = max_window_size
+        self.current_window_size = max_window_size
+        self._bytes_processed = 0
+
+    def window_consumed(self, size):
+        """
+        We have received a certain number of bytes from the remote peer. This
+        necessarily shrinks the flow control window!
+
+        :param size: The number of flow controlled bytes we received from the
+            remote peer.
+        :type size: ``int``
+        :returns: Nothing.
+        :rtype: ``None``
+        """
+        self.current_window_size -= size
+        if self.current_window_size < 0:
+            raise FlowControlError("Flow control window shrunk below 0")
+
+    def window_opened(self, size):
+        """
+        The flow control window has been incremented, either because of manual
+        flow control management or because of the user changing the flow
+        control settings. This can have the effect of increasing what we
+        consider to be the "maximum" flow control window size.
+
+        This does not increase our view of how many bytes have been processed,
+        only of how much space is in the window.
+
+        :param size: The increment to the flow control window we received.
+        :type size: ``int``
+        :returns: Nothing
+        :rtype: ``None``
+        """
+        self.current_window_size += size
+
+        if self.current_window_size > LARGEST_FLOW_CONTROL_WINDOW:
+            raise FlowControlError(
+                "Flow control window mustn't exceed %d" %
+                LARGEST_FLOW_CONTROL_WINDOW
+            )
+
+        if self.current_window_size > self.max_window_size:
+            self.max_window_size = self.current_window_size
+
+    def process_bytes(self, size):
+        """
+        The application has informed us that it has processed a certain number
+        of bytes. This may cause us to want to emit a window update frame. If
+        we do want to emit a window update frame, this method will return the
+        number of bytes that we should increment the window by.
+
+        :param size: The number of flow controlled bytes that the application
+            has processed.
+        :type size: ``int``
+        :returns: The number of bytes to increment the flow control window by,
+            or ``None``.
+        :rtype: ``int`` or ``None``
+        """
+        self._bytes_processed += size
+        return self._maybe_update_window()
+
+    def _maybe_update_window(self):
+        """
+        Run the algorithm.
+
+        Our current algorithm can be described like this.
+
+        1. If no bytes have been processed, we immediately return ``None``.
+           There is no meaningful way for us to hand space in the window back
+           to the remote peer, so let's not even try.
+        2. If there is no space in the flow control window, and we have
+           processed at least 1024 bytes (or 1/4 of the window, if the window
+           is smaller), we will emit a window update frame. This is to avoid
+           the risk of blocking a stream altogether.
+        3. If there is space in the flow control window, and we have processed
+           at least 1/2 of the window worth of bytes, we will emit a window
+           update frame. This is to minimise the number of window update frames
+           we have to emit.
+
+        In a healthy system with large flow control windows, this will emit
+        WINDOW_UPDATE frames only occasionally. This prevents us from swamping
+        the connection with an enormous number of WINDOW_UPDATE frames,
+        especially in situations where the remote peer is sending a lot of
+        very small DATA frames.
+        """
+        # TODO: Can the window be smaller than 1024 bytes? If not, we can
+        # streamline this algorithm.
+        if not self._bytes_processed:
+            return None
+
+        max_increment = (self.max_window_size - self.current_window_size)
+        increment = 0
+
+        # Note that, even though we may increment less than _bytes_processed,
+        # we still want to set it to zero whenever we emit an increment. This
+        # is because we'll always increment up to the maximum we can.
+        if (self.current_window_size == 0) and (
+                self._bytes_processed > min(1024, self.max_window_size // 4)):
+            increment = min(self._bytes_processed, max_increment)
+            self._bytes_processed = 0
+        elif self._bytes_processed >= (self.max_window_size // 2):
+            increment = min(self._bytes_processed, max_increment)
+            self._bytes_processed = 0
+
+        self.current_window_size += increment
+        return increment
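As a usage illustration of the manager defined above: ``window_consumed`` is called when flow-controlled bytes arrive, ``process_bytes`` when the application has dealt with them, and a non-``None`` return value from ``process_bytes`` is the increment to place in a WINDOW_UPDATE frame. A rough sketch under the assumption that the vendored h2 package is importable (for example via tools/localpaths.py); the byte counts are purely illustrative.

# Sketch: driving WindowManager by hand. The byte counts below are
# illustrative only.
from h2.windows import WindowManager

manager = WindowManager(max_window_size=65535)

# 40000 flow-controlled bytes arrive from the peer: the window shrinks.
manager.window_consumed(40000)

# Processing a small amount is not enough to trigger a WINDOW_UPDATE: the
# window is not empty and less than half of it has been processed yet.
assert not manager.process_bytes(1000)

# Once at least half a window's worth has been processed, the manager hands
# back an increment to put into a WINDOW_UPDATE frame.
increment = manager.process_bytes(39000)
assert increment == 40000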
diff --git a/tools/third_party/h2/setup.cfg b/tools/third_party/h2/setup.cfg
new file mode 100755
index 0000000..5e40900
--- /dev/null
+++ b/tools/third_party/h2/setup.cfg
@@ -0,0 +1,2 @@
+[wheel]
+universal = 1
diff --git a/tools/third_party/h2/setup.py b/tools/third_party/h2/setup.py
new file mode 100755
index 0000000..d3b77f9
--- /dev/null
+++ b/tools/third_party/h2/setup.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import codecs
+import os
+import re
+import sys
+
+try:
+    from setuptools import setup
+except ImportError:
+    from distutils.core import setup
+
+# Get the version
+version_regex = r'__version__ = ["\']([^"\']*)["\']'
+with open('h2/__init__.py', 'r') as f:
+    text = f.read()
+    match = re.search(version_regex, text)
+
+    if match:
+        version = match.group(1)
+    else:
+        raise RuntimeError("No version number found!")
+
+# Stealing this from Kenneth Reitz
+if sys.argv[-1] == 'publish':
+    os.system('python setup.py sdist upload')
+    sys.exit()
+
+packages = [
+    'h2',
+]
+
+readme = codecs.open('README.rst', encoding='utf-8').read()
+history = codecs.open('HISTORY.rst', encoding='utf-8').read()
+
+setup(
+    name='h2',
+    version=version,
+    description='HTTP/2 State-Machine based protocol implementation',
+    long_description=u'\n\n'.join([readme, history]),
+    author='Cory Benfield',
+    author_email='cory@lukasa.co.uk',
+    url='http://hyper.rtfd.org',
+    packages=packages,
+    package_data={'': ['LICENSE', 'README.rst', 'CONTRIBUTORS.rst', 'HISTORY.rst', 'NOTICES']},
+    package_dir={'h2': 'h2'},
+    include_package_data=True,
+    license='MIT License',
+    classifiers=[
+        'Development Status :: 5 - Production/Stable',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: Implementation :: CPython',
+        'Programming Language :: Python :: Implementation :: PyPy',
+    ],
+    install_requires=[
+        'hyperframe>=5.0, <6',
+        'hpack>=2.3, <4',
+    ],
+    extras_require={
+        ':python_version == "2.7" or python_version == "3.3"': ['enum34>=1.1.6, <2'],
+    }
+)
diff --git a/tools/third_party/h2/test/conftest.py b/tools/third_party/h2/test/conftest.py
new file mode 100755
index 0000000..c646ad3
--- /dev/null
+++ b/tools/third_party/h2/test/conftest.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+from hypothesis import settings, HealthCheck
+
+import pytest
+import helpers
+
+# Set up a CI profile that allows slow example generation.
+settings.register_profile(
+    "travis",
+    settings(suppress_health_check=[HealthCheck.too_slow])
+)
+
+
+@pytest.fixture
+def frame_factory():
+    return helpers.FrameFactory()
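Registering a profile does not activate it by itself; the test run still has to select it, for example with Hypothesis' ``settings.load_profile`` or the pytest plugin's ``--hypothesis-profile`` option. A minimal sketch of the ``load_profile`` route follows; the CI environment check is purely illustrative.

# Sketch: opting in to the profile registered above at run time. Assumes a
# Hypothesis version that provides settings.load_profile; the environment
# check is illustrative only.
import os
from hypothesis import settings

if os.environ.get("CI"):
    settings.load_profile("travis")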
diff --git a/tools/third_party/h2/test/coroutine_tests.py b/tools/third_party/h2/test/coroutine_tests.py
new file mode 100755
index 0000000..0f48c02
--- /dev/null
+++ b/tools/third_party/h2/test/coroutine_tests.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+"""
+coroutine_tests
+~~~~~~~~~~~~~~~
+
+This file gives access to a coroutine-based test class. This allows each test
+case to be defined as a pair of interacting coroutines, sending data to each
+other by yielding the flow of control.
+
+The advantage of this method is that we avoid the difficulty of using threads
+in Python, as well as the pain of using sockets and events to communicate and
+organise the communication. This makes the tests entirely deterministic and
+makes them behave identically on all platforms, as well as ensuring they both
+succeed and fail quickly.
+"""
+import itertools
+import functools
+
+import pytest
+
+
+class CoroutineTestCase(object):
+    """
+    A base class for tests that use interacting coroutines.
+
+    The run_until_complete method takes a number of coroutines as arguments.
+    Each one is, in order, passed the output of the previous coroutine until
+    one is exhausted. If a coroutine does not initially yield data (that is,
+    its first action is to receive data), the calling code should prime it by
+    using the 'server' decorator on this class.
+    """
+    def run_until_complete(self, *coroutines):
+        """
+        Executes a set of coroutines that communicate between each other. Each
+        one is, in order, passed the output of the previous coroutine until
+        one is exhausted. If a coroutine does not initially yield data (that
+        is, its first action is to receive data), the calling code should prime
+        it by using the 'server' decorator on this class.
+
+        Once a coroutine is exhausted, the method performs a final check to
+        ensure that all other coroutines are exhausted. This ensures that all
+        assertions in those coroutines got executed.
+        """
+        looping_coroutines = itertools.cycle(coroutines)
+        data = None
+
+        for coro in looping_coroutines:
+            try:
+                data = coro.send(data)
+            except StopIteration:
+                break
+
+        for coro in coroutines:
+            try:
+                next(coro)
+            except StopIteration:
+                continue
+            else:
+                pytest.fail("Coroutine %s not exhausted" % coro)
+
+    def server(self, func):
+        """
+        A decorator that marks a test coroutine as a 'server' coroutine: that
+        is, one whose first action is to consume data, rather than one that
+        initially emits data. The effect of this decorator is simply to prime
+        the coroutine.
+        """
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            c = func(*args, **kwargs)
+            next(c)
+            return c
+
+        return wrapper
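To make the mechanics above concrete, here is a rough sketch of what a test built on this class might look like: a client coroutine that yields bytes and a 'server' coroutine, primed by the decorator, that asserts on what it receives. The class name, payloads, and import path are illustrative, not taken from the real test suite.

# Sketch: a client/server coroutine pair driven by CoroutineTestCase.
# Assumes this module is importable as coroutine_tests (as the test
# directory arranges); the payloads are purely illustrative.
from coroutine_tests import CoroutineTestCase


class TestEcho(CoroutineTestCase):
    def test_round_trip(self):
        def client():
            reply = yield b'hello'   # emit data, then wait for the reply
            assert reply == b'HELLO'
            yield b''                # nothing further to send

        @self.server
        def server():
            data = yield             # primed: first action is to receive
            yield data.upper()       # echo the data back, upper-cased

        self.run_until_complete(client(), server())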
diff --git a/tools/third_party/h2/test/helpers.py b/tools/third_party/h2/test/helpers.py
new file mode 100755
index 0000000..2a4e909
--- /dev/null
+++ b/tools/third_party/h2/test/helpers.py
@@ -0,0 +1,176 @@
+# -*- coding: utf-8 -*-
+"""
+helpers
+~~~~~~~
+
+This module contains helpers for the h2 tests.
+"""
+from hyperframe.frame import (
+    HeadersFrame, DataFrame, SettingsFrame, WindowUpdateFrame, PingFrame,
+    GoAwayFrame, RstStreamFrame, PushPromiseFrame, PriorityFrame,
+    ContinuationFrame, AltSvcFrame
+)
+from hpack.hpack import Encoder
+
+
+SAMPLE_SETTINGS = {
+    SettingsFrame.HEADER_TABLE_SIZE: 4096,
+    SettingsFrame.ENABLE_PUSH: 1,
+    SettingsFrame.MAX_CONCURRENT_STREAMS: 2,
+}
+
+
+class FrameFactory(object):
+    """
+    A class containing lots of helper methods and state to build frames. This
+    allows test cases to easily build correct HTTP/2 frames to feed to
+    hyper-h2.
+    """
+    def __init__(self):
+        self.encoder = Encoder()
+
+    def refresh_encoder(self):
+        self.encoder = Encoder()
+
+    def preamble(self):
+        return b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'
+
+    def build_headers_frame(self,
+                            headers,
+                            flags=[],
+                            stream_id=1,
+                            **priority_kwargs):
+        """
+        Builds a single valid headers frame out of the contained headers.
+        """
+        f = HeadersFrame(stream_id)
+        f.data = self.encoder.encode(headers)
+        f.flags.add('END_HEADERS')
+        for flag in flags:
+            f.flags.add(flag)
+
+        for k, v in priority_kwargs.items():
+            setattr(f, k, v)
+
+        return f
+
+    def build_continuation_frame(self, header_block, flags=[], stream_id=1):
+        """
+        Builds a single continuation frame out of the binary header block.
+        """
+        f = ContinuationFrame(stream_id)
+        f.data = header_block
+        f.flags = set(flags)
+
+        return f
+
+    def build_data_frame(self, data, flags=None, stream_id=1, padding_len=0):
+        """
+        Builds a single data frame out of a chunk of data.
+        """
+        flags = set(flags) if flags is not None else set()
+        f = DataFrame(stream_id)
+        f.data = data
+        f.flags = flags
+
+        if padding_len:
+            flags.add('PADDED')
+            f.pad_length = padding_len
+
+        return f
+
+    def build_settings_frame(self, settings, ack=False):
+        """
+        Builds a single settings frame.
+        """
+        f = SettingsFrame(0)
+        if ack:
+            f.flags.add('ACK')
+
+        f.settings = settings
+        return f
+
+    def build_window_update_frame(self, stream_id, increment):
+        """
+        Builds a single WindowUpdate frame.
+        """
+        f = WindowUpdateFrame(stream_id)
+        f.window_increment = increment
+        return f
+
+    def build_ping_frame(self, ping_data, flags=None):
+        """
+        Builds a single Ping frame.
+        """
+        f = PingFrame(0)
+        f.opaque_data = ping_data
+        if flags:
+            f.flags = set(flags)
+
+        return f
+
+    def build_goaway_frame(self,
+                           last_stream_id,
+                           error_code=0,
+                           additional_data=b''):
+        """
+        Builds a single GOAWAY frame.
+        """
+        f = GoAwayFrame(0)
+        f.error_code = error_code
+        f.last_stream_id = last_stream_id
+        f.additional_data = additional_data
+        return f
+
+    def build_rst_stream_frame(self, stream_id, error_code=0):
+        """
+        Builds a single RST_STREAM frame.
+        """
+        f = RstStreamFrame(stream_id)
+        f.error_code = error_code
+        return f
+
+    def build_push_promise_frame(self,
+                                 stream_id,
+                                 promised_stream_id,
+                                 headers,
+                                 flags=[]):
+        """
+        Builds a single PUSH_PROMISE frame.
+        """
+        f = PushPromiseFrame(stream_id)
+        f.promised_stream_id = promised_stream_id
+        f.data = self.encoder.encode(headers)
+        f.flags = set(flags)
+        f.flags.add('END_HEADERS')
+        return f
+
+    def build_priority_frame(self,
+                             stream_id,
+                             weight,
+                             depends_on=0,
+                             exclusive=False):
+        """
+        Builds a single priority frame.
+        """
+        f = PriorityFrame(stream_id)
+        f.depends_on = depends_on
+        f.stream_weight = weight
+        f.exclusive = exclusive
+        return f
+
+    def build_alt_svc_frame(self, stream_id, origin, field):
+        """
+        Builds a single ALTSVC frame.
+        """
+        f = AltSvcFrame(stream_id)
+        f.origin = origin
+        f.field = field
+        return f
+
+    def change_table_size(self, new_size):
+        """
+        Causes the encoder to send a dynamic size update in the next header
+        block it sends.
+        """
+        self.encoder.header_table_size = new_size
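The FrameFactory above is used throughout the tests that follow to synthesise wire-format bytes and feed them to an H2Connection. A minimal sketch of that pattern, assuming the vendored h2/hpack/hyperframe packages and this helpers module are on sys.path; the request and response headers are illustrative.

# Sketch: using FrameFactory to fake a server response for a client-side
# H2Connection.
import h2.connection
import h2.events
from helpers import FrameFactory

factory = FrameFactory()

conn = h2.connection.H2Connection()
conn.initiate_connection()
conn.send_headers(1, [
    (':authority', 'example.com'), (':path', '/'),
    (':scheme', 'https'), (':method', 'GET'),
], end_stream=True)

# Build a HEADERS frame as the "server" would send it, serialize it to wire
# bytes, and hand those bytes to the client connection.
response = factory.build_headers_frame(
    [(':status', '200'), ('server', 'fake-serv/0.1.0')],
    flags=['END_STREAM'],
)
events = conn.receive_data(response.serialize())
assert any(isinstance(e, h2.events.ResponseReceived) for e in events)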
diff --git a/tools/third_party/h2/test/test_basic_logic.py b/tools/third_party/h2/test/test_basic_logic.py
new file mode 100755
index 0000000..3cfc7cc
--- /dev/null
+++ b/tools/third_party/h2/test/test_basic_logic.py
@@ -0,0 +1,1854 @@
+# -*- coding: utf-8 -*-
+"""
+test_basic_logic
+~~~~~~~~~~~~~~~~
+
+Test the basic logic of the h2 state machines.
+"""
+import random
+import sys
+
+import hyperframe
+import pytest
+
+import h2.config
+import h2.connection
+import h2.errors
+import h2.events
+import h2.exceptions
+import h2.frame_buffer
+import h2.settings
+import h2.stream
+
+import helpers
+
+from hypothesis import given
+from hypothesis.strategies import integers
+
+
+IS_PYTHON3 = sys.version_info >= (3, 0)
+
+
+class TestBasicClient(object):
+    """
+    Basic client-side tests.
+    """
+    example_request_headers = [
+        (u':authority', u'example.com'),
+        (u':path', u'/'),
+        (u':scheme', u'https'),
+        (u':method', u'GET'),
+    ]
+    bytes_example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+    ]
+    example_response_headers = [
+        (u':status', u'200'),
+        (u'server', u'fake-serv/0.1.0')
+    ]
+    bytes_example_response_headers = [
+        (b':status', b'200'),
+        (b'server', b'fake-serv/0.1.0')
+    ]
+
+    def test_begin_connection(self, frame_factory):
+        """
+        Client connections emit the HTTP/2 preamble.
+        """
+        c = h2.connection.H2Connection()
+        expected_settings = frame_factory.build_settings_frame(
+            c.local_settings
+        )
+        expected_data = (
+            b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n' + expected_settings.serialize()
+        )
+
+        events = c.initiate_connection()
+        assert not events
+        assert c.data_to_send() == expected_data
+
+    def test_sending_headers(self):
+        """
+        Single headers frames are correctly encoded.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # Clear the data, then send headers.
+        c.clear_outbound_data_buffer()
+        events = c.send_headers(1, self.example_request_headers)
+        assert not events
+        assert c.data_to_send() == (
+            b'\x00\x00\r\x01\x04\x00\x00\x00\x01'
+            b'A\x88/\x91\xd3]\x05\\\x87\xa7\x84\x87\x82'
+        )
+
+    def test_sending_data(self):
+        """
+        Single data frames are encoded correctly.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+
+        # Clear the data, then send some data.
+        c.clear_outbound_data_buffer()
+        events = c.send_data(1, b'some data')
+        assert not events
+        data_to_send = c.data_to_send()
+        assert (
+            data_to_send == b'\x00\x00\t\x00\x00\x00\x00\x00\x01some data'
+        )
+
+        buffer = h2.frame_buffer.FrameBuffer(server=False)
+        buffer.max_frame_size = 65535
+        buffer.add_data(data_to_send)
+        data_frame = list(buffer)[0]
+        sanity_check_data_frame(
+            data_frame=data_frame,
+            expected_flow_controlled_length=len(b'some data'),
+            expect_padded_flag=False,
+            expected_data_frame_pad_length=0
+        )
+
+    def test_sending_data_with_padding(self):
+        """
+        Single data frames with padding are encoded correctly.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+
+        # Clear the data, then send some data.
+        c.clear_outbound_data_buffer()
+        events = c.send_data(1, b'some data', pad_length=5)
+        assert not events
+        data_to_send = c.data_to_send()
+        assert data_to_send == (
+            b'\x00\x00\x0f\x00\x08\x00\x00\x00\x01'
+            b'\x05some data\x00\x00\x00\x00\x00'
+        )
+
+        buffer = h2.frame_buffer.FrameBuffer(server=False)
+        buffer.max_frame_size = 65535
+        buffer.add_data(data_to_send)
+        data_frame = list(buffer)[0]
+        sanity_check_data_frame(
+            data_frame=data_frame,
+            expected_flow_controlled_length=len(b'some data') + 1 + 5,
+            expect_padded_flag=True,
+            expected_data_frame_pad_length=5
+        )
+
+    def test_sending_data_with_zero_length_padding(self):
+        """
+        Single data frames with zero-length padding are encoded
+        correctly.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+
+        # Clear the data, then send some data.
+        c.clear_outbound_data_buffer()
+        events = c.send_data(1, b'some data', pad_length=0)
+        assert not events
+        data_to_send = c.data_to_send()
+        assert data_to_send == (
+            b'\x00\x00\x0a\x00\x08\x00\x00\x00\x01'
+            b'\x00some data'
+        )
+
+        buffer = h2.frame_buffer.FrameBuffer(server=False)
+        buffer.max_frame_size = 65535
+        buffer.add_data(data_to_send)
+        data_frame = list(buffer)[0]
+        sanity_check_data_frame(
+            data_frame=data_frame,
+            expected_flow_controlled_length=len(b'some data') + 1,
+            expect_padded_flag=True,
+            expected_data_frame_pad_length=0
+        )
+
+    @pytest.mark.parametrize("expected_error,pad_length", [
+        (None,  0),
+        (None, 255),
+        (None, None),
+        (ValueError, -1),
+        (ValueError, 256),
+        (TypeError, 'invalid'),
+        (TypeError, ''),
+        (TypeError, '10'),
+        (TypeError, {}),
+        (TypeError, ['1', '2', '3']),
+        (TypeError, []),
+        (TypeError, 1.5),
+        (TypeError, 1.0),
+        (TypeError, -1.0),
+    ])
+    def test_sending_data_with_invalid_padding_length(self,
+                                                      expected_error,
+                                                      pad_length):
+        """
+        ``send_data`` with a ``pad_length`` parameter that is an integer
+        outside the range of [0, 255] throws a ``ValueError``, and a
+        ``pad_length`` parameter which is not an ``integer`` type
+        throws a ``TypeError``.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+
+        c.clear_outbound_data_buffer()
+        if expected_error is not None:
+            with pytest.raises(expected_error):
+                c.send_data(1, b'some data', pad_length=pad_length)
+        else:
+            c.send_data(1, b'some data', pad_length=pad_length)
+
+    def test_closing_stream_sending_data(self, frame_factory):
+        """
+        We can close a stream with a data frame.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+
+        f = frame_factory.build_data_frame(
+            data=b'some data',
+            flags=['END_STREAM'],
+        )
+
+        # Clear the data, then send some data.
+        c.clear_outbound_data_buffer()
+        events = c.send_data(1, b'some data', end_stream=True)
+        assert not events
+        assert c.data_to_send() == f.serialize()
+
+    def test_receiving_a_response(self, frame_factory):
+        """
+        When receiving a response, the ResponseReceived event fires.
+        """
+        config = h2.config.H2Configuration(header_encoding='utf-8')
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        # Clear the data
+        f = frame_factory.build_headers_frame(
+            self.example_response_headers
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.ResponseReceived)
+        assert event.stream_id == 1
+        assert event.headers == self.example_response_headers
+
+    def test_receiving_a_response_bytes(self, frame_factory):
+        """
+        When receiving a response, the ResponseReceived event fires with bytes
+        headers if the encoding is set appropriately.
+        """
+        config = h2.config.H2Configuration(header_encoding=False)
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        # Clear the data
+        f = frame_factory.build_headers_frame(
+            self.example_response_headers
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.ResponseReceived)
+        assert event.stream_id == 1
+        assert event.headers == self.bytes_example_response_headers
+
+    def test_receiving_a_response_change_encoding(self, frame_factory):
+        """
+        When receiving a response, the ResponseReceived event fires with bytes
+        headers if the encoding is set appropriately, but if this changes then
+        the change reflects it.
+        """
+        config = h2.config.H2Configuration(header_encoding=False)
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        f = frame_factory.build_headers_frame(
+            self.example_response_headers
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.ResponseReceived)
+        assert event.stream_id == 1
+        assert event.headers == self.bytes_example_response_headers
+
+        c.send_headers(3, self.example_request_headers, end_stream=True)
+        c.config.header_encoding = 'utf-8'
+        f = frame_factory.build_headers_frame(
+            self.example_response_headers,
+            stream_id=3,
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.ResponseReceived)
+        assert event.stream_id == 3
+        assert event.headers == self.example_response_headers
+
+    def test_end_stream_without_data(self, frame_factory):
+        """
+        Ending a stream without data emits a zero-length DATA frame with
+        END_STREAM set.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=False)
+
+        # Clear the data
+        c.clear_outbound_data_buffer()
+        f = frame_factory.build_data_frame(b'', flags=['END_STREAM'])
+        events = c.end_stream(1)
+
+        assert not events
+        assert c.data_to_send() == f.serialize()
+
+    def test_cannot_send_headers_on_lower_stream_id(self):
+        """
+        Once stream ID x has been used, we cannot use stream ID y where y < x.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(3, self.example_request_headers, end_stream=False)
+
+        with pytest.raises(h2.exceptions.StreamIDTooLowError) as e:
+            c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        assert e.value.stream_id == 1
+        assert e.value.max_stream_id == 3
+
+    def test_receiving_pushed_stream(self, frame_factory):
+        """
+        Pushed streams fire a PushedStreamReceived event, followed by
+        ResponseReceived when the response headers are received.
+        """
+        config = h2.config.H2Configuration(header_encoding='utf-8')
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=False)
+
+        f1 = frame_factory.build_headers_frame(
+            self.example_response_headers
+        )
+        f2 = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers,
+            flags=['END_HEADERS'],
+        )
+        f3 = frame_factory.build_headers_frame(
+            self.example_response_headers,
+            stream_id=2,
+        )
+        data = b''.join(x.serialize() for x in [f1, f2, f3])
+
+        events = c.receive_data(data)
+
+        assert len(events) == 3
+        stream_push_event = events[1]
+        response_event = events[2]
+        assert isinstance(stream_push_event, h2.events.PushedStreamReceived)
+        assert isinstance(response_event, h2.events.ResponseReceived)
+
+        assert stream_push_event.pushed_stream_id == 2
+        assert stream_push_event.parent_stream_id == 1
+        assert (
+            stream_push_event.headers == self.example_request_headers
+        )
+        assert response_event.stream_id == 2
+        assert response_event.headers == self.example_response_headers
+
+    def test_receiving_pushed_stream_bytes(self, frame_factory):
+        """
+        Pushed headers are not decoded if the header encoding is set to False.
+        """
+        config = h2.config.H2Configuration(header_encoding=False)
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=False)
+
+        f1 = frame_factory.build_headers_frame(
+            self.example_response_headers
+        )
+        f2 = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers,
+            flags=['END_HEADERS'],
+        )
+        f3 = frame_factory.build_headers_frame(
+            self.example_response_headers,
+            stream_id=2,
+        )
+        data = b''.join(x.serialize() for x in [f1, f2, f3])
+
+        events = c.receive_data(data)
+
+        assert len(events) == 3
+        stream_push_event = events[1]
+        response_event = events[2]
+        assert isinstance(stream_push_event, h2.events.PushedStreamReceived)
+        assert isinstance(response_event, h2.events.ResponseReceived)
+
+        assert stream_push_event.pushed_stream_id == 2
+        assert stream_push_event.parent_stream_id == 1
+        assert (
+            stream_push_event.headers == self.bytes_example_request_headers
+        )
+        assert response_event.stream_id == 2
+        assert response_event.headers == self.bytes_example_response_headers
+
+    def test_cannot_receive_pushed_stream_when_enable_push_is_0(self,
+                                                                frame_factory):
+        """
+        If we have set SETTINGS_ENABLE_PUSH to 0, receiving PUSH_PROMISE frames
+        triggers the connection to be closed.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.local_settings.enable_push = 0
+        c.send_headers(1, self.example_request_headers, end_stream=False)
+
+        f1 = frame_factory.build_settings_frame({}, ack=True)
+        f2 = frame_factory.build_headers_frame(
+            self.example_response_headers
+        )
+        f3 = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers,
+            flags=['END_HEADERS'],
+        )
+        c.receive_data(f1.serialize())
+        c.receive_data(f2.serialize())
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f3.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            0, h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_receiving_response_no_body(self, frame_factory):
+        """
+        Receiving a response without a body fires two events, ResponseReceived
+        and StreamEnded.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        f = frame_factory.build_headers_frame(
+            self.example_response_headers,
+            flags=['END_STREAM']
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 2
+        response_event = events[0]
+        end_stream = events[1]
+
+        assert isinstance(response_event, h2.events.ResponseReceived)
+        assert isinstance(end_stream, h2.events.StreamEnded)
+
+    def test_oversize_headers(self):
+        """
+        Sending headers that are oversized generates a stream of CONTINUATION
+        frames.
+        """
+        all_bytes = [chr(x) for x in range(0, 256)]
+        if IS_PYTHON3:
+            all_bytes = [x.encode('latin1') for x in all_bytes]
+
+        large_binary_string = b''.join(
+            random.choice(all_bytes) for _ in range(0, 256)
+        )
+        test_headers = [
+            (':authority', 'example.com'),
+            (':path', '/'),
+            (':method', 'GET'),
+            (':scheme', 'https'),
+            ('key', large_binary_string)
+        ]
+        c = h2.connection.H2Connection()
+
+        # Greatly shrink the max frame size to force us over.
+        c.max_outbound_frame_size = 48
+        c.initiate_connection()
+        c.send_headers(1, test_headers, end_stream=True)
+
+        # Use the frame buffer here, because we don't care about decoding
+        # the headers. Don't send all the data in because that will force the
+        # frame buffer to stop caching the CONTINUATION frames, so instead
+        # send all but one byte.
+        buffer = h2.frame_buffer.FrameBuffer(server=True)
+        buffer.max_frame_size = 65535
+        data = c.data_to_send()
+        buffer.add_data(data[:-1])
+
+        # Drain the buffer, confirming that it only provides a single frame
+        # (the settings frame)
+        assert len(list(buffer)) == 1
+
+        # Get the cached frames.
+        frames = buffer._headers_buffer
+
+        # Split the frames up.
+        headers_frame = frames[0]
+        continuation_frames = frames[1:]
+
+        assert isinstance(headers_frame, hyperframe.frame.HeadersFrame)
+        assert all(
+            map(
+                lambda f: isinstance(f, hyperframe.frame.ContinuationFrame),
+                continuation_frames)
+        )
+        assert all(
+            map(lambda f: len(f.data) <= c.max_outbound_frame_size, frames)
+        )
+
+        assert frames[0].flags == {'END_STREAM'}
+
+        buffer.add_data(data[-1:])
+        headers = list(buffer)[0]
+        assert isinstance(headers, hyperframe.frame.HeadersFrame)
+
+    def test_handle_stream_reset(self, frame_factory):
+        """
+        A stream being remotely reset fires a StreamReset event.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_rst_stream_frame(
+            stream_id=1,
+            error_code=h2.errors.ErrorCodes.STREAM_CLOSED,
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.StreamReset)
+        assert event.stream_id == 1
+        assert event.error_code is h2.errors.ErrorCodes.STREAM_CLOSED
+        assert isinstance(event.error_code, h2.errors.ErrorCodes)
+        assert event.remote_reset
+
+    def test_handle_stream_reset_with_unknown_error_code(self, frame_factory):
+        """
+        Streams being remotely reset with unknown error codes behave exactly
+        as they do with known error codes, but the error code on the event is
+        a plain int instead of an ErrorCodes member.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_rst_stream_frame(stream_id=1, error_code=0xFA)
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.StreamReset)
+        assert event.stream_id == 1
+        assert event.error_code == 250
+        assert not isinstance(event.error_code, h2.errors.ErrorCodes)
+        assert event.remote_reset
+
+    def test_can_consume_partial_data_from_connection(self):
+        """
+        We can do partial reads from the connection.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        assert len(c.data_to_send(2)) == 2
+        assert len(c.data_to_send(3)) == 3
+        assert 0 < len(c.data_to_send(500)) < 500
+        assert len(c.data_to_send(10)) == 0
+        assert len(c.data_to_send()) == 0
+
+    def test_we_can_update_settings(self, frame_factory):
+        """
+        Updating the settings emits a SETTINGS frame.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
+        new_settings = {
+            h2.settings.SettingCodes.HEADER_TABLE_SIZE: 52,
+            h2.settings.SettingCodes.ENABLE_PUSH: 0,
+        }
+        events = c.update_settings(new_settings)
+        assert not events
+
+        f = frame_factory.build_settings_frame(new_settings)
+        assert c.data_to_send() == f.serialize()
+
+    def test_settings_get_acked_correctly(self, frame_factory):
+        """
+        When settings changes are ACKed, they contain the changed settings.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        new_settings = {
+            h2.settings.SettingCodes.HEADER_TABLE_SIZE: 52,
+            h2.settings.SettingCodes.ENABLE_PUSH: 0,
+        }
+        c.update_settings(new_settings)
+
+        f = frame_factory.build_settings_frame({}, ack=True)
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.SettingsAcknowledged)
+        assert len(event.changed_settings) == len(new_settings)
+        for setting, value in new_settings.items():
+            assert event.changed_settings[setting].new_value == value
+
+    def test_cannot_create_new_outbound_stream_over_limit(self, frame_factory):
+        """
+        When the number of outbound streams exceeds the remote peer's
+        MAX_CONCURRENT_STREAMS setting, attempting to open new streams fails.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        f = frame_factory.build_settings_frame(
+            {h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 1}
+        )
+        c.receive_data(f.serialize())[0]
+
+        c.send_headers(1, self.example_request_headers)
+
+        with pytest.raises(h2.exceptions.TooManyStreamsError):
+            c.send_headers(3, self.example_request_headers)
+
+    def test_can_receive_trailers(self, frame_factory):
+        """
+        When two HEADERS blocks are received on the same stream from a
+        server, the second block is treated as trailers.
+        """
+        config = h2.config.H2Configuration(header_encoding='utf-8')
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+        f = frame_factory.build_headers_frame(self.example_response_headers)
+        c.receive_data(f.serialize())
+
+        # Send in trailers.
+        trailers = [('content-length', '0')]
+        f = frame_factory.build_headers_frame(
+            trailers,
+            flags=['END_STREAM'],
+        )
+        events = c.receive_data(f.serialize())
+        assert len(events) == 2
+
+        event = events[0]
+        assert isinstance(event, h2.events.TrailersReceived)
+        assert event.headers == trailers
+        assert event.stream_id == 1
+
+    def test_reject_trailers_not_ending_stream(self, frame_factory):
+        """
+        When trailers are received without the END_STREAM flag being present,
+        this is a ProtocolError.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+        f = frame_factory.build_headers_frame(self.example_response_headers)
+        c.receive_data(f.serialize())
+
+        # Send in trailers.
+        c.clear_outbound_data_buffer()
+        trailers = [('content-length', '0')]
+        f = frame_factory.build_headers_frame(
+            trailers,
+            flags=[],
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_can_send_trailers(self, frame_factory):
+        """
+        When a second set of headers is sent on a stream, it is treated as
+        trailers.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+        c.send_headers(1, self.example_request_headers)
+
+        # Now send trailers.
+        trailers = [('content-length', '0')]
+        c.send_headers(1, trailers, end_stream=True)
+
+        frame_factory.refresh_encoder()
+        f1 = frame_factory.build_headers_frame(
+            self.example_request_headers,
+        )
+        f2 = frame_factory.build_headers_frame(
+            trailers,
+            flags=['END_STREAM'],
+        )
+        assert c.data_to_send() == f1.serialize() + f2.serialize()
+
+    def test_trailers_must_have_end_stream(self, frame_factory):
+        """
+        A set of trailers must carry the END_STREAM flag.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # Send headers.
+        c.send_headers(1, self.example_request_headers)
+
+        # Now send trailers.
+        trailers = [('content-length', '0')]
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.send_headers(1, trailers)
+
+    def test_headers_are_lowercase(self, frame_factory):
+        """
+        When headers are sent, they are forced to lower-case.
+        """
+        weird_headers = self.example_request_headers + [
+            ('ChAnGiNg-CaSe', 'AlsoHere'),
+            ('alllowercase', 'alllowercase'),
+            ('ALLCAPS', 'ALLCAPS'),
+        ]
+        expected_headers = self.example_request_headers + [
+            ('changing-case', 'AlsoHere'),
+            ('alllowercase', 'alllowercase'),
+            ('allcaps', 'ALLCAPS'),
+        ]
+
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
+        c.send_headers(1, weird_headers)
+        expected_frame = frame_factory.build_headers_frame(
+            headers=expected_headers
+        )
+
+        assert c.data_to_send() == expected_frame.serialize()
+
+    @given(frame_size=integers(min_value=2**14, max_value=(2**24 - 1)))
+    def test_changing_max_frame_size(self, frame_factory, frame_size):
+        """
+        When the user changes the max frame size and the change is ACKed, the
+        remote peer is now bound by the new frame size.
+        """
+        # We need to refresh the encoder because hypothesis has a problem with
+        # integrating with py.test, meaning that we use the same frame factory
+        # for all tests.
+        # See https://github.com/HypothesisWorks/hypothesis-python/issues/377
+        frame_factory.refresh_encoder()
+
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # Set up the stream.
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+        headers_frame = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+        )
+        c.receive_data(headers_frame.serialize())
+
+        # Change the max frame size.
+        c.update_settings(
+            {h2.settings.SettingCodes.MAX_FRAME_SIZE: frame_size}
+        )
+        settings_ack = frame_factory.build_settings_frame({}, ack=True)
+        c.receive_data(settings_ack.serialize())
+
+        # Greatly increase the flow control windows: we're not here to test
+        # flow control today.
+        c.increment_flow_control_window(increment=(2 * frame_size) + 1)
+        c.increment_flow_control_window(
+            increment=(2 * frame_size) + 1, stream_id=1
+        )
+
+        # Send one DATA frame that is exactly the max frame size: confirm it's
+        # fine.
+        data = frame_factory.build_data_frame(
+            data=(b'\x00' * frame_size),
+        )
+        events = c.receive_data(data.serialize())
+        assert len(events) == 1
+        assert isinstance(events[0], h2.events.DataReceived)
+        assert events[0].flow_controlled_length == frame_size
+
+        # Send one that is one byte too large: confirm a protocol error is
+        # raised.
+        data.data += b'\x00'
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(data.serialize())
+
+    def test_cookies_are_joined_on_push(self, frame_factory):
+        """
+        RFC 7540 Section 8.1.2.5 requires that we join multiple Cookie headers
+        in a header block together when they're received on a push.
+        """
+        # This is a moderately varied set of cookie headers: some combined,
+        # some split.
+        cookie_headers = [
+            ('cookie',
+                'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC'),
+            ('cookie', 'path=1'),
+            ('cookie', 'test1=val1; test2=val2')
+        ]
+        expected = (
+            'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC; '
+            'path=1; test1=val1; test2=val2'
+        )
+
+        config = h2.config.H2Configuration(header_encoding='utf-8')
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers + cookie_headers
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        e = events[0]
+
+        cookie_fields = [(n, v) for n, v in e.headers if n == 'cookie']
+        assert len(cookie_fields) == 1
+
+        _, v = cookie_fields[0]
+        assert v == expected
+
+    def test_cookies_arent_joined_without_normalization(self, frame_factory):
+        """
+        If inbound header normalization is disabled, cookie headers aren't
+        joined.
+        """
+        # This is a moderately varied set of cookie headers: some combined,
+        # some split.
+        cookie_headers = [
+            ('cookie',
+                'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC'),
+            ('cookie', 'path=1'),
+            ('cookie', 'test1=val1; test2=val2')
+        ]
+
+        config = h2.config.H2Configuration(
+            client_side=True,
+            normalize_inbound_headers=False,
+            header_encoding='utf-8'
+        )
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers + cookie_headers
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        e = events[0]
+
+        received_cookies = [(n, v) for n, v in e.headers if n == 'cookie']
+        assert len(received_cookies) == 3
+        assert cookie_headers == received_cookies
+
+
+class TestBasicServer(object):
+    """
+    Basic server-side tests.
+    """
+    example_request_headers = [
+        (u':authority', u'example.com'),
+        (u':path', u'/'),
+        (u':scheme', u'https'),
+        (u':method', u'GET'),
+    ]
+    bytes_example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+    ]
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'hyper-h2/0.1.0')
+    ]
+    server_config = h2.config.H2Configuration(
+        client_side=False, header_encoding='utf-8'
+    )
+
+    def test_ignores_preamble(self):
+        """
+        Receiving the preamble fires no events and writes no frames.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        preamble = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'
+
+        events = c.receive_data(preamble)
+        assert not events
+        assert not c.data_to_send()
+
+    @pytest.mark.parametrize("chunk_size", range(1, 24))
+    def test_drip_feed_preamble(self, chunk_size):
+        """
+        The preamble can be delivered in multiple chunks rather than in a
+        single buffer.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        preamble = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'
+        events = []
+
+        for i in range(0, len(preamble), chunk_size):
+            events += c.receive_data(preamble[i:i+chunk_size])
+
+        assert not events
+        assert not c.data_to_send()
+
+    def test_initiate_connection_sends_server_preamble(self, frame_factory):
+        """
+        For server-side connections, initiate_connection sends a server
+        preamble.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        expected_settings = frame_factory.build_settings_frame(
+            c.local_settings
+        )
+        expected_data = expected_settings.serialize()
+
+        events = c.initiate_connection()
+        assert not events
+        assert c.data_to_send() == expected_data
+
+    def test_headers_event(self, frame_factory):
+        """
+        When a headers frame is received a RequestReceived event fires.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        data = f.serialize()
+        events = c.receive_data(data)
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.RequestReceived)
+        assert event.stream_id == 1
+        assert event.headers == self.example_request_headers
+
+    def test_headers_event_bytes(self, frame_factory):
+        """
+        When a headers frame is received a RequestReceived event fires with
+        bytes headers if the encoding is set appropriately.
+        """
+        config = h2.config.H2Configuration(
+            client_side=False, header_encoding=False
+        )
+        c = h2.connection.H2Connection(config=config)
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        data = f.serialize()
+        events = c.receive_data(data)
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.RequestReceived)
+        assert event.stream_id == 1
+        assert event.headers == self.bytes_example_request_headers
+
+    def test_data_event(self, frame_factory):
+        """
+        Test that data received on a stream fires a DataReceived event.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        f1 = frame_factory.build_headers_frame(
+            self.example_request_headers, stream_id=3
+        )
+        f2 = frame_factory.build_data_frame(
+            b'some request data',
+            stream_id=3,
+        )
+        data = b''.join(map(lambda f: f.serialize(), [f1, f2]))
+        events = c.receive_data(data)
+
+        assert len(events) == 2
+        event = events[1]
+
+        assert isinstance(event, h2.events.DataReceived)
+        assert event.stream_id == 3
+        assert event.data == b'some request data'
+        assert event.flow_controlled_length == 17
+
+    def test_data_event_with_padding(self, frame_factory):
+        """
+        Test that data received on a stream fires a DataReceived event that
+        accounts for padding.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        f1 = frame_factory.build_headers_frame(
+            self.example_request_headers, stream_id=3
+        )
+        f2 = frame_factory.build_data_frame(
+            b'some request data',
+            stream_id=3,
+            padding_len=20
+        )
+        data = b''.join(map(lambda f: f.serialize(), [f1, f2]))
+        events = c.receive_data(data)
+
+        assert len(events) == 2
+        event = events[1]
+
+        assert isinstance(event, h2.events.DataReceived)
+        assert event.stream_id == 3
+        assert event.data == b'some request data'
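+        # Padding counts toward flow control: 17 data bytes, 20 padding bytes,
+        # and 1 byte for the Pad Length field.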
+        assert event.flow_controlled_length == 17 + 20 + 1
+
+    def test_receiving_ping_frame(self, frame_factory):
+        """
+        Ping frames should be immediately ACKed.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
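+        # PING frames carry exactly 8 octets of opaque data.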
+        ping_data = b'\x01' * 8
+        sent_frame = frame_factory.build_ping_frame(ping_data)
+        expected_frame = frame_factory.build_ping_frame(
+            ping_data, flags=["ACK"]
+        )
+        expected_data = expected_frame.serialize()
+
+        c.clear_outbound_data_buffer()
+        events = c.receive_data(sent_frame.serialize())
+
+        assert not events
+        assert c.data_to_send() == expected_data
+
+    def test_receiving_settings_frame_event(self, frame_factory):
+        """
+        Settings frames should cause a RemoteSettingsChanged event to fire.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_settings_frame(
+            settings=helpers.SAMPLE_SETTINGS
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.RemoteSettingsChanged)
+        assert len(event.changed_settings) == len(helpers.SAMPLE_SETTINGS)
+
+    def test_acknowledging_settings(self, frame_factory):
+        """
+        Acknowledging settings causes an appropriate SETTINGS frame to be
+        emitted.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        received_frame = frame_factory.build_settings_frame(
+            settings=helpers.SAMPLE_SETTINGS
+        )
+        expected_frame = frame_factory.build_settings_frame(
+            settings={}, ack=True
+        )
+        expected_data = expected_frame.serialize()
+
+        c.clear_outbound_data_buffer()
+        events = c.receive_data(received_frame.serialize())
+
+        assert len(events) == 1
+        assert c.data_to_send() == expected_data
+
+    def test_close_connection(self, frame_factory):
+        """
+        Closing the connection with no error code emits a GOAWAY frame with
+        error code 0.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_goaway_frame(last_stream_id=0)
+        expected_data = f.serialize()
+
+        c.clear_outbound_data_buffer()
+        events = c.close_connection()
+
+        assert not events
+        assert c.data_to_send() == expected_data
+
+    @pytest.mark.parametrize("error_code", h2.errors.ErrorCodes)
+    def test_close_connection_with_error_code(self, frame_factory, error_code):
+        """
+        Closing the connection with an error code emits a GOAWAY frame with
+        that error code.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_goaway_frame(
+            error_code=error_code, last_stream_id=0
+        )
+        expected_data = f.serialize()
+
+        c.clear_outbound_data_buffer()
+        events = c.close_connection(error_code)
+
+        assert not events
+        assert c.data_to_send() == expected_data
+
+    @pytest.mark.parametrize("last_stream_id,output", [
+        (None, 23),
+        (0, 0),
+        (42, 42)
+    ])
+    def test_close_connection_with_last_stream_id(self, frame_factory,
+                                                  last_stream_id, output):
+        """
+        Closing the connection with last_stream_id set emits a GOAWAY frame
+        with that value.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
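+        # Open stream 23 so that last_stream_id=None falls back to the highest
+        # stream ID the connection has seen.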
+        headers_frame = frame_factory.build_headers_frame(
+            [
+                (':authority', 'example.com'),
+                (':path', '/'),
+                (':scheme', 'https'),
+                (':method', 'GET'),
+            ],
+            stream_id=23)
+        c.receive_data(headers_frame.serialize())
+
+        f = frame_factory.build_goaway_frame(
+            last_stream_id=output
+        )
+        expected_data = f.serialize()
+
+        c.clear_outbound_data_buffer()
+        events = c.close_connection(last_stream_id=last_stream_id)
+
+        assert not events
+        assert c.data_to_send() == expected_data
+
+    @pytest.mark.parametrize("additional_data,output", [
+        (None, b''),
+        (b'', b''),
+        (b'foobar', b'foobar')
+    ])
+    def test_close_connection_with_additional_data(self, frame_factory,
+                                                   additional_data, output):
+        """
+        Closing the connection with additional debug data emits a GOAWAY frame
+        with that data attached.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_goaway_frame(
+            last_stream_id=0, additional_data=output
+        )
+        expected_data = f.serialize()
+
+        c.clear_outbound_data_buffer()
+        events = c.close_connection(additional_data=additional_data)
+
+        assert not events
+        assert c.data_to_send() == expected_data
+
+    def test_reset_stream(self, frame_factory):
+        """
+        Resetting a stream with no error code emits a RST_STREAM frame with
+        error code 0.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        expected_frame = frame_factory.build_rst_stream_frame(stream_id=1)
+        expected_data = expected_frame.serialize()
+
+        events = c.reset_stream(stream_id=1)
+
+        assert not events
+        assert c.data_to_send() == expected_data
+
+    @pytest.mark.parametrize("error_code", h2.errors.ErrorCodes)
+    def test_reset_stream_with_error_code(self, frame_factory, error_code):
+        """
+        Resetting a stream with an error code emits a RST_STREAM frame with
+        that error code.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers,
+            stream_id=3
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        expected_frame = frame_factory.build_rst_stream_frame(
+            stream_id=3, error_code=error_code
+        )
+        expected_data = expected_frame.serialize()
+
+        events = c.reset_stream(stream_id=3, error_code=error_code)
+
+        assert not events
+        assert c.data_to_send() == expected_data
+
+    def test_cannot_reset_nonexistent_stream(self, frame_factory):
+        """
+        Resetting nonexistent streams raises NoSuchStreamError.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers,
+            stream_id=3
+        )
+        c.receive_data(f.serialize())
+
+        with pytest.raises(h2.exceptions.NoSuchStreamError) as e:
+            c.reset_stream(stream_id=1)
+
+        assert e.value.stream_id == 1
+
+        with pytest.raises(h2.exceptions.NoSuchStreamError) as e:
+            c.reset_stream(stream_id=5)
+
+        assert e.value.stream_id == 5
+
+    def test_basic_sending_ping_frame_logic(self, frame_factory):
+        """
+        Sending ping frames serializes a ping frame on stream 0 with
+        appropriate opaque data.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        ping_data = b'\x01\x02\x03\x04\x05\x06\x07\x08'
+
+        expected_frame = frame_factory.build_ping_frame(ping_data)
+        expected_data = expected_frame.serialize()
+
+        events = c.ping(ping_data)
+
+        assert not events
+        assert c.data_to_send() == expected_data
+
+    @pytest.mark.parametrize(
+        'opaque_data',
+        [
+            b'',
+            b'\x01\x02\x03\x04\x05\x06\x07',
+            u'abcdefgh',
+            b'too many bytes',
+        ]
+    )
+    def test_ping_frame_opaque_data_must_be_length_8_bytestring(self,
+                                                                frame_factory,
+                                                                opaque_data):
+        """
+        Sending a ping frame only works with 8-byte bytestrings.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        with pytest.raises(ValueError):
+            c.ping(opaque_data)
+
+    def test_receiving_ping_acknowledgement(self, frame_factory):
+        """
+        Receiving a PING acknowledgement fires a PingAcknowledged event.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        ping_data = b'\x01\x02\x03\x04\x05\x06\x07\x08'
+
+        f = frame_factory.build_ping_frame(
+            ping_data, flags=['ACK']
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.PingAcknowledged)
+        assert event.ping_data == ping_data
+
+    def test_stream_ended_remotely(self, frame_factory):
+        """
+        When the remote stream ends with a non-empty data frame, a
+        DataReceived event and a StreamEnded event are fired.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        f1 = frame_factory.build_headers_frame(
+            self.example_request_headers, stream_id=3
+        )
+        f2 = frame_factory.build_data_frame(
+            b'some request data',
+            flags=['END_STREAM'],
+            stream_id=3,
+        )
+        data = b''.join(map(lambda f: f.serialize(), [f1, f2]))
+        events = c.receive_data(data)
+
+        assert len(events) == 3
+        data_event = events[1]
+        stream_ended_event = events[2]
+
+        assert isinstance(data_event, h2.events.DataReceived)
+        assert isinstance(stream_ended_event, h2.events.StreamEnded)
+        assert stream_ended_event.stream_id == 3
+
+    def test_can_push_stream(self, frame_factory):
+        """
+        Pushing a stream causes a PUSH_PROMISE frame to be emitted.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+
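+        # The factory's HPACK encoder has already encoded the request headers
+        # above; reset it so the expected PUSH_PROMISE header block matches
+        # what the connection's own (still unused) encoder will produce.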
+        frame_factory.refresh_encoder()
+        expected_frame = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers,
+            flags=['END_HEADERS'],
+        )
+
+        c.clear_outbound_data_buffer()
+        c.push_stream(
+            stream_id=1,
+            promised_stream_id=2,
+            request_headers=self.example_request_headers
+        )
+
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_cannot_push_streams_when_disabled(self, frame_factory):
+        """
+        When the remote peer has disabled stream pushing, attempting to push
+        a stream raises a ProtocolError.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_settings_frame(
+            {h2.settings.SettingCodes.ENABLE_PUSH: 0}
+        )
+        c.receive_data(f.serialize())
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.push_stream(
+                stream_id=1,
+                promised_stream_id=2,
+                request_headers=self.example_request_headers
+            )
+
+    def test_settings_remote_change_header_table_size(self, frame_factory):
+        """
+        Acknowledging a remote HEADER_TABLE_SIZE settings change causes us to
+        change the header table size of our encoder.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
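+        # 4096 octets is the default SETTINGS_HEADER_TABLE_SIZE value.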
+        assert c.encoder.header_table_size == 4096
+
+        received_frame = frame_factory.build_settings_frame(
+            {h2.settings.SettingCodes.HEADER_TABLE_SIZE: 80}
+        )
+        c.receive_data(received_frame.serialize())
+
+        assert c.encoder.header_table_size == 80
+
+    def test_settings_local_change_header_table_size(self, frame_factory):
+        """
+        The remote peer acknowledging a local HEADER_TABLE_SIZE settings change
+        does not cause us to change the header table size of our decoder.
+
+        For an explanation of why this test is this way around, see issue #37.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        assert c.decoder.header_table_size == 4096
+
+        expected_frame = frame_factory.build_settings_frame({}, ack=True)
+        c.update_settings(
+            {h2.settings.SettingCodes.HEADER_TABLE_SIZE: 80}
+        )
+        c.receive_data(expected_frame.serialize())
+        c.clear_outbound_data_buffer()
+
+        assert c.decoder.header_table_size == 4096
+
+    def test_restricting_outbound_frame_size_by_settings(self, frame_factory):
+        """
+        The remote peer's MAX_FRAME_SIZE setting limits the maximum outbound
+        frame size we may send.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
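+        # 17000 bytes exceeds the default SETTINGS_MAX_FRAME_SIZE of 16384.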
+        with pytest.raises(h2.exceptions.FrameTooLargeError):
+            c.send_data(1, b'\x01' * 17000)
+
+        received_frame = frame_factory.build_settings_frame(
+            {h2.settings.SettingCodes.MAX_FRAME_SIZE: 17001}
+        )
+        c.receive_data(received_frame.serialize())
+
+        c.send_data(1, b'\x01' * 17000)
+        assert c.data_to_send()
+
+    def test_restricting_inbound_frame_size_by_settings(self, frame_factory):
+        """
+        We throw ProtocolErrors and tear down connections if oversize frames
+        are received.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        h = frame_factory.build_headers_frame(self.example_request_headers)
+        c.receive_data(h.serialize())
+        c.clear_outbound_data_buffer()
+
+        data_frame = frame_factory.build_data_frame(b'\x01' * 17000)
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(data_frame.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1, error_code=h2.errors.ErrorCodes.FRAME_SIZE_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_cannot_receive_new_streams_over_limit(self, frame_factory):
+        """
+        When accepting a new inbound stream would exceed our
+        MAX_CONCURRENT_STREAMS setting, the peer's attempt to open it fails.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.update_settings(
+            {h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 1}
+        )
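+        # The lowered MAX_CONCURRENT_STREAMS value only takes effect once the
+        # peer acknowledges our SETTINGS frame.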
+        f = frame_factory.build_settings_frame({}, ack=True)
+        c.receive_data(f.serialize())
+
+        f = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.example_request_headers,
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            stream_id=3,
+            headers=self.example_request_headers,
+        )
+        with pytest.raises(h2.exceptions.TooManyStreamsError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_can_receive_trailers(self, frame_factory):
+        """
+        When two HEADERS blocks are received on the same stream from a
+        client, the second block is treated as trailers.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        c.receive_data(f.serialize())
+
+        # Send in trailers.
+        trailers = [('content-length', '0')]
+        f = frame_factory.build_headers_frame(
+            trailers,
+            flags=['END_STREAM'],
+        )
+        events = c.receive_data(f.serialize())
+        assert len(events) == 2
+
+        event = events[0]
+        assert isinstance(event, h2.events.TrailersReceived)
+        assert event.headers == trailers
+        assert event.stream_id == 1
+
+    def test_reject_trailers_not_ending_stream(self, frame_factory):
+        """
+        When trailers are received without the END_STREAM flag being present,
+        this is a ProtocolError.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        c.receive_data(f.serialize())
+
+        # Send in trailers.
+        c.clear_outbound_data_buffer()
+        trailers = [('content-length', '0')]
+        f = frame_factory.build_headers_frame(
+            trailers,
+            flags=[],
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_can_send_trailers(self, frame_factory):
+        """
+        When a second set of headers is sent, it is correctly treated as
+        trailers.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        c.receive_data(f.serialize())
+
+        # Send headers.
+        c.clear_outbound_data_buffer()
+        c.send_headers(1, self.example_response_headers)
+
+        # Now send trailers.
+        trailers = [('content-length', '0')]
+        c.send_headers(1, trailers, end_stream=True)
+
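+        # Reset the factory's HPACK encoder so the expected frames are encoded
+        # from a fresh dynamic table, matching the connection's own encoder.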
+        frame_factory.refresh_encoder()
+        f1 = frame_factory.build_headers_frame(
+            self.example_response_headers,
+        )
+        f2 = frame_factory.build_headers_frame(
+            trailers,
+            flags=['END_STREAM'],
+        )
+        assert c.data_to_send() == f1.serialize() + f2.serialize()
+
+    def test_trailers_must_have_end_stream(self, frame_factory):
+        """
+        A set of trailers must carry the END_STREAM flag.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        c.receive_data(f.serialize())
+
+        # Send headers.
+        c.send_headers(1, self.example_response_headers)
+
+        # Now send trailers.
+        trailers = [('content-length', '0')]
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.send_headers(1, trailers)
+
+    @pytest.mark.parametrize("frame_id", range(12, 256))
+    def test_unknown_frames_are_ignored(self, frame_factory, frame_id):
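+        """
+        Frames with unknown types are surfaced as UnknownFrameReceived events
+        and are otherwise ignored.
+        """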
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_data_frame(data=b'abcdefghtdst')
+        f.type = frame_id
+
+        events = c.receive_data(f.serialize())
+        assert not c.data_to_send()
+        assert len(events) == 1
+        assert isinstance(events[0], h2.events.UnknownFrameReceived)
+        assert isinstance(events[0].frame, hyperframe.frame.ExtensionFrame)
+
+    def test_can_send_goaway_repeatedly(self, frame_factory):
+        """
+        We can send a GOAWAY frame as many times as we like.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        c.close_connection()
+        c.close_connection()
+        c.close_connection()
+
+        f = frame_factory.build_goaway_frame(last_stream_id=0)
+
+        assert c.data_to_send() == (f.serialize() * 3)
+
+    def test_receiving_goaway_frame(self, frame_factory):
+        """
+        Receiving a GOAWAY frame causes a ConnectionTerminated event to be
+        fired and transitions the connection to the CLOSED state, and clears
+        the outbound data buffer.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_goaway_frame(
+            last_stream_id=5, error_code=h2.errors.ErrorCodes.SETTINGS_TIMEOUT
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.ConnectionTerminated)
+        assert event.error_code == h2.errors.ErrorCodes.SETTINGS_TIMEOUT
+        assert isinstance(event.error_code, h2.errors.ErrorCodes)
+        assert event.last_stream_id == 5
+        assert event.additional_data is None
+        assert c.state_machine.state == h2.connection.ConnectionState.CLOSED
+
+        assert not c.data_to_send()
+
+    def test_receiving_multiple_goaway_frames(self, frame_factory):
+        """
+        Multiple GOAWAY frames can be received at once, and are allowed. Each
+        one fires a ConnectionTerminated event.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_goaway_frame(last_stream_id=0)
+        events = c.receive_data(f.serialize() * 3)
+
+        assert len(events) == 3
+        assert all(
+            isinstance(event, h2.events.ConnectionTerminated)
+            for event in events
+        )
+
+    def test_receiving_goaway_frame_with_additional_data(self, frame_factory):
+        """
+        A GOAWAY frame can contain additional debug data, which should be
+        exposed via the ConnectionTerminated event.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        additional_data = b'debug data'
+        f = frame_factory.build_goaway_frame(last_stream_id=0,
+                                             additional_data=additional_data)
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.ConnectionTerminated)
+        assert event.additional_data == additional_data
+
+    def test_receiving_goaway_frame_with_unknown_error(self, frame_factory):
+        """
+        Receiving a GOAWAY frame with an unknown error code behaves exactly
+        the same as receiving one we know about, but the code is reported as
+        a plain integer rather than an ErrorCodes member.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_goaway_frame(
+            last_stream_id=5, error_code=0xFA
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.ConnectionTerminated)
+        assert event.error_code == 250
+        assert not isinstance(event.error_code, h2.errors.ErrorCodes)
+        assert event.last_stream_id == 5
+        assert event.additional_data is None
+        assert c.state_machine.state == h2.connection.ConnectionState.CLOSED
+
+        assert not c.data_to_send()
+
+    def test_cookies_are_joined(self, frame_factory):
+        """
+        RFC 7540 Section 8.1.2.5 requires that we join multiple Cookie headers
+        in a header block together.
+        """
+        # This is a moderately varied set of cookie headers: some combined,
+        # some split.
+        cookie_headers = [
+            ('cookie',
+                'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC'),
+            ('cookie', 'path=1'),
+            ('cookie', 'test1=val1; test2=val2')
+        ]
+        expected = (
+            'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC; '
+            'path=1; test1=val1; test2=val2'
+        )
+
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers + cookie_headers
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        e = events[0]
+
+        cookie_fields = [(n, v) for n, v in e.headers if n == 'cookie']
+        assert len(cookie_fields) == 1
+
+        _, v = cookie_fields[0]
+        assert v == expected
+
+    def test_cookies_arent_joined_without_normalization(self, frame_factory):
+        """
+        If inbound header normalization is disabled, cookie headers aren't
+        joined.
+        """
+        # This is a moderately varied set of cookie headers: some combined,
+        # some split.
+        cookie_headers = [
+            ('cookie',
+                'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC'),
+            ('cookie', 'path=1'),
+            ('cookie', 'test1=val1; test2=val2')
+        ]
+
+        config = h2.config.H2Configuration(
+            client_side=False,
+            normalize_inbound_headers=False,
+            header_encoding='utf-8'
+        )
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers + cookie_headers
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        e = events[0]
+
+        received_cookies = [(n, v) for n, v in e.headers if n == 'cookie']
+        assert len(received_cookies) == 3
+        assert cookie_headers == received_cookies
+
+    def test_stream_repr(self):
+        """
+        Ensure stream string representation is appropriate.
+        """
+        s = h2.stream.H2Stream(4, None, 12, 14)
+        assert repr(s) == "<H2Stream id:4 state:<StreamState.IDLE: 0>>"
+
+
+def sanity_check_data_frame(data_frame,
+                            expected_flow_controlled_length,
+                            expect_padded_flag,
+                            expected_data_frame_pad_length):
+    """
+    Asserts that ``data_frame`` is a ``hyperframe.frame.DataFrame`` and that
+    its ``flags``, ``flow_controlled_length`` and ``pad_length`` match
+    expectations.
+    """
+
+    assert isinstance(data_frame, hyperframe.frame.DataFrame)
+
+    assert data_frame.flow_controlled_length == expected_flow_controlled_length
+
+    if expect_padded_flag:
+        assert 'PADDED' in data_frame.flags
+    else:
+        assert 'PADDED' not in data_frame.flags
+
+    assert data_frame.pad_length == expected_data_frame_pad_length
diff --git a/tools/third_party/h2/test/test_closed_streams.py b/tools/third_party/h2/test/test_closed_streams.py
new file mode 100755
index 0000000..2344f72
--- /dev/null
+++ b/tools/third_party/h2/test/test_closed_streams.py
@@ -0,0 +1,388 @@
+# -*- coding: utf-8 -*-
+"""
+test_closed_streams
+~~~~~~~~~~~~~~~~~~~
+
+Tests that we handle closed streams correctly.
+"""
+import pytest
+
+import h2.config
+import h2.connection
+import h2.errors
+import h2.events
+import h2.exceptions
+
+
+class TestClosedStreams(object):
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_can_receive_multiple_rst_stream_frames(self, frame_factory):
+        """
+        Multiple RST_STREAM frames can be received, either at once or well
+        after one another. Only the first fires an event.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        f = frame_factory.build_rst_stream_frame(stream_id=1)
+        events = c.receive_data(f.serialize() * 3)
+
+        # Force an iteration over all the streams to remove them.
+        c.open_outbound_streams
+
+        # Receive more data.
+        events += c.receive_data(f.serialize() * 3)
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.StreamReset)
+
+    def test_receiving_low_stream_id_causes_goaway(self, frame_factory):
+        """
+        The remote peer creating a stream with a lower ID than one we've seen
+        causes a GOAWAY frame.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.initiate_connection()
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers,
+            stream_id=3,
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers,
+            stream_id=1,
+        )
+
+        with pytest.raises(h2.exceptions.StreamIDTooLowError) as e:
+            c.receive_data(f.serialize())
+
+        assert e.value.stream_id == 1
+        assert e.value.max_stream_id == 3
+
+        f = frame_factory.build_goaway_frame(
+            last_stream_id=3,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+        )
+        assert c.data_to_send() == f.serialize()
+
+    def test_closed_stream_not_present_in_streams_dict(self, frame_factory):
+        """
+        When streams have been closed, they get removed from the streams
+        dictionary the next time we count the open streams.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.initiate_connection()
+
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        c.receive_data(f.serialize())
+        c.push_stream(1, 2, self.example_request_headers)
+        c.reset_stream(1)
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_rst_stream_frame(stream_id=2)
+        c.receive_data(f.serialize())
+
+        # Force a count of the streams.
+        assert not c.open_outbound_streams
+
+        # The streams dictionary should be empty.
+        assert not c.streams
+
+
+class TestStreamsClosedByEndStream(object):
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    @pytest.mark.parametrize(
+        "frame",
+        [
+            lambda self, ff: ff.build_data_frame(b'hello'),
+            lambda self, ff: ff.build_headers_frame(
+                self.example_request_headers, flags=['END_STREAM']),
+            lambda self, ff: ff.build_headers_frame(
+                self.example_request_headers),
+        ]
+    )
+    @pytest.mark.parametrize("clear_streams", [True, False])
+    def test_frames_after_recv_end_will_error(self,
+                                              frame_factory,
+                                              frame,
+                                              clear_streams):
+        """
+        A stream that is closed by receiving END_STREAM raises
+        ProtocolError when it receives an unexpected frame.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.initiate_connection()
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers, flags=['END_STREAM']
+        )
+        c.receive_data(f.serialize())
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_response_headers,
+            end_stream=True
+        )
+
+        if clear_streams:
+            # Call open_inbound_streams to force the connection to clean
+            # closed streams.
+            c.open_inbound_streams
+
+        c.clear_outbound_data_buffer()
+
+        f = frame(self, frame_factory)
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        f = frame_factory.build_goaway_frame(
+            last_stream_id=1,
+            error_code=h2.errors.ErrorCodes.STREAM_CLOSED,
+        )
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize(
+        "frame",
+        [
+            lambda self, ff: ff.build_data_frame(b'hello'),
+            lambda self, ff: ff.build_headers_frame(
+                self.example_response_headers, flags=['END_STREAM']),
+            lambda self, ff: ff.build_headers_frame(
+                self.example_response_headers),
+        ]
+    )
+    @pytest.mark.parametrize("clear_streams", [True, False])
+    def test_frames_after_send_end_will_error(self,
+                                              frame_factory,
+                                              frame,
+                                              clear_streams):
+        """
+        A stream that is closed by sending END_STREAM raises
+        ProtocolError when it receives an unexpected frame.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers,
+                       end_stream=True)
+
+        f = frame_factory.build_headers_frame(
+            self.example_response_headers, flags=['END_STREAM']
+        )
+        c.receive_data(f.serialize())
+
+        if clear_streams:
+            # Call open_outbound_streams to force the connection to clean
+            # closed streams.
+            c.open_outbound_streams
+
+        c.clear_outbound_data_buffer()
+
+        f = frame(self, frame_factory)
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        f = frame_factory.build_goaway_frame(
+            last_stream_id=0,
+            error_code=h2.errors.ErrorCodes.STREAM_CLOSED,
+        )
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize(
+        "frame",
+        [
+            lambda self, ff: ff.build_window_update_frame(1, 1),
+            lambda self, ff: ff.build_rst_stream_frame(1)
+        ]
+    )
+    def test_frames_after_send_end_will_be_ignored(self,
+                                                   frame_factory,
+                                                   frame):
+        """
+        WINDOW_UPDATE and RST_STREAM frames received on a stream that was
+        closed by sending END_STREAM are ignored.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.initiate_connection()
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers, flags=['END_STREAM']
+        )
+        c.receive_data(f.serialize())
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_response_headers,
+            end_stream=True
+        )
+
+        c.clear_outbound_data_buffer()
+
+        f = frame(self, frame_factory)
+        events = c.receive_data(f.serialize())
+
+        assert not events
+
+
+class TestStreamsClosedByRstStream(object):
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    @pytest.mark.parametrize(
+        "frame",
+        [
+            lambda self, ff: ff.build_headers_frame(
+                self.example_request_headers),
+            lambda self, ff: ff.build_headers_frame(
+                self.example_request_headers, flags=['END_STREAM']),
+            lambda self, ff: ff.build_data_frame(b'hello'),
+            lambda self, ff: ff.build_window_update_frame(1, 1),
+        ]
+    )
+    def test_resets_further_frames_after_recv_reset(self,
+                                                    frame_factory,
+                                                    frame):
+        """
+        A stream that is closed by receiving RST_STREAM can receive further
+        frames: the connection simply responds with RST_STREAM for each.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.initiate_connection()
+
+        header_frame = frame_factory.build_headers_frame(
+            self.example_request_headers, flags=['END_STREAM']
+        )
+        c.receive_data(header_frame.serialize())
+
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_response_headers,
+            end_stream=False
+        )
+
+        rst_frame = frame_factory.build_rst_stream_frame(
+            1, h2.errors.ErrorCodes.STREAM_CLOSED
+        )
+        c.receive_data(rst_frame.serialize())
+        c.clear_outbound_data_buffer()
+
+        f = frame(self, frame_factory)
+        events = c.receive_data(f.serialize())
+
+        rst_frame = frame_factory.build_rst_stream_frame(
+            1, h2.errors.ErrorCodes.STREAM_CLOSED
+        )
+        assert not events
+        assert c.data_to_send() == rst_frame.serialize()
+
+        events = c.receive_data(f.serialize() * 3)
+        assert not events
+        assert c.data_to_send() == rst_frame.serialize() * 3
+
+        # Iterate over the streams to make sure it's gone, then confirm the
+        # behaviour is unchanged.
+        c.open_outbound_streams
+
+        events = c.receive_data(f.serialize() * 3)
+        assert not events
+        assert c.data_to_send() == rst_frame.serialize() * 3
+
+    @pytest.mark.parametrize(
+        "frame",
+        [
+            lambda self, ff: ff.build_headers_frame(
+                self.example_request_headers),
+            lambda self, ff: ff.build_headers_frame(
+                self.example_request_headers, flags=['END_STREAM']),
+            lambda self, ff: ff.build_data_frame(b'hello'),
+            lambda self, ff: ff.build_window_update_frame(1, 1),
+        ]
+    )
+    def test_resets_further_frames_after_send_reset(self,
+                                                    frame_factory,
+                                                    frame):
+        """
+        A stream that is closed by sending RST_STREAM can receive further
+        frames: the connection simply responds with RST_STREAM for each.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.initiate_connection()
+
+        header_frame = frame_factory.build_headers_frame(
+            self.example_request_headers, flags=['END_STREAM']
+        )
+        c.receive_data(header_frame.serialize())
+
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_response_headers,
+            end_stream=False
+        )
+
+        c.reset_stream(1, h2.errors.ErrorCodes.INTERNAL_ERROR)
+
+        rst_frame = frame_factory.build_rst_stream_frame(
+            1, h2.errors.ErrorCodes.STREAM_CLOSED
+        )
+        c.clear_outbound_data_buffer()
+
+        f = frame(self, frame_factory)
+        events = c.receive_data(f.serialize())
+
+        rst_frame = frame_factory.build_rst_stream_frame(
+            1, h2.errors.ErrorCodes.STREAM_CLOSED
+        )
+        assert not events
+        assert c.data_to_send() == rst_frame.serialize()
+
+        events = c.receive_data(f.serialize() * 3)
+        assert not events
+        assert c.data_to_send() == rst_frame.serialize() * 3
+
+        # Iterate over the streams to make sure it's gone, then confirm the
+        # behaviour is unchanged.
+        c.open_outbound_streams
+
+        events = c.receive_data(f.serialize() * 3)
+        assert not events
+        assert c.data_to_send() == rst_frame.serialize() * 3
diff --git a/tools/third_party/h2/test/test_complex_logic.py b/tools/third_party/h2/test/test_complex_logic.py
new file mode 100755
index 0000000..ff90bb8
--- /dev/null
+++ b/tools/third_party/h2/test/test_complex_logic.py
@@ -0,0 +1,586 @@
+# -*- coding: utf-8 -*-
+"""
+test_complex_logic
+~~~~~~~~~~~~~~~~~~
+
+More complex tests that try to do more.
+
+Certain tests don't really eliminate incorrect behaviour unless they do quite
+a bit. These tests should live here, to keep the pain in one place rather than
+hiding it in other parts of the test suite.
+"""
+import pytest
+
+import h2
+import h2.config
+import h2.connection
+
+
+class TestComplexClient(object):
+    """
+    Complex tests for client-side stacks.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+
+    def test_correctly_count_server_streams(self, frame_factory):
+        """
+        We correctly count the number of open streams on the client side,
+        both inbound and outbound.
+        """
+        # This test makes no sense unless you do both inbound and outbound,
+        # because it's important to confirm that we count them correctly.
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        expected_inbound_streams = expected_outbound_streams = 0
+
+        assert c.open_inbound_streams == expected_inbound_streams
+        assert c.open_outbound_streams == expected_outbound_streams
+
+        for stream_id in range(1, 15, 2):
+            # Open an outbound stream
+            c.send_headers(stream_id, self.example_request_headers)
+            expected_outbound_streams += 1
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+            # Receive a pushed stream (to create an inbound one). This doesn't
+            # open until we also receive headers.
+            f = frame_factory.build_push_promise_frame(
+                stream_id=stream_id,
+                promised_stream_id=stream_id+1,
+                headers=self.example_request_headers,
+            )
+            c.receive_data(f.serialize())
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+            f = frame_factory.build_headers_frame(
+                stream_id=stream_id+1,
+                headers=self.example_response_headers,
+            )
+            c.receive_data(f.serialize())
+            expected_inbound_streams += 1
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+        for stream_id in range(13, 0, -2):
+            # Close an outbound stream.
+            c.end_stream(stream_id)
+
+            # Stream doesn't close until both sides close it.
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+            f = frame_factory.build_headers_frame(
+                stream_id=stream_id,
+                headers=self.example_response_headers,
+                flags=['END_STREAM'],
+            )
+            c.receive_data(f.serialize())
+            expected_outbound_streams -= 1
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+            # Pushed streams can only be closed remotely.
+            f = frame_factory.build_data_frame(
+                stream_id=stream_id+1,
+                data=b'the content',
+                flags=['END_STREAM'],
+            )
+            c.receive_data(f.serialize())
+            expected_inbound_streams -= 1
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+        assert c.open_inbound_streams == 0
+        assert c.open_outbound_streams == 0
+
+
+class TestComplexServer(object):
+    """
+    Complex tests for server-side stacks.
+    """
+    example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+    ]
+    example_response_headers = [
+        (b':status', b'200'),
+        (b'server', b'fake-serv/0.1.0')
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_correctly_count_server_streams(self, frame_factory):
+        """
+        We correctly count the number of server streams, both inbound and
+        outbound.
+        """
+        # This test makes no sense unless you do both inbound and outbound,
+        # because it's important to confirm that we count them correctly.
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        expected_inbound_streams = expected_outbound_streams = 0
+
+        assert c.open_inbound_streams == expected_inbound_streams
+        assert c.open_outbound_streams == expected_outbound_streams
+
+        for stream_id in range(1, 15, 2):
+            # Receive an inbound stream.
+            f = frame_factory.build_headers_frame(
+                headers=self.example_request_headers,
+                stream_id=stream_id,
+            )
+            c.receive_data(f.serialize())
+            expected_inbound_streams += 1
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+            # Push a stream (to create an outbound one). This doesn't open
+            # until we send our response headers.
+            c.push_stream(stream_id, stream_id+1, self.example_request_headers)
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+            c.send_headers(stream_id+1, self.example_response_headers)
+            expected_outbound_streams += 1
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+        for stream_id in range(13, 0, -2):
+            # Close an inbound stream.
+            f = frame_factory.build_data_frame(
+                data=b'',
+                flags=['END_STREAM'],
+                stream_id=stream_id,
+            )
+            c.receive_data(f.serialize())
+
+            # Stream doesn't close until both sides close it.
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+            c.send_data(stream_id, b'', end_stream=True)
+            expected_inbound_streams -= 1
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+            # Pushed streams, however, we can close ourselves.
+            c.send_data(
+                stream_id=stream_id+1,
+                data=b'',
+                end_stream=True,
+            )
+            expected_outbound_streams -= 1
+            assert c.open_inbound_streams == expected_inbound_streams
+            assert c.open_outbound_streams == expected_outbound_streams
+
+        assert c.open_inbound_streams == 0
+        assert c.open_outbound_streams == 0
+
+
+class TestContinuationFrames(object):
+    """
+    Tests for the relatively complex CONTINUATION frame logic.
+    """
+    example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def _build_continuation_sequence(self, headers, block_size, frame_factory):
+        f = frame_factory.build_headers_frame(headers)
+        header_data = f.data
+        chunks = [
+            header_data[x:x+block_size]
+            for x in range(0, len(header_data), block_size)
+        ]
+        f.data = chunks.pop(0)
+        frames = [
+            frame_factory.build_continuation_frame(c) for c in chunks
+        ]
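+        # Strip END_HEADERS from the initial frame and carry it on the final
+        # CONTINUATION frame, so the header block spans the whole sequence.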
+        f.flags = {'END_STREAM'}
+        frames[-1].flags.add('END_HEADERS')
+        frames.insert(0, f)
+        return frames
+
+    def test_continuation_frame_basic(self, frame_factory):
+        """
+        Test that we correctly decode a header block split across continuation
+        frames.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        frames = self._build_continuation_sequence(
+            headers=self.example_request_headers,
+            block_size=5,
+            frame_factory=frame_factory,
+        )
+        data = b''.join(f.serialize() for f in frames)
+        events = c.receive_data(data)
+
+        assert len(events) == 2
+        first_event, second_event = events
+
+        assert isinstance(first_event, h2.events.RequestReceived)
+        assert first_event.headers == self.example_request_headers
+        assert first_event.stream_id == 1
+
+        assert isinstance(second_event, h2.events.StreamEnded)
+        assert second_event.stream_id == 1
+
+    @pytest.mark.parametrize('stream_id', [3, 1])
+    def test_continuation_cannot_interleave_headers(self,
+                                                    frame_factory,
+                                                    stream_id):
+        """
+        We cannot interleave a new headers block with a CONTINUATION sequence.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        frames = self._build_continuation_sequence(
+            headers=self.example_request_headers,
+            block_size=5,
+            frame_factory=frame_factory,
+        )
+        assert len(frames) > 2  # This is mostly defensive.
+
+        bogus_frame = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            stream_id=stream_id,
+            flags=['END_STREAM'],
+        )
+        frames.insert(len(frames) - 2, bogus_frame)
+        data = b''.join(f.serialize() for f in frames)
+
+        with pytest.raises(h2.exceptions.ProtocolError) as e:
+            c.receive_data(data)
+
+        assert "invalid frame" in str(e.value).lower()
+
+    def test_continuation_cannot_interleave_data(self, frame_factory):
+        """
+        We cannot interleave a data frame with a CONTINUATION sequence.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        frames = self._build_continuation_sequence(
+            headers=self.example_request_headers,
+            block_size=5,
+            frame_factory=frame_factory,
+        )
+        assert len(frames) > 2  # This is mostly defensive.
+
+        bogus_frame = frame_factory.build_data_frame(
+            data=b'hello',
+            stream_id=1,
+        )
+        frames.insert(len(frames) - 2, bogus_frame)
+        data = b''.join(f.serialize() for f in frames)
+
+        with pytest.raises(h2.exceptions.ProtocolError) as e:
+            c.receive_data(data)
+
+        assert "invalid frame" in str(e.value).lower()
+
+    def test_continuation_cannot_interleave_unknown_frame(self, frame_factory):
+        """
+        We cannot interleave an unknown frame with a CONTINUATION sequence.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        frames = self._build_continuation_sequence(
+            headers=self.example_request_headers,
+            block_size=5,
+            frame_factory=frame_factory,
+        )
+        assert len(frames) > 2  # This is mostly defensive.
+
+        bogus_frame = frame_factory.build_data_frame(
+            data=b'hello',
+            stream_id=1,
+        )
+        bogus_frame.type = 88
+        frames.insert(len(frames) - 2, bogus_frame)
+        data = b''.join(f.serialize() for f in frames)
+
+        with pytest.raises(h2.exceptions.ProtocolError) as e:
+            c.receive_data(data)
+
+        assert "invalid frame" in str(e.value).lower()
+
+    def test_continuation_frame_multiple_blocks(self, frame_factory):
+        """
+        Test that we correctly decode several header blocks split across
+        continuation frames.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        for stream_id in range(1, 7, 2):
+            frames = self._build_continuation_sequence(
+                headers=self.example_request_headers,
+                block_size=2,
+                frame_factory=frame_factory,
+            )
+            for frame in frames:
+                frame.stream_id = stream_id
+
+            data = b''.join(f.serialize() for f in frames)
+            events = c.receive_data(data)
+
+            assert len(events) == 2
+            first_event, second_event = events
+
+            assert isinstance(first_event, h2.events.RequestReceived)
+            assert first_event.headers == self.example_request_headers
+            assert first_event.stream_id == stream_id
+
+            assert isinstance(second_event, h2.events.StreamEnded)
+            assert second_event.stream_id == stream_id
+
+
+class TestContinuationFramesPushPromise(object):
+    """
+    Tests for the relatively complex CONTINUATION frame logic working with
+    PUSH_PROMISE frames.
+    """
+    example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+    ]
+    example_response_headers = [
+        (b':status', b'200'),
+        (b'server', b'fake-serv/0.1.0')
+    ]
+
+    def _build_continuation_sequence(self, headers, block_size, frame_factory):
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1, promised_stream_id=2, headers=headers
+        )
+        header_data = f.data
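+        # Split the encoded header block into fixed-size chunks: the first
+        # chunk stays on the PUSH_PROMISE frame itself, the remaining chunks
+        # are carried by the CONTINUATION frames built below.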
+        chunks = [
+            header_data[x:x+block_size]
+            for x in range(0, len(header_data), block_size)
+        ]
+        f.data = chunks.pop(0)
+        frames = [
+            frame_factory.build_continuation_frame(c) for c in chunks
+        ]
+        f.flags = {'END_STREAM'}
+        frames[-1].flags.add('END_HEADERS')
+        frames.insert(0, f)
+        return frames
+
+    def test_continuation_frame_basic_push_promise(self, frame_factory):
+        """
+        Test that we correctly decode a header block split across continuation
+        frames when that header block is initiated with a PUSH_PROMISE.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        frames = self._build_continuation_sequence(
+            headers=self.example_request_headers,
+            block_size=5,
+            frame_factory=frame_factory,
+        )
+        data = b''.join(f.serialize() for f in frames)
+        events = c.receive_data(data)
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.PushedStreamReceived)
+        assert event.headers == self.example_request_headers
+        assert event.parent_stream_id == 1
+        assert event.pushed_stream_id == 2
+
+    @pytest.mark.parametrize('stream_id', [3, 1, 2])
+    def test_continuation_cannot_interleave_headers_pp(self,
+                                                       frame_factory,
+                                                       stream_id):
+        """
+        We cannot interleave a new headers block with a CONTINUATION sequence
+        when the headers block is based on a PUSH_PROMISE frame.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        frames = self._build_continuation_sequence(
+            headers=self.example_request_headers,
+            block_size=5,
+            frame_factory=frame_factory,
+        )
+        assert len(frames) > 2  # This is mostly defensive.
+
+        bogus_frame = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+            stream_id=stream_id,
+            flags=['END_STREAM'],
+        )
+        frames.insert(len(frames) - 2, bogus_frame)
+        data = b''.join(f.serialize() for f in frames)
+
+        with pytest.raises(h2.exceptions.ProtocolError) as e:
+            c.receive_data(data)
+
+        assert "invalid frame" in str(e.value).lower()
+
+    def test_continuation_cannot_interleave_data(self, frame_factory):
+        """
+        We cannot interleave a data frame with a CONTINUATION sequence when
+        that sequence began with a PUSH_PROMISE frame.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        frames = self._build_continuation_sequence(
+            headers=self.example_request_headers,
+            block_size=5,
+            frame_factory=frame_factory,
+        )
+        assert len(frames) > 2  # This is mostly defensive.
+
+        bogus_frame = frame_factory.build_data_frame(
+            data=b'hello',
+            stream_id=1,
+        )
+        frames.insert(len(frames) - 2, bogus_frame)
+        data = b''.join(f.serialize() for f in frames)
+
+        with pytest.raises(h2.exceptions.ProtocolError) as e:
+            c.receive_data(data)
+
+        assert "invalid frame" in str(e.value).lower()
+
+    def test_continuation_cannot_interleave_unknown_frame(self, frame_factory):
+        """
+        We cannot interleave an unknown frame with a CONTINUATION sequence when
+        that sequence began with a PUSH_PROMISE frame.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        frames = self._build_continuation_sequence(
+            headers=self.example_request_headers,
+            block_size=5,
+            frame_factory=frame_factory,
+        )
+        assert len(frames) > 2  # This is mostly defensive.
+
+        bogus_frame = frame_factory.build_data_frame(
+            data=b'hello',
+            stream_id=1,
+        )
+        bogus_frame.type = 88
+        frames.insert(len(frames) - 2, bogus_frame)
+        data = b''.join(f.serialize() for f in frames)
+
+        with pytest.raises(h2.exceptions.ProtocolError) as e:
+            c.receive_data(data)
+
+        assert "invalid frame" in str(e.value).lower()
+
+    @pytest.mark.parametrize('evict', [True, False])
+    def test_stream_remotely_closed_disallows_push_promise(self,
+                                                           evict,
+                                                           frame_factory):
+        """
+        PUSH_PROMISE frames are not allowed on streams closed normally by the
+        remote peer; receiving one causes a GOAWAY.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_request_headers,
+            end_stream=True
+        )
+
+        f = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.example_response_headers,
+            flags=['END_STREAM']
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        if evict:
+            # This is annoyingly stateful, but enumerating the list of open
+            # streams will force us to flush state.
+            assert not c.open_outbound_streams
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers,
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        f = frame_factory.build_goaway_frame(
+            last_stream_id=0,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+        )
+        assert c.data_to_send() == f.serialize()
+
+    def test_continuation_frame_multiple_push_promise(self, frame_factory):
+        """
+        Test that we correctly decode header blocks split across continuation
+        frames when those header blocks are initiated with a PUSH_PROMISE, for
+        more than one pushed stream.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        for promised_stream_id in range(2, 8, 2):
+            frames = self._build_continuation_sequence(
+                headers=self.example_request_headers,
+                block_size=2,
+                frame_factory=frame_factory,
+            )
+            frames[0].promised_stream_id = promised_stream_id
+            data = b''.join(f.serialize() for f in frames)
+            events = c.receive_data(data)
+
+            assert len(events) == 1
+            event = events[0]
+
+            assert isinstance(event, h2.events.PushedStreamReceived)
+            assert event.headers == self.example_request_headers
+            assert event.parent_stream_id == 1
+            assert event.pushed_stream_id == promised_stream_id
diff --git a/tools/third_party/h2/test/test_config.py b/tools/third_party/h2/test/test_config.py
new file mode 100755
index 0000000..8eb7fdc
--- /dev/null
+++ b/tools/third_party/h2/test/test_config.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+"""
+test_config
+~~~~~~~~~~~
+
+Test the configuration object.
+"""
+import logging
+import pytest
+
+import h2.config
+
+
+class TestH2Config(object):
+    """
+    Tests of the H2 config object.
+    """
+    def test_defaults(self):
+        """
+        The default values of the HTTP/2 config object are sensible.
+        """
+        config = h2.config.H2Configuration()
+        assert config.client_side
+        assert config.header_encoding is None
+        assert isinstance(config.logger, h2.config.DummyLogger)
+
+    boolean_config_options = [
+        'client_side',
+        'validate_outbound_headers',
+        'normalize_outbound_headers',
+        'validate_inbound_headers',
+        'normalize_inbound_headers',
+    ]
+
+    @pytest.mark.parametrize('option_name', boolean_config_options)
+    @pytest.mark.parametrize('value', [None, 'False', 1])
+    def test_boolean_config_options_reject_non_bools_init(
+        self, option_name, value
+    ):
+        """
+        The boolean config options raise an error if you try to set a value
+        that isn't a boolean via the initializer.
+        """
+        with pytest.raises(ValueError):
+            h2.config.H2Configuration(**{option_name: value})
+
+    @pytest.mark.parametrize('option_name', boolean_config_options)
+    @pytest.mark.parametrize('value', [None, 'False', 1])
+    def test_boolean_config_options_reject_non_bools_attr(
+        self, option_name, value
+    ):
+        """
+        The boolean config options raise an error if you try to set a value
+        that isn't a boolean via attribute setter.
+        """
+        config = h2.config.H2Configuration()
+        with pytest.raises(ValueError):
+            setattr(config, option_name, value)
+
+    @pytest.mark.parametrize('option_name', boolean_config_options)
+    @pytest.mark.parametrize('value', [True, False])
+    def test_boolean_config_option_is_reflected_init(self, option_name, value):
+        """
+        The value of the boolean config options, when set, is reflected
+        in the value via the initializer.
+        """
+        config = h2.config.H2Configuration(**{option_name: value})
+        assert getattr(config, option_name) == value
+
+    @pytest.mark.parametrize('option_name', boolean_config_options)
+    @pytest.mark.parametrize('value', [True, False])
+    def test_boolean_config_option_is_reflected_attr(self, option_name, value):
+        """
+        The value of the boolean config options, when set, is reflected
+        in the value via attribute setter.
+        """
+        config = h2.config.H2Configuration()
+        setattr(config, option_name, value)
+        assert getattr(config, option_name) == value
+
+    @pytest.mark.parametrize('header_encoding', [True, 1, object()])
+    def test_header_encoding_must_be_false_str_none_init(
+        self, header_encoding
+    ):
+        """
+        The value of the ``header_encoding`` setting must be False, a string,
+        or None via the initializer.
+        """
+        with pytest.raises(ValueError):
+            h2.config.H2Configuration(header_encoding=header_encoding)
+
+    @pytest.mark.parametrize('header_encoding', [True, 1, object()])
+    def test_header_encoding_must_be_false_str_none_attr(
+        self, header_encoding
+    ):
+        """
+        The value of the ``header_encoding`` setting must be False, a string,
+        or None via attribute setter.
+        """
+        config = h2.config.H2Configuration()
+        with pytest.raises(ValueError):
+            config.header_encoding = header_encoding
+
+    @pytest.mark.parametrize('header_encoding', [False, 'ascii', None])
+    def test_header_encoding_is_reflected_init(self, header_encoding):
+        """
+        The value of ``header_encoding``, when set, is reflected in the value
+        via the initializer.
+        """
+        config = h2.config.H2Configuration(header_encoding=header_encoding)
+        assert config.header_encoding == header_encoding
+
+    @pytest.mark.parametrize('header_encoding', [False, 'ascii', None])
+    def test_header_encoding_is_reflected_attr(self, header_encoding):
+        """
+        The value of ``header_encoding``, when set, is reflected in the value
+        via the attribute setter.
+        """
+        config = h2.config.H2Configuration()
+        config.header_encoding = header_encoding
+        assert config.header_encoding == header_encoding
+
+    def test_logger_instance_is_reflected(self):
+        """
+        The value of ``logger``, when set, is reflected in the value.
+        """
+        logger = logging.Logger('hyper-h2.test')
+        config = h2.config.H2Configuration()
+        config.logger = logger
+        assert config.logger is logger
diff --git a/tools/third_party/h2/test/test_events.py b/tools/third_party/h2/test/test_events.py
new file mode 100755
index 0000000..06f7da3
--- /dev/null
+++ b/tools/third_party/h2/test/test_events.py
@@ -0,0 +1,358 @@
+# -*- coding: utf-8 -*-
+"""
+test_events.py
+~~~~~~~~~~~~~~
+
+Specific tests for any function that is logically self-contained as part of
+events.py.
+"""
+import inspect
+import sys
+
+from hypothesis import given
+from hypothesis.strategies import (
+    integers, lists, tuples
+)
+import pytest
+
+import h2.errors
+import h2.events
+import h2.settings
+
+
+# We define a fairly complex Hypothesis strategy here. We want to build a list
+# of two-tuples of (Setting, value). For Setting we want to make sure we can
+# handle settings that the rest of hyper knows nothing about, so we want to
+# use integers from 0 to (2**16-1). For values, they're from 0 to (2**32-1).
+# Define that strategy here for clarity.
+SETTINGS_STRATEGY = lists(
+    tuples(
+        integers(min_value=0, max_value=2**16-1),
+        integers(min_value=0, max_value=2**32-1),
+    )
+)
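+# For illustration, a single draw from this strategy might look like
+# [(4, 65535), (51966, 0)]: setting 4 maps to a known code
+# (INITIAL_WINDOW_SIZE), while 51966 is an unknown/extension setting.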
+
+
+class TestRemoteSettingsChanged(object):
+    """
+    Validate the function of the RemoteSettingsChanged event.
+    """
+    @given(SETTINGS_STRATEGY)
+    def test_building_settings_from_scratch(self, settings_list):
+        """
+        Missing old settings are defaulted to None.
+        """
+        settings_dict = dict(settings_list)
+        e = h2.events.RemoteSettingsChanged.from_settings(
+            old_settings={},
+            new_settings=settings_dict,
+        )
+
+        for setting, new_value in settings_dict.items():
+            assert e.changed_settings[setting].setting == setting
+            assert e.changed_settings[setting].original_value is None
+            assert e.changed_settings[setting].new_value == new_value
+
+    @given(SETTINGS_STRATEGY, SETTINGS_STRATEGY)
+    def test_only_reports_changed_settings(self,
+                                           old_settings_list,
+                                           new_settings_list):
+        """
+        Settings that were not changed are not reported.
+        """
+        old_settings_dict = dict(old_settings_list)
+        new_settings_dict = dict(new_settings_list)
+        e = h2.events.RemoteSettingsChanged.from_settings(
+            old_settings=old_settings_dict,
+            new_settings=new_settings_dict,
+        )
+
+        assert len(e.changed_settings) == len(new_settings_dict)
+        assert (
+            sorted(list(e.changed_settings.keys())) ==
+            sorted(list(new_settings_dict.keys()))
+        )
+
+    @given(SETTINGS_STRATEGY, SETTINGS_STRATEGY)
+    def test_correctly_reports_changed_settings(self,
+                                                old_settings_list,
+                                                new_settings_list):
+        """
+        Settings that are changed are correctly reported.
+        """
+        old_settings_dict = dict(old_settings_list)
+        new_settings_dict = dict(new_settings_list)
+        e = h2.events.RemoteSettingsChanged.from_settings(
+            old_settings=old_settings_dict,
+            new_settings=new_settings_dict,
+        )
+
+        for setting, new_value in new_settings_dict.items():
+            original_value = old_settings_dict.get(setting)
+            assert e.changed_settings[setting].setting == setting
+            assert e.changed_settings[setting].original_value == original_value
+            assert e.changed_settings[setting].new_value == new_value
+
+
+class TestEventReprs(object):
+    """
+    Events have useful representations.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    example_informational_headers = [
+        (':status', '100'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+
+    def test_requestreceived_repr(self):
+        """
+        RequestReceived has a useful debug representation.
+        """
+        e = h2.events.RequestReceived()
+        e.stream_id = 5
+        e.headers = self.example_request_headers
+
+        assert repr(e) == (
+            "<RequestReceived stream_id:5, headers:["
+            "(':authority', 'example.com'), "
+            "(':path', '/'), "
+            "(':scheme', 'https'), "
+            "(':method', 'GET')]>"
+        )
+
+    def test_responsereceived_repr(self):
+        """
+        ResponseReceived has a useful debug representation.
+        """
+        e = h2.events.ResponseReceived()
+        e.stream_id = 500
+        e.headers = self.example_response_headers
+
+        assert repr(e) == (
+            "<ResponseReceived stream_id:500, headers:["
+            "(':status', '200'), "
+            "('server', 'fake-serv/0.1.0')]>"
+        )
+
+    def test_trailersreceived_repr(self):
+        """
+        TrailersReceived has a useful debug representation.
+        """
+        e = h2.events.TrailersReceived()
+        e.stream_id = 62
+        e.headers = self.example_response_headers
+
+        assert repr(e) == (
+            "<TrailersReceived stream_id:62, headers:["
+            "(':status', '200'), "
+            "('server', 'fake-serv/0.1.0')]>"
+        )
+
+    def test_informationalresponsereceived_repr(self):
+        """
+        InformationalResponseReceived has a useful debug representation.
+        """
+        e = h2.events.InformationalResponseReceived()
+        e.stream_id = 62
+        e.headers = self.example_informational_headers
+
+        assert repr(e) == (
+            "<InformationalResponseReceived stream_id:62, headers:["
+            "(':status', '100'), "
+            "('server', 'fake-serv/0.1.0')]>"
+        )
+
+    def test_datareceived_repr(self):
+        """
+        DataReceived has a useful debug representation.
+        """
+        e = h2.events.DataReceived()
+        e.stream_id = 888
+        e.data = b"abcdefghijklmnopqrstuvwxyz"
+        e.flow_controlled_length = 88
+
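+        # The repr hex-encodes the payload; only a prefix of the 26-byte
+        # data (20 bytes, 'a' through 't') appears in the expected string.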
+        assert repr(e) == (
+            "<DataReceived stream_id:888, flow_controlled_length:88, "
+            "data:6162636465666768696a6b6c6d6e6f7071727374>"
+        )
+
+    def test_windowupdated_repr(self):
+        """
+        WindowUpdated has a useful debug representation.
+        """
+        e = h2.events.WindowUpdated()
+        e.stream_id = 0
+        e.delta = 2**16
+
+        assert repr(e) == "<WindowUpdated stream_id:0, delta:65536>"
+
+    def test_remotesettingschanged_repr(self):
+        """
+        RemoteSettingsChanged has a useful debug representation.
+        """
+        e = h2.events.RemoteSettingsChanged()
+        e.changed_settings = {
+            h2.settings.SettingCodes.INITIAL_WINDOW_SIZE:
+                h2.settings.ChangedSetting(
+                    h2.settings.SettingCodes.INITIAL_WINDOW_SIZE, 2**16, 2**15
+                ),
+        }
+
+        assert repr(e) == (
+            "<RemoteSettingsChanged changed_settings:{ChangedSetting("
+            "setting=SettingCodes.INITIAL_WINDOW_SIZE, original_value=65536, "
+            "new_value=32768)}>"
+        )
+
+    def test_pingacknowledged_repr(self):
+        """
+        PingAcknowledged has a useful debug representation.
+        """
+        e = h2.events.PingAcknowledged()
+        e.ping_data = b'abcdefgh'
+
+        assert repr(e) == "<PingAcknowledged ping_data:6162636465666768>"
+
+    def test_streamended_repr(self):
+        """
+        StreamEnded has a useful debug representation.
+        """
+        e = h2.events.StreamEnded()
+        e.stream_id = 99
+
+        assert repr(e) == "<StreamEnded stream_id:99>"
+
+    def test_streamreset_repr(self):
+        """
+        StreamReset has a useful debug representation.
+        """
+        e = h2.events.StreamReset()
+        e.stream_id = 919
+        e.error_code = h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
+        e.remote_reset = False
+
+        assert repr(e) == (
+            "<StreamReset stream_id:919, "
+            "error_code:ErrorCodes.ENHANCE_YOUR_CALM, remote_reset:False>"
+        )
+
+    def test_pushedstreamreceived_repr(self):
+        """
+        PushedStreamReceived has a useful debug representation.
+        """
+        e = h2.events.PushedStreamReceived()
+        e.pushed_stream_id = 50
+        e.parent_stream_id = 11
+        e.headers = self.example_request_headers
+
+        assert repr(e) == (
+            "<PushedStreamReceived pushed_stream_id:50, parent_stream_id:11, "
+            "headers:["
+            "(':authority', 'example.com'), "
+            "(':path', '/'), "
+            "(':scheme', 'https'), "
+            "(':method', 'GET')]>"
+        )
+
+    def test_settingsacknowledged_repr(self):
+        """
+        SettingsAcknowledged has a useful debug representation.
+        """
+        e = h2.events.SettingsAcknowledged()
+        e.changed_settings = {
+            h2.settings.SettingCodes.INITIAL_WINDOW_SIZE:
+                h2.settings.ChangedSetting(
+                    h2.settings.SettingCodes.INITIAL_WINDOW_SIZE, 2**16, 2**15
+                ),
+        }
+
+        assert repr(e) == (
+            "<SettingsAcknowledged changed_settings:{ChangedSetting("
+            "setting=SettingCodes.INITIAL_WINDOW_SIZE, original_value=65536, "
+            "new_value=32768)}>"
+        )
+
+    def test_priorityupdated_repr(self):
+        """
+        PriorityUpdated has a useful debug representation.
+        """
+        e = h2.events.PriorityUpdated()
+        e.stream_id = 87
+        e.weight = 32
+        e.depends_on = 8
+        e.exclusive = True
+
+        assert repr(e) == (
+            "<PriorityUpdated stream_id:87, weight:32, depends_on:8, "
+            "exclusive:True>"
+        )
+
+    @pytest.mark.parametrize("additional_data,data_repr", [
+        (None, "None"),
+        (b'some data', "736f6d652064617461")
+    ])
+    def test_connectionterminated_repr(self, additional_data, data_repr):
+        """
+        ConnectionTerminated has a useful debug representation.
+        """
+        e = h2.events.ConnectionTerminated()
+        e.error_code = h2.errors.ErrorCodes.INADEQUATE_SECURITY
+        e.last_stream_id = 33
+        e.additional_data = additional_data
+
+        assert repr(e) == (
+            "<ConnectionTerminated error_code:ErrorCodes.INADEQUATE_SECURITY, "
+            "last_stream_id:33, additional_data:%s>" % data_repr
+        )
+
+    def test_alternativeserviceavailable_repr(self):
+        """
+        AlternativeServiceAvailable has a useful debug representation.
+        """
+        e = h2.events.AlternativeServiceAvailable()
+        e.origin = b"example.com"
+        e.field_value = b'h2=":8000"; ma=60'
+
+        assert repr(e) == (
+            '<AlternativeServiceAvailable origin:example.com, '
+            'field_value:h2=":8000"; ma=60>'
+        )
+
+    def test_unknownframereceived_repr(self):
+        """
+        UnknownFrameReceived has a useful debug representation.
+        """
+        e = h2.events.UnknownFrameReceived()
+        assert repr(e) == '<UnknownFrameReceived>'
+
+
+def all_events():
+    """
+    Generates all the classes (i.e., events) defined in h2.events.
+    """
+    for _, obj in inspect.getmembers(sys.modules['h2.events']):
+
+        # We are only interested in objects that are defined in h2.events;
+        # objects that are imported from other modules are not of interest.
+        if hasattr(obj, '__module__') and (obj.__module__ != 'h2.events'):
+            continue
+
+        if inspect.isclass(obj):
+            yield obj
+
+
+@pytest.mark.parametrize('event', all_events())
+def test_all_events_subclass_from_event(event):
+    """
+    Every event defined in h2.events subclasses from h2.events.Event.
+    """
+    assert (event is h2.events.Event) or issubclass(event, h2.events.Event)
diff --git a/tools/third_party/h2/test/test_exceptions.py b/tools/third_party/h2/test/test_exceptions.py
new file mode 100755
index 0000000..1890459
--- /dev/null
+++ b/tools/third_party/h2/test/test_exceptions.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+"""
+test_exceptions
+~~~~~~~~~~~~~~~
+
+Tests that verify logic local to exceptions.
+"""
+import h2.exceptions
+
+
+class TestExceptions(object):
+    def test_stream_id_too_low_prints_properly(self):
+        x = h2.exceptions.StreamIDTooLowError(5, 10)
+
+        assert "StreamIDTooLowError: 5 is lower than 10" == str(x)
diff --git a/tools/third_party/h2/test/test_flow_control_window.py b/tools/third_party/h2/test/test_flow_control_window.py
new file mode 100755
index 0000000..380cd61
--- /dev/null
+++ b/tools/third_party/h2/test/test_flow_control_window.py
@@ -0,0 +1,916 @@
+# -*- coding: utf-8 -*-
+"""
+test_flow_control
+~~~~~~~~~~~~~~~~~
+
+Tests of the flow control management in h2
+"""
+import pytest
+
+from hypothesis import given
+from hypothesis.strategies import integers
+
+import h2.config
+import h2.connection
+import h2.errors
+import h2.events
+import h2.exceptions
+import h2.settings
+
+
+class TestFlowControl(object):
+    """
+    Tests of the flow control management in the connection objects.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    DEFAULT_FLOW_WINDOW = 65535
+
+    def test_flow_control_initializes_properly(self):
+        """
+        The flow control window for a stream should initially be the default
+        flow control value.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+
+        assert c.local_flow_control_window(1) == self.DEFAULT_FLOW_WINDOW
+        assert c.remote_flow_control_window(1) == self.DEFAULT_FLOW_WINDOW
+
+    def test_flow_control_decreases_with_sent_data(self):
+        """
+        When data is sent on a stream, the flow control window should drop.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+        c.send_data(1, b'some data')
+
+        remaining_length = self.DEFAULT_FLOW_WINDOW - len(b'some data')
+        assert (c.local_flow_control_window(1) == remaining_length)
+
+    @pytest.mark.parametrize("pad_length", [5, 0])
+    def test_flow_control_decreases_with_sent_data_with_padding(self,
+                                                                pad_length):
+        """
+        When padded data is sent on a stream, the flow control window drops
+        by the length of the data plus the length of the padding plus 1 for
+        the 1-byte pad length field.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+
+        c.send_data(1, b'some data', pad_length=pad_length)
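+        # The flow-controlled size of a padded DATA frame covers the data,
+        # the padding bytes, and the one-byte Pad Length field.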
+        remaining_length = (
+            self.DEFAULT_FLOW_WINDOW - len(b'some data') - pad_length - 1
+        )
+        assert c.local_flow_control_window(1) == remaining_length
+
+    def test_flow_control_decreases_with_received_data(self, frame_factory):
+        """
+        When data is received on a stream, the remote flow control window
+        should drop.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f1 = frame_factory.build_headers_frame(self.example_request_headers)
+        f2 = frame_factory.build_data_frame(b'some data')
+
+        c.receive_data(f1.serialize() + f2.serialize())
+
+        remaining_length = self.DEFAULT_FLOW_WINDOW - len(b'some data')
+        assert (c.remote_flow_control_window(1) == remaining_length)
+
+    def test_flow_control_decreases_with_padded_data(self, frame_factory):
+        """
+        When padded data is received on a stream, the remote flow control
+        window drops by an amount that includes the padding.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f1 = frame_factory.build_headers_frame(self.example_request_headers)
+        f2 = frame_factory.build_data_frame(b'some data', padding_len=10)
+
+        c.receive_data(f1.serialize() + f2.serialize())
+
+        remaining_length = (
+            self.DEFAULT_FLOW_WINDOW - len(b'some data') - 10 - 1
+        )
+        assert (c.remote_flow_control_window(1) == remaining_length)
+
+    def test_flow_control_is_limited_by_connection(self):
+        """
+        The flow control window is limited by the flow control of the
+        connection.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+        c.send_data(1, b'some data')
+        c.send_headers(3, self.example_request_headers)
+
+        remaining_length = self.DEFAULT_FLOW_WINDOW - len(b'some data')
+        assert (c.local_flow_control_window(3) == remaining_length)
+
+    def test_remote_flow_control_is_limited_by_connection(self, frame_factory):
+        """
+        The remote flow control window is limited by the flow control of the
+        connection.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        f1 = frame_factory.build_headers_frame(self.example_request_headers)
+        f2 = frame_factory.build_data_frame(b'some data')
+        f3 = frame_factory.build_headers_frame(
+            self.example_request_headers,
+            stream_id=3,
+        )
+        c.receive_data(f1.serialize() + f2.serialize() + f3.serialize())
+
+        remaining_length = self.DEFAULT_FLOW_WINDOW - len(b'some data')
+        assert (c.remote_flow_control_window(3) == remaining_length)
+
+    def test_cannot_send_more_data_than_window(self):
+        """
+        Sending more data than the remaining flow control window raises a
+        FlowControlError.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+        c.outbound_flow_control_window = 5
+
+        with pytest.raises(h2.exceptions.FlowControlError):
+            c.send_data(1, b'some data')
+
+    def test_increasing_connection_window_allows_sending(self, frame_factory):
+        """
+        Confirm that receiving a WindowUpdate frame on the connection frees
+        up space for further frames.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+        c.outbound_flow_control_window = 5
+
+        with pytest.raises(h2.exceptions.FlowControlError):
+            c.send_data(1, b'some data')
+
+        f = frame_factory.build_window_update_frame(
+            stream_id=0,
+            increment=5,
+        )
+        c.receive_data(f.serialize())
+
+        c.clear_outbound_data_buffer()
+        c.send_data(1, b'some data')
+        assert c.data_to_send()
+
+    def test_increasing_stream_window_allows_sending(self, frame_factory):
+        """
+        Confirm that receiving a WindowUpdate frame on the stream frees up
+        space for further frames.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+        c._get_stream_by_id(1).outbound_flow_control_window = 5
+
+        with pytest.raises(h2.exceptions.FlowControlError):
+            c.send_data(1, b'some data')
+
+        f = frame_factory.build_window_update_frame(
+            stream_id=1,
+            increment=5,
+        )
+        c.receive_data(f.serialize())
+
+        c.clear_outbound_data_buffer()
+        c.send_data(1, b'some data')
+        assert c.data_to_send()
+
+    def test_flow_control_shrinks_in_response_to_settings(self, frame_factory):
+        """
+        A SETTINGS_INITIAL_WINDOW_SIZE update received from the peer shrinks
+        the flow control window.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+
+        assert c.local_flow_control_window(1) == 65535
+
+        f = frame_factory.build_settings_frame(
+            settings={h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: 1280}
+        )
+        c.receive_data(f.serialize())
+
+        assert c.local_flow_control_window(1) == 1280
+
+    def test_flow_control_grows_in_response_to_settings(self, frame_factory):
+        """
+        A SETTINGS_INITIAL_WINDOW_SIZE update received from the peer grows
+        the flow control window.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+
+        # Greatly increase the connection flow control window.
+        f = frame_factory.build_window_update_frame(
+            stream_id=0, increment=128000
+        )
+        c.receive_data(f.serialize())
+
+        # The stream flow control window is the bottleneck here.
+        assert c.local_flow_control_window(1) == 65535
+
+        f = frame_factory.build_settings_frame(
+            settings={h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: 128000}
+        )
+        c.receive_data(f.serialize())
+
+        # The stream window is still the bottleneck, but larger now.
+        assert c.local_flow_control_window(1) == 128000
+
+    def test_flow_control_settings_blocked_by_conn_window(self, frame_factory):
+        """
+        Changing SETTINGS_INITIAL_WINDOW_SIZE does not affect the effective
+        flow control window if the connection window isn't changed.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+
+        assert c.local_flow_control_window(1) == 65535
+
+        f = frame_factory.build_settings_frame(
+            settings={h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: 128000}
+        )
+        c.receive_data(f.serialize())
+
+        assert c.local_flow_control_window(1) == 65535
+
+    def test_new_streams_have_flow_control_per_settings(self, frame_factory):
+        """
+        After a SETTINGS_INITIAL_WINDOW_SIZE change is received, new streams
+        have appropriate new flow control windows.
+        """
+        c = h2.connection.H2Connection()
+
+        f = frame_factory.build_settings_frame(
+            settings={h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: 128000}
+        )
+        c.receive_data(f.serialize())
+
+        # Greatly increase the connection flow control window.
+        f = frame_factory.build_window_update_frame(
+            stream_id=0, increment=128000
+        )
+        c.receive_data(f.serialize())
+
+        c.send_headers(1, self.example_request_headers)
+        assert c.local_flow_control_window(1) == 128000
+
+    def test_window_update_no_stream(self, frame_factory):
+        """
+        WindowUpdate frames received without streams fire an appropriate
+        WindowUpdated event.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_window_update_frame(
+            stream_id=0,
+            increment=5
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.WindowUpdated)
+        assert event.stream_id == 0
+        assert event.delta == 5
+
+    def test_window_update_with_stream(self, frame_factory):
+        """
+        WindowUpdate frames received with streams fire an appropriate
+        WindowUpdated event.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        f1 = frame_factory.build_headers_frame(self.example_request_headers)
+        f2 = frame_factory.build_window_update_frame(
+            stream_id=1,
+            increment=66
+        )
+        data = b''.join(f.serialize() for f in [f1, f2])
+        events = c.receive_data(data)
+
+        assert len(events) == 2
+        event = events[1]
+
+        assert isinstance(event, h2.events.WindowUpdated)
+        assert event.stream_id == 1
+        assert event.delta == 66
+
+    def test_we_can_increment_stream_flow_control(self, frame_factory):
+        """
+        It is possible for the user to increase the flow control window for
+        streams.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+        c.clear_outbound_data_buffer()
+
+        expected_frame = frame_factory.build_window_update_frame(
+            stream_id=1,
+            increment=5
+        )
+
+        events = c.increment_flow_control_window(increment=5, stream_id=1)
+        assert not events
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_we_can_increment_connection_flow_control(self, frame_factory):
+        """
+        It is possible for the user to increase the flow control window for
+        the entire connection.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+        c.clear_outbound_data_buffer()
+
+        expected_frame = frame_factory.build_window_update_frame(
+            stream_id=0,
+            increment=5
+        )
+
+        events = c.increment_flow_control_window(increment=5)
+        assert not events
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_we_enforce_our_flow_control_window(self, frame_factory):
+        """
+        The user can set a low flow control window, which leads to connection
+        teardown if violated.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        # Change the flow control window to 80 bytes.
+        c.update_settings(
+            {h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: 80}
+        )
+        f = frame_factory.build_settings_frame({}, ack=True)
+        c.receive_data(f.serialize())
+
+        # Receive a new stream.
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        c.receive_data(f.serialize())
+
+        # Attempt to violate the flow control window.
+        c.clear_outbound_data_buffer()
+        f = frame_factory.build_data_frame(b'\x01' * 100)
+
+        with pytest.raises(h2.exceptions.FlowControlError):
+            c.receive_data(f.serialize())
+
+        # Verify we tear down appropriately.
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1,
+            error_code=h2.errors.ErrorCodes.FLOW_CONTROL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_shrink_remote_flow_control_settings(self, frame_factory):
+        """
+        The remote peer acknowledging our SETTINGS_INITIAL_WINDOW_SIZE shrinks
+        the flow control window.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+
+        assert c.remote_flow_control_window(1) == 65535
+
+        c.update_settings({h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: 1280})
+
+        f = frame_factory.build_settings_frame({}, ack=True)
+        c.receive_data(f.serialize())
+
+        assert c.remote_flow_control_window(1) == 1280
+
+    def test_grow_remote_flow_control_settings(self, frame_factory):
+        """
+        The remote peer acknowledging our SETTINGS_INITIAL_WINDOW_SIZE grows
+        the flow control window.
+        """
+        c = h2.connection.H2Connection()
+        c.send_headers(1, self.example_request_headers)
+
+        # Increase the connection flow control window greatly.
+        c.increment_flow_control_window(increment=128000)
+
+        assert c.remote_flow_control_window(1) == 65535
+
+        c.update_settings(
+            {h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: 128000}
+        )
+        f = frame_factory.build_settings_frame({}, ack=True)
+        c.receive_data(f.serialize())
+
+        assert c.remote_flow_control_window(1) == 128000
+
+    def test_new_streams_have_remote_flow_control(self, frame_factory):
+        """
+        After a SETTINGS_INITIAL_WINDOW_SIZE change is acknowledged by the
+        remote peer, new streams have appropriate new flow control windows.
+        """
+        c = h2.connection.H2Connection()
+
+        c.update_settings(
+            {h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: 128000}
+        )
+        f = frame_factory.build_settings_frame({}, ack=True)
+        c.receive_data(f.serialize())
+
+        # Increase the connection flow control window greatly.
+        c.increment_flow_control_window(increment=128000)
+
+        c.send_headers(1, self.example_request_headers)
+        assert c.remote_flow_control_window(1) == 128000
+
+    @pytest.mark.parametrize(
+        'increment', [0, -15, 2**31]
+    )
+    def test_reject_bad_attempts_to_increment_flow_control(self, increment):
+        """
+        Attempting to increment a flow control increment outside the valid
+        range causes a ValueError to be raised.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+        c.clear_outbound_data_buffer()
+
+        # Fails both on and off streams.
+        with pytest.raises(ValueError):
+            c.increment_flow_control_window(increment=increment, stream_id=1)
+
+        with pytest.raises(ValueError):
+            c.increment_flow_control_window(increment=increment)
+
+    @pytest.mark.parametrize('stream_id', [0, 1])
+    def test_reject_bad_remote_increments(self, frame_factory, stream_id):
+        """
+        Remote peers attempting to increment flow control outside the valid
+        range cause connection errors of type PROTOCOL_ERROR.
+        """
+        # The only number that can be encoded in a WINDOW_UPDATE frame but
+        # isn't valid is 0.
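+        # (The WINDOW_UPDATE increment is a 31-bit unsigned value, and
+        # RFC 7540 requires it to be non-zero.)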
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_window_update_frame(
+            stream_id=stream_id, increment=0
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_reject_increasing_connection_window_too_far(self, frame_factory):
+        """
+        Attempts by the remote peer to increase the connection flow control
+        window beyond 2**31 - 1 are rejected.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
+        increment = 2**31 - c.outbound_flow_control_window
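+        # This increment would push the connection window to exactly 2**31,
+        # one past the maximum allowed value of 2**31 - 1.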
+
+        f = frame_factory.build_window_update_frame(
+            stream_id=0, increment=increment
+        )
+
+        with pytest.raises(h2.exceptions.FlowControlError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0,
+            error_code=h2.errors.ErrorCodes.FLOW_CONTROL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_reject_increasing_stream_window_too_far(self, frame_factory):
+        """
+        Attempts by the remote peer to increase the stream flow control window
+        beyond 2**31 - 1 are rejected.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+        c.clear_outbound_data_buffer()
+
+        increment = 2**31 - c.outbound_flow_control_window
+
+        f = frame_factory.build_window_update_frame(
+            stream_id=1, increment=increment
+        )
+
+        events = c.receive_data(f.serialize())
+        assert len(events) == 1
+
+        event = events[0]
+        assert isinstance(event, h2.events.StreamReset)
+        assert event.stream_id == 1
+        assert event.error_code == h2.errors.ErrorCodes.FLOW_CONTROL_ERROR
+        assert not event.remote_reset
+
+        expected_frame = frame_factory.build_rst_stream_frame(
+            stream_id=1,
+            error_code=h2.errors.ErrorCodes.FLOW_CONTROL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_reject_overlarge_conn_window_settings(self, frame_factory):
+        """
+        SETTINGS frames cannot change the size of the connection flow control
+        window.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # Bring the window to the maximum allowed value, 2**31 - 1.
+        increment = 2**31 - 1 - c.outbound_flow_control_window
+
+        f = frame_factory.build_window_update_frame(
+            stream_id=0, increment=increment
+        )
+        c.receive_data(f.serialize())
+
+        # Receive an increment to the initial window size.
+        f = frame_factory.build_settings_frame(
+            settings={
+                h2.settings.SettingCodes.INITIAL_WINDOW_SIZE:
+                    self.DEFAULT_FLOW_WINDOW + 1
+            }
+        )
+        c.clear_outbound_data_buffer()
+
+        # No error is encountered.
+        events = c.receive_data(f.serialize())
+        assert len(events) == 1
+        assert isinstance(events[0], h2.events.RemoteSettingsChanged)
+
+        expected_frame = frame_factory.build_settings_frame(
+            settings={},
+            ack=True
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_reject_overlarge_stream_window_settings(self, frame_factory):
+        """
+        Remote attempts to create overlarge stream windows via SETTINGS frames
+        are rejected.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+
+        # Bring the window to the maximum allowed value, 2**31 - 1.
+        increment = 2**31 - 1 - c.outbound_flow_control_window
+
+        f = frame_factory.build_window_update_frame(
+            stream_id=1, increment=increment
+        )
+        c.receive_data(f.serialize())
+
+        # Receive an increment to the initial window size.
+        f = frame_factory.build_settings_frame(
+            settings={
+                h2.settings.SettingCodes.INITIAL_WINDOW_SIZE:
+                    self.DEFAULT_FLOW_WINDOW + 1
+            }
+        )
+        c.clear_outbound_data_buffer()
+        with pytest.raises(h2.exceptions.FlowControlError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0,
+            error_code=h2.errors.ErrorCodes.FLOW_CONTROL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_reject_local_overlarge_increase_connection_window(self):
+        """
+        Local attempts to increase the connection window too far are rejected.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        increment = 2**31 - c.inbound_flow_control_window
+
+        with pytest.raises(h2.exceptions.FlowControlError):
+            c.increment_flow_control_window(increment=increment)
+
+    def test_reject_local_overlarge_increase_stream_window(self):
+        """
+        Local attempts to increase the stream window too far are rejected.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+
+        increment = 2**31 - c.inbound_flow_control_window
+
+        with pytest.raises(h2.exceptions.FlowControlError):
+            c.increment_flow_control_window(increment=increment, stream_id=1)
+
+
+class TestAutomaticFlowControl(object):
+    """
+    Tests for the automatic flow control logic.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    DEFAULT_FLOW_WINDOW = 65535
+
+    def _setup_connection_and_send_headers(self, frame_factory):
+        """
+        Set up a server-side H2Connection, deliver a client HEADERS frame to
+        it, and then clear the outbound data buffer. Also increase the
+        maximum frame size.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        c.update_settings(
+            {h2.settings.SettingCodes.MAX_FRAME_SIZE: self.DEFAULT_FLOW_WINDOW}
+        )
+        settings_frame = frame_factory.build_settings_frame(
+            settings={}, ack=True
+        )
+        c.receive_data(settings_frame.serialize())
+        c.clear_outbound_data_buffer()
+
+        headers_frame = frame_factory.build_headers_frame(
+            headers=self.example_request_headers
+        )
+        c.receive_data(headers_frame.serialize())
+        c.clear_outbound_data_buffer()
+        return c
+
+    @given(stream_id=integers(max_value=0))
+    def test_must_acknowledge_for_stream(self, frame_factory, stream_id):
+        """
+        Flow control acknowledgements must be done on a stream ID that is
+        greater than zero.
+        """
+        # We need to refresh the encoder because hypothesis has a problem with
+        # integrating with py.test, meaning that we use the same frame factory
+        # for all tests.
+        # See https://github.com/HypothesisWorks/hypothesis-python/issues/377
+        frame_factory.refresh_encoder()
+
+        # Create a connection in a state that might actually accept
+        # data acknowledgement.
+        c = self._setup_connection_and_send_headers(frame_factory)
+        data_frame = frame_factory.build_data_frame(
+            b'some data', flags=['END_STREAM']
+        )
+        c.receive_data(data_frame.serialize())
+
+        with pytest.raises(ValueError):
+            c.acknowledge_received_data(
+                acknowledged_size=5, stream_id=stream_id
+            )
+
+    @given(size=integers(max_value=-1))
+    def test_cannot_acknowledge_less_than_zero(self, frame_factory, size):
+        """
+        The user must acknowledge at least 0 bytes.
+        """
+        # We need to refresh the encoder because hypothesis has a problem with
+        # integrating with py.test, meaning that we use the same frame factory
+        # for all tests.
+        # See https://github.com/HypothesisWorks/hypothesis-python/issues/377
+        frame_factory.refresh_encoder()
+
+        # Create a connection in a state that might actually accept
+        # data acknowledgement.
+        c = self._setup_connection_and_send_headers(frame_factory)
+        data_frame = frame_factory.build_data_frame(
+            b'some data', flags=['END_STREAM']
+        )
+        c.receive_data(data_frame.serialize())
+
+        with pytest.raises(ValueError):
+            c.acknowledge_received_data(acknowledged_size=size, stream_id=1)
+
+    def test_acknowledging_small_chunks_does_nothing(self, frame_factory):
+        """
+        When a small amount of data is received and acknowledged, no window
+        update is emitted.
+        """
+        c = self._setup_connection_and_send_headers(frame_factory)
+
+        data_frame = frame_factory.build_data_frame(
+            b'some data', flags=['END_STREAM']
+        )
+        data_event = c.receive_data(data_frame.serialize())[0]
+
+        c.acknowledge_received_data(
+            data_event.flow_controlled_length, stream_id=1
+        )
+
+        assert not c.data_to_send()
+
+    def test_acknowledging_no_data_does_nothing(self, frame_factory):
+        """
+        If a user accidentally acknowledges no data, nothing happens.
+        """
+        c = self._setup_connection_and_send_headers(frame_factory)
+
+        # Send an empty data frame, just to give the user impetus to ack the
+        # data.
+        data_frame = frame_factory.build_data_frame(b'')
+        c.receive_data(data_frame.serialize())
+
+        c.acknowledge_received_data(0, stream_id=1)
+        assert not c.data_to_send()
+
+    @pytest.mark.parametrize('force_cleanup', (True, False))
+    def test_acknowledging_data_on_closed_stream(self,
+                                                 frame_factory,
+                                                 force_cleanup):
+        """
+        When acknowledging data on a stream that has just been closed, no
+        acknowledgement is given for that stream, only for the connection.
+        """
+        c = self._setup_connection_and_send_headers(frame_factory)
+
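+        # A single 65535-byte DATA frame exhausts both the stream and the
+        # connection receive windows; MAX_FRAME_SIZE was raised in the setup
+        # helper so that a frame this large is legal.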
+        data_to_send = b'\x00' * self.DEFAULT_FLOW_WINDOW
+        data_frame = frame_factory.build_data_frame(data_to_send)
+        c.receive_data(data_frame.serialize())
+
+        rst_frame = frame_factory.build_rst_stream_frame(
+            stream_id=1
+        )
+        c.receive_data(rst_frame.serialize())
+        c.clear_outbound_data_buffer()
+
+        if force_cleanup:
+            # Check how many streams are open to force the old one to be
+            # cleaned up.
+            assert c.open_outbound_streams == 0
+
+        c.acknowledge_received_data(2048, stream_id=1)
+
+        expected = frame_factory.build_window_update_frame(
+            stream_id=0, increment=2048
+        )
+        assert c.data_to_send() == expected.serialize()
+
+    def test_acknowledging_streams_we_never_saw(self, frame_factory):
+        """
+        If the user acknowledges a stream ID we've never seen, that raises a
+        NoSuchStreamError.
+        """
+        c = self._setup_connection_and_send_headers(frame_factory)
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.NoSuchStreamError):
+            c.acknowledge_received_data(2048, stream_id=101)
+
+    @given(integers(min_value=1025, max_value=DEFAULT_FLOW_WINDOW))
+    def test_acknowledging_1024_bytes_when_empty_increments(self,
+                                                            frame_factory,
+                                                            increment):
+        """
+        If the flow control window is empty and we acknowledge 1024 bytes or
+        more, we will emit a WINDOW_UPDATE frame just to move the connection
+        forward.
+        """
+        # We need to refresh the encoder because hypothesis has a problem with
+        # integrating with py.test, meaning that we use the same frame factory
+        # for all tests.
+        # See https://github.com/HypothesisWorks/hypothesis-python/issues/377
+        frame_factory.refresh_encoder()
+
+        c = self._setup_connection_and_send_headers(frame_factory)
+
+        data_to_send = b'\x00' * self.DEFAULT_FLOW_WINDOW
+        data_frame = frame_factory.build_data_frame(data_to_send)
+        c.receive_data(data_frame.serialize())
+
+        c.acknowledge_received_data(increment, stream_id=1)
+
+        first_expected = frame_factory.build_window_update_frame(
+            stream_id=0, increment=increment
+        )
+        second_expected = frame_factory.build_window_update_frame(
+            stream_id=1, increment=increment
+        )
+        expected_data = b''.join(
+            [first_expected.serialize(), second_expected.serialize()]
+        )
+        assert c.data_to_send() == expected_data
+
+    # This test needs to use a lower cap, because otherwise the algo will
+    # increment the stream window anyway.
+    @given(integers(min_value=1025, max_value=(DEFAULT_FLOW_WINDOW // 4) - 1))
+    def test_connection_only_empty(self, frame_factory, increment):
+        """
+        If the connection flow control window is empty, but the stream flow
+        control windows aren't, and 1024 bytes or more are acknowledged by the
+        user, we increment the connection window only.
+        """
+        # We need to refresh the encoder because hypothesis has a problem with
+        # integrating with py.test, meaning that we use the same frame factory
+        # for all tests.
+        # See https://github.com/HypothesisWorks/hypothesis-python/issues/377
+        frame_factory.refresh_encoder()
+
+        # Here we'll use 4 streams. Set them up.
+        c = self._setup_connection_and_send_headers(frame_factory)
+
+        for stream_id in [3, 5, 7]:
+            f = frame_factory.build_headers_frame(
+                headers=self.example_request_headers, stream_id=stream_id
+            )
+            c.receive_data(f.serialize())
+
+        # Now we send 1/4 of the connection window per stream. Annoyingly,
+        # that's an odd number, so we need to round the last frame up.
+        data_to_send = b'\x00' * (self.DEFAULT_FLOW_WINDOW // 4)
+        for stream_id in [1, 3, 5]:
+            f = frame_factory.build_data_frame(
+                data_to_send, stream_id=stream_id
+            )
+            c.receive_data(f.serialize())
+
+        data_to_send = b'\x00' * c.remote_flow_control_window(7)
+        data_frame = frame_factory.build_data_frame(data_to_send, stream_id=7)
+        c.receive_data(data_frame.serialize())
+
+        # Ok, now the actual test.
+        c.acknowledge_received_data(increment, stream_id=1)
+
+        expected_data = frame_factory.build_window_update_frame(
+            stream_id=0, increment=increment
+        ).serialize()
+        assert c.data_to_send() == expected_data
+
+    @given(integers(min_value=1025, max_value=DEFAULT_FLOW_WINDOW))
+    def test_mixing_update_forms(self, frame_factory, increment):
+        """
+        If the user mixes acknowledging data with manually incrementing
+        windows, we still keep track of what's going on.
+        """
+        # We need to refresh the encoder because hypothesis has a problem with
+        # integrating with py.test, meaning that we use the same frame factory
+        # for all tests.
+        # See https://github.com/HypothesisWorks/hypothesis-python/issues/377
+        frame_factory.refresh_encoder()
+
+        # Empty the flow control window.
+        c = self._setup_connection_and_send_headers(frame_factory)
+        data_to_send = b'\x00' * self.DEFAULT_FLOW_WINDOW
+        data_frame = frame_factory.build_data_frame(data_to_send)
+        c.receive_data(data_frame.serialize())
+
+        # Manually increment the connection flow control window back to fully
+        # open, but leave the stream window closed.
+        c.increment_flow_control_window(
+            stream_id=None, increment=self.DEFAULT_FLOW_WINDOW
+        )
+        c.clear_outbound_data_buffer()
+
+        # Now, acknowledge the receipt of that data. This should cause the
+        # stream window to be widened, but not the connection window, because
+        # it is already open.
+        c.acknowledge_received_data(increment, stream_id=1)
+
+        # We expect to see one window update frame only, for the stream.
+        expected_data = frame_factory.build_window_update_frame(
+            stream_id=1, increment=increment
+        ).serialize()
+        assert c.data_to_send() == expected_data
diff --git a/tools/third_party/h2/test/test_h2_upgrade.py b/tools/third_party/h2/test/test_h2_upgrade.py
new file mode 100755
index 0000000..1d4eac6
--- /dev/null
+++ b/tools/third_party/h2/test/test_h2_upgrade.py
@@ -0,0 +1,306 @@
+# -*- coding: utf-8 -*-
+"""
+test_h2_upgrade.py
+~~~~~~~~~~~~~~~~~~
+
+This module contains tests that exercise the HTTP Upgrade functionality of
+hyper-h2, ensuring that clients and servers can upgrade their plaintext
+HTTP/1.1 connections to HTTP/2.
+"""
+import base64
+
+import pytest
+
+import h2.config
+import h2.connection
+import h2.errors
+import h2.events
+import h2.exceptions
+
+
+class TestClientUpgrade(object):
+    """
+    Tests of the client side of the HTTP/2 upgrade dance.
+    """
+    example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+    ]
+    example_response_headers = [
+        (b':status', b'200'),
+        (b'server', b'fake-serv/0.1.0')
+    ]
+
+    def test_returns_http2_settings(self, frame_factory):
+        """
+        Calling initiate_upgrade_connection returns a base64url encoded
+        Settings frame with the settings used by the connection.
+        """
+        conn = h2.connection.H2Connection()
+        data = conn.initiate_upgrade_connection()
+
+        # The base64 encoding must not be padded.
+        assert not data.endswith(b'=')
+
+        # A SETTINGS frame body is always a multiple of 6 bytes, so its
+        # base64 encoding never needs padding anyway.
+        decoded_frame = base64.urlsafe_b64decode(data)
+        expected_frame = frame_factory.build_settings_frame(
+            settings=conn.local_settings
+        )
+        assert decoded_frame == expected_frame.serialize_body()
+
+    def test_emits_preamble(self, frame_factory):
+        """
+        Calling initiate_upgrade_connection emits the connection preamble.
+        """
+        conn = h2.connection.H2Connection()
+        conn.initiate_upgrade_connection()
+
+        data = conn.data_to_send()
+        assert data.startswith(frame_factory.preamble())
+
+        data = data[len(frame_factory.preamble()):]
+        expected_frame = frame_factory.build_settings_frame(
+            settings=conn.local_settings
+        )
+        assert data == expected_frame.serialize()
+
+    def test_can_receive_response(self, frame_factory):
+        """
+        After upgrading, we can safely receive a response.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_upgrade_connection()
+        c.clear_outbound_data_buffer()
+
+        f1 = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.example_response_headers,
+        )
+        f2 = frame_factory.build_data_frame(
+            stream_id=1,
+            data=b'some data',
+            flags=['END_STREAM']
+        )
+        events = c.receive_data(f1.serialize() + f2.serialize())
+        assert len(events) == 3
+
+        assert isinstance(events[0], h2.events.ResponseReceived)
+        assert isinstance(events[1], h2.events.DataReceived)
+        assert isinstance(events[2], h2.events.StreamEnded)
+
+        assert events[0].headers == self.example_response_headers
+        assert events[1].data == b'some data'
+        assert all(e.stream_id == 1 for e in events)
+
+        assert not c.data_to_send()
+
+    def test_can_receive_pushed_stream(self, frame_factory):
+        """
+        After upgrading, we can safely receive a pushed stream.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_upgrade_connection()
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers,
+        )
+        events = c.receive_data(f.serialize())
+        assert len(events) == 1
+
+        assert isinstance(events[0], h2.events.PushedStreamReceived)
+        assert events[0].headers == self.example_request_headers
+        assert events[0].parent_stream_id == 1
+        assert events[0].pushed_stream_id == 2
+
+    def test_cannot_send_headers_stream_1(self, frame_factory):
+        """
+        After upgrading, we cannot send headers on stream 1.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_upgrade_connection()
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+    def test_cannot_send_data_stream_1(self, frame_factory):
+        """
+        After upgrading, we cannot send data on stream 1.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_upgrade_connection()
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.send_data(stream_id=1, data=b'some data')
+
+
+class TestServerUpgrade(object):
+    """
+    Tests of the server side of the HTTP/2 upgrade dance.
+    """
+    example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+    ]
+    example_response_headers = [
+        (b':status', b'200'),
+        (b'server', b'fake-serv/0.1.0')
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_returns_nothing(self, frame_factory):
+        """
+        Calling initiate_upgrade_connection returns nothing.
+        """
+        conn = h2.connection.H2Connection(config=self.server_config)
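+        # A sample HTTP2-Settings header value, as a client like curl might
+        # send it; it decodes to SETTINGS_MAX_CONCURRENT_STREAMS=100 and
+        # SETTINGS_INITIAL_WINDOW_SIZE=65535.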
+        curl_header = b"AAMAAABkAAQAAP__"
+        data = conn.initiate_upgrade_connection(curl_header)
+        assert data is None
+
+    def test_emits_preamble(self, frame_factory):
+        """
+        Calling initiate_upgrade_connection emits the connection preamble.
+        """
+        conn = h2.connection.H2Connection(config=self.server_config)
+        conn.initiate_upgrade_connection()
+
+        data = conn.data_to_send()
+        expected_frame = frame_factory.build_settings_frame(
+            settings=conn.local_settings
+        )
+        assert data == expected_frame.serialize()
+
+    def test_can_send_response(self, frame_factory):
+        """
+        After upgrading, we can safely send a response.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_upgrade_connection()
+        c.clear_outbound_data_buffer()
+
+        c.send_headers(stream_id=1, headers=self.example_response_headers)
+        c.send_data(stream_id=1, data=b'some data', end_stream=True)
+
+        f1 = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.example_response_headers,
+        )
+        f2 = frame_factory.build_data_frame(
+            stream_id=1,
+            data=b'some data',
+            flags=['END_STREAM']
+        )
+
+        expected_data = f1.serialize() + f2.serialize()
+        assert c.data_to_send() == expected_data
+
+    def test_can_push_stream(self, frame_factory):
+        """
+        After upgrading, we can safely push a stream.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_upgrade_connection()
+        c.clear_outbound_data_buffer()
+
+        c.push_stream(
+            stream_id=1,
+            promised_stream_id=2,
+            request_headers=self.example_request_headers
+        )
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers,
+        )
+        assert c.data_to_send() == f.serialize()
+
+    def test_cannot_receive_headers_stream_1(self, frame_factory):
+        """
+        After upgrading, we cannot receive headers on stream 1.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_upgrade_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.example_request_headers,
+        )
+        c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_rst_stream_frame(
+            stream_id=1,
+            error_code=h2.errors.ErrorCodes.STREAM_CLOSED,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_cannot_receive_data_stream_1(self, frame_factory):
+        """
+        After upgrading, we cannot receive data on stream 1.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_upgrade_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_data_frame(
+            stream_id=1,
+            data=b'some data',
+        )
+        c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_rst_stream_frame(
+            stream_id=1,
+            error_code=h2.errors.ErrorCodes.STREAM_CLOSED,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_client_settings_are_applied(self, frame_factory):
+        """
+        The settings provided by the client are applied and immediately
+        ACK'ed.
+        """
+        server = h2.connection.H2Connection(config=self.server_config)
+        client = h2.connection.H2Connection()
+
+        # As a precaution, let's confirm that the server and client, at the
+        # start of the connection, do not agree on their initial settings
+        # state.
+        assert (
+            client.local_settings._settings != server.remote_settings._settings
+        )
+
+        # Get the client header data and pass it to the server.
+        header_data = client.initiate_upgrade_connection()
+        server.initiate_upgrade_connection(header_data)
+
+        # This gets complex, but here we go.
+        # RFC 7540 § 3.2.1 says that "explicit acknowledgement" of the settings
+        # in the header is "not necessary". That's annoyingly vague, but we
+        # interpret that to mean "should not be sent". So to test that this
+        # worked we need to test that the server has only sent the preamble,
+        # and has not sent a SETTINGS ack, and also that the server has the
+        # correct settings.
+        expected_frame = frame_factory.build_settings_frame(
+            server.local_settings
+        )
+        assert server.data_to_send() == expected_frame.serialize()
+
+        # We violate abstraction layers here, but I don't think defining __eq__
+        # for this is worth it. In this case, both the client and server should
+        # agree that these settings have been ACK'd, so their underlying
+        # dictionaries should be identical.
+        assert (
+            client.local_settings._settings == server.remote_settings._settings
+        )
diff --git a/tools/third_party/h2/test/test_head_request.py b/tools/third_party/h2/test/test_head_request.py
new file mode 100755
index 0000000..313bf12
--- /dev/null
+++ b/tools/third_party/h2/test/test_head_request.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+"""
+test_head_request
+~~~~~~~~~~~~~~~~~
+"""
+import pytest
+
+import h2.connection
+import h2.events
+import h2.exceptions
+
+
+class TestHeadRequest(object):
+    """
+    Tests for the handling of responses to HEAD requests, which may advertise
+    a non-zero content-length while carrying no body.
+    """
+    example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'HEAD'),
+    ]
+
+    example_response_headers = [
+        (b':status', b'200'),
+        (b'server', b'fake-serv/0.1.0'),
+        (b'content-length', b'1'),
+    ]
+
+    def test_non_zero_content_and_no_body(self, frame_factory):
+        """
+        A response to a HEAD request may carry a non-zero content-length
+        header even though no body follows.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        f = frame_factory.build_headers_frame(
+            self.example_response_headers,
+            flags=['END_STREAM']
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 2
+        event = events[0]
+
+        assert isinstance(event, h2.events.ResponseReceived)
+        assert event.stream_id == 1
+        assert event.headers == self.example_response_headers
+
+    def test_reject_non_zero_content_and_body(self, frame_factory):
+        """
+        A response to a HEAD request that carries an actual body is rejected
+        with an InvalidBodyLengthError.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers)
+
+        headers = frame_factory.build_headers_frame(
+            self.example_response_headers
+        )
+        data = frame_factory.build_data_frame(data=b'\x01')
+
+        c.receive_data(headers.serialize())
+        with pytest.raises(h2.exceptions.InvalidBodyLengthError):
+            c.receive_data(data.serialize())
diff --git a/tools/third_party/h2/test/test_header_indexing.py b/tools/third_party/h2/test/test_header_indexing.py
new file mode 100755
index 0000000..23fd06f
--- /dev/null
+++ b/tools/third_party/h2/test/test_header_indexing.py
@@ -0,0 +1,637 @@
+# -*- coding: utf-8 -*-
+"""
+test_header_indexing.py
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module contains tests that use HPACK header tuples that provide additional
+metadata to the hpack module about how to encode the headers.
+"""
+import pytest
+
+from hpack import HeaderTuple, NeverIndexedHeaderTuple
+
+import h2.config
+import h2.connection
+
+
+def assert_header_blocks_actually_equal(block_a, block_b):
+    """
+    Asserts that two header blocks are really, truly equal, down to the types
+    of their tuples. Doesn't return anything.
+    """
+    assert len(block_a) == len(block_b)
+
+    for a, b in zip(block_a, block_b):
+        assert a == b
+        assert a.__class__ is b.__class__
+
+
+class TestHeaderIndexing(object):
+    """
+    Test that Hyper-h2 can correctly handle never indexed header fields using
+    the appropriate hpack data structures.
+    """
+    example_request_headers = [
+        HeaderTuple(u':authority', u'example.com'),
+        HeaderTuple(u':path', u'/'),
+        HeaderTuple(u':scheme', u'https'),
+        HeaderTuple(u':method', u'GET'),
+    ]
+    bytes_example_request_headers = [
+        HeaderTuple(b':authority', b'example.com'),
+        HeaderTuple(b':path', b'/'),
+        HeaderTuple(b':scheme', b'https'),
+        HeaderTuple(b':method', b'GET'),
+    ]
+
+    extended_request_headers = [
+        HeaderTuple(u':authority', u'example.com'),
+        HeaderTuple(u':path', u'/'),
+        HeaderTuple(u':scheme', u'https'),
+        HeaderTuple(u':method', u'GET'),
+        NeverIndexedHeaderTuple(u'authorization', u'realpassword'),
+    ]
+    bytes_extended_request_headers = [
+        HeaderTuple(b':authority', b'example.com'),
+        HeaderTuple(b':path', b'/'),
+        HeaderTuple(b':scheme', b'https'),
+        HeaderTuple(b':method', b'GET'),
+        NeverIndexedHeaderTuple(b'authorization', b'realpassword'),
+    ]
+
+    example_response_headers = [
+        HeaderTuple(u':status', u'200'),
+        HeaderTuple(u'server', u'fake-serv/0.1.0')
+    ]
+    bytes_example_response_headers = [
+        HeaderTuple(b':status', b'200'),
+        HeaderTuple(b'server', b'fake-serv/0.1.0')
+    ]
+
+    extended_response_headers = [
+        HeaderTuple(u':status', u'200'),
+        HeaderTuple(u'server', u'fake-serv/0.1.0'),
+        NeverIndexedHeaderTuple(u'secure', u'you-bet'),
+    ]
+    bytes_extended_response_headers = [
+        HeaderTuple(b':status', b'200'),
+        HeaderTuple(b'server', b'fake-serv/0.1.0'),
+        NeverIndexedHeaderTuple(b'secure', b'you-bet'),
+    ]
+
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    @pytest.mark.parametrize(
+        'headers', (
+            example_request_headers,
+            bytes_example_request_headers,
+            extended_request_headers,
+            bytes_extended_request_headers,
+        )
+    )
+    def test_sending_header_tuples(self, headers, frame_factory):
+        """
+        Providing HeaderTuple and HeaderTuple subclasses preserves the metadata
+        about indexing.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # Clear the data, then send headers.
+        c.clear_outbound_data_buffer()
+        c.send_headers(1, headers)
+
+        f = frame_factory.build_headers_frame(headers=headers)
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize(
+        'headers', (
+            example_request_headers,
+            bytes_example_request_headers,
+            extended_request_headers,
+            bytes_extended_request_headers,
+        )
+    )
+    def test_header_tuples_in_pushes(self, headers, frame_factory):
+        """
+        Providing HeaderTuple and HeaderTuple subclasses to push promises
+        preserves metadata about indexing.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        # We can use normal headers for the request.
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+
+        frame_factory.refresh_encoder()
+        expected_frame = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=headers,
+            flags=['END_HEADERS'],
+        )
+
+        c.clear_outbound_data_buffer()
+        c.push_stream(
+            stream_id=1,
+            promised_stream_id=2,
+            request_headers=headers
+        )
+
+        assert c.data_to_send() == expected_frame.serialize()
+
+    @pytest.mark.parametrize(
+        'headers,encoding', (
+            (example_request_headers, 'utf-8'),
+            (bytes_example_request_headers, None),
+            (extended_request_headers, 'utf-8'),
+            (bytes_extended_request_headers, None),
+        )
+    )
+    def test_header_tuples_are_decoded_request(self,
+                                               headers,
+                                               encoding,
+                                               frame_factory):
+        """
+        The indexing status of the header is preserved when emitting
+        RequestReceived events.
+        """
+        config = h2.config.H2Configuration(
+            client_side=False, header_encoding=encoding
+        )
+        c = h2.connection.H2Connection(config=config)
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(headers)
+        data = f.serialize()
+        events = c.receive_data(data)
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.RequestReceived)
+        assert_header_blocks_actually_equal(headers, event.headers)
+
+    @pytest.mark.parametrize(
+        'headers,encoding', (
+            (example_response_headers, 'utf-8'),
+            (bytes_example_response_headers, None),
+            (extended_response_headers, 'utf-8'),
+            (bytes_extended_response_headers, None),
+        )
+    )
+    def test_header_tuples_are_decoded_response(self,
+                                                headers,
+                                                encoding,
+                                                frame_factory):
+        """
+        The indexing status of the header is preserved when emitting
+        ResponseReceived events.
+        """
+        config = h2.config.H2Configuration(
+            header_encoding=encoding
+        )
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        f = frame_factory.build_headers_frame(headers)
+        data = f.serialize()
+        events = c.receive_data(data)
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.ResponseReceived)
+        assert_header_blocks_actually_equal(headers, event.headers)
+
+    @pytest.mark.parametrize(
+        'headers,encoding', (
+            (example_response_headers, 'utf-8'),
+            (bytes_example_response_headers, None),
+            (extended_response_headers, 'utf-8'),
+            (bytes_extended_response_headers, None),
+        )
+    )
+    def test_header_tuples_are_decoded_info_response(self,
+                                                     headers,
+                                                     encoding,
+                                                     frame_factory):
+        """
+        The indexing status of the header is preserved when emitting
+        InformationalResponseReceived events.
+        """
+        # Manipulate the headers to send 100 Continue. We need to copy the list
+        # to avoid breaking the example headers.
+        headers = headers[:]
+        if encoding:
+            headers[0] = HeaderTuple(u':status', u'100')
+        else:
+            headers[0] = HeaderTuple(b':status', b'100')
+
+        config = h2.config.H2Configuration(
+            header_encoding=encoding
+        )
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        f = frame_factory.build_headers_frame(headers)
+        data = f.serialize()
+        events = c.receive_data(data)
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.InformationalResponseReceived)
+        assert_header_blocks_actually_equal(headers, event.headers)
+
+    @pytest.mark.parametrize(
+        'headers,encoding', (
+            (example_response_headers, 'utf-8'),
+            (bytes_example_response_headers, None),
+            (extended_response_headers, 'utf-8'),
+            (bytes_extended_response_headers, None),
+        )
+    )
+    def test_header_tuples_are_decoded_trailers(self,
+                                                headers,
+                                                encoding,
+                                                frame_factory):
+        """
+        The indexing status of the header is preserved when emitting
+        TrailersReceived events.
+        """
+        # Manipulate the headers to remove the status, which shouldn't be in
+        # the trailers. We need to copy the list to avoid breaking the example
+        # headers.
+        headers = headers[1:]
+
+        config = h2.config.H2Configuration(
+            header_encoding=encoding
+        )
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+        f = frame_factory.build_headers_frame(self.example_response_headers)
+        data = f.serialize()
+        c.receive_data(data)
+
+        f = frame_factory.build_headers_frame(headers, flags=['END_STREAM'])
+        data = f.serialize()
+        events = c.receive_data(data)
+
+        assert len(events) == 2
+        event = events[0]
+
+        assert isinstance(event, h2.events.TrailersReceived)
+        assert_header_blocks_actually_equal(headers, event.headers)
+
+    @pytest.mark.parametrize(
+        'headers,encoding', (
+            (example_request_headers, 'utf-8'),
+            (bytes_example_request_headers, None),
+            (extended_request_headers, 'utf-8'),
+            (bytes_extended_request_headers, None),
+        )
+    )
+    def test_header_tuples_are_decoded_push_promise(self,
+                                                    headers,
+                                                    encoding,
+                                                    frame_factory):
+        """
+        The indexing status of the header is preserved when emitting
+        PushedStreamReceived events.
+        """
+        config = h2.config.H2Configuration(
+            header_encoding=encoding
+        )
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=headers,
+            flags=['END_HEADERS'],
+        )
+        data = f.serialize()
+        events = c.receive_data(data)
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.PushedStreamReceived)
+        assert_header_blocks_actually_equal(headers, event.headers)
+
+
+class TestSecureHeaders(object):
+    """
+    Certain headers should always be transformed to their never-indexed form.
+    """
+    example_request_headers = [
+        (u':authority', u'example.com'),
+        (u':path', u'/'),
+        (u':scheme', u'https'),
+        (u':method', u'GET'),
+    ]
+    bytes_example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+    ]
+    possible_auth_headers = [
+        (u'authorization', u'test'),
+        (u'Authorization', u'test'),
+        (u'authorization', u'really long test'),
+        HeaderTuple(u'authorization', u'test'),
+        HeaderTuple(u'Authorization', u'test'),
+        HeaderTuple(u'authorization', u'really long test'),
+        NeverIndexedHeaderTuple(u'authorization', u'test'),
+        NeverIndexedHeaderTuple(u'Authorization', u'test'),
+        NeverIndexedHeaderTuple(u'authorization', u'really long test'),
+        (b'authorization', b'test'),
+        (b'Authorization', b'test'),
+        (b'authorization', b'really long test'),
+        HeaderTuple(b'authorization', b'test'),
+        HeaderTuple(b'Authorization', b'test'),
+        HeaderTuple(b'authorization', b'really long test'),
+        NeverIndexedHeaderTuple(b'authorization', b'test'),
+        NeverIndexedHeaderTuple(b'Authorization', b'test'),
+        NeverIndexedHeaderTuple(b'authorization', b'really long test'),
+        (u'proxy-authorization', u'test'),
+        (u'Proxy-Authorization', u'test'),
+        (u'proxy-authorization', u'really long test'),
+        HeaderTuple(u'proxy-authorization', u'test'),
+        HeaderTuple(u'Proxy-Authorization', u'test'),
+        HeaderTuple(u'proxy-authorization', u'really long test'),
+        NeverIndexedHeaderTuple(u'proxy-authorization', u'test'),
+        NeverIndexedHeaderTuple(u'Proxy-Authorization', u'test'),
+        NeverIndexedHeaderTuple(u'proxy-authorization', u'really long test'),
+        (b'proxy-authorization', b'test'),
+        (b'Proxy-Authorization', b'test'),
+        (b'proxy-authorization', b'really long test'),
+        HeaderTuple(b'proxy-authorization', b'test'),
+        HeaderTuple(b'Proxy-Authorization', b'test'),
+        HeaderTuple(b'proxy-authorization', b'really long test'),
+        NeverIndexedHeaderTuple(b'proxy-authorization', b'test'),
+        NeverIndexedHeaderTuple(b'Proxy-Authorization', b'test'),
+        NeverIndexedHeaderTuple(b'proxy-authorization', b'really long test'),
+    ]
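+    # hyper-h2 treats cookie header values shorter than 20 bytes as sensitive
+    # and always converts them to their never-indexed form; values of 20 bytes
+    # or more stay indexable unless explicitly marked never-indexed.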
+    secured_cookie_headers = [
+        (u'cookie', u'short'),
+        (u'Cookie', u'short'),
+        (u'cookie', u'nineteen byte cooki'),
+        HeaderTuple(u'cookie', u'short'),
+        HeaderTuple(u'Cookie', u'short'),
+        HeaderTuple(u'cookie', u'nineteen byte cooki'),
+        NeverIndexedHeaderTuple(u'cookie', u'short'),
+        NeverIndexedHeaderTuple(u'Cookie', u'short'),
+        NeverIndexedHeaderTuple(u'cookie', u'nineteen byte cooki'),
+        NeverIndexedHeaderTuple(u'cookie', u'longer manually secured cookie'),
+        (b'cookie', b'short'),
+        (b'Cookie', b'short'),
+        (b'cookie', b'nineteen byte cooki'),
+        HeaderTuple(b'cookie', b'short'),
+        HeaderTuple(b'Cookie', b'short'),
+        HeaderTuple(b'cookie', b'nineteen byte cooki'),
+        NeverIndexedHeaderTuple(b'cookie', b'short'),
+        NeverIndexedHeaderTuple(b'Cookie', b'short'),
+        NeverIndexedHeaderTuple(b'cookie', b'nineteen byte cooki'),
+        NeverIndexedHeaderTuple(b'cookie', b'longer manually secured cookie'),
+    ]
+    unsecured_cookie_headers = [
+        (u'cookie', u'twenty byte cookie!!'),
+        (u'Cookie', u'twenty byte cookie!!'),
+        (u'cookie', u'substantially longer than 20 byte cookie'),
+        HeaderTuple(u'cookie', u'twenty byte cookie!!'),
+        HeaderTuple(u'cookie', u'twenty byte cookie!!'),
+        HeaderTuple(u'Cookie', u'twenty byte cookie!!'),
+        (b'cookie', b'twenty byte cookie!!'),
+        (b'Cookie', b'twenty byte cookie!!'),
+        (b'cookie', b'substantially longer than 20 byte cookie'),
+        HeaderTuple(b'cookie', b'twenty byte cookie!!'),
+        HeaderTuple(b'cookie', b'twenty byte cookie!!'),
+        HeaderTuple(b'Cookie', b'twenty byte cookie!!'),
+    ]
+
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    @pytest.mark.parametrize(
+        'headers', (example_request_headers, bytes_example_request_headers)
+    )
+    @pytest.mark.parametrize('auth_header', possible_auth_headers)
+    def test_authorization_headers_never_indexed(self,
+                                                 headers,
+                                                 auth_header,
+                                                 frame_factory):
+        """
+        Authorization and Proxy-Authorization headers are always forced to be
+        never-indexed, regardless of their form.
+        """
+        # Regardless of what we send, we expect it to be never indexed.
+        send_headers = headers + [auth_header]
+        expected_headers = headers + [
+            NeverIndexedHeaderTuple(auth_header[0].lower(), auth_header[1])
+        ]
+
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # Clear the data, then send headers.
+        c.clear_outbound_data_buffer()
+        c.send_headers(1, send_headers)
+
+        f = frame_factory.build_headers_frame(headers=expected_headers)
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize(
+        'headers', (example_request_headers, bytes_example_request_headers)
+    )
+    @pytest.mark.parametrize('auth_header', possible_auth_headers)
+    def test_authorization_headers_never_indexed_push(self,
+                                                      headers,
+                                                      auth_header,
+                                                      frame_factory):
+        """
+        Authorization and Proxy-Authorization headers are always forced to be
+        never-indexed, regardless of their form, when pushed by a server.
+        """
+        # Regardless of what we send, we expect it to be never indexed.
+        send_headers = headers + [auth_header]
+        expected_headers = headers + [
+            NeverIndexedHeaderTuple(auth_header[0].lower(), auth_header[1])
+        ]
+
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        # We can use normal headers for the request.
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+
+        frame_factory.refresh_encoder()
+        expected_frame = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=expected_headers,
+            flags=['END_HEADERS'],
+        )
+
+        c.clear_outbound_data_buffer()
+        c.push_stream(
+            stream_id=1,
+            promised_stream_id=2,
+            request_headers=send_headers
+        )
+
+        assert c.data_to_send() == expected_frame.serialize()
+
+    @pytest.mark.parametrize(
+        'headers', (example_request_headers, bytes_example_request_headers)
+    )
+    @pytest.mark.parametrize('cookie_header', secured_cookie_headers)
+    def test_short_cookie_headers_never_indexed(self,
+                                                headers,
+                                                cookie_header,
+                                                frame_factory):
+        """
+        Short cookie headers, and cookies provided as NeverIndexedHeaderTuple,
+        are never indexed.
+        """
+        # Regardless of what we send, we expect it to be never indexed.
+        send_headers = headers + [cookie_header]
+        expected_headers = headers + [
+            NeverIndexedHeaderTuple(cookie_header[0].lower(), cookie_header[1])
+        ]
+
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # Clear the data, then send headers.
+        c.clear_outbound_data_buffer()
+        c.send_headers(1, send_headers)
+
+        f = frame_factory.build_headers_frame(headers=expected_headers)
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize(
+        'headers', (example_request_headers, bytes_example_request_headers)
+    )
+    @pytest.mark.parametrize('cookie_header', secured_cookie_headers)
+    def test_short_cookie_headers_never_indexed_push(self,
+                                                     headers,
+                                                     cookie_header,
+                                                     frame_factory):
+        """
+        Short cookie headers, and cookies provided as NeverIndexedHeaderTuple,
+        are never indexed when pushed by servers.
+        """
+        # Regardless of what we send, we expect it to be never indexed.
+        send_headers = headers + [cookie_header]
+        expected_headers = headers + [
+            NeverIndexedHeaderTuple(cookie_header[0].lower(), cookie_header[1])
+        ]
+
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        # We can use normal headers for the request.
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+
+        frame_factory.refresh_encoder()
+        expected_frame = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=expected_headers,
+            flags=['END_HEADERS'],
+        )
+
+        c.clear_outbound_data_buffer()
+        c.push_stream(
+            stream_id=1,
+            promised_stream_id=2,
+            request_headers=send_headers
+        )
+
+        assert c.data_to_send() == expected_frame.serialize()
+
+    @pytest.mark.parametrize(
+        'headers', (example_request_headers, bytes_example_request_headers)
+    )
+    @pytest.mark.parametrize('cookie_header', unsecured_cookie_headers)
+    def test_long_cookie_headers_can_be_indexed(self,
+                                                headers,
+                                                cookie_header,
+                                                frame_factory):
+        """
+        Longer cookie headers can be indexed.
+        """
+        # Regardless of what we send, we expect it to be indexed.
+        send_headers = headers + [cookie_header]
+        expected_headers = headers + [
+            HeaderTuple(cookie_header[0].lower(), cookie_header[1])
+        ]
+
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # Clear the data, then send headers.
+        c.clear_outbound_data_buffer()
+        c.send_headers(1, send_headers)
+
+        f = frame_factory.build_headers_frame(headers=expected_headers)
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize(
+        'headers', (example_request_headers, bytes_example_request_headers)
+    )
+    @pytest.mark.parametrize('cookie_header', unsecured_cookie_headers)
+    def test_long_cookie_headers_can_be_indexed_push(self,
+                                                     headers,
+                                                     cookie_header,
+                                                     frame_factory):
+        """
+        Longer cookie headers can be indexed when pushed by servers.
+        """
+        # Regardless of what we send, we expect it to be indexed.
+        send_headers = headers + [cookie_header]
+        expected_headers = headers + [
+            HeaderTuple(cookie_header[0].lower(), cookie_header[1])
+        ]
+
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        # We can use normal headers for the request.
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+
+        frame_factory.refresh_encoder()
+        expected_frame = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=expected_headers,
+            flags=['END_HEADERS'],
+        )
+
+        c.clear_outbound_data_buffer()
+        c.push_stream(
+            stream_id=1,
+            promised_stream_id=2,
+            request_headers=send_headers
+        )
+
+        assert c.data_to_send() == expected_frame.serialize()
diff --git a/tools/third_party/h2/test/test_informational_responses.py b/tools/third_party/h2/test/test_informational_responses.py
new file mode 100755
index 0000000..e18c44b
--- /dev/null
+++ b/tools/third_party/h2/test/test_informational_responses.py
@@ -0,0 +1,444 @@
+# -*- coding: utf-8 -*-
+"""
+test_informational_responses
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Tests that validate that hyper-h2 correctly handles informational (1XX)
+responses in its state machine.
+"""
+import pytest
+
+import h2.config
+import h2.connection
+import h2.events
+import h2.exceptions
+
+
+class TestReceivingInformationalResponses(object):
+    """
+    Tests for receiving informational responses.
+    """
+    example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+        (b'expect', b'100-continue'),
+    ]
+    example_informational_headers = [
+        (b':status', b'100'),
+        (b'server', b'fake-serv/0.1.0')
+    ]
+    example_response_headers = [
+        (b':status', b'200'),
+        (b'server', b'fake-serv/0.1.0')
+    ]
+    example_trailers = [
+        (b'trailer', b'you-bet'),
+    ]
+
+    @pytest.mark.parametrize('end_stream', (True, False))
+    def test_single_informational_response(self, frame_factory, end_stream):
+        """
+        When receiving an informational response, the appropriate event is
+        signaled.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_request_headers,
+            end_stream=end_stream
+        )
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_informational_headers,
+            stream_id=1,
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.InformationalResponseReceived)
+        assert event.headers == self.example_informational_headers
+        assert event.stream_id == 1
+
+    @pytest.mark.parametrize('end_stream', (True, False))
+    def test_receiving_multiple_header_blocks(self, frame_factory, end_stream):
+        """
+        At least three header blocks can be received: informational, headers,
+        trailers.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_request_headers,
+            end_stream=end_stream
+        )
+
+        f1 = frame_factory.build_headers_frame(
+            headers=self.example_informational_headers,
+            stream_id=1,
+        )
+        f2 = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+            stream_id=1,
+        )
+        f3 = frame_factory.build_headers_frame(
+            headers=self.example_trailers,
+            stream_id=1,
+            flags=['END_STREAM'],
+        )
+        events = c.receive_data(
+            f1.serialize() + f2.serialize() + f3.serialize()
+        )
+
+        assert len(events) == 4
+
+        assert isinstance(events[0], h2.events.InformationalResponseReceived)
+        assert events[0].headers == self.example_informational_headers
+        assert events[0].stream_id == 1
+
+        assert isinstance(events[1], h2.events.ResponseReceived)
+        assert events[1].headers == self.example_response_headers
+        assert events[1].stream_id == 1
+
+        assert isinstance(events[2], h2.events.TrailersReceived)
+        assert events[2].headers == self.example_trailers
+        assert events[2].stream_id == 1
+
+    @pytest.mark.parametrize('end_stream', (True, False))
+    def test_receiving_multiple_informational_responses(self,
+                                                        frame_factory,
+                                                        end_stream):
+        """
+        More than one informational response is allowed.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_request_headers,
+            end_stream=end_stream
+        )
+
+        f1 = frame_factory.build_headers_frame(
+            headers=self.example_informational_headers,
+            stream_id=1,
+        )
+        f2 = frame_factory.build_headers_frame(
+            headers=[(':status', '101')],
+            stream_id=1,
+        )
+        events = c.receive_data(f1.serialize() + f2.serialize())
+
+        assert len(events) == 2
+
+        assert isinstance(events[0], h2.events.InformationalResponseReceived)
+        assert events[0].headers == self.example_informational_headers
+        assert events[0].stream_id == 1
+
+        assert isinstance(events[1], h2.events.InformationalResponseReceived)
+        assert events[1].headers == [(b':status', b'101')]
+        assert events[1].stream_id == 1
+
+    @pytest.mark.parametrize('end_stream', (True, False))
+    def test_receive_provisional_response_with_end_stream(self,
+                                                          frame_factory,
+                                                          end_stream):
+        """
+        Receiving provisional responses with END_STREAM set causes
+        ProtocolErrors.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_request_headers,
+            end_stream=end_stream
+        )
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_informational_headers,
+            stream_id=1,
+            flags=['END_STREAM']
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        expected = frame_factory.build_goaway_frame(
+            last_stream_id=0,
+            error_code=1,
+        )
+        assert c.data_to_send() == expected.serialize()
+
+    @pytest.mark.parametrize('end_stream', (True, False))
+    def test_receiving_out_of_order_headers(self, frame_factory, end_stream):
+        """
+        When receiving an informational response after the actual response
+        headers, we consider it a ProtocolError and raise it.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_request_headers,
+            end_stream=end_stream
+        )
+
+        f1 = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+            stream_id=1,
+        )
+        f2 = frame_factory.build_headers_frame(
+            headers=self.example_informational_headers,
+            stream_id=1,
+        )
+        c.receive_data(f1.serialize())
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f2.serialize())
+
+        expected = frame_factory.build_goaway_frame(
+            last_stream_id=0,
+            error_code=1,
+        )
+        assert c.data_to_send() == expected.serialize()
+
+
+class TestSendingInformationalResponses(object):
+    """
+    Tests for sending informational responses.
+    """
+    example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+        (b'expect', b'100-continue'),
+    ]
+    unicode_informational_headers = [
+        (u':status', u'100'),
+        (u'server', u'fake-serv/0.1.0')
+    ]
+    bytes_informational_headers = [
+        (b':status', b'100'),
+        (b'server', b'fake-serv/0.1.0')
+    ]
+    example_response_headers = [
+        (b':status', b'200'),
+        (b'server', b'fake-serv/0.1.0')
+    ]
+    example_trailers = [
+        (b'trailer', b'you-bet'),
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    @pytest.mark.parametrize(
+        'hdrs', (unicode_informational_headers, bytes_informational_headers),
+    )
+    @pytest.mark.parametrize('end_stream', (True, False))
+    def test_single_informational_response(self,
+                                           frame_factory,
+                                           hdrs,
+                                           end_stream):
+        """
+        When sending an informational response, the appropriate frames are
+        emitted.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        flags = ['END_STREAM'] if end_stream else []
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            stream_id=1,
+            flags=flags,
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+        frame_factory.refresh_encoder()
+
+        c.send_headers(
+            stream_id=1,
+            headers=hdrs
+        )
+
+        f = frame_factory.build_headers_frame(
+            headers=hdrs,
+            stream_id=1,
+        )
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize(
+        'hdrs', (unicode_informational_headers, bytes_informational_headers),
+    )
+    @pytest.mark.parametrize('end_stream', (True, False))
+    def test_sending_multiple_header_blocks(self,
+                                            frame_factory,
+                                            hdrs,
+                                            end_stream):
+        """
+        At least three header blocks can be sent: informational, headers,
+        trailers.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        flags = ['END_STREAM'] if end_stream else []
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            stream_id=1,
+            flags=flags,
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+        frame_factory.refresh_encoder()
+
+        # Send the three header blocks.
+        c.send_headers(
+            stream_id=1,
+            headers=hdrs
+        )
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_response_headers
+        )
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_trailers,
+            end_stream=True
+        )
+
+        # Check that we sent them properly.
+        f1 = frame_factory.build_headers_frame(
+            headers=hdrs,
+            stream_id=1,
+        )
+        f2 = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+            stream_id=1,
+        )
+        f3 = frame_factory.build_headers_frame(
+            headers=self.example_trailers,
+            stream_id=1,
+            flags=['END_STREAM']
+        )
+        assert (
+            c.data_to_send() ==
+            f1.serialize() + f2.serialize() + f3.serialize()
+        )
+
+    @pytest.mark.parametrize(
+        'hdrs', (unicode_informational_headers, bytes_informational_headers),
+    )
+    @pytest.mark.parametrize('end_stream', (True, False))
+    def test_sending_multiple_informational_responses(self,
+                                                      frame_factory,
+                                                      hdrs,
+                                                      end_stream):
+        """
+        More than one informational response is allowed.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        flags = ['END_STREAM'] if end_stream else []
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            stream_id=1,
+            flags=flags,
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+        frame_factory.refresh_encoder()
+
+        # Send two informational responses.
+        c.send_headers(
+            stream_id=1,
+            headers=hdrs,
+        )
+        c.send_headers(
+            stream_id=1,
+            headers=[(':status', '101')]
+        )
+
+        # Check we sent them both.
+        f1 = frame_factory.build_headers_frame(
+            headers=hdrs,
+            stream_id=1,
+        )
+        f2 = frame_factory.build_headers_frame(
+            headers=[(':status', '101')],
+            stream_id=1,
+        )
+        assert c.data_to_send() == f1.serialize() + f2.serialize()
+
+    @pytest.mark.parametrize(
+        'hdrs', (unicode_informational_headers, bytes_informational_headers),
+    )
+    @pytest.mark.parametrize('end_stream', (True, False))
+    def test_send_provisional_response_with_end_stream(self,
+                                                       frame_factory,
+                                                       hdrs,
+                                                       end_stream):
+        """
+        Sending provisional responses with END_STREAM set causes
+        ProtocolErrors.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        flags = ['END_STREAM'] if end_stream else []
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            stream_id=1,
+            flags=flags,
+        )
+        c.receive_data(f.serialize())
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.send_headers(
+                stream_id=1,
+                headers=hdrs,
+                end_stream=True,
+            )
+
+    @pytest.mark.parametrize(
+        'hdrs', (unicode_informational_headers, bytes_informational_headers),
+    )
+    @pytest.mark.parametrize('end_stream', (True, False))
+    def test_reject_sending_out_of_order_headers(self,
+                                                 frame_factory,
+                                                 hdrs,
+                                                 end_stream):
+        """
+        When sending an informational response after the actual response
+        headers, we consider it a ProtocolError and raise it.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        flags = ['END_STREAM'] if end_stream else []
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            stream_id=1,
+            flags=flags,
+        )
+        c.receive_data(f.serialize())
+
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_response_headers
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.send_headers(
+                stream_id=1,
+                headers=hdrs
+            )
diff --git a/tools/third_party/h2/test/test_interacting_stacks.py b/tools/third_party/h2/test/test_interacting_stacks.py
new file mode 100755
index 0000000..9077682
--- /dev/null
+++ b/tools/third_party/h2/test/test_interacting_stacks.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+"""
+test_interacting_stacks
+~~~~~~~~~~~~~~~~~~~~~~~
+
+These tests run two entities, a client and a server, in parallel threads. These
+two entities talk to each other, running what amounts to a number of carefully
+controlled simulations of real flows.
+
+This is to ensure that the stack as a whole behaves intelligently in both
+client and server cases.
+
+These tests are long, complex, and somewhat brittle, so they aren't in general
+recommended for writing the majority of test cases. Their purpose is primarily
+to validate that the top-level API of the library behaves as described.
+
+We should also consider writing helper functions to reduce the complexity of
+these tests, so that they can be written more easily, as they are remarkably
+useful.
+"""
+import coroutine_tests
+
+import h2.config
+import h2.connection
+import h2.events
+import h2.settings
+
+
+class TestCommunication(coroutine_tests.CoroutineTestCase):
+    """
+    Test that two communicating state machines can work together.
+    """
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_basic_request_response(self):
+        """
+        A request issued by hyper-h2 can be responded to by hyper-h2.
+        """
+        request_headers = [
+            (b':method', b'GET'),
+            (b':path', b'/'),
+            (b':authority', b'example.com'),
+            (b':scheme', b'https'),
+            (b'user-agent', b'test-client/0.1.0'),
+        ]
+        response_headers = [
+            (b':status', b'204'),
+            (b'server', b'test-server/0.1.0'),
+            (b'content-length', b'0'),
+        ]
+
+        def client():
+            c = h2.connection.H2Connection()
+
+            # Do the handshake. First send the preamble.
+            c.initiate_connection()
+            data = yield c.data_to_send()
+
+            # Next, handle the remote preamble.
+            events = c.receive_data(data)
+            assert len(events) == 2
+            assert isinstance(events[0], h2.events.SettingsAcknowledged)
+            assert isinstance(events[1], h2.events.RemoteSettingsChanged)
+            changed = events[1].changed_settings
+            assert (
+                changed[
+                    h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS
+                ].new_value == 100
+            )
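+            # 100 is hyper-h2's default MAX_CONCURRENT_STREAMS setting.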
+
+            # Send a request.
+            events = c.send_headers(1, request_headers, end_stream=True)
+            assert not events
+            data = yield c.data_to_send()
+
+            # Validate the response.
+            events = c.receive_data(data)
+            assert len(events) == 2
+            assert isinstance(events[0], h2.events.ResponseReceived)
+            assert events[0].stream_id == 1
+            assert events[0].headers == response_headers
+            assert isinstance(events[1], h2.events.StreamEnded)
+            assert events[1].stream_id == 1
+
+        @self.server
+        def server():
+            c = h2.connection.H2Connection(config=self.server_config)
+
+            # First, read for the preamble.
+            data = yield
+            events = c.receive_data(data)
+            assert len(events) == 1
+            assert isinstance(events[0], h2.events.RemoteSettingsChanged)
+            changed = events[0].changed_settings
+            assert (
+                changed[
+                    h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS
+                ].new_value == 100
+            )
+
+            # Send our preamble back.
+            c.initiate_connection()
+            data = yield c.data_to_send()
+
+            # Listen for the request.
+            events = c.receive_data(data)
+            assert len(events) == 3
+            assert isinstance(events[0], h2.events.SettingsAcknowledged)
+            assert isinstance(events[1], h2.events.RequestReceived)
+            assert events[1].stream_id == 1
+            assert events[1].headers == request_headers
+            assert isinstance(events[2], h2.events.StreamEnded)
+            assert events[2].stream_id == 1
+
+            # Send our response.
+            events = c.send_headers(1, response_headers, end_stream=True)
+            assert not events
+            yield c.data_to_send()
+
+        self.run_until_complete(client(), server())
diff --git a/tools/third_party/h2/test/test_invalid_content_lengths.py b/tools/third_party/h2/test/test_invalid_content_lengths.py
new file mode 100755
index 0000000..fe682fc
--- /dev/null
+++ b/tools/third_party/h2/test/test_invalid_content_lengths.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+"""
+test_invalid_content_lengths.py
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module contains tests that use invalid content lengths, and validates that
+they fail appropriately.
+"""
+import pytest
+
+import h2.config
+import h2.connection
+import h2.errors
+import h2.events
+import h2.exceptions
+
+
+class TestInvalidContentLengths(object):
+    """
+    Hyper-h2 raises Protocol Errors when the content-length sent by a remote
+    peer is not valid.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'POST'),
+        ('content-length', '15'),
+    ]
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_too_much_data(self, frame_factory):
+        """
+        Remote peers sending data in excess of content-length causes Protocol
+        Errors.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        headers = frame_factory.build_headers_frame(
+            headers=self.example_request_headers
+        )
+        first_data = frame_factory.build_data_frame(data=b'\x01'*15)
+        c.receive_data(headers.serialize() + first_data.serialize())
+        c.clear_outbound_data_buffer()
+
+        second_data = frame_factory.build_data_frame(data=b'\x01')
+        with pytest.raises(h2.exceptions.InvalidBodyLengthError) as exp:
+            c.receive_data(second_data.serialize())
+
+        assert exp.value.expected_length == 15
+        assert exp.value.actual_length == 16
+        assert str(exp.value) == (
+            "InvalidBodyLengthError: Expected 15 bytes, received 16"
+        )
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_insufficient_data(self, frame_factory):
+        """
+        Remote peers sending less data than content-length causes Protocol
+        Errors.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        headers = frame_factory.build_headers_frame(
+            headers=self.example_request_headers
+        )
+        first_data = frame_factory.build_data_frame(data=b'\x01'*13)
+        c.receive_data(headers.serialize() + first_data.serialize())
+        c.clear_outbound_data_buffer()
+
+        second_data = frame_factory.build_data_frame(
+            data=b'\x01',
+            flags=['END_STREAM'],
+        )
+        with pytest.raises(h2.exceptions.InvalidBodyLengthError) as exp:
+            c.receive_data(second_data.serialize())
+
+        assert exp.value.expected_length == 15
+        assert exp.value.actual_length == 14
+        assert str(exp.value) == (
+            "InvalidBodyLengthError: Expected 15 bytes, received 14"
+        )
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_insufficient_data_empty_frame(self, frame_factory):
+        """
+        Remote peers sending less data than content-length where the last data
+        frame is empty causes Protocol Errors.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        headers = frame_factory.build_headers_frame(
+            headers=self.example_request_headers
+        )
+        first_data = frame_factory.build_data_frame(data=b'\x01'*14)
+        c.receive_data(headers.serialize() + first_data.serialize())
+        c.clear_outbound_data_buffer()
+
+        second_data = frame_factory.build_data_frame(
+            data=b'',
+            flags=['END_STREAM'],
+        )
+        with pytest.raises(h2.exceptions.InvalidBodyLengthError) as exp:
+            c.receive_data(second_data.serialize())
+
+        assert exp.value.expected_length == 15
+        assert exp.value.actual_length == 14
+        assert str(exp.value) == (
+            "InvalidBodyLengthError: Expected 15 bytes, received 14"
+        )
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
diff --git a/tools/third_party/h2/test/test_invalid_frame_sequences.py b/tools/third_party/h2/test/test_invalid_frame_sequences.py
new file mode 100755
index 0000000..1f2f6c7
--- /dev/null
+++ b/tools/third_party/h2/test/test_invalid_frame_sequences.py
@@ -0,0 +1,486 @@
+# -*- coding: utf-8 -*-
+"""
+test_invalid_frame_sequences.py
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module contains tests that use invalid frame sequences, and validates that
+they fail appropriately.
+"""
+import pytest
+
+import h2.config
+import h2.connection
+import h2.errors
+import h2.events
+import h2.exceptions
+
+
+class TestInvalidFrameSequences(object):
+    """
+    Invalid frame sequences, either sent or received, cause ProtocolErrors to
+    be thrown.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_cannot_send_on_closed_stream(self):
+        """
+        When we've closed a stream locally, we cannot send further data.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.send_data(1, b'some data')
+
+    def test_missing_preamble_errors(self):
+        """
+        Server side connections require the preamble.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        encoded_headers_frame = (
+            b'\x00\x00\r\x01\x04\x00\x00\x00\x01'
+            b'A\x88/\x91\xd3]\x05\\\x87\xa7\x84\x87\x82'
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(encoded_headers_frame)
+
+    def test_server_connections_reject_even_streams(self, frame_factory):
+        """
+        Servers do not allow clients to initiate even-numbered streams.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers, stream_id=2
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+    def test_clients_reject_odd_stream_pushes(self, frame_factory):
+        """
+        Clients do not allow servers to push odd numbered streams.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(1, self.example_request_headers, end_stream=True)
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            headers=self.example_request_headers,
+            promised_stream_id=3
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+    def test_can_handle_frames_with_invalid_padding(self, frame_factory):
+        """
+        Frames with invalid padding cause connection teardown.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(self.example_request_headers)
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        invalid_data_frame = (
+            b'\x00\x00\x05\x00\x0b\x00\x00\x00\x01\x06\x54\x65\x73\x74'
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(invalid_data_frame)
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1, error_code=1
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_receiving_frames_with_insufficient_size(self, frame_factory):
+        """
+        Frames with not enough data cause connection teardown.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        invalid_window_update_frame = (
+            b'\x00\x00\x03\x08\x00\x00\x00\x00\x00\x00\x00\x02'
+        )
+
+        with pytest.raises(h2.exceptions.FrameDataMissingError):
+            c.receive_data(invalid_window_update_frame)
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0, error_code=h2.errors.ErrorCodes.FRAME_SIZE_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_reject_data_on_closed_streams(self, frame_factory):
+        """
+        When a stream is not open to the remote peer, we reject receiving data
+        frames from them.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers,
+            flags=['END_STREAM']
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        bad_frame = frame_factory.build_data_frame(data=b'hello')
+        c.receive_data(bad_frame.serialize())
+
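+        # Error code 0x5 is STREAM_CLOSED (RFC 7540, Section 7).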
+        expected_frame = frame_factory.build_rst_stream_frame(
+            stream_id=1,
+            error_code=0x5,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_unexpected_continuation_on_closed_stream(self, frame_factory):
+        """
+        CONTINUATION frames received on closed streams cause connection errors
+        of type PROTOCOL_ERROR.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers,
+            flags=['END_STREAM']
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        bad_frame = frame_factory.build_continuation_frame(
+            header_block=b'hello'
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(bad_frame.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+            last_stream_id=1
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_prevent_continuation_dos(self, frame_factory):
+        """
+        Receiving too many CONTINUATION frames in one block causes a protocol
+        error.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers,
+        )
+        f.flags = {'END_STREAM'}
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        # Send 63 additional frames.
+        for _ in range(0, 63):
+            extra_frame = frame_factory.build_continuation_frame(
+                header_block=b'hello'
+            )
+            c.receive_data(extra_frame.serialize())
+
+        # The final continuation frame should cause a protocol error.
+        extra_frame = frame_factory.build_continuation_frame(
+            header_block=b'hello'
+        )
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(extra_frame.serialize())
+
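+        # Error code 0x1 is PROTOCOL_ERROR (RFC 7540, Section 7).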
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0,
+            error_code=0x1,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    # These settings are a bit annoyingly anonymous, but trust me, they're bad.
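+    # 0x2 is ENABLE_PUSH (only 0 or 1 allowed), 0x4 is INITIAL_WINDOW_SIZE
+    # (capped at 2**31 - 1), 0x5 is MAX_FRAME_SIZE (2**14 to 2**24 - 1).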
+    @pytest.mark.parametrize(
+        "settings",
+        [
+            {0x2: 5},
+            {0x4: 2**31},
+            {0x5: 5},
+            {0x5: 2**24},
+        ]
+    )
+    def test_reject_invalid_settings_values(self, frame_factory, settings):
+        """
+        When a SETTINGS frame is received with invalid settings values it
+        causes connection teardown with the appropriate error code.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_settings_frame(settings=settings)
+
+        with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
+            c.receive_data(f.serialize())
+
+        assert e.value.error_code == (
+            h2.errors.ErrorCodes.FLOW_CONTROL_ERROR if 0x4 in settings else
+            h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+
+    def test_invalid_frame_headers_are_protocol_errors(self, frame_factory):
+        """
+        When invalid frame headers are received they cause ProtocolErrors to be
+        raised.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers
+        )
+
+        # Do some annoying bit twiddling here: the stream ID is currently set
+        # to '1', change it to '0'. Grab the first 9 bytes (the frame header),
+        # replace any instance of the byte '\x01' with '\x00', and then graft
+        # it back onto the remaining bytes.
+        frame_data = f.serialize()
+        frame_data = frame_data[:9].replace(b'\x01', b'\x00') + frame_data[9:]
+
+        with pytest.raises(h2.exceptions.ProtocolError) as e:
+            c.receive_data(frame_data)
+
+        assert "Stream ID must be non-zero" in str(e.value)
+
+    def test_get_stream_reset_event_on_auto_reset(self, frame_factory):
+        """
+        When hyper-h2 resets a stream automatically, a StreamReset event fires.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers,
+            flags=['END_STREAM']
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        bad_frame = frame_factory.build_data_frame(
+            data=b'hello'
+        )
+        events = c.receive_data(bad_frame.serialize())
+
+        expected_frame = frame_factory.build_rst_stream_frame(
+            stream_id=1,
+            error_code=0x5,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+        assert len(events) == 1
+        event = events[0]
+        assert isinstance(event, h2.events.StreamReset)
+        assert event.stream_id == 1
+        assert event.error_code == h2.errors.ErrorCodes.STREAM_CLOSED
+        assert not event.remote_reset
+
+    def test_one_one_stream_reset(self, frame_factory):
+        """
+        When hyper-h2 resets a stream automatically, a StreamReset event fires,
+        but only for the first reset: the others are silent.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            self.example_request_headers,
+            flags=['END_STREAM']
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        bad_frame = frame_factory.build_data_frame(
+            data=b'hello'
+        )
+        # Receive 5 frames.
+        events = c.receive_data(bad_frame.serialize() * 5)
+
+        expected_frame = frame_factory.build_rst_stream_frame(
+            stream_id=1,
+            error_code=0x5,
+        )
+        assert c.data_to_send() == expected_frame.serialize() * 5
+
+        assert len(events) == 1
+        event = events[0]
+        assert isinstance(event, h2.events.StreamReset)
+        assert event.stream_id == 1
+        assert event.error_code == h2.errors.ErrorCodes.STREAM_CLOSED
+        assert not event.remote_reset
+
+    @pytest.mark.parametrize('value', ['', 'twelve'])
+    def test_error_on_invalid_content_length(self, frame_factory, value):
+        """
+        When an invalid content-length is received, a ProtocolError is thrown.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.example_request_headers + [('content-length', value)]
+        )
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_invalid_header_data_protocol_error(self, frame_factory):
+        """
+        If an invalid header block is received, we raise a ProtocolError.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.example_request_headers
+        )
+        f.data = b'\x00\x00\x00\x00'
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_invalid_push_promise_data_protocol_error(self, frame_factory):
+        """
+        If an invalid header block is received on a PUSH_PROMISE, we raise a
+        ProtocolError.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers
+        )
+        f.data = b'\x00\x00\x00\x00'
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_cannot_receive_push_on_pushed_stream(self, frame_factory):
+        """
+        If a PUSH_PROMISE frame is received with the parent stream ID being a
+        pushed stream, this is rejected with a PROTOCOL_ERROR.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_request_headers,
+            end_stream=True
+        )
+
+        f1 = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers,
+        )
+        f2 = frame_factory.build_headers_frame(
+            stream_id=2,
+            headers=self.example_response_headers,
+        )
+        c.receive_data(f1.serialize() + f2.serialize())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=2,
+            promised_stream_id=4,
+            headers=self.example_request_headers,
+        )
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=2,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_cannot_send_push_on_pushed_stream(self, frame_factory):
+        """
+        If a user tries to send a PUSH_PROMISE frame with the parent stream ID
+        being a pushed stream, this is rejected with a PROTOCOL_ERROR.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(
+            stream_id=1, headers=self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+
+        c.push_stream(
+            stream_id=1,
+            promised_stream_id=2,
+            request_headers=self.example_request_headers
+        )
+        c.send_headers(stream_id=2, headers=self.example_response_headers)
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.push_stream(
+                stream_id=2,
+                promised_stream_id=4,
+                request_headers=self.example_request_headers
+            )
diff --git a/tools/third_party/h2/test/test_invalid_headers.py b/tools/third_party/h2/test/test_invalid_headers.py
new file mode 100755
index 0000000..1a315ee
--- /dev/null
+++ b/tools/third_party/h2/test/test_invalid_headers.py
@@ -0,0 +1,951 @@
+# -*- coding: utf-8 -*-
+"""
+test_invalid_headers.py
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module contains tests that use invalid header blocks, and validates that
+they fail appropriately.
+"""
+import itertools
+
+import pytest
+
+import h2.config
+import h2.connection
+import h2.errors
+import h2.events
+import h2.exceptions
+import h2.settings
+import h2.utilities
+
+import hyperframe.frame
+
+from hypothesis import given
+from hypothesis.strategies import binary, lists, tuples
+
+HEADERS_STRATEGY = lists(tuples(binary(min_size=1), binary()))
+
+
+class TestInvalidFrameSequences(object):
+    """
+    Invalid header sequences cause ProtocolErrors to be thrown when received.
+    """
+    base_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+        ('user-agent', 'someua/0.0.1'),
+    ]
+    invalid_header_blocks = [
+        base_request_headers + [('Uppercase', 'name')],
+        base_request_headers + [(':late', 'pseudo-header')],
+        [(':path', 'duplicate-pseudo-header')] + base_request_headers,
+        base_request_headers + [('connection', 'close')],
+        base_request_headers + [('proxy-connection', 'close')],
+        base_request_headers + [('keep-alive', 'close')],
+        base_request_headers + [('transfer-encoding', 'gzip')],
+        base_request_headers + [('upgrade', 'super-protocol/1.1')],
+        base_request_headers + [('te', 'chunked')],
+        base_request_headers + [('host', 'notexample.com')],
+        base_request_headers + [(' name', 'name with leading space')],
+        base_request_headers + [('name ', 'name with trailing space')],
+        base_request_headers + [('name', ' value with leading space')],
+        base_request_headers + [('name', 'value with trailing space ')],
+        [header for header in base_request_headers
+         if header[0] != ':authority'],
+    ]
+    server_config = h2.config.H2Configuration(
+        client_side=False, header_encoding='utf-8'
+    )
+
+    @pytest.mark.parametrize('headers', invalid_header_blocks)
+    def test_headers_event(self, frame_factory, headers):
+        """
+        Test that invalid headers are rejected with PROTOCOL_ERROR.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(headers)
+        data = f.serialize()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(data)
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    @pytest.mark.parametrize('headers', invalid_header_blocks)
+    def test_push_promise_event(self, frame_factory, headers):
+        """
+        If a PUSH_PROMISE header frame is received with an invalid header block
+        it is rejected with a PROTOCOL_ERROR.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1, headers=self.base_request_headers, end_stream=True
+        )
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=headers
+        )
+        data = f.serialize()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(data)
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    @pytest.mark.parametrize('headers', invalid_header_blocks)
+    def test_push_promise_skipping_validation(self, frame_factory, headers):
+        """
+        If we have ``validate_inbound_headers`` disabled, then invalid header
+        blocks in push promise frames are allowed to pass.
+        """
+        config = h2.config.H2Configuration(
+            client_side=True,
+            validate_inbound_headers=False,
+            header_encoding='utf-8'
+        )
+
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1, headers=self.base_request_headers, end_stream=True
+        )
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=headers
+        )
+        data = f.serialize()
+
+        events = c.receive_data(data)
+        assert len(events) == 1
+        pp_event = events[0]
+        assert pp_event.headers == headers
+
+    @pytest.mark.parametrize('headers', invalid_header_blocks)
+    def test_headers_event_skipping_validation(self, frame_factory, headers):
+        """
+        If we have ``validate_inbound_headers`` disabled, then all of these
+        invalid header blocks are allowed to pass.
+        """
+        config = h2.config.H2Configuration(
+            client_side=False,
+            validate_inbound_headers=False,
+            header_encoding='utf-8'
+        )
+
+        c = h2.connection.H2Connection(config=config)
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(headers)
+        data = f.serialize()
+
+        events = c.receive_data(data)
+        assert len(events) == 1
+        request_event = events[0]
+        assert request_event.headers == headers
+
+    def test_transfer_encoding_trailers_is_valid(self, frame_factory):
+        """
+        The TE header with the value 'trailers' is allowed by the filter.
+        """
+        headers = (
+            self.base_request_headers + [('te', 'trailers')]
+        )
+
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(headers)
+        data = f.serialize()
+
+        events = c.receive_data(data)
+        assert len(events) == 1
+        request_event = events[0]
+        assert request_event.headers == headers
+
+    def test_pseudo_headers_rejected_in_trailer(self, frame_factory):
+        """
+        Ensure we reject pseudo-headers included in trailers.
+        """
+        trailers = [(':path', '/'), ('extra', 'value')]
+
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        header_frame = frame_factory.build_headers_frame(
+            self.base_request_headers
+        )
+        trailer_frame = frame_factory.build_headers_frame(
+            trailers, flags=["END_STREAM"]
+        )
+        head = header_frame.serialize()
+        trailer = trailer_frame.serialize()
+
+        c.receive_data(head)
+        # Raise exception if pseudo header in trailer
+        with pytest.raises(h2.exceptions.ProtocolError) as e:
+            c.receive_data(trailer)
+        assert "pseudo-header in trailer" in str(e)
+
+        # Test appropriate response frame is generated
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+
+class TestSendingInvalidFrameSequences(object):
+    """
+    Trying to send invalid header sequences causes ProtocolErrors to
+    be thrown.
+    """
+    base_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+        ('user-agent', 'someua/0.0.1'),
+    ]
+    invalid_header_blocks = [
+        base_request_headers + [(':late', 'pseudo-header')],
+        [(':path', 'duplicate-pseudo-header')] + base_request_headers,
+        base_request_headers + [('te', 'chunked')],
+        base_request_headers + [('host', 'notexample.com')],
+        [header for header in base_request_headers
+         if header[0] != ':authority'],
+    ]
+    strippable_header_blocks = [
+        base_request_headers + [('connection', 'close')],
+        base_request_headers + [('proxy-connection', 'close')],
+        base_request_headers + [('keep-alive', 'close')],
+        base_request_headers + [('transfer-encoding', 'gzip')],
+        base_request_headers + [('upgrade', 'super-protocol/1.1')]
+    ]
+    all_header_blocks = invalid_header_blocks + strippable_header_blocks
+
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    @pytest.mark.parametrize('headers', invalid_header_blocks)
+    def test_headers_event(self, frame_factory, headers):
+        """
+        Test that sending invalid headers raises a ProtocolError.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # Clear the data, then try to send headers.
+        c.clear_outbound_data_buffer()
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.send_headers(1, headers)
+
+    @pytest.mark.parametrize('headers', invalid_header_blocks)
+    def test_send_push_promise(self, frame_factory, headers):
+        """
+        Sending invalid headers in a push promise raises a ProtocolError.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        header_frame = frame_factory.build_headers_frame(
+            self.base_request_headers
+        )
+        c.receive_data(header_frame.serialize())
+
+        # Clear the data, then try to send a push promise.
+        c.clear_outbound_data_buffer()
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.push_stream(
+                stream_id=1, promised_stream_id=2, request_headers=headers
+            )
+
+    @pytest.mark.parametrize('headers', all_header_blocks)
+    def test_headers_event_skipping_validation(self, frame_factory, headers):
+        """
+        If we have ``validate_outbound_headers`` disabled, then all of these
+        invalid header blocks are allowed to pass.
+        """
+        config = h2.config.H2Configuration(
+            validate_outbound_headers=False
+        )
+
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+
+        # Clear the data, then send headers.
+        c.clear_outbound_data_buffer()
+        c.send_headers(1, headers)
+
+        # Ensure headers are still normalized.
+        norm_headers = h2.utilities.normalize_outbound_headers(headers, None)
+        f = frame_factory.build_headers_frame(norm_headers)
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize('headers', all_header_blocks)
+    def test_push_promise_skipping_validation(self, frame_factory, headers):
+        """
+        If we have ``validate_outbound_headers`` disabled, then all of these
+        invalid header blocks are allowed to pass.
+        """
+        config = h2.config.H2Configuration(
+            client_side=False,
+            validate_outbound_headers=False,
+        )
+
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        header_frame = frame_factory.build_headers_frame(
+            self.base_request_headers
+        )
+        c.receive_data(header_frame.serialize())
+
+        # Create push promise frame with normalized headers.
+        frame_factory.refresh_encoder()
+        norm_headers = h2.utilities.normalize_outbound_headers(headers, None)
+        pp_frame = frame_factory.build_push_promise_frame(
+            stream_id=1, promised_stream_id=2, headers=norm_headers
+        )
+
+        # Clear the data, then send a push promise.
+        c.clear_outbound_data_buffer()
+        c.push_stream(
+            stream_id=1, promised_stream_id=2, request_headers=headers
+        )
+        assert c.data_to_send() == pp_frame.serialize()
+
+    @pytest.mark.parametrize('headers', all_header_blocks)
+    def test_headers_event_skip_normalization(self, frame_factory, headers):
+        """
+        If we have ``normalize_outbound_headers`` disabled, then all of these
+        invalid header blocks are sent through unmodified.
+        """
+        config = h2.config.H2Configuration(
+            validate_outbound_headers=False,
+            normalize_outbound_headers=False
+        )
+
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+
+        f = frame_factory.build_headers_frame(
+            headers,
+            stream_id=1,
+        )
+
+        # Clear the data, then send headers.
+        c.clear_outbound_data_buffer()
+        c.send_headers(1, headers)
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize('headers', all_header_blocks)
+    def test_push_promise_skip_normalization(self, frame_factory, headers):
+        """
+        If we have ``normalize_outbound_headers`` disabled, then all of these
+        invalid header blocks are allowed to pass unmodified.
+        """
+        config = h2.config.H2Configuration(
+            client_side=False,
+            validate_outbound_headers=False,
+            normalize_outbound_headers=False,
+        )
+
+        c = h2.connection.H2Connection(config=config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        header_frame = frame_factory.build_headers_frame(
+            self.base_request_headers
+        )
+        c.receive_data(header_frame.serialize())
+
+        frame_factory.refresh_encoder()
+        pp_frame = frame_factory.build_push_promise_frame(
+            stream_id=1, promised_stream_id=2, headers=headers
+        )
+
+        # Clear the data, then send a push promise.
+        c.clear_outbound_data_buffer()
+        c.push_stream(
+            stream_id=1, promised_stream_id=2, request_headers=headers
+        )
+        assert c.data_to_send() == pp_frame.serialize()
+
+    @pytest.mark.parametrize('headers', strippable_header_blocks)
+    def test_strippable_headers(self, frame_factory, headers):
+        """
+        Test that connection-related headers are removed before sending.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # Clear the data, then try to send headers.
+        c.clear_outbound_data_buffer()
+        c.send_headers(1, headers)
+
+        f = frame_factory.build_headers_frame(self.base_request_headers)
+        assert c.data_to_send() == f.serialize()
+
+
+class TestFilter(object):
+    """
+    Test the filter function directly.
+
+    These tests exist to confirm the behaviour of the filter function in a
+    wide range of scenarios. Many of these scenarios may not be legal for
+    HTTP/2 and so may never hit the function, but it's worth validating that it
+    behaves as expected anyway.
+    """
+    validation_functions = [
+        h2.utilities.validate_headers,
+        h2.utilities.validate_outbound_headers
+    ]
+
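+    # All 16 combinations (2**4) of the four header validation flags.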
+    hdr_validation_combos = [
+        h2.utilities.HeaderValidationFlags(
+            is_client, is_trailer, is_response_header, is_push_promise
+        )
+        for is_client, is_trailer, is_response_header, is_push_promise in (
+            itertools.product([True, False], repeat=4)
+        )
+    ]
+
+    hdr_validation_response_headers = [
+        flags for flags in hdr_validation_combos
+        if flags.is_response_header
+    ]
+
+    hdr_validation_request_headers_no_trailer = [
+        flags for flags in hdr_validation_combos
+        if not (flags.is_trailer or flags.is_response_header)
+    ]
+
+    invalid_request_header_blocks_bytes = (
+        # First, missing :method
+        (
+            (b':authority', b'google.com'),
+            (b':path', b'/'),
+            (b':scheme', b'https'),
+        ),
+        # Next, missing :path
+        (
+            (b':authority', b'google.com'),
+            (b':method', b'GET'),
+            (b':scheme', b'https'),
+        ),
+        # Next, missing :scheme
+        (
+            (b':authority', b'google.com'),
+            (b':method', b'GET'),
+            (b':path', b'/'),
+        ),
+        # Finally, path present but empty.
+        (
+            (b':authority', b'google.com'),
+            (b':method', b'GET'),
+            (b':scheme', b'https'),
+            (b':path', b''),
+        ),
+    )
+    invalid_request_header_blocks_unicode = (
+        # First, missing :method
+        (
+            (u':authority', u'google.com'),
+            (u':path', u'/'),
+            (u':scheme', u'https'),
+        ),
+        # Next, missing :path
+        (
+            (u':authority', u'google.com'),
+            (u':method', u'GET'),
+            (u':scheme', u'https'),
+        ),
+        # Next, missing :scheme
+        (
+            (u':authority', u'google.com'),
+            (u':method', u'GET'),
+            (u':path', u'/'),
+        ),
+        # Finally, path present but empty.
+        (
+            (u':authority', u'google.com'),
+            (u':method', u'GET'),
+            (u':scheme', u'https'),
+            (u':path', u''),
+        ),
+    )
+
+    # All headers that are forbidden from either request or response blocks.
+    forbidden_request_headers_bytes = (b':status',)
+    forbidden_request_headers_unicode = (u':status',)
+    forbidden_response_headers_bytes = (
+        b':path', b':scheme', b':authority', b':method'
+    )
+    forbidden_response_headers_unicode = (
+        u':path', u':scheme', u':authority', u':method'
+    )
+
+    @pytest.mark.parametrize('validation_function', validation_functions)
+    @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
+    @given(headers=HEADERS_STRATEGY)
+    def test_range_of_acceptable_outputs(self,
+                                         headers,
+                                         validation_function,
+                                         hdr_validation_flags):
+        """
+        The header validation functions either return the data unchanged
+        or throw a ProtocolError.
+        """
+        try:
+            assert headers == list(validation_function(
+                headers, hdr_validation_flags))
+        except h2.exceptions.ProtocolError:
+            assert True
+
+    @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
+    def test_invalid_pseudo_headers(self, hdr_validation_flags):
+        headers = [(b':custom', b'value')]
+        with pytest.raises(h2.exceptions.ProtocolError):
+            list(h2.utilities.validate_headers(headers, hdr_validation_flags))
+
+    @pytest.mark.parametrize('validation_function', validation_functions)
+    @pytest.mark.parametrize(
+        'hdr_validation_flags', hdr_validation_request_headers_no_trailer
+    )
+    def test_matching_authority_host_headers(self,
+                                             validation_function,
+                                             hdr_validation_flags):
+        """
+        If a header block has :authority and Host headers and they match,
+        the headers should pass through unchanged.
+        """
+        headers = [
+            (b':authority', b'example.com'),
+            (b':path', b'/'),
+            (b':scheme', b'https'),
+            (b':method', b'GET'),
+            (b'host', b'example.com'),
+        ]
+        assert headers == list(h2.utilities.validate_headers(
+            headers, hdr_validation_flags
+        ))
+
+    @pytest.mark.parametrize(
+        'hdr_validation_flags', hdr_validation_response_headers
+    )
+    def test_response_header_without_status(self, hdr_validation_flags):
+        headers = [(b'content-length', b'42')]
+        with pytest.raises(h2.exceptions.ProtocolError):
+            list(h2.utilities.validate_headers(headers, hdr_validation_flags))
+
+    @pytest.mark.parametrize(
+        'hdr_validation_flags', hdr_validation_request_headers_no_trailer
+    )
+    @pytest.mark.parametrize(
+        'header_block',
+        (
+            invalid_request_header_blocks_bytes +
+            invalid_request_header_blocks_unicode
+        )
+    )
+    def test_outbound_req_header_missing_pseudo_headers(self,
+                                                        hdr_validation_flags,
+                                                        header_block):
+        with pytest.raises(h2.exceptions.ProtocolError):
+            list(
+                h2.utilities.validate_outbound_headers(
+                    header_block, hdr_validation_flags
+                )
+            )
+
+    @pytest.mark.parametrize(
+        'hdr_validation_flags', hdr_validation_request_headers_no_trailer
+    )
+    @pytest.mark.parametrize(
+        'header_block', invalid_request_header_blocks_bytes
+    )
+    def test_inbound_req_header_missing_pseudo_headers(self,
+                                                       hdr_validation_flags,
+                                                       header_block):
+        with pytest.raises(h2.exceptions.ProtocolError):
+            list(
+                h2.utilities.validate_headers(
+                    header_block, hdr_validation_flags
+                )
+            )
+
+    @pytest.mark.parametrize(
+        'hdr_validation_flags', hdr_validation_request_headers_no_trailer
+    )
+    @pytest.mark.parametrize(
+        'invalid_header',
+        forbidden_request_headers_bytes + forbidden_request_headers_unicode
+    )
+    def test_outbound_req_header_extra_pseudo_headers(self,
+                                                      hdr_validation_flags,
+                                                      invalid_header):
+        """
+        Outbound request header blocks containing the forbidden request headers
+        fail validation.
+        """
+        headers = [
+            (b':path', b'/'),
+            (b':scheme', b'https'),
+            (b':authority', b'google.com'),
+            (b':method', b'GET'),
+        ]
+        headers.append((invalid_header, b'some value'))
+        with pytest.raises(h2.exceptions.ProtocolError):
+            list(
+                h2.utilities.validate_outbound_headers(
+                    headers, hdr_validation_flags
+                )
+            )
+
+    @pytest.mark.parametrize(
+        'hdr_validation_flags', hdr_validation_request_headers_no_trailer
+    )
+    @pytest.mark.parametrize(
+        'invalid_header',
+        forbidden_request_headers_bytes
+    )
+    def test_inbound_req_header_extra_pseudo_headers(self,
+                                                     hdr_validation_flags,
+                                                     invalid_header):
+        """
+        Inbound request header blocks containing the forbidden request headers
+        fail validation.
+        """
+        headers = [
+            (b':path', b'/'),
+            (b':scheme', b'https'),
+            (b':authority', b'google.com'),
+            (b':method', b'GET'),
+        ]
+        headers.append((invalid_header, b'some value'))
+        with pytest.raises(h2.exceptions.ProtocolError):
+            list(h2.utilities.validate_headers(headers, hdr_validation_flags))
+
+    @pytest.mark.parametrize(
+        'hdr_validation_flags', hdr_validation_response_headers
+    )
+    @pytest.mark.parametrize(
+        'invalid_header',
+        forbidden_response_headers_bytes + forbidden_response_headers_unicode
+    )
+    def test_outbound_resp_header_extra_pseudo_headers(self,
+                                                       hdr_validation_flags,
+                                                       invalid_header):
+        """
+        Outbound response header blocks containing the forbidden response
+        headers fail validation.
+        """
+        headers = [(b':status', b'200')]
+        headers.append((invalid_header, b'some value'))
+        with pytest.raises(h2.exceptions.ProtocolError):
+            list(
+                h2.utilities.validate_outbound_headers(
+                    headers, hdr_validation_flags
+                )
+            )
+
+    @pytest.mark.parametrize(
+        'hdr_validation_flags', hdr_validation_response_headers
+    )
+    @pytest.mark.parametrize(
+        'invalid_header',
+        forbidden_response_headers_bytes
+    )
+    def test_inbound_resp_header_extra_pseudo_headers(self,
+                                                      hdr_validation_flags,
+                                                      invalid_header):
+        """
+        Inbound response header blocks containing the forbidden response
+        headers fail validation.
+        """
+        headers = [(b':status', b'200')]
+        headers.append((invalid_header, b'some value'))
+        with pytest.raises(h2.exceptions.ProtocolError):
+            list(h2.utilities.validate_headers(headers, hdr_validation_flags))
+
+
+class TestOversizedHeaders(object):
+    """
+    Tests that oversized header blocks are correctly rejected. This replicates
+    the "HPACK Bomb" attack, and confirms that we're resistant against it.
+    """
+    request_header_block = [
+        (b':method', b'GET'),
+        (b':authority', b'example.com'),
+        (b':scheme', b'https'),
+        (b':path', b'/'),
+    ]
+
+    response_header_block = [
+        (b':status', b'200'),
+    ]
+
+    # The first header block contains a single header that fills the header
+    # table. To do that, we'll give it a single-character header name and a
+    # 4063 byte header value. This will make it exactly the size of the header
+    # table. It must come last, so that it evicts all other headers.
+    # This block must be appended to either a request or response block.
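+    # (An HPACK table entry costs len(name) + len(value) + 32 bytes of
+    # overhead, so 1 + 4063 + 32 = 4096, the default table size.)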
+    first_header_block = [
+        (b'a', b'a' * 4063),
+    ]
+
+    # The second header "block" is actually a custom HEADERS frame body that
+    # simply repeatedly refers to the first entry for 16kB. Each byte has the
+    # high bit set (0x80), and then uses the remaining 7 bits to encode the
+    # number 62 (0x3e), leading to a repeat of the byte 0xbe.
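+    # (Index 62 is the first dynamic table entry, because the HPACK static
+    # table holds 61 entries: here, the large header inserted above.)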
+    second_header_block = b'\xbe' * 2**14
+
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_hpack_bomb_request(self, frame_factory):
+        """
+        A HPACK bomb request causes the connection to be torn down with the
+        error code ENHANCE_YOUR_CALM.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            self.request_header_block + self.first_header_block
+        )
+        data = f.serialize()
+        c.receive_data(data)
+
+        # Build the attack payload.
+        attack_frame = hyperframe.frame.HeadersFrame(stream_id=3)
+        attack_frame.data = self.second_header_block
+        attack_frame.flags.add('END_HEADERS')
+        data = attack_frame.serialize()
+
+        with pytest.raises(h2.exceptions.DenialOfServiceError):
+            c.receive_data(data)
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_hpack_bomb_response(self, frame_factory):
+        """
+        A HPACK bomb response causes the connection to be torn down with the
+        error code ENHANCE_YOUR_CALM.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1, headers=self.request_header_block
+        )
+        c.send_headers(
+            stream_id=3, headers=self.request_header_block
+        )
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            self.response_header_block + self.first_header_block
+        )
+        data = f.serialize()
+        c.receive_data(data)
+
+        # Build the attack payload.
+        attack_frame = hyperframe.frame.HeadersFrame(stream_id=3)
+        attack_frame.data = self.second_header_block
+        attack_frame.flags.add('END_HEADERS')
+        data = attack_frame.serialize()
+
+        with pytest.raises(h2.exceptions.DenialOfServiceError):
+            c.receive_data(data)
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_hpack_bomb_push(self, frame_factory):
+        """
+        A HPACK bomb push causes the connection to be torn down with the
+        error code ENHANCE_YOUR_CALM.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1, headers=self.request_header_block
+        )
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            self.response_header_block + self.first_header_block
+        )
+        data = f.serialize()
+        c.receive_data(data)
+
+        # Build the attack payload. We need to shrink it by four bytes because
+        # the promised_stream_id consumes four bytes of body.
+        attack_frame = hyperframe.frame.PushPromiseFrame(stream_id=3)
+        attack_frame.promised_stream_id = 2
+        attack_frame.data = self.second_header_block[:-4]
+        attack_frame.flags.add('END_HEADERS')
+        data = attack_frame.serialize()
+
+        with pytest.raises(h2.exceptions.DenialOfServiceError):
+            c.receive_data(data)
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=0, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_reject_headers_when_list_size_shrunk(self, frame_factory):
+        """
+        When we've shrunk the header list size, we reject new header blocks
+        that violate the new size.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        # Receive the first request, which causes no problem.
+        f = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.request_header_block
+        )
+        data = f.serialize()
+        c.receive_data(data)
+
+        # Now, send a settings change. It's un-ACKed at this time. A new
+        # request arrives, also without incident.
+        c.update_settings({h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 50})
+        c.clear_outbound_data_buffer()
+        f = frame_factory.build_headers_frame(
+            stream_id=3,
+            headers=self.request_header_block
+        )
+        data = f.serialize()
+        c.receive_data(data)
+
+        # We get a SETTINGS ACK.
+        f = frame_factory.build_settings_frame({}, ack=True)
+        data = f.serialize()
+        c.receive_data(data)
+
+        # Now a third request comes in. This explodes.
+        f = frame_factory.build_headers_frame(
+            stream_id=5,
+            headers=self.request_header_block
+        )
+        data = f.serialize()
+
+        with pytest.raises(h2.exceptions.DenialOfServiceError):
+            c.receive_data(data)
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=3, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_reject_headers_when_table_size_shrunk(self, frame_factory):
+        """
+        When we've shrunk the header table size, we reject header blocks that
+        do not respect the change.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        # Receive the first request, which causes no problem.
+        f = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.request_header_block
+        )
+        data = f.serialize()
+        c.receive_data(data)
+
+        # Now, send a settings change. It's un-ACKed at this time. A new
+        # request arrives, also without incident.
+        c.update_settings({h2.settings.SettingCodes.HEADER_TABLE_SIZE: 128})
+        c.clear_outbound_data_buffer()
+        f = frame_factory.build_headers_frame(
+            stream_id=3,
+            headers=self.request_header_block
+        )
+        data = f.serialize()
+        c.receive_data(data)
+
+        # We get a SETTINGS ACK.
+        f = frame_factory.build_settings_frame({}, ack=True)
+        data = f.serialize()
+        c.receive_data(data)
+
+        # Now a third request comes in. This explodes, as it does not contain
+        # a dynamic table size update.
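+        # (RFC 7541 Section 4.2 requires the encoder to emit a dynamic table
+        # size update at the start of the first header block after the table
+        # size limit has been reduced; a block that omits it is invalid.)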
+        f = frame_factory.build_headers_frame(
+            stream_id=5,
+            headers=self.request_header_block
+        )
+        data = f.serialize()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(data)
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=3, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    def test_reject_headers_exceeding_table_size(self, frame_factory):
+        """
+        When the remote peer sends a dynamic table size update that exceeds our
+        setting, we reject it.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        # Receive the first request, which causes no problem.
+        f = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.request_header_block
+        )
+        data = f.serialize()
+        c.receive_data(data)
+
+        # Now a second request comes in that sets the table size too high.
+        # This explodes.
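+        # (RFC 7541 Section 6.3 caps a dynamic table size update at the limit
+        # advertised in SETTINGS_HEADER_TABLE_SIZE, so exceeding it by one is
+        # rejected.)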
+        frame_factory.change_table_size(c.local_settings.header_table_size + 1)
+        f = frame_factory.build_headers_frame(
+            stream_id=5,
+            headers=self.request_header_block
+        )
+        data = f.serialize()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(data)
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
+        )
+        assert c.data_to_send() == expected_frame.serialize()
diff --git a/tools/third_party/h2/test/test_priority.py b/tools/third_party/h2/test/test_priority.py
new file mode 100755
index 0000000..cbc7332
--- /dev/null
+++ b/tools/third_party/h2/test/test_priority.py
@@ -0,0 +1,358 @@
+# -*- coding: utf-8 -*-
+"""
+test_priority
+~~~~~~~~~~~~~
+
+Test the priority logic of Hyper-h2.
+"""
+import pytest
+
+import h2.config
+import h2.connection
+import h2.errors
+import h2.events
+import h2.exceptions
+import h2.stream
+
+
+class TestPriority(object):
+    """
+    Basic priority tests.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'pytest-h2'),
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_receiving_priority_emits_priority_update(self, frame_factory):
+        """
+        Receiving a priority frame emits a PriorityUpdated event.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_priority_frame(
+            stream_id=1,
+            weight=255,
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        assert not c.data_to_send()
+
+        event = events[0]
+        assert isinstance(event, h2.events.PriorityUpdated)
+        assert event.stream_id == 1
+        assert event.depends_on == 0
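+        # PRIORITY frames carry weight minus one on the wire (RFC 7540
+        # Section 6.3), so the serialized 255 is surfaced as 256.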
+        assert event.weight == 256
+        assert event.exclusive is False
+
+    def test_headers_with_priority_info(self, frame_factory):
+        """
+        Receiving a HEADERS frame with priority information on it emits a
+        PriorityUpdated event.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            stream_id=3,
+            flags=['PRIORITY'],
+            stream_weight=15,
+            depends_on=1,
+            exclusive=True,
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 2
+        assert not c.data_to_send()
+
+        event = events[1]
+        assert isinstance(event, h2.events.PriorityUpdated)
+        assert event.stream_id == 3
+        assert event.depends_on == 1
+        assert event.weight == 16
+        assert event.exclusive is True
+
+    def test_streams_may_not_depend_on_themselves(self, frame_factory):
+        """
+        A stream adjusted to depend on itself causes a Protocol Error.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            stream_id=3,
+            flags=['PRIORITY'],
+            stream_weight=15,
+            depends_on=1,
+            exclusive=True,
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_priority_frame(
+            stream_id=3,
+            depends_on=3,
+            weight=15
+        )
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.receive_data(f.serialize())
+
+        expected_frame = frame_factory.build_goaway_frame(
+            last_stream_id=3,
+            error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
+        )
+        assert c.data_to_send() == expected_frame.serialize()
+
+    @pytest.mark.parametrize(
+        'depends_on,weight,exclusive',
+        [
+            (0, 256, False),
+            (3, 128, False),
+            (3, 128, True),
+        ]
+    )
+    def test_can_prioritize_stream(self, depends_on, weight, exclusive,
+                                   frame_factory):
+        """
+        hyper-h2 can emit priority frames.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        c.send_headers(headers=self.example_request_headers, stream_id=1)
+        c.send_headers(headers=self.example_request_headers, stream_id=3)
+        c.clear_outbound_data_buffer()
+
+        c.prioritize(
+            stream_id=1,
+            depends_on=depends_on,
+            weight=weight,
+            exclusive=exclusive
+        )
+
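+        # The frame factory takes the on-wire weight, which is one less than
+        # the weight passed to prioritize(), hence the weight - 1 below.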
+        f = frame_factory.build_priority_frame(
+            stream_id=1,
+            weight=weight - 1,
+            depends_on=depends_on,
+            exclusive=exclusive,
+        )
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize(
+        'depends_on,weight,exclusive',
+        [
+            (0, 256, False),
+            (1, 128, False),
+            (1, 128, True),
+        ]
+    )
+    def test_emit_headers_with_priority_info(self, depends_on, weight,
+                                             exclusive, frame_factory):
+        """
+        It is possible to send a headers frame with priority information on
+        it.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
+        c.send_headers(
+            headers=self.example_request_headers,
+            stream_id=3,
+            priority_weight=weight,
+            priority_depends_on=depends_on,
+            priority_exclusive=exclusive,
+        )
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            stream_id=3,
+            flags=['PRIORITY'],
+            stream_weight=weight - 1,
+            depends_on=depends_on,
+            exclusive=exclusive,
+        )
+        assert c.data_to_send() == f.serialize()
+
+    def test_may_not_prioritize_stream_to_depend_on_self(self, frame_factory):
+        """
+        A stream adjusted to depend on itself causes a Protocol Error.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.send_headers(
+            headers=self.example_request_headers,
+            stream_id=3,
+            priority_weight=255,
+            priority_depends_on=0,
+            priority_exclusive=False,
+        )
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.prioritize(
+                stream_id=3,
+                depends_on=3,
+            )
+
+        assert not c.data_to_send()
+
+    def test_may_not_initially_set_stream_depend_on_self(self, frame_factory):
+        """
+        A stream that starts by depending on itself causes a Protocol Error.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.send_headers(
+                headers=self.example_request_headers,
+                stream_id=3,
+                priority_depends_on=3,
+            )
+
+        assert not c.data_to_send()
+
+    @pytest.mark.parametrize('weight', [0, -15, 257])
+    def test_prioritize_requires_valid_weight(self, weight):
+        """
+        A call to prioritize with an invalid weight causes a ProtocolError.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.prioritize(stream_id=1, weight=weight)
+
+        assert not c.data_to_send()
+
+    @pytest.mark.parametrize('weight', [0, -15, 257])
+    def test_send_headers_requires_valid_weight(self, weight):
+        """
+        A call to send_headers with an invalid weight causes a ProtocolError.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.send_headers(
+                stream_id=1,
+                headers=self.example_request_headers,
+                priority_weight=weight
+            )
+
+        assert not c.data_to_send()
+
+    def test_prioritize_defaults(self, frame_factory):
+        """
+        When prioritize() is called with no explicit arguments, it emits a
+        weight of 16, depending on stream zero non-exclusively.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
+        c.prioritize(stream_id=1)
+
+        f = frame_factory.build_priority_frame(
+            stream_id=1,
+            weight=15,
+            depends_on=0,
+            exclusive=False,
+        )
+        assert c.data_to_send() == f.serialize()
+
+    @pytest.mark.parametrize(
+        'priority_kwargs',
+        [
+            {'priority_weight': 16},
+            {'priority_depends_on': 0},
+            {'priority_exclusive': False},
+        ]
+    )
+    def test_send_headers_defaults(self, priority_kwargs, frame_factory):
+        """
+        When send_headers() is called with only one explicit argument, it emits
+        default values for everything else.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
+        c.send_headers(
+            stream_id=1,
+            headers=self.example_request_headers,
+            **priority_kwargs
+        )
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            stream_id=1,
+            flags=['PRIORITY'],
+            stream_weight=15,
+            depends_on=0,
+            exclusive=False,
+        )
+        assert c.data_to_send() == f.serialize()
+
+    def test_servers_cannot_prioritize(self, frame_factory):
+        """
+        Server stacks are not allowed to call ``prioritize()``.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.example_request_headers,
+        )
+        c.receive_data(f.serialize())
+
+        with pytest.raises(h2.exceptions.RFC1122Error):
+            c.prioritize(stream_id=1)
+
+    def test_servers_cannot_prioritize_with_headers(self, frame_factory):
+        """
+        Server stacks are not allowed to prioritize on headers either.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            stream_id=1,
+            headers=self.example_request_headers,
+        )
+        c.receive_data(f.serialize())
+
+        with pytest.raises(h2.exceptions.RFC1122Error):
+            c.send_headers(
+                stream_id=1,
+                headers=self.example_response_headers,
+                priority_weight=16,
+            )
diff --git a/tools/third_party/h2/test/test_related_events.py b/tools/third_party/h2/test/test_related_events.py
new file mode 100755
index 0000000..eb6b878
--- /dev/null
+++ b/tools/third_party/h2/test/test_related_events.py
@@ -0,0 +1,370 @@
+# -*- coding: utf-8 -*-
+"""
+test_related_events.py
+~~~~~~~~~~~~~~~~~~~~~~
+
+Specific tests to validate the "related events" logic used by certain events
+inside hyper-h2.
+"""
+import h2.config
+import h2.connection
+import h2.events
+
+
+class TestRelatedEvents(object):
+    """
+    Related events correlate all those events that happen on a single frame.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+
+    informational_response_headers = [
+        (':status', '100'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+
+    example_trailers = [
+        ('another', 'field'),
+    ]
+
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_request_received_related_all(self, frame_factory):
+        """
+        RequestReceived has two possible related events: PriorityUpdated and
+        StreamEnded, both fired when a single HEADERS frame is received.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            flags=['END_STREAM', 'PRIORITY'],
+            stream_weight=15,
+            depends_on=0,
+            exclusive=False,
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 3
+        base_event = events[0]
+        other_events = events[1:]
+
+        assert base_event.stream_ended in other_events
+        assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
+        assert base_event.priority_updated in other_events
+        assert isinstance(
+            base_event.priority_updated, h2.events.PriorityUpdated
+        )
+
+    def test_request_received_related_priority(self, frame_factory):
+        """
+        RequestReceived can be related to PriorityUpdated.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            flags=['PRIORITY'],
+            stream_weight=15,
+            depends_on=0,
+            exclusive=False,
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 2
+        base_event = events[0]
+        priority_updated_event = events[1]
+
+        assert base_event.priority_updated is priority_updated_event
+        assert base_event.stream_ended is None
+        assert isinstance(
+            base_event.priority_updated, h2.events.PriorityUpdated
+        )
+
+    def test_request_received_related_stream_ended(self, frame_factory):
+        """
+        RequestReceived can be related to StreamEnded.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.example_request_headers,
+            flags=['END_STREAM'],
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 2
+        base_event = events[0]
+        stream_ended_event = events[1]
+
+        assert base_event.stream_ended is stream_ended_event
+        assert base_event.priority_updated is None
+        assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
+
+    def test_response_received_related_nothing(self, frame_factory):
+        """
+        ResponseReceived is ordinarily related to no events.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 1
+        base_event = events[0]
+
+        assert base_event.stream_ended is None
+        assert base_event.priority_updated is None
+
+    def test_response_received_related_all(self, frame_factory):
+        """
+        ResponseReceived has two possible related events: PriorityUpdated and
+        StreamEnded, both fired when a single HEADERS frame is received.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+            flags=['END_STREAM', 'PRIORITY'],
+            stream_weight=15,
+            depends_on=0,
+            exclusive=False,
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 3
+        base_event = events[0]
+        other_events = events[1:]
+
+        assert base_event.stream_ended in other_events
+        assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
+        assert base_event.priority_updated in other_events
+        assert isinstance(
+            base_event.priority_updated, h2.events.PriorityUpdated
+        )
+
+    def test_response_received_related_priority(self, frame_factory):
+        """
+        ResponseReceived can be related to PriorityUpdated.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+            flags=['PRIORITY'],
+            stream_weight=15,
+            depends_on=0,
+            exclusive=False,
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 2
+        base_event = events[0]
+        priority_updated_event = events[1]
+
+        assert base_event.priority_updated is priority_updated_event
+        assert base_event.stream_ended is None
+        assert isinstance(
+            base_event.priority_updated, h2.events.PriorityUpdated
+        )
+
+    def test_response_received_related_stream_ended(self, frame_factory):
+        """
+        ResponseReceived can be related to StreamEnded.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+            flags=['END_STREAM'],
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 2
+        base_event = events[0]
+        stream_ended_event = events[1]
+
+        assert base_event.stream_ended is stream_ended_event
+        assert base_event.priority_updated is None
+        assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
+
+    def test_trailers_received_related_all(self, frame_factory):
+        """
+        TrailersReceived has two possible related events: PriorityUpdated and
+        StreamEnded, both fired when a single HEADERS frame is received.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+        )
+        c.receive_data(f.serialize())
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.example_trailers,
+            flags=['END_STREAM', 'PRIORITY'],
+            stream_weight=15,
+            depends_on=0,
+            exclusive=False,
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 3
+        base_event = events[0]
+        other_events = events[1:]
+
+        assert base_event.stream_ended in other_events
+        assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
+        assert base_event.priority_updated in other_events
+        assert isinstance(
+            base_event.priority_updated, h2.events.PriorityUpdated
+        )
+
+    def test_trailers_received_related_stream_ended(self, frame_factory):
+        """
+        TrailersReceived can be related to StreamEnded by itself.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+        )
+        c.receive_data(f.serialize())
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.example_trailers,
+            flags=['END_STREAM'],
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 2
+        base_event = events[0]
+        stream_ended_event = events[1]
+
+        assert base_event.stream_ended is stream_ended_event
+        assert base_event.priority_updated is None
+        assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
+
+    def test_informational_response_related_nothing(self, frame_factory):
+        """
+        InformationalResponseReceived in the standard case is related to
+        nothing.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.informational_response_headers,
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 1
+        base_event = events[0]
+
+        assert base_event.priority_updated is None
+
+    def test_informational_response_received_related_all(self, frame_factory):
+        """
+        InformationalResponseReceived has one possible related event:
+        PriorityUpdated, fired when a single HEADERS frame is received.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        input_frame = frame_factory.build_headers_frame(
+            headers=self.informational_response_headers,
+            flags=['PRIORITY'],
+            stream_weight=15,
+            depends_on=0,
+            exclusive=False,
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 2
+        base_event = events[0]
+        priority_updated_event = events[1]
+
+        assert base_event.priority_updated is priority_updated_event
+        assert isinstance(
+            base_event.priority_updated, h2.events.PriorityUpdated
+        )
+
+    def test_data_received_normally_relates_to_nothing(self, frame_factory):
+        """
+        A plain DATA frame leads to DataReceived with no related events.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+        )
+        c.receive_data(f.serialize())
+
+        input_frame = frame_factory.build_data_frame(
+            data=b'some data',
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 1
+        base_event = events[0]
+
+        assert base_event.stream_ended is None
+
+    def test_data_received_related_stream_ended(self, frame_factory):
+        """
+        DataReceived can be related to StreamEnded by itself.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+        )
+        c.receive_data(f.serialize())
+
+        input_frame = frame_factory.build_data_frame(
+            data=b'some data',
+            flags=['END_STREAM'],
+        )
+        events = c.receive_data(input_frame.serialize())
+
+        assert len(events) == 2
+        base_event = events[0]
+        stream_ended_event = events[1]
+
+        assert base_event.stream_ended is stream_ended_event
+        assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
diff --git a/tools/third_party/h2/test/test_rfc7838.py b/tools/third_party/h2/test/test_rfc7838.py
new file mode 100755
index 0000000..d7704e2
--- /dev/null
+++ b/tools/third_party/h2/test/test_rfc7838.py
@@ -0,0 +1,447 @@
+# -*- coding: utf-8 -*-
+"""
+test_rfc7838
+~~~~~~~~~~~~
+
+Test the RFC 7838 ALTSVC support.
+"""
+import pytest
+
+import h2.config
+import h2.connection
+import h2.events
+import h2.exceptions
+
+
+class TestRFC7838Client(object):
+    """
+    Tests that the client supports receiving the RFC 7838 AltSvc frame.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    example_response_headers = [
+        (u':status', u'200'),
+        (u'server', u'fake-serv/0.1.0')
+    ]
+
+    def test_receiving_altsvc_stream_zero(self, frame_factory):
+        """
+        An ALTSVC frame received on stream zero correctly transposes all the
+        fields from the frames.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
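+        # The field value follows the Alt-Svc syntax from RFC 7838: a protocol
+        # identifier with an alternative authority (h2 on port 8000) plus a
+        # freshness lifetime in seconds ("ma", i.e. max-age).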
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=0, origin=b"example.com", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.AlternativeServiceAvailable)
+        assert event.origin == b"example.com"
+        assert event.field_value == b'h2=":8000"; ma=60'
+
+        # No data gets sent.
+        assert not c.data_to_send()
+
+    def test_receiving_altsvc_stream_zero_no_origin(self, frame_factory):
+        """
+        An ALTSVC frame received on stream zero without an origin field is
+        ignored.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=0, origin=b"", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f.serialize())
+
+        assert not events
+        assert not c.data_to_send()
+
+    def test_receiving_altsvc_on_stream(self, frame_factory):
+        """
+        An ALTSVC frame received on a stream correctly transposes all the
+        fields from the frame and attaches the expected origin.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=1, origin=b"", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.AlternativeServiceAvailable)
+        assert event.origin == b"example.com"
+        assert event.field_value == b'h2=":8000"; ma=60'
+
+        # No data gets sent.
+        assert not c.data_to_send()
+
+    def test_receiving_altsvc_on_stream_with_origin(self, frame_factory):
+        """
+        An ALTSVC frame received on a stream with an origin field present gets
+        ignored.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=1, origin=b"example.com", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 0
+        assert not c.data_to_send()
+
+    def test_receiving_altsvc_on_stream_not_yet_opened(self, frame_factory):
+        """
+        When an ALTSVC frame is received on a stream the client hasn't yet
+        opened, the frame is ignored.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.clear_outbound_data_buffer()
+
+        # We'll test this twice, once on a client-initiated stream ID and once
+        # on a server initiated one.
+        f1 = frame_factory.build_alt_svc_frame(
+            stream_id=1, origin=b"", field=b'h2=":8000"; ma=60'
+        )
+        f2 = frame_factory.build_alt_svc_frame(
+            stream_id=2, origin=b"", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f1.serialize() + f2.serialize())
+
+        assert len(events) == 0
+        assert not c.data_to_send()
+
+    def test_receiving_altsvc_before_sending_headers(self, frame_factory):
+        """
+        When an ALTSVC frame is received but the client hasn't sent headers
+        yet, it gets ignored.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        # We need to create the idle stream. We have to do it by calling
+        # a private API. While this can't naturally happen in hyper-h2 (we
+        # don't currently have a mechanism by which this could occur), it could
+        # happen in the future and we defend against it.
+        c._begin_new_stream(
+            stream_id=1, allowed_ids=h2.connection.AllowedStreamIDs.ODD
+        )
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=1, origin=b"", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 0
+        assert not c.data_to_send()
+
+    def test_receiving_altsvc_after_receiving_headers(self, frame_factory):
+        """
+        When an ALTSVC frame is received but the server has already sent
+        headers, it gets ignored.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_response_headers
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=1, origin=b"", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 0
+        assert not c.data_to_send()
+
+    def test_receiving_altsvc_on_closed_stream(self, frame_factory):
+        """
+        When an ALTSVC frame is received on a closed stream, we ignore it.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(
+            stream_id=1, headers=self.example_request_headers, end_stream=True
+        )
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_response_headers,
+            flags=['END_STREAM'],
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=1, origin=b"", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 0
+        assert not c.data_to_send()
+
+    def test_receiving_altsvc_on_pushed_stream(self, frame_factory):
+        """
+        When an ALTSVC frame is received on a stream that the server pushed,
+        the frame is accepted.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=2, origin=b"", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 1
+        event = events[0]
+
+        assert isinstance(event, h2.events.AlternativeServiceAvailable)
+        assert event.origin == b"example.com"
+        assert event.field_value == b'h2=":8000"; ma=60'
+
+        # No data gets sent.
+        assert not c.data_to_send()
+
+    def test_cannot_send_explicit_alternative_service(self, frame_factory):
+        """
+        A client cannot send an explicit alternative service.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.advertise_alternative_service(
+                field_value=b'h2=":8000"; ma=60',
+                origin=b"example.com",
+            )
+
+    def test_cannot_send_implicit_alternative_service(self, frame_factory):
+        """
+        A client cannot send an implicit alternative service.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.advertise_alternative_service(
+                field_value=b'h2=":8000"; ma=60',
+                stream_id=1,
+            )
+
+
+class TestRFC7838Server(object):
+    """
+    Tests that the server supports sending the RFC 7838 AltSvc frame.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    example_response_headers = [
+        (u':status', u'200'),
+        (u'server', u'fake-serv/0.1.0')
+    ]
+
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_receiving_altsvc_as_server_stream_zero(self, frame_factory):
+        """
+        When an ALTSVC frame is received on stream zero and we are a server,
+        we ignore it.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=0, origin=b"example.com", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 0
+        assert not c.data_to_send()
+
+    def test_receiving_altsvc_as_server_on_stream(self, frame_factory):
+        """
+        When an ALTSVC frame is received on a stream and we are a server, we
+        ignore it.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=1, origin=b"", field=b'h2=":8000"; ma=60'
+        )
+        events = c.receive_data(f.serialize())
+
+        assert len(events) == 0
+        assert not c.data_to_send()
+
+    def test_sending_explicit_alternative_service(self, frame_factory):
+        """
+        A server can send an explicit alternative service.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        c.advertise_alternative_service(
+            field_value=b'h2=":8000"; ma=60',
+            origin=b"example.com",
+        )
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=0, origin=b"example.com", field=b'h2=":8000"; ma=60'
+        )
+        assert c.data_to_send() == f.serialize()
+
+    def test_sending_implicit_alternative_service(self, frame_factory):
+        """
+        A server can send an implicit alternative service.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+        c.clear_outbound_data_buffer()
+
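+        # An "implicit" advertisement is tied to a stream rather than to an
+        # explicit origin, so the resulting ALTSVC frame goes out on that
+        # stream with an empty origin field.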
+        c.advertise_alternative_service(
+            field_value=b'h2=":8000"; ma=60',
+            stream_id=1,
+        )
+
+        f = frame_factory.build_alt_svc_frame(
+            stream_id=1, origin=b"", field=b'h2=":8000"; ma=60'
+        )
+        assert c.data_to_send() == f.serialize()
+
+    def test_no_implicit_alternative_service_before_headers(self,
+                                                            frame_factory):
+        """
+        If headers haven't been received yet, the server forbids sending an
+        implicit alternative service.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.advertise_alternative_service(
+                field_value=b'h2=":8000"; ma=60',
+                stream_id=1,
+            )
+
+    def test_no_implicit_alternative_service_after_response(self,
+                                                            frame_factory):
+        """
+        If the server has sent response headers, hyper-h2 forbids sending an
+        implicit alternative service.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+        c.send_headers(stream_id=1, headers=self.example_response_headers)
+        c.clear_outbound_data_buffer()
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            c.advertise_alternative_service(
+                field_value=b'h2=":8000"; ma=60',
+                stream_id=1,
+            )
+
+    def test_cannot_provide_origin_and_stream_id(self, frame_factory):
+        """
+        The user cannot provide both the origin and stream_id arguments when
+        advertising alternative services.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+
+        with pytest.raises(ValueError):
+            c.advertise_alternative_service(
+                field_value=b'h2=":8000"; ma=60',
+                origin=b"example.com",
+                stream_id=1,
+            )
+
+    def test_cannot_provide_unicode_altsvc_field(self, frame_factory):
+        """
+        The user cannot provide the field value for alternative services as a
+        unicode string.
+        """
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+
+        with pytest.raises(ValueError):
+            c.advertise_alternative_service(
+                field_value=u'h2=":8000"; ma=60',
+                origin=b"example.com",
+            )
diff --git a/tools/third_party/h2/test/test_settings.py b/tools/third_party/h2/test/test_settings.py
new file mode 100755
index 0000000..2163bb9
--- /dev/null
+++ b/tools/third_party/h2/test/test_settings.py
@@ -0,0 +1,491 @@
+# -*- coding: utf-8 -*-
+"""
+test_settings
+~~~~~~~~~~~~~
+
+Test the Settings object.
+"""
+import pytest
+
+import h2.errors
+import h2.exceptions
+import h2.settings
+
+from hypothesis import given, assume
+from hypothesis.strategies import integers
+
+
+class TestSettings(object):
+    """
+    Test the Settings object behaves as expected.
+    """
+    def test_settings_defaults_client(self):
+        """
+        The Settings object begins with the appropriate defaults for clients.
+        """
+        s = h2.settings.Settings(client=True)
+
+        assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 4096
+        assert s[h2.settings.SettingCodes.ENABLE_PUSH] == 1
+        assert s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] == 65535
+        assert s[h2.settings.SettingCodes.MAX_FRAME_SIZE] == 16384
+
+    def test_settings_defaults_server(self):
+        """
+        The Settings object begins with the appropriate defaults for servers.
+        """
+        s = h2.settings.Settings(client=False)
+
+        assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 4096
+        assert s[h2.settings.SettingCodes.ENABLE_PUSH] == 0
+        assert s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] == 65535
+        assert s[h2.settings.SettingCodes.MAX_FRAME_SIZE] == 16384
+
+    @pytest.mark.parametrize('client', [True, False])
+    def test_can_set_initial_values(self, client):
+        """
+        The Settings object can be provided initial values that override the
+        defaults.
+        """
+        overrides = {
+            h2.settings.SettingCodes.HEADER_TABLE_SIZE: 8080,
+            h2.settings.SettingCodes.MAX_FRAME_SIZE: 16388,
+            h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,
+            h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 2**16,
+        }
+        s = h2.settings.Settings(client=client, initial_values=overrides)
+
+        assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 8080
+        assert s[h2.settings.SettingCodes.ENABLE_PUSH] == bool(client)
+        assert s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] == 65535
+        assert s[h2.settings.SettingCodes.MAX_FRAME_SIZE] == 16388
+        assert s[h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS] == 100
+        assert s[h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE] == 2**16
+
+    @pytest.mark.parametrize(
+        'setting,value',
+        [
+            (h2.settings.SettingCodes.ENABLE_PUSH, 2),
+            (h2.settings.SettingCodes.ENABLE_PUSH, -1),
+            (h2.settings.SettingCodes.INITIAL_WINDOW_SIZE, -1),
+            (h2.settings.SettingCodes.INITIAL_WINDOW_SIZE, 2**34),
+            (h2.settings.SettingCodes.MAX_FRAME_SIZE, 1),
+            (h2.settings.SettingCodes.MAX_FRAME_SIZE, 2**30),
+            (h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE, -1),
+        ]
+    )
+    def test_cannot_set_invalid_initial_values(self, setting, value):
+        """
+        The Settings object rejects invalid initial values by raising an
+        InvalidSettingsValueError.
+        """
+        overrides = {setting: value}
+
+        with pytest.raises(h2.exceptions.InvalidSettingsValueError):
+            h2.settings.Settings(initial_values=overrides)
+
+    def test_applying_value_doesnt_take_effect_immediately(self):
+        """
+        When a value is applied to the settings object, it doesn't immediately
+        take effect.
+        """
+        s = h2.settings.Settings(client=True)
+        s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] = 8000
+
+        assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 4096
+
+    def test_acknowledging_values(self):
+        """
+        When we acknowledge settings, the values change.
+        """
+        s = h2.settings.Settings(client=True)
+        old_settings = dict(s)
+
+        new_settings = {
+            h2.settings.SettingCodes.HEADER_TABLE_SIZE: 4000,
+            h2.settings.SettingCodes.ENABLE_PUSH: 0,
+            h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: 60,
+            h2.settings.SettingCodes.MAX_FRAME_SIZE: 16385,
+        }
+        s.update(new_settings)
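+        # update() only stages the new values; they become the current values
+        # once acknowledge() is called, mirroring the SETTINGS ACK handshake.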
+
+        assert dict(s) == old_settings
+        s.acknowledge()
+        assert dict(s) == new_settings
+
+    def test_acknowledging_returns_the_changed_settings(self):
+        """
+        Acknowledging settings returns the changes.
+        """
+        s = h2.settings.Settings(client=True)
+        s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] = 8000
+        s[h2.settings.SettingCodes.ENABLE_PUSH] = 0
+
+        changes = s.acknowledge()
+        assert len(changes) == 2
+
+        table_size_change = (
+            changes[h2.settings.SettingCodes.HEADER_TABLE_SIZE]
+        )
+        push_change = changes[h2.settings.SettingCodes.ENABLE_PUSH]
+
+        assert table_size_change.setting == (
+            h2.settings.SettingCodes.HEADER_TABLE_SIZE
+        )
+        assert table_size_change.original_value == 4096
+        assert table_size_change.new_value == 8000
+
+        assert push_change.setting == h2.settings.SettingCodes.ENABLE_PUSH
+        assert push_change.original_value == 1
+        assert push_change.new_value == 0
+
+    def test_acknowledging_only_returns_changed_settings(self):
+        """
+        Acknowledging settings does not return unchanged settings.
+        """
+        s = h2.settings.Settings(client=True)
+        s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] = 70
+
+        changes = s.acknowledge()
+        assert len(changes) == 1
+        assert list(changes.keys()) == [
+            h2.settings.SettingCodes.INITIAL_WINDOW_SIZE
+        ]
+
+    def test_deleting_values_deletes_all_of_them(self):
+        """
+        When we delete a key we lose all state about it.
+        """
+        s = h2.settings.Settings(client=True)
+        s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] = 8000
+
+        del s[h2.settings.SettingCodes.HEADER_TABLE_SIZE]
+
+        with pytest.raises(KeyError):
+            s[h2.settings.SettingCodes.HEADER_TABLE_SIZE]
+
+    def test_length_correctly_reported(self):
+        """
+        Length is related only to the number of keys.
+        """
+        s = h2.settings.Settings(client=True)
+        assert len(s) == 4
+
+        s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] = 8000
+        assert len(s) == 4
+
+        s.acknowledge()
+        assert len(s) == 4
+
+        del s[h2.settings.SettingCodes.HEADER_TABLE_SIZE]
+        assert len(s) == 3
+
+    def test_new_values_work(self):
+        """
+        New values initially don't appear
+        """
+        s = h2.settings.Settings(client=True)
+        s[80] = 81
+
+        with pytest.raises(KeyError):
+            s[80]
+
+    def test_new_values_follow_basic_acknowledgement_rules(self):
+        """
+        A new value properly appears when acknowledged.
+        """
+        s = h2.settings.Settings(client=True)
+        s[80] = 81
+        changed_settings = s.acknowledge()
+
+        assert s[80] == 81
+        assert len(changed_settings) == 1
+
+        changed = changed_settings[80]
+        assert changed.setting == 80
+        assert changed.original_value is None
+        assert changed.new_value == 81
+
+    def test_single_values_arent_affected_by_acknowledgement(self):
+        """
+        When acknowledged, unchanged settings remain unchanged.
+        """
+        s = h2.settings.Settings(client=True)
+        assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 4096
+
+        s.acknowledge()
+        assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 4096
+
+    def test_settings_getters(self):
+        """
+        Getters exist for well-known settings.
+        """
+        s = h2.settings.Settings(client=True)
+
+        assert s.header_table_size == (
+            s[h2.settings.SettingCodes.HEADER_TABLE_SIZE]
+        )
+        assert s.enable_push == s[h2.settings.SettingCodes.ENABLE_PUSH]
+        assert s.initial_window_size == (
+            s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE]
+        )
+        assert s.max_frame_size == s[h2.settings.SettingCodes.MAX_FRAME_SIZE]
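+        # 2**32 + 1 exceeds anything a peer can place in the 32-bit SETTINGS
+        # value field, so the default below effectively means "no limit
+        # advertised".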
+        assert s.max_concurrent_streams == 2**32 + 1  # A sensible default.
+        assert s.max_header_list_size is None
+
+    def test_settings_setters(self):
+        """
+        Setters exist for well-known settings.
+        """
+        s = h2.settings.Settings(client=True)
+
+        s.header_table_size = 0
+        s.enable_push = 1
+        s.initial_window_size = 2
+        s.max_frame_size = 16385
+        s.max_concurrent_streams = 4
+        s.max_header_list_size = 2**16
+
+        s.acknowledge()
+        assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 0
+        assert s[h2.settings.SettingCodes.ENABLE_PUSH] == 1
+        assert s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] == 2
+        assert s[h2.settings.SettingCodes.MAX_FRAME_SIZE] == 16385
+        assert s[h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS] == 4
+        assert s[h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE] == 2**16
+
+    @given(integers())
+    def test_cannot_set_invalid_values_for_enable_push(self, val):
+        """
+        SETTINGS_ENABLE_PUSH only allows two values: 0, 1.
+        """
+        assume(val not in (0, 1))
+        s = h2.settings.Settings()
+
+        with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
+            s.enable_push = val
+
+        s.acknowledge()
+        assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
+        assert s.enable_push == 1
+
+        with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
+            s[h2.settings.SettingCodes.ENABLE_PUSH] = val
+
+        s.acknowledge()
+        assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
+        assert s[h2.settings.SettingCodes.ENABLE_PUSH] == 1
+
+    @given(integers())
+    def test_cannot_set_invalid_vals_for_initial_window_size(self, val):
+        """
+        SETTINGS_INITIAL_WINDOW_SIZE only allows values between 0 and 2**31 - 1
+        inclusive.
+        """
+        s = h2.settings.Settings()
+
+        if 0 <= val <= 2**31 - 1:
+            s.initial_window_size = val
+            s.acknowledge()
+            assert s.initial_window_size == val
+        else:
+            with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
+                s.initial_window_size = val
+
+            s.acknowledge()
+            assert (
+                e.value.error_code == h2.errors.ErrorCodes.FLOW_CONTROL_ERROR
+            )
+            assert s.initial_window_size == 65535
+
+            with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
+                s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] = val
+
+            s.acknowledge()
+            assert (
+                e.value.error_code == h2.errors.ErrorCodes.FLOW_CONTROL_ERROR
+            )
+            assert s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] == 65535
+
+    @given(integers())
+    def test_cannot_set_invalid_values_for_max_frame_size(self, val):
+        """
+        SETTINGS_MAX_FRAME_SIZE only allows values between 2**14 and 2**24 - 1.
+        """
+        s = h2.settings.Settings()
+
+        if 2**14 <= val <= 2**24 - 1:
+            s.max_frame_size = val
+            s.acknowledge()
+            assert s.max_frame_size == val
+        else:
+            with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
+                s.max_frame_size = val
+
+            s.acknowledge()
+            assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
+            assert s.max_frame_size == 16384
+
+            with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
+                s[h2.settings.SettingCodes.MAX_FRAME_SIZE] = val
+
+            s.acknowledge()
+            assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
+            assert s[h2.settings.SettingCodes.MAX_FRAME_SIZE] == 16384
+
+    @given(integers())
+    def test_cannot_set_invalid_values_for_max_header_list_size(self, val):
+        """
+        SETTINGS_MAX_HEADER_LIST_SIZE only allows non-negative values.
+        """
+        s = h2.settings.Settings()
+
+        if val >= 0:
+            s.max_header_list_size = val
+            s.acknowledge()
+            assert s.max_header_list_size == val
+        else:
+            with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
+                s.max_header_list_size = val
+
+            s.acknowledge()
+            assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
+            assert s.max_header_list_size is None
+
+            with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
+                s[h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE] = val
+
+            s.acknowledge()
+            assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
+
+            with pytest.raises(KeyError):
+                s[h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE]
+
+
+class TestSettingsEquality(object):
+    """
+    A class defining tests for the standard implementation of == and != .
+    """
+
+    def an_instance(self):
+        """
+        Return an instance of the class under test.  Each call to this method
+        must return a different object.  All objects returned must be equal to
+        each other.
+        """
+        overrides = {
+            h2.settings.SettingCodes.HEADER_TABLE_SIZE: 0,
+            h2.settings.SettingCodes.MAX_FRAME_SIZE: 16384,
+            h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 4,
+            h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 2**16,
+        }
+        return h2.settings.Settings(client=True, initial_values=overrides)
+
+    def another_instance(self):
+        """
+        Return an instance of the class under test.  Each call to this method
+        must return a different object.  The objects must not be equal to the
+        objects returned by an_instance.  They may or may not be equal to
+        each other (they will not be compared against each other).
+        """
+        overrides = {
+            h2.settings.SettingCodes.HEADER_TABLE_SIZE: 8080,
+            h2.settings.SettingCodes.MAX_FRAME_SIZE: 16388,
+            h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,
+            h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 2**16,
+        }
+        return h2.settings.Settings(client=False, initial_values=overrides)
+
+    def test_identical_eq(self):
+        """
+        An object compares equal to itself using the == operator.
+        """
+        o = self.an_instance()
+        assert (o == o)
+
+    def test_identical_ne(self):
+        """
+        An object doesn't compare not equal to itself using the != operator.
+        """
+        o = self.an_instance()
+        assert not (o != o)
+
+    def test_same_eq(self):
+        """
+        Two objects that are equal to each other compare equal to each other
+        using the == operator.
+        """
+        a = self.an_instance()
+        b = self.an_instance()
+        assert (a == b)
+
+    def test_same_ne(self):
+        """
+        Two objects that are equal to each other do not compare not equal to
+        each other using the != operator.
+        """
+        a = self.an_instance()
+        b = self.an_instance()
+        assert not (a != b)
+
+    def test_different_eq(self):
+        """
+        Two objects that are not equal to each other do not compare equal to
+        each other using the == operator.
+        """
+        a = self.an_instance()
+        b = self.another_instance()
+        assert not (a == b)
+
+    def test_different_ne(self):
+        """
+        Two objects that are not equal to each other compare not equal to each
+        other using the != operator.
+        """
+        a = self.an_instance()
+        b = self.another_instance()
+        assert (a != b)
+
+    def test_another_type_eq(self):
+        """
+        The object does not compare equal to an object of an unrelated type
+        (which does not implement the comparison) using the == operator.
+        """
+        a = self.an_instance()
+        b = object()
+        assert not (a == b)
+
+    def test_another_type_ne(self):
+        """
+        The object compares not equal to an object of an unrelated type (which
+        does not implement the comparison) using the != operator.
+        """
+        a = self.an_instance()
+        b = object()
+        assert (a != b)
+
+    def test_delegated_eq(self):
+        """
+        The result of comparison using == is delegated to the right-hand
+        operand if it is of an unrelated type.
+        """
+        class Delegate(object):
+            def __eq__(self, other):
+                return [self]
+
+        a = self.an_instance()
+        b = Delegate()
+        assert (a == b) == [b]
+
+    def test_delegate_ne(self):
+        """
+        The result of comparison using != is delegated to the right-hand
+        operand if it is of an unrelated type.
+        """
+        class Delegate(object):
+            def __ne__(self, other):
+                return [self]
+
+        a = self.an_instance()
+        b = Delegate()
+        assert (a != b) == [b]
diff --git a/tools/third_party/h2/test/test_state_machines.py b/tools/third_party/h2/test/test_state_machines.py
new file mode 100755
index 0000000..034ae90
--- /dev/null
+++ b/tools/third_party/h2/test/test_state_machines.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+"""
+test_state_machines
+~~~~~~~~~~~~~~~~~~~
+
+These tests validate the state machines directly. Writing meaningful tests for
+this case can be tricky, so the majority of these tests use Hypothesis to try
+to talk about general behaviours rather than specific cases.
+"""
+import pytest
+
+import h2.connection
+import h2.exceptions
+import h2.stream
+
+from hypothesis import given
+from hypothesis.strategies import sampled_from
+
+
+class TestConnectionStateMachine(object):
+    """
+    Tests of the connection state machine.
+    """
+    @given(state=sampled_from(h2.connection.ConnectionState),
+           input_=sampled_from(h2.connection.ConnectionInputs))
+    def test_state_transitions(self, state, input_):
+        c = h2.connection.H2ConnectionStateMachine()
+        c.state = state
+
+        try:
+            c.process_input(input_)
+        except h2.exceptions.ProtocolError:
+            assert c.state == h2.connection.ConnectionState.CLOSED
+        else:
+            assert c.state in h2.connection.ConnectionState
+
+    def test_state_machine_only_allows_connection_states(self):
+        """
+        The Connection state machine only allows ConnectionState inputs.
+        """
+        c = h2.connection.H2ConnectionStateMachine()
+
+        with pytest.raises(ValueError):
+            c.process_input(1)
+
+    @pytest.mark.parametrize(
+        "state",
+        (
+            s for s in h2.connection.ConnectionState
+            if s != h2.connection.ConnectionState.CLOSED
+        ),
+    )
+    @pytest.mark.parametrize(
+        "input_",
+        [
+            h2.connection.ConnectionInputs.RECV_PRIORITY,
+            h2.connection.ConnectionInputs.SEND_PRIORITY
+        ]
+    )
+    def test_priority_frames_allowed_in_all_states(self, state, input_):
+        """
+        Priority frames can be sent/received in all connection states except
+        closed.
+        """
+        c = h2.connection.H2ConnectionStateMachine()
+        c.state = state
+
+        c.process_input(input_)
+
+
+class TestStreamStateMachine(object):
+    """
+    Tests of the stream state machine.
+    """
+    @given(state=sampled_from(h2.stream.StreamState),
+           input_=sampled_from(h2.stream.StreamInputs))
+    def test_state_transitions(self, state, input_):
+        s = h2.stream.H2StreamStateMachine(stream_id=1)
+        s.state = state
+
+        try:
+            s.process_input(input_)
+        except h2.exceptions.StreamClosedError:
+            # This can only happen for streams that started in the closed
+            # state OR where the input was RECV_DATA and the state was not
+            # OPEN or HALF_CLOSED_LOCAL OR where the state was
+            # HALF_CLOSED_REMOTE and a frame was received.
+            if state == h2.stream.StreamState.CLOSED:
+                assert s.state == h2.stream.StreamState.CLOSED
+            elif input_ == h2.stream.StreamInputs.RECV_DATA:
+                assert s.state == h2.stream.StreamState.CLOSED
+                assert state not in (
+                    h2.stream.StreamState.OPEN,
+                    h2.stream.StreamState.HALF_CLOSED_LOCAL,
+                )
+            elif state == h2.stream.StreamState.HALF_CLOSED_REMOTE:
+                assert input_ in (
+                    h2.stream.StreamInputs.RECV_HEADERS,
+                    h2.stream.StreamInputs.RECV_PUSH_PROMISE,
+                    h2.stream.StreamInputs.RECV_DATA,
+                    h2.stream.StreamInputs.RECV_CONTINUATION,
+                )
+        except h2.exceptions.ProtocolError:
+            assert s.state == h2.stream.StreamState.CLOSED
+        else:
+            assert s.state in h2.stream.StreamState
+
+    def test_state_machine_only_allows_stream_states(self):
+        """
+        The Stream state machine only allows StreamState inputs.
+        """
+        s = h2.stream.H2StreamStateMachine(stream_id=1)
+
+        with pytest.raises(ValueError):
+            s.process_input(1)
+
+    def test_stream_state_machine_forbids_pushes_on_server_streams(self):
+        """
+        Streams where this peer is a server do not allow receiving pushed
+        frames.
+        """
+        s = h2.stream.H2StreamStateMachine(stream_id=1)
+        s.process_input(h2.stream.StreamInputs.RECV_HEADERS)
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            s.process_input(h2.stream.StreamInputs.RECV_PUSH_PROMISE)
+
+    def test_stream_state_machine_forbids_sending_pushes_from_clients(self):
+        """
+        Streams where this peer is a client do not allow sending pushed frames.
+        """
+        s = h2.stream.H2StreamStateMachine(stream_id=1)
+        s.process_input(h2.stream.StreamInputs.SEND_HEADERS)
+
+        with pytest.raises(h2.exceptions.ProtocolError):
+            s.process_input(h2.stream.StreamInputs.SEND_PUSH_PROMISE)
+
+    @pytest.mark.parametrize(
+        "input_",
+        [
+            h2.stream.StreamInputs.SEND_HEADERS,
+            h2.stream.StreamInputs.SEND_PUSH_PROMISE,
+            h2.stream.StreamInputs.SEND_RST_STREAM,
+            h2.stream.StreamInputs.SEND_DATA,
+            h2.stream.StreamInputs.SEND_WINDOW_UPDATE,
+            h2.stream.StreamInputs.SEND_END_STREAM,
+        ]
+    )
+    def test_cannot_send_on_closed_streams(self, input_):
+        """
+        Sending anything but a PRIORITY frame is forbidden on closed streams.
+        """
+        c = h2.stream.H2StreamStateMachine(stream_id=1)
+        c.state = h2.stream.StreamState.CLOSED
+
+        expected_error = (
+            h2.exceptions.ProtocolError
+            if input_ == h2.stream.StreamInputs.SEND_PUSH_PROMISE
+            else h2.exceptions.StreamClosedError
+        )
+
+        with pytest.raises(expected_error):
+            c.process_input(input_)
diff --git a/tools/third_party/h2/test/test_stream_reset.py b/tools/third_party/h2/test/test_stream_reset.py
new file mode 100755
index 0000000..08886e8
--- /dev/null
+++ b/tools/third_party/h2/test/test_stream_reset.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+"""
+test_stream_reset
+~~~~~~~~~~~~~~~~~
+
+More complex tests that exercise stream resetting functionality to validate
+that connection state is appropriately maintained.
+
+Specifically, these tests validate that streams that have been reset accurately
+keep track of connection-level state.
+"""
+import pytest
+
+import h2.connection
+import h2.errors
+import h2.events
+
+
+class TestStreamReset(object):
+    """
+    Tests for resetting streams.
+    """
+    example_request_headers = [
+        (b':authority', b'example.com'),
+        (b':path', b'/'),
+        (b':scheme', b'https'),
+        (b':method', b'GET'),
+    ]
+    example_response_headers = [
+        (b':status', b'200'),
+        (b'server', b'fake-serv/0.1.0'),
+        (b'content-length', b'0')
+    ]
+
+    def test_reset_stream_keeps_header_state_correct(self, frame_factory):
+        """
+        A stream that has been reset still affects the header decoder.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+        c.reset_stream(stream_id=1)
+        c.send_headers(stream_id=3, headers=self.example_request_headers)
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_response_headers, stream_id=1
+        )
+        rst_frame = frame_factory.build_rst_stream_frame(
+            1, h2.errors.ErrorCodes.STREAM_CLOSED
+        )
+        events = c.receive_data(f.serialize())
+        assert not events
+        assert c.data_to_send() == rst_frame.serialize()
+
+        # This works because the header state should be intact from the headers
+        # frame that was sent on stream 1, so they should decode cleanly.
+        f = frame_factory.build_headers_frame(
+            headers=self.example_response_headers, stream_id=3
+        )
+        event = c.receive_data(f.serialize())[0]
+
+        assert isinstance(event, h2.events.ResponseReceived)
+        assert event.stream_id == 3
+        assert event.headers == self.example_response_headers
+
+    @pytest.mark.parametrize('close_id,other_id', [(1, 3), (3, 1)])
+    def test_reset_stream_keeps_flow_control_correct(self,
+                                                     close_id,
+                                                     other_id,
+                                                     frame_factory):
+        """
+        A stream that has been reset still affects the connection flow control
+        window.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+        c.send_headers(stream_id=3, headers=self.example_request_headers)
+
+        # Record the initial window size.
+        initial_window = c.remote_flow_control_window(stream_id=other_id)
+
+        f = frame_factory.build_headers_frame(
+            headers=self.example_response_headers, stream_id=close_id
+        )
+        c.receive_data(f.serialize())
+        c.reset_stream(stream_id=close_id)
+        c.clear_outbound_data_buffer()
+
+        f = frame_factory.build_data_frame(
+            data=b'some data!',
+            stream_id=close_id
+        )
+        events = c.receive_data(f.serialize())
+
+        rst_frame = frame_factory.build_rst_stream_frame(
+            close_id, h2.errors.ErrorCodes.STREAM_CLOSED
+        )
+        assert not events
+        assert c.data_to_send() == rst_frame.serialize()
+
+        new_window = c.remote_flow_control_window(stream_id=other_id)
+        assert initial_window - len(b'some data!') == new_window
+
+    @pytest.mark.parametrize('clear_streams', [True, False])
+    def test_reset_stream_automatically_resets_pushed_streams(self,
+                                                              frame_factory,
+                                                              clear_streams):
+        """
+        Resetting a stream causes RST_STREAM frames to be automatically emitted
+        to close any streams pushed after the reset.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        c.send_headers(stream_id=1, headers=self.example_request_headers)
+        c.reset_stream(stream_id=1)
+        c.clear_outbound_data_buffer()
+
+        if clear_streams:
+            # Call open_outbound_streams to force the connection to clean
+            # closed streams.
+            c.open_outbound_streams
+
+        f = frame_factory.build_push_promise_frame(
+            stream_id=1,
+            promised_stream_id=2,
+            headers=self.example_request_headers,
+        )
+        events = c.receive_data(f.serialize())
+        assert not events
+
+        f = frame_factory.build_rst_stream_frame(
+            stream_id=2,
+            error_code=h2.errors.ErrorCodes.REFUSED_STREAM,
+        )
+        assert c.data_to_send() == f.serialize()
diff --git a/tools/third_party/h2/test/test_utility_functions.py b/tools/third_party/h2/test/test_utility_functions.py
new file mode 100755
index 0000000..61d527f
--- /dev/null
+++ b/tools/third_party/h2/test/test_utility_functions.py
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+"""
+test_utility_functions
+~~~~~~~~~~~~~~~~~~~~~~
+
+Tests for the various utility functions provided by hyper-h2.
+"""
+import pytest
+
+import h2.config
+import h2.connection
+import h2.errors
+import h2.events
+import h2.exceptions
+from h2.utilities import extract_method_header
+
+# These tests require a non-list-returning range function.
+try:
+    range = xrange
+except NameError:
+    range = range
+
+
+class TestGetNextAvailableStreamID(object):
+    """
+    Tests for the ``H2Connection.get_next_available_stream_id`` method.
+    """
+    example_request_headers = [
+        (':authority', 'example.com'),
+        (':path', '/'),
+        (':scheme', 'https'),
+        (':method', 'GET'),
+    ]
+    example_response_headers = [
+        (':status', '200'),
+        ('server', 'fake-serv/0.1.0')
+    ]
+    server_config = h2.config.H2Configuration(client_side=False)
+
+    def test_returns_correct_sequence_for_clients(self, frame_factory):
+        """
+        For a client connection, the correct sequence of stream IDs is
+        returned.
+        """
+        # Running the exhaustive version of this test (all 1 billion available
+        # stream IDs) is too painful. For that reason, we validate that the
+        # original sequence is right for the first few thousand, and then just
+        # check that it terminates properly.
+        #
+        # Make sure that the streams get cleaned up: 8k streams floating
+        # around would make this test memory-hard, and it's not supposed to be
+        # a test of how much RAM your machine has.
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+        initial_sequence = range(1, 2**13, 2)
+
+        for expected_stream_id in initial_sequence:
+            stream_id = c.get_next_available_stream_id()
+            assert stream_id == expected_stream_id
+
+            c.send_headers(
+                stream_id=stream_id,
+                headers=self.example_request_headers,
+                end_stream=True
+            )
+            f = frame_factory.build_headers_frame(
+                headers=self.example_response_headers,
+                stream_id=stream_id,
+                flags=['END_STREAM'],
+            )
+            c.receive_data(f.serialize())
+            c.clear_outbound_data_buffer()
+
+        # Jump up to the last available stream ID. Don't clean up the stream
+        # here because who cares about one stream.
+        last_client_id = 2**31 - 1
+        c.send_headers(
+            stream_id=last_client_id,
+            headers=self.example_request_headers,
+            end_stream=True
+        )
+
+        with pytest.raises(h2.exceptions.NoAvailableStreamIDError):
+            c.get_next_available_stream_id()
+
+    def test_returns_correct_sequence_for_servers(self, frame_factory):
+        """
+        For a server connection, the correct sequence of stream IDs is
+        returned.
+        """
+        # Running the exhaustive version of this test (all 1 billion available
+        # stream IDs) is too painful. For that reason, we validate that the
+        # original sequence is right for the first few thousand, and then just
+        # check that it terminates properly.
+        #
+        # Make sure that the streams get cleaned up: 8k streams floating
+        # around would make this test memory-hard, and it's not supposed to be
+        # a test of how much RAM your machine has.
+        c = h2.connection.H2Connection(config=self.server_config)
+        c.initiate_connection()
+        c.receive_data(frame_factory.preamble())
+        f = frame_factory.build_headers_frame(
+            headers=self.example_request_headers
+        )
+        c.receive_data(f.serialize())
+
+        initial_sequence = range(2, 2**13, 2)
+
+        for expected_stream_id in initial_sequence:
+            stream_id = c.get_next_available_stream_id()
+            assert stream_id == expected_stream_id
+
+            c.push_stream(
+                stream_id=1,
+                promised_stream_id=stream_id,
+                request_headers=self.example_request_headers
+            )
+            c.send_headers(
+                stream_id=stream_id,
+                headers=self.example_response_headers,
+                end_stream=True
+            )
+            c.clear_outbound_data_buffer()
+
+        # Jump up to the last available stream ID. Don't clean up the stream
+        # here because who cares about one stream.
+        last_server_id = 2**31 - 2
+        c.push_stream(
+            stream_id=1,
+            promised_stream_id=last_server_id,
+            request_headers=self.example_request_headers,
+        )
+
+        with pytest.raises(h2.exceptions.NoAvailableStreamIDError):
+            c.get_next_available_stream_id()
+
+    def test_does_not_increment_without_stream_send(self):
+        """
+        If a new stream isn't actually created, the next stream ID doesn't
+        change.
+        """
+        c = h2.connection.H2Connection()
+        c.initiate_connection()
+
+        first_stream_id = c.get_next_available_stream_id()
+        second_stream_id = c.get_next_available_stream_id()
+
+        assert first_stream_id == second_stream_id
+
+        c.send_headers(
+            stream_id=first_stream_id,
+            headers=self.example_request_headers
+        )
+
+        third_stream_id = c.get_next_available_stream_id()
+        assert third_stream_id == (first_stream_id + 2)
+
+
+class TestExtractHeader(object):
+
+    example_request_headers = [
+            (u':authority', u'example.com'),
+            (u':path', u'/'),
+            (u':scheme', u'https'),
+            (u':method', u'GET'),
+    ]
+    example_headers_with_bytes = [
+            (b':authority', b'example.com'),
+            (b':path', b'/'),
+            (b':scheme', b'https'),
+            (b':method', b'GET'),
+    ]
+
+    @pytest.mark.parametrize(
+        'headers', [example_request_headers, example_headers_with_bytes]
+    )
+    def test_extract_header_method(self, headers):
+        assert extract_method_header(headers) == b'GET'
diff --git a/tools/third_party/h2/test_requirements.txt b/tools/third_party/h2/test_requirements.txt
new file mode 100755
index 0000000..35bcbc2
--- /dev/null
+++ b/tools/third_party/h2/test_requirements.txt
@@ -0,0 +1,5 @@
+pytest==3.0.7
+pytest-cov==2.4.0
+coverage==4.3.4
+pytest-xdist==1.15.0
+hypothesis==3.7.0
diff --git a/tools/third_party/h2/tox.ini b/tools/third_party/h2/tox.ini
new file mode 100755
index 0000000..a3d6c4a
--- /dev/null
+++ b/tools/third_party/h2/tox.ini
@@ -0,0 +1,54 @@
+[tox]
+envlist = py27, py33, py34, py35, py36, pypy, lint, packaging, docs
+
+[testenv]
+deps= -r{toxinidir}/test_requirements.txt
+commands=
+    coverage run -m py.test {posargs} {toxinidir}/test/
+    coverage report
+
+[testenv:pypy]
+# temporarily disable coverage testing on PyPy due to performance problems
+commands= py.test {posargs} {toxinidir}/test/
+
+[testenv:py27-twistedMaster]
+# This is a validation test that confirms that Twisted's test cases haven't
+# broken.
+deps =
+    # [tls,http2] syntax doesn't work here so we enumerate all dependencies.
+    git+https://github.com/twisted/twisted
+    pyopenssl
+    service_identity
+    idna
+    priority
+    sphinx
+commands = python -m twisted.trial --reporter=text twisted
+
+[testenv:lint]
+basepython=python3.4
+deps = flake8==3.3.0
+commands = flake8 --max-complexity 10 h2 test
+
+[testenv:docs]
+basepython=python3.5
+deps = sphinx==1.4.9
+changedir = {toxinidir}/docs
+whitelist_externals = rm
+commands =
+    rm -rf build
+    sphinx-build -nW -b html -d build/doctrees source build/html
+
+[testenv:graphs]
+basepython=python2.7
+deps = graphviz==0.6
+commands =
+    python visualizer/visualize.py -i docs/source/_static
+
+[testenv:packaging]
+basepython=python2.7
+deps =
+    check-manifest==0.35
+    readme_renderer==17.2
+commands =
+    check-manifest
+    python setup.py check --metadata --restructuredtext --strict
diff --git a/tools/third_party/h2/utils/backport.sh b/tools/third_party/h2/utils/backport.sh
new file mode 100755
index 0000000..3f5d076
--- /dev/null
+++ b/tools/third_party/h2/utils/backport.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+# This script is invoked as follows: the first argument is the target branch
+# for the backport. All following arguments are considered the "commit spec",
+# and will be passed to cherry-pick.
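+# Example: ./backport.sh <target-branch> <commit-sha> [<commit-sha>...]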
+
+TARGET_BRANCH="$1"
+PR_BRANCH="backport-${TARGET_BRANCH}"
+COMMIT_SPEC="${@:2}"
+
+if ! git checkout "$TARGET_BRANCH"; then
+    echo "Failed to checkout $TARGET_BRANCH"
+    exit 1
+fi
+
+if ! git pull --ff-only; then
+    echo "Unable to update $TARGET_BRANCH"
+    exit 2
+fi
+
+if ! git checkout -b "$PR_BRANCH"; then
+    echo "Failed to open new branch $PR_BRANCH"
+    exit 3
+fi
+
+if ! git cherry-pick -x $COMMIT_SPEC; then
+    echo "Cherry-pick failed. Please fix up manually."
+else
+    echo "Clean backport. Add changelog and open PR."
+fi
+
diff --git a/tools/third_party/h2/visualizer/NOTICES.visualizer b/tools/third_party/h2/visualizer/NOTICES.visualizer
new file mode 100755
index 0000000..202ca64
--- /dev/null
+++ b/tools/third_party/h2/visualizer/NOTICES.visualizer
@@ -0,0 +1,24 @@
+This module contains code inspired by and borrowed from Automat. That code was
+made available under the following license:
+
+Copyright (c) 2014
+Rackspace
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/tools/third_party/h2/visualizer/visualize.py b/tools/third_party/h2/visualizer/visualize.py
new file mode 100755
index 0000000..1fd3f17
--- /dev/null
+++ b/tools/third_party/h2/visualizer/visualize.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+"""
+State Machine Visualizer
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This code provides a module that can use graphviz to visualise the state
+machines included in hyper-h2. These visualisations can be used as part of the
+documentation of hyper-h2, and as a reference material to understand how the
+state machines function.
+
+The code in this module is heavily inspired by code in Automat, which can be
+found here: https://github.com/glyph/automat. For details on the licensing of
+Automat, please see the NOTICES.visualizer file in this folder.
+
+This module is very deliberately not shipped with the rest of hyper-h2. This is
+because it is of minimal value to users who are installing hyper-h2: its use
+is only really for the developers of hyper-h2.
+"""
+from __future__ import print_function
+import argparse
+import collections
+import sys
+
+import graphviz
+import graphviz.files
+
+import h2.connection
+import h2.stream
+
+
+StateMachine = collections.namedtuple(
+    'StateMachine', ['fqdn', 'machine', 'states', 'inputs', 'transitions']
+)
+
+
+# This is all the state machines we currently know about and will render.
+# If any new state machines are added, they should be inserted here.
+STATE_MACHINES = [
+    StateMachine(
+        fqdn='h2.connection.H2ConnectionStateMachine',
+        machine=h2.connection.H2ConnectionStateMachine,
+        states=h2.connection.ConnectionState,
+        inputs=h2.connection.ConnectionInputs,
+        transitions=h2.connection.H2ConnectionStateMachine._transitions,
+    ),
+    StateMachine(
+        fqdn='h2.stream.H2StreamStateMachine',
+        machine=h2.stream.H2StreamStateMachine,
+        states=h2.stream.StreamState,
+        inputs=h2.stream.StreamInputs,
+        transitions=h2.stream._transitions,
+    ),
+]
+
+
+def quote(s):
+    return '"{}"'.format(s.replace('"', r'\"'))
+
+
+def html(s):
+    return '<{}>'.format(s)
+
+
+def element(name, *children, **attrs):
+    """
+    Construct a string from the HTML element description.
+    """
+    formatted_attributes = ' '.join(
+        '{}={}'.format(key, quote(str(value)))
+        for key, value in sorted(attrs.items())
+    )
+    formatted_children = ''.join(children)
+    return u'<{name} {attrs}>{children}</{name}>'.format(
+        name=name,
+        attrs=formatted_attributes,
+        children=formatted_children
+    )
+
+
+def row_for_output(event, side_effect):
+    """
+    Given an output tuple (an event and its side effect), generates a table row
+    from it.
+    """
+    point_size = {'point-size': '9'}
+    event_cell = element(
+        "td",
+        element("font", enum_member_name(event), **point_size)
+    )
+    side_effect_name = (
+        function_name(side_effect) if side_effect is not None else "None"
+    )
+    side_effect_cell = element(
+        "td",
+        element("font", side_effect_name, **point_size)
+    )
+    return element("tr", event_cell, side_effect_cell)
+
+
+def table_maker(initial_state, final_state, outputs, port):
+    """
+    Construct an HTML table to label a state transition.
+    """
+    header = "{} -&gt; {}".format(
+        enum_member_name(initial_state), enum_member_name(final_state)
+    )
+    header_row = element(
+        "tr",
+        element(
+            "td",
+            element(
+                "font",
+                header,
+                face="menlo-italic"
+            ),
+            port=port,
+            colspan="2",
+        )
+    )
+    rows = [header_row]
+    rows.extend(row_for_output(*output) for output in outputs)
+    return element("table", *rows)
+
+
+def enum_member_name(state):
+    """
+    All enum member names have the form <EnumClassName>.<EnumMemberName>. For
+    our rendering we only want the member name, so we take their representation
+    and split it.
+    """
+    return str(state).split('.', 1)[1]
+
+
+def function_name(func):
+    """
+    Given a side-effect function, return its string name.
+    """
+    return func.__name__
+
+
+def build_digraph(state_machine):
+    """
+    Produce a L{graphviz.Digraph} object from a state machine.
+    """
+    digraph = graphviz.Digraph(node_attr={'fontname': 'Menlo'},
+                               edge_attr={'fontname': 'Menlo'},
+                               graph_attr={'dpi': '200'})
+
+    # First, add the states as nodes.
+    seen_first_state = False
+    for state in state_machine.states:
+        if not seen_first_state:
+            state_shape = "bold"
+            font_name = "Menlo-Bold"
+        else:
+            state_shape = ""
+            font_name = "Menlo"
+        digraph.node(enum_member_name(state),
+                     fontname=font_name,
+                     shape="ellipse",
+                     style=state_shape,
+                     color="blue")
+        seen_first_state = True
+
+    # We frequently have very many inputs that all trigger the same state
+    # transition, and only differ in terms of their input and side-effect. It
+    # would be polite to say that graphviz does not handle this very well. So
+    # instead we *collapse* the state transitions all into the one edge, and
+    # then provide a label that displays a table of all the inputs and their
+    # associated side effects.
+    transitions = collections.defaultdict(list)
+    for transition in state_machine.transitions.items():
+        initial_state, event = transition[0]
+        side_effect, final_state = transition[1]
+        transition_key = (initial_state, final_state)
+        transitions[transition_key].append((event, side_effect))
+
+    for n, (transition_key, outputs) in enumerate(transitions.items()):
+        this_transition = "t{}".format(n)
+        initial_state, final_state = transition_key
+
+        port = "tableport"
+        table = table_maker(
+            initial_state=initial_state,
+            final_state=final_state,
+            outputs=outputs,
+            port=port
+        )
+
+        digraph.node(this_transition,
+                     label=html(table), margin="0.2", shape="none")
+
+        digraph.edge(enum_member_name(initial_state),
+                     '{}:{}:w'.format(this_transition, port),
+                     arrowhead="none")
+        digraph.edge('{}:{}:e'.format(this_transition, port),
+                     enum_member_name(final_state))
+
+    return digraph
+
+
+def main():
+    """
+    Renders all the state machines in hyper-h2 into images.
+    """
+    program_name = sys.argv[0]
+    argv = sys.argv[1:]
+
+    description = """
+    Visualize hyper-h2 state machines as graphs.
+    """
+    epilog = """
+    You must have the graphviz tool suite installed.  Please visit
+    http://www.graphviz.org for more information.
+    """
+
+    argument_parser = argparse.ArgumentParser(
+        prog=program_name,
+        description=description,
+        epilog=epilog
+    )
+    argument_parser.add_argument(
+        '--image-directory',
+        '-i',
+        help="Where to write out image files.",
+        default=".h2_visualize"
+    )
+    argument_parser.add_argument(
+        '--view',
+        '-v',
+        help="View rendered graphs with default image viewer",
+        default=False,
+        action="store_true"
+    )
+    args = argument_parser.parse_args(argv)
+
+    for state_machine in STATE_MACHINES:
+        print(state_machine.fqdn, '...discovered')
+
+        digraph = build_digraph(state_machine)
+
+        if args.image_directory:
+            digraph.format = "png"
+            digraph.render(filename="{}.dot".format(state_machine.fqdn),
+                           directory=args.image_directory,
+                           view=args.view,
+                           cleanup=True)
+            print(state_machine.fqdn, "...wrote image into", args.image_directory)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/third_party/hpack/CONTRIBUTORS.rst b/tools/third_party/hpack/CONTRIBUTORS.rst
new file mode 100644
index 0000000..f56d156
--- /dev/null
+++ b/tools/third_party/hpack/CONTRIBUTORS.rst
@@ -0,0 +1,62 @@
+Hyper is written and maintained by Cory Benfield and various contributors:
+
+Development Lead
+````````````````
+
+- Cory Benfield <cory@lukasa.co.uk>
+
+Contributors (hpack)
+````````````````````
+In chronological order:
+
+- Sriram Ganesan (@elricL)
+
+  - Implemented the Huffman encoding/decoding logic.
+
+- Tatsuhiro Tsujikawa (@tatsuhiro-t)
+
+  - Improved compression efficiency.
+
+- Jim Carreer (@jimcarreer)
+
+  - Support for 'never indexed' header fields.
+  - Refactor of header table code.
+  - Add support for returning bytestring headers instead of UTF-8 decoded ones.
+
+- Eugene Obukhov (@irvind)
+
+  - Improved decoding efficiency.
+
+- Ian Foote (@Ian-Foote)
+
+  - 25% performance improvement to integer decode.
+
+- Davey Shafik (@dshafik)
+
+  - More testing.
+
+- Seth Michael Larson (@SethMichaelLarson)
+
+  - Code cleanups.
+
+Contributors (hyper)
+````````````````````
+
+In chronological order:
+
+- Alek Storm (@alekstorm)
+
+  - Implemented Python 2.7 support.
+  - Implemented HTTP/2 draft 10 support.
+  - Implemented server push.
+
+- Tetsuya Morimoto (@t2y)
+
+  - Fixed a bug where large or incomplete frames were not handled correctly.
+  - Added hyper command-line tool.
+  - General code cleanups.
+
+- Jerome De Cuyper (@jdecuyper)
+
+  - Updated documentation and tests.
+
diff --git a/tools/third_party/hpack/HISTORY.rst b/tools/third_party/hpack/HISTORY.rst
new file mode 100644
index 0000000..37b2d9c
--- /dev/null
+++ b/tools/third_party/hpack/HISTORY.rst
@@ -0,0 +1,134 @@
+Release History
+===============
+
+3.0.0 (2017-03-29)
+------------------
+
+**API Changes (Backward Incompatible)**
+
+- Removed nghttp2 support. This support had rotted and was essentially
+  non-functional, so it has now been removed until someone has time to re-add
+  the support in a functional form.
+- Attempts by the encoder to exceed the maximum allowed header table size via
+  dynamic table size updates (or the absence thereof) are now forbidden.
+
+**API Changes (Backward Compatible)**
+
+- Added a new ``InvalidTableSizeError`` thrown when the encoder does not
+  respect the maximum table size set by the user.
+- Added a ``Decoder.max_allowed_table_size`` field that sets the maximum
+  allowed size of the decoder header table. See the documentation for an
+  indication of how this should be used.
+
+**Bugfixes**
+
+- Up to 25% performance improvement decoding HPACK-packed integers, depending
+  on the platform.
+- HPACK now tolerates receiving multiple header table size changes in sequence,
+  rather than only one.
+- HPACK now forbids header table size changes anywhere but first in a header
+  block, as required by RFC 7541 § 4.2.
+- Other miscellaneous performance improvements.
+
+2.3.0 (2016-08-04)
+------------------
+
+**Security Fixes**
+
+- CVE-2016-6581: HPACK Bomb. This release now enforces a maximum value of the
+  decompressed size of the header list. This is to avoid the so-called "HPACK
+  Bomb" vulnerability, which is caused when a malicious peer sends a compressed
+  HPACK body that decompresses to a gigantic header list size.
+
+  This also adds an ``OversizedHeaderListError``, which is thrown by the
+  ``decode`` method if the maximum header list size is being violated. This
+  places the HPACK decoder into a broken state: it must not be used after this
+  exception is thrown.
+
+  This also adds a ``max_header_list_size`` to the ``Decoder`` object. This
+  controls the maximum allowable decompressed size of the header list. By
+  default this is set to 64kB.
+
+2.2.0 (2016-04-20)
+------------------
+
+**API Changes (Backward Compatible)**
+
+- Added ``HeaderTuple`` and ``NeverIndexedHeaderTuple`` classes that signal
+  whether a given header field may ever be indexed in HTTP/2 header
+  compression.
+- Changed ``Decoder.decode()`` to return the newly added ``HeaderTuple`` class
+  and subclass. These objects behave like two-tuples, so this change does not
+  break working code.
+
+**Bugfixes**
+
+- Improve Huffman decoding speed by 4x using an approach borrowed from nghttp2.
+- Improve HPACK decoding speed by 10% by caching header table sizes.
+
+2.1.1 (2016-03-16)
+------------------
+
+**Bugfixes**
+
+- When passing a dictionary or dictionary subclass to ``Encoder.encode``, HPACK
+  now ensures that HTTP/2 special headers (headers whose names begin with
+  ``:`` characters) appear first in the header block.
+
+2.1.0 (2016-02-02)
+------------------
+
+**API Changes (Backward Compatible)**
+
+- Added new ``InvalidTableIndex`` exception, a subclass of
+  ``HPACKDecodingError``.
+- Instead of throwing ``IndexError`` when encountering invalid encoded integers
+  HPACK now throws ``HPACKDecodingError``.
+- Instead of throwing ``UnicodeDecodeError`` when encountering headers that are
+  not UTF-8 encoded, HPACK now throws ``HPACKDecodingError``.
+- Instead of throwing ``IndexError`` when encountering invalid table offsets,
+  HPACK now throws ``InvalidTableIndex``.
+- Added ``raw`` flag to ``decode``, allowing ``decode`` to return bytes instead
+  of attempting to decode the headers as UTF-8.
+
+**Bugfixes**
+
+- ``memoryview`` objects are now used when decoding HPACK, improving the
+  performance by avoiding unnecessary data copies.
+
+2.0.1 (2015-11-09)
+------------------
+
+- Fixed a bug where the Python HPACK implementation would only emit header
+  table size changes for the total change between one header block and another,
+  rather than for the entire sequence of changes.
+
+2.0.0 (2015-10-12)
+------------------
+
+- Remove unused ``HPACKEncodingError``.
+- Add the shortcut ability to import the public API (``Encoder``, ``Decoder``,
+  ``HPACKError``, ``HPACKDecodingError``) directly, rather than from
+  ``hpack.hpack``.
+
+1.1.0 (2015-07-07)
+------------------
+
+- Add support for emitting 'never indexed' header fields, by using an optional
+  third element in the header tuple. With thanks to @jimcarreer!
+
+1.0.1 (2015-04-19)
+------------------
+
+- Header fields that have names matching header table entries are now added to
+  the header table. This improves compression efficiency at the cost of
+  slightly more table operations. With thanks to `Tatsuhiro Tsujikawa`_.
+
+.. _Tatsuhiro Tsujikawa: https://github.com/tatsuhiro-t
+
+1.0.0 (2015-04-13)
+------------------
+
+- Initial fork of the code from `hyper`_.
+
+.. _hyper: https://hyper.readthedocs.org/
diff --git a/tools/third_party/hpack/LICENSE b/tools/third_party/hpack/LICENSE
new file mode 100644
index 0000000..d24c351
--- /dev/null
+++ b/tools/third_party/hpack/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cory Benfield
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/tools/third_party/hpack/MANIFEST.in b/tools/third_party/hpack/MANIFEST.in
new file mode 100644
index 0000000..2f46467
--- /dev/null
+++ b/tools/third_party/hpack/MANIFEST.in
@@ -0,0 +1,2 @@
+include README.rst LICENSE CONTRIBUTORS.rst HISTORY.rst
+
diff --git a/tools/third_party/hpack/PKG-INFO b/tools/third_party/hpack/PKG-INFO
new file mode 100644
index 0000000..c2a3a1a
--- /dev/null
+++ b/tools/third_party/hpack/PKG-INFO
@@ -0,0 +1,199 @@
+Metadata-Version: 1.1
+Name: hpack
+Version: 3.0.0
+Summary: Pure-Python HPACK header compression
+Home-page: http://hyper.rtfd.org
+Author: Cory Benfield
+Author-email: cory@lukasa.co.uk
+License: MIT License
+Description: ========================================
+        hpack: HTTP/2 Header Encoding for Python
+        ========================================
+        
+        .. image:: https://raw.github.com/Lukasa/hyper/development/docs/source/images/hyper.png
+        
+        .. image:: https://travis-ci.org/python-hyper/hpack.png?branch=master
+            :target: https://travis-ci.org/python-hyper/hpack
+        
+        This module contains a pure-Python HTTP/2 header encoding (HPACK) logic for use
+        in Python programs that implement HTTP/2. It also contains a compatibility
+        layer that automatically enables the use of ``nghttp2`` if it's available.
+        
+        Documentation
+        =============
+        
+        Documentation is available at http://python-hyper.org/hpack/.
+        
+        Contributing
+        ============
+        
+        ``hpack`` welcomes contributions from anyone! Unlike many other projects we are
+        happy to accept cosmetic contributions and small contributions, in addition to
+        large feature requests and changes.
+        
+        Before you contribute (either by opening an issue or filing a pull request),
+        please `read the contribution guidelines`_.
+        
+        .. _read the contribution guidelines: http://hyper.readthedocs.org/en/development/contributing.html
+        
+        License
+        =======
+        
+        ``hpack`` is made available under the MIT License. For more details, see the
+        ``LICENSE`` file in the repository.
+        
+        Authors
+        =======
+        
+        ``hpack`` is maintained by Cory Benfield, with contributions from others. For
+        more details about the contributors, please see ``CONTRIBUTORS.rst``.
+        
+        
+        Release History
+        ===============
+        
+        3.0.0 (2017-03-29)
+        ------------------
+        
+        **API Changes (Backward Incompatible)**
+        
+        - Removed nghttp2 support. This support had rotted and was essentially
+          non-functional, so it has now been removed until someone has time to re-add
+          the support in a functional form.
+        - Attempts by the encoder to exceed the maximum allowed header table size via
+          dynamic table size updates (or the absence thereof) are now forbidden.
+        
+        **API Changes (Backward Compatible)**
+        
+        - Added a new ``InvalidTableSizeError`` thrown when the encoder does not
+          respect the maximum table size set by the user.
+        - Added a ``Decoder.max_allowed_table_size`` field that sets the maximum
+          allowed size of the decoder header table. See the documentation for an
+          indication of how this should be used.
+        
+        **Bugfixes**
+        
+        - Up to 25% performance improvement decoding HPACK-packed integers, depending
+          on the platform.
+        - HPACK now tolerates receiving multiple header table size changes in sequence,
+          rather than only one.
+        - HPACK now forbids header table size changes anywhere but first in a header
+          block, as required by RFC 7541 § 4.2.
+        - Other miscellaneous performance improvements.
+        
+        2.3.0 (2016-08-04)
+        ------------------
+        
+        **Security Fixes**
+        
+        - CVE-2016-6581: HPACK Bomb. This release now enforces a maximum value of the
+          decompressed size of the header list. This is to avoid the so-called "HPACK
+          Bomb" vulnerability, which is caused when a malicious peer sends a compressed
+          HPACK body that decompresses to a gigantic header list size.
+        
+          This also adds an ``OversizedHeaderListError``, which is thrown by the
+          ``decode`` method if the maximum header list size is being violated. This
+          places the HPACK decoder into a broken state: it must not be used after this
+          exception is thrown.
+        
+          This also adds a ``max_header_list_size`` to the ``Decoder`` object. This
+          controls the maximum allowable decompressed size of the header list. By
+          default this is set to 64kB.
+        
+        2.2.0 (2016-04-20)
+        ------------------
+        
+        **API Changes (Backward Compatible)**
+        
+        - Added ``HeaderTuple`` and ``NeverIndexedHeaderTuple`` classes that signal
+          whether a given header field may ever be indexed in HTTP/2 header
+          compression.
+        - Changed ``Decoder.decode()`` to return the newly added ``HeaderTuple`` class
+          and subclass. These objects behave like two-tuples, so this change does not
+          break working code.
+        
+        **Bugfixes**
+        
+        - Improve Huffman decoding speed by 4x using an approach borrowed from nghttp2.
+        - Improve HPACK decoding speed by 10% by caching header table sizes.
+        
+        2.1.1 (2016-03-16)
+        ------------------
+        
+        **Bugfixes**
+        
+        - When passing a dictionary or dictionary subclass to ``Encoder.encode``, HPACK
+          now ensures that HTTP/2 special headers (headers whose names begin with
+          ``:`` characters) appear first in the header block.
+        
+        2.1.0 (2016-02-02)
+        ------------------
+        
+        **API Changes (Backward Compatible)**
+        
+        - Added new ``InvalidTableIndex`` exception, a subclass of
+          ``HPACKDecodingError``.
+        - Instead of throwing ``IndexError`` when encountering invalid encoded integers
+          HPACK now throws ``HPACKDecodingError``.
+        - Instead of throwing ``UnicodeDecodeError`` when encountering headers that are
+          not UTF-8 encoded, HPACK now throws ``HPACKDecodingError``.
+        - Instead of throwing ``IndexError`` when encountering invalid table offsets,
+          HPACK now throws ``InvalidTableIndex``.
+        - Added ``raw`` flag to ``decode``, allowing ``decode`` to return bytes instead
+          of attempting to decode the headers as UTF-8.
+        
+        **Bugfixes**
+        
+        - ``memoryview`` objects are now used when decoding HPACK, improving the
+          performance by avoiding unnecessary data copies.
+        
+        2.0.1 (2015-11-09)
+        ------------------
+        
+        - Fixed a bug where the Python HPACK implementation would only emit header
+          table size changes for the total change between one header block and another,
+          rather than for the entire sequence of changes.
+        
+        2.0.0 (2015-10-12)
+        ------------------
+        
+        - Remove unused ``HPACKEncodingError``.
+        - Add the shortcut ability to import the public API (``Encoder``, ``Decoder``,
+          ``HPACKError``, ``HPACKDecodingError``) directly, rather than from
+          ``hpack.hpack``.
+        
+        1.1.0 (2015-07-07)
+        ------------------
+        
+        - Add support for emitting 'never indexed' header fields, by using an optional
+          third element in the header tuple. With thanks to @jimcarreer!
+        
+        1.0.1 (2015-04-19)
+        ------------------
+        
+        - Header fields that have names matching header table entries are now added to
+          the header table. This improves compression efficiency at the cost of
+          slightly more table operations. With thanks to `Tatsuhiro Tsujikawa`_.
+        
+        .. _Tatsuhiro Tsujikawa: https://github.com/tatsuhiro-t
+        
+        1.0.0 (2015-04-13)
+        ------------------
+        
+        - Initial fork of the code from `hyper`_.
+        
+        .. _hyper: https://hyper.readthedocs.org/
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: Implementation :: CPython
diff --git a/tools/third_party/hpack/README.rst b/tools/third_party/hpack/README.rst
new file mode 100644
index 0000000..1a04397
--- /dev/null
+++ b/tools/third_party/hpack/README.rst
@@ -0,0 +1,41 @@
+========================================
+hpack: HTTP/2 Header Encoding for Python
+========================================
+
+.. image:: https://raw.github.com/Lukasa/hyper/development/docs/source/images/hyper.png
+
+.. image:: https://travis-ci.org/python-hyper/hpack.png?branch=master
+    :target: https://travis-ci.org/python-hyper/hpack
+
+This module contains a pure-Python HTTP/2 header encoding (HPACK) logic for use
+in Python programs that implement HTTP/2. It also contains a compatibility
+layer that automatically enables the use of ``nghttp2`` if it's available.
+
+Documentation
+=============
+
+Documentation is available at http://python-hyper.org/hpack/.
+
+Contributing
+============
+
+``hpack`` welcomes contributions from anyone! Unlike many other projects we are
+happy to accept cosmetic contributions and small contributions, in addition to
+large feature requests and changes.
+
+Before you contribute (either by opening an issue or filing a pull request),
+please `read the contribution guidelines`_.
+
+.. _read the contribution guidelines: http://hyper.readthedocs.org/en/development/contributing.html
+
+License
+=======
+
+``hpack`` is made available under the MIT License. For more details, see the
+``LICENSE`` file in the repository.
+
+Authors
+=======
+
+``hpack`` is maintained by Cory Benfield, with contributions from others. For
+more details about the contributors, please see ``CONTRIBUTORS.rst``.
diff --git a/tools/third_party/hpack/hpack.egg-info/PKG-INFO b/tools/third_party/hpack/hpack.egg-info/PKG-INFO
new file mode 100644
index 0000000..c2a3a1a
--- /dev/null
+++ b/tools/third_party/hpack/hpack.egg-info/PKG-INFO
@@ -0,0 +1,199 @@
+Metadata-Version: 1.1
+Name: hpack
+Version: 3.0.0
+Summary: Pure-Python HPACK header compression
+Home-page: http://hyper.rtfd.org
+Author: Cory Benfield
+Author-email: cory@lukasa.co.uk
+License: MIT License
+Description: ========================================
+        hpack: HTTP/2 Header Encoding for Python
+        ========================================
+        
+        .. image:: https://raw.github.com/Lukasa/hyper/development/docs/source/images/hyper.png
+        
+        .. image:: https://travis-ci.org/python-hyper/hpack.png?branch=master
+            :target: https://travis-ci.org/python-hyper/hpack
+        
+        This module contains a pure-Python HTTP/2 header encoding (HPACK) logic for use
+        in Python programs that implement HTTP/2. It also contains a compatibility
+        layer that automatically enables the use of ``nghttp2`` if it's available.
+        
+        Documentation
+        =============
+        
+        Documentation is available at http://python-hyper.org/hpack/.
+        
+        Contributing
+        ============
+        
+        ``hpack`` welcomes contributions from anyone! Unlike many other projects we are
+        happy to accept cosmetic contributions and small contributions, in addition to
+        large feature requests and changes.
+        
+        Before you contribute (either by opening an issue or filing a pull request),
+        please `read the contribution guidelines`_.
+        
+        .. _read the contribution guidelines: http://hyper.readthedocs.org/en/development/contributing.html
+        
+        License
+        =======
+        
+        ``hpack`` is made available under the MIT License. For more details, see the
+        ``LICENSE`` file in the repository.
+        
+        Authors
+        =======
+        
+        ``hpack`` is maintained by Cory Benfield, with contributions from others. For
+        more details about the contributors, please see ``CONTRIBUTORS.rst``.
+        
+        
+        Release History
+        ===============
+        
+        3.0.0 (2017-03-29)
+        ------------------
+        
+        **API Changes (Backward Incompatible)**
+        
+        - Removed nghttp2 support. This support had rotted and was essentially
+          non-functional, so it has now been removed until someone has time to re-add
+          the support in a functional form.
+        - Attempts by the encoder to exceed the maximum allowed header table size via
+          dynamic table size updates (or the absence thereof) are now forbidden.
+        
+        **API Changes (Backward Compatible)**
+        
+        - Added a new ``InvalidTableSizeError`` thrown when the encoder does not
+          respect the maximum table size set by the user.
+        - Added a ``Decoder.max_allowed_table_size`` field that sets the maximum
+          allowed size of the decoder header table. See the documentation for an
+          indication of how this should be used.
+        
+        **Bugfixes**
+        
+        - Up to 25% performance improvement decoding HPACK-packed integers, depending
+          on the platform.
+        - HPACK now tolerates receiving multiple header table size changes in sequence,
+          rather than only one.
+        - HPACK now forbids header table size changes anywhere but first in a header
+          block, as required by RFC 7541 § 4.2.
+        - Other miscellaneous performance improvements.
+        
+        2.3.0 (2016-08-04)
+        ------------------
+        
+        **Security Fixes**
+        
+        - CVE-2016-6581: HPACK Bomb. This release now enforces a maximum value of the
+          decompressed size of the header list. This is to avoid the so-called "HPACK
+          Bomb" vulnerability, which is caused when a malicious peer sends a compressed
+          HPACK body that decompresses to a gigantic header list size.
+        
+          This also adds an ``OversizedHeaderListError``, which is thrown by the
+          ``decode`` method if the maximum header list size is being violated. This
+          places the HPACK decoder into a broken state: it must not be used after this
+          exception is thrown.
+        
+          This also adds a ``max_header_list_size`` to the ``Decoder`` object. This
+          controls the maximum allowable decompressed size of the header list. By
+          default this is set to 64kB.
+        
+        2.2.0 (2016-04-20)
+        ------------------
+        
+        **API Changes (Backward Compatible)**
+        
+        - Added ``HeaderTuple`` and ``NeverIndexedHeaderTuple`` classes that signal
+          whether a given header field may ever be indexed in HTTP/2 header
+          compression.
+        - Changed ``Decoder.decode()`` to return the newly added ``HeaderTuple`` class
+          and subclass. These objects behave like two-tuples, so this change does not
+          break working code.
+        
+        **Bugfixes**
+        
+        - Improve Huffman decoding speed by 4x using an approach borrowed from nghttp2.
+        - Improve HPACK decoding speed by 10% by caching header table sizes.
+        
+        2.1.1 (2016-03-16)
+        ------------------
+        
+        **Bugfixes**
+        
+        - When passing a dictionary or dictionary subclass to ``Encoder.encode``, HPACK
+          now ensures that HTTP/2 special headers (headers whose names begin with
+          ``:`` characters) appear first in the header block.
+        
+        2.1.0 (2016-02-02)
+        ------------------
+        
+        **API Changes (Backward Compatible)**
+        
+        - Added new ``InvalidTableIndex`` exception, a subclass of
+          ``HPACKDecodingError``.
+        - Instead of throwing ``IndexError`` when encountering invalid encoded integers
+          HPACK now throws ``HPACKDecodingError``.
+        - Instead of throwing ``UnicodeDecodeError`` when encountering headers that are
+          not UTF-8 encoded, HPACK now throws ``HPACKDecodingError``.
+        - Instead of throwing ``IndexError`` when encountering invalid table offsets,
+          HPACK now throws ``InvalidTableIndex``.
+        - Added ``raw`` flag to ``decode``, allowing ``decode`` to return bytes instead
+          of attempting to decode the headers as UTF-8.
+        
+        **Bugfixes**
+        
+        - ``memoryview`` objects are now used when decoding HPACK, improving the
+          performance by avoiding unnecessary data copies.
+        
+        2.0.1 (2015-11-09)
+        ------------------
+        
+        - Fixed a bug where the Python HPACK implementation would only emit header
+          table size changes for the total change between one header block and another,
+          rather than for the entire sequence of changes.
+        
+        2.0.0 (2015-10-12)
+        ------------------
+        
+        - Remove unused ``HPACKEncodingError``.
+        - Add the shortcut ability to import the public API (``Encoder``, ``Decoder``,
+          ``HPACKError``, ``HPACKDecodingError``) directly, rather than from
+          ``hpack.hpack``.
+        
+        1.1.0 (2015-07-07)
+        ------------------
+        
+        - Add support for emitting 'never indexed' header fields, by using an optional
+          third element in the header tuple. With thanks to @jimcarreer!
+        
+        1.0.1 (2015-04-19)
+        ------------------
+        
+        - Header fields that have names matching header table entries are now added to
+          the header table. This improves compression efficiency at the cost of
+          slightly more table operations. With thanks to `Tatsuhiro Tsujikawa`_.
+        
+        .. _Tatsuhiro Tsujikawa: https://github.com/tatsuhiro-t
+        
+        1.0.0 (2015-04-13)
+        ------------------
+        
+        - Initial fork of the code from `hyper`_.
+        
+        .. _hyper: https://hyper.readthedocs.org/
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: Implementation :: CPython
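
The 2.3.0 entry in the changelog above (CVE-2016-6581, the "HPACK Bomb") is the
main security-relevant behaviour being vendored: the decoder enforces a limit on
the decompressed header list size. A hedged sketch of how a caller is expected
to use it (the 4096-byte limit here is an illustrative value; the default is
64 kB)::

    from hpack import Decoder, OversizedHeaderListError

    decoder = Decoder(max_header_list_size=4096)

    def decode_or_abort(block):
        try:
            return decoder.decode(block)
        except OversizedHeaderListError:
            # The decoder is now in a broken state and must not be reused;
            # the surrounding connection should be torn down.
            raise
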
diff --git a/tools/third_party/hpack/hpack.egg-info/SOURCES.txt b/tools/third_party/hpack/hpack.egg-info/SOURCES.txt
new file mode 100644
index 0000000..247a5eb
--- /dev/null
+++ b/tools/third_party/hpack/hpack.egg-info/SOURCES.txt
@@ -0,0 +1,26 @@
+CONTRIBUTORS.rst
+HISTORY.rst
+LICENSE
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+hpack/__init__.py
+hpack/compat.py
+hpack/exceptions.py
+hpack/hpack.py
+hpack/huffman.py
+hpack/huffman_constants.py
+hpack/huffman_table.py
+hpack/struct.py
+hpack/table.py
+hpack.egg-info/PKG-INFO
+hpack.egg-info/SOURCES.txt
+hpack.egg-info/dependency_links.txt
+hpack.egg-info/top_level.txt
+test/test_encode_decode.py
+test/test_hpack.py
+test/test_hpack_integration.py
+test/test_huffman.py
+test/test_struct.py
+test/test_table.py
\ No newline at end of file
diff --git a/tools/third_party/hpack/hpack.egg-info/dependency_links.txt b/tools/third_party/hpack/hpack.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/tools/third_party/hpack/hpack.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/tools/third_party/hpack/hpack.egg-info/top_level.txt b/tools/third_party/hpack/hpack.egg-info/top_level.txt
new file mode 100644
index 0000000..1a0ac48
--- /dev/null
+++ b/tools/third_party/hpack/hpack.egg-info/top_level.txt
@@ -0,0 +1 @@
+hpack
diff --git a/tools/third_party/hpack/hpack/__init__.py b/tools/third_party/hpack/hpack/__init__.py
new file mode 100644
index 0000000..22edde2
--- /dev/null
+++ b/tools/third_party/hpack/hpack/__init__.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+"""
+hpack
+~~~~~
+
+HTTP/2 header encoding for Python.
+"""
+from .hpack import Encoder, Decoder
+from .struct import HeaderTuple, NeverIndexedHeaderTuple
+from .exceptions import (
+    HPACKError, HPACKDecodingError, InvalidTableIndex, OversizedHeaderListError
+)
+
+__all__ = [
+    'Encoder', 'Decoder', 'HPACKError', 'HPACKDecodingError',
+    'InvalidTableIndex', 'HeaderTuple', 'NeverIndexedHeaderTuple',
+    'OversizedHeaderListError'
+]
+
+__version__ = '3.0.0'
diff --git a/tools/third_party/hpack/hpack/compat.py b/tools/third_party/hpack/hpack/compat.py
new file mode 100644
index 0000000..4fcaad4
--- /dev/null
+++ b/tools/third_party/hpack/hpack/compat.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/compat
+~~~~~~~~~~~~
+
+Normalizes the Python 2/3 API for internal use.
+"""
+import sys
+
+
+_ver = sys.version_info
+is_py2 = _ver[0] == 2
+is_py3 = _ver[0] == 3
+
+if is_py2:
+    def to_byte(char):
+        return ord(char)
+
+    def decode_hex(b):
+        return b.decode('hex')
+
+    def to_bytes(b):
+        if isinstance(b, memoryview):
+            return b.tobytes()
+        else:
+            return bytes(b)
+
+    unicode = unicode  # noqa
+    bytes = str
+
+elif is_py3:
+    def to_byte(char):
+        return char
+
+    def decode_hex(b):
+        return bytes.fromhex(b)
+
+    def to_bytes(b):
+        return bytes(b)
+
+    unicode = str
+    bytes = bytes
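
The shim above exists so the rest of the package can index into byte strings
and convert hex uniformly on Python 2 and 3. A quick sketch of what it
normalizes (the assertions are intended to hold on both interpreters)::

    from hpack.compat import to_byte, decode_hex, to_bytes

    assert to_byte(b'a'[0]) == 97              # always an int
    assert decode_hex('ff') == b'\xff'         # hex text -> bytes
    assert to_bytes(memoryview(b'ab')) == b'ab'
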
diff --git a/tools/third_party/hpack/hpack/exceptions.py b/tools/third_party/hpack/hpack/exceptions.py
new file mode 100644
index 0000000..571ba98
--- /dev/null
+++ b/tools/third_party/hpack/hpack/exceptions.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+"""
+hyper/http20/exceptions
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This defines exceptions used in the HTTP/2 portion of hyper.
+"""
+
+
+class HPACKError(Exception):
+    """
+    The base class for all ``hpack`` exceptions.
+    """
+    pass
+
+
+class HPACKDecodingError(HPACKError):
+    """
+    An error has been encountered while performing HPACK decoding.
+    """
+    pass
+
+
+class InvalidTableIndex(HPACKDecodingError):
+    """
+    An invalid table index was received.
+    """
+    pass
+
+
+class OversizedHeaderListError(HPACKDecodingError):
+    """
+    A header list that was larger than we allow has been received. This may be
+    a DoS attack.
+
+    .. versionadded:: 2.3.0
+    """
+    pass
+
+
+class InvalidTableSizeError(HPACKDecodingError):
+    """
+    An attempt was made to change the decoder table size to a value larger than
+    allowed, or the list was shrunk and the remote peer didn't shrink their
+    table size.
+
+    .. versionadded:: 3.0.0
+    """
+    pass
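
Every decode-side failure above derives from ``HPACKDecodingError``, so code
that does not care which rule was violated can catch the base class. A short
sketch (the one-byte input is deliberately truncated)::

    from hpack import Decoder, HPACKDecodingError

    decoder = Decoder()
    try:
        decoder.decode(b'\xff')  # indexed field with a missing continuation
    except HPACKDecodingError as exc:
        print('rejecting header block: %s' % exc)
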
diff --git a/tools/third_party/hpack/hpack/hpack.py b/tools/third_party/hpack/hpack/hpack.py
new file mode 100644
index 0000000..57e8c7f
--- /dev/null
+++ b/tools/third_party/hpack/hpack/hpack.py
@@ -0,0 +1,630 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/hpack
+~~~~~~~~~~~
+
+Implements the HPACK header compression algorithm as detailed by the IETF.
+"""
+import logging
+
+from .table import HeaderTable, table_entry_size
+from .compat import to_byte, to_bytes
+from .exceptions import (
+    HPACKDecodingError, OversizedHeaderListError, InvalidTableSizeError
+)
+from .huffman import HuffmanEncoder
+from .huffman_constants import (
+    REQUEST_CODES, REQUEST_CODES_LENGTH
+)
+from .huffman_table import decode_huffman
+from .struct import HeaderTuple, NeverIndexedHeaderTuple
+
+log = logging.getLogger(__name__)
+
+INDEX_NONE = b'\x00'
+INDEX_NEVER = b'\x10'
+INDEX_INCREMENTAL = b'\x40'
+
+# Precompute 2^i - 1 for i in 0-8 for use in prefix calculations. The zero
+# index is never used, but keeping it saves a subtraction, as prefix bit
+# counts are not zero indexed.
+_PREFIX_BIT_MAX_NUMBERS = [(2 ** i) - 1 for i in range(9)]
+
+try:  # pragma: no cover
+    basestring = basestring
+except NameError:  # pragma: no cover
+    basestring = (str, bytes)
+
+
+# We default the maximum header list size we're willing to accept to 64kB.
+# That's a lot of headers, but applications that want to raise it can do so.
+DEFAULT_MAX_HEADER_LIST_SIZE = 2 ** 16
+
+
+def _unicode_if_needed(header, raw):
+    """
+    Provides a header as a unicode string if raw is False, otherwise returns
+    it as a bytestring.
+    """
+    name = to_bytes(header[0])
+    value = to_bytes(header[1])
+    if not raw:
+        name = name.decode('utf-8')
+        value = value.decode('utf-8')
+    return header.__class__(name, value)
+
+
+def encode_integer(integer, prefix_bits):
+    """
+    This encodes an integer according to the wacky integer encoding rules
+    defined in the HPACK spec.
+    """
+    log.debug("Encoding %d with %d bits", integer, prefix_bits)
+
+    if integer < 0:
+        raise ValueError(
+            "Can only encode positive integers, got %s" % integer
+        )
+
+    if prefix_bits < 1 or prefix_bits > 8:
+        raise ValueError(
+            "Prefix bits must be between 1 and 8, got %s" % prefix_bits
+        )
+
+    max_number = _PREFIX_BIT_MAX_NUMBERS[prefix_bits]
+
+    if integer < max_number:
+        return bytearray([integer])  # Seriously?
+    else:
+        elements = [max_number]
+        integer -= max_number
+
+        while integer >= 128:
+            elements.append((integer & 127) + 128)
+            integer >>= 7
+
+        elements.append(integer)
+
+        return bytearray(elements)
+
+
+def decode_integer(data, prefix_bits):
+    """
+    This decodes an integer according to the wacky integer encoding rules
+    defined in the HPACK spec. Returns a tuple of the decoded integer and the
+    number of bytes that were consumed from ``data`` in order to get that
+    integer.
+    """
+    if prefix_bits < 1 or prefix_bits > 8:
+        raise ValueError(
+            "Prefix bits must be between 1 and 8, got %s" % prefix_bits
+        )
+
+    max_number = _PREFIX_BIT_MAX_NUMBERS[prefix_bits]
+    index = 1
+    shift = 0
+    mask = (0xFF >> (8 - prefix_bits))
+
+    try:
+        number = to_byte(data[0]) & mask
+        if number == max_number:
+            while True:
+                next_byte = to_byte(data[index])
+                index += 1
+
+                if next_byte >= 128:
+                    number += (next_byte - 128) << shift
+                else:
+                    number += next_byte << shift
+                    break
+                shift += 7
+
+    except IndexError:
+        raise HPACKDecodingError(
+            "Unable to decode HPACK integer representation from %r" % data
+        )
+
+    log.debug("Decoded %d, consumed %d bytes", number, index)
+
+    return number, index
+
+
+def _dict_to_iterable(header_dict):
+    """
+    This converts a dictionary to an iterable of two-tuples. This is an
+    HPACK-specific function because it pulls "special-headers" out first and
+    then emits them.
+    """
+    assert isinstance(header_dict, dict)
+    keys = sorted(
+        header_dict.keys(),
+        key=lambda k: not _to_bytes(k).startswith(b':')
+    )
+    for key in keys:
+        yield key, header_dict[key]
+
+
+def _to_bytes(string):
+    """
+    Convert string to bytes.
+    """
+    if not isinstance(string, basestring):  # pragma: no cover
+        string = str(string)
+
+    return string if isinstance(string, bytes) else string.encode('utf-8')
+
+
+class Encoder(object):
+    """
+    An HPACK encoder object. This object takes HTTP headers and emits encoded
+    HTTP/2 header blocks.
+    """
+
+    def __init__(self):
+        self.header_table = HeaderTable()
+        self.huffman_coder = HuffmanEncoder(
+            REQUEST_CODES, REQUEST_CODES_LENGTH
+        )
+        self.table_size_changes = []
+
+    @property
+    def header_table_size(self):
+        """
+        Controls the size of the HPACK header table.
+        """
+        return self.header_table.maxsize
+
+    @header_table_size.setter
+    def header_table_size(self, value):
+        self.header_table.maxsize = value
+        if self.header_table.resized:
+            self.table_size_changes.append(value)
+
+    def encode(self, headers, huffman=True):
+        """
+        Takes a set of headers and encodes them into an HPACK-encoded header
+        block.
+
+        :param headers: The headers to encode. Must be either an iterable of
+                        tuples, an iterable of :class:`HeaderTuple
+                        <hpack.struct.HeaderTuple>`, or a ``dict``.
+
+                        If an iterable of tuples, the tuples may be either
+                        two-tuples or three-tuples. If they are two-tuples, the
+                        tuples must be of the format ``(name, value)``. If they
+                        are three-tuples, they must be of the format
+                        ``(name, value, sensitive)``, where ``sensitive`` is a
+                        boolean value indicating whether the header should be
+                        added to header tables anywhere. If not present,
+                        ``sensitive`` defaults to ``False``.
+
+                        If an iterable of :class:`HeaderTuple
+                        <hpack.struct.HeaderTuple>`, the tuples must always be
+                        two-tuples. Instead of using ``sensitive`` as a third
+                        tuple entry, use :class:`NeverIndexedHeaderTuple
+                        <hpack.struct.NeverIndexedHeaderTuple>` to request that
+                        the field never be indexed.
+
+                        .. warning:: HTTP/2 requires that all special headers
+                            (headers whose names begin with ``:`` characters)
+                            appear at the *start* of the header block. While
+                            this method will ensure that happens for ``dict``
+                            subclasses, callers using any other iterable of
+                            tuples **must** ensure they place their special
+                            headers at the start of the iterable.
+
+                            For efficiency reasons users should prefer to use
+                            iterables of two-tuples: fixing the ordering of
+                            dictionary headers is an expensive operation that
+                            should be avoided if possible.
+
+        :param huffman: (optional) Whether to Huffman-encode any header sent as
+                        a literal value. Except for use when debugging, it is
+                        recommended that this be left enabled.
+
+        :returns: A bytestring containing the HPACK-encoded header block.
+        """
+        # Transforming the headers into a header block is a procedure that can
+        # be modeled as a chain or pipe. First, the headers are encoded. This
+        # encoding can be done a number of ways. If the header name-value pair
+        # are already in the header table we can represent them using the
+        # indexed representation: the same is true if they are in the static
+        # table. Otherwise, a literal representation will be used.
+        log.debug("HPACK encoding %s", headers)
+        header_block = []
+
+        # Turn the headers into a list of tuples if possible. This is the
+        # natural way to interact with them in HPACK. Because dictionaries are
+        # un-ordered, we need to make sure we grab the "special" headers first.
+        if isinstance(headers, dict):
+            headers = _dict_to_iterable(headers)
+
+        # Before we begin, if the header table size has been changed we need
+        # to signal all changes since last emission appropriately.
+        if self.header_table.resized:
+            header_block.append(self._encode_table_size_change())
+            self.header_table.resized = False
+
+        # Add each header to the header block
+        for header in headers:
+            sensitive = False
+            if isinstance(header, HeaderTuple):
+                sensitive = not header.indexable
+            elif len(header) > 2:
+                sensitive = header[2]
+
+            header = (_to_bytes(header[0]), _to_bytes(header[1]))
+            header_block.append(self.add(header, sensitive, huffman))
+
+        header_block = b''.join(header_block)
+
+        log.debug("Encoded header block to %s", header_block)
+
+        return header_block
+
+    def add(self, to_add, sensitive, huffman=False):
+        """
+        This function takes a header key-value tuple and serializes it.
+        """
+        log.debug("Adding %s to the header table", to_add)
+
+        name, value = to_add
+
+        # Set our indexing mode
+        indexbit = INDEX_INCREMENTAL if not sensitive else INDEX_NEVER
+
+        # Search for a matching header in the header table.
+        match = self.header_table.search(name, value)
+
+        if match is None:
+            # Not in the header table. Encode using the literal syntax,
+            # and add it to the header table.
+            encoded = self._encode_literal(name, value, indexbit, huffman)
+            if not sensitive:
+                self.header_table.add(name, value)
+            return encoded
+
+        # The header is in the table, break out the values. If we matched
+        # perfectly, we can use the indexed representation: otherwise we
+        # can use the indexed literal.
+        index, name, perfect = match
+
+        if perfect:
+            # Indexed representation.
+            encoded = self._encode_indexed(index)
+        else:
+            # Indexed literal. We are going to add the header to the
+            # header table unconditionally. It is a future todo to
+            # filter out headers which are known to be ineffective for
+            # indexing, since they just take space in the table and
+            # push out other valuable headers.
+            encoded = self._encode_indexed_literal(
+                index, value, indexbit, huffman
+            )
+            if not sensitive:
+                self.header_table.add(name, value)
+
+        return encoded
+
+    def _encode_indexed(self, index):
+        """
+        Encodes a header using the indexed representation.
+        """
+        field = encode_integer(index, 7)
+        field[0] |= 0x80  # we set the top bit
+        return bytes(field)
+
+    def _encode_literal(self, name, value, indexbit, huffman=False):
+        """
+        Encodes a header with a literal name and literal value. The indexing
+        mode (incremental indexing or never indexed) is given by ``indexbit``.
+        """
+        if huffman:
+            name = self.huffman_coder.encode(name)
+            value = self.huffman_coder.encode(value)
+
+        name_len = encode_integer(len(name), 7)
+        value_len = encode_integer(len(value), 7)
+
+        if huffman:
+            name_len[0] |= 0x80
+            value_len[0] |= 0x80
+
+        return b''.join(
+            [indexbit, bytes(name_len), name, bytes(value_len), value]
+        )
+
+    def _encode_indexed_literal(self, index, value, indexbit, huffman=False):
+        """
+        Encodes a header with an indexed name and a literal value and performs
+        incremental indexing.
+        """
+        if indexbit != INDEX_INCREMENTAL:
+            prefix = encode_integer(index, 4)
+        else:
+            prefix = encode_integer(index, 6)
+
+        prefix[0] |= ord(indexbit)
+
+        if huffman:
+            value = self.huffman_coder.encode(value)
+
+        value_len = encode_integer(len(value), 7)
+
+        if huffman:
+            value_len[0] |= 0x80
+
+        return b''.join([bytes(prefix), bytes(value_len), value])
+
+    def _encode_table_size_change(self):
+        """
+        Produces the encoded form of all header table size change context
+        updates.
+        """
+        block = b''
+        for size_bytes in self.table_size_changes:
+            size_bytes = encode_integer(size_bytes, 5)
+            size_bytes[0] |= 0x20
+            block += bytes(size_bytes)
+        self.table_size_changes = []
+        return block
+
+
+class Decoder(object):
+    """
+    An HPACK decoder object.
+
+    .. versionchanged:: 2.3.0
+       Added ``max_header_list_size`` argument.
+
+    :param max_header_list_size: The maximum decompressed size we will allow
+        for any single header block. This is a protection against DoS attacks
+        that attempt to force the application to expand a relatively small
+        amount of data into a really large header list, allowing enormous
+        amounts of memory to be allocated.
+
+        If this amount of data is exceeded, an `OversizedHeaderListError
+        <hpack.OversizedHeaderListError>` exception will be raised. At this
+        point the connection should be shut down, as the HPACK state will no
+        longer be usable.
+
+        Defaults to 64kB.
+    :type max_header_list_size: ``int``
+    """
+    def __init__(self, max_header_list_size=DEFAULT_MAX_HEADER_LIST_SIZE):
+        self.header_table = HeaderTable()
+
+        #: The maximum decompressed size we will allow for any single header
+        #: block. This is a protection against DoS attacks that attempt to
+        #: force the application to expand a relatively small amount of data
+        #: into a really large header list, allowing enormous amounts of memory
+        #: to be allocated.
+        #:
+        #: If this amount of data is exceeded, an `OversizedHeaderListError
+        #: <hpack.OversizedHeaderListError>` exception will be raised. At this
+        #: point the connection should be shut down, as the HPACK state will no
+        #: longer be usable.
+        #:
+        #: Defaults to 64kB.
+        #:
+        #: .. versionadded:: 2.3.0
+        self.max_header_list_size = max_header_list_size
+
+        #: Maximum allowed header table size.
+        #:
+        #: A HTTP/2 implementation should set this to the most recent value of
+        #: SETTINGS_HEADER_TABLE_SIZE that it sent *and has received an ACK
+        #: for*. Once this setting is set, the actual header table size will be
+        #: checked at the end of each decoding run and whenever it is changed,
+        #: to confirm that it fits in this size.
+        self.max_allowed_table_size = self.header_table.maxsize
+
+    @property
+    def header_table_size(self):
+        """
+        Controls the size of the HPACK header table.
+        """
+        return self.header_table.maxsize
+
+    @header_table_size.setter
+    def header_table_size(self, value):
+        self.header_table.maxsize = value
+
+    def decode(self, data, raw=False):
+        """
+        Takes an HPACK-encoded header block and decodes it into a header set.
+
+        :param data: A bytestring representing a complete HPACK-encoded header
+                     block.
+        :param raw: (optional) Whether to return the headers as tuples of raw
+                    byte strings or to decode them as UTF-8 before returning
+                    them. The default value is False, which returns tuples of
+                    Unicode strings.
+        :returns: A list of two-tuples of ``(name, value)`` representing the
+                  HPACK-encoded headers, in the order they were decoded.
+        :raises HPACKDecodingError: If an error is encountered while decoding
+                                    the header block.
+        """
+        log.debug("Decoding %s", data)
+
+        data_mem = memoryview(data)
+        headers = []
+        data_len = len(data)
+        inflated_size = 0
+        current_index = 0
+
+        while current_index < data_len:
+            # Work out what kind of header we're decoding.
+            # If the high bit is 1, it's an indexed field.
+            current = to_byte(data[current_index])
+            indexed = True if current & 0x80 else False
+
+            # Otherwise, if the second-highest bit is 1 it's a field that does
+            # alter the header table.
+            literal_index = True if current & 0x40 else False
+
+            # Otherwise, if the third-highest bit is 1 it's an encoding context
+            # update.
+            encoding_update = True if current & 0x20 else False
+
+            if indexed:
+                header, consumed = self._decode_indexed(
+                    data_mem[current_index:]
+                )
+            elif literal_index:
+                # It's a literal header that does affect the header table.
+                header, consumed = self._decode_literal_index(
+                    data_mem[current_index:]
+                )
+            elif encoding_update:
+                # It's an update to the encoding context. These are forbidden
+                # in a header block after any actual header.
+                if headers:
+                    raise HPACKDecodingError(
+                        "Table size update not at the start of the block"
+                    )
+                consumed = self._update_encoding_context(
+                    data_mem[current_index:]
+                )
+                header = None
+            else:
+                # It's a literal header that does not affect the header table.
+                header, consumed = self._decode_literal_no_index(
+                    data_mem[current_index:]
+                )
+
+            if header:
+                headers.append(header)
+                inflated_size += table_entry_size(*header)
+
+                if inflated_size > self.max_header_list_size:
+                    raise OversizedHeaderListError(
+                        "A header list larger than %d has been received" %
+                        self.max_header_list_size
+                    )
+
+            current_index += consumed
+
+        # Confirm that the table size is lower than the maximum. We do this
+        # here to ensure that we catch when the max has been *shrunk* and the
+        # remote peer hasn't actually done that.
+        self._assert_valid_table_size()
+
+        try:
+            return [_unicode_if_needed(h, raw) for h in headers]
+        except UnicodeDecodeError:
+            raise HPACKDecodingError("Unable to decode headers as UTF-8.")
+
+    def _assert_valid_table_size(self):
+        """
+        Check that the table size set by the encoder is lower than the maximum
+        we expect to have.
+        """
+        if self.header_table_size > self.max_allowed_table_size:
+            raise InvalidTableSizeError(
+                "Encoder did not shrink table size to within the max"
+            )
+
+    def _update_encoding_context(self, data):
+        """
+        Handles a byte that updates the encoding context.
+        """
+        # We've been asked to resize the header table.
+        new_size, consumed = decode_integer(data, 5)
+        if new_size > self.max_allowed_table_size:
+            raise InvalidTableSizeError(
+                "Encoder exceeded max allowable table size"
+            )
+        self.header_table_size = new_size
+        return consumed
+
+    def _decode_indexed(self, data):
+        """
+        Decodes a header represented using the indexed representation.
+        """
+        index, consumed = decode_integer(data, 7)
+        header = HeaderTuple(*self.header_table.get_by_index(index))
+        log.debug("Decoded %s, consumed %d", header, consumed)
+        return header, consumed
+
+    def _decode_literal_no_index(self, data):
+        return self._decode_literal(data, False)
+
+    def _decode_literal_index(self, data):
+        return self._decode_literal(data, True)
+
+    def _decode_literal(self, data, should_index):
+        """
+        Decodes a header represented with a literal.
+        """
+        total_consumed = 0
+
+        # When should_index is true, if the low six bits of the first byte are
+        # nonzero, the header name is indexed.
+        # When should_index is false, if the low four bits of the first byte
+        # are nonzero, the header name is indexed.
+        if should_index:
+            indexed_name = to_byte(data[0]) & 0x3F
+            name_len = 6
+            not_indexable = False
+        else:
+            high_byte = to_byte(data[0])
+            indexed_name = high_byte & 0x0F
+            name_len = 4
+            not_indexable = high_byte & 0x10
+
+        if indexed_name:
+            # Indexed header name.
+            index, consumed = decode_integer(data, name_len)
+            name = self.header_table.get_by_index(index)[0]
+
+            total_consumed = consumed
+            length = 0
+        else:
+            # Literal header name. The first byte was consumed, so we need to
+            # move forward.
+            data = data[1:]
+
+            length, consumed = decode_integer(data, 7)
+            name = data[consumed:consumed + length]
+            if len(name) != length:
+                raise HPACKDecodingError("Truncated header block")
+
+            if to_byte(data[0]) & 0x80:
+                name = decode_huffman(name)
+            total_consumed = consumed + length + 1  # Since we moved forward 1.
+
+        data = data[consumed + length:]
+
+        # The header value is definitely length-based.
+        length, consumed = decode_integer(data, 7)
+        value = data[consumed:consumed + length]
+        if len(value) != length:
+            raise HPACKDecodingError("Truncated header block")
+
+        if to_byte(data[0]) & 0x80:
+            value = decode_huffman(value)
+
+        # Update the total consumed length.
+        total_consumed += length + consumed
+
+        # If we have been told never to index the header field, encode that in
+        # the tuple we use.
+        if not_indexable:
+            header = NeverIndexedHeaderTuple(name, value)
+        else:
+            header = HeaderTuple(name, value)
+
+        # If we've been asked to index this, add it to the header table.
+        if should_index:
+            self.header_table.add(name, value)
+
+        log.debug(
+            "Decoded %s, total consumed %d bytes, indexed %s",
+            header,
+            total_consumed,
+            should_index
+        )
+
+        return header, total_consumed
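
The "wacky" integer representation that ``encode_integer`` and
``decode_integer`` implement is the prefix coding from RFC 7541 Section 5.1. A
worked sketch, mirroring the RFC's Appendix C.1 examples (these helpers are
module-internal to ``hpack.hpack``, so this is illustration rather than public
API)::

    from hpack.hpack import encode_integer, decode_integer

    # 10 fits within a 5-bit prefix (maximum 31), so it is a single byte.
    assert encode_integer(10, 5) == bytearray([0b01010])

    # 1337 does not fit: the prefix is filled with ones (31) and the
    # remainder 1306 follows in 7-bit groups, least significant first.
    assert encode_integer(1337, 5) == bytearray([31, 154, 10])
    assert decode_integer(b'\x1f\x9a\x0a', 5) == (1337, 3)
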
diff --git a/tools/third_party/hpack/hpack/huffman.py b/tools/third_party/hpack/hpack/huffman.py
new file mode 100644
index 0000000..159569c
--- /dev/null
+++ b/tools/third_party/hpack/hpack/huffman.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/huffman_decoder
+~~~~~~~~~~~~~~~~~~~~~
+
+An implementation of a bitwise prefix tree specially built for decoding
+Huffman-coded content where we already know the Huffman table.
+"""
+from .compat import to_byte, decode_hex
+
+
+class HuffmanEncoder(object):
+    """
+    Encodes a string according to the Huffman encoding table defined in the
+    HPACK specification.
+    """
+    def __init__(self, huffman_code_list, huffman_code_list_lengths):
+        self.huffman_code_list = huffman_code_list
+        self.huffman_code_list_lengths = huffman_code_list_lengths
+
+    def encode(self, bytes_to_encode):
+        """
+        Given a string of bytes, encodes them according to the HPACK Huffman
+        specification.
+        """
+        # If handed the empty string, just immediately return.
+        if not bytes_to_encode:
+            return b''
+
+        final_num = 0
+        final_int_len = 0
+
+        # Turn each byte into its huffman code. These codes aren't necessarily
+        # octet aligned, so keep track of how far through an octet we are. To
+        # handle this cleanly, just use a single giant integer.
+        for char in bytes_to_encode:
+            byte = to_byte(char)
+            bin_int_len = self.huffman_code_list_lengths[byte]
+            bin_int = self.huffman_code_list[byte] & (
+                2 ** (bin_int_len + 1) - 1
+            )
+            final_num <<= bin_int_len
+            final_num |= bin_int
+            final_int_len += bin_int_len
+
+        # Pad out to an octet with ones.
+        bits_to_be_padded = (8 - (final_int_len % 8)) % 8
+        final_num <<= bits_to_be_padded
+        final_num |= (1 << bits_to_be_padded) - 1
+
+        # Convert the number to hex and strip off the leading '0x' and the
+        # trailing 'L', if present.
+        final_num = hex(final_num)[2:].rstrip('L')
+
+        # If this is odd, prepend a zero.
+        final_num = '0' + final_num if len(final_num) % 2 != 0 else final_num
+
+        # This number should have twice as many digits as bytes. If not, we're
+        # missing some leading zeroes. Work out how many bytes we want and how
+        # many digits we have, then add the missing zero digits to the front.
+        total_bytes = (final_int_len + bits_to_be_padded) // 8
+        expected_digits = total_bytes * 2
+
+        if len(final_num) != expected_digits:
+            missing_digits = expected_digits - len(final_num)
+            final_num = ('0' * missing_digits) + final_num
+
+        return decode_hex(final_num)
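
This encoder and the ``decode_huffman`` table in ``huffman_table.py`` below are
the two halves of the RFC 7541 Appendix B code, so a round-trip is the simplest
sanity check; a hedged sketch::

    from hpack.huffman import HuffmanEncoder
    from hpack.huffman_constants import REQUEST_CODES, REQUEST_CODES_LENGTH
    from hpack.huffman_table import decode_huffman

    encoder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)
    encoded = encoder.encode(b'www.example.com')

    # Huffman coding is lossless and shortens typical header text.
    assert decode_huffman(encoded) == b'www.example.com'
    assert len(encoded) < len(b'www.example.com')
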
diff --git a/tools/third_party/hpack/hpack/huffman_constants.py b/tools/third_party/hpack/hpack/huffman_constants.py
new file mode 100644
index 0000000..c2b3bb2
--- /dev/null
+++ b/tools/third_party/hpack/hpack/huffman_constants.py
@@ -0,0 +1,288 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/huffman_constants
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Defines the constant Huffman table. This takes up an upsetting amount of space,
+but c'est la vie.
+"""
+
+REQUEST_CODES = [
+    0x1ff8,
+    0x7fffd8,
+    0xfffffe2,
+    0xfffffe3,
+    0xfffffe4,
+    0xfffffe5,
+    0xfffffe6,
+    0xfffffe7,
+    0xfffffe8,
+    0xffffea,
+    0x3ffffffc,
+    0xfffffe9,
+    0xfffffea,
+    0x3ffffffd,
+    0xfffffeb,
+    0xfffffec,
+    0xfffffed,
+    0xfffffee,
+    0xfffffef,
+    0xffffff0,
+    0xffffff1,
+    0xffffff2,
+    0x3ffffffe,
+    0xffffff3,
+    0xffffff4,
+    0xffffff5,
+    0xffffff6,
+    0xffffff7,
+    0xffffff8,
+    0xffffff9,
+    0xffffffa,
+    0xffffffb,
+    0x14,
+    0x3f8,
+    0x3f9,
+    0xffa,
+    0x1ff9,
+    0x15,
+    0xf8,
+    0x7fa,
+    0x3fa,
+    0x3fb,
+    0xf9,
+    0x7fb,
+    0xfa,
+    0x16,
+    0x17,
+    0x18,
+    0x0,
+    0x1,
+    0x2,
+    0x19,
+    0x1a,
+    0x1b,
+    0x1c,
+    0x1d,
+    0x1e,
+    0x1f,
+    0x5c,
+    0xfb,
+    0x7ffc,
+    0x20,
+    0xffb,
+    0x3fc,
+    0x1ffa,
+    0x21,
+    0x5d,
+    0x5e,
+    0x5f,
+    0x60,
+    0x61,
+    0x62,
+    0x63,
+    0x64,
+    0x65,
+    0x66,
+    0x67,
+    0x68,
+    0x69,
+    0x6a,
+    0x6b,
+    0x6c,
+    0x6d,
+    0x6e,
+    0x6f,
+    0x70,
+    0x71,
+    0x72,
+    0xfc,
+    0x73,
+    0xfd,
+    0x1ffb,
+    0x7fff0,
+    0x1ffc,
+    0x3ffc,
+    0x22,
+    0x7ffd,
+    0x3,
+    0x23,
+    0x4,
+    0x24,
+    0x5,
+    0x25,
+    0x26,
+    0x27,
+    0x6,
+    0x74,
+    0x75,
+    0x28,
+    0x29,
+    0x2a,
+    0x7,
+    0x2b,
+    0x76,
+    0x2c,
+    0x8,
+    0x9,
+    0x2d,
+    0x77,
+    0x78,
+    0x79,
+    0x7a,
+    0x7b,
+    0x7ffe,
+    0x7fc,
+    0x3ffd,
+    0x1ffd,
+    0xffffffc,
+    0xfffe6,
+    0x3fffd2,
+    0xfffe7,
+    0xfffe8,
+    0x3fffd3,
+    0x3fffd4,
+    0x3fffd5,
+    0x7fffd9,
+    0x3fffd6,
+    0x7fffda,
+    0x7fffdb,
+    0x7fffdc,
+    0x7fffdd,
+    0x7fffde,
+    0xffffeb,
+    0x7fffdf,
+    0xffffec,
+    0xffffed,
+    0x3fffd7,
+    0x7fffe0,
+    0xffffee,
+    0x7fffe1,
+    0x7fffe2,
+    0x7fffe3,
+    0x7fffe4,
+    0x1fffdc,
+    0x3fffd8,
+    0x7fffe5,
+    0x3fffd9,
+    0x7fffe6,
+    0x7fffe7,
+    0xffffef,
+    0x3fffda,
+    0x1fffdd,
+    0xfffe9,
+    0x3fffdb,
+    0x3fffdc,
+    0x7fffe8,
+    0x7fffe9,
+    0x1fffde,
+    0x7fffea,
+    0x3fffdd,
+    0x3fffde,
+    0xfffff0,
+    0x1fffdf,
+    0x3fffdf,
+    0x7fffeb,
+    0x7fffec,
+    0x1fffe0,
+    0x1fffe1,
+    0x3fffe0,
+    0x1fffe2,
+    0x7fffed,
+    0x3fffe1,
+    0x7fffee,
+    0x7fffef,
+    0xfffea,
+    0x3fffe2,
+    0x3fffe3,
+    0x3fffe4,
+    0x7ffff0,
+    0x3fffe5,
+    0x3fffe6,
+    0x7ffff1,
+    0x3ffffe0,
+    0x3ffffe1,
+    0xfffeb,
+    0x7fff1,
+    0x3fffe7,
+    0x7ffff2,
+    0x3fffe8,
+    0x1ffffec,
+    0x3ffffe2,
+    0x3ffffe3,
+    0x3ffffe4,
+    0x7ffffde,
+    0x7ffffdf,
+    0x3ffffe5,
+    0xfffff1,
+    0x1ffffed,
+    0x7fff2,
+    0x1fffe3,
+    0x3ffffe6,
+    0x7ffffe0,
+    0x7ffffe1,
+    0x3ffffe7,
+    0x7ffffe2,
+    0xfffff2,
+    0x1fffe4,
+    0x1fffe5,
+    0x3ffffe8,
+    0x3ffffe9,
+    0xffffffd,
+    0x7ffffe3,
+    0x7ffffe4,
+    0x7ffffe5,
+    0xfffec,
+    0xfffff3,
+    0xfffed,
+    0x1fffe6,
+    0x3fffe9,
+    0x1fffe7,
+    0x1fffe8,
+    0x7ffff3,
+    0x3fffea,
+    0x3fffeb,
+    0x1ffffee,
+    0x1ffffef,
+    0xfffff4,
+    0xfffff5,
+    0x3ffffea,
+    0x7ffff4,
+    0x3ffffeb,
+    0x7ffffe6,
+    0x3ffffec,
+    0x3ffffed,
+    0x7ffffe7,
+    0x7ffffe8,
+    0x7ffffe9,
+    0x7ffffea,
+    0x7ffffeb,
+    0xffffffe,
+    0x7ffffec,
+    0x7ffffed,
+    0x7ffffee,
+    0x7ffffef,
+    0x7fffff0,
+    0x3ffffee,
+    0x3fffffff,
+]
+
+REQUEST_CODES_LENGTH = [
+    13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
+    28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+     6, 10, 10, 12, 13,  6,  8, 11, 10, 10,  8, 11,  8,  6,  6,  6,
+     5,  5,  5,  6,  6,  6,  6,  6,  6,  6,  7,  8, 15,  6, 12, 10,
+    13,  6,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,
+     7,  7,  7,  7,  7,  7,  7,  7,  8,  7,  8, 13, 19, 13, 14,  6,
+    15,  5,  6,  5,  6,  5,  6,  6,  6,  5,  7,  7,  6,  6,  6,  5,
+     6,  7,  6,  5,  5,  6,  7,  7,  7,  7,  7, 15, 11, 14, 13, 28,
+    20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
+    24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
+    22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
+    21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
+    26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
+    19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
+    20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
+    26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
+    30,
+]
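
The two lists are parallel: entry ``i`` of ``REQUEST_CODES`` is the Huffman
code for byte value ``i``, and the same entry of ``REQUEST_CODES_LENGTH`` is
that code's bit length. Reading the tables above for the letter ``a``, for
example::

    from hpack.huffman_constants import REQUEST_CODES, REQUEST_CODES_LENGTH

    # Byte 97 ('a') is coded as the 5-bit pattern 00011, per RFC 7541
    # Appendix B.
    assert REQUEST_CODES[ord('a')] == 0x3
    assert REQUEST_CODES_LENGTH[ord('a')] == 5
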
diff --git a/tools/third_party/hpack/hpack/huffman_table.py b/tools/third_party/hpack/hpack/huffman_table.py
new file mode 100644
index 0000000..c199ef5
--- /dev/null
+++ b/tools/third_party/hpack/hpack/huffman_table.py
@@ -0,0 +1,4739 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/huffman_table
+~~~~~~~~~~~~~~~~~~~
+
+This implementation of a Huffman decoding table for HTTP/2 is essentially a
+Python port of the work originally done for nghttp2's Huffman decoding. For
+this reason, while this file is made available under the MIT license as is the
+rest of this module, this file is undoubtedly a derivative work of the nghttp2
+file ``nghttp2_hd_huffman_data.c``, obtained from
+https://github.com/tatsuhiro-t/nghttp2/ at commit
+d2b55ad1a245e1d1964579fa3fac36ebf3939e72. That work is made available under
+the Apache 2.0 license under the following terms:
+
+    Copyright (c) 2013 Tatsuhiro Tsujikawa
+
+    Permission is hereby granted, free of charge, to any person obtaining
+    a copy of this software and associated documentation files (the
+    "Software"), to deal in the Software without restriction, including
+    without limitation the rights to use, copy, modify, merge, publish,
+    distribute, sublicense, and/or sell copies of the Software, and to
+    permit persons to whom the Software is furnished to do so, subject to
+    the following conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+The essence of this approach is that it builds a finite state machine out of
+4-bit nibbles of Huffman coded data. The input function passes 4 bits worth of
+data to the state machine each time, which uses those 4 bits of data along with
+the current accumulated state data to process the data given.
+
+For the sake of efficiency, the in-memory representation of the states,
+transitions, and result values of the state machine are represented as a long
+list containing three-tuples. This list is enormously long, and viewing it as
+an in-memory representation is not very clear, but it is laid out here in a way
+that is intended to be *somewhat* more clear.
+
+Essentially, the list is structured as 256 collections of 16 entries (one for
+each nibble) of three-tuples. Each collection is called a "node", and the
+zeroth collection is called the "root node". The state machine tracks one
+value: the "state" byte.
+
+For each nibble passed to the state machine, it first multiplies the "state"
+byte by 16 and adds the numerical value of the nibble. This number is the index
+into the large flat list.
+
+The three-tuple that is found by looking up that index consists of three
+values:
+
+- a new state value, used for subsequent decoding
+- a collection of flags, used to determine whether data is emitted or whether
+  the state machine is complete.
+- the byte value to emit, assuming that emitting a byte is required.
+
+The flags are consulted, if necessary a byte is emitted, and then the next
+nibble is used. This continues until the state machine believes it has
+completely Huffman-decoded the data.
+
+This approach has relatively little indirection, and therefore performs
+relatively well, particularly on implementations like PyPy where the cost of
+loops at the Python-level is not too expensive. The total number of loop
+iterations is 4x the number of bytes passed to the decoder.
+"""
+from .exceptions import HPACKDecodingError
+
+
+# This defines the state machine "class" at the top of the file. The reason we
+# do this is to keep the terrifying monster state table at the *bottom* of the
+# file so you don't have to actually *look* at the damn thing.
+def decode_huffman(huffman_string):
+    """
+    Given a bytestring of Huffman-encoded data for HPACK, returns a bytestring
+    of the decompressed data.
+    """
+    if not huffman_string:
+        return b''
+
+    state = 0
+    flags = 0
+    decoded_bytes = bytearray()
+
+    # Perversely, bytearrays are a lot more convenient across Python 2 and
+    # Python 3 because they behave *the same way* on both platforms. Given that
+    # we really do want numerical bytes when we iterate here, let's use a
+    # bytearray.
+    huffman_string = bytearray(huffman_string)
+
+    # This loop is unrolled somewhat. Because we use a nibble, not a byte, we
+    # need to handle each nibble twice. We unroll that: it makes the loop body
+    # a bit longer, but that's ok.
+    for input_byte in huffman_string:
+        index = (state * 16) + (input_byte >> 4)
+        state, flags, output_byte = HUFFMAN_TABLE[index]
+
+        if flags & HUFFMAN_FAIL:
+            raise HPACKDecodingError("Invalid Huffman String")
+
+        if flags & HUFFMAN_EMIT_SYMBOL:
+            decoded_bytes.append(output_byte)
+
+        index = (state * 16) + (input_byte & 0x0F)
+        state, flags, output_byte = HUFFMAN_TABLE[index]
+
+        if flags & HUFFMAN_FAIL:
+            raise HPACKDecodingError("Invalid Huffman String")
+
+        if flags & HUFFMAN_EMIT_SYMBOL:
+            decoded_bytes.append(output_byte)
+
+    if not (flags & HUFFMAN_COMPLETE):
+        raise HPACKDecodingError("Incomplete Huffman string")
+
+    return bytes(decoded_bytes)
+
+
+# Some decoder flags to control state transitions.
+HUFFMAN_COMPLETE = 1
+HUFFMAN_EMIT_SYMBOL = (1 << 1)
+HUFFMAN_FAIL = (1 << 2)
+
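+# A concrete instance of the lookup arithmetic described in the module
+# docstring, using values from Node 0 below (illustrative note only):
+# starting in state 0 with input nibble 0x4, the index is 0 * 16 + 4 = 4 and
+# HUFFMAN_TABLE[4] == (11, 0, 0), i.e. move to state 11 with no flags set and
+# nothing emitted yet.
+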
+# This is the monster table. Avert your eyes, children.
+HUFFMAN_TABLE = [
+    # Node 0 (Root Node, never emits symbols.)
+    (4, 0, 0),
+    (5, 0, 0),
+    (7, 0, 0),
+    (8, 0, 0),
+    (11, 0, 0),
+    (12, 0, 0),
+    (16, 0, 0),
+    (19, 0, 0),
+    (25, 0, 0),
+    (28, 0, 0),
+    (32, 0, 0),
+    (35, 0, 0),
+    (42, 0, 0),
+    (49, 0, 0),
+    (57, 0, 0),
+    (64, HUFFMAN_COMPLETE, 0),
+
+    # Node 1
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 48),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 49),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 50),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 97),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 99),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 101),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 105),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 111),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 115),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 116),
+    (13, 0, 0),
+    (14, 0, 0),
+    (17, 0, 0),
+    (18, 0, 0),
+    (20, 0, 0),
+    (21, 0, 0),
+
+    # Node 2
+    (1, HUFFMAN_EMIT_SYMBOL, 48),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 48),
+    (1, HUFFMAN_EMIT_SYMBOL, 49),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 49),
+    (1, HUFFMAN_EMIT_SYMBOL, 50),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 50),
+    (1, HUFFMAN_EMIT_SYMBOL, 97),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 97),
+    (1, HUFFMAN_EMIT_SYMBOL, 99),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 99),
+    (1, HUFFMAN_EMIT_SYMBOL, 101),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 101),
+    (1, HUFFMAN_EMIT_SYMBOL, 105),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 105),
+    (1, HUFFMAN_EMIT_SYMBOL, 111),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 111),
+
+    # Node 3
+    (2, HUFFMAN_EMIT_SYMBOL, 48),
+    (9, HUFFMAN_EMIT_SYMBOL, 48),
+    (23, HUFFMAN_EMIT_SYMBOL, 48),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 48),
+    (2, HUFFMAN_EMIT_SYMBOL, 49),
+    (9, HUFFMAN_EMIT_SYMBOL, 49),
+    (23, HUFFMAN_EMIT_SYMBOL, 49),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 49),
+    (2, HUFFMAN_EMIT_SYMBOL, 50),
+    (9, HUFFMAN_EMIT_SYMBOL, 50),
+    (23, HUFFMAN_EMIT_SYMBOL, 50),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 50),
+    (2, HUFFMAN_EMIT_SYMBOL, 97),
+    (9, HUFFMAN_EMIT_SYMBOL, 97),
+    (23, HUFFMAN_EMIT_SYMBOL, 97),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 97),
+
+    # Node 4
+    (3, HUFFMAN_EMIT_SYMBOL, 48),
+    (6, HUFFMAN_EMIT_SYMBOL, 48),
+    (10, HUFFMAN_EMIT_SYMBOL, 48),
+    (15, HUFFMAN_EMIT_SYMBOL, 48),
+    (24, HUFFMAN_EMIT_SYMBOL, 48),
+    (31, HUFFMAN_EMIT_SYMBOL, 48),
+    (41, HUFFMAN_EMIT_SYMBOL, 48),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 48),
+    (3, HUFFMAN_EMIT_SYMBOL, 49),
+    (6, HUFFMAN_EMIT_SYMBOL, 49),
+    (10, HUFFMAN_EMIT_SYMBOL, 49),
+    (15, HUFFMAN_EMIT_SYMBOL, 49),
+    (24, HUFFMAN_EMIT_SYMBOL, 49),
+    (31, HUFFMAN_EMIT_SYMBOL, 49),
+    (41, HUFFMAN_EMIT_SYMBOL, 49),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 49),
+
+    # Node 5
+    (3, HUFFMAN_EMIT_SYMBOL, 50),
+    (6, HUFFMAN_EMIT_SYMBOL, 50),
+    (10, HUFFMAN_EMIT_SYMBOL, 50),
+    (15, HUFFMAN_EMIT_SYMBOL, 50),
+    (24, HUFFMAN_EMIT_SYMBOL, 50),
+    (31, HUFFMAN_EMIT_SYMBOL, 50),
+    (41, HUFFMAN_EMIT_SYMBOL, 50),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 50),
+    (3, HUFFMAN_EMIT_SYMBOL, 97),
+    (6, HUFFMAN_EMIT_SYMBOL, 97),
+    (10, HUFFMAN_EMIT_SYMBOL, 97),
+    (15, HUFFMAN_EMIT_SYMBOL, 97),
+    (24, HUFFMAN_EMIT_SYMBOL, 97),
+    (31, HUFFMAN_EMIT_SYMBOL, 97),
+    (41, HUFFMAN_EMIT_SYMBOL, 97),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 97),
+
+    # Node 6
+    (2, HUFFMAN_EMIT_SYMBOL, 99),
+    (9, HUFFMAN_EMIT_SYMBOL, 99),
+    (23, HUFFMAN_EMIT_SYMBOL, 99),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 99),
+    (2, HUFFMAN_EMIT_SYMBOL, 101),
+    (9, HUFFMAN_EMIT_SYMBOL, 101),
+    (23, HUFFMAN_EMIT_SYMBOL, 101),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 101),
+    (2, HUFFMAN_EMIT_SYMBOL, 105),
+    (9, HUFFMAN_EMIT_SYMBOL, 105),
+    (23, HUFFMAN_EMIT_SYMBOL, 105),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 105),
+    (2, HUFFMAN_EMIT_SYMBOL, 111),
+    (9, HUFFMAN_EMIT_SYMBOL, 111),
+    (23, HUFFMAN_EMIT_SYMBOL, 111),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 111),
+
+    # Node 7
+    (3, HUFFMAN_EMIT_SYMBOL, 99),
+    (6, HUFFMAN_EMIT_SYMBOL, 99),
+    (10, HUFFMAN_EMIT_SYMBOL, 99),
+    (15, HUFFMAN_EMIT_SYMBOL, 99),
+    (24, HUFFMAN_EMIT_SYMBOL, 99),
+    (31, HUFFMAN_EMIT_SYMBOL, 99),
+    (41, HUFFMAN_EMIT_SYMBOL, 99),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 99),
+    (3, HUFFMAN_EMIT_SYMBOL, 101),
+    (6, HUFFMAN_EMIT_SYMBOL, 101),
+    (10, HUFFMAN_EMIT_SYMBOL, 101),
+    (15, HUFFMAN_EMIT_SYMBOL, 101),
+    (24, HUFFMAN_EMIT_SYMBOL, 101),
+    (31, HUFFMAN_EMIT_SYMBOL, 101),
+    (41, HUFFMAN_EMIT_SYMBOL, 101),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 101),
+
+    # Node 8
+    (3, HUFFMAN_EMIT_SYMBOL, 105),
+    (6, HUFFMAN_EMIT_SYMBOL, 105),
+    (10, HUFFMAN_EMIT_SYMBOL, 105),
+    (15, HUFFMAN_EMIT_SYMBOL, 105),
+    (24, HUFFMAN_EMIT_SYMBOL, 105),
+    (31, HUFFMAN_EMIT_SYMBOL, 105),
+    (41, HUFFMAN_EMIT_SYMBOL, 105),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 105),
+    (3, HUFFMAN_EMIT_SYMBOL, 111),
+    (6, HUFFMAN_EMIT_SYMBOL, 111),
+    (10, HUFFMAN_EMIT_SYMBOL, 111),
+    (15, HUFFMAN_EMIT_SYMBOL, 111),
+    (24, HUFFMAN_EMIT_SYMBOL, 111),
+    (31, HUFFMAN_EMIT_SYMBOL, 111),
+    (41, HUFFMAN_EMIT_SYMBOL, 111),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 111),
+
+    # Node 9
+    (1, HUFFMAN_EMIT_SYMBOL, 115),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 115),
+    (1, HUFFMAN_EMIT_SYMBOL, 116),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 116),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 32),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 37),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 45),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 46),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 47),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 51),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 52),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 53),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 54),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 55),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 56),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 57),
+
+    # Node 10
+    (2, HUFFMAN_EMIT_SYMBOL, 115),
+    (9, HUFFMAN_EMIT_SYMBOL, 115),
+    (23, HUFFMAN_EMIT_SYMBOL, 115),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 115),
+    (2, HUFFMAN_EMIT_SYMBOL, 116),
+    (9, HUFFMAN_EMIT_SYMBOL, 116),
+    (23, HUFFMAN_EMIT_SYMBOL, 116),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 116),
+    (1, HUFFMAN_EMIT_SYMBOL, 32),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 32),
+    (1, HUFFMAN_EMIT_SYMBOL, 37),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 37),
+    (1, HUFFMAN_EMIT_SYMBOL, 45),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 45),
+    (1, HUFFMAN_EMIT_SYMBOL, 46),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 46),
+
+    # Node 11
+    (3, HUFFMAN_EMIT_SYMBOL, 115),
+    (6, HUFFMAN_EMIT_SYMBOL, 115),
+    (10, HUFFMAN_EMIT_SYMBOL, 115),
+    (15, HUFFMAN_EMIT_SYMBOL, 115),
+    (24, HUFFMAN_EMIT_SYMBOL, 115),
+    (31, HUFFMAN_EMIT_SYMBOL, 115),
+    (41, HUFFMAN_EMIT_SYMBOL, 115),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 115),
+    (3, HUFFMAN_EMIT_SYMBOL, 116),
+    (6, HUFFMAN_EMIT_SYMBOL, 116),
+    (10, HUFFMAN_EMIT_SYMBOL, 116),
+    (15, HUFFMAN_EMIT_SYMBOL, 116),
+    (24, HUFFMAN_EMIT_SYMBOL, 116),
+    (31, HUFFMAN_EMIT_SYMBOL, 116),
+    (41, HUFFMAN_EMIT_SYMBOL, 116),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 116),
+
+    # Node 12
+    (2, HUFFMAN_EMIT_SYMBOL, 32),
+    (9, HUFFMAN_EMIT_SYMBOL, 32),
+    (23, HUFFMAN_EMIT_SYMBOL, 32),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 32),
+    (2, HUFFMAN_EMIT_SYMBOL, 37),
+    (9, HUFFMAN_EMIT_SYMBOL, 37),
+    (23, HUFFMAN_EMIT_SYMBOL, 37),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 37),
+    (2, HUFFMAN_EMIT_SYMBOL, 45),
+    (9, HUFFMAN_EMIT_SYMBOL, 45),
+    (23, HUFFMAN_EMIT_SYMBOL, 45),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 45),
+    (2, HUFFMAN_EMIT_SYMBOL, 46),
+    (9, HUFFMAN_EMIT_SYMBOL, 46),
+    (23, HUFFMAN_EMIT_SYMBOL, 46),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 46),
+
+    # Node 13
+    (3, HUFFMAN_EMIT_SYMBOL, 32),
+    (6, HUFFMAN_EMIT_SYMBOL, 32),
+    (10, HUFFMAN_EMIT_SYMBOL, 32),
+    (15, HUFFMAN_EMIT_SYMBOL, 32),
+    (24, HUFFMAN_EMIT_SYMBOL, 32),
+    (31, HUFFMAN_EMIT_SYMBOL, 32),
+    (41, HUFFMAN_EMIT_SYMBOL, 32),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 32),
+    (3, HUFFMAN_EMIT_SYMBOL, 37),
+    (6, HUFFMAN_EMIT_SYMBOL, 37),
+    (10, HUFFMAN_EMIT_SYMBOL, 37),
+    (15, HUFFMAN_EMIT_SYMBOL, 37),
+    (24, HUFFMAN_EMIT_SYMBOL, 37),
+    (31, HUFFMAN_EMIT_SYMBOL, 37),
+    (41, HUFFMAN_EMIT_SYMBOL, 37),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 37),
+
+    # Node 14
+    (3, HUFFMAN_EMIT_SYMBOL, 45),
+    (6, HUFFMAN_EMIT_SYMBOL, 45),
+    (10, HUFFMAN_EMIT_SYMBOL, 45),
+    (15, HUFFMAN_EMIT_SYMBOL, 45),
+    (24, HUFFMAN_EMIT_SYMBOL, 45),
+    (31, HUFFMAN_EMIT_SYMBOL, 45),
+    (41, HUFFMAN_EMIT_SYMBOL, 45),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 45),
+    (3, HUFFMAN_EMIT_SYMBOL, 46),
+    (6, HUFFMAN_EMIT_SYMBOL, 46),
+    (10, HUFFMAN_EMIT_SYMBOL, 46),
+    (15, HUFFMAN_EMIT_SYMBOL, 46),
+    (24, HUFFMAN_EMIT_SYMBOL, 46),
+    (31, HUFFMAN_EMIT_SYMBOL, 46),
+    (41, HUFFMAN_EMIT_SYMBOL, 46),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 46),
+
+    # Node 15
+    (1, HUFFMAN_EMIT_SYMBOL, 47),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 47),
+    (1, HUFFMAN_EMIT_SYMBOL, 51),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 51),
+    (1, HUFFMAN_EMIT_SYMBOL, 52),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 52),
+    (1, HUFFMAN_EMIT_SYMBOL, 53),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 53),
+    (1, HUFFMAN_EMIT_SYMBOL, 54),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 54),
+    (1, HUFFMAN_EMIT_SYMBOL, 55),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 55),
+    (1, HUFFMAN_EMIT_SYMBOL, 56),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 56),
+    (1, HUFFMAN_EMIT_SYMBOL, 57),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 57),
+
+    # Node 16
+    (2, HUFFMAN_EMIT_SYMBOL, 47),
+    (9, HUFFMAN_EMIT_SYMBOL, 47),
+    (23, HUFFMAN_EMIT_SYMBOL, 47),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 47),
+    (2, HUFFMAN_EMIT_SYMBOL, 51),
+    (9, HUFFMAN_EMIT_SYMBOL, 51),
+    (23, HUFFMAN_EMIT_SYMBOL, 51),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 51),
+    (2, HUFFMAN_EMIT_SYMBOL, 52),
+    (9, HUFFMAN_EMIT_SYMBOL, 52),
+    (23, HUFFMAN_EMIT_SYMBOL, 52),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 52),
+    (2, HUFFMAN_EMIT_SYMBOL, 53),
+    (9, HUFFMAN_EMIT_SYMBOL, 53),
+    (23, HUFFMAN_EMIT_SYMBOL, 53),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 53),
+
+    # Node 17
+    (3, HUFFMAN_EMIT_SYMBOL, 47),
+    (6, HUFFMAN_EMIT_SYMBOL, 47),
+    (10, HUFFMAN_EMIT_SYMBOL, 47),
+    (15, HUFFMAN_EMIT_SYMBOL, 47),
+    (24, HUFFMAN_EMIT_SYMBOL, 47),
+    (31, HUFFMAN_EMIT_SYMBOL, 47),
+    (41, HUFFMAN_EMIT_SYMBOL, 47),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 47),
+    (3, HUFFMAN_EMIT_SYMBOL, 51),
+    (6, HUFFMAN_EMIT_SYMBOL, 51),
+    (10, HUFFMAN_EMIT_SYMBOL, 51),
+    (15, HUFFMAN_EMIT_SYMBOL, 51),
+    (24, HUFFMAN_EMIT_SYMBOL, 51),
+    (31, HUFFMAN_EMIT_SYMBOL, 51),
+    (41, HUFFMAN_EMIT_SYMBOL, 51),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 51),
+
+    # Node 18
+    (3, HUFFMAN_EMIT_SYMBOL, 52),
+    (6, HUFFMAN_EMIT_SYMBOL, 52),
+    (10, HUFFMAN_EMIT_SYMBOL, 52),
+    (15, HUFFMAN_EMIT_SYMBOL, 52),
+    (24, HUFFMAN_EMIT_SYMBOL, 52),
+    (31, HUFFMAN_EMIT_SYMBOL, 52),
+    (41, HUFFMAN_EMIT_SYMBOL, 52),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 52),
+    (3, HUFFMAN_EMIT_SYMBOL, 53),
+    (6, HUFFMAN_EMIT_SYMBOL, 53),
+    (10, HUFFMAN_EMIT_SYMBOL, 53),
+    (15, HUFFMAN_EMIT_SYMBOL, 53),
+    (24, HUFFMAN_EMIT_SYMBOL, 53),
+    (31, HUFFMAN_EMIT_SYMBOL, 53),
+    (41, HUFFMAN_EMIT_SYMBOL, 53),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 53),
+
+    # Node 19
+    (2, HUFFMAN_EMIT_SYMBOL, 54),
+    (9, HUFFMAN_EMIT_SYMBOL, 54),
+    (23, HUFFMAN_EMIT_SYMBOL, 54),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 54),
+    (2, HUFFMAN_EMIT_SYMBOL, 55),
+    (9, HUFFMAN_EMIT_SYMBOL, 55),
+    (23, HUFFMAN_EMIT_SYMBOL, 55),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 55),
+    (2, HUFFMAN_EMIT_SYMBOL, 56),
+    (9, HUFFMAN_EMIT_SYMBOL, 56),
+    (23, HUFFMAN_EMIT_SYMBOL, 56),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 56),
+    (2, HUFFMAN_EMIT_SYMBOL, 57),
+    (9, HUFFMAN_EMIT_SYMBOL, 57),
+    (23, HUFFMAN_EMIT_SYMBOL, 57),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 57),
+
+    # Node 20
+    (3, HUFFMAN_EMIT_SYMBOL, 54),
+    (6, HUFFMAN_EMIT_SYMBOL, 54),
+    (10, HUFFMAN_EMIT_SYMBOL, 54),
+    (15, HUFFMAN_EMIT_SYMBOL, 54),
+    (24, HUFFMAN_EMIT_SYMBOL, 54),
+    (31, HUFFMAN_EMIT_SYMBOL, 54),
+    (41, HUFFMAN_EMIT_SYMBOL, 54),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 54),
+    (3, HUFFMAN_EMIT_SYMBOL, 55),
+    (6, HUFFMAN_EMIT_SYMBOL, 55),
+    (10, HUFFMAN_EMIT_SYMBOL, 55),
+    (15, HUFFMAN_EMIT_SYMBOL, 55),
+    (24, HUFFMAN_EMIT_SYMBOL, 55),
+    (31, HUFFMAN_EMIT_SYMBOL, 55),
+    (41, HUFFMAN_EMIT_SYMBOL, 55),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 55),
+
+    # Node 21
+    (3, HUFFMAN_EMIT_SYMBOL, 56),
+    (6, HUFFMAN_EMIT_SYMBOL, 56),
+    (10, HUFFMAN_EMIT_SYMBOL, 56),
+    (15, HUFFMAN_EMIT_SYMBOL, 56),
+    (24, HUFFMAN_EMIT_SYMBOL, 56),
+    (31, HUFFMAN_EMIT_SYMBOL, 56),
+    (41, HUFFMAN_EMIT_SYMBOL, 56),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 56),
+    (3, HUFFMAN_EMIT_SYMBOL, 57),
+    (6, HUFFMAN_EMIT_SYMBOL, 57),
+    (10, HUFFMAN_EMIT_SYMBOL, 57),
+    (15, HUFFMAN_EMIT_SYMBOL, 57),
+    (24, HUFFMAN_EMIT_SYMBOL, 57),
+    (31, HUFFMAN_EMIT_SYMBOL, 57),
+    (41, HUFFMAN_EMIT_SYMBOL, 57),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 57),
+
+    # Node 22
+    (26, 0, 0),
+    (27, 0, 0),
+    (29, 0, 0),
+    (30, 0, 0),
+    (33, 0, 0),
+    (34, 0, 0),
+    (36, 0, 0),
+    (37, 0, 0),
+    (43, 0, 0),
+    (46, 0, 0),
+    (50, 0, 0),
+    (53, 0, 0),
+    (58, 0, 0),
+    (61, 0, 0),
+    (65, 0, 0),
+    (68, HUFFMAN_COMPLETE, 0),
+
+    # Node 23
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 61),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 65),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 95),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 98),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 100),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 102),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 103),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 104),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 108),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 109),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 110),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 112),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 114),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 117),
+    (38, 0, 0),
+    (39, 0, 0),
+
+    # Node 24
+    (1, HUFFMAN_EMIT_SYMBOL, 61),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 61),
+    (1, HUFFMAN_EMIT_SYMBOL, 65),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 65),
+    (1, HUFFMAN_EMIT_SYMBOL, 95),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 95),
+    (1, HUFFMAN_EMIT_SYMBOL, 98),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 98),
+    (1, HUFFMAN_EMIT_SYMBOL, 100),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 100),
+    (1, HUFFMAN_EMIT_SYMBOL, 102),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 102),
+    (1, HUFFMAN_EMIT_SYMBOL, 103),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 103),
+    (1, HUFFMAN_EMIT_SYMBOL, 104),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 104),
+
+    # Node 25
+    (2, HUFFMAN_EMIT_SYMBOL, 61),
+    (9, HUFFMAN_EMIT_SYMBOL, 61),
+    (23, HUFFMAN_EMIT_SYMBOL, 61),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 61),
+    (2, HUFFMAN_EMIT_SYMBOL, 65),
+    (9, HUFFMAN_EMIT_SYMBOL, 65),
+    (23, HUFFMAN_EMIT_SYMBOL, 65),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 65),
+    (2, HUFFMAN_EMIT_SYMBOL, 95),
+    (9, HUFFMAN_EMIT_SYMBOL, 95),
+    (23, HUFFMAN_EMIT_SYMBOL, 95),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 95),
+    (2, HUFFMAN_EMIT_SYMBOL, 98),
+    (9, HUFFMAN_EMIT_SYMBOL, 98),
+    (23, HUFFMAN_EMIT_SYMBOL, 98),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 98),
+
+    # Node 26
+    (3, HUFFMAN_EMIT_SYMBOL, 61),
+    (6, HUFFMAN_EMIT_SYMBOL, 61),
+    (10, HUFFMAN_EMIT_SYMBOL, 61),
+    (15, HUFFMAN_EMIT_SYMBOL, 61),
+    (24, HUFFMAN_EMIT_SYMBOL, 61),
+    (31, HUFFMAN_EMIT_SYMBOL, 61),
+    (41, HUFFMAN_EMIT_SYMBOL, 61),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 61),
+    (3, HUFFMAN_EMIT_SYMBOL, 65),
+    (6, HUFFMAN_EMIT_SYMBOL, 65),
+    (10, HUFFMAN_EMIT_SYMBOL, 65),
+    (15, HUFFMAN_EMIT_SYMBOL, 65),
+    (24, HUFFMAN_EMIT_SYMBOL, 65),
+    (31, HUFFMAN_EMIT_SYMBOL, 65),
+    (41, HUFFMAN_EMIT_SYMBOL, 65),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 65),
+
+    # Node 27
+    (3, HUFFMAN_EMIT_SYMBOL, 95),
+    (6, HUFFMAN_EMIT_SYMBOL, 95),
+    (10, HUFFMAN_EMIT_SYMBOL, 95),
+    (15, HUFFMAN_EMIT_SYMBOL, 95),
+    (24, HUFFMAN_EMIT_SYMBOL, 95),
+    (31, HUFFMAN_EMIT_SYMBOL, 95),
+    (41, HUFFMAN_EMIT_SYMBOL, 95),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 95),
+    (3, HUFFMAN_EMIT_SYMBOL, 98),
+    (6, HUFFMAN_EMIT_SYMBOL, 98),
+    (10, HUFFMAN_EMIT_SYMBOL, 98),
+    (15, HUFFMAN_EMIT_SYMBOL, 98),
+    (24, HUFFMAN_EMIT_SYMBOL, 98),
+    (31, HUFFMAN_EMIT_SYMBOL, 98),
+    (41, HUFFMAN_EMIT_SYMBOL, 98),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 98),
+
+    # Node 28
+    (2, HUFFMAN_EMIT_SYMBOL, 100),
+    (9, HUFFMAN_EMIT_SYMBOL, 100),
+    (23, HUFFMAN_EMIT_SYMBOL, 100),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 100),
+    (2, HUFFMAN_EMIT_SYMBOL, 102),
+    (9, HUFFMAN_EMIT_SYMBOL, 102),
+    (23, HUFFMAN_EMIT_SYMBOL, 102),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 102),
+    (2, HUFFMAN_EMIT_SYMBOL, 103),
+    (9, HUFFMAN_EMIT_SYMBOL, 103),
+    (23, HUFFMAN_EMIT_SYMBOL, 103),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 103),
+    (2, HUFFMAN_EMIT_SYMBOL, 104),
+    (9, HUFFMAN_EMIT_SYMBOL, 104),
+    (23, HUFFMAN_EMIT_SYMBOL, 104),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 104),
+
+    # Node 29
+    (3, HUFFMAN_EMIT_SYMBOL, 100),
+    (6, HUFFMAN_EMIT_SYMBOL, 100),
+    (10, HUFFMAN_EMIT_SYMBOL, 100),
+    (15, HUFFMAN_EMIT_SYMBOL, 100),
+    (24, HUFFMAN_EMIT_SYMBOL, 100),
+    (31, HUFFMAN_EMIT_SYMBOL, 100),
+    (41, HUFFMAN_EMIT_SYMBOL, 100),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 100),
+    (3, HUFFMAN_EMIT_SYMBOL, 102),
+    (6, HUFFMAN_EMIT_SYMBOL, 102),
+    (10, HUFFMAN_EMIT_SYMBOL, 102),
+    (15, HUFFMAN_EMIT_SYMBOL, 102),
+    (24, HUFFMAN_EMIT_SYMBOL, 102),
+    (31, HUFFMAN_EMIT_SYMBOL, 102),
+    (41, HUFFMAN_EMIT_SYMBOL, 102),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 102),
+
+    # Node 30
+    (3, HUFFMAN_EMIT_SYMBOL, 103),
+    (6, HUFFMAN_EMIT_SYMBOL, 103),
+    (10, HUFFMAN_EMIT_SYMBOL, 103),
+    (15, HUFFMAN_EMIT_SYMBOL, 103),
+    (24, HUFFMAN_EMIT_SYMBOL, 103),
+    (31, HUFFMAN_EMIT_SYMBOL, 103),
+    (41, HUFFMAN_EMIT_SYMBOL, 103),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 103),
+    (3, HUFFMAN_EMIT_SYMBOL, 104),
+    (6, HUFFMAN_EMIT_SYMBOL, 104),
+    (10, HUFFMAN_EMIT_SYMBOL, 104),
+    (15, HUFFMAN_EMIT_SYMBOL, 104),
+    (24, HUFFMAN_EMIT_SYMBOL, 104),
+    (31, HUFFMAN_EMIT_SYMBOL, 104),
+    (41, HUFFMAN_EMIT_SYMBOL, 104),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 104),
+
+    # Node 31
+    (1, HUFFMAN_EMIT_SYMBOL, 108),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 108),
+    (1, HUFFMAN_EMIT_SYMBOL, 109),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 109),
+    (1, HUFFMAN_EMIT_SYMBOL, 110),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 110),
+    (1, HUFFMAN_EMIT_SYMBOL, 112),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 112),
+    (1, HUFFMAN_EMIT_SYMBOL, 114),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 114),
+    (1, HUFFMAN_EMIT_SYMBOL, 117),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 117),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 58),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 66),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 67),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 68),
+
+    # Node 32
+    (2, HUFFMAN_EMIT_SYMBOL, 108),
+    (9, HUFFMAN_EMIT_SYMBOL, 108),
+    (23, HUFFMAN_EMIT_SYMBOL, 108),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 108),
+    (2, HUFFMAN_EMIT_SYMBOL, 109),
+    (9, HUFFMAN_EMIT_SYMBOL, 109),
+    (23, HUFFMAN_EMIT_SYMBOL, 109),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 109),
+    (2, HUFFMAN_EMIT_SYMBOL, 110),
+    (9, HUFFMAN_EMIT_SYMBOL, 110),
+    (23, HUFFMAN_EMIT_SYMBOL, 110),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 110),
+    (2, HUFFMAN_EMIT_SYMBOL, 112),
+    (9, HUFFMAN_EMIT_SYMBOL, 112),
+    (23, HUFFMAN_EMIT_SYMBOL, 112),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 112),
+
+    # Node 33
+    (3, HUFFMAN_EMIT_SYMBOL, 108),
+    (6, HUFFMAN_EMIT_SYMBOL, 108),
+    (10, HUFFMAN_EMIT_SYMBOL, 108),
+    (15, HUFFMAN_EMIT_SYMBOL, 108),
+    (24, HUFFMAN_EMIT_SYMBOL, 108),
+    (31, HUFFMAN_EMIT_SYMBOL, 108),
+    (41, HUFFMAN_EMIT_SYMBOL, 108),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 108),
+    (3, HUFFMAN_EMIT_SYMBOL, 109),
+    (6, HUFFMAN_EMIT_SYMBOL, 109),
+    (10, HUFFMAN_EMIT_SYMBOL, 109),
+    (15, HUFFMAN_EMIT_SYMBOL, 109),
+    (24, HUFFMAN_EMIT_SYMBOL, 109),
+    (31, HUFFMAN_EMIT_SYMBOL, 109),
+    (41, HUFFMAN_EMIT_SYMBOL, 109),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 109),
+
+    # Node 34
+    (3, HUFFMAN_EMIT_SYMBOL, 110),
+    (6, HUFFMAN_EMIT_SYMBOL, 110),
+    (10, HUFFMAN_EMIT_SYMBOL, 110),
+    (15, HUFFMAN_EMIT_SYMBOL, 110),
+    (24, HUFFMAN_EMIT_SYMBOL, 110),
+    (31, HUFFMAN_EMIT_SYMBOL, 110),
+    (41, HUFFMAN_EMIT_SYMBOL, 110),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 110),
+    (3, HUFFMAN_EMIT_SYMBOL, 112),
+    (6, HUFFMAN_EMIT_SYMBOL, 112),
+    (10, HUFFMAN_EMIT_SYMBOL, 112),
+    (15, HUFFMAN_EMIT_SYMBOL, 112),
+    (24, HUFFMAN_EMIT_SYMBOL, 112),
+    (31, HUFFMAN_EMIT_SYMBOL, 112),
+    (41, HUFFMAN_EMIT_SYMBOL, 112),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 112),
+
+    # Node 35
+    (2, HUFFMAN_EMIT_SYMBOL, 114),
+    (9, HUFFMAN_EMIT_SYMBOL, 114),
+    (23, HUFFMAN_EMIT_SYMBOL, 114),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 114),
+    (2, HUFFMAN_EMIT_SYMBOL, 117),
+    (9, HUFFMAN_EMIT_SYMBOL, 117),
+    (23, HUFFMAN_EMIT_SYMBOL, 117),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 117),
+    (1, HUFFMAN_EMIT_SYMBOL, 58),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 58),
+    (1, HUFFMAN_EMIT_SYMBOL, 66),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 66),
+    (1, HUFFMAN_EMIT_SYMBOL, 67),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 67),
+    (1, HUFFMAN_EMIT_SYMBOL, 68),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 68),
+
+    # Node 36
+    (3, HUFFMAN_EMIT_SYMBOL, 114),
+    (6, HUFFMAN_EMIT_SYMBOL, 114),
+    (10, HUFFMAN_EMIT_SYMBOL, 114),
+    (15, HUFFMAN_EMIT_SYMBOL, 114),
+    (24, HUFFMAN_EMIT_SYMBOL, 114),
+    (31, HUFFMAN_EMIT_SYMBOL, 114),
+    (41, HUFFMAN_EMIT_SYMBOL, 114),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 114),
+    (3, HUFFMAN_EMIT_SYMBOL, 117),
+    (6, HUFFMAN_EMIT_SYMBOL, 117),
+    (10, HUFFMAN_EMIT_SYMBOL, 117),
+    (15, HUFFMAN_EMIT_SYMBOL, 117),
+    (24, HUFFMAN_EMIT_SYMBOL, 117),
+    (31, HUFFMAN_EMIT_SYMBOL, 117),
+    (41, HUFFMAN_EMIT_SYMBOL, 117),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 117),
+
+    # Node 37
+    (2, HUFFMAN_EMIT_SYMBOL, 58),
+    (9, HUFFMAN_EMIT_SYMBOL, 58),
+    (23, HUFFMAN_EMIT_SYMBOL, 58),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 58),
+    (2, HUFFMAN_EMIT_SYMBOL, 66),
+    (9, HUFFMAN_EMIT_SYMBOL, 66),
+    (23, HUFFMAN_EMIT_SYMBOL, 66),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 66),
+    (2, HUFFMAN_EMIT_SYMBOL, 67),
+    (9, HUFFMAN_EMIT_SYMBOL, 67),
+    (23, HUFFMAN_EMIT_SYMBOL, 67),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 67),
+    (2, HUFFMAN_EMIT_SYMBOL, 68),
+    (9, HUFFMAN_EMIT_SYMBOL, 68),
+    (23, HUFFMAN_EMIT_SYMBOL, 68),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 68),
+
+    # Node 38
+    (3, HUFFMAN_EMIT_SYMBOL, 58),
+    (6, HUFFMAN_EMIT_SYMBOL, 58),
+    (10, HUFFMAN_EMIT_SYMBOL, 58),
+    (15, HUFFMAN_EMIT_SYMBOL, 58),
+    (24, HUFFMAN_EMIT_SYMBOL, 58),
+    (31, HUFFMAN_EMIT_SYMBOL, 58),
+    (41, HUFFMAN_EMIT_SYMBOL, 58),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 58),
+    (3, HUFFMAN_EMIT_SYMBOL, 66),
+    (6, HUFFMAN_EMIT_SYMBOL, 66),
+    (10, HUFFMAN_EMIT_SYMBOL, 66),
+    (15, HUFFMAN_EMIT_SYMBOL, 66),
+    (24, HUFFMAN_EMIT_SYMBOL, 66),
+    (31, HUFFMAN_EMIT_SYMBOL, 66),
+    (41, HUFFMAN_EMIT_SYMBOL, 66),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 66),
+
+    # Node 39
+    (3, HUFFMAN_EMIT_SYMBOL, 67),
+    (6, HUFFMAN_EMIT_SYMBOL, 67),
+    (10, HUFFMAN_EMIT_SYMBOL, 67),
+    (15, HUFFMAN_EMIT_SYMBOL, 67),
+    (24, HUFFMAN_EMIT_SYMBOL, 67),
+    (31, HUFFMAN_EMIT_SYMBOL, 67),
+    (41, HUFFMAN_EMIT_SYMBOL, 67),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 67),
+    (3, HUFFMAN_EMIT_SYMBOL, 68),
+    (6, HUFFMAN_EMIT_SYMBOL, 68),
+    (10, HUFFMAN_EMIT_SYMBOL, 68),
+    (15, HUFFMAN_EMIT_SYMBOL, 68),
+    (24, HUFFMAN_EMIT_SYMBOL, 68),
+    (31, HUFFMAN_EMIT_SYMBOL, 68),
+    (41, HUFFMAN_EMIT_SYMBOL, 68),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 68),
+
+    # Node 40
+    (44, 0, 0),
+    (45, 0, 0),
+    (47, 0, 0),
+    (48, 0, 0),
+    (51, 0, 0),
+    (52, 0, 0),
+    (54, 0, 0),
+    (55, 0, 0),
+    (59, 0, 0),
+    (60, 0, 0),
+    (62, 0, 0),
+    (63, 0, 0),
+    (66, 0, 0),
+    (67, 0, 0),
+    (69, 0, 0),
+    (72, HUFFMAN_COMPLETE, 0),
+
+    # Node 41
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 69),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 70),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 71),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 72),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 73),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 74),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 75),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 76),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 77),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 78),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 79),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 80),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 81),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 82),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 83),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 84),
+
+    # Node 42
+    (1, HUFFMAN_EMIT_SYMBOL, 69),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 69),
+    (1, HUFFMAN_EMIT_SYMBOL, 70),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 70),
+    (1, HUFFMAN_EMIT_SYMBOL, 71),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 71),
+    (1, HUFFMAN_EMIT_SYMBOL, 72),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 72),
+    (1, HUFFMAN_EMIT_SYMBOL, 73),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 73),
+    (1, HUFFMAN_EMIT_SYMBOL, 74),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 74),
+    (1, HUFFMAN_EMIT_SYMBOL, 75),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 75),
+    (1, HUFFMAN_EMIT_SYMBOL, 76),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 76),
+
+    # Node 43
+    (2, HUFFMAN_EMIT_SYMBOL, 69),
+    (9, HUFFMAN_EMIT_SYMBOL, 69),
+    (23, HUFFMAN_EMIT_SYMBOL, 69),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 69),
+    (2, HUFFMAN_EMIT_SYMBOL, 70),
+    (9, HUFFMAN_EMIT_SYMBOL, 70),
+    (23, HUFFMAN_EMIT_SYMBOL, 70),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 70),
+    (2, HUFFMAN_EMIT_SYMBOL, 71),
+    (9, HUFFMAN_EMIT_SYMBOL, 71),
+    (23, HUFFMAN_EMIT_SYMBOL, 71),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 71),
+    (2, HUFFMAN_EMIT_SYMBOL, 72),
+    (9, HUFFMAN_EMIT_SYMBOL, 72),
+    (23, HUFFMAN_EMIT_SYMBOL, 72),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 72),
+
+    # Node 44
+    (3, HUFFMAN_EMIT_SYMBOL, 69),
+    (6, HUFFMAN_EMIT_SYMBOL, 69),
+    (10, HUFFMAN_EMIT_SYMBOL, 69),
+    (15, HUFFMAN_EMIT_SYMBOL, 69),
+    (24, HUFFMAN_EMIT_SYMBOL, 69),
+    (31, HUFFMAN_EMIT_SYMBOL, 69),
+    (41, HUFFMAN_EMIT_SYMBOL, 69),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 69),
+    (3, HUFFMAN_EMIT_SYMBOL, 70),
+    (6, HUFFMAN_EMIT_SYMBOL, 70),
+    (10, HUFFMAN_EMIT_SYMBOL, 70),
+    (15, HUFFMAN_EMIT_SYMBOL, 70),
+    (24, HUFFMAN_EMIT_SYMBOL, 70),
+    (31, HUFFMAN_EMIT_SYMBOL, 70),
+    (41, HUFFMAN_EMIT_SYMBOL, 70),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 70),
+
+    # Node 45
+    (3, HUFFMAN_EMIT_SYMBOL, 71),
+    (6, HUFFMAN_EMIT_SYMBOL, 71),
+    (10, HUFFMAN_EMIT_SYMBOL, 71),
+    (15, HUFFMAN_EMIT_SYMBOL, 71),
+    (24, HUFFMAN_EMIT_SYMBOL, 71),
+    (31, HUFFMAN_EMIT_SYMBOL, 71),
+    (41, HUFFMAN_EMIT_SYMBOL, 71),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 71),
+    (3, HUFFMAN_EMIT_SYMBOL, 72),
+    (6, HUFFMAN_EMIT_SYMBOL, 72),
+    (10, HUFFMAN_EMIT_SYMBOL, 72),
+    (15, HUFFMAN_EMIT_SYMBOL, 72),
+    (24, HUFFMAN_EMIT_SYMBOL, 72),
+    (31, HUFFMAN_EMIT_SYMBOL, 72),
+    (41, HUFFMAN_EMIT_SYMBOL, 72),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 72),
+
+    # Node 46
+    (2, HUFFMAN_EMIT_SYMBOL, 73),
+    (9, HUFFMAN_EMIT_SYMBOL, 73),
+    (23, HUFFMAN_EMIT_SYMBOL, 73),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 73),
+    (2, HUFFMAN_EMIT_SYMBOL, 74),
+    (9, HUFFMAN_EMIT_SYMBOL, 74),
+    (23, HUFFMAN_EMIT_SYMBOL, 74),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 74),
+    (2, HUFFMAN_EMIT_SYMBOL, 75),
+    (9, HUFFMAN_EMIT_SYMBOL, 75),
+    (23, HUFFMAN_EMIT_SYMBOL, 75),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 75),
+    (2, HUFFMAN_EMIT_SYMBOL, 76),
+    (9, HUFFMAN_EMIT_SYMBOL, 76),
+    (23, HUFFMAN_EMIT_SYMBOL, 76),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 76),
+
+    # Node 47
+    (3, HUFFMAN_EMIT_SYMBOL, 73),
+    (6, HUFFMAN_EMIT_SYMBOL, 73),
+    (10, HUFFMAN_EMIT_SYMBOL, 73),
+    (15, HUFFMAN_EMIT_SYMBOL, 73),
+    (24, HUFFMAN_EMIT_SYMBOL, 73),
+    (31, HUFFMAN_EMIT_SYMBOL, 73),
+    (41, HUFFMAN_EMIT_SYMBOL, 73),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 73),
+    (3, HUFFMAN_EMIT_SYMBOL, 74),
+    (6, HUFFMAN_EMIT_SYMBOL, 74),
+    (10, HUFFMAN_EMIT_SYMBOL, 74),
+    (15, HUFFMAN_EMIT_SYMBOL, 74),
+    (24, HUFFMAN_EMIT_SYMBOL, 74),
+    (31, HUFFMAN_EMIT_SYMBOL, 74),
+    (41, HUFFMAN_EMIT_SYMBOL, 74),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 74),
+
+    # Node 48
+    (3, HUFFMAN_EMIT_SYMBOL, 75),
+    (6, HUFFMAN_EMIT_SYMBOL, 75),
+    (10, HUFFMAN_EMIT_SYMBOL, 75),
+    (15, HUFFMAN_EMIT_SYMBOL, 75),
+    (24, HUFFMAN_EMIT_SYMBOL, 75),
+    (31, HUFFMAN_EMIT_SYMBOL, 75),
+    (41, HUFFMAN_EMIT_SYMBOL, 75),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 75),
+    (3, HUFFMAN_EMIT_SYMBOL, 76),
+    (6, HUFFMAN_EMIT_SYMBOL, 76),
+    (10, HUFFMAN_EMIT_SYMBOL, 76),
+    (15, HUFFMAN_EMIT_SYMBOL, 76),
+    (24, HUFFMAN_EMIT_SYMBOL, 76),
+    (31, HUFFMAN_EMIT_SYMBOL, 76),
+    (41, HUFFMAN_EMIT_SYMBOL, 76),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 76),
+
+    # Node 49
+    (1, HUFFMAN_EMIT_SYMBOL, 77),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 77),
+    (1, HUFFMAN_EMIT_SYMBOL, 78),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 78),
+    (1, HUFFMAN_EMIT_SYMBOL, 79),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 79),
+    (1, HUFFMAN_EMIT_SYMBOL, 80),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 80),
+    (1, HUFFMAN_EMIT_SYMBOL, 81),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 81),
+    (1, HUFFMAN_EMIT_SYMBOL, 82),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 82),
+    (1, HUFFMAN_EMIT_SYMBOL, 83),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 83),
+    (1, HUFFMAN_EMIT_SYMBOL, 84),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 84),
+
+    # Node 50
+    (2, HUFFMAN_EMIT_SYMBOL, 77),
+    (9, HUFFMAN_EMIT_SYMBOL, 77),
+    (23, HUFFMAN_EMIT_SYMBOL, 77),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 77),
+    (2, HUFFMAN_EMIT_SYMBOL, 78),
+    (9, HUFFMAN_EMIT_SYMBOL, 78),
+    (23, HUFFMAN_EMIT_SYMBOL, 78),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 78),
+    (2, HUFFMAN_EMIT_SYMBOL, 79),
+    (9, HUFFMAN_EMIT_SYMBOL, 79),
+    (23, HUFFMAN_EMIT_SYMBOL, 79),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 79),
+    (2, HUFFMAN_EMIT_SYMBOL, 80),
+    (9, HUFFMAN_EMIT_SYMBOL, 80),
+    (23, HUFFMAN_EMIT_SYMBOL, 80),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 80),
+
+    # Node 51
+    (3, HUFFMAN_EMIT_SYMBOL, 77),
+    (6, HUFFMAN_EMIT_SYMBOL, 77),
+    (10, HUFFMAN_EMIT_SYMBOL, 77),
+    (15, HUFFMAN_EMIT_SYMBOL, 77),
+    (24, HUFFMAN_EMIT_SYMBOL, 77),
+    (31, HUFFMAN_EMIT_SYMBOL, 77),
+    (41, HUFFMAN_EMIT_SYMBOL, 77),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 77),
+    (3, HUFFMAN_EMIT_SYMBOL, 78),
+    (6, HUFFMAN_EMIT_SYMBOL, 78),
+    (10, HUFFMAN_EMIT_SYMBOL, 78),
+    (15, HUFFMAN_EMIT_SYMBOL, 78),
+    (24, HUFFMAN_EMIT_SYMBOL, 78),
+    (31, HUFFMAN_EMIT_SYMBOL, 78),
+    (41, HUFFMAN_EMIT_SYMBOL, 78),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 78),
+
+    # Node 52
+    (3, HUFFMAN_EMIT_SYMBOL, 79),
+    (6, HUFFMAN_EMIT_SYMBOL, 79),
+    (10, HUFFMAN_EMIT_SYMBOL, 79),
+    (15, HUFFMAN_EMIT_SYMBOL, 79),
+    (24, HUFFMAN_EMIT_SYMBOL, 79),
+    (31, HUFFMAN_EMIT_SYMBOL, 79),
+    (41, HUFFMAN_EMIT_SYMBOL, 79),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 79),
+    (3, HUFFMAN_EMIT_SYMBOL, 80),
+    (6, HUFFMAN_EMIT_SYMBOL, 80),
+    (10, HUFFMAN_EMIT_SYMBOL, 80),
+    (15, HUFFMAN_EMIT_SYMBOL, 80),
+    (24, HUFFMAN_EMIT_SYMBOL, 80),
+    (31, HUFFMAN_EMIT_SYMBOL, 80),
+    (41, HUFFMAN_EMIT_SYMBOL, 80),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 80),
+
+    # Node 53
+    (2, HUFFMAN_EMIT_SYMBOL, 81),
+    (9, HUFFMAN_EMIT_SYMBOL, 81),
+    (23, HUFFMAN_EMIT_SYMBOL, 81),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 81),
+    (2, HUFFMAN_EMIT_SYMBOL, 82),
+    (9, HUFFMAN_EMIT_SYMBOL, 82),
+    (23, HUFFMAN_EMIT_SYMBOL, 82),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 82),
+    (2, HUFFMAN_EMIT_SYMBOL, 83),
+    (9, HUFFMAN_EMIT_SYMBOL, 83),
+    (23, HUFFMAN_EMIT_SYMBOL, 83),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 83),
+    (2, HUFFMAN_EMIT_SYMBOL, 84),
+    (9, HUFFMAN_EMIT_SYMBOL, 84),
+    (23, HUFFMAN_EMIT_SYMBOL, 84),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 84),
+
+    # Node 54
+    (3, HUFFMAN_EMIT_SYMBOL, 81),
+    (6, HUFFMAN_EMIT_SYMBOL, 81),
+    (10, HUFFMAN_EMIT_SYMBOL, 81),
+    (15, HUFFMAN_EMIT_SYMBOL, 81),
+    (24, HUFFMAN_EMIT_SYMBOL, 81),
+    (31, HUFFMAN_EMIT_SYMBOL, 81),
+    (41, HUFFMAN_EMIT_SYMBOL, 81),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 81),
+    (3, HUFFMAN_EMIT_SYMBOL, 82),
+    (6, HUFFMAN_EMIT_SYMBOL, 82),
+    (10, HUFFMAN_EMIT_SYMBOL, 82),
+    (15, HUFFMAN_EMIT_SYMBOL, 82),
+    (24, HUFFMAN_EMIT_SYMBOL, 82),
+    (31, HUFFMAN_EMIT_SYMBOL, 82),
+    (41, HUFFMAN_EMIT_SYMBOL, 82),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 82),
+
+    # Node 55
+    (3, HUFFMAN_EMIT_SYMBOL, 83),
+    (6, HUFFMAN_EMIT_SYMBOL, 83),
+    (10, HUFFMAN_EMIT_SYMBOL, 83),
+    (15, HUFFMAN_EMIT_SYMBOL, 83),
+    (24, HUFFMAN_EMIT_SYMBOL, 83),
+    (31, HUFFMAN_EMIT_SYMBOL, 83),
+    (41, HUFFMAN_EMIT_SYMBOL, 83),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 83),
+    (3, HUFFMAN_EMIT_SYMBOL, 84),
+    (6, HUFFMAN_EMIT_SYMBOL, 84),
+    (10, HUFFMAN_EMIT_SYMBOL, 84),
+    (15, HUFFMAN_EMIT_SYMBOL, 84),
+    (24, HUFFMAN_EMIT_SYMBOL, 84),
+    (31, HUFFMAN_EMIT_SYMBOL, 84),
+    (41, HUFFMAN_EMIT_SYMBOL, 84),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 84),
+
+    # Node 56
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 85),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 86),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 87),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 89),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 106),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 107),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 113),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 118),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 119),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 120),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 121),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 122),
+    (70, 0, 0),
+    (71, 0, 0),
+    (73, 0, 0),
+    (74, HUFFMAN_COMPLETE, 0),
+
+    # Node 57
+    (1, HUFFMAN_EMIT_SYMBOL, 85),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 85),
+    (1, HUFFMAN_EMIT_SYMBOL, 86),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 86),
+    (1, HUFFMAN_EMIT_SYMBOL, 87),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 87),
+    (1, HUFFMAN_EMIT_SYMBOL, 89),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 89),
+    (1, HUFFMAN_EMIT_SYMBOL, 106),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 106),
+    (1, HUFFMAN_EMIT_SYMBOL, 107),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 107),
+    (1, HUFFMAN_EMIT_SYMBOL, 113),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 113),
+    (1, HUFFMAN_EMIT_SYMBOL, 118),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 118),
+
+    # Node 58
+    (2, HUFFMAN_EMIT_SYMBOL, 85),
+    (9, HUFFMAN_EMIT_SYMBOL, 85),
+    (23, HUFFMAN_EMIT_SYMBOL, 85),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 85),
+    (2, HUFFMAN_EMIT_SYMBOL, 86),
+    (9, HUFFMAN_EMIT_SYMBOL, 86),
+    (23, HUFFMAN_EMIT_SYMBOL, 86),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 86),
+    (2, HUFFMAN_EMIT_SYMBOL, 87),
+    (9, HUFFMAN_EMIT_SYMBOL, 87),
+    (23, HUFFMAN_EMIT_SYMBOL, 87),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 87),
+    (2, HUFFMAN_EMIT_SYMBOL, 89),
+    (9, HUFFMAN_EMIT_SYMBOL, 89),
+    (23, HUFFMAN_EMIT_SYMBOL, 89),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 89),
+
+    # Node 59
+    (3, HUFFMAN_EMIT_SYMBOL, 85),
+    (6, HUFFMAN_EMIT_SYMBOL, 85),
+    (10, HUFFMAN_EMIT_SYMBOL, 85),
+    (15, HUFFMAN_EMIT_SYMBOL, 85),
+    (24, HUFFMAN_EMIT_SYMBOL, 85),
+    (31, HUFFMAN_EMIT_SYMBOL, 85),
+    (41, HUFFMAN_EMIT_SYMBOL, 85),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 85),
+    (3, HUFFMAN_EMIT_SYMBOL, 86),
+    (6, HUFFMAN_EMIT_SYMBOL, 86),
+    (10, HUFFMAN_EMIT_SYMBOL, 86),
+    (15, HUFFMAN_EMIT_SYMBOL, 86),
+    (24, HUFFMAN_EMIT_SYMBOL, 86),
+    (31, HUFFMAN_EMIT_SYMBOL, 86),
+    (41, HUFFMAN_EMIT_SYMBOL, 86),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 86),
+
+    # Node 60
+    (3, HUFFMAN_EMIT_SYMBOL, 87),
+    (6, HUFFMAN_EMIT_SYMBOL, 87),
+    (10, HUFFMAN_EMIT_SYMBOL, 87),
+    (15, HUFFMAN_EMIT_SYMBOL, 87),
+    (24, HUFFMAN_EMIT_SYMBOL, 87),
+    (31, HUFFMAN_EMIT_SYMBOL, 87),
+    (41, HUFFMAN_EMIT_SYMBOL, 87),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 87),
+    (3, HUFFMAN_EMIT_SYMBOL, 89),
+    (6, HUFFMAN_EMIT_SYMBOL, 89),
+    (10, HUFFMAN_EMIT_SYMBOL, 89),
+    (15, HUFFMAN_EMIT_SYMBOL, 89),
+    (24, HUFFMAN_EMIT_SYMBOL, 89),
+    (31, HUFFMAN_EMIT_SYMBOL, 89),
+    (41, HUFFMAN_EMIT_SYMBOL, 89),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 89),
+
+    # Node 61
+    (2, HUFFMAN_EMIT_SYMBOL, 106),
+    (9, HUFFMAN_EMIT_SYMBOL, 106),
+    (23, HUFFMAN_EMIT_SYMBOL, 106),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 106),
+    (2, HUFFMAN_EMIT_SYMBOL, 107),
+    (9, HUFFMAN_EMIT_SYMBOL, 107),
+    (23, HUFFMAN_EMIT_SYMBOL, 107),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 107),
+    (2, HUFFMAN_EMIT_SYMBOL, 113),
+    (9, HUFFMAN_EMIT_SYMBOL, 113),
+    (23, HUFFMAN_EMIT_SYMBOL, 113),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 113),
+    (2, HUFFMAN_EMIT_SYMBOL, 118),
+    (9, HUFFMAN_EMIT_SYMBOL, 118),
+    (23, HUFFMAN_EMIT_SYMBOL, 118),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 118),
+
+    # Node 62
+    (3, HUFFMAN_EMIT_SYMBOL, 106),
+    (6, HUFFMAN_EMIT_SYMBOL, 106),
+    (10, HUFFMAN_EMIT_SYMBOL, 106),
+    (15, HUFFMAN_EMIT_SYMBOL, 106),
+    (24, HUFFMAN_EMIT_SYMBOL, 106),
+    (31, HUFFMAN_EMIT_SYMBOL, 106),
+    (41, HUFFMAN_EMIT_SYMBOL, 106),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 106),
+    (3, HUFFMAN_EMIT_SYMBOL, 107),
+    (6, HUFFMAN_EMIT_SYMBOL, 107),
+    (10, HUFFMAN_EMIT_SYMBOL, 107),
+    (15, HUFFMAN_EMIT_SYMBOL, 107),
+    (24, HUFFMAN_EMIT_SYMBOL, 107),
+    (31, HUFFMAN_EMIT_SYMBOL, 107),
+    (41, HUFFMAN_EMIT_SYMBOL, 107),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 107),
+
+    # Node 63
+    (3, HUFFMAN_EMIT_SYMBOL, 113),
+    (6, HUFFMAN_EMIT_SYMBOL, 113),
+    (10, HUFFMAN_EMIT_SYMBOL, 113),
+    (15, HUFFMAN_EMIT_SYMBOL, 113),
+    (24, HUFFMAN_EMIT_SYMBOL, 113),
+    (31, HUFFMAN_EMIT_SYMBOL, 113),
+    (41, HUFFMAN_EMIT_SYMBOL, 113),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 113),
+    (3, HUFFMAN_EMIT_SYMBOL, 118),
+    (6, HUFFMAN_EMIT_SYMBOL, 118),
+    (10, HUFFMAN_EMIT_SYMBOL, 118),
+    (15, HUFFMAN_EMIT_SYMBOL, 118),
+    (24, HUFFMAN_EMIT_SYMBOL, 118),
+    (31, HUFFMAN_EMIT_SYMBOL, 118),
+    (41, HUFFMAN_EMIT_SYMBOL, 118),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 118),
+
+    # Node 64
+    (1, HUFFMAN_EMIT_SYMBOL, 119),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 119),
+    (1, HUFFMAN_EMIT_SYMBOL, 120),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 120),
+    (1, HUFFMAN_EMIT_SYMBOL, 121),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 121),
+    (1, HUFFMAN_EMIT_SYMBOL, 122),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 122),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 38),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 42),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 44),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 59),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 88),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 90),
+    (75, 0, 0),
+    (78, 0, 0),
+
+    # Node 65
+    (2, HUFFMAN_EMIT_SYMBOL, 119),
+    (9, HUFFMAN_EMIT_SYMBOL, 119),
+    (23, HUFFMAN_EMIT_SYMBOL, 119),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 119),
+    (2, HUFFMAN_EMIT_SYMBOL, 120),
+    (9, HUFFMAN_EMIT_SYMBOL, 120),
+    (23, HUFFMAN_EMIT_SYMBOL, 120),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 120),
+    (2, HUFFMAN_EMIT_SYMBOL, 121),
+    (9, HUFFMAN_EMIT_SYMBOL, 121),
+    (23, HUFFMAN_EMIT_SYMBOL, 121),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 121),
+    (2, HUFFMAN_EMIT_SYMBOL, 122),
+    (9, HUFFMAN_EMIT_SYMBOL, 122),
+    (23, HUFFMAN_EMIT_SYMBOL, 122),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 122),
+
+    # Node 66
+    (3, HUFFMAN_EMIT_SYMBOL, 119),
+    (6, HUFFMAN_EMIT_SYMBOL, 119),
+    (10, HUFFMAN_EMIT_SYMBOL, 119),
+    (15, HUFFMAN_EMIT_SYMBOL, 119),
+    (24, HUFFMAN_EMIT_SYMBOL, 119),
+    (31, HUFFMAN_EMIT_SYMBOL, 119),
+    (41, HUFFMAN_EMIT_SYMBOL, 119),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 119),
+    (3, HUFFMAN_EMIT_SYMBOL, 120),
+    (6, HUFFMAN_EMIT_SYMBOL, 120),
+    (10, HUFFMAN_EMIT_SYMBOL, 120),
+    (15, HUFFMAN_EMIT_SYMBOL, 120),
+    (24, HUFFMAN_EMIT_SYMBOL, 120),
+    (31, HUFFMAN_EMIT_SYMBOL, 120),
+    (41, HUFFMAN_EMIT_SYMBOL, 120),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 120),
+
+    # Node 67
+    (3, HUFFMAN_EMIT_SYMBOL, 121),
+    (6, HUFFMAN_EMIT_SYMBOL, 121),
+    (10, HUFFMAN_EMIT_SYMBOL, 121),
+    (15, HUFFMAN_EMIT_SYMBOL, 121),
+    (24, HUFFMAN_EMIT_SYMBOL, 121),
+    (31, HUFFMAN_EMIT_SYMBOL, 121),
+    (41, HUFFMAN_EMIT_SYMBOL, 121),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 121),
+    (3, HUFFMAN_EMIT_SYMBOL, 122),
+    (6, HUFFMAN_EMIT_SYMBOL, 122),
+    (10, HUFFMAN_EMIT_SYMBOL, 122),
+    (15, HUFFMAN_EMIT_SYMBOL, 122),
+    (24, HUFFMAN_EMIT_SYMBOL, 122),
+    (31, HUFFMAN_EMIT_SYMBOL, 122),
+    (41, HUFFMAN_EMIT_SYMBOL, 122),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 122),
+
+    # Node 68
+    (1, HUFFMAN_EMIT_SYMBOL, 38),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 38),
+    (1, HUFFMAN_EMIT_SYMBOL, 42),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 42),
+    (1, HUFFMAN_EMIT_SYMBOL, 44),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 44),
+    (1, HUFFMAN_EMIT_SYMBOL, 59),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 59),
+    (1, HUFFMAN_EMIT_SYMBOL, 88),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 88),
+    (1, HUFFMAN_EMIT_SYMBOL, 90),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 90),
+    (76, 0, 0),
+    (77, 0, 0),
+    (79, 0, 0),
+    (81, 0, 0),
+
+    # Node 69
+    (2, HUFFMAN_EMIT_SYMBOL, 38),
+    (9, HUFFMAN_EMIT_SYMBOL, 38),
+    (23, HUFFMAN_EMIT_SYMBOL, 38),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 38),
+    (2, HUFFMAN_EMIT_SYMBOL, 42),
+    (9, HUFFMAN_EMIT_SYMBOL, 42),
+    (23, HUFFMAN_EMIT_SYMBOL, 42),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 42),
+    (2, HUFFMAN_EMIT_SYMBOL, 44),
+    (9, HUFFMAN_EMIT_SYMBOL, 44),
+    (23, HUFFMAN_EMIT_SYMBOL, 44),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 44),
+    (2, HUFFMAN_EMIT_SYMBOL, 59),
+    (9, HUFFMAN_EMIT_SYMBOL, 59),
+    (23, HUFFMAN_EMIT_SYMBOL, 59),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 59),
+
+    # Node 70
+    (3, HUFFMAN_EMIT_SYMBOL, 38),
+    (6, HUFFMAN_EMIT_SYMBOL, 38),
+    (10, HUFFMAN_EMIT_SYMBOL, 38),
+    (15, HUFFMAN_EMIT_SYMBOL, 38),
+    (24, HUFFMAN_EMIT_SYMBOL, 38),
+    (31, HUFFMAN_EMIT_SYMBOL, 38),
+    (41, HUFFMAN_EMIT_SYMBOL, 38),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 38),
+    (3, HUFFMAN_EMIT_SYMBOL, 42),
+    (6, HUFFMAN_EMIT_SYMBOL, 42),
+    (10, HUFFMAN_EMIT_SYMBOL, 42),
+    (15, HUFFMAN_EMIT_SYMBOL, 42),
+    (24, HUFFMAN_EMIT_SYMBOL, 42),
+    (31, HUFFMAN_EMIT_SYMBOL, 42),
+    (41, HUFFMAN_EMIT_SYMBOL, 42),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 42),
+
+    # Node 71
+    (3, HUFFMAN_EMIT_SYMBOL, 44),
+    (6, HUFFMAN_EMIT_SYMBOL, 44),
+    (10, HUFFMAN_EMIT_SYMBOL, 44),
+    (15, HUFFMAN_EMIT_SYMBOL, 44),
+    (24, HUFFMAN_EMIT_SYMBOL, 44),
+    (31, HUFFMAN_EMIT_SYMBOL, 44),
+    (41, HUFFMAN_EMIT_SYMBOL, 44),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 44),
+    (3, HUFFMAN_EMIT_SYMBOL, 59),
+    (6, HUFFMAN_EMIT_SYMBOL, 59),
+    (10, HUFFMAN_EMIT_SYMBOL, 59),
+    (15, HUFFMAN_EMIT_SYMBOL, 59),
+    (24, HUFFMAN_EMIT_SYMBOL, 59),
+    (31, HUFFMAN_EMIT_SYMBOL, 59),
+    (41, HUFFMAN_EMIT_SYMBOL, 59),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 59),
+
+    # Node 72
+    (2, HUFFMAN_EMIT_SYMBOL, 88),
+    (9, HUFFMAN_EMIT_SYMBOL, 88),
+    (23, HUFFMAN_EMIT_SYMBOL, 88),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 88),
+    (2, HUFFMAN_EMIT_SYMBOL, 90),
+    (9, HUFFMAN_EMIT_SYMBOL, 90),
+    (23, HUFFMAN_EMIT_SYMBOL, 90),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 90),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 33),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 34),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 40),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 41),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 63),
+    (80, 0, 0),
+    (82, 0, 0),
+    (84, 0, 0),
+
+    # Node 73
+    (3, HUFFMAN_EMIT_SYMBOL, 88),
+    (6, HUFFMAN_EMIT_SYMBOL, 88),
+    (10, HUFFMAN_EMIT_SYMBOL, 88),
+    (15, HUFFMAN_EMIT_SYMBOL, 88),
+    (24, HUFFMAN_EMIT_SYMBOL, 88),
+    (31, HUFFMAN_EMIT_SYMBOL, 88),
+    (41, HUFFMAN_EMIT_SYMBOL, 88),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 88),
+    (3, HUFFMAN_EMIT_SYMBOL, 90),
+    (6, HUFFMAN_EMIT_SYMBOL, 90),
+    (10, HUFFMAN_EMIT_SYMBOL, 90),
+    (15, HUFFMAN_EMIT_SYMBOL, 90),
+    (24, HUFFMAN_EMIT_SYMBOL, 90),
+    (31, HUFFMAN_EMIT_SYMBOL, 90),
+    (41, HUFFMAN_EMIT_SYMBOL, 90),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 90),
+
+    # Node 74
+    (1, HUFFMAN_EMIT_SYMBOL, 33),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 33),
+    (1, HUFFMAN_EMIT_SYMBOL, 34),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 34),
+    (1, HUFFMAN_EMIT_SYMBOL, 40),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 40),
+    (1, HUFFMAN_EMIT_SYMBOL, 41),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 41),
+    (1, HUFFMAN_EMIT_SYMBOL, 63),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 63),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 39),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 43),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 124),
+    (83, 0, 0),
+    (85, 0, 0),
+    (88, 0, 0),
+
+    # Node 75
+    (2, HUFFMAN_EMIT_SYMBOL, 33),
+    (9, HUFFMAN_EMIT_SYMBOL, 33),
+    (23, HUFFMAN_EMIT_SYMBOL, 33),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 33),
+    (2, HUFFMAN_EMIT_SYMBOL, 34),
+    (9, HUFFMAN_EMIT_SYMBOL, 34),
+    (23, HUFFMAN_EMIT_SYMBOL, 34),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 34),
+    (2, HUFFMAN_EMIT_SYMBOL, 40),
+    (9, HUFFMAN_EMIT_SYMBOL, 40),
+    (23, HUFFMAN_EMIT_SYMBOL, 40),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 40),
+    (2, HUFFMAN_EMIT_SYMBOL, 41),
+    (9, HUFFMAN_EMIT_SYMBOL, 41),
+    (23, HUFFMAN_EMIT_SYMBOL, 41),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 41),
+
+    # Node 76
+    (3, HUFFMAN_EMIT_SYMBOL, 33),
+    (6, HUFFMAN_EMIT_SYMBOL, 33),
+    (10, HUFFMAN_EMIT_SYMBOL, 33),
+    (15, HUFFMAN_EMIT_SYMBOL, 33),
+    (24, HUFFMAN_EMIT_SYMBOL, 33),
+    (31, HUFFMAN_EMIT_SYMBOL, 33),
+    (41, HUFFMAN_EMIT_SYMBOL, 33),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 33),
+    (3, HUFFMAN_EMIT_SYMBOL, 34),
+    (6, HUFFMAN_EMIT_SYMBOL, 34),
+    (10, HUFFMAN_EMIT_SYMBOL, 34),
+    (15, HUFFMAN_EMIT_SYMBOL, 34),
+    (24, HUFFMAN_EMIT_SYMBOL, 34),
+    (31, HUFFMAN_EMIT_SYMBOL, 34),
+    (41, HUFFMAN_EMIT_SYMBOL, 34),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 34),
+
+    # Node 77
+    (3, HUFFMAN_EMIT_SYMBOL, 40),
+    (6, HUFFMAN_EMIT_SYMBOL, 40),
+    (10, HUFFMAN_EMIT_SYMBOL, 40),
+    (15, HUFFMAN_EMIT_SYMBOL, 40),
+    (24, HUFFMAN_EMIT_SYMBOL, 40),
+    (31, HUFFMAN_EMIT_SYMBOL, 40),
+    (41, HUFFMAN_EMIT_SYMBOL, 40),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 40),
+    (3, HUFFMAN_EMIT_SYMBOL, 41),
+    (6, HUFFMAN_EMIT_SYMBOL, 41),
+    (10, HUFFMAN_EMIT_SYMBOL, 41),
+    (15, HUFFMAN_EMIT_SYMBOL, 41),
+    (24, HUFFMAN_EMIT_SYMBOL, 41),
+    (31, HUFFMAN_EMIT_SYMBOL, 41),
+    (41, HUFFMAN_EMIT_SYMBOL, 41),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 41),
+
+    # Node 78
+    (2, HUFFMAN_EMIT_SYMBOL, 63),
+    (9, HUFFMAN_EMIT_SYMBOL, 63),
+    (23, HUFFMAN_EMIT_SYMBOL, 63),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 63),
+    (1, HUFFMAN_EMIT_SYMBOL, 39),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 39),
+    (1, HUFFMAN_EMIT_SYMBOL, 43),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 43),
+    (1, HUFFMAN_EMIT_SYMBOL, 124),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 124),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 35),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 62),
+    (86, 0, 0),
+    (87, 0, 0),
+    (89, 0, 0),
+    (90, 0, 0),
+
+    # Node 79
+    (3, HUFFMAN_EMIT_SYMBOL, 63),
+    (6, HUFFMAN_EMIT_SYMBOL, 63),
+    (10, HUFFMAN_EMIT_SYMBOL, 63),
+    (15, HUFFMAN_EMIT_SYMBOL, 63),
+    (24, HUFFMAN_EMIT_SYMBOL, 63),
+    (31, HUFFMAN_EMIT_SYMBOL, 63),
+    (41, HUFFMAN_EMIT_SYMBOL, 63),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 63),
+    (2, HUFFMAN_EMIT_SYMBOL, 39),
+    (9, HUFFMAN_EMIT_SYMBOL, 39),
+    (23, HUFFMAN_EMIT_SYMBOL, 39),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 39),
+    (2, HUFFMAN_EMIT_SYMBOL, 43),
+    (9, HUFFMAN_EMIT_SYMBOL, 43),
+    (23, HUFFMAN_EMIT_SYMBOL, 43),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 43),
+
+    # Node 80
+    (3, HUFFMAN_EMIT_SYMBOL, 39),
+    (6, HUFFMAN_EMIT_SYMBOL, 39),
+    (10, HUFFMAN_EMIT_SYMBOL, 39),
+    (15, HUFFMAN_EMIT_SYMBOL, 39),
+    (24, HUFFMAN_EMIT_SYMBOL, 39),
+    (31, HUFFMAN_EMIT_SYMBOL, 39),
+    (41, HUFFMAN_EMIT_SYMBOL, 39),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 39),
+    (3, HUFFMAN_EMIT_SYMBOL, 43),
+    (6, HUFFMAN_EMIT_SYMBOL, 43),
+    (10, HUFFMAN_EMIT_SYMBOL, 43),
+    (15, HUFFMAN_EMIT_SYMBOL, 43),
+    (24, HUFFMAN_EMIT_SYMBOL, 43),
+    (31, HUFFMAN_EMIT_SYMBOL, 43),
+    (41, HUFFMAN_EMIT_SYMBOL, 43),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 43),
+
+    # Node 81
+    (2, HUFFMAN_EMIT_SYMBOL, 124),
+    (9, HUFFMAN_EMIT_SYMBOL, 124),
+    (23, HUFFMAN_EMIT_SYMBOL, 124),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 124),
+    (1, HUFFMAN_EMIT_SYMBOL, 35),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 35),
+    (1, HUFFMAN_EMIT_SYMBOL, 62),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 62),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 0),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 36),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 64),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 91),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 93),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 126),
+    (91, 0, 0),
+    (92, 0, 0),
+
+    # Node 82
+    (3, HUFFMAN_EMIT_SYMBOL, 124),
+    (6, HUFFMAN_EMIT_SYMBOL, 124),
+    (10, HUFFMAN_EMIT_SYMBOL, 124),
+    (15, HUFFMAN_EMIT_SYMBOL, 124),
+    (24, HUFFMAN_EMIT_SYMBOL, 124),
+    (31, HUFFMAN_EMIT_SYMBOL, 124),
+    (41, HUFFMAN_EMIT_SYMBOL, 124),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 124),
+    (2, HUFFMAN_EMIT_SYMBOL, 35),
+    (9, HUFFMAN_EMIT_SYMBOL, 35),
+    (23, HUFFMAN_EMIT_SYMBOL, 35),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 35),
+    (2, HUFFMAN_EMIT_SYMBOL, 62),
+    (9, HUFFMAN_EMIT_SYMBOL, 62),
+    (23, HUFFMAN_EMIT_SYMBOL, 62),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 62),
+
+    # Node 83
+    (3, HUFFMAN_EMIT_SYMBOL, 35),
+    (6, HUFFMAN_EMIT_SYMBOL, 35),
+    (10, HUFFMAN_EMIT_SYMBOL, 35),
+    (15, HUFFMAN_EMIT_SYMBOL, 35),
+    (24, HUFFMAN_EMIT_SYMBOL, 35),
+    (31, HUFFMAN_EMIT_SYMBOL, 35),
+    (41, HUFFMAN_EMIT_SYMBOL, 35),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 35),
+    (3, HUFFMAN_EMIT_SYMBOL, 62),
+    (6, HUFFMAN_EMIT_SYMBOL, 62),
+    (10, HUFFMAN_EMIT_SYMBOL, 62),
+    (15, HUFFMAN_EMIT_SYMBOL, 62),
+    (24, HUFFMAN_EMIT_SYMBOL, 62),
+    (31, HUFFMAN_EMIT_SYMBOL, 62),
+    (41, HUFFMAN_EMIT_SYMBOL, 62),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 62),
+
+    # Node 84
+    (1, HUFFMAN_EMIT_SYMBOL, 0),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 0),
+    (1, HUFFMAN_EMIT_SYMBOL, 36),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 36),
+    (1, HUFFMAN_EMIT_SYMBOL, 64),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 64),
+    (1, HUFFMAN_EMIT_SYMBOL, 91),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 91),
+    (1, HUFFMAN_EMIT_SYMBOL, 93),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 93),
+    (1, HUFFMAN_EMIT_SYMBOL, 126),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 126),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 94),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 125),
+    (93, 0, 0),
+    (94, 0, 0),
+
+    # Node 85
+    (2, HUFFMAN_EMIT_SYMBOL, 0),
+    (9, HUFFMAN_EMIT_SYMBOL, 0),
+    (23, HUFFMAN_EMIT_SYMBOL, 0),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 0),
+    (2, HUFFMAN_EMIT_SYMBOL, 36),
+    (9, HUFFMAN_EMIT_SYMBOL, 36),
+    (23, HUFFMAN_EMIT_SYMBOL, 36),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 36),
+    (2, HUFFMAN_EMIT_SYMBOL, 64),
+    (9, HUFFMAN_EMIT_SYMBOL, 64),
+    (23, HUFFMAN_EMIT_SYMBOL, 64),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 64),
+    (2, HUFFMAN_EMIT_SYMBOL, 91),
+    (9, HUFFMAN_EMIT_SYMBOL, 91),
+    (23, HUFFMAN_EMIT_SYMBOL, 91),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 91),
+
+    # Node 86
+    (3, HUFFMAN_EMIT_SYMBOL, 0),
+    (6, HUFFMAN_EMIT_SYMBOL, 0),
+    (10, HUFFMAN_EMIT_SYMBOL, 0),
+    (15, HUFFMAN_EMIT_SYMBOL, 0),
+    (24, HUFFMAN_EMIT_SYMBOL, 0),
+    (31, HUFFMAN_EMIT_SYMBOL, 0),
+    (41, HUFFMAN_EMIT_SYMBOL, 0),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 0),
+    (3, HUFFMAN_EMIT_SYMBOL, 36),
+    (6, HUFFMAN_EMIT_SYMBOL, 36),
+    (10, HUFFMAN_EMIT_SYMBOL, 36),
+    (15, HUFFMAN_EMIT_SYMBOL, 36),
+    (24, HUFFMAN_EMIT_SYMBOL, 36),
+    (31, HUFFMAN_EMIT_SYMBOL, 36),
+    (41, HUFFMAN_EMIT_SYMBOL, 36),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 36),
+
+    # Node 87
+    (3, HUFFMAN_EMIT_SYMBOL, 64),
+    (6, HUFFMAN_EMIT_SYMBOL, 64),
+    (10, HUFFMAN_EMIT_SYMBOL, 64),
+    (15, HUFFMAN_EMIT_SYMBOL, 64),
+    (24, HUFFMAN_EMIT_SYMBOL, 64),
+    (31, HUFFMAN_EMIT_SYMBOL, 64),
+    (41, HUFFMAN_EMIT_SYMBOL, 64),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 64),
+    (3, HUFFMAN_EMIT_SYMBOL, 91),
+    (6, HUFFMAN_EMIT_SYMBOL, 91),
+    (10, HUFFMAN_EMIT_SYMBOL, 91),
+    (15, HUFFMAN_EMIT_SYMBOL, 91),
+    (24, HUFFMAN_EMIT_SYMBOL, 91),
+    (31, HUFFMAN_EMIT_SYMBOL, 91),
+    (41, HUFFMAN_EMIT_SYMBOL, 91),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 91),
+
+    # Node 88
+    (2, HUFFMAN_EMIT_SYMBOL, 93),
+    (9, HUFFMAN_EMIT_SYMBOL, 93),
+    (23, HUFFMAN_EMIT_SYMBOL, 93),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 93),
+    (2, HUFFMAN_EMIT_SYMBOL, 126),
+    (9, HUFFMAN_EMIT_SYMBOL, 126),
+    (23, HUFFMAN_EMIT_SYMBOL, 126),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 126),
+    (1, HUFFMAN_EMIT_SYMBOL, 94),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 94),
+    (1, HUFFMAN_EMIT_SYMBOL, 125),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 125),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 60),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 96),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 123),
+    (95, 0, 0),
+
+    # Node 89
+    (3, HUFFMAN_EMIT_SYMBOL, 93),
+    (6, HUFFMAN_EMIT_SYMBOL, 93),
+    (10, HUFFMAN_EMIT_SYMBOL, 93),
+    (15, HUFFMAN_EMIT_SYMBOL, 93),
+    (24, HUFFMAN_EMIT_SYMBOL, 93),
+    (31, HUFFMAN_EMIT_SYMBOL, 93),
+    (41, HUFFMAN_EMIT_SYMBOL, 93),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 93),
+    (3, HUFFMAN_EMIT_SYMBOL, 126),
+    (6, HUFFMAN_EMIT_SYMBOL, 126),
+    (10, HUFFMAN_EMIT_SYMBOL, 126),
+    (15, HUFFMAN_EMIT_SYMBOL, 126),
+    (24, HUFFMAN_EMIT_SYMBOL, 126),
+    (31, HUFFMAN_EMIT_SYMBOL, 126),
+    (41, HUFFMAN_EMIT_SYMBOL, 126),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 126),
+
+    # Node 90
+    (2, HUFFMAN_EMIT_SYMBOL, 94),
+    (9, HUFFMAN_EMIT_SYMBOL, 94),
+    (23, HUFFMAN_EMIT_SYMBOL, 94),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 94),
+    (2, HUFFMAN_EMIT_SYMBOL, 125),
+    (9, HUFFMAN_EMIT_SYMBOL, 125),
+    (23, HUFFMAN_EMIT_SYMBOL, 125),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 125),
+    (1, HUFFMAN_EMIT_SYMBOL, 60),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 60),
+    (1, HUFFMAN_EMIT_SYMBOL, 96),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 96),
+    (1, HUFFMAN_EMIT_SYMBOL, 123),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 123),
+    (96, 0, 0),
+    (110, 0, 0),
+
+    # Node 91
+    (3, HUFFMAN_EMIT_SYMBOL, 94),
+    (6, HUFFMAN_EMIT_SYMBOL, 94),
+    (10, HUFFMAN_EMIT_SYMBOL, 94),
+    (15, HUFFMAN_EMIT_SYMBOL, 94),
+    (24, HUFFMAN_EMIT_SYMBOL, 94),
+    (31, HUFFMAN_EMIT_SYMBOL, 94),
+    (41, HUFFMAN_EMIT_SYMBOL, 94),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 94),
+    (3, HUFFMAN_EMIT_SYMBOL, 125),
+    (6, HUFFMAN_EMIT_SYMBOL, 125),
+    (10, HUFFMAN_EMIT_SYMBOL, 125),
+    (15, HUFFMAN_EMIT_SYMBOL, 125),
+    (24, HUFFMAN_EMIT_SYMBOL, 125),
+    (31, HUFFMAN_EMIT_SYMBOL, 125),
+    (41, HUFFMAN_EMIT_SYMBOL, 125),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 125),
+
+    # Node 92
+    (2, HUFFMAN_EMIT_SYMBOL, 60),
+    (9, HUFFMAN_EMIT_SYMBOL, 60),
+    (23, HUFFMAN_EMIT_SYMBOL, 60),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 60),
+    (2, HUFFMAN_EMIT_SYMBOL, 96),
+    (9, HUFFMAN_EMIT_SYMBOL, 96),
+    (23, HUFFMAN_EMIT_SYMBOL, 96),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 96),
+    (2, HUFFMAN_EMIT_SYMBOL, 123),
+    (9, HUFFMAN_EMIT_SYMBOL, 123),
+    (23, HUFFMAN_EMIT_SYMBOL, 123),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 123),
+    (97, 0, 0),
+    (101, 0, 0),
+    (111, 0, 0),
+    (133, 0, 0),
+
+    # Node 93
+    (3, HUFFMAN_EMIT_SYMBOL, 60),
+    (6, HUFFMAN_EMIT_SYMBOL, 60),
+    (10, HUFFMAN_EMIT_SYMBOL, 60),
+    (15, HUFFMAN_EMIT_SYMBOL, 60),
+    (24, HUFFMAN_EMIT_SYMBOL, 60),
+    (31, HUFFMAN_EMIT_SYMBOL, 60),
+    (41, HUFFMAN_EMIT_SYMBOL, 60),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 60),
+    (3, HUFFMAN_EMIT_SYMBOL, 96),
+    (6, HUFFMAN_EMIT_SYMBOL, 96),
+    (10, HUFFMAN_EMIT_SYMBOL, 96),
+    (15, HUFFMAN_EMIT_SYMBOL, 96),
+    (24, HUFFMAN_EMIT_SYMBOL, 96),
+    (31, HUFFMAN_EMIT_SYMBOL, 96),
+    (41, HUFFMAN_EMIT_SYMBOL, 96),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 96),
+
+    # Node 94
+    (3, HUFFMAN_EMIT_SYMBOL, 123),
+    (6, HUFFMAN_EMIT_SYMBOL, 123),
+    (10, HUFFMAN_EMIT_SYMBOL, 123),
+    (15, HUFFMAN_EMIT_SYMBOL, 123),
+    (24, HUFFMAN_EMIT_SYMBOL, 123),
+    (31, HUFFMAN_EMIT_SYMBOL, 123),
+    (41, HUFFMAN_EMIT_SYMBOL, 123),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 123),
+    (98, 0, 0),
+    (99, 0, 0),
+    (102, 0, 0),
+    (105, 0, 0),
+    (112, 0, 0),
+    (119, 0, 0),
+    (134, 0, 0),
+    (153, 0, 0),
+
+    # Node 95
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 92),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 195),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 208),
+    (100, 0, 0),
+    (103, 0, 0),
+    (104, 0, 0),
+    (106, 0, 0),
+    (107, 0, 0),
+    (113, 0, 0),
+    (116, 0, 0),
+    (120, 0, 0),
+    (126, 0, 0),
+    (135, 0, 0),
+    (142, 0, 0),
+    (154, 0, 0),
+    (169, 0, 0),
+
+    # Node 96
+    (1, HUFFMAN_EMIT_SYMBOL, 92),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 92),
+    (1, HUFFMAN_EMIT_SYMBOL, 195),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 195),
+    (1, HUFFMAN_EMIT_SYMBOL, 208),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 208),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 128),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 130),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 131),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 162),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 184),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 194),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 224),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 226),
+    (108, 0, 0),
+    (109, 0, 0),
+
+    # Node 97
+    (2, HUFFMAN_EMIT_SYMBOL, 92),
+    (9, HUFFMAN_EMIT_SYMBOL, 92),
+    (23, HUFFMAN_EMIT_SYMBOL, 92),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 92),
+    (2, HUFFMAN_EMIT_SYMBOL, 195),
+    (9, HUFFMAN_EMIT_SYMBOL, 195),
+    (23, HUFFMAN_EMIT_SYMBOL, 195),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 195),
+    (2, HUFFMAN_EMIT_SYMBOL, 208),
+    (9, HUFFMAN_EMIT_SYMBOL, 208),
+    (23, HUFFMAN_EMIT_SYMBOL, 208),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 208),
+    (1, HUFFMAN_EMIT_SYMBOL, 128),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 128),
+    (1, HUFFMAN_EMIT_SYMBOL, 130),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 130),
+
+    # Node 98
+    (3, HUFFMAN_EMIT_SYMBOL, 92),
+    (6, HUFFMAN_EMIT_SYMBOL, 92),
+    (10, HUFFMAN_EMIT_SYMBOL, 92),
+    (15, HUFFMAN_EMIT_SYMBOL, 92),
+    (24, HUFFMAN_EMIT_SYMBOL, 92),
+    (31, HUFFMAN_EMIT_SYMBOL, 92),
+    (41, HUFFMAN_EMIT_SYMBOL, 92),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 92),
+    (3, HUFFMAN_EMIT_SYMBOL, 195),
+    (6, HUFFMAN_EMIT_SYMBOL, 195),
+    (10, HUFFMAN_EMIT_SYMBOL, 195),
+    (15, HUFFMAN_EMIT_SYMBOL, 195),
+    (24, HUFFMAN_EMIT_SYMBOL, 195),
+    (31, HUFFMAN_EMIT_SYMBOL, 195),
+    (41, HUFFMAN_EMIT_SYMBOL, 195),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 195),
+
+    # Node 99
+    (3, HUFFMAN_EMIT_SYMBOL, 208),
+    (6, HUFFMAN_EMIT_SYMBOL, 208),
+    (10, HUFFMAN_EMIT_SYMBOL, 208),
+    (15, HUFFMAN_EMIT_SYMBOL, 208),
+    (24, HUFFMAN_EMIT_SYMBOL, 208),
+    (31, HUFFMAN_EMIT_SYMBOL, 208),
+    (41, HUFFMAN_EMIT_SYMBOL, 208),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 208),
+    (2, HUFFMAN_EMIT_SYMBOL, 128),
+    (9, HUFFMAN_EMIT_SYMBOL, 128),
+    (23, HUFFMAN_EMIT_SYMBOL, 128),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 128),
+    (2, HUFFMAN_EMIT_SYMBOL, 130),
+    (9, HUFFMAN_EMIT_SYMBOL, 130),
+    (23, HUFFMAN_EMIT_SYMBOL, 130),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 130),
+
+    # Node 100
+    (3, HUFFMAN_EMIT_SYMBOL, 128),
+    (6, HUFFMAN_EMIT_SYMBOL, 128),
+    (10, HUFFMAN_EMIT_SYMBOL, 128),
+    (15, HUFFMAN_EMIT_SYMBOL, 128),
+    (24, HUFFMAN_EMIT_SYMBOL, 128),
+    (31, HUFFMAN_EMIT_SYMBOL, 128),
+    (41, HUFFMAN_EMIT_SYMBOL, 128),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 128),
+    (3, HUFFMAN_EMIT_SYMBOL, 130),
+    (6, HUFFMAN_EMIT_SYMBOL, 130),
+    (10, HUFFMAN_EMIT_SYMBOL, 130),
+    (15, HUFFMAN_EMIT_SYMBOL, 130),
+    (24, HUFFMAN_EMIT_SYMBOL, 130),
+    (31, HUFFMAN_EMIT_SYMBOL, 130),
+    (41, HUFFMAN_EMIT_SYMBOL, 130),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 130),
+
+    # Node 101
+    (1, HUFFMAN_EMIT_SYMBOL, 131),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 131),
+    (1, HUFFMAN_EMIT_SYMBOL, 162),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 162),
+    (1, HUFFMAN_EMIT_SYMBOL, 184),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 184),
+    (1, HUFFMAN_EMIT_SYMBOL, 194),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 194),
+    (1, HUFFMAN_EMIT_SYMBOL, 224),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 224),
+    (1, HUFFMAN_EMIT_SYMBOL, 226),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 226),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 153),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 161),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 167),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 172),
+
+    # Node 102
+    (2, HUFFMAN_EMIT_SYMBOL, 131),
+    (9, HUFFMAN_EMIT_SYMBOL, 131),
+    (23, HUFFMAN_EMIT_SYMBOL, 131),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 131),
+    (2, HUFFMAN_EMIT_SYMBOL, 162),
+    (9, HUFFMAN_EMIT_SYMBOL, 162),
+    (23, HUFFMAN_EMIT_SYMBOL, 162),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 162),
+    (2, HUFFMAN_EMIT_SYMBOL, 184),
+    (9, HUFFMAN_EMIT_SYMBOL, 184),
+    (23, HUFFMAN_EMIT_SYMBOL, 184),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 184),
+    (2, HUFFMAN_EMIT_SYMBOL, 194),
+    (9, HUFFMAN_EMIT_SYMBOL, 194),
+    (23, HUFFMAN_EMIT_SYMBOL, 194),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 194),
+
+    # Node 103
+    (3, HUFFMAN_EMIT_SYMBOL, 131),
+    (6, HUFFMAN_EMIT_SYMBOL, 131),
+    (10, HUFFMAN_EMIT_SYMBOL, 131),
+    (15, HUFFMAN_EMIT_SYMBOL, 131),
+    (24, HUFFMAN_EMIT_SYMBOL, 131),
+    (31, HUFFMAN_EMIT_SYMBOL, 131),
+    (41, HUFFMAN_EMIT_SYMBOL, 131),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 131),
+    (3, HUFFMAN_EMIT_SYMBOL, 162),
+    (6, HUFFMAN_EMIT_SYMBOL, 162),
+    (10, HUFFMAN_EMIT_SYMBOL, 162),
+    (15, HUFFMAN_EMIT_SYMBOL, 162),
+    (24, HUFFMAN_EMIT_SYMBOL, 162),
+    (31, HUFFMAN_EMIT_SYMBOL, 162),
+    (41, HUFFMAN_EMIT_SYMBOL, 162),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 162),
+
+    # Node 104
+    (3, HUFFMAN_EMIT_SYMBOL, 184),
+    (6, HUFFMAN_EMIT_SYMBOL, 184),
+    (10, HUFFMAN_EMIT_SYMBOL, 184),
+    (15, HUFFMAN_EMIT_SYMBOL, 184),
+    (24, HUFFMAN_EMIT_SYMBOL, 184),
+    (31, HUFFMAN_EMIT_SYMBOL, 184),
+    (41, HUFFMAN_EMIT_SYMBOL, 184),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 184),
+    (3, HUFFMAN_EMIT_SYMBOL, 194),
+    (6, HUFFMAN_EMIT_SYMBOL, 194),
+    (10, HUFFMAN_EMIT_SYMBOL, 194),
+    (15, HUFFMAN_EMIT_SYMBOL, 194),
+    (24, HUFFMAN_EMIT_SYMBOL, 194),
+    (31, HUFFMAN_EMIT_SYMBOL, 194),
+    (41, HUFFMAN_EMIT_SYMBOL, 194),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 194),
+
+    # Node 105
+    (2, HUFFMAN_EMIT_SYMBOL, 224),
+    (9, HUFFMAN_EMIT_SYMBOL, 224),
+    (23, HUFFMAN_EMIT_SYMBOL, 224),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 224),
+    (2, HUFFMAN_EMIT_SYMBOL, 226),
+    (9, HUFFMAN_EMIT_SYMBOL, 226),
+    (23, HUFFMAN_EMIT_SYMBOL, 226),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 226),
+    (1, HUFFMAN_EMIT_SYMBOL, 153),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 153),
+    (1, HUFFMAN_EMIT_SYMBOL, 161),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 161),
+    (1, HUFFMAN_EMIT_SYMBOL, 167),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 167),
+    (1, HUFFMAN_EMIT_SYMBOL, 172),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 172),
+
+    # Node 106
+    (3, HUFFMAN_EMIT_SYMBOL, 224),
+    (6, HUFFMAN_EMIT_SYMBOL, 224),
+    (10, HUFFMAN_EMIT_SYMBOL, 224),
+    (15, HUFFMAN_EMIT_SYMBOL, 224),
+    (24, HUFFMAN_EMIT_SYMBOL, 224),
+    (31, HUFFMAN_EMIT_SYMBOL, 224),
+    (41, HUFFMAN_EMIT_SYMBOL, 224),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 224),
+    (3, HUFFMAN_EMIT_SYMBOL, 226),
+    (6, HUFFMAN_EMIT_SYMBOL, 226),
+    (10, HUFFMAN_EMIT_SYMBOL, 226),
+    (15, HUFFMAN_EMIT_SYMBOL, 226),
+    (24, HUFFMAN_EMIT_SYMBOL, 226),
+    (31, HUFFMAN_EMIT_SYMBOL, 226),
+    (41, HUFFMAN_EMIT_SYMBOL, 226),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 226),
+
+    # Node 107
+    (2, HUFFMAN_EMIT_SYMBOL, 153),
+    (9, HUFFMAN_EMIT_SYMBOL, 153),
+    (23, HUFFMAN_EMIT_SYMBOL, 153),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 153),
+    (2, HUFFMAN_EMIT_SYMBOL, 161),
+    (9, HUFFMAN_EMIT_SYMBOL, 161),
+    (23, HUFFMAN_EMIT_SYMBOL, 161),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 161),
+    (2, HUFFMAN_EMIT_SYMBOL, 167),
+    (9, HUFFMAN_EMIT_SYMBOL, 167),
+    (23, HUFFMAN_EMIT_SYMBOL, 167),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 167),
+    (2, HUFFMAN_EMIT_SYMBOL, 172),
+    (9, HUFFMAN_EMIT_SYMBOL, 172),
+    (23, HUFFMAN_EMIT_SYMBOL, 172),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 172),
+
+    # Node 108
+    (3, HUFFMAN_EMIT_SYMBOL, 153),
+    (6, HUFFMAN_EMIT_SYMBOL, 153),
+    (10, HUFFMAN_EMIT_SYMBOL, 153),
+    (15, HUFFMAN_EMIT_SYMBOL, 153),
+    (24, HUFFMAN_EMIT_SYMBOL, 153),
+    (31, HUFFMAN_EMIT_SYMBOL, 153),
+    (41, HUFFMAN_EMIT_SYMBOL, 153),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 153),
+    (3, HUFFMAN_EMIT_SYMBOL, 161),
+    (6, HUFFMAN_EMIT_SYMBOL, 161),
+    (10, HUFFMAN_EMIT_SYMBOL, 161),
+    (15, HUFFMAN_EMIT_SYMBOL, 161),
+    (24, HUFFMAN_EMIT_SYMBOL, 161),
+    (31, HUFFMAN_EMIT_SYMBOL, 161),
+    (41, HUFFMAN_EMIT_SYMBOL, 161),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 161),
+
+    # Node 109
+    (3, HUFFMAN_EMIT_SYMBOL, 167),
+    (6, HUFFMAN_EMIT_SYMBOL, 167),
+    (10, HUFFMAN_EMIT_SYMBOL, 167),
+    (15, HUFFMAN_EMIT_SYMBOL, 167),
+    (24, HUFFMAN_EMIT_SYMBOL, 167),
+    (31, HUFFMAN_EMIT_SYMBOL, 167),
+    (41, HUFFMAN_EMIT_SYMBOL, 167),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 167),
+    (3, HUFFMAN_EMIT_SYMBOL, 172),
+    (6, HUFFMAN_EMIT_SYMBOL, 172),
+    (10, HUFFMAN_EMIT_SYMBOL, 172),
+    (15, HUFFMAN_EMIT_SYMBOL, 172),
+    (24, HUFFMAN_EMIT_SYMBOL, 172),
+    (31, HUFFMAN_EMIT_SYMBOL, 172),
+    (41, HUFFMAN_EMIT_SYMBOL, 172),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 172),
+
+    # Node 110
+    (114, 0, 0),
+    (115, 0, 0),
+    (117, 0, 0),
+    (118, 0, 0),
+    (121, 0, 0),
+    (123, 0, 0),
+    (127, 0, 0),
+    (130, 0, 0),
+    (136, 0, 0),
+    (139, 0, 0),
+    (143, 0, 0),
+    (146, 0, 0),
+    (155, 0, 0),
+    (162, 0, 0),
+    (170, 0, 0),
+    (180, 0, 0),
+
+    # Node 111
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 176),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 177),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 179),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 209),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 216),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 217),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 227),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 229),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 230),
+    (122, 0, 0),
+    (124, 0, 0),
+    (125, 0, 0),
+    (128, 0, 0),
+    (129, 0, 0),
+    (131, 0, 0),
+    (132, 0, 0),
+
+    # Node 112
+    (1, HUFFMAN_EMIT_SYMBOL, 176),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 176),
+    (1, HUFFMAN_EMIT_SYMBOL, 177),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 177),
+    (1, HUFFMAN_EMIT_SYMBOL, 179),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 179),
+    (1, HUFFMAN_EMIT_SYMBOL, 209),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 209),
+    (1, HUFFMAN_EMIT_SYMBOL, 216),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 216),
+    (1, HUFFMAN_EMIT_SYMBOL, 217),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 217),
+    (1, HUFFMAN_EMIT_SYMBOL, 227),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 227),
+    (1, HUFFMAN_EMIT_SYMBOL, 229),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 229),
+
+    # Node 113
+    (2, HUFFMAN_EMIT_SYMBOL, 176),
+    (9, HUFFMAN_EMIT_SYMBOL, 176),
+    (23, HUFFMAN_EMIT_SYMBOL, 176),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 176),
+    (2, HUFFMAN_EMIT_SYMBOL, 177),
+    (9, HUFFMAN_EMIT_SYMBOL, 177),
+    (23, HUFFMAN_EMIT_SYMBOL, 177),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 177),
+    (2, HUFFMAN_EMIT_SYMBOL, 179),
+    (9, HUFFMAN_EMIT_SYMBOL, 179),
+    (23, HUFFMAN_EMIT_SYMBOL, 179),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 179),
+    (2, HUFFMAN_EMIT_SYMBOL, 209),
+    (9, HUFFMAN_EMIT_SYMBOL, 209),
+    (23, HUFFMAN_EMIT_SYMBOL, 209),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 209),
+
+    # Node 114
+    (3, HUFFMAN_EMIT_SYMBOL, 176),
+    (6, HUFFMAN_EMIT_SYMBOL, 176),
+    (10, HUFFMAN_EMIT_SYMBOL, 176),
+    (15, HUFFMAN_EMIT_SYMBOL, 176),
+    (24, HUFFMAN_EMIT_SYMBOL, 176),
+    (31, HUFFMAN_EMIT_SYMBOL, 176),
+    (41, HUFFMAN_EMIT_SYMBOL, 176),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 176),
+    (3, HUFFMAN_EMIT_SYMBOL, 177),
+    (6, HUFFMAN_EMIT_SYMBOL, 177),
+    (10, HUFFMAN_EMIT_SYMBOL, 177),
+    (15, HUFFMAN_EMIT_SYMBOL, 177),
+    (24, HUFFMAN_EMIT_SYMBOL, 177),
+    (31, HUFFMAN_EMIT_SYMBOL, 177),
+    (41, HUFFMAN_EMIT_SYMBOL, 177),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 177),
+
+    # Node 115
+    (3, HUFFMAN_EMIT_SYMBOL, 179),
+    (6, HUFFMAN_EMIT_SYMBOL, 179),
+    (10, HUFFMAN_EMIT_SYMBOL, 179),
+    (15, HUFFMAN_EMIT_SYMBOL, 179),
+    (24, HUFFMAN_EMIT_SYMBOL, 179),
+    (31, HUFFMAN_EMIT_SYMBOL, 179),
+    (41, HUFFMAN_EMIT_SYMBOL, 179),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 179),
+    (3, HUFFMAN_EMIT_SYMBOL, 209),
+    (6, HUFFMAN_EMIT_SYMBOL, 209),
+    (10, HUFFMAN_EMIT_SYMBOL, 209),
+    (15, HUFFMAN_EMIT_SYMBOL, 209),
+    (24, HUFFMAN_EMIT_SYMBOL, 209),
+    (31, HUFFMAN_EMIT_SYMBOL, 209),
+    (41, HUFFMAN_EMIT_SYMBOL, 209),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 209),
+
+    # Node 116
+    (2, HUFFMAN_EMIT_SYMBOL, 216),
+    (9, HUFFMAN_EMIT_SYMBOL, 216),
+    (23, HUFFMAN_EMIT_SYMBOL, 216),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 216),
+    (2, HUFFMAN_EMIT_SYMBOL, 217),
+    (9, HUFFMAN_EMIT_SYMBOL, 217),
+    (23, HUFFMAN_EMIT_SYMBOL, 217),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 217),
+    (2, HUFFMAN_EMIT_SYMBOL, 227),
+    (9, HUFFMAN_EMIT_SYMBOL, 227),
+    (23, HUFFMAN_EMIT_SYMBOL, 227),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 227),
+    (2, HUFFMAN_EMIT_SYMBOL, 229),
+    (9, HUFFMAN_EMIT_SYMBOL, 229),
+    (23, HUFFMAN_EMIT_SYMBOL, 229),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 229),
+
+    # Node 117
+    (3, HUFFMAN_EMIT_SYMBOL, 216),
+    (6, HUFFMAN_EMIT_SYMBOL, 216),
+    (10, HUFFMAN_EMIT_SYMBOL, 216),
+    (15, HUFFMAN_EMIT_SYMBOL, 216),
+    (24, HUFFMAN_EMIT_SYMBOL, 216),
+    (31, HUFFMAN_EMIT_SYMBOL, 216),
+    (41, HUFFMAN_EMIT_SYMBOL, 216),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 216),
+    (3, HUFFMAN_EMIT_SYMBOL, 217),
+    (6, HUFFMAN_EMIT_SYMBOL, 217),
+    (10, HUFFMAN_EMIT_SYMBOL, 217),
+    (15, HUFFMAN_EMIT_SYMBOL, 217),
+    (24, HUFFMAN_EMIT_SYMBOL, 217),
+    (31, HUFFMAN_EMIT_SYMBOL, 217),
+    (41, HUFFMAN_EMIT_SYMBOL, 217),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 217),
+
+    # Node 118
+    (3, HUFFMAN_EMIT_SYMBOL, 227),
+    (6, HUFFMAN_EMIT_SYMBOL, 227),
+    (10, HUFFMAN_EMIT_SYMBOL, 227),
+    (15, HUFFMAN_EMIT_SYMBOL, 227),
+    (24, HUFFMAN_EMIT_SYMBOL, 227),
+    (31, HUFFMAN_EMIT_SYMBOL, 227),
+    (41, HUFFMAN_EMIT_SYMBOL, 227),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 227),
+    (3, HUFFMAN_EMIT_SYMBOL, 229),
+    (6, HUFFMAN_EMIT_SYMBOL, 229),
+    (10, HUFFMAN_EMIT_SYMBOL, 229),
+    (15, HUFFMAN_EMIT_SYMBOL, 229),
+    (24, HUFFMAN_EMIT_SYMBOL, 229),
+    (31, HUFFMAN_EMIT_SYMBOL, 229),
+    (41, HUFFMAN_EMIT_SYMBOL, 229),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 229),
+
+    # Node 119
+    (1, HUFFMAN_EMIT_SYMBOL, 230),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 230),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 129),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 132),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 133),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 134),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 136),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 146),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 154),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 156),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 160),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 163),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 164),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 169),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 170),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 173),
+
+    # Node 120
+    (2, HUFFMAN_EMIT_SYMBOL, 230),
+    (9, HUFFMAN_EMIT_SYMBOL, 230),
+    (23, HUFFMAN_EMIT_SYMBOL, 230),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 230),
+    (1, HUFFMAN_EMIT_SYMBOL, 129),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 129),
+    (1, HUFFMAN_EMIT_SYMBOL, 132),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 132),
+    (1, HUFFMAN_EMIT_SYMBOL, 133),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 133),
+    (1, HUFFMAN_EMIT_SYMBOL, 134),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 134),
+    (1, HUFFMAN_EMIT_SYMBOL, 136),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 136),
+    (1, HUFFMAN_EMIT_SYMBOL, 146),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 146),
+
+    # Node 121
+    (3, HUFFMAN_EMIT_SYMBOL, 230),
+    (6, HUFFMAN_EMIT_SYMBOL, 230),
+    (10, HUFFMAN_EMIT_SYMBOL, 230),
+    (15, HUFFMAN_EMIT_SYMBOL, 230),
+    (24, HUFFMAN_EMIT_SYMBOL, 230),
+    (31, HUFFMAN_EMIT_SYMBOL, 230),
+    (41, HUFFMAN_EMIT_SYMBOL, 230),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 230),
+    (2, HUFFMAN_EMIT_SYMBOL, 129),
+    (9, HUFFMAN_EMIT_SYMBOL, 129),
+    (23, HUFFMAN_EMIT_SYMBOL, 129),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 129),
+    (2, HUFFMAN_EMIT_SYMBOL, 132),
+    (9, HUFFMAN_EMIT_SYMBOL, 132),
+    (23, HUFFMAN_EMIT_SYMBOL, 132),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 132),
+
+    # Node 122
+    (3, HUFFMAN_EMIT_SYMBOL, 129),
+    (6, HUFFMAN_EMIT_SYMBOL, 129),
+    (10, HUFFMAN_EMIT_SYMBOL, 129),
+    (15, HUFFMAN_EMIT_SYMBOL, 129),
+    (24, HUFFMAN_EMIT_SYMBOL, 129),
+    (31, HUFFMAN_EMIT_SYMBOL, 129),
+    (41, HUFFMAN_EMIT_SYMBOL, 129),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 129),
+    (3, HUFFMAN_EMIT_SYMBOL, 132),
+    (6, HUFFMAN_EMIT_SYMBOL, 132),
+    (10, HUFFMAN_EMIT_SYMBOL, 132),
+    (15, HUFFMAN_EMIT_SYMBOL, 132),
+    (24, HUFFMAN_EMIT_SYMBOL, 132),
+    (31, HUFFMAN_EMIT_SYMBOL, 132),
+    (41, HUFFMAN_EMIT_SYMBOL, 132),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 132),
+
+    # Node 123
+    (2, HUFFMAN_EMIT_SYMBOL, 133),
+    (9, HUFFMAN_EMIT_SYMBOL, 133),
+    (23, HUFFMAN_EMIT_SYMBOL, 133),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 133),
+    (2, HUFFMAN_EMIT_SYMBOL, 134),
+    (9, HUFFMAN_EMIT_SYMBOL, 134),
+    (23, HUFFMAN_EMIT_SYMBOL, 134),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 134),
+    (2, HUFFMAN_EMIT_SYMBOL, 136),
+    (9, HUFFMAN_EMIT_SYMBOL, 136),
+    (23, HUFFMAN_EMIT_SYMBOL, 136),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 136),
+    (2, HUFFMAN_EMIT_SYMBOL, 146),
+    (9, HUFFMAN_EMIT_SYMBOL, 146),
+    (23, HUFFMAN_EMIT_SYMBOL, 146),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 146),
+
+    # Node 124
+    (3, HUFFMAN_EMIT_SYMBOL, 133),
+    (6, HUFFMAN_EMIT_SYMBOL, 133),
+    (10, HUFFMAN_EMIT_SYMBOL, 133),
+    (15, HUFFMAN_EMIT_SYMBOL, 133),
+    (24, HUFFMAN_EMIT_SYMBOL, 133),
+    (31, HUFFMAN_EMIT_SYMBOL, 133),
+    (41, HUFFMAN_EMIT_SYMBOL, 133),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 133),
+    (3, HUFFMAN_EMIT_SYMBOL, 134),
+    (6, HUFFMAN_EMIT_SYMBOL, 134),
+    (10, HUFFMAN_EMIT_SYMBOL, 134),
+    (15, HUFFMAN_EMIT_SYMBOL, 134),
+    (24, HUFFMAN_EMIT_SYMBOL, 134),
+    (31, HUFFMAN_EMIT_SYMBOL, 134),
+    (41, HUFFMAN_EMIT_SYMBOL, 134),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 134),
+
+    # Node 125
+    (3, HUFFMAN_EMIT_SYMBOL, 136),
+    (6, HUFFMAN_EMIT_SYMBOL, 136),
+    (10, HUFFMAN_EMIT_SYMBOL, 136),
+    (15, HUFFMAN_EMIT_SYMBOL, 136),
+    (24, HUFFMAN_EMIT_SYMBOL, 136),
+    (31, HUFFMAN_EMIT_SYMBOL, 136),
+    (41, HUFFMAN_EMIT_SYMBOL, 136),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 136),
+    (3, HUFFMAN_EMIT_SYMBOL, 146),
+    (6, HUFFMAN_EMIT_SYMBOL, 146),
+    (10, HUFFMAN_EMIT_SYMBOL, 146),
+    (15, HUFFMAN_EMIT_SYMBOL, 146),
+    (24, HUFFMAN_EMIT_SYMBOL, 146),
+    (31, HUFFMAN_EMIT_SYMBOL, 146),
+    (41, HUFFMAN_EMIT_SYMBOL, 146),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 146),
+
+    # Node 126
+    (1, HUFFMAN_EMIT_SYMBOL, 154),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 154),
+    (1, HUFFMAN_EMIT_SYMBOL, 156),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 156),
+    (1, HUFFMAN_EMIT_SYMBOL, 160),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 160),
+    (1, HUFFMAN_EMIT_SYMBOL, 163),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 163),
+    (1, HUFFMAN_EMIT_SYMBOL, 164),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 164),
+    (1, HUFFMAN_EMIT_SYMBOL, 169),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 169),
+    (1, HUFFMAN_EMIT_SYMBOL, 170),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 170),
+    (1, HUFFMAN_EMIT_SYMBOL, 173),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 173),
+
+    # Node 127
+    (2, HUFFMAN_EMIT_SYMBOL, 154),
+    (9, HUFFMAN_EMIT_SYMBOL, 154),
+    (23, HUFFMAN_EMIT_SYMBOL, 154),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 154),
+    (2, HUFFMAN_EMIT_SYMBOL, 156),
+    (9, HUFFMAN_EMIT_SYMBOL, 156),
+    (23, HUFFMAN_EMIT_SYMBOL, 156),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 156),
+    (2, HUFFMAN_EMIT_SYMBOL, 160),
+    (9, HUFFMAN_EMIT_SYMBOL, 160),
+    (23, HUFFMAN_EMIT_SYMBOL, 160),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 160),
+    (2, HUFFMAN_EMIT_SYMBOL, 163),
+    (9, HUFFMAN_EMIT_SYMBOL, 163),
+    (23, HUFFMAN_EMIT_SYMBOL, 163),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 163),
+
+    # Node 128
+    (3, HUFFMAN_EMIT_SYMBOL, 154),
+    (6, HUFFMAN_EMIT_SYMBOL, 154),
+    (10, HUFFMAN_EMIT_SYMBOL, 154),
+    (15, HUFFMAN_EMIT_SYMBOL, 154),
+    (24, HUFFMAN_EMIT_SYMBOL, 154),
+    (31, HUFFMAN_EMIT_SYMBOL, 154),
+    (41, HUFFMAN_EMIT_SYMBOL, 154),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 154),
+    (3, HUFFMAN_EMIT_SYMBOL, 156),
+    (6, HUFFMAN_EMIT_SYMBOL, 156),
+    (10, HUFFMAN_EMIT_SYMBOL, 156),
+    (15, HUFFMAN_EMIT_SYMBOL, 156),
+    (24, HUFFMAN_EMIT_SYMBOL, 156),
+    (31, HUFFMAN_EMIT_SYMBOL, 156),
+    (41, HUFFMAN_EMIT_SYMBOL, 156),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 156),
+
+    # Node 129
+    (3, HUFFMAN_EMIT_SYMBOL, 160),
+    (6, HUFFMAN_EMIT_SYMBOL, 160),
+    (10, HUFFMAN_EMIT_SYMBOL, 160),
+    (15, HUFFMAN_EMIT_SYMBOL, 160),
+    (24, HUFFMAN_EMIT_SYMBOL, 160),
+    (31, HUFFMAN_EMIT_SYMBOL, 160),
+    (41, HUFFMAN_EMIT_SYMBOL, 160),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 160),
+    (3, HUFFMAN_EMIT_SYMBOL, 163),
+    (6, HUFFMAN_EMIT_SYMBOL, 163),
+    (10, HUFFMAN_EMIT_SYMBOL, 163),
+    (15, HUFFMAN_EMIT_SYMBOL, 163),
+    (24, HUFFMAN_EMIT_SYMBOL, 163),
+    (31, HUFFMAN_EMIT_SYMBOL, 163),
+    (41, HUFFMAN_EMIT_SYMBOL, 163),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 163),
+
+    # Node 130
+    (2, HUFFMAN_EMIT_SYMBOL, 164),
+    (9, HUFFMAN_EMIT_SYMBOL, 164),
+    (23, HUFFMAN_EMIT_SYMBOL, 164),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 164),
+    (2, HUFFMAN_EMIT_SYMBOL, 169),
+    (9, HUFFMAN_EMIT_SYMBOL, 169),
+    (23, HUFFMAN_EMIT_SYMBOL, 169),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 169),
+    (2, HUFFMAN_EMIT_SYMBOL, 170),
+    (9, HUFFMAN_EMIT_SYMBOL, 170),
+    (23, HUFFMAN_EMIT_SYMBOL, 170),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 170),
+    (2, HUFFMAN_EMIT_SYMBOL, 173),
+    (9, HUFFMAN_EMIT_SYMBOL, 173),
+    (23, HUFFMAN_EMIT_SYMBOL, 173),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 173),
+
+    # Node 131
+    (3, HUFFMAN_EMIT_SYMBOL, 164),
+    (6, HUFFMAN_EMIT_SYMBOL, 164),
+    (10, HUFFMAN_EMIT_SYMBOL, 164),
+    (15, HUFFMAN_EMIT_SYMBOL, 164),
+    (24, HUFFMAN_EMIT_SYMBOL, 164),
+    (31, HUFFMAN_EMIT_SYMBOL, 164),
+    (41, HUFFMAN_EMIT_SYMBOL, 164),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 164),
+    (3, HUFFMAN_EMIT_SYMBOL, 169),
+    (6, HUFFMAN_EMIT_SYMBOL, 169),
+    (10, HUFFMAN_EMIT_SYMBOL, 169),
+    (15, HUFFMAN_EMIT_SYMBOL, 169),
+    (24, HUFFMAN_EMIT_SYMBOL, 169),
+    (31, HUFFMAN_EMIT_SYMBOL, 169),
+    (41, HUFFMAN_EMIT_SYMBOL, 169),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 169),
+
+    # Node 132
+    (3, HUFFMAN_EMIT_SYMBOL, 170),
+    (6, HUFFMAN_EMIT_SYMBOL, 170),
+    (10, HUFFMAN_EMIT_SYMBOL, 170),
+    (15, HUFFMAN_EMIT_SYMBOL, 170),
+    (24, HUFFMAN_EMIT_SYMBOL, 170),
+    (31, HUFFMAN_EMIT_SYMBOL, 170),
+    (41, HUFFMAN_EMIT_SYMBOL, 170),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 170),
+    (3, HUFFMAN_EMIT_SYMBOL, 173),
+    (6, HUFFMAN_EMIT_SYMBOL, 173),
+    (10, HUFFMAN_EMIT_SYMBOL, 173),
+    (15, HUFFMAN_EMIT_SYMBOL, 173),
+    (24, HUFFMAN_EMIT_SYMBOL, 173),
+    (31, HUFFMAN_EMIT_SYMBOL, 173),
+    (41, HUFFMAN_EMIT_SYMBOL, 173),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 173),
+
+    # Node 133
+    (137, 0, 0),
+    (138, 0, 0),
+    (140, 0, 0),
+    (141, 0, 0),
+    (144, 0, 0),
+    (145, 0, 0),
+    (147, 0, 0),
+    (150, 0, 0),
+    (156, 0, 0),
+    (159, 0, 0),
+    (163, 0, 0),
+    (166, 0, 0),
+    (171, 0, 0),
+    (174, 0, 0),
+    (181, 0, 0),
+    (190, 0, 0),
+
+    # Node 134
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 178),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 181),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 185),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 186),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 187),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 189),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 190),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 196),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 198),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 228),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 232),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 233),
+    (148, 0, 0),
+    (149, 0, 0),
+    (151, 0, 0),
+    (152, 0, 0),
+
+    # Node 135
+    (1, HUFFMAN_EMIT_SYMBOL, 178),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 178),
+    (1, HUFFMAN_EMIT_SYMBOL, 181),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 181),
+    (1, HUFFMAN_EMIT_SYMBOL, 185),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 185),
+    (1, HUFFMAN_EMIT_SYMBOL, 186),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 186),
+    (1, HUFFMAN_EMIT_SYMBOL, 187),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 187),
+    (1, HUFFMAN_EMIT_SYMBOL, 189),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 189),
+    (1, HUFFMAN_EMIT_SYMBOL, 190),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 190),
+    (1, HUFFMAN_EMIT_SYMBOL, 196),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 196),
+
+    # Node 136
+    (2, HUFFMAN_EMIT_SYMBOL, 178),
+    (9, HUFFMAN_EMIT_SYMBOL, 178),
+    (23, HUFFMAN_EMIT_SYMBOL, 178),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 178),
+    (2, HUFFMAN_EMIT_SYMBOL, 181),
+    (9, HUFFMAN_EMIT_SYMBOL, 181),
+    (23, HUFFMAN_EMIT_SYMBOL, 181),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 181),
+    (2, HUFFMAN_EMIT_SYMBOL, 185),
+    (9, HUFFMAN_EMIT_SYMBOL, 185),
+    (23, HUFFMAN_EMIT_SYMBOL, 185),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 185),
+    (2, HUFFMAN_EMIT_SYMBOL, 186),
+    (9, HUFFMAN_EMIT_SYMBOL, 186),
+    (23, HUFFMAN_EMIT_SYMBOL, 186),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 186),
+
+    # Node 137
+    (3, HUFFMAN_EMIT_SYMBOL, 178),
+    (6, HUFFMAN_EMIT_SYMBOL, 178),
+    (10, HUFFMAN_EMIT_SYMBOL, 178),
+    (15, HUFFMAN_EMIT_SYMBOL, 178),
+    (24, HUFFMAN_EMIT_SYMBOL, 178),
+    (31, HUFFMAN_EMIT_SYMBOL, 178),
+    (41, HUFFMAN_EMIT_SYMBOL, 178),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 178),
+    (3, HUFFMAN_EMIT_SYMBOL, 181),
+    (6, HUFFMAN_EMIT_SYMBOL, 181),
+    (10, HUFFMAN_EMIT_SYMBOL, 181),
+    (15, HUFFMAN_EMIT_SYMBOL, 181),
+    (24, HUFFMAN_EMIT_SYMBOL, 181),
+    (31, HUFFMAN_EMIT_SYMBOL, 181),
+    (41, HUFFMAN_EMIT_SYMBOL, 181),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 181),
+
+    # Node 138
+    (3, HUFFMAN_EMIT_SYMBOL, 185),
+    (6, HUFFMAN_EMIT_SYMBOL, 185),
+    (10, HUFFMAN_EMIT_SYMBOL, 185),
+    (15, HUFFMAN_EMIT_SYMBOL, 185),
+    (24, HUFFMAN_EMIT_SYMBOL, 185),
+    (31, HUFFMAN_EMIT_SYMBOL, 185),
+    (41, HUFFMAN_EMIT_SYMBOL, 185),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 185),
+    (3, HUFFMAN_EMIT_SYMBOL, 186),
+    (6, HUFFMAN_EMIT_SYMBOL, 186),
+    (10, HUFFMAN_EMIT_SYMBOL, 186),
+    (15, HUFFMAN_EMIT_SYMBOL, 186),
+    (24, HUFFMAN_EMIT_SYMBOL, 186),
+    (31, HUFFMAN_EMIT_SYMBOL, 186),
+    (41, HUFFMAN_EMIT_SYMBOL, 186),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 186),
+
+    # Node 139
+    (2, HUFFMAN_EMIT_SYMBOL, 187),
+    (9, HUFFMAN_EMIT_SYMBOL, 187),
+    (23, HUFFMAN_EMIT_SYMBOL, 187),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 187),
+    (2, HUFFMAN_EMIT_SYMBOL, 189),
+    (9, HUFFMAN_EMIT_SYMBOL, 189),
+    (23, HUFFMAN_EMIT_SYMBOL, 189),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 189),
+    (2, HUFFMAN_EMIT_SYMBOL, 190),
+    (9, HUFFMAN_EMIT_SYMBOL, 190),
+    (23, HUFFMAN_EMIT_SYMBOL, 190),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 190),
+    (2, HUFFMAN_EMIT_SYMBOL, 196),
+    (9, HUFFMAN_EMIT_SYMBOL, 196),
+    (23, HUFFMAN_EMIT_SYMBOL, 196),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 196),
+
+    # Node 140
+    (3, HUFFMAN_EMIT_SYMBOL, 187),
+    (6, HUFFMAN_EMIT_SYMBOL, 187),
+    (10, HUFFMAN_EMIT_SYMBOL, 187),
+    (15, HUFFMAN_EMIT_SYMBOL, 187),
+    (24, HUFFMAN_EMIT_SYMBOL, 187),
+    (31, HUFFMAN_EMIT_SYMBOL, 187),
+    (41, HUFFMAN_EMIT_SYMBOL, 187),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 187),
+    (3, HUFFMAN_EMIT_SYMBOL, 189),
+    (6, HUFFMAN_EMIT_SYMBOL, 189),
+    (10, HUFFMAN_EMIT_SYMBOL, 189),
+    (15, HUFFMAN_EMIT_SYMBOL, 189),
+    (24, HUFFMAN_EMIT_SYMBOL, 189),
+    (31, HUFFMAN_EMIT_SYMBOL, 189),
+    (41, HUFFMAN_EMIT_SYMBOL, 189),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 189),
+
+    # Node 141
+    (3, HUFFMAN_EMIT_SYMBOL, 190),
+    (6, HUFFMAN_EMIT_SYMBOL, 190),
+    (10, HUFFMAN_EMIT_SYMBOL, 190),
+    (15, HUFFMAN_EMIT_SYMBOL, 190),
+    (24, HUFFMAN_EMIT_SYMBOL, 190),
+    (31, HUFFMAN_EMIT_SYMBOL, 190),
+    (41, HUFFMAN_EMIT_SYMBOL, 190),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 190),
+    (3, HUFFMAN_EMIT_SYMBOL, 196),
+    (6, HUFFMAN_EMIT_SYMBOL, 196),
+    (10, HUFFMAN_EMIT_SYMBOL, 196),
+    (15, HUFFMAN_EMIT_SYMBOL, 196),
+    (24, HUFFMAN_EMIT_SYMBOL, 196),
+    (31, HUFFMAN_EMIT_SYMBOL, 196),
+    (41, HUFFMAN_EMIT_SYMBOL, 196),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 196),
+
+    # Node 142
+    (1, HUFFMAN_EMIT_SYMBOL, 198),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 198),
+    (1, HUFFMAN_EMIT_SYMBOL, 228),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 228),
+    (1, HUFFMAN_EMIT_SYMBOL, 232),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 232),
+    (1, HUFFMAN_EMIT_SYMBOL, 233),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 233),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 1),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 135),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 137),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 138),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 139),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 140),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 141),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 143),
+
+    # Node 143
+    (2, HUFFMAN_EMIT_SYMBOL, 198),
+    (9, HUFFMAN_EMIT_SYMBOL, 198),
+    (23, HUFFMAN_EMIT_SYMBOL, 198),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 198),
+    (2, HUFFMAN_EMIT_SYMBOL, 228),
+    (9, HUFFMAN_EMIT_SYMBOL, 228),
+    (23, HUFFMAN_EMIT_SYMBOL, 228),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 228),
+    (2, HUFFMAN_EMIT_SYMBOL, 232),
+    (9, HUFFMAN_EMIT_SYMBOL, 232),
+    (23, HUFFMAN_EMIT_SYMBOL, 232),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 232),
+    (2, HUFFMAN_EMIT_SYMBOL, 233),
+    (9, HUFFMAN_EMIT_SYMBOL, 233),
+    (23, HUFFMAN_EMIT_SYMBOL, 233),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 233),
+
+    # Node 144
+    (3, HUFFMAN_EMIT_SYMBOL, 198),
+    (6, HUFFMAN_EMIT_SYMBOL, 198),
+    (10, HUFFMAN_EMIT_SYMBOL, 198),
+    (15, HUFFMAN_EMIT_SYMBOL, 198),
+    (24, HUFFMAN_EMIT_SYMBOL, 198),
+    (31, HUFFMAN_EMIT_SYMBOL, 198),
+    (41, HUFFMAN_EMIT_SYMBOL, 198),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 198),
+    (3, HUFFMAN_EMIT_SYMBOL, 228),
+    (6, HUFFMAN_EMIT_SYMBOL, 228),
+    (10, HUFFMAN_EMIT_SYMBOL, 228),
+    (15, HUFFMAN_EMIT_SYMBOL, 228),
+    (24, HUFFMAN_EMIT_SYMBOL, 228),
+    (31, HUFFMAN_EMIT_SYMBOL, 228),
+    (41, HUFFMAN_EMIT_SYMBOL, 228),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 228),
+
+    # Node 145
+    (3, HUFFMAN_EMIT_SYMBOL, 232),
+    (6, HUFFMAN_EMIT_SYMBOL, 232),
+    (10, HUFFMAN_EMIT_SYMBOL, 232),
+    (15, HUFFMAN_EMIT_SYMBOL, 232),
+    (24, HUFFMAN_EMIT_SYMBOL, 232),
+    (31, HUFFMAN_EMIT_SYMBOL, 232),
+    (41, HUFFMAN_EMIT_SYMBOL, 232),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 232),
+    (3, HUFFMAN_EMIT_SYMBOL, 233),
+    (6, HUFFMAN_EMIT_SYMBOL, 233),
+    (10, HUFFMAN_EMIT_SYMBOL, 233),
+    (15, HUFFMAN_EMIT_SYMBOL, 233),
+    (24, HUFFMAN_EMIT_SYMBOL, 233),
+    (31, HUFFMAN_EMIT_SYMBOL, 233),
+    (41, HUFFMAN_EMIT_SYMBOL, 233),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 233),
+
+    # Node 146
+    (1, HUFFMAN_EMIT_SYMBOL, 1),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 1),
+    (1, HUFFMAN_EMIT_SYMBOL, 135),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 135),
+    (1, HUFFMAN_EMIT_SYMBOL, 137),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 137),
+    (1, HUFFMAN_EMIT_SYMBOL, 138),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 138),
+    (1, HUFFMAN_EMIT_SYMBOL, 139),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 139),
+    (1, HUFFMAN_EMIT_SYMBOL, 140),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 140),
+    (1, HUFFMAN_EMIT_SYMBOL, 141),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 141),
+    (1, HUFFMAN_EMIT_SYMBOL, 143),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 143),
+
+    # Node 147
+    (2, HUFFMAN_EMIT_SYMBOL, 1),
+    (9, HUFFMAN_EMIT_SYMBOL, 1),
+    (23, HUFFMAN_EMIT_SYMBOL, 1),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 1),
+    (2, HUFFMAN_EMIT_SYMBOL, 135),
+    (9, HUFFMAN_EMIT_SYMBOL, 135),
+    (23, HUFFMAN_EMIT_SYMBOL, 135),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 135),
+    (2, HUFFMAN_EMIT_SYMBOL, 137),
+    (9, HUFFMAN_EMIT_SYMBOL, 137),
+    (23, HUFFMAN_EMIT_SYMBOL, 137),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 137),
+    (2, HUFFMAN_EMIT_SYMBOL, 138),
+    (9, HUFFMAN_EMIT_SYMBOL, 138),
+    (23, HUFFMAN_EMIT_SYMBOL, 138),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 138),
+
+    # Node 148
+    (3, HUFFMAN_EMIT_SYMBOL, 1),
+    (6, HUFFMAN_EMIT_SYMBOL, 1),
+    (10, HUFFMAN_EMIT_SYMBOL, 1),
+    (15, HUFFMAN_EMIT_SYMBOL, 1),
+    (24, HUFFMAN_EMIT_SYMBOL, 1),
+    (31, HUFFMAN_EMIT_SYMBOL, 1),
+    (41, HUFFMAN_EMIT_SYMBOL, 1),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 1),
+    (3, HUFFMAN_EMIT_SYMBOL, 135),
+    (6, HUFFMAN_EMIT_SYMBOL, 135),
+    (10, HUFFMAN_EMIT_SYMBOL, 135),
+    (15, HUFFMAN_EMIT_SYMBOL, 135),
+    (24, HUFFMAN_EMIT_SYMBOL, 135),
+    (31, HUFFMAN_EMIT_SYMBOL, 135),
+    (41, HUFFMAN_EMIT_SYMBOL, 135),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 135),
+
+    # Node 149
+    (3, HUFFMAN_EMIT_SYMBOL, 137),
+    (6, HUFFMAN_EMIT_SYMBOL, 137),
+    (10, HUFFMAN_EMIT_SYMBOL, 137),
+    (15, HUFFMAN_EMIT_SYMBOL, 137),
+    (24, HUFFMAN_EMIT_SYMBOL, 137),
+    (31, HUFFMAN_EMIT_SYMBOL, 137),
+    (41, HUFFMAN_EMIT_SYMBOL, 137),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 137),
+    (3, HUFFMAN_EMIT_SYMBOL, 138),
+    (6, HUFFMAN_EMIT_SYMBOL, 138),
+    (10, HUFFMAN_EMIT_SYMBOL, 138),
+    (15, HUFFMAN_EMIT_SYMBOL, 138),
+    (24, HUFFMAN_EMIT_SYMBOL, 138),
+    (31, HUFFMAN_EMIT_SYMBOL, 138),
+    (41, HUFFMAN_EMIT_SYMBOL, 138),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 138),
+
+    # Node 150
+    (2, HUFFMAN_EMIT_SYMBOL, 139),
+    (9, HUFFMAN_EMIT_SYMBOL, 139),
+    (23, HUFFMAN_EMIT_SYMBOL, 139),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 139),
+    (2, HUFFMAN_EMIT_SYMBOL, 140),
+    (9, HUFFMAN_EMIT_SYMBOL, 140),
+    (23, HUFFMAN_EMIT_SYMBOL, 140),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 140),
+    (2, HUFFMAN_EMIT_SYMBOL, 141),
+    (9, HUFFMAN_EMIT_SYMBOL, 141),
+    (23, HUFFMAN_EMIT_SYMBOL, 141),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 141),
+    (2, HUFFMAN_EMIT_SYMBOL, 143),
+    (9, HUFFMAN_EMIT_SYMBOL, 143),
+    (23, HUFFMAN_EMIT_SYMBOL, 143),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 143),
+
+    # Node 151
+    (3, HUFFMAN_EMIT_SYMBOL, 139),
+    (6, HUFFMAN_EMIT_SYMBOL, 139),
+    (10, HUFFMAN_EMIT_SYMBOL, 139),
+    (15, HUFFMAN_EMIT_SYMBOL, 139),
+    (24, HUFFMAN_EMIT_SYMBOL, 139),
+    (31, HUFFMAN_EMIT_SYMBOL, 139),
+    (41, HUFFMAN_EMIT_SYMBOL, 139),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 139),
+    (3, HUFFMAN_EMIT_SYMBOL, 140),
+    (6, HUFFMAN_EMIT_SYMBOL, 140),
+    (10, HUFFMAN_EMIT_SYMBOL, 140),
+    (15, HUFFMAN_EMIT_SYMBOL, 140),
+    (24, HUFFMAN_EMIT_SYMBOL, 140),
+    (31, HUFFMAN_EMIT_SYMBOL, 140),
+    (41, HUFFMAN_EMIT_SYMBOL, 140),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 140),
+
+    # Node 152
+    (3, HUFFMAN_EMIT_SYMBOL, 141),
+    (6, HUFFMAN_EMIT_SYMBOL, 141),
+    (10, HUFFMAN_EMIT_SYMBOL, 141),
+    (15, HUFFMAN_EMIT_SYMBOL, 141),
+    (24, HUFFMAN_EMIT_SYMBOL, 141),
+    (31, HUFFMAN_EMIT_SYMBOL, 141),
+    (41, HUFFMAN_EMIT_SYMBOL, 141),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 141),
+    (3, HUFFMAN_EMIT_SYMBOL, 143),
+    (6, HUFFMAN_EMIT_SYMBOL, 143),
+    (10, HUFFMAN_EMIT_SYMBOL, 143),
+    (15, HUFFMAN_EMIT_SYMBOL, 143),
+    (24, HUFFMAN_EMIT_SYMBOL, 143),
+    (31, HUFFMAN_EMIT_SYMBOL, 143),
+    (41, HUFFMAN_EMIT_SYMBOL, 143),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 143),
+
+    # Node 153
+    (157, 0, 0),
+    (158, 0, 0),
+    (160, 0, 0),
+    (161, 0, 0),
+    (164, 0, 0),
+    (165, 0, 0),
+    (167, 0, 0),
+    (168, 0, 0),
+    (172, 0, 0),
+    (173, 0, 0),
+    (175, 0, 0),
+    (177, 0, 0),
+    (182, 0, 0),
+    (185, 0, 0),
+    (191, 0, 0),
+    (207, 0, 0),
+
+    # Node 154
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 147),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 149),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 150),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 151),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 152),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 155),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 157),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 158),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 165),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 166),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 168),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 174),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 175),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 180),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 182),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 183),
+
+    # Node 155
+    (1, HUFFMAN_EMIT_SYMBOL, 147),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 147),
+    (1, HUFFMAN_EMIT_SYMBOL, 149),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 149),
+    (1, HUFFMAN_EMIT_SYMBOL, 150),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 150),
+    (1, HUFFMAN_EMIT_SYMBOL, 151),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 151),
+    (1, HUFFMAN_EMIT_SYMBOL, 152),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 152),
+    (1, HUFFMAN_EMIT_SYMBOL, 155),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 155),
+    (1, HUFFMAN_EMIT_SYMBOL, 157),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 157),
+    (1, HUFFMAN_EMIT_SYMBOL, 158),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 158),
+
+    # Node 156
+    (2, HUFFMAN_EMIT_SYMBOL, 147),
+    (9, HUFFMAN_EMIT_SYMBOL, 147),
+    (23, HUFFMAN_EMIT_SYMBOL, 147),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 147),
+    (2, HUFFMAN_EMIT_SYMBOL, 149),
+    (9, HUFFMAN_EMIT_SYMBOL, 149),
+    (23, HUFFMAN_EMIT_SYMBOL, 149),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 149),
+    (2, HUFFMAN_EMIT_SYMBOL, 150),
+    (9, HUFFMAN_EMIT_SYMBOL, 150),
+    (23, HUFFMAN_EMIT_SYMBOL, 150),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 150),
+    (2, HUFFMAN_EMIT_SYMBOL, 151),
+    (9, HUFFMAN_EMIT_SYMBOL, 151),
+    (23, HUFFMAN_EMIT_SYMBOL, 151),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 151),
+
+    # Node 157
+    (3, HUFFMAN_EMIT_SYMBOL, 147),
+    (6, HUFFMAN_EMIT_SYMBOL, 147),
+    (10, HUFFMAN_EMIT_SYMBOL, 147),
+    (15, HUFFMAN_EMIT_SYMBOL, 147),
+    (24, HUFFMAN_EMIT_SYMBOL, 147),
+    (31, HUFFMAN_EMIT_SYMBOL, 147),
+    (41, HUFFMAN_EMIT_SYMBOL, 147),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 147),
+    (3, HUFFMAN_EMIT_SYMBOL, 149),
+    (6, HUFFMAN_EMIT_SYMBOL, 149),
+    (10, HUFFMAN_EMIT_SYMBOL, 149),
+    (15, HUFFMAN_EMIT_SYMBOL, 149),
+    (24, HUFFMAN_EMIT_SYMBOL, 149),
+    (31, HUFFMAN_EMIT_SYMBOL, 149),
+    (41, HUFFMAN_EMIT_SYMBOL, 149),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 149),
+
+    # Node 158
+    (3, HUFFMAN_EMIT_SYMBOL, 150),
+    (6, HUFFMAN_EMIT_SYMBOL, 150),
+    (10, HUFFMAN_EMIT_SYMBOL, 150),
+    (15, HUFFMAN_EMIT_SYMBOL, 150),
+    (24, HUFFMAN_EMIT_SYMBOL, 150),
+    (31, HUFFMAN_EMIT_SYMBOL, 150),
+    (41, HUFFMAN_EMIT_SYMBOL, 150),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 150),
+    (3, HUFFMAN_EMIT_SYMBOL, 151),
+    (6, HUFFMAN_EMIT_SYMBOL, 151),
+    (10, HUFFMAN_EMIT_SYMBOL, 151),
+    (15, HUFFMAN_EMIT_SYMBOL, 151),
+    (24, HUFFMAN_EMIT_SYMBOL, 151),
+    (31, HUFFMAN_EMIT_SYMBOL, 151),
+    (41, HUFFMAN_EMIT_SYMBOL, 151),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 151),
+
+    # Node 159
+    (2, HUFFMAN_EMIT_SYMBOL, 152),
+    (9, HUFFMAN_EMIT_SYMBOL, 152),
+    (23, HUFFMAN_EMIT_SYMBOL, 152),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 152),
+    (2, HUFFMAN_EMIT_SYMBOL, 155),
+    (9, HUFFMAN_EMIT_SYMBOL, 155),
+    (23, HUFFMAN_EMIT_SYMBOL, 155),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 155),
+    (2, HUFFMAN_EMIT_SYMBOL, 157),
+    (9, HUFFMAN_EMIT_SYMBOL, 157),
+    (23, HUFFMAN_EMIT_SYMBOL, 157),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 157),
+    (2, HUFFMAN_EMIT_SYMBOL, 158),
+    (9, HUFFMAN_EMIT_SYMBOL, 158),
+    (23, HUFFMAN_EMIT_SYMBOL, 158),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 158),
+
+    # Node 160
+    (3, HUFFMAN_EMIT_SYMBOL, 152),
+    (6, HUFFMAN_EMIT_SYMBOL, 152),
+    (10, HUFFMAN_EMIT_SYMBOL, 152),
+    (15, HUFFMAN_EMIT_SYMBOL, 152),
+    (24, HUFFMAN_EMIT_SYMBOL, 152),
+    (31, HUFFMAN_EMIT_SYMBOL, 152),
+    (41, HUFFMAN_EMIT_SYMBOL, 152),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 152),
+    (3, HUFFMAN_EMIT_SYMBOL, 155),
+    (6, HUFFMAN_EMIT_SYMBOL, 155),
+    (10, HUFFMAN_EMIT_SYMBOL, 155),
+    (15, HUFFMAN_EMIT_SYMBOL, 155),
+    (24, HUFFMAN_EMIT_SYMBOL, 155),
+    (31, HUFFMAN_EMIT_SYMBOL, 155),
+    (41, HUFFMAN_EMIT_SYMBOL, 155),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 155),
+
+    # Node 161
+    (3, HUFFMAN_EMIT_SYMBOL, 157),
+    (6, HUFFMAN_EMIT_SYMBOL, 157),
+    (10, HUFFMAN_EMIT_SYMBOL, 157),
+    (15, HUFFMAN_EMIT_SYMBOL, 157),
+    (24, HUFFMAN_EMIT_SYMBOL, 157),
+    (31, HUFFMAN_EMIT_SYMBOL, 157),
+    (41, HUFFMAN_EMIT_SYMBOL, 157),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 157),
+    (3, HUFFMAN_EMIT_SYMBOL, 158),
+    (6, HUFFMAN_EMIT_SYMBOL, 158),
+    (10, HUFFMAN_EMIT_SYMBOL, 158),
+    (15, HUFFMAN_EMIT_SYMBOL, 158),
+    (24, HUFFMAN_EMIT_SYMBOL, 158),
+    (31, HUFFMAN_EMIT_SYMBOL, 158),
+    (41, HUFFMAN_EMIT_SYMBOL, 158),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 158),
+
+    # Node 162
+    (1, HUFFMAN_EMIT_SYMBOL, 165),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 165),
+    (1, HUFFMAN_EMIT_SYMBOL, 166),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 166),
+    (1, HUFFMAN_EMIT_SYMBOL, 168),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 168),
+    (1, HUFFMAN_EMIT_SYMBOL, 174),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 174),
+    (1, HUFFMAN_EMIT_SYMBOL, 175),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 175),
+    (1, HUFFMAN_EMIT_SYMBOL, 180),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 180),
+    (1, HUFFMAN_EMIT_SYMBOL, 182),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 182),
+    (1, HUFFMAN_EMIT_SYMBOL, 183),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 183),
+
+    # Node 163
+    (2, HUFFMAN_EMIT_SYMBOL, 165),
+    (9, HUFFMAN_EMIT_SYMBOL, 165),
+    (23, HUFFMAN_EMIT_SYMBOL, 165),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 165),
+    (2, HUFFMAN_EMIT_SYMBOL, 166),
+    (9, HUFFMAN_EMIT_SYMBOL, 166),
+    (23, HUFFMAN_EMIT_SYMBOL, 166),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 166),
+    (2, HUFFMAN_EMIT_SYMBOL, 168),
+    (9, HUFFMAN_EMIT_SYMBOL, 168),
+    (23, HUFFMAN_EMIT_SYMBOL, 168),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 168),
+    (2, HUFFMAN_EMIT_SYMBOL, 174),
+    (9, HUFFMAN_EMIT_SYMBOL, 174),
+    (23, HUFFMAN_EMIT_SYMBOL, 174),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 174),
+
+    # Node 164
+    (3, HUFFMAN_EMIT_SYMBOL, 165),
+    (6, HUFFMAN_EMIT_SYMBOL, 165),
+    (10, HUFFMAN_EMIT_SYMBOL, 165),
+    (15, HUFFMAN_EMIT_SYMBOL, 165),
+    (24, HUFFMAN_EMIT_SYMBOL, 165),
+    (31, HUFFMAN_EMIT_SYMBOL, 165),
+    (41, HUFFMAN_EMIT_SYMBOL, 165),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 165),
+    (3, HUFFMAN_EMIT_SYMBOL, 166),
+    (6, HUFFMAN_EMIT_SYMBOL, 166),
+    (10, HUFFMAN_EMIT_SYMBOL, 166),
+    (15, HUFFMAN_EMIT_SYMBOL, 166),
+    (24, HUFFMAN_EMIT_SYMBOL, 166),
+    (31, HUFFMAN_EMIT_SYMBOL, 166),
+    (41, HUFFMAN_EMIT_SYMBOL, 166),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 166),
+
+    # Node 165
+    (3, HUFFMAN_EMIT_SYMBOL, 168),
+    (6, HUFFMAN_EMIT_SYMBOL, 168),
+    (10, HUFFMAN_EMIT_SYMBOL, 168),
+    (15, HUFFMAN_EMIT_SYMBOL, 168),
+    (24, HUFFMAN_EMIT_SYMBOL, 168),
+    (31, HUFFMAN_EMIT_SYMBOL, 168),
+    (41, HUFFMAN_EMIT_SYMBOL, 168),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 168),
+    (3, HUFFMAN_EMIT_SYMBOL, 174),
+    (6, HUFFMAN_EMIT_SYMBOL, 174),
+    (10, HUFFMAN_EMIT_SYMBOL, 174),
+    (15, HUFFMAN_EMIT_SYMBOL, 174),
+    (24, HUFFMAN_EMIT_SYMBOL, 174),
+    (31, HUFFMAN_EMIT_SYMBOL, 174),
+    (41, HUFFMAN_EMIT_SYMBOL, 174),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 174),
+
+    # Node 166
+    (2, HUFFMAN_EMIT_SYMBOL, 175),
+    (9, HUFFMAN_EMIT_SYMBOL, 175),
+    (23, HUFFMAN_EMIT_SYMBOL, 175),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 175),
+    (2, HUFFMAN_EMIT_SYMBOL, 180),
+    (9, HUFFMAN_EMIT_SYMBOL, 180),
+    (23, HUFFMAN_EMIT_SYMBOL, 180),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 180),
+    (2, HUFFMAN_EMIT_SYMBOL, 182),
+    (9, HUFFMAN_EMIT_SYMBOL, 182),
+    (23, HUFFMAN_EMIT_SYMBOL, 182),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 182),
+    (2, HUFFMAN_EMIT_SYMBOL, 183),
+    (9, HUFFMAN_EMIT_SYMBOL, 183),
+    (23, HUFFMAN_EMIT_SYMBOL, 183),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 183),
+
+    # Node 167
+    (3, HUFFMAN_EMIT_SYMBOL, 175),
+    (6, HUFFMAN_EMIT_SYMBOL, 175),
+    (10, HUFFMAN_EMIT_SYMBOL, 175),
+    (15, HUFFMAN_EMIT_SYMBOL, 175),
+    (24, HUFFMAN_EMIT_SYMBOL, 175),
+    (31, HUFFMAN_EMIT_SYMBOL, 175),
+    (41, HUFFMAN_EMIT_SYMBOL, 175),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 175),
+    (3, HUFFMAN_EMIT_SYMBOL, 180),
+    (6, HUFFMAN_EMIT_SYMBOL, 180),
+    (10, HUFFMAN_EMIT_SYMBOL, 180),
+    (15, HUFFMAN_EMIT_SYMBOL, 180),
+    (24, HUFFMAN_EMIT_SYMBOL, 180),
+    (31, HUFFMAN_EMIT_SYMBOL, 180),
+    (41, HUFFMAN_EMIT_SYMBOL, 180),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 180),
+
+    # Node 168
+    (3, HUFFMAN_EMIT_SYMBOL, 182),
+    (6, HUFFMAN_EMIT_SYMBOL, 182),
+    (10, HUFFMAN_EMIT_SYMBOL, 182),
+    (15, HUFFMAN_EMIT_SYMBOL, 182),
+    (24, HUFFMAN_EMIT_SYMBOL, 182),
+    (31, HUFFMAN_EMIT_SYMBOL, 182),
+    (41, HUFFMAN_EMIT_SYMBOL, 182),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 182),
+    (3, HUFFMAN_EMIT_SYMBOL, 183),
+    (6, HUFFMAN_EMIT_SYMBOL, 183),
+    (10, HUFFMAN_EMIT_SYMBOL, 183),
+    (15, HUFFMAN_EMIT_SYMBOL, 183),
+    (24, HUFFMAN_EMIT_SYMBOL, 183),
+    (31, HUFFMAN_EMIT_SYMBOL, 183),
+    (41, HUFFMAN_EMIT_SYMBOL, 183),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 183),
+
+    # Node 169
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 188),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 191),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 197),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 231),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 239),
+    (176, 0, 0),
+    (178, 0, 0),
+    (179, 0, 0),
+    (183, 0, 0),
+    (184, 0, 0),
+    (186, 0, 0),
+    (187, 0, 0),
+    (192, 0, 0),
+    (199, 0, 0),
+    (208, 0, 0),
+    (223, 0, 0),
+
+    # Node 170
+    (1, HUFFMAN_EMIT_SYMBOL, 188),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 188),
+    (1, HUFFMAN_EMIT_SYMBOL, 191),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 191),
+    (1, HUFFMAN_EMIT_SYMBOL, 197),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 197),
+    (1, HUFFMAN_EMIT_SYMBOL, 231),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 231),
+    (1, HUFFMAN_EMIT_SYMBOL, 239),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 239),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 9),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 142),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 144),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 145),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 148),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 159),
+
+    # Node 171
+    (2, HUFFMAN_EMIT_SYMBOL, 188),
+    (9, HUFFMAN_EMIT_SYMBOL, 188),
+    (23, HUFFMAN_EMIT_SYMBOL, 188),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 188),
+    (2, HUFFMAN_EMIT_SYMBOL, 191),
+    (9, HUFFMAN_EMIT_SYMBOL, 191),
+    (23, HUFFMAN_EMIT_SYMBOL, 191),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 191),
+    (2, HUFFMAN_EMIT_SYMBOL, 197),
+    (9, HUFFMAN_EMIT_SYMBOL, 197),
+    (23, HUFFMAN_EMIT_SYMBOL, 197),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 197),
+    (2, HUFFMAN_EMIT_SYMBOL, 231),
+    (9, HUFFMAN_EMIT_SYMBOL, 231),
+    (23, HUFFMAN_EMIT_SYMBOL, 231),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 231),
+
+    # Node 172
+    (3, HUFFMAN_EMIT_SYMBOL, 188),
+    (6, HUFFMAN_EMIT_SYMBOL, 188),
+    (10, HUFFMAN_EMIT_SYMBOL, 188),
+    (15, HUFFMAN_EMIT_SYMBOL, 188),
+    (24, HUFFMAN_EMIT_SYMBOL, 188),
+    (31, HUFFMAN_EMIT_SYMBOL, 188),
+    (41, HUFFMAN_EMIT_SYMBOL, 188),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 188),
+    (3, HUFFMAN_EMIT_SYMBOL, 191),
+    (6, HUFFMAN_EMIT_SYMBOL, 191),
+    (10, HUFFMAN_EMIT_SYMBOL, 191),
+    (15, HUFFMAN_EMIT_SYMBOL, 191),
+    (24, HUFFMAN_EMIT_SYMBOL, 191),
+    (31, HUFFMAN_EMIT_SYMBOL, 191),
+    (41, HUFFMAN_EMIT_SYMBOL, 191),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 191),
+
+    # Node 173
+    (3, HUFFMAN_EMIT_SYMBOL, 197),
+    (6, HUFFMAN_EMIT_SYMBOL, 197),
+    (10, HUFFMAN_EMIT_SYMBOL, 197),
+    (15, HUFFMAN_EMIT_SYMBOL, 197),
+    (24, HUFFMAN_EMIT_SYMBOL, 197),
+    (31, HUFFMAN_EMIT_SYMBOL, 197),
+    (41, HUFFMAN_EMIT_SYMBOL, 197),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 197),
+    (3, HUFFMAN_EMIT_SYMBOL, 231),
+    (6, HUFFMAN_EMIT_SYMBOL, 231),
+    (10, HUFFMAN_EMIT_SYMBOL, 231),
+    (15, HUFFMAN_EMIT_SYMBOL, 231),
+    (24, HUFFMAN_EMIT_SYMBOL, 231),
+    (31, HUFFMAN_EMIT_SYMBOL, 231),
+    (41, HUFFMAN_EMIT_SYMBOL, 231),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 231),
+
+    # Node 174
+    (2, HUFFMAN_EMIT_SYMBOL, 239),
+    (9, HUFFMAN_EMIT_SYMBOL, 239),
+    (23, HUFFMAN_EMIT_SYMBOL, 239),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 239),
+    (1, HUFFMAN_EMIT_SYMBOL, 9),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 9),
+    (1, HUFFMAN_EMIT_SYMBOL, 142),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 142),
+    (1, HUFFMAN_EMIT_SYMBOL, 144),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 144),
+    (1, HUFFMAN_EMIT_SYMBOL, 145),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 145),
+    (1, HUFFMAN_EMIT_SYMBOL, 148),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 148),
+    (1, HUFFMAN_EMIT_SYMBOL, 159),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 159),
+
+    # Node 175
+    (3, HUFFMAN_EMIT_SYMBOL, 239),
+    (6, HUFFMAN_EMIT_SYMBOL, 239),
+    (10, HUFFMAN_EMIT_SYMBOL, 239),
+    (15, HUFFMAN_EMIT_SYMBOL, 239),
+    (24, HUFFMAN_EMIT_SYMBOL, 239),
+    (31, HUFFMAN_EMIT_SYMBOL, 239),
+    (41, HUFFMAN_EMIT_SYMBOL, 239),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 239),
+    (2, HUFFMAN_EMIT_SYMBOL, 9),
+    (9, HUFFMAN_EMIT_SYMBOL, 9),
+    (23, HUFFMAN_EMIT_SYMBOL, 9),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 9),
+    (2, HUFFMAN_EMIT_SYMBOL, 142),
+    (9, HUFFMAN_EMIT_SYMBOL, 142),
+    (23, HUFFMAN_EMIT_SYMBOL, 142),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 142),
+
+    # Node 176
+    (3, HUFFMAN_EMIT_SYMBOL, 9),
+    (6, HUFFMAN_EMIT_SYMBOL, 9),
+    (10, HUFFMAN_EMIT_SYMBOL, 9),
+    (15, HUFFMAN_EMIT_SYMBOL, 9),
+    (24, HUFFMAN_EMIT_SYMBOL, 9),
+    (31, HUFFMAN_EMIT_SYMBOL, 9),
+    (41, HUFFMAN_EMIT_SYMBOL, 9),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 9),
+    (3, HUFFMAN_EMIT_SYMBOL, 142),
+    (6, HUFFMAN_EMIT_SYMBOL, 142),
+    (10, HUFFMAN_EMIT_SYMBOL, 142),
+    (15, HUFFMAN_EMIT_SYMBOL, 142),
+    (24, HUFFMAN_EMIT_SYMBOL, 142),
+    (31, HUFFMAN_EMIT_SYMBOL, 142),
+    (41, HUFFMAN_EMIT_SYMBOL, 142),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 142),
+
+    # Node 177
+    (2, HUFFMAN_EMIT_SYMBOL, 144),
+    (9, HUFFMAN_EMIT_SYMBOL, 144),
+    (23, HUFFMAN_EMIT_SYMBOL, 144),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 144),
+    (2, HUFFMAN_EMIT_SYMBOL, 145),
+    (9, HUFFMAN_EMIT_SYMBOL, 145),
+    (23, HUFFMAN_EMIT_SYMBOL, 145),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 145),
+    (2, HUFFMAN_EMIT_SYMBOL, 148),
+    (9, HUFFMAN_EMIT_SYMBOL, 148),
+    (23, HUFFMAN_EMIT_SYMBOL, 148),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 148),
+    (2, HUFFMAN_EMIT_SYMBOL, 159),
+    (9, HUFFMAN_EMIT_SYMBOL, 159),
+    (23, HUFFMAN_EMIT_SYMBOL, 159),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 159),
+
+    # Node 178
+    (3, HUFFMAN_EMIT_SYMBOL, 144),
+    (6, HUFFMAN_EMIT_SYMBOL, 144),
+    (10, HUFFMAN_EMIT_SYMBOL, 144),
+    (15, HUFFMAN_EMIT_SYMBOL, 144),
+    (24, HUFFMAN_EMIT_SYMBOL, 144),
+    (31, HUFFMAN_EMIT_SYMBOL, 144),
+    (41, HUFFMAN_EMIT_SYMBOL, 144),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 144),
+    (3, HUFFMAN_EMIT_SYMBOL, 145),
+    (6, HUFFMAN_EMIT_SYMBOL, 145),
+    (10, HUFFMAN_EMIT_SYMBOL, 145),
+    (15, HUFFMAN_EMIT_SYMBOL, 145),
+    (24, HUFFMAN_EMIT_SYMBOL, 145),
+    (31, HUFFMAN_EMIT_SYMBOL, 145),
+    (41, HUFFMAN_EMIT_SYMBOL, 145),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 145),
+
+    # Node 179
+    (3, HUFFMAN_EMIT_SYMBOL, 148),
+    (6, HUFFMAN_EMIT_SYMBOL, 148),
+    (10, HUFFMAN_EMIT_SYMBOL, 148),
+    (15, HUFFMAN_EMIT_SYMBOL, 148),
+    (24, HUFFMAN_EMIT_SYMBOL, 148),
+    (31, HUFFMAN_EMIT_SYMBOL, 148),
+    (41, HUFFMAN_EMIT_SYMBOL, 148),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 148),
+    (3, HUFFMAN_EMIT_SYMBOL, 159),
+    (6, HUFFMAN_EMIT_SYMBOL, 159),
+    (10, HUFFMAN_EMIT_SYMBOL, 159),
+    (15, HUFFMAN_EMIT_SYMBOL, 159),
+    (24, HUFFMAN_EMIT_SYMBOL, 159),
+    (31, HUFFMAN_EMIT_SYMBOL, 159),
+    (41, HUFFMAN_EMIT_SYMBOL, 159),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 159),
+
+    # Node 180
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 171),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 206),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 215),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 225),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 236),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 237),
+    (188, 0, 0),
+    (189, 0, 0),
+    (193, 0, 0),
+    (196, 0, 0),
+    (200, 0, 0),
+    (203, 0, 0),
+    (209, 0, 0),
+    (216, 0, 0),
+    (224, 0, 0),
+    (238, 0, 0),
+
+    # Node 181
+    (1, HUFFMAN_EMIT_SYMBOL, 171),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 171),
+    (1, HUFFMAN_EMIT_SYMBOL, 206),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 206),
+    (1, HUFFMAN_EMIT_SYMBOL, 215),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 215),
+    (1, HUFFMAN_EMIT_SYMBOL, 225),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 225),
+    (1, HUFFMAN_EMIT_SYMBOL, 236),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 236),
+    (1, HUFFMAN_EMIT_SYMBOL, 237),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 237),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 199),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 207),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 234),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 235),
+
+    # Node 182
+    (2, HUFFMAN_EMIT_SYMBOL, 171),
+    (9, HUFFMAN_EMIT_SYMBOL, 171),
+    (23, HUFFMAN_EMIT_SYMBOL, 171),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 171),
+    (2, HUFFMAN_EMIT_SYMBOL, 206),
+    (9, HUFFMAN_EMIT_SYMBOL, 206),
+    (23, HUFFMAN_EMIT_SYMBOL, 206),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 206),
+    (2, HUFFMAN_EMIT_SYMBOL, 215),
+    (9, HUFFMAN_EMIT_SYMBOL, 215),
+    (23, HUFFMAN_EMIT_SYMBOL, 215),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 215),
+    (2, HUFFMAN_EMIT_SYMBOL, 225),
+    (9, HUFFMAN_EMIT_SYMBOL, 225),
+    (23, HUFFMAN_EMIT_SYMBOL, 225),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 225),
+
+    # Node 183
+    (3, HUFFMAN_EMIT_SYMBOL, 171),
+    (6, HUFFMAN_EMIT_SYMBOL, 171),
+    (10, HUFFMAN_EMIT_SYMBOL, 171),
+    (15, HUFFMAN_EMIT_SYMBOL, 171),
+    (24, HUFFMAN_EMIT_SYMBOL, 171),
+    (31, HUFFMAN_EMIT_SYMBOL, 171),
+    (41, HUFFMAN_EMIT_SYMBOL, 171),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 171),
+    (3, HUFFMAN_EMIT_SYMBOL, 206),
+    (6, HUFFMAN_EMIT_SYMBOL, 206),
+    (10, HUFFMAN_EMIT_SYMBOL, 206),
+    (15, HUFFMAN_EMIT_SYMBOL, 206),
+    (24, HUFFMAN_EMIT_SYMBOL, 206),
+    (31, HUFFMAN_EMIT_SYMBOL, 206),
+    (41, HUFFMAN_EMIT_SYMBOL, 206),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 206),
+
+    # Node 184
+    (3, HUFFMAN_EMIT_SYMBOL, 215),
+    (6, HUFFMAN_EMIT_SYMBOL, 215),
+    (10, HUFFMAN_EMIT_SYMBOL, 215),
+    (15, HUFFMAN_EMIT_SYMBOL, 215),
+    (24, HUFFMAN_EMIT_SYMBOL, 215),
+    (31, HUFFMAN_EMIT_SYMBOL, 215),
+    (41, HUFFMAN_EMIT_SYMBOL, 215),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 215),
+    (3, HUFFMAN_EMIT_SYMBOL, 225),
+    (6, HUFFMAN_EMIT_SYMBOL, 225),
+    (10, HUFFMAN_EMIT_SYMBOL, 225),
+    (15, HUFFMAN_EMIT_SYMBOL, 225),
+    (24, HUFFMAN_EMIT_SYMBOL, 225),
+    (31, HUFFMAN_EMIT_SYMBOL, 225),
+    (41, HUFFMAN_EMIT_SYMBOL, 225),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 225),
+
+    # Node 185
+    (2, HUFFMAN_EMIT_SYMBOL, 236),
+    (9, HUFFMAN_EMIT_SYMBOL, 236),
+    (23, HUFFMAN_EMIT_SYMBOL, 236),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 236),
+    (2, HUFFMAN_EMIT_SYMBOL, 237),
+    (9, HUFFMAN_EMIT_SYMBOL, 237),
+    (23, HUFFMAN_EMIT_SYMBOL, 237),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 237),
+    (1, HUFFMAN_EMIT_SYMBOL, 199),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 199),
+    (1, HUFFMAN_EMIT_SYMBOL, 207),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 207),
+    (1, HUFFMAN_EMIT_SYMBOL, 234),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 234),
+    (1, HUFFMAN_EMIT_SYMBOL, 235),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 235),
+
+    # Node 186
+    (3, HUFFMAN_EMIT_SYMBOL, 236),
+    (6, HUFFMAN_EMIT_SYMBOL, 236),
+    (10, HUFFMAN_EMIT_SYMBOL, 236),
+    (15, HUFFMAN_EMIT_SYMBOL, 236),
+    (24, HUFFMAN_EMIT_SYMBOL, 236),
+    (31, HUFFMAN_EMIT_SYMBOL, 236),
+    (41, HUFFMAN_EMIT_SYMBOL, 236),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 236),
+    (3, HUFFMAN_EMIT_SYMBOL, 237),
+    (6, HUFFMAN_EMIT_SYMBOL, 237),
+    (10, HUFFMAN_EMIT_SYMBOL, 237),
+    (15, HUFFMAN_EMIT_SYMBOL, 237),
+    (24, HUFFMAN_EMIT_SYMBOL, 237),
+    (31, HUFFMAN_EMIT_SYMBOL, 237),
+    (41, HUFFMAN_EMIT_SYMBOL, 237),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 237),
+
+    # Node 187
+    (2, HUFFMAN_EMIT_SYMBOL, 199),
+    (9, HUFFMAN_EMIT_SYMBOL, 199),
+    (23, HUFFMAN_EMIT_SYMBOL, 199),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 199),
+    (2, HUFFMAN_EMIT_SYMBOL, 207),
+    (9, HUFFMAN_EMIT_SYMBOL, 207),
+    (23, HUFFMAN_EMIT_SYMBOL, 207),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 207),
+    (2, HUFFMAN_EMIT_SYMBOL, 234),
+    (9, HUFFMAN_EMIT_SYMBOL, 234),
+    (23, HUFFMAN_EMIT_SYMBOL, 234),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 234),
+    (2, HUFFMAN_EMIT_SYMBOL, 235),
+    (9, HUFFMAN_EMIT_SYMBOL, 235),
+    (23, HUFFMAN_EMIT_SYMBOL, 235),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 235),
+
+    # Node 188
+    (3, HUFFMAN_EMIT_SYMBOL, 199),
+    (6, HUFFMAN_EMIT_SYMBOL, 199),
+    (10, HUFFMAN_EMIT_SYMBOL, 199),
+    (15, HUFFMAN_EMIT_SYMBOL, 199),
+    (24, HUFFMAN_EMIT_SYMBOL, 199),
+    (31, HUFFMAN_EMIT_SYMBOL, 199),
+    (41, HUFFMAN_EMIT_SYMBOL, 199),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 199),
+    (3, HUFFMAN_EMIT_SYMBOL, 207),
+    (6, HUFFMAN_EMIT_SYMBOL, 207),
+    (10, HUFFMAN_EMIT_SYMBOL, 207),
+    (15, HUFFMAN_EMIT_SYMBOL, 207),
+    (24, HUFFMAN_EMIT_SYMBOL, 207),
+    (31, HUFFMAN_EMIT_SYMBOL, 207),
+    (41, HUFFMAN_EMIT_SYMBOL, 207),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 207),
+
+    # Node 189
+    (3, HUFFMAN_EMIT_SYMBOL, 234),
+    (6, HUFFMAN_EMIT_SYMBOL, 234),
+    (10, HUFFMAN_EMIT_SYMBOL, 234),
+    (15, HUFFMAN_EMIT_SYMBOL, 234),
+    (24, HUFFMAN_EMIT_SYMBOL, 234),
+    (31, HUFFMAN_EMIT_SYMBOL, 234),
+    (41, HUFFMAN_EMIT_SYMBOL, 234),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 234),
+    (3, HUFFMAN_EMIT_SYMBOL, 235),
+    (6, HUFFMAN_EMIT_SYMBOL, 235),
+    (10, HUFFMAN_EMIT_SYMBOL, 235),
+    (15, HUFFMAN_EMIT_SYMBOL, 235),
+    (24, HUFFMAN_EMIT_SYMBOL, 235),
+    (31, HUFFMAN_EMIT_SYMBOL, 235),
+    (41, HUFFMAN_EMIT_SYMBOL, 235),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 235),
+
+    # Node 190
+    (194, 0, 0),
+    (195, 0, 0),
+    (197, 0, 0),
+    (198, 0, 0),
+    (201, 0, 0),
+    (202, 0, 0),
+    (204, 0, 0),
+    (205, 0, 0),
+    (210, 0, 0),
+    (213, 0, 0),
+    (217, 0, 0),
+    (220, 0, 0),
+    (225, 0, 0),
+    (231, 0, 0),
+    (239, 0, 0),
+    (246, 0, 0),
+
+    # Node 191
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 192),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 193),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 200),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 201),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 202),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 205),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 210),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 213),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 218),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 219),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 238),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 240),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 242),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 243),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 255),
+    (206, 0, 0),
+
+    # Node 192
+    (1, HUFFMAN_EMIT_SYMBOL, 192),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 192),
+    (1, HUFFMAN_EMIT_SYMBOL, 193),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 193),
+    (1, HUFFMAN_EMIT_SYMBOL, 200),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 200),
+    (1, HUFFMAN_EMIT_SYMBOL, 201),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 201),
+    (1, HUFFMAN_EMIT_SYMBOL, 202),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 202),
+    (1, HUFFMAN_EMIT_SYMBOL, 205),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 205),
+    (1, HUFFMAN_EMIT_SYMBOL, 210),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 210),
+    (1, HUFFMAN_EMIT_SYMBOL, 213),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 213),
+
+    # Node 193
+    (2, HUFFMAN_EMIT_SYMBOL, 192),
+    (9, HUFFMAN_EMIT_SYMBOL, 192),
+    (23, HUFFMAN_EMIT_SYMBOL, 192),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 192),
+    (2, HUFFMAN_EMIT_SYMBOL, 193),
+    (9, HUFFMAN_EMIT_SYMBOL, 193),
+    (23, HUFFMAN_EMIT_SYMBOL, 193),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 193),
+    (2, HUFFMAN_EMIT_SYMBOL, 200),
+    (9, HUFFMAN_EMIT_SYMBOL, 200),
+    (23, HUFFMAN_EMIT_SYMBOL, 200),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 200),
+    (2, HUFFMAN_EMIT_SYMBOL, 201),
+    (9, HUFFMAN_EMIT_SYMBOL, 201),
+    (23, HUFFMAN_EMIT_SYMBOL, 201),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 201),
+
+    # Node 194
+    (3, HUFFMAN_EMIT_SYMBOL, 192),
+    (6, HUFFMAN_EMIT_SYMBOL, 192),
+    (10, HUFFMAN_EMIT_SYMBOL, 192),
+    (15, HUFFMAN_EMIT_SYMBOL, 192),
+    (24, HUFFMAN_EMIT_SYMBOL, 192),
+    (31, HUFFMAN_EMIT_SYMBOL, 192),
+    (41, HUFFMAN_EMIT_SYMBOL, 192),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 192),
+    (3, HUFFMAN_EMIT_SYMBOL, 193),
+    (6, HUFFMAN_EMIT_SYMBOL, 193),
+    (10, HUFFMAN_EMIT_SYMBOL, 193),
+    (15, HUFFMAN_EMIT_SYMBOL, 193),
+    (24, HUFFMAN_EMIT_SYMBOL, 193),
+    (31, HUFFMAN_EMIT_SYMBOL, 193),
+    (41, HUFFMAN_EMIT_SYMBOL, 193),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 193),
+
+    # Node 195
+    (3, HUFFMAN_EMIT_SYMBOL, 200),
+    (6, HUFFMAN_EMIT_SYMBOL, 200),
+    (10, HUFFMAN_EMIT_SYMBOL, 200),
+    (15, HUFFMAN_EMIT_SYMBOL, 200),
+    (24, HUFFMAN_EMIT_SYMBOL, 200),
+    (31, HUFFMAN_EMIT_SYMBOL, 200),
+    (41, HUFFMAN_EMIT_SYMBOL, 200),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 200),
+    (3, HUFFMAN_EMIT_SYMBOL, 201),
+    (6, HUFFMAN_EMIT_SYMBOL, 201),
+    (10, HUFFMAN_EMIT_SYMBOL, 201),
+    (15, HUFFMAN_EMIT_SYMBOL, 201),
+    (24, HUFFMAN_EMIT_SYMBOL, 201),
+    (31, HUFFMAN_EMIT_SYMBOL, 201),
+    (41, HUFFMAN_EMIT_SYMBOL, 201),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 201),
+
+    # Node 196
+    (2, HUFFMAN_EMIT_SYMBOL, 202),
+    (9, HUFFMAN_EMIT_SYMBOL, 202),
+    (23, HUFFMAN_EMIT_SYMBOL, 202),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 202),
+    (2, HUFFMAN_EMIT_SYMBOL, 205),
+    (9, HUFFMAN_EMIT_SYMBOL, 205),
+    (23, HUFFMAN_EMIT_SYMBOL, 205),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 205),
+    (2, HUFFMAN_EMIT_SYMBOL, 210),
+    (9, HUFFMAN_EMIT_SYMBOL, 210),
+    (23, HUFFMAN_EMIT_SYMBOL, 210),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 210),
+    (2, HUFFMAN_EMIT_SYMBOL, 213),
+    (9, HUFFMAN_EMIT_SYMBOL, 213),
+    (23, HUFFMAN_EMIT_SYMBOL, 213),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 213),
+
+    # Node 197
+    (3, HUFFMAN_EMIT_SYMBOL, 202),
+    (6, HUFFMAN_EMIT_SYMBOL, 202),
+    (10, HUFFMAN_EMIT_SYMBOL, 202),
+    (15, HUFFMAN_EMIT_SYMBOL, 202),
+    (24, HUFFMAN_EMIT_SYMBOL, 202),
+    (31, HUFFMAN_EMIT_SYMBOL, 202),
+    (41, HUFFMAN_EMIT_SYMBOL, 202),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 202),
+    (3, HUFFMAN_EMIT_SYMBOL, 205),
+    (6, HUFFMAN_EMIT_SYMBOL, 205),
+    (10, HUFFMAN_EMIT_SYMBOL, 205),
+    (15, HUFFMAN_EMIT_SYMBOL, 205),
+    (24, HUFFMAN_EMIT_SYMBOL, 205),
+    (31, HUFFMAN_EMIT_SYMBOL, 205),
+    (41, HUFFMAN_EMIT_SYMBOL, 205),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 205),
+
+    # Node 198
+    (3, HUFFMAN_EMIT_SYMBOL, 210),
+    (6, HUFFMAN_EMIT_SYMBOL, 210),
+    (10, HUFFMAN_EMIT_SYMBOL, 210),
+    (15, HUFFMAN_EMIT_SYMBOL, 210),
+    (24, HUFFMAN_EMIT_SYMBOL, 210),
+    (31, HUFFMAN_EMIT_SYMBOL, 210),
+    (41, HUFFMAN_EMIT_SYMBOL, 210),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 210),
+    (3, HUFFMAN_EMIT_SYMBOL, 213),
+    (6, HUFFMAN_EMIT_SYMBOL, 213),
+    (10, HUFFMAN_EMIT_SYMBOL, 213),
+    (15, HUFFMAN_EMIT_SYMBOL, 213),
+    (24, HUFFMAN_EMIT_SYMBOL, 213),
+    (31, HUFFMAN_EMIT_SYMBOL, 213),
+    (41, HUFFMAN_EMIT_SYMBOL, 213),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 213),
+
+    # Node 199
+    (1, HUFFMAN_EMIT_SYMBOL, 218),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 218),
+    (1, HUFFMAN_EMIT_SYMBOL, 219),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 219),
+    (1, HUFFMAN_EMIT_SYMBOL, 238),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 238),
+    (1, HUFFMAN_EMIT_SYMBOL, 240),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 240),
+    (1, HUFFMAN_EMIT_SYMBOL, 242),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 242),
+    (1, HUFFMAN_EMIT_SYMBOL, 243),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 243),
+    (1, HUFFMAN_EMIT_SYMBOL, 255),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 255),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 203),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 204),
+
+    # Node 200
+    (2, HUFFMAN_EMIT_SYMBOL, 218),
+    (9, HUFFMAN_EMIT_SYMBOL, 218),
+    (23, HUFFMAN_EMIT_SYMBOL, 218),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 218),
+    (2, HUFFMAN_EMIT_SYMBOL, 219),
+    (9, HUFFMAN_EMIT_SYMBOL, 219),
+    (23, HUFFMAN_EMIT_SYMBOL, 219),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 219),
+    (2, HUFFMAN_EMIT_SYMBOL, 238),
+    (9, HUFFMAN_EMIT_SYMBOL, 238),
+    (23, HUFFMAN_EMIT_SYMBOL, 238),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 238),
+    (2, HUFFMAN_EMIT_SYMBOL, 240),
+    (9, HUFFMAN_EMIT_SYMBOL, 240),
+    (23, HUFFMAN_EMIT_SYMBOL, 240),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 240),
+
+    # Node 201
+    (3, HUFFMAN_EMIT_SYMBOL, 218),
+    (6, HUFFMAN_EMIT_SYMBOL, 218),
+    (10, HUFFMAN_EMIT_SYMBOL, 218),
+    (15, HUFFMAN_EMIT_SYMBOL, 218),
+    (24, HUFFMAN_EMIT_SYMBOL, 218),
+    (31, HUFFMAN_EMIT_SYMBOL, 218),
+    (41, HUFFMAN_EMIT_SYMBOL, 218),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 218),
+    (3, HUFFMAN_EMIT_SYMBOL, 219),
+    (6, HUFFMAN_EMIT_SYMBOL, 219),
+    (10, HUFFMAN_EMIT_SYMBOL, 219),
+    (15, HUFFMAN_EMIT_SYMBOL, 219),
+    (24, HUFFMAN_EMIT_SYMBOL, 219),
+    (31, HUFFMAN_EMIT_SYMBOL, 219),
+    (41, HUFFMAN_EMIT_SYMBOL, 219),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 219),
+
+    # Node 202
+    (3, HUFFMAN_EMIT_SYMBOL, 238),
+    (6, HUFFMAN_EMIT_SYMBOL, 238),
+    (10, HUFFMAN_EMIT_SYMBOL, 238),
+    (15, HUFFMAN_EMIT_SYMBOL, 238),
+    (24, HUFFMAN_EMIT_SYMBOL, 238),
+    (31, HUFFMAN_EMIT_SYMBOL, 238),
+    (41, HUFFMAN_EMIT_SYMBOL, 238),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 238),
+    (3, HUFFMAN_EMIT_SYMBOL, 240),
+    (6, HUFFMAN_EMIT_SYMBOL, 240),
+    (10, HUFFMAN_EMIT_SYMBOL, 240),
+    (15, HUFFMAN_EMIT_SYMBOL, 240),
+    (24, HUFFMAN_EMIT_SYMBOL, 240),
+    (31, HUFFMAN_EMIT_SYMBOL, 240),
+    (41, HUFFMAN_EMIT_SYMBOL, 240),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 240),
+
+    # Node 203
+    (2, HUFFMAN_EMIT_SYMBOL, 242),
+    (9, HUFFMAN_EMIT_SYMBOL, 242),
+    (23, HUFFMAN_EMIT_SYMBOL, 242),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 242),
+    (2, HUFFMAN_EMIT_SYMBOL, 243),
+    (9, HUFFMAN_EMIT_SYMBOL, 243),
+    (23, HUFFMAN_EMIT_SYMBOL, 243),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 243),
+    (2, HUFFMAN_EMIT_SYMBOL, 255),
+    (9, HUFFMAN_EMIT_SYMBOL, 255),
+    (23, HUFFMAN_EMIT_SYMBOL, 255),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 255),
+    (1, HUFFMAN_EMIT_SYMBOL, 203),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 203),
+    (1, HUFFMAN_EMIT_SYMBOL, 204),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 204),
+
+    # Node 204
+    (3, HUFFMAN_EMIT_SYMBOL, 242),
+    (6, HUFFMAN_EMIT_SYMBOL, 242),
+    (10, HUFFMAN_EMIT_SYMBOL, 242),
+    (15, HUFFMAN_EMIT_SYMBOL, 242),
+    (24, HUFFMAN_EMIT_SYMBOL, 242),
+    (31, HUFFMAN_EMIT_SYMBOL, 242),
+    (41, HUFFMAN_EMIT_SYMBOL, 242),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 242),
+    (3, HUFFMAN_EMIT_SYMBOL, 243),
+    (6, HUFFMAN_EMIT_SYMBOL, 243),
+    (10, HUFFMAN_EMIT_SYMBOL, 243),
+    (15, HUFFMAN_EMIT_SYMBOL, 243),
+    (24, HUFFMAN_EMIT_SYMBOL, 243),
+    (31, HUFFMAN_EMIT_SYMBOL, 243),
+    (41, HUFFMAN_EMIT_SYMBOL, 243),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 243),
+
+    # Node 205
+    (3, HUFFMAN_EMIT_SYMBOL, 255),
+    (6, HUFFMAN_EMIT_SYMBOL, 255),
+    (10, HUFFMAN_EMIT_SYMBOL, 255),
+    (15, HUFFMAN_EMIT_SYMBOL, 255),
+    (24, HUFFMAN_EMIT_SYMBOL, 255),
+    (31, HUFFMAN_EMIT_SYMBOL, 255),
+    (41, HUFFMAN_EMIT_SYMBOL, 255),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 255),
+    (2, HUFFMAN_EMIT_SYMBOL, 203),
+    (9, HUFFMAN_EMIT_SYMBOL, 203),
+    (23, HUFFMAN_EMIT_SYMBOL, 203),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 203),
+    (2, HUFFMAN_EMIT_SYMBOL, 204),
+    (9, HUFFMAN_EMIT_SYMBOL, 204),
+    (23, HUFFMAN_EMIT_SYMBOL, 204),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 204),
+
+    # Node 206
+    (3, HUFFMAN_EMIT_SYMBOL, 203),
+    (6, HUFFMAN_EMIT_SYMBOL, 203),
+    (10, HUFFMAN_EMIT_SYMBOL, 203),
+    (15, HUFFMAN_EMIT_SYMBOL, 203),
+    (24, HUFFMAN_EMIT_SYMBOL, 203),
+    (31, HUFFMAN_EMIT_SYMBOL, 203),
+    (41, HUFFMAN_EMIT_SYMBOL, 203),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 203),
+    (3, HUFFMAN_EMIT_SYMBOL, 204),
+    (6, HUFFMAN_EMIT_SYMBOL, 204),
+    (10, HUFFMAN_EMIT_SYMBOL, 204),
+    (15, HUFFMAN_EMIT_SYMBOL, 204),
+    (24, HUFFMAN_EMIT_SYMBOL, 204),
+    (31, HUFFMAN_EMIT_SYMBOL, 204),
+    (41, HUFFMAN_EMIT_SYMBOL, 204),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 204),
+
+    # Node 207
+    (211, 0, 0),
+    (212, 0, 0),
+    (214, 0, 0),
+    (215, 0, 0),
+    (218, 0, 0),
+    (219, 0, 0),
+    (221, 0, 0),
+    (222, 0, 0),
+    (226, 0, 0),
+    (228, 0, 0),
+    (232, 0, 0),
+    (235, 0, 0),
+    (240, 0, 0),
+    (243, 0, 0),
+    (247, 0, 0),
+    (250, 0, 0),
+
+    # Node 208
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 211),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 212),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 214),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 221),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 222),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 223),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 241),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 244),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 245),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 246),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 247),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 248),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 250),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 251),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 252),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 253),
+
+    # Node 209
+    (1, HUFFMAN_EMIT_SYMBOL, 211),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 211),
+    (1, HUFFMAN_EMIT_SYMBOL, 212),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 212),
+    (1, HUFFMAN_EMIT_SYMBOL, 214),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 214),
+    (1, HUFFMAN_EMIT_SYMBOL, 221),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 221),
+    (1, HUFFMAN_EMIT_SYMBOL, 222),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 222),
+    (1, HUFFMAN_EMIT_SYMBOL, 223),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 223),
+    (1, HUFFMAN_EMIT_SYMBOL, 241),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 241),
+    (1, HUFFMAN_EMIT_SYMBOL, 244),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 244),
+
+    # Node 210
+    (2, HUFFMAN_EMIT_SYMBOL, 211),
+    (9, HUFFMAN_EMIT_SYMBOL, 211),
+    (23, HUFFMAN_EMIT_SYMBOL, 211),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 211),
+    (2, HUFFMAN_EMIT_SYMBOL, 212),
+    (9, HUFFMAN_EMIT_SYMBOL, 212),
+    (23, HUFFMAN_EMIT_SYMBOL, 212),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 212),
+    (2, HUFFMAN_EMIT_SYMBOL, 214),
+    (9, HUFFMAN_EMIT_SYMBOL, 214),
+    (23, HUFFMAN_EMIT_SYMBOL, 214),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 214),
+    (2, HUFFMAN_EMIT_SYMBOL, 221),
+    (9, HUFFMAN_EMIT_SYMBOL, 221),
+    (23, HUFFMAN_EMIT_SYMBOL, 221),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 221),
+
+    # Node 211
+    (3, HUFFMAN_EMIT_SYMBOL, 211),
+    (6, HUFFMAN_EMIT_SYMBOL, 211),
+    (10, HUFFMAN_EMIT_SYMBOL, 211),
+    (15, HUFFMAN_EMIT_SYMBOL, 211),
+    (24, HUFFMAN_EMIT_SYMBOL, 211),
+    (31, HUFFMAN_EMIT_SYMBOL, 211),
+    (41, HUFFMAN_EMIT_SYMBOL, 211),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 211),
+    (3, HUFFMAN_EMIT_SYMBOL, 212),
+    (6, HUFFMAN_EMIT_SYMBOL, 212),
+    (10, HUFFMAN_EMIT_SYMBOL, 212),
+    (15, HUFFMAN_EMIT_SYMBOL, 212),
+    (24, HUFFMAN_EMIT_SYMBOL, 212),
+    (31, HUFFMAN_EMIT_SYMBOL, 212),
+    (41, HUFFMAN_EMIT_SYMBOL, 212),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 212),
+
+    # Node 212
+    (3, HUFFMAN_EMIT_SYMBOL, 214),
+    (6, HUFFMAN_EMIT_SYMBOL, 214),
+    (10, HUFFMAN_EMIT_SYMBOL, 214),
+    (15, HUFFMAN_EMIT_SYMBOL, 214),
+    (24, HUFFMAN_EMIT_SYMBOL, 214),
+    (31, HUFFMAN_EMIT_SYMBOL, 214),
+    (41, HUFFMAN_EMIT_SYMBOL, 214),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 214),
+    (3, HUFFMAN_EMIT_SYMBOL, 221),
+    (6, HUFFMAN_EMIT_SYMBOL, 221),
+    (10, HUFFMAN_EMIT_SYMBOL, 221),
+    (15, HUFFMAN_EMIT_SYMBOL, 221),
+    (24, HUFFMAN_EMIT_SYMBOL, 221),
+    (31, HUFFMAN_EMIT_SYMBOL, 221),
+    (41, HUFFMAN_EMIT_SYMBOL, 221),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 221),
+
+    # Node 213
+    (2, HUFFMAN_EMIT_SYMBOL, 222),
+    (9, HUFFMAN_EMIT_SYMBOL, 222),
+    (23, HUFFMAN_EMIT_SYMBOL, 222),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 222),
+    (2, HUFFMAN_EMIT_SYMBOL, 223),
+    (9, HUFFMAN_EMIT_SYMBOL, 223),
+    (23, HUFFMAN_EMIT_SYMBOL, 223),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 223),
+    (2, HUFFMAN_EMIT_SYMBOL, 241),
+    (9, HUFFMAN_EMIT_SYMBOL, 241),
+    (23, HUFFMAN_EMIT_SYMBOL, 241),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 241),
+    (2, HUFFMAN_EMIT_SYMBOL, 244),
+    (9, HUFFMAN_EMIT_SYMBOL, 244),
+    (23, HUFFMAN_EMIT_SYMBOL, 244),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 244),
+
+    # Node 214
+    (3, HUFFMAN_EMIT_SYMBOL, 222),
+    (6, HUFFMAN_EMIT_SYMBOL, 222),
+    (10, HUFFMAN_EMIT_SYMBOL, 222),
+    (15, HUFFMAN_EMIT_SYMBOL, 222),
+    (24, HUFFMAN_EMIT_SYMBOL, 222),
+    (31, HUFFMAN_EMIT_SYMBOL, 222),
+    (41, HUFFMAN_EMIT_SYMBOL, 222),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 222),
+    (3, HUFFMAN_EMIT_SYMBOL, 223),
+    (6, HUFFMAN_EMIT_SYMBOL, 223),
+    (10, HUFFMAN_EMIT_SYMBOL, 223),
+    (15, HUFFMAN_EMIT_SYMBOL, 223),
+    (24, HUFFMAN_EMIT_SYMBOL, 223),
+    (31, HUFFMAN_EMIT_SYMBOL, 223),
+    (41, HUFFMAN_EMIT_SYMBOL, 223),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 223),
+
+    # Node 215
+    (3, HUFFMAN_EMIT_SYMBOL, 241),
+    (6, HUFFMAN_EMIT_SYMBOL, 241),
+    (10, HUFFMAN_EMIT_SYMBOL, 241),
+    (15, HUFFMAN_EMIT_SYMBOL, 241),
+    (24, HUFFMAN_EMIT_SYMBOL, 241),
+    (31, HUFFMAN_EMIT_SYMBOL, 241),
+    (41, HUFFMAN_EMIT_SYMBOL, 241),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 241),
+    (3, HUFFMAN_EMIT_SYMBOL, 244),
+    (6, HUFFMAN_EMIT_SYMBOL, 244),
+    (10, HUFFMAN_EMIT_SYMBOL, 244),
+    (15, HUFFMAN_EMIT_SYMBOL, 244),
+    (24, HUFFMAN_EMIT_SYMBOL, 244),
+    (31, HUFFMAN_EMIT_SYMBOL, 244),
+    (41, HUFFMAN_EMIT_SYMBOL, 244),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 244),
+
+    # Node 216
+    (1, HUFFMAN_EMIT_SYMBOL, 245),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 245),
+    (1, HUFFMAN_EMIT_SYMBOL, 246),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 246),
+    (1, HUFFMAN_EMIT_SYMBOL, 247),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 247),
+    (1, HUFFMAN_EMIT_SYMBOL, 248),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 248),
+    (1, HUFFMAN_EMIT_SYMBOL, 250),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 250),
+    (1, HUFFMAN_EMIT_SYMBOL, 251),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 251),
+    (1, HUFFMAN_EMIT_SYMBOL, 252),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 252),
+    (1, HUFFMAN_EMIT_SYMBOL, 253),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 253),
+
+    # Node 217
+    (2, HUFFMAN_EMIT_SYMBOL, 245),
+    (9, HUFFMAN_EMIT_SYMBOL, 245),
+    (23, HUFFMAN_EMIT_SYMBOL, 245),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 245),
+    (2, HUFFMAN_EMIT_SYMBOL, 246),
+    (9, HUFFMAN_EMIT_SYMBOL, 246),
+    (23, HUFFMAN_EMIT_SYMBOL, 246),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 246),
+    (2, HUFFMAN_EMIT_SYMBOL, 247),
+    (9, HUFFMAN_EMIT_SYMBOL, 247),
+    (23, HUFFMAN_EMIT_SYMBOL, 247),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 247),
+    (2, HUFFMAN_EMIT_SYMBOL, 248),
+    (9, HUFFMAN_EMIT_SYMBOL, 248),
+    (23, HUFFMAN_EMIT_SYMBOL, 248),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 248),
+
+    # Node 218
+    (3, HUFFMAN_EMIT_SYMBOL, 245),
+    (6, HUFFMAN_EMIT_SYMBOL, 245),
+    (10, HUFFMAN_EMIT_SYMBOL, 245),
+    (15, HUFFMAN_EMIT_SYMBOL, 245),
+    (24, HUFFMAN_EMIT_SYMBOL, 245),
+    (31, HUFFMAN_EMIT_SYMBOL, 245),
+    (41, HUFFMAN_EMIT_SYMBOL, 245),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 245),
+    (3, HUFFMAN_EMIT_SYMBOL, 246),
+    (6, HUFFMAN_EMIT_SYMBOL, 246),
+    (10, HUFFMAN_EMIT_SYMBOL, 246),
+    (15, HUFFMAN_EMIT_SYMBOL, 246),
+    (24, HUFFMAN_EMIT_SYMBOL, 246),
+    (31, HUFFMAN_EMIT_SYMBOL, 246),
+    (41, HUFFMAN_EMIT_SYMBOL, 246),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 246),
+
+    # Node 219
+    (3, HUFFMAN_EMIT_SYMBOL, 247),
+    (6, HUFFMAN_EMIT_SYMBOL, 247),
+    (10, HUFFMAN_EMIT_SYMBOL, 247),
+    (15, HUFFMAN_EMIT_SYMBOL, 247),
+    (24, HUFFMAN_EMIT_SYMBOL, 247),
+    (31, HUFFMAN_EMIT_SYMBOL, 247),
+    (41, HUFFMAN_EMIT_SYMBOL, 247),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 247),
+    (3, HUFFMAN_EMIT_SYMBOL, 248),
+    (6, HUFFMAN_EMIT_SYMBOL, 248),
+    (10, HUFFMAN_EMIT_SYMBOL, 248),
+    (15, HUFFMAN_EMIT_SYMBOL, 248),
+    (24, HUFFMAN_EMIT_SYMBOL, 248),
+    (31, HUFFMAN_EMIT_SYMBOL, 248),
+    (41, HUFFMAN_EMIT_SYMBOL, 248),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 248),
+
+    # Node 220
+    (2, HUFFMAN_EMIT_SYMBOL, 250),
+    (9, HUFFMAN_EMIT_SYMBOL, 250),
+    (23, HUFFMAN_EMIT_SYMBOL, 250),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 250),
+    (2, HUFFMAN_EMIT_SYMBOL, 251),
+    (9, HUFFMAN_EMIT_SYMBOL, 251),
+    (23, HUFFMAN_EMIT_SYMBOL, 251),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 251),
+    (2, HUFFMAN_EMIT_SYMBOL, 252),
+    (9, HUFFMAN_EMIT_SYMBOL, 252),
+    (23, HUFFMAN_EMIT_SYMBOL, 252),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 252),
+    (2, HUFFMAN_EMIT_SYMBOL, 253),
+    (9, HUFFMAN_EMIT_SYMBOL, 253),
+    (23, HUFFMAN_EMIT_SYMBOL, 253),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 253),
+
+    # Node 221
+    (3, HUFFMAN_EMIT_SYMBOL, 250),
+    (6, HUFFMAN_EMIT_SYMBOL, 250),
+    (10, HUFFMAN_EMIT_SYMBOL, 250),
+    (15, HUFFMAN_EMIT_SYMBOL, 250),
+    (24, HUFFMAN_EMIT_SYMBOL, 250),
+    (31, HUFFMAN_EMIT_SYMBOL, 250),
+    (41, HUFFMAN_EMIT_SYMBOL, 250),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 250),
+    (3, HUFFMAN_EMIT_SYMBOL, 251),
+    (6, HUFFMAN_EMIT_SYMBOL, 251),
+    (10, HUFFMAN_EMIT_SYMBOL, 251),
+    (15, HUFFMAN_EMIT_SYMBOL, 251),
+    (24, HUFFMAN_EMIT_SYMBOL, 251),
+    (31, HUFFMAN_EMIT_SYMBOL, 251),
+    (41, HUFFMAN_EMIT_SYMBOL, 251),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 251),
+
+    # Node 222
+    (3, HUFFMAN_EMIT_SYMBOL, 252),
+    (6, HUFFMAN_EMIT_SYMBOL, 252),
+    (10, HUFFMAN_EMIT_SYMBOL, 252),
+    (15, HUFFMAN_EMIT_SYMBOL, 252),
+    (24, HUFFMAN_EMIT_SYMBOL, 252),
+    (31, HUFFMAN_EMIT_SYMBOL, 252),
+    (41, HUFFMAN_EMIT_SYMBOL, 252),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 252),
+    (3, HUFFMAN_EMIT_SYMBOL, 253),
+    (6, HUFFMAN_EMIT_SYMBOL, 253),
+    (10, HUFFMAN_EMIT_SYMBOL, 253),
+    (15, HUFFMAN_EMIT_SYMBOL, 253),
+    (24, HUFFMAN_EMIT_SYMBOL, 253),
+    (31, HUFFMAN_EMIT_SYMBOL, 253),
+    (41, HUFFMAN_EMIT_SYMBOL, 253),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 253),
+
+    # Node 223
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 254),
+    (227, 0, 0),
+    (229, 0, 0),
+    (230, 0, 0),
+    (233, 0, 0),
+    (234, 0, 0),
+    (236, 0, 0),
+    (237, 0, 0),
+    (241, 0, 0),
+    (242, 0, 0),
+    (244, 0, 0),
+    (245, 0, 0),
+    (248, 0, 0),
+    (249, 0, 0),
+    (251, 0, 0),
+    (252, 0, 0),
+
+    # Node 224
+    (1, HUFFMAN_EMIT_SYMBOL, 254),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 254),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 2),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 3),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 4),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 5),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 6),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 7),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 8),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 11),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 12),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 14),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 15),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 16),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 17),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 18),
+
+    # Node 225
+    (2, HUFFMAN_EMIT_SYMBOL, 254),
+    (9, HUFFMAN_EMIT_SYMBOL, 254),
+    (23, HUFFMAN_EMIT_SYMBOL, 254),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 254),
+    (1, HUFFMAN_EMIT_SYMBOL, 2),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 2),
+    (1, HUFFMAN_EMIT_SYMBOL, 3),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 3),
+    (1, HUFFMAN_EMIT_SYMBOL, 4),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 4),
+    (1, HUFFMAN_EMIT_SYMBOL, 5),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 5),
+    (1, HUFFMAN_EMIT_SYMBOL, 6),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 6),
+    (1, HUFFMAN_EMIT_SYMBOL, 7),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 7),
+
+    # Node 226
+    (3, HUFFMAN_EMIT_SYMBOL, 254),
+    (6, HUFFMAN_EMIT_SYMBOL, 254),
+    (10, HUFFMAN_EMIT_SYMBOL, 254),
+    (15, HUFFMAN_EMIT_SYMBOL, 254),
+    (24, HUFFMAN_EMIT_SYMBOL, 254),
+    (31, HUFFMAN_EMIT_SYMBOL, 254),
+    (41, HUFFMAN_EMIT_SYMBOL, 254),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 254),
+    (2, HUFFMAN_EMIT_SYMBOL, 2),
+    (9, HUFFMAN_EMIT_SYMBOL, 2),
+    (23, HUFFMAN_EMIT_SYMBOL, 2),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 2),
+    (2, HUFFMAN_EMIT_SYMBOL, 3),
+    (9, HUFFMAN_EMIT_SYMBOL, 3),
+    (23, HUFFMAN_EMIT_SYMBOL, 3),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 3),
+
+    # Node 227
+    (3, HUFFMAN_EMIT_SYMBOL, 2),
+    (6, HUFFMAN_EMIT_SYMBOL, 2),
+    (10, HUFFMAN_EMIT_SYMBOL, 2),
+    (15, HUFFMAN_EMIT_SYMBOL, 2),
+    (24, HUFFMAN_EMIT_SYMBOL, 2),
+    (31, HUFFMAN_EMIT_SYMBOL, 2),
+    (41, HUFFMAN_EMIT_SYMBOL, 2),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 2),
+    (3, HUFFMAN_EMIT_SYMBOL, 3),
+    (6, HUFFMAN_EMIT_SYMBOL, 3),
+    (10, HUFFMAN_EMIT_SYMBOL, 3),
+    (15, HUFFMAN_EMIT_SYMBOL, 3),
+    (24, HUFFMAN_EMIT_SYMBOL, 3),
+    (31, HUFFMAN_EMIT_SYMBOL, 3),
+    (41, HUFFMAN_EMIT_SYMBOL, 3),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 3),
+
+    # Node 228
+    (2, HUFFMAN_EMIT_SYMBOL, 4),
+    (9, HUFFMAN_EMIT_SYMBOL, 4),
+    (23, HUFFMAN_EMIT_SYMBOL, 4),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 4),
+    (2, HUFFMAN_EMIT_SYMBOL, 5),
+    (9, HUFFMAN_EMIT_SYMBOL, 5),
+    (23, HUFFMAN_EMIT_SYMBOL, 5),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 5),
+    (2, HUFFMAN_EMIT_SYMBOL, 6),
+    (9, HUFFMAN_EMIT_SYMBOL, 6),
+    (23, HUFFMAN_EMIT_SYMBOL, 6),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 6),
+    (2, HUFFMAN_EMIT_SYMBOL, 7),
+    (9, HUFFMAN_EMIT_SYMBOL, 7),
+    (23, HUFFMAN_EMIT_SYMBOL, 7),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 7),
+
+    # Node 229
+    (3, HUFFMAN_EMIT_SYMBOL, 4),
+    (6, HUFFMAN_EMIT_SYMBOL, 4),
+    (10, HUFFMAN_EMIT_SYMBOL, 4),
+    (15, HUFFMAN_EMIT_SYMBOL, 4),
+    (24, HUFFMAN_EMIT_SYMBOL, 4),
+    (31, HUFFMAN_EMIT_SYMBOL, 4),
+    (41, HUFFMAN_EMIT_SYMBOL, 4),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 4),
+    (3, HUFFMAN_EMIT_SYMBOL, 5),
+    (6, HUFFMAN_EMIT_SYMBOL, 5),
+    (10, HUFFMAN_EMIT_SYMBOL, 5),
+    (15, HUFFMAN_EMIT_SYMBOL, 5),
+    (24, HUFFMAN_EMIT_SYMBOL, 5),
+    (31, HUFFMAN_EMIT_SYMBOL, 5),
+    (41, HUFFMAN_EMIT_SYMBOL, 5),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 5),
+
+    # Node 230
+    (3, HUFFMAN_EMIT_SYMBOL, 6),
+    (6, HUFFMAN_EMIT_SYMBOL, 6),
+    (10, HUFFMAN_EMIT_SYMBOL, 6),
+    (15, HUFFMAN_EMIT_SYMBOL, 6),
+    (24, HUFFMAN_EMIT_SYMBOL, 6),
+    (31, HUFFMAN_EMIT_SYMBOL, 6),
+    (41, HUFFMAN_EMIT_SYMBOL, 6),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 6),
+    (3, HUFFMAN_EMIT_SYMBOL, 7),
+    (6, HUFFMAN_EMIT_SYMBOL, 7),
+    (10, HUFFMAN_EMIT_SYMBOL, 7),
+    (15, HUFFMAN_EMIT_SYMBOL, 7),
+    (24, HUFFMAN_EMIT_SYMBOL, 7),
+    (31, HUFFMAN_EMIT_SYMBOL, 7),
+    (41, HUFFMAN_EMIT_SYMBOL, 7),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 7),
+
+    # Node 231
+    (1, HUFFMAN_EMIT_SYMBOL, 8),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 8),
+    (1, HUFFMAN_EMIT_SYMBOL, 11),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 11),
+    (1, HUFFMAN_EMIT_SYMBOL, 12),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 12),
+    (1, HUFFMAN_EMIT_SYMBOL, 14),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 14),
+    (1, HUFFMAN_EMIT_SYMBOL, 15),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 15),
+    (1, HUFFMAN_EMIT_SYMBOL, 16),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 16),
+    (1, HUFFMAN_EMIT_SYMBOL, 17),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 17),
+    (1, HUFFMAN_EMIT_SYMBOL, 18),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 18),
+
+    # Node 232
+    (2, HUFFMAN_EMIT_SYMBOL, 8),
+    (9, HUFFMAN_EMIT_SYMBOL, 8),
+    (23, HUFFMAN_EMIT_SYMBOL, 8),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 8),
+    (2, HUFFMAN_EMIT_SYMBOL, 11),
+    (9, HUFFMAN_EMIT_SYMBOL, 11),
+    (23, HUFFMAN_EMIT_SYMBOL, 11),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 11),
+    (2, HUFFMAN_EMIT_SYMBOL, 12),
+    (9, HUFFMAN_EMIT_SYMBOL, 12),
+    (23, HUFFMAN_EMIT_SYMBOL, 12),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 12),
+    (2, HUFFMAN_EMIT_SYMBOL, 14),
+    (9, HUFFMAN_EMIT_SYMBOL, 14),
+    (23, HUFFMAN_EMIT_SYMBOL, 14),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 14),
+
+    # Node 233
+    (3, HUFFMAN_EMIT_SYMBOL, 8),
+    (6, HUFFMAN_EMIT_SYMBOL, 8),
+    (10, HUFFMAN_EMIT_SYMBOL, 8),
+    (15, HUFFMAN_EMIT_SYMBOL, 8),
+    (24, HUFFMAN_EMIT_SYMBOL, 8),
+    (31, HUFFMAN_EMIT_SYMBOL, 8),
+    (41, HUFFMAN_EMIT_SYMBOL, 8),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 8),
+    (3, HUFFMAN_EMIT_SYMBOL, 11),
+    (6, HUFFMAN_EMIT_SYMBOL, 11),
+    (10, HUFFMAN_EMIT_SYMBOL, 11),
+    (15, HUFFMAN_EMIT_SYMBOL, 11),
+    (24, HUFFMAN_EMIT_SYMBOL, 11),
+    (31, HUFFMAN_EMIT_SYMBOL, 11),
+    (41, HUFFMAN_EMIT_SYMBOL, 11),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 11),
+
+    # Node 234
+    (3, HUFFMAN_EMIT_SYMBOL, 12),
+    (6, HUFFMAN_EMIT_SYMBOL, 12),
+    (10, HUFFMAN_EMIT_SYMBOL, 12),
+    (15, HUFFMAN_EMIT_SYMBOL, 12),
+    (24, HUFFMAN_EMIT_SYMBOL, 12),
+    (31, HUFFMAN_EMIT_SYMBOL, 12),
+    (41, HUFFMAN_EMIT_SYMBOL, 12),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 12),
+    (3, HUFFMAN_EMIT_SYMBOL, 14),
+    (6, HUFFMAN_EMIT_SYMBOL, 14),
+    (10, HUFFMAN_EMIT_SYMBOL, 14),
+    (15, HUFFMAN_EMIT_SYMBOL, 14),
+    (24, HUFFMAN_EMIT_SYMBOL, 14),
+    (31, HUFFMAN_EMIT_SYMBOL, 14),
+    (41, HUFFMAN_EMIT_SYMBOL, 14),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 14),
+
+    # Node 235
+    (2, HUFFMAN_EMIT_SYMBOL, 15),
+    (9, HUFFMAN_EMIT_SYMBOL, 15),
+    (23, HUFFMAN_EMIT_SYMBOL, 15),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 15),
+    (2, HUFFMAN_EMIT_SYMBOL, 16),
+    (9, HUFFMAN_EMIT_SYMBOL, 16),
+    (23, HUFFMAN_EMIT_SYMBOL, 16),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 16),
+    (2, HUFFMAN_EMIT_SYMBOL, 17),
+    (9, HUFFMAN_EMIT_SYMBOL, 17),
+    (23, HUFFMAN_EMIT_SYMBOL, 17),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 17),
+    (2, HUFFMAN_EMIT_SYMBOL, 18),
+    (9, HUFFMAN_EMIT_SYMBOL, 18),
+    (23, HUFFMAN_EMIT_SYMBOL, 18),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 18),
+
+    # Node 236
+    (3, HUFFMAN_EMIT_SYMBOL, 15),
+    (6, HUFFMAN_EMIT_SYMBOL, 15),
+    (10, HUFFMAN_EMIT_SYMBOL, 15),
+    (15, HUFFMAN_EMIT_SYMBOL, 15),
+    (24, HUFFMAN_EMIT_SYMBOL, 15),
+    (31, HUFFMAN_EMIT_SYMBOL, 15),
+    (41, HUFFMAN_EMIT_SYMBOL, 15),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 15),
+    (3, HUFFMAN_EMIT_SYMBOL, 16),
+    (6, HUFFMAN_EMIT_SYMBOL, 16),
+    (10, HUFFMAN_EMIT_SYMBOL, 16),
+    (15, HUFFMAN_EMIT_SYMBOL, 16),
+    (24, HUFFMAN_EMIT_SYMBOL, 16),
+    (31, HUFFMAN_EMIT_SYMBOL, 16),
+    (41, HUFFMAN_EMIT_SYMBOL, 16),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 16),
+
+    # Node 237
+    (3, HUFFMAN_EMIT_SYMBOL, 17),
+    (6, HUFFMAN_EMIT_SYMBOL, 17),
+    (10, HUFFMAN_EMIT_SYMBOL, 17),
+    (15, HUFFMAN_EMIT_SYMBOL, 17),
+    (24, HUFFMAN_EMIT_SYMBOL, 17),
+    (31, HUFFMAN_EMIT_SYMBOL, 17),
+    (41, HUFFMAN_EMIT_SYMBOL, 17),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 17),
+    (3, HUFFMAN_EMIT_SYMBOL, 18),
+    (6, HUFFMAN_EMIT_SYMBOL, 18),
+    (10, HUFFMAN_EMIT_SYMBOL, 18),
+    (15, HUFFMAN_EMIT_SYMBOL, 18),
+    (24, HUFFMAN_EMIT_SYMBOL, 18),
+    (31, HUFFMAN_EMIT_SYMBOL, 18),
+    (41, HUFFMAN_EMIT_SYMBOL, 18),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 18),
+
+    # Node 238
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 19),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 20),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 21),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 23),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 24),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 25),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 26),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 27),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 28),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 29),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 30),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 31),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 127),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 220),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 249),
+    (253, 0, 0),
+
+    # Node 239
+    (1, HUFFMAN_EMIT_SYMBOL, 19),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 19),
+    (1, HUFFMAN_EMIT_SYMBOL, 20),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 20),
+    (1, HUFFMAN_EMIT_SYMBOL, 21),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 21),
+    (1, HUFFMAN_EMIT_SYMBOL, 23),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 23),
+    (1, HUFFMAN_EMIT_SYMBOL, 24),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 24),
+    (1, HUFFMAN_EMIT_SYMBOL, 25),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 25),
+    (1, HUFFMAN_EMIT_SYMBOL, 26),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 26),
+    (1, HUFFMAN_EMIT_SYMBOL, 27),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 27),
+
+    # Node 240
+    (2, HUFFMAN_EMIT_SYMBOL, 19),
+    (9, HUFFMAN_EMIT_SYMBOL, 19),
+    (23, HUFFMAN_EMIT_SYMBOL, 19),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 19),
+    (2, HUFFMAN_EMIT_SYMBOL, 20),
+    (9, HUFFMAN_EMIT_SYMBOL, 20),
+    (23, HUFFMAN_EMIT_SYMBOL, 20),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 20),
+    (2, HUFFMAN_EMIT_SYMBOL, 21),
+    (9, HUFFMAN_EMIT_SYMBOL, 21),
+    (23, HUFFMAN_EMIT_SYMBOL, 21),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 21),
+    (2, HUFFMAN_EMIT_SYMBOL, 23),
+    (9, HUFFMAN_EMIT_SYMBOL, 23),
+    (23, HUFFMAN_EMIT_SYMBOL, 23),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 23),
+
+    # Node 241
+    (3, HUFFMAN_EMIT_SYMBOL, 19),
+    (6, HUFFMAN_EMIT_SYMBOL, 19),
+    (10, HUFFMAN_EMIT_SYMBOL, 19),
+    (15, HUFFMAN_EMIT_SYMBOL, 19),
+    (24, HUFFMAN_EMIT_SYMBOL, 19),
+    (31, HUFFMAN_EMIT_SYMBOL, 19),
+    (41, HUFFMAN_EMIT_SYMBOL, 19),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 19),
+    (3, HUFFMAN_EMIT_SYMBOL, 20),
+    (6, HUFFMAN_EMIT_SYMBOL, 20),
+    (10, HUFFMAN_EMIT_SYMBOL, 20),
+    (15, HUFFMAN_EMIT_SYMBOL, 20),
+    (24, HUFFMAN_EMIT_SYMBOL, 20),
+    (31, HUFFMAN_EMIT_SYMBOL, 20),
+    (41, HUFFMAN_EMIT_SYMBOL, 20),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 20),
+
+    # Node 242
+    (3, HUFFMAN_EMIT_SYMBOL, 21),
+    (6, HUFFMAN_EMIT_SYMBOL, 21),
+    (10, HUFFMAN_EMIT_SYMBOL, 21),
+    (15, HUFFMAN_EMIT_SYMBOL, 21),
+    (24, HUFFMAN_EMIT_SYMBOL, 21),
+    (31, HUFFMAN_EMIT_SYMBOL, 21),
+    (41, HUFFMAN_EMIT_SYMBOL, 21),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 21),
+    (3, HUFFMAN_EMIT_SYMBOL, 23),
+    (6, HUFFMAN_EMIT_SYMBOL, 23),
+    (10, HUFFMAN_EMIT_SYMBOL, 23),
+    (15, HUFFMAN_EMIT_SYMBOL, 23),
+    (24, HUFFMAN_EMIT_SYMBOL, 23),
+    (31, HUFFMAN_EMIT_SYMBOL, 23),
+    (41, HUFFMAN_EMIT_SYMBOL, 23),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 23),
+
+    # Node 243
+    (2, HUFFMAN_EMIT_SYMBOL, 24),
+    (9, HUFFMAN_EMIT_SYMBOL, 24),
+    (23, HUFFMAN_EMIT_SYMBOL, 24),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 24),
+    (2, HUFFMAN_EMIT_SYMBOL, 25),
+    (9, HUFFMAN_EMIT_SYMBOL, 25),
+    (23, HUFFMAN_EMIT_SYMBOL, 25),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 25),
+    (2, HUFFMAN_EMIT_SYMBOL, 26),
+    (9, HUFFMAN_EMIT_SYMBOL, 26),
+    (23, HUFFMAN_EMIT_SYMBOL, 26),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 26),
+    (2, HUFFMAN_EMIT_SYMBOL, 27),
+    (9, HUFFMAN_EMIT_SYMBOL, 27),
+    (23, HUFFMAN_EMIT_SYMBOL, 27),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 27),
+
+    # Node 244
+    (3, HUFFMAN_EMIT_SYMBOL, 24),
+    (6, HUFFMAN_EMIT_SYMBOL, 24),
+    (10, HUFFMAN_EMIT_SYMBOL, 24),
+    (15, HUFFMAN_EMIT_SYMBOL, 24),
+    (24, HUFFMAN_EMIT_SYMBOL, 24),
+    (31, HUFFMAN_EMIT_SYMBOL, 24),
+    (41, HUFFMAN_EMIT_SYMBOL, 24),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 24),
+    (3, HUFFMAN_EMIT_SYMBOL, 25),
+    (6, HUFFMAN_EMIT_SYMBOL, 25),
+    (10, HUFFMAN_EMIT_SYMBOL, 25),
+    (15, HUFFMAN_EMIT_SYMBOL, 25),
+    (24, HUFFMAN_EMIT_SYMBOL, 25),
+    (31, HUFFMAN_EMIT_SYMBOL, 25),
+    (41, HUFFMAN_EMIT_SYMBOL, 25),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 25),
+
+    # Node 245
+    (3, HUFFMAN_EMIT_SYMBOL, 26),
+    (6, HUFFMAN_EMIT_SYMBOL, 26),
+    (10, HUFFMAN_EMIT_SYMBOL, 26),
+    (15, HUFFMAN_EMIT_SYMBOL, 26),
+    (24, HUFFMAN_EMIT_SYMBOL, 26),
+    (31, HUFFMAN_EMIT_SYMBOL, 26),
+    (41, HUFFMAN_EMIT_SYMBOL, 26),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 26),
+    (3, HUFFMAN_EMIT_SYMBOL, 27),
+    (6, HUFFMAN_EMIT_SYMBOL, 27),
+    (10, HUFFMAN_EMIT_SYMBOL, 27),
+    (15, HUFFMAN_EMIT_SYMBOL, 27),
+    (24, HUFFMAN_EMIT_SYMBOL, 27),
+    (31, HUFFMAN_EMIT_SYMBOL, 27),
+    (41, HUFFMAN_EMIT_SYMBOL, 27),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 27),
+
+    # Node 246
+    (1, HUFFMAN_EMIT_SYMBOL, 28),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 28),
+    (1, HUFFMAN_EMIT_SYMBOL, 29),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 29),
+    (1, HUFFMAN_EMIT_SYMBOL, 30),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 30),
+    (1, HUFFMAN_EMIT_SYMBOL, 31),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 31),
+    (1, HUFFMAN_EMIT_SYMBOL, 127),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 127),
+    (1, HUFFMAN_EMIT_SYMBOL, 220),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 220),
+    (1, HUFFMAN_EMIT_SYMBOL, 249),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 249),
+    (254, 0, 0),
+    (255, 0, 0),
+
+    # Node 247
+    (2, HUFFMAN_EMIT_SYMBOL, 28),
+    (9, HUFFMAN_EMIT_SYMBOL, 28),
+    (23, HUFFMAN_EMIT_SYMBOL, 28),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 28),
+    (2, HUFFMAN_EMIT_SYMBOL, 29),
+    (9, HUFFMAN_EMIT_SYMBOL, 29),
+    (23, HUFFMAN_EMIT_SYMBOL, 29),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 29),
+    (2, HUFFMAN_EMIT_SYMBOL, 30),
+    (9, HUFFMAN_EMIT_SYMBOL, 30),
+    (23, HUFFMAN_EMIT_SYMBOL, 30),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 30),
+    (2, HUFFMAN_EMIT_SYMBOL, 31),
+    (9, HUFFMAN_EMIT_SYMBOL, 31),
+    (23, HUFFMAN_EMIT_SYMBOL, 31),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 31),
+
+    # Node 248
+    (3, HUFFMAN_EMIT_SYMBOL, 28),
+    (6, HUFFMAN_EMIT_SYMBOL, 28),
+    (10, HUFFMAN_EMIT_SYMBOL, 28),
+    (15, HUFFMAN_EMIT_SYMBOL, 28),
+    (24, HUFFMAN_EMIT_SYMBOL, 28),
+    (31, HUFFMAN_EMIT_SYMBOL, 28),
+    (41, HUFFMAN_EMIT_SYMBOL, 28),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 28),
+    (3, HUFFMAN_EMIT_SYMBOL, 29),
+    (6, HUFFMAN_EMIT_SYMBOL, 29),
+    (10, HUFFMAN_EMIT_SYMBOL, 29),
+    (15, HUFFMAN_EMIT_SYMBOL, 29),
+    (24, HUFFMAN_EMIT_SYMBOL, 29),
+    (31, HUFFMAN_EMIT_SYMBOL, 29),
+    (41, HUFFMAN_EMIT_SYMBOL, 29),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 29),
+
+    # Node 249
+    (3, HUFFMAN_EMIT_SYMBOL, 30),
+    (6, HUFFMAN_EMIT_SYMBOL, 30),
+    (10, HUFFMAN_EMIT_SYMBOL, 30),
+    (15, HUFFMAN_EMIT_SYMBOL, 30),
+    (24, HUFFMAN_EMIT_SYMBOL, 30),
+    (31, HUFFMAN_EMIT_SYMBOL, 30),
+    (41, HUFFMAN_EMIT_SYMBOL, 30),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 30),
+    (3, HUFFMAN_EMIT_SYMBOL, 31),
+    (6, HUFFMAN_EMIT_SYMBOL, 31),
+    (10, HUFFMAN_EMIT_SYMBOL, 31),
+    (15, HUFFMAN_EMIT_SYMBOL, 31),
+    (24, HUFFMAN_EMIT_SYMBOL, 31),
+    (31, HUFFMAN_EMIT_SYMBOL, 31),
+    (41, HUFFMAN_EMIT_SYMBOL, 31),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 31),
+
+    # Node 250
+    (2, HUFFMAN_EMIT_SYMBOL, 127),
+    (9, HUFFMAN_EMIT_SYMBOL, 127),
+    (23, HUFFMAN_EMIT_SYMBOL, 127),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 127),
+    (2, HUFFMAN_EMIT_SYMBOL, 220),
+    (9, HUFFMAN_EMIT_SYMBOL, 220),
+    (23, HUFFMAN_EMIT_SYMBOL, 220),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 220),
+    (2, HUFFMAN_EMIT_SYMBOL, 249),
+    (9, HUFFMAN_EMIT_SYMBOL, 249),
+    (23, HUFFMAN_EMIT_SYMBOL, 249),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 249),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 10),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 13),
+    (0, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 22),
+    (0, HUFFMAN_FAIL, 0),
+
+    # Node 251
+    (3, HUFFMAN_EMIT_SYMBOL, 127),
+    (6, HUFFMAN_EMIT_SYMBOL, 127),
+    (10, HUFFMAN_EMIT_SYMBOL, 127),
+    (15, HUFFMAN_EMIT_SYMBOL, 127),
+    (24, HUFFMAN_EMIT_SYMBOL, 127),
+    (31, HUFFMAN_EMIT_SYMBOL, 127),
+    (41, HUFFMAN_EMIT_SYMBOL, 127),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 127),
+    (3, HUFFMAN_EMIT_SYMBOL, 220),
+    (6, HUFFMAN_EMIT_SYMBOL, 220),
+    (10, HUFFMAN_EMIT_SYMBOL, 220),
+    (15, HUFFMAN_EMIT_SYMBOL, 220),
+    (24, HUFFMAN_EMIT_SYMBOL, 220),
+    (31, HUFFMAN_EMIT_SYMBOL, 220),
+    (41, HUFFMAN_EMIT_SYMBOL, 220),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 220),
+
+    # Node 252
+    (3, HUFFMAN_EMIT_SYMBOL, 249),
+    (6, HUFFMAN_EMIT_SYMBOL, 249),
+    (10, HUFFMAN_EMIT_SYMBOL, 249),
+    (15, HUFFMAN_EMIT_SYMBOL, 249),
+    (24, HUFFMAN_EMIT_SYMBOL, 249),
+    (31, HUFFMAN_EMIT_SYMBOL, 249),
+    (41, HUFFMAN_EMIT_SYMBOL, 249),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 249),
+    (1, HUFFMAN_EMIT_SYMBOL, 10),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 10),
+    (1, HUFFMAN_EMIT_SYMBOL, 13),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 13),
+    (1, HUFFMAN_EMIT_SYMBOL, 22),
+    (22, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 22),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+
+    # Node 253
+    (2, HUFFMAN_EMIT_SYMBOL, 10),
+    (9, HUFFMAN_EMIT_SYMBOL, 10),
+    (23, HUFFMAN_EMIT_SYMBOL, 10),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 10),
+    (2, HUFFMAN_EMIT_SYMBOL, 13),
+    (9, HUFFMAN_EMIT_SYMBOL, 13),
+    (23, HUFFMAN_EMIT_SYMBOL, 13),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 13),
+    (2, HUFFMAN_EMIT_SYMBOL, 22),
+    (9, HUFFMAN_EMIT_SYMBOL, 22),
+    (23, HUFFMAN_EMIT_SYMBOL, 22),
+    (40, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 22),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+
+    # Node 254
+    (3, HUFFMAN_EMIT_SYMBOL, 10),
+    (6, HUFFMAN_EMIT_SYMBOL, 10),
+    (10, HUFFMAN_EMIT_SYMBOL, 10),
+    (15, HUFFMAN_EMIT_SYMBOL, 10),
+    (24, HUFFMAN_EMIT_SYMBOL, 10),
+    (31, HUFFMAN_EMIT_SYMBOL, 10),
+    (41, HUFFMAN_EMIT_SYMBOL, 10),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 10),
+    (3, HUFFMAN_EMIT_SYMBOL, 13),
+    (6, HUFFMAN_EMIT_SYMBOL, 13),
+    (10, HUFFMAN_EMIT_SYMBOL, 13),
+    (15, HUFFMAN_EMIT_SYMBOL, 13),
+    (24, HUFFMAN_EMIT_SYMBOL, 13),
+    (31, HUFFMAN_EMIT_SYMBOL, 13),
+    (41, HUFFMAN_EMIT_SYMBOL, 13),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 13),
+
+    # Node 255
+    (3, HUFFMAN_EMIT_SYMBOL, 22),
+    (6, HUFFMAN_EMIT_SYMBOL, 22),
+    (10, HUFFMAN_EMIT_SYMBOL, 22),
+    (15, HUFFMAN_EMIT_SYMBOL, 22),
+    (24, HUFFMAN_EMIT_SYMBOL, 22),
+    (31, HUFFMAN_EMIT_SYMBOL, 22),
+    (41, HUFFMAN_EMIT_SYMBOL, 22),
+    (56, HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL, 22),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+    (0, HUFFMAN_FAIL, 0),
+]
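
The table above is a nibble-at-a-time state machine for HPACK Huffman decoding: each "Node N" block lists the 16 transitions for state N, one per 4-bit input nibble, and each transition is a (next state, flags, emitted symbol) triple. For orientation only, here is a minimal sketch of how a decoder can walk a table with this layout; it is not the vendored decoder, and it assumes the flat table and the flag constants defined earlier in this file are in scope under the names used below.

# Sketch only (not the vendored hpack decoder). Assumes HUFFMAN_TABLE is the
# flat sequence above (16 entries per node) and that HUFFMAN_EMIT_SYMBOL,
# HUFFMAN_COMPLETE and HUFFMAN_FAIL are the flag constants it references.
def decode_with_nibble_table(data):
    state = 0
    flags = HUFFMAN_COMPLETE          # an empty input decodes to b''
    out = bytearray()
    for byte in bytearray(data):
        # Consume each byte as two 4-bit nibbles, high nibble first.
        for nibble in (byte >> 4, byte & 0x0F):
            state, flags, symbol = HUFFMAN_TABLE[state * 16 + nibble]
            if flags & HUFFMAN_FAIL:
                raise ValueError("invalid Huffman-coded data")
            if flags & HUFFMAN_EMIT_SYMBOL:
                out.append(symbol)
    if not flags & HUFFMAN_COMPLETE:
        # Trailing bits must correspond to the all-ones EOS padding.
        raise ValueError("Huffman-coded data ended mid-symbol")
    return bytes(out)
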
diff --git a/tools/third_party/hpack/hpack/struct.py b/tools/third_party/hpack/hpack/struct.py
new file mode 100644
index 0000000..e860cd7
--- /dev/null
+++ b/tools/third_party/hpack/hpack/struct.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+"""
+hpack/struct
+~~~~~~~~~~~~
+
+Contains structures for representing header fields with associated metadata.
+"""
+
+
+class HeaderTuple(tuple):
+    """
+    A data structure that stores a single header field.
+
+    HTTP headers can be thought of as tuples of ``(field name, field value)``.
+    A single header block is a sequence of such tuples.
+
+    In HTTP/2, however, certain bits of additional information are required for
+    compressing these headers: in particular, whether the header field can be
+    safely added to the HPACK compression context.
+
+    This class stores a header that can be added to the compression context. In
+    all other ways it behaves exactly like a tuple.
+    """
+    __slots__ = ()
+
+    indexable = True
+
+    def __new__(_cls, *args):
+        return tuple.__new__(_cls, args)
+
+
+class NeverIndexedHeaderTuple(HeaderTuple):
+    """
+    A data structure that stores a single header field that cannot be added to
+    an HTTP/2 header compression context.
+    """
+    __slots__ = ()
+
+    indexable = False
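
These two classes capture the HPACK distinction between headers that may enter the dynamic compression context and sensitive headers that must be emitted with the never-indexed representation (RFC 7541 Section 6.2.3). A hypothetical usage sketch follows; only the two classes come from the file above, and the header values are placeholders.

from hpack.struct import HeaderTuple, NeverIndexedHeaderTuple

ordinary = HeaderTuple(b'content-type', b'text/html')
sensitive = NeverIndexedHeaderTuple(b'authorization', b'placeholder-token')

assert ordinary == (b'content-type', b'text/html')      # behaves like a tuple
assert ordinary.indexable and not sensitive.indexable   # only the flag differs
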
diff --git a/tools/third_party/hpack/hpack/table.py b/tools/third_party/hpack/hpack/table.py
new file mode 100644
index 0000000..9a89c72
--- /dev/null
+++ b/tools/third_party/hpack/hpack/table.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# flake8: noqa
+from collections import deque
+import logging
+
+from .exceptions import InvalidTableIndex
+
+log = logging.getLogger(__name__)
+
+
+def table_entry_size(name, value):
+    """
+    Calculates the size of a single entry
+
+    This size is mostly irrelevant to us and defined
+    specifically to accommodate memory management for
+    lower level implementations. The 32 extra bytes are
+    considered the "maximum" overhead that would be
+    required to represent each entry in the table.
+
+    See RFC7541 Section 4.1
+    """
+    return 32 + len(name) + len(value)
+
+
+class HeaderTable(object):
+    """
+    Implements the combined static and dynamic header table
+
+    The name and value arguments for all the functions
+    should ONLY be byte strings (b''); however, this is not
+    strictly enforced in the interface.
+
+    See RFC7541 Section 2.3
+    """
+    #: Default maximum size of the dynamic table. See
+    #:  RFC7540 Section 6.5.2.
+    DEFAULT_SIZE = 4096
+
+    #: Constant list of static headers. See RFC7541 Section
+    #:  2.3.1 and Appendix A
+    STATIC_TABLE = (
+        (b':authority'                  , b''             ),  # noqa
+        (b':method'                     , b'GET'          ),  # noqa
+        (b':method'                     , b'POST'         ),  # noqa
+        (b':path'                       , b'/'            ),  # noqa
+        (b':path'                       , b'/index.html'  ),  # noqa
+        (b':scheme'                     , b'http'         ),  # noqa
+        (b':scheme'                     , b'https'        ),  # noqa
+        (b':status'                     , b'200'          ),  # noqa
+        (b':status'                     , b'204'          ),  # noqa
+        (b':status'                     , b'206'          ),  # noqa
+        (b':status'                     , b'304'          ),  # noqa
+        (b':status'                     , b'400'          ),  # noqa
+        (b':status'                     , b'404'          ),  # noqa
+        (b':status'                     , b'500'          ),  # noqa
+        (b'accept-charset'              , b''             ),  # noqa
+        (b'accept-encoding'             , b'gzip, deflate'),  # noqa
+        (b'accept-language'             , b''             ),  # noqa
+        (b'accept-ranges'               , b''             ),  # noqa
+        (b'accept'                      , b''             ),  # noqa
+        (b'access-control-allow-origin' , b''             ),  # noqa
+        (b'age'                         , b''             ),  # noqa
+        (b'allow'                       , b''             ),  # noqa
+        (b'authorization'               , b''             ),  # noqa
+        (b'cache-control'               , b''             ),  # noqa
+        (b'content-disposition'         , b''             ),  # noqa
+        (b'content-encoding'            , b''             ),  # noqa
+        (b'content-language'            , b''             ),  # noqa
+        (b'content-length'              , b''             ),  # noqa
+        (b'content-location'            , b''             ),  # noqa
+        (b'content-range'               , b''             ),  # noqa
+        (b'content-type'                , b''             ),  # noqa
+        (b'cookie'                      , b''             ),  # noqa
+        (b'date'                        , b''             ),  # noqa
+        (b'etag'                        , b''             ),  # noqa
+        (b'expect'                      , b''             ),  # noqa
+        (b'expires'                     , b''             ),  # noqa
+        (b'from'                        , b''             ),  # noqa
+        (b'host'                        , b''             ),  # noqa
+        (b'if-match'                    , b''             ),  # noqa
+        (b'if-modified-since'           , b''             ),  # noqa
+        (b'if-none-match'               , b''             ),  # noqa
+        (b'if-range'                    , b''             ),  # noqa
+        (b'if-unmodified-since'         , b''             ),  # noqa
+        (b'last-modified'               , b''             ),  # noqa
+        (b'link'                        , b''             ),  # noqa
+        (b'location'                    , b''             ),  # noqa
+        (b'max-forwards'                , b''             ),  # noqa
+        (b'proxy-authenticate'          , b''             ),  # noqa
+        (b'proxy-authorization'         , b''             ),  # noqa
+        (b'range'                       , b''             ),  # noqa
+        (b'referer'                     , b''             ),  # noqa
+        (b'refresh'                     , b''             ),  # noqa
+        (b'retry-after'                 , b''             ),  # noqa
+        (b'server'                      , b''             ),  # noqa
+        (b'set-cookie'                  , b''             ),  # noqa
+        (b'strict-transport-security'   , b''             ),  # noqa
+        (b'transfer-encoding'           , b''             ),  # noqa
+        (b'user-agent'                  , b''             ),  # noqa
+        (b'vary'                        , b''             ),  # noqa
+        (b'via'                         , b''             ),  # noqa
+        (b'www-authenticate'            , b''             ),  # noqa
+    )  # noqa
+
+    STATIC_TABLE_LENGTH = len(STATIC_TABLE)
+
+    def __init__(self):
+        self._maxsize = HeaderTable.DEFAULT_SIZE
+        self._current_size = 0
+        self.resized = False
+        self.dynamic_entries = deque()
+
+    def get_by_index(self, index):
+        """
+        Returns the entry specified by index
+
+        Note that the table is 1-based, i.e. an index of 0 is
+        invalid. This is because a zero-value index signals
+        that a completely unindexed header follows.
+
+        The entry will either be from the static table or
+        the dynamic table depending on the value of index.
+        """
+        original_index = index
+        index -= 1
+        if 0 <= index:
+            if index < HeaderTable.STATIC_TABLE_LENGTH:
+                return HeaderTable.STATIC_TABLE[index]
+
+            index -= HeaderTable.STATIC_TABLE_LENGTH
+            if index < len(self.dynamic_entries):
+                return self.dynamic_entries[index]
+
+        raise InvalidTableIndex("Invalid table index %d" % original_index)
+
+    def __repr__(self):
+        return "HeaderTable(%d, %s, %r)" % (
+            self._maxsize,
+            self.resized,
+            self.dynamic_entries
+        )
+
+    def add(self, name, value):
+        """
+        Adds a new entry to the table
+
+        We reduce the table size if the entry will make the
+        table size greater than maxsize.
+        """
+        # We just clear the table if the entry is too big
+        size = table_entry_size(name, value)
+        if size > self._maxsize:
+            self.dynamic_entries.clear()
+            self._current_size = 0
+        else:
+            # Add new entry
+            self.dynamic_entries.appendleft((name, value))
+            self._current_size += size
+            self._shrink()
+
+    def search(self, name, value):
+        """
+        Searches the table for the entry specified by name
+        and value
+
+        Returns one of the following:
+            - ``None``, no match at all
+            - ``(index, name, None)`` for partial matches on name only.
+            - ``(index, name, value)`` for perfect matches.
+        """
+        offset = HeaderTable.STATIC_TABLE_LENGTH + 1
+        partial = None
+        for (i, (n, v)) in enumerate(HeaderTable.STATIC_TABLE):
+            if n == name:
+                if v == value:
+                    return i + 1, n, v
+                elif partial is None:
+                    partial = (i + 1, n, None)
+        for (i, (n, v)) in enumerate(self.dynamic_entries):
+            if n == name:
+                if v == value:
+                    return i + offset, n, v
+                elif partial is None:
+                    partial = (i + offset, n, None)
+        return partial
+
+    @property
+    def maxsize(self):
+        return self._maxsize
+
+    @maxsize.setter
+    def maxsize(self, newmax):
+        newmax = int(newmax)
+        log.debug("Resizing header table to %d from %d", newmax, self._maxsize)
+        oldmax = self._maxsize
+        self._maxsize = newmax
+        self.resized = (newmax != oldmax)
+        if newmax <= 0:
+            self.dynamic_entries.clear()
+            self._current_size = 0
+        elif oldmax > newmax:
+            self._shrink()
+
+    def _shrink(self):
+        """
+        Shrinks the dynamic table to be at or below maxsize
+        """
+        cursize = self._current_size
+        while cursize > self._maxsize:
+            name, value = self.dynamic_entries.pop()
+            cursize -= table_entry_size(name, value)
+            log.debug("Evicting %s: %s from the header table", name, value)
+        self._current_size = cursize
diff --git a/tools/third_party/hpack/setup.cfg b/tools/third_party/hpack/setup.cfg
new file mode 100644
index 0000000..b1d2b88
--- /dev/null
+++ b/tools/third_party/hpack/setup.cfg
@@ -0,0 +1,12 @@
+[wheel]
+universal = 1
+
+[flake8]
+max-complexity = 10
+exclude = 
+	hpack/huffman_constants.py
+
+[egg_info]
+tag_build = 
+tag_date = 0
+
diff --git a/tools/third_party/hpack/setup.py b/tools/third_party/hpack/setup.py
new file mode 100644
index 0000000..7ffc4be
--- /dev/null
+++ b/tools/third_party/hpack/setup.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import os
+import re
+import sys
+
+try:
+    from setuptools import setup
+except ImportError:
+    from distutils.core import setup
+
+# Get the version
+version_regex = r'__version__ = ["\']([^"\']*)["\']'
+with open('hpack/__init__.py', 'r') as f:
+    text = f.read()
+    match = re.search(version_regex, text)
+
+    if match:
+        version = match.group(1)
+    else:
+        raise RuntimeError("No version number found!")
+
+# Stealing this from Kenneth Reitz
+if sys.argv[-1] == 'publish':
+    os.system('python setup.py sdist upload')
+    sys.exit()
+
+packages = ['hpack']
+
+setup(
+    name='hpack',
+    version=version,
+    description='Pure-Python HPACK header compression',
+    long_description=open('README.rst').read() + '\n\n' + open('HISTORY.rst').read(),
+    author='Cory Benfield',
+    author_email='cory@lukasa.co.uk',
+    url='http://hyper.rtfd.org',
+    packages=packages,
+    package_data={'': ['LICENSE', 'README.rst', 'CONTRIBUTORS.rst', 'HISTORY.rst', 'NOTICES']},
+    package_dir={'hpack': 'hpack'},
+    include_package_data=True,
+    license='MIT License',
+    classifiers=[
+        'Development Status :: 5 - Production/Stable',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: Implementation :: CPython',
+    ],
+)
diff --git a/tools/third_party/hpack/test/test_encode_decode.py b/tools/third_party/hpack/test/test_encode_decode.py
new file mode 100644
index 0000000..94820f2
--- /dev/null
+++ b/tools/third_party/hpack/test/test_encode_decode.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+"""
+Tests for the integer encoding/decoding functionality in the HPACK library.
+"""
+import pytest
+
+from hypothesis import given
+from hypothesis.strategies import integers, binary, one_of
+
+from hpack.hpack import encode_integer, decode_integer
+from hpack.exceptions import HPACKDecodingError
+
+
+class TestIntegerEncoding(object):
+    # These tests are stolen from the HPACK spec.
+    def test_encoding_10_with_5_bit_prefix(self):
+        val = encode_integer(10, 5)
+        assert len(val) == 1
+        assert val == bytearray(b'\x0a')
+
+    def test_encoding_1337_with_5_bit_prefix(self):
+        val = encode_integer(1337, 5)
+        assert len(val) == 3
+        assert val == bytearray(b'\x1f\x9a\x0a')
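+        # RFC 7541 Appendix C.1.2: the 5-bit prefix saturates at 31 (0x1f),
+        # the remainder 1306 is emitted as 26 with the continuation bit set
+        # (0x9a), followed by 1306 // 128 = 10 (0x0a).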
+
+    def test_encoding_42_with_8_bit_prefix(self):
+        val = encode_integer(42, 8)
+        assert len(val) == 1
+        assert val == bytearray(b'\x2a')
+
+
+class TestIntegerDecoding(object):
+    # These tests are stolen from the HPACK spec.
+    def test_decoding_10_with_5_bit_prefix(self):
+        val = decode_integer(b'\x0a', 5)
+        assert val == (10, 1)
+
+    def test_decoding_1337_with_5_bit_prefix(self):
+        val = decode_integer(b'\x1f\x9a\x0a', 5)
+        assert val == (1337, 3)
+
+    def test_decoding_42_with_8_bit_prefix(self):
+        val = decode_integer(b'\x2a', 8)
+        assert val == (42, 1)
+
+    def test_decode_empty_string_fails(self):
+        with pytest.raises(HPACKDecodingError):
+            decode_integer(b'', 8)
+
+    def test_decode_insufficient_data_fails(self):
+        with pytest.raises(HPACKDecodingError):
+            decode_integer(b'\x1f', 5)
+
+
+class TestEncodingProperties(object):
+    """
+    Property-based tests for our integer encoder and decoder.
+    """
+    @given(
+        integer=integers(min_value=0),
+        prefix_bits=integers(min_value=1, max_value=8)
+    )
+    def test_encode_positive_integer_always_valid(self, integer, prefix_bits):
+        """
+        So long as the prefix bits are between 1 and 8, any non-negative
+        integer can be represented.
+        """
+        result = encode_integer(integer, prefix_bits)
+        assert isinstance(result, bytearray)
+        assert len(result) > 0
+
+    @given(
+        integer=integers(max_value=-1),
+        prefix_bits=integers(min_value=1, max_value=8)
+    )
+    def test_encode_fails_for_negative_integers(self, integer, prefix_bits):
+        """
+        If the integer to encode is negative, the encoder fails.
+        """
+        with pytest.raises(ValueError):
+            encode_integer(integer, prefix_bits)
+
+    @given(
+        integer=integers(min_value=0),
+        prefix_bits=one_of(
+            integers(max_value=0),
+            integers(min_value=9)
+        )
+    )
+    def test_encode_fails_for_invalid_prefixes(self, integer, prefix_bits):
+        """
+        If the prefix is out of the range [1,8], the encoder fails.
+        """
+        with pytest.raises(ValueError):
+            encode_integer(integer, prefix_bits)
+
+    @given(
+        prefix_bits=one_of(
+            integers(max_value=0),
+            integers(min_value=9)
+        )
+    )
+    def test_decode_fails_for_invalid_prefixes(self, prefix_bits):
+        """
+        If the prefix is out of the range [1,8], the decoder fails.
+        """
+        with pytest.raises(ValueError):
+            decode_integer(b'\x00', prefix_bits)
+
+    @given(
+        data=binary(),
+        prefix_bits=integers(min_value=1, max_value=8)
+    )
+    def test_decode_either_succeeds_or_raises_error(self, data, prefix_bits):
+        """
+        Attempting to decode data either returns a non-negative integer or
+        raises an HPACKDecodingError.
+        """
+        try:
+            result, consumed = decode_integer(data, prefix_bits)
+        except HPACKDecodingError:
+            pass
+        else:
+            assert isinstance(result, int)
+            assert result >= 0
+            assert consumed > 0
+
+    @given(
+        integer=integers(min_value=0),
+        prefix_bits=integers(min_value=1, max_value=8)
+    )
+    def test_encode_decode_round_trips(self, integer, prefix_bits):
+        """
+        Given valid data, the encoder and decoder can round trip.
+        """
+        encoded_result = encode_integer(integer, prefix_bits)
+        decoded_integer, consumed = decode_integer(
+            bytes(encoded_result), prefix_bits
+        )
+        assert integer == decoded_integer
+        assert consumed > 0
diff --git a/tools/third_party/hpack/test/test_hpack.py b/tools/third_party/hpack/test/test_hpack.py
new file mode 100644
index 0000000..c3333b4
--- /dev/null
+++ b/tools/third_party/hpack/test/test_hpack.py
@@ -0,0 +1,828 @@
+# -*- coding: utf-8 -*-
+from hpack.hpack import Encoder, Decoder, _dict_to_iterable, _to_bytes
+from hpack.exceptions import (
+    HPACKDecodingError, InvalidTableIndex, OversizedHeaderListError,
+    InvalidTableSizeError
+)
+from hpack.struct import HeaderTuple, NeverIndexedHeaderTuple
+import itertools
+import pytest
+
+from hypothesis import given
+from hypothesis.strategies import text, binary, sets, one_of
+
+try:
+    unicode = unicode
+except NameError:
+    unicode = str
+
+
+class TestHPACKEncoder(object):
+    # These tests are stolen entirely from the IETF specification examples.
+    def test_literal_header_field_with_indexing(self):
+        """
+        The header field representation uses a literal name and a literal
+        value.
+        """
+        e = Encoder()
+        header_set = {'custom-key': 'custom-header'}
+        result = b'\x40\x0acustom-key\x0dcustom-header'
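+        # 0x40 marks a literal with incremental indexing and a new name;
+        # 0x0a is the 10-byte name length and 0x0d the 13-byte value
+        # length (RFC 7541 Appendix C.2.1).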
+
+        assert e.encode(header_set, huffman=False) == result
+        assert list(e.header_table.dynamic_entries) == [
+            (n.encode('utf-8'), v.encode('utf-8'))
+            for n, v in header_set.items()
+        ]
+
+    def test_sensitive_headers(self):
+        """
+        Test encoding of headers marked as sensitive.
+        """
+        e = Encoder()
+        result = (b'\x82\x14\x88\x63\xa1\xa9' +
+                  b'\x32\x08\x73\xd0\xc7\x10' +
+                  b'\x87\x25\xa8\x49\xe9\xea' +
+                  b'\x5f\x5f\x89\x41\x6a\x41' +
+                  b'\x92\x6e\xe5\x35\x52\x9f')
+        header_set = [
+            (':method', 'GET', True),
+            (':path', '/jimiscool/', True),
+            ('customkey', 'sensitiveinfo', True),
+        ]
+        assert e.encode(header_set, huffman=True) == result
+
+    def test_non_sensitive_headers_with_header_tuples(self):
+        """
+        A header field stored in a HeaderTuple emits a representation that
+        allows indexing.
+        """
+        e = Encoder()
+        result = (b'\x82\x44\x88\x63\xa1\xa9' +
+                  b'\x32\x08\x73\xd0\xc7\x40' +
+                  b'\x87\x25\xa8\x49\xe9\xea' +
+                  b'\x5f\x5f\x89\x41\x6a\x41' +
+                  b'\x92\x6e\xe5\x35\x52\x9f')
+        header_set = [
+            HeaderTuple(':method', 'GET'),
+            HeaderTuple(':path', '/jimiscool/'),
+            HeaderTuple('customkey', 'sensitiveinfo'),
+        ]
+        assert e.encode(header_set, huffman=True) == result
+
+    def test_sensitive_headers_with_header_tuples(self):
+        """
+        A header field stored in a NeverIndexedHeaderTuple emits a
+        representation that forbids indexing.
+        """
+        e = Encoder()
+        result = (b'\x82\x14\x88\x63\xa1\xa9' +
+                  b'\x32\x08\x73\xd0\xc7\x10' +
+                  b'\x87\x25\xa8\x49\xe9\xea' +
+                  b'\x5f\x5f\x89\x41\x6a\x41' +
+                  b'\x92\x6e\xe5\x35\x52\x9f')
+        header_set = [
+            NeverIndexedHeaderTuple(':method', 'GET'),
+            NeverIndexedHeaderTuple(':path', '/jimiscool/'),
+            NeverIndexedHeaderTuple('customkey', 'sensitiveinfo'),
+        ]
+        assert e.encode(header_set, huffman=True) == result
+
+    def test_header_table_size_getter(self):
+        e = Encoder()
+        assert e.header_table_size == 4096
+
+    def test_indexed_literal_header_field_with_indexing(self):
+        """
+        The header field representation uses an indexed name and a literal
+        value and performs incremental indexing.
+        """
+        e = Encoder()
+        header_set = {':path': '/sample/path'}
+        result = b'\x44\x0c/sample/path'
+
+        assert e.encode(header_set, huffman=False) == result
+        assert list(e.header_table.dynamic_entries) == [
+            (n.encode('utf-8'), v.encode('utf-8'))
+            for n, v in header_set.items()
+        ]
+
+    def test_indexed_header_field(self):
+        """
+        The header field representation uses an indexed header field, from
+        the static table.
+        """
+        e = Encoder()
+        header_set = {':method': 'GET'}
+        result = b'\x82'
+
+        assert e.encode(header_set, huffman=False) == result
+        assert list(e.header_table.dynamic_entries) == []
+
+    def test_indexed_header_field_from_static_table(self):
+        e = Encoder()
+        e.header_table_size = 0
+        header_set = {':method': 'GET'}
+        result = b'\x82'
+
+        # Make sure we don't emit an encoding context update.
+        e.header_table.resized = False
+
+        assert e.encode(header_set, huffman=False) == result
+        assert list(e.header_table.dynamic_entries) == []
+
+    def test_request_examples_without_huffman(self):
+        """
+        This section shows several consecutive header sets, corresponding to
+        HTTP requests, on the same connection.
+        """
+        e = Encoder()
+        first_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'http',),
+            (':path', '/',),
+            (':authority', 'www.example.com'),
+        ]
+        # We should have :authority in first_header_table since we index it
+        first_header_table = [(':authority', 'www.example.com')]
+        first_result = b'\x82\x86\x84\x41\x0fwww.example.com'
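+        # 0x82, 0x86 and 0x84 are the indexed static entries :method: GET,
+        # :scheme: http and :path: /; 0x41 is a literal with incremental
+        # indexing whose name is static index 1 (:authority), followed by
+        # the 15-byte value (RFC 7541 Appendix C.3.1).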
+
+        assert e.encode(first_header_set, huffman=False) == first_result
+        assert list(e.header_table.dynamic_entries) == [
+            (n.encode('utf-8'), v.encode('utf-8'))
+            for n, v in first_header_table
+        ]
+
+        second_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'http',),
+            (':path', '/',),
+            (':authority', 'www.example.com',),
+            ('cache-control', 'no-cache'),
+        ]
+        second_header_table = [
+            ('cache-control', 'no-cache'),
+            (':authority', 'www.example.com')
+        ]
+        second_result = b'\x82\x86\x84\xbeX\x08no-cache'
+
+        assert e.encode(second_header_set, huffman=False) == second_result
+        assert list(e.header_table.dynamic_entries) == [
+            (n.encode('utf-8'), v.encode('utf-8'))
+            for n, v in second_header_table
+        ]
+
+        third_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'https',),
+            (':path', '/index.html',),
+            (':authority', 'www.example.com',),
+            ('custom-key', 'custom-value'),
+        ]
+        third_result = (
+            b'\x82\x87\x85\xbf@\ncustom-key\x0ccustom-value'
+        )
+
+        assert e.encode(third_header_set, huffman=False) == third_result
+        # Don't check the header table here, it's just too complex to be
+        # reliable. Check its length though.
+        assert len(e.header_table.dynamic_entries) == 3
+
+    def test_request_examples_with_huffman(self):
+        """
+        This section shows the same examples as the previous section, but
+        using Huffman encoding for the literal values.
+        """
+        e = Encoder()
+        first_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'http',),
+            (':path', '/',),
+            (':authority', 'www.example.com'),
+        ]
+        first_header_table = [(':authority', 'www.example.com')]
+        first_result = (
+            b'\x82\x86\x84\x41\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'
+        )
+
+        assert e.encode(first_header_set, huffman=True) == first_result
+        assert list(e.header_table.dynamic_entries) == [
+            (n.encode('utf-8'), v.encode('utf-8'))
+            for n, v in first_header_table
+        ]
+
+        second_header_table = [
+            ('cache-control', 'no-cache'),
+            (':authority', 'www.example.com')
+        ]
+        second_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'http',),
+            (':path', '/',),
+            (':authority', 'www.example.com',),
+            ('cache-control', 'no-cache'),
+        ]
+        second_result = b'\x82\x86\x84\xbeX\x86\xa8\xeb\x10d\x9c\xbf'
+
+        assert e.encode(second_header_set, huffman=True) == second_result
+        assert list(e.header_table.dynamic_entries) == [
+            (n.encode('utf-8'), v.encode('utf-8'))
+            for n, v in second_header_table
+        ]
+
+        third_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'https',),
+            (':path', '/index.html',),
+            (':authority', 'www.example.com',),
+            ('custom-key', 'custom-value'),
+        ]
+        third_result = (
+            b'\x82\x87\x85\xbf'
+            b'@\x88%\xa8I\xe9[\xa9}\x7f\x89%\xa8I\xe9[\xb8\xe8\xb4\xbf'
+        )
+
+        assert e.encode(third_header_set, huffman=True) == third_result
+        assert len(e.header_table.dynamic_entries) == 3
+
+    # These tests are custom, for hyper.
+    def test_resizing_header_table(self):
+        # We need to encode a substantial number of headers, to populate the
+        # header table.
+        e = Encoder()
+        header_set = [
+            (':method', 'GET'),
+            (':scheme', 'https'),
+            (':path', '/some/path'),
+            (':authority', 'www.example.com'),
+            ('custom-key', 'custom-value'),
+            (
+                "user-agent",
+                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) "
+                "Gecko/20100101 Firefox/16.0",
+            ),
+            (
+                "accept",
+                "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;"
+                "q=0.8",
+            ),
+            ('X-Lukasa-Test', '88989'),
+        ]
+        e.encode(header_set, huffman=True)
+
+        # Resize the header table to a size so small that nothing can be in it.
+        e.header_table_size = 40
+        assert len(e.header_table.dynamic_entries) == 0
+
+    def test_resizing_header_table_sends_multiple_updates(self):
+        e = Encoder()
+
+        e.header_table_size = 40
+        e.header_table_size = 100
+        e.header_table_size = 40
+
+        header_set = [(':method', 'GET')]
+        out = e.encode(header_set, huffman=True)
+        assert out == b'\x3F\x09\x3F\x45\x3F\x09\x82'
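+        # The output is three dynamic table size updates (0x3f 0x09 = 40,
+        # 0x3f 0x45 = 100, 0x3f 0x09 = 40) followed by 0x82, the indexed
+        # :method: GET.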
+
+    def test_resizing_header_table_to_same_size_ignored(self):
+        e = Encoder()
+
+        # These size changes should be ignored
+        e.header_table_size = 4096
+        e.header_table_size = 4096
+        e.header_table_size = 4096
+
+        # These size changes should be encoded
+        e.header_table_size = 40
+        e.header_table_size = 100
+        e.header_table_size = 40
+
+        header_set = [(':method', 'GET')]
+        out = e.encode(header_set, huffman=True)
+        assert out == b'\x3F\x09\x3F\x45\x3F\x09\x82'
+
+    def test_resizing_header_table_sends_context_update(self):
+        e = Encoder()
+
+        # Resize the header table to a size so small that nothing can be in it.
+        e.header_table_size = 40
+
+        # Now, encode a header set. Just a small one, with a well-defined
+        # output.
+        header_set = [(':method', 'GET')]
+        out = e.encode(header_set, huffman=True)
+
+        assert out == b'?\t\x82'
+
+    def test_setting_table_size_to_the_same_does_nothing(self):
+        e = Encoder()
+
+        # Set the header table size to the default.
+        e.header_table_size = 4096
+
+        # Now encode a header set. Just a small one, with a well-defined
+        # output.
+        header_set = [(':method', 'GET')]
+        out = e.encode(header_set, huffman=True)
+
+        assert out == b'\x82'
+
+    def test_evicting_header_table_objects(self):
+        e = Encoder()
+
+        # Set the header table size large enough to include one header.
+        e.header_table_size = 66
+        header_set = [('a', 'b'), ('long-custom-header', 'longish value')]
+        e.encode(header_set)
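+        # ('long-custom-header', 'longish value') costs 18 + 13 + 32 = 63
+        # octets, so a 66-octet table cannot also hold the 34-octet
+        # ('a', 'b') entry and must evict it.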
+
+        assert len(e.header_table.dynamic_entries) == 1
+
+
+class TestHPACKDecoder(object):
+    # These tests are stolen entirely from the IETF specification examples.
+    def test_literal_header_field_with_indexing(self):
+        """
+        The header field representation uses a literal name and a literal
+        value.
+        """
+        d = Decoder()
+        header_set = [('custom-key', 'custom-header')]
+        data = b'\x40\x0acustom-key\x0dcustom-header'
+
+        assert d.decode(data) == header_set
+        assert list(d.header_table.dynamic_entries) == [
+            (n.encode('utf-8'), v.encode('utf-8')) for n, v in header_set
+        ]
+
+    def test_raw_decoding(self):
+        """
+        The header field representation is decoded as a raw byte string
+        instead of as UTF-8.
+        """
+        d = Decoder()
+        header_set = [
+            (b'\x00\x01\x99\x30\x11\x22\x55\x21\x89\x14', b'custom-header')
+        ]
+        data = (
+            b'\x40\x0a\x00\x01\x99\x30\x11\x22\x55\x21\x89\x14\x0d'
+            b'custom-header'
+        )
+
+        assert d.decode(data, raw=True) == header_set
+
+    def test_literal_header_field_without_indexing(self):
+        """
+        The header field representation uses an indexed name and a literal
+        value.
+        """
+        d = Decoder()
+        header_set = [(':path', '/sample/path')]
+        data = b'\x04\x0c/sample/path'
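+        # 0x04 is a literal without indexing whose name is static index 4
+        # (:path); 0x0c is the 12-byte value length.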
+
+        assert d.decode(data) == header_set
+        assert list(d.header_table.dynamic_entries) == []
+
+    def test_header_table_size_getter(self):
+        d = Decoder()
+        assert d.header_table_size
+
+    def test_indexed_header_field(self):
+        """
+        The header field representation uses an indexed header field, from
+        the static table.
+        """
+        d = Decoder()
+        header_set = [(':method', 'GET')]
+        data = b'\x82'
+
+        assert d.decode(data) == header_set
+        assert list(d.header_table.dynamic_entries) == []
+
+    def test_request_examples_without_huffman(self):
+        """
+        This section shows several consecutive header sets, corresponding to
+        HTTP requests, on the same connection.
+        """
+        d = Decoder()
+        first_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'http',),
+            (':path', '/',),
+            (':authority', 'www.example.com'),
+        ]
+        # The first_header_table doesn't contain 'authority'
+        first_data = b'\x82\x86\x84\x01\x0fwww.example.com'
+
+        assert d.decode(first_data) == first_header_set
+        assert list(d.header_table.dynamic_entries) == []
+
+        # This request takes advantage of the differential encoding of header
+        # sets.
+        second_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'http',),
+            (':path', '/',),
+            (':authority', 'www.example.com',),
+            ('cache-control', 'no-cache'),
+        ]
+        second_data = (
+            b'\x82\x86\x84\x01\x0fwww.example.com\x0f\t\x08no-cache'
+        )
+
+        assert d.decode(second_data) == second_header_set
+        assert list(d.header_table.dynamic_entries) == []
+
+        third_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'https',),
+            (':path', '/index.html',),
+            (':authority', 'www.example.com',),
+            ('custom-key', 'custom-value'),
+        ]
+        third_data = (
+            b'\x82\x87\x85\x01\x0fwww.example.com@\ncustom-key\x0ccustom-value'
+        )
+
+        assert d.decode(third_data) == third_header_set
+        # Don't check the header table here, it's just too complex to be
+        # reliable. Check its length though.
+        assert len(d.header_table.dynamic_entries) == 1
+
+    def test_request_examples_with_huffman(self):
+        """
+        This section shows the same examples as the previous section, but
+        using Huffman encoding for the literal values.
+        """
+        d = Decoder()
+
+        first_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'http',),
+            (':path', '/',),
+            (':authority', 'www.example.com'),
+        ]
+        first_data = (
+            b'\x82\x86\x84\x01\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'
+        )
+
+        assert d.decode(first_data) == first_header_set
+        assert list(d.header_table.dynamic_entries) == []
+
+        second_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'http',),
+            (':path', '/',),
+            (':authority', 'www.example.com',),
+            ('cache-control', 'no-cache'),
+        ]
+        second_data = (
+            b'\x82\x86\x84\x01\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'
+            b'\x0f\t\x86\xa8\xeb\x10d\x9c\xbf'
+        )
+
+        assert d.decode(second_data) == second_header_set
+        assert list(d.header_table.dynamic_entries) == []
+
+        third_header_set = [
+            (':method', 'GET',),
+            (':scheme', 'https',),
+            (':path', '/index.html',),
+            (':authority', 'www.example.com',),
+            ('custom-key', 'custom-value'),
+        ]
+        third_data = (
+            b'\x82\x87\x85\x01\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff@'
+            b'\x88%\xa8I\xe9[\xa9}\x7f\x89%\xa8I\xe9[\xb8\xe8\xb4\xbf'
+        )
+
+        assert d.decode(third_data) == third_header_set
+        assert len(d.header_table.dynamic_entries) == 1
+
+    # These tests are custom, for hyper.
+    def test_resizing_header_table(self):
+        # We need to decode a substantial number of headers, to populate the
+        # header table. This string isn't magic: it's the output from the
+        # equivalent test for the Encoder.
+        d = Decoder()
+        data = (
+            b'\x82\x87D\x87a\x07\xa4\xacV4\xcfA\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0'
+            b'\xab\x90\xf4\xff@\x88%\xa8I\xe9[\xa9}\x7f\x89%\xa8I\xe9[\xb8\xe8'
+            b'\xb4\xbfz\xbc\xd0\x7ff\xa2\x81\xb0\xda\xe0S\xfa\xd02\x1a\xa4\x9d'
+            b'\x13\xfd\xa9\x92\xa4\x96\x854\x0c\x8aj\xdc\xa7\xe2\x81\x02\xef}'
+            b'\xa9g{\x81qp\x7fjb):\x9d\x81\x00 \x00@\x150\x9a\xc2\xca\x7f,\x05'
+            b'\xc5\xc1S\xb0I|\xa5\x89\xd3M\x1fC\xae\xba\x0cA\xa4\xc7\xa9\x8f3'
+            b'\xa6\x9a?\xdf\x9ah\xfa\x1du\xd0b\r&=Ly\xa6\x8f\xbe\xd0\x01w\xfe'
+            b'\xbeX\xf9\xfb\xed\x00\x17{@\x8a\xfc[=\xbdF\x81\xad\xbc\xa8O\x84y'
+            b'\xe7\xde\x7f'
+        )
+        d.decode(data)
+
+        # Resize the header table to a size so small that nothing can be in it.
+        d.header_table_size = 40
+        assert len(d.header_table.dynamic_entries) == 0
+
+    def test_apache_trafficserver(self):
+        # This test reproduces the bug in #110, using exactly the same header
+        # data.
+        d = Decoder()
+        data = (
+            b'\x10\x07:status\x03200@\x06server\tATS/6.0.0'
+            b'@\x04date\x1dTue, 31 Mar 2015 08:09:51 GMT'
+            b'@\x0ccontent-type\ttext/html@\x0econtent-length\x0542468'
+            b'@\rlast-modified\x1dTue, 31 Mar 2015 01:55:51 GMT'
+            b'@\x04vary\x0fAccept-Encoding@\x04etag\x0f"5519fea7-a5e4"'
+            b'@\x08x-served\x05Nginx@\x14x-subdomain-tryfiles\x04True'
+            b'@\x07x-deity\thydra-lts@\raccept-ranges\x05bytes@\x03age\x010'
+            b'@\x19strict-transport-security\rmax-age=86400'
+            b'@\x03via2https/1.1 ATS (ApacheTrafficServer/6.0.0 [cSsNfU])'
+        )
+        expect = [
+            (':status', '200'),
+            ('server', 'ATS/6.0.0'),
+            ('date', 'Tue, 31 Mar 2015 08:09:51 GMT'),
+            ('content-type', 'text/html'),
+            ('content-length', '42468'),
+            ('last-modified', 'Tue, 31 Mar 2015 01:55:51 GMT'),
+            ('vary', 'Accept-Encoding'),
+            ('etag', '"5519fea7-a5e4"'),
+            ('x-served', 'Nginx'),
+            ('x-subdomain-tryfiles', 'True'),
+            ('x-deity', 'hydra-lts'),
+            ('accept-ranges', 'bytes'),
+            ('age', '0'),
+            ('strict-transport-security', 'max-age=86400'),
+            ('via', 'https/1.1 ATS (ApacheTrafficServer/6.0.0 [cSsNfU])'),
+        ]
+
+        result = d.decode(data)
+
+        assert result == expect
+        # The status header shouldn't be indexed.
+        assert len(d.header_table.dynamic_entries) == len(expect) - 1
+
+    def test_utf8_errors_raise_hpack_decoding_error(self):
+        d = Decoder()
+
+        # Invalid UTF-8 data.
+        data = b'\x82\x86\x84\x01\x10www.\x07\xaa\xd7\x95\xd7\xa8\xd7\x94.com'
+
+        with pytest.raises(HPACKDecodingError):
+            d.decode(data)
+
+    def test_invalid_indexed_literal(self):
+        d = Decoder()
+
+        # Refer to an index that is too large.
+        data = b'\x82\x86\x84\x7f\x0a\x0fwww.example.com'
+        with pytest.raises(InvalidTableIndex):
+            d.decode(data)
+
+    def test_invalid_indexed_header(self):
+        d = Decoder()
+
+        # Refer to an indexed header that is too large.
+        data = b'\xBE\x86\x84\x01\x0fwww.example.com'
+        with pytest.raises(InvalidTableIndex):
+            d.decode(data)
+
+    def test_literal_header_field_with_indexing_emits_headertuple(self):
+        """
+        A header field with indexing emits a HeaderTuple.
+        """
+        d = Decoder()
+        data = b'\x00\x0acustom-key\x0dcustom-header'
+
+        headers = d.decode(data)
+        assert len(headers) == 1
+
+        header = headers[0]
+        assert isinstance(header, HeaderTuple)
+        assert not isinstance(header, NeverIndexedHeaderTuple)
+
+    def test_literal_never_indexed_emits_neverindexedheadertuple(self):
+        """
+        A literal header field that must never be indexed emits a
+        NeverIndexedHeaderTuple.
+        """
+        d = Decoder()
+        data = b'\x10\x0acustom-key\x0dcustom-header'
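+        # 0x10 marks a literal header field that must never be indexed,
+        # carrying a new name (RFC 7541 Section 6.2.3).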
+
+        headers = d.decode(data)
+        assert len(headers) == 1
+
+        header = headers[0]
+        assert isinstance(header, NeverIndexedHeaderTuple)
+
+    def test_indexed_never_indexed_emits_neverindexedheadertuple(self):
+        """
+        A header field with an indexed name that must never be indexed emits a
+        NeverIndexedHeaderTuple.
+        """
+        d = Decoder()
+        data = b'\x14\x0c/sample/path'
+
+        headers = d.decode(data)
+        assert len(headers) == 1
+
+        header = headers[0]
+        assert isinstance(header, NeverIndexedHeaderTuple)
+
+    def test_max_header_list_size(self):
+        """
+        If the header block is larger than the max_header_list_size, the HPACK
+        decoder throws an OversizedHeaderListError.
+        """
+        d = Decoder(max_header_list_size=44)
+        data = b'\x14\x0c/sample/path'
+
+        with pytest.raises(OversizedHeaderListError):
+            d.decode(data)
+
+    def test_can_decode_multiple_header_table_size_changes(self):
+        """
+        If multiple header table size changes are sent in at once, they are
+        successfully decoded.
+        """
+        d = Decoder()
+        data = b'?a?\xe1\x1f\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
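+        # The block begins with two dynamic table size updates: b'?a'
+        # resizes the table to 128 octets and b'?\xe1\x1f' back up to
+        # 4096, ahead of an ordinary request header block.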
+        expect = [
+            (':method', 'GET'),
+            (':scheme', 'https'),
+            (':path', '/'),
+            (':authority', '127.0.0.1:8443')
+        ]
+
+        assert d.decode(data) == expect
+
+    def test_header_table_size_change_above_maximum(self):
+        """
+        If a header table size change is received that exceeds the maximum
+        allowed table size, it is rejected.
+        """
+        d = Decoder()
+        d.max_allowed_table_size = 127
+        data = b'?a\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
+
+        with pytest.raises(InvalidTableSizeError):
+            d.decode(data)
+
+    def test_table_size_not_adjusting(self):
+        """
+        If the header table size is shrunk, and then the remote peer doesn't
+        join in the shrinking, then an error is raised.
+        """
+        d = Decoder()
+        d.max_allowed_table_size = 128
+        data = b'\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
+
+        with pytest.raises(InvalidTableSizeError):
+            d.decode(data)
+
+    def test_table_size_last_rejected(self):
+        """
+        If a header table size change comes last in the header block, it is
+        forbidden.
+        """
+        d = Decoder()
+        data = b'\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99?a'
+
+        with pytest.raises(HPACKDecodingError):
+            d.decode(data)
+
+    def test_table_size_middle_rejected(self):
+        """
+        If a header table size change comes anywhere but first in the header
+        block, it is forbidden.
+        """
+        d = Decoder()
+        data = b'\x82?a\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
+
+        with pytest.raises(HPACKDecodingError):
+            d.decode(data)
+
+    def test_truncated_header_name(self):
+        """
+        If a header name is truncated an error is raised.
+        """
+        d = Decoder()
+        # This is a simple header block that has a bad ending. The interesting
+        # part begins on the second line. This indicates a string that has
+        # literal name and value. The name is a 5 character huffman-encoded
+        # string that is only three bytes long.
+        data = (
+            b'\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
+            b'\x00\x85\xf2\xb2J'
+        )
+
+        with pytest.raises(HPACKDecodingError):
+            d.decode(data)
+
+    def test_truncated_header_value(self):
+        """
+        If a header value is truncated an error is raised.
+        """
+        d = Decoder()
+        # This is a simple header block that has a bad ending. The interesting
+        # part begins on the second line. This indicates a string that has
+        # literal name and value. The name is a 5 character huffman-encoded
+        # string, but the entire EOS character has been written over the end.
+        # This causes hpack to believe the header value should be 622462
+        # bytes long, which it clearly is not, and so this must fail.
+        data = (
+            b'\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
+            b'\x00\x85\xf2\xb2J\x87\xff\xff\xff\xfd%B\x7f'
+        )
+
+        with pytest.raises(HPACKDecodingError):
+            d.decode(data)
+
+
+class TestDictToIterable(object):
+    """
+    The _dict_to_iterable function has some subtle requirements: these
+    tests validate that everything behaves as expected.
+
+    As much as possible they try to be exhaustive.
+    """
+    keys = one_of(
+        text().filter(lambda k: k and not k.startswith(u':')),
+        binary().filter(lambda k: k and not k.startswith(b':'))
+    )
+
+    @given(
+        special_keys=sets(keys),
+        boring_keys=sets(keys),
+    )
+    def test_ordering(self, special_keys, boring_keys):
+        """
+        _dict_to_iterable produces an iterable where all the keys beginning
+        with a colon are emitted first.
+        """
+        def _prepend_colon(k):
+            if isinstance(k, unicode):
+                return u':' + k
+            else:
+                return b':' + k
+
+        special_keys = set(map(_prepend_colon, special_keys))
+        input_dict = {
+            k: b'testval' for k in itertools.chain(
+                special_keys,
+                boring_keys
+            )
+        }
+        filtered = _dict_to_iterable(input_dict)
+
+        received_special = set()
+        received_boring = set()
+
+        for _ in special_keys:
+            k, _ = next(filtered)
+            received_special.add(k)
+        for _ in boring_keys:
+            k, _ = next(filtered)
+            received_boring.add(k)
+
+        assert special_keys == received_special
+        assert boring_keys == received_boring
+
+    @given(
+        special_keys=sets(keys),
+        boring_keys=sets(keys),
+    )
+    def test_ordering_applies_to_encoding(self, special_keys, boring_keys):
+        """
+        When encoding a dictionary the special keys all appear first.
+        """
+        def _prepend_colon(k):
+            if isinstance(k, unicode):
+                return u':' + k
+            else:
+                return b':' + k
+
+        special_keys = set(map(_prepend_colon, special_keys))
+        input_dict = {
+            k: b'testval' for k in itertools.chain(
+                special_keys,
+                boring_keys
+            )
+        }
+        e = Encoder()
+        d = Decoder()
+        encoded = e.encode(input_dict)
+        decoded = iter(d.decode(encoded, raw=True))
+
+        received_special = set()
+        received_boring = set()
+        expected_special = set(map(_to_bytes, special_keys))
+        expected_boring = set(map(_to_bytes, boring_keys))
+
+        for _ in special_keys:
+            k, _ = next(decoded)
+            received_special.add(k)
+        for _ in boring_keys:
+            k, _ = next(decoded)
+            received_boring.add(k)
+
+        assert expected_special == received_special
+        assert expected_boring == received_boring
diff --git a/tools/third_party/hpack/test/test_hpack_integration.py b/tools/third_party/hpack/test/test_hpack_integration.py
new file mode 100644
index 0000000..8b8de65
--- /dev/null
+++ b/tools/third_party/hpack/test/test_hpack_integration.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+"""
+This module defines substantial HPACK integration tests. These can take a very
+long time to run, so they're outside the main test suite, but they need to be
+run before every change to HPACK.
+"""
+from hpack.hpack import Decoder, Encoder
+from hpack.struct import HeaderTuple
+from binascii import unhexlify
+from pytest import skip
+
+
+class TestHPACKDecoderIntegration(object):
+    def test_can_decode_a_story(self, story):
+        d = Decoder()
+
+        # We test against draft 9 of the HPACK spec.
+        if story['draft'] != 9:
+            skip("We test against draft 9, not draft %d" % story['draft'])
+
+        for case in story['cases']:
+            try:
+                d.header_table_size = case['header_table_size']
+            except KeyError:
+                pass
+            decoded_headers = d.decode(unhexlify(case['wire']))
+
+            # The correct headers are a list of dicts, which is annoying.
+            correct_headers = [
+                (item[0], item[1])
+                for header in case['headers']
+                for item in header.items()
+            ]
+            assert correct_headers == decoded_headers
+            assert all(
+                isinstance(header, HeaderTuple) for header in decoded_headers
+            )
+
+    def test_can_encode_a_story_no_huffman(self, raw_story):
+        d = Decoder()
+        e = Encoder()
+
+        for case in raw_story['cases']:
+            # The input headers are a list of dicts, which is annoying.
+            input_headers = [
+                (item[0], item[1])
+                for header in case['headers']
+                for item in header.items()
+            ]
+
+            encoded = e.encode(input_headers, huffman=False)
+            decoded_headers = d.decode(encoded)
+
+            assert input_headers == decoded_headers
+            assert all(
+                isinstance(header, HeaderTuple) for header in decoded_headers
+            )
+
+    def test_can_encode_a_story_with_huffman(self, raw_story):
+        d = Decoder()
+        e = Encoder()
+
+        for case in raw_story['cases']:
+            # The input headers are a list of dicts, which is annoying.
+            input_headers = [
+                (item[0], item[1])
+                for header in case['headers']
+                for item in header.items()
+            ]
+
+            encoded = e.encode(input_headers, huffman=True)
+            decoded_headers = d.decode(encoded)
+
+            assert input_headers == decoded_headers
diff --git a/tools/third_party/hpack/test/test_huffman.py b/tools/third_party/hpack/test/test_huffman.py
new file mode 100644
index 0000000..1b8c2f1
--- /dev/null
+++ b/tools/third_party/hpack/test/test_huffman.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+from hpack.exceptions import HPACKDecodingError
+from hpack.huffman_table import decode_huffman
+from hpack.huffman import HuffmanEncoder
+from hpack.huffman_constants import REQUEST_CODES, REQUEST_CODES_LENGTH
+
+from hypothesis import given, example
+from hypothesis.strategies import binary
+
+
+class TestHuffman(object):
+
+    def test_request_huffman_decoder(self):
+        assert (
+            decode_huffman(b'\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff') ==
+            b"www.example.com"
+        )
+        assert decode_huffman(b'\xa8\xeb\x10d\x9c\xbf') == b"no-cache"
+        assert decode_huffman(b'%\xa8I\xe9[\xa9}\x7f') == b"custom-key"
+        assert (
+            decode_huffman(b'%\xa8I\xe9[\xb8\xe8\xb4\xbf') == b"custom-value"
+        )
+
+    def test_request_huffman_encode(self):
+        encoder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)
+        assert (
+            encoder.encode(b"www.example.com") ==
+            b'\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'
+        )
+        assert encoder.encode(b"no-cache") == b'\xa8\xeb\x10d\x9c\xbf'
+        assert encoder.encode(b"custom-key") == b'%\xa8I\xe9[\xa9}\x7f'
+        assert (
+            encoder.encode(b"custom-value") == b'%\xa8I\xe9[\xb8\xe8\xb4\xbf'
+        )
+
+
+class TestHuffmanDecoder(object):
+    @given(data=binary())
+    @example(b'\xff')
+    @example(b'\x5f\xff\xff\xff\xff')
+    @example(b'\x00\x3f\xff\xff\xff')
+    def test_huffman_decoder_properly_handles_all_bytestrings(self, data):
+        """
+        When given random bytestrings, either we get HPACKDecodingError or we
+        get a bytestring back.
+        """
+        # The examples aren't special, they're just known to hit specific error
+        # paths through the state machine. Basically, they are strings that are
+        # definitely invalid.
+        try:
+            result = decode_huffman(data)
+        except HPACKDecodingError:
+            result = b''
+
+        assert isinstance(result, bytes)
diff --git a/tools/third_party/hpack/test/test_struct.py b/tools/third_party/hpack/test/test_struct.py
new file mode 100644
index 0000000..613b8c6
--- /dev/null
+++ b/tools/third_party/hpack/test/test_struct.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+"""
+test_struct
+~~~~~~~~~~~
+
+Tests for the Header tuples.
+"""
+import pytest
+
+from hpack.struct import HeaderTuple, NeverIndexedHeaderTuple
+
+
+class TestHeaderTuple(object):
+    def test_is_tuple(self):
+        """
+        HeaderTuple objects are tuples.
+        """
+        h = HeaderTuple('name', 'value')
+        assert isinstance(h, tuple)
+
+    def test_unpacks_properly(self):
+        """
+        HeaderTuple objects unpack like tuples.
+        """
+        h = HeaderTuple('name', 'value')
+        k, v = h
+
+        assert k == 'name'
+        assert v == 'value'
+
+    def test_header_tuples_are_indexable(self):
+        """
+        HeaderTuple objects can be indexed.
+        """
+        h = HeaderTuple('name', 'value')
+        assert h.indexable
+
+    def test_never_indexed_tuples_are_not_indexable(self):
+        """
+        NeverIndexedHeaderTuple objects cannot be indexed.
+        """
+        h = NeverIndexedHeaderTuple('name', 'value')
+        assert not h.indexable
+
+    @pytest.mark.parametrize('cls', (HeaderTuple, NeverIndexedHeaderTuple))
+    def test_equal_to_tuples(self, cls):
+        """
+        HeaderTuples and NeverIndexedHeaderTuples are equal to equivalent
+        tuples.
+        """
+        t1 = ('name', 'value')
+        t2 = cls('name', 'value')
+
+        assert t1 == t2
+        assert t1 is not t2
+
+    @pytest.mark.parametrize('cls', (HeaderTuple, NeverIndexedHeaderTuple))
+    def test_equal_to_self(self, cls):
+        """
+        HeaderTuples and NeverIndexedHeaderTuples are always equal when
+        compared to the same class.
+        """
+        t1 = cls('name', 'value')
+        t2 = cls('name', 'value')
+
+        assert t1 == t2
+        assert t1 is not t2
+
+    def test_equal_for_different_indexes(self):
+        """
+        HeaderTuples compare equal to equivalent NeverIndexedHeaderTuples.
+        """
+        t1 = HeaderTuple('name', 'value')
+        t2 = NeverIndexedHeaderTuple('name', 'value')
+
+        assert t1 == t2
+        assert t1 is not t2
diff --git a/tools/third_party/hpack/test/test_table.py b/tools/third_party/hpack/test/test_table.py
new file mode 100644
index 0000000..d77c30a
--- /dev/null
+++ b/tools/third_party/hpack/test/test_table.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+from hpack.table import HeaderTable, table_entry_size
+from hpack.exceptions import InvalidTableIndex
+import pytest
+import sys
+_ver = sys.version_info
+is_py2 = _ver[0] == 2
+is_py3 = _ver[0] == 3
+
+
+class TestPackageFunctions(object):
+    def test_table_entry_size(self):
+        res = table_entry_size(b'TestName', b'TestValue')
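+        # 8 (name) + 9 (value) + 32 octets of per-entry overhead, per
+        # RFC 7541 Section 4.1.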
+        assert res == 49
+
+
+class TestHeaderTable(object):
+    def test_get_by_index_dynamic_table(self):
+        tbl = HeaderTable()
+        off = len(HeaderTable.STATIC_TABLE)
+        val = (b'TestName', b'TestValue')
+        tbl.add(*val)
+        res = tbl.get_by_index(off + 1)
+        assert res == val
+
+    def test_get_by_index_static_table(self):
+        tbl = HeaderTable()
+        exp = (b':authority', b'')
+        res = tbl.get_by_index(1)
+        assert res == exp
+        idx = len(HeaderTable.STATIC_TABLE)
+        exp = (b'www-authenticate', b'')
+        res = tbl.get_by_index(idx)
+        assert res == exp
+
+    def test_get_by_index_zero_index(self):
+        tbl = HeaderTable()
+        with pytest.raises(InvalidTableIndex):
+            tbl.get_by_index(0)
+
+    def test_get_by_index_out_of_range(self):
+        tbl = HeaderTable()
+        off = len(HeaderTable.STATIC_TABLE)
+        tbl.add(b'TestName', b'TestValue')
+        with pytest.raises(InvalidTableIndex) as e:
+            tbl.get_by_index(off + 2)
+
+        assert (
+            "InvalidTableIndex: Invalid table index %d" % (off + 2) in str(e)
+        )
+
+    def test_repr(self):
+        tbl = HeaderTable()
+        tbl.add(b'TestName1', b'TestValue1')
+        tbl.add(b'TestName2', b'TestValue2')
+        tbl.add(b'TestName2', b'TestValue2')
+        # Meh, I hate that I have to do this to test
+        # repr
+        if is_py3:
+            exp = (
+                "HeaderTable(4096, False, deque(["
+                "(b'TestName2', b'TestValue2'), "
+                "(b'TestName2', b'TestValue2'), "
+                "(b'TestName1', b'TestValue1')"
+                "]))"
+            )
+        else:
+            exp = (
+                "HeaderTable(4096, False, deque(["
+                "('TestName2', 'TestValue2'), "
+                "('TestName2', 'TestValue2'), "
+                "('TestName1', 'TestValue1')"
+                "]))"
+            )
+        res = repr(tbl)
+        assert res == exp
+
+    def test_add_to_large(self):
+        tbl = HeaderTable()
+        # Max size too small to hold the value we specify
+        tbl.maxsize = 1
+        tbl.add(b'TestName', b'TestValue')
+        # Table length should be 0
+        assert len(tbl.dynamic_entries) == 0
+
+    def test_search_in_static_full(self):
+        tbl = HeaderTable()
+        itm = (b':authority', b'')
+        exp = (1, itm[0], itm[1])
+        res = tbl.search(itm[0], itm[1])
+        assert res == exp
+
+    def test_search_in_static_partial(self):
+        tbl = HeaderTable()
+        exp = (1, b':authority', None)
+        res = tbl.search(b':authority', b'NotInTable')
+        assert res == exp
+
+    def test_search_in_dynamic_full(self):
+        tbl = HeaderTable()
+        idx = len(HeaderTable.STATIC_TABLE) + 1
+        tbl.add(b'TestName', b'TestValue')
+        exp = (idx, b'TestName', b'TestValue')
+        res = tbl.search(b'TestName', b'TestValue')
+        assert res == exp
+
+    def test_search_in_dynamic_partial(self):
+        tbl = HeaderTable()
+        idx = len(HeaderTable.STATIC_TABLE) + 1
+        tbl.add(b'TestName', b'TestValue')
+        exp = (idx, b'TestName', None)
+        res = tbl.search(b'TestName', b'NotInTable')
+        assert res == exp
+
+    def test_search_no_match(self):
+        tbl = HeaderTable()
+        tbl.add(b'TestName', b'TestValue')
+        res = tbl.search(b'NotInTable', b'NotInTable')
+        assert res is None
+
+    def test_maxsize_prop_getter(self):
+        tbl = HeaderTable()
+        assert tbl.maxsize == HeaderTable.DEFAULT_SIZE
+
+    def test_maxsize_prop_setter(self):
+        tbl = HeaderTable()
+        exp = int(HeaderTable.DEFAULT_SIZE / 2)
+        tbl.maxsize = exp
+        assert tbl.resized is True
+        assert tbl.maxsize == exp
+        tbl.resized = False
+        tbl.maxsize = exp
+        assert tbl.resized is False
+        assert tbl.maxsize == exp
+
+    def test_size(self):
+        tbl = HeaderTable()
+        for i in range(3):
+            tbl.add(b'TestName', b'TestValue')
+        res = tbl._current_size
+        assert res == 147
+
+    def test_shrink_maxsize_is_zero(self):
+        tbl = HeaderTable()
+        tbl.add(b'TestName', b'TestValue')
+        assert len(tbl.dynamic_entries) == 1
+        tbl.maxsize = 0
+        assert len(tbl.dynamic_entries) == 0
+
+    def test_shrink_maxsize(self):
+        tbl = HeaderTable()
+        for i in range(3):
+            tbl.add(b'TestName', b'TestValue')
+
+        assert tbl._current_size == 147
+        tbl.maxsize = 146
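+        # Each entry costs 49 octets, so shrinking below 147 evicts exactly
+        # one entry, leaving 2 * 49 = 98 octets.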
+        assert len(tbl.dynamic_entries) == 2
+        assert tbl._current_size == 98
diff --git a/tools/third_party/hyperframe/CONTRIBUTORS.rst b/tools/third_party/hyperframe/CONTRIBUTORS.rst
new file mode 100644
index 0000000..aa7ab8b
--- /dev/null
+++ b/tools/third_party/hyperframe/CONTRIBUTORS.rst
@@ -0,0 +1,56 @@
+Hyper is written and maintained by Cory Benfield and various contributors:
+
+Development Lead
+````````````````
+
+- Cory Benfield <cory@lukasa.co.uk>
+
+Contributors
+````````````
+
+In chronological order:
+
+- Sriram Ganesan (@elricL)
+
+  - Implemented the Huffman encoding/decoding logic.
+
+- Alek Storm (@alekstorm)
+
+  - Implemented Python 2.7 support.
+  - Implemented HTTP/2 draft 10 support.
+  - Implemented server push.
+
+- Tetsuya Morimoto (@t2y)
+
+  - Fixed a bug where large or incomplete frames were not handled correctly.
+  - Added hyper command-line tool.
+  - General code cleanups.
+
+- Jerome De Cuyper (@jdecuyper)
+
+  - Updated documentation and tests.
+
+- Maximilian Hils (@mhils)
+
+  - Added repr for frames.
+  - Improved frame initialization code.
+  - Added flag validation.
+
+- Thomas Kriechbaumer (@Kriechi)
+
+  - Improved initialization code.
+  - Fixed bugs in frame initialization code.
+  - Improved frame repr for frames with non-printable bodies.
+
+- Davey Shafik (@dshafik)
+
+  - Fixed Alt Svc frame stream association.
+
+- Seth Michael Larson (@SethMichaelLarson)
+
+  - Performance improvements to serialization and parsing.
+
+- Fred Thomsen (@fredthomsen)
+
+  - Support for memoryview in DataFrames.
+
diff --git a/tools/third_party/hyperframe/HISTORY.rst b/tools/third_party/hyperframe/HISTORY.rst
new file mode 100644
index 0000000..22cd87e
--- /dev/null
+++ b/tools/third_party/hyperframe/HISTORY.rst
@@ -0,0 +1,167 @@
+Release History
+===============
+
+6.0.0dev0
+---------
+
+5.1.0 (2017-04-24)
+------------------
+
+**API Changes (Backward-compatible)**
+
+- Added support for ``DataFrame.data`` being a ``memoryview`` object.
+
+5.0.0 (2017-03-07)
+------------------
+
+**Backwards Incompatible API Changes**
+
+- Added support for unknown extension frames. These will be returned in the new
+  ``ExtensionFrame`` object. The flag information for these frames is persisted
+  in ``flag_byte`` if needed.
+
+4.0.2 (2017-02-20)
+------------------
+
+**Bugfixes**
+
+- Fixed AltSvc stream association, which was incorrectly set to ``'both'``:
+  should have been ``'either'``.
+- Fixed a bug where stream IDs on received frames were allowed to be 32-bit,
+  instead of 31-bit.
+- Fixed a bug with frames that had the ``PADDING`` flag set but zero-length
+  padding, whose flow-controlled length was calculated wrongly.
+- Miscellaneous performance improvements to serialization and parsing logic.
+
+4.0.1 (2016-03-13)
+------------------
+
+**Bugfixes**
+
+- Fixed bug with the repr of ``AltSvcFrame``, where building it could throw
+  exceptions if the frame had been received from the network.
+
+4.0.0 (2016-03-13)
+------------------
+
+**Backwards Incompatible API Changes**
+
+- Updated old ALTSVC frame definition to match the newly specified RFC 7838.
+- Removed the BLOCKED frame, which was never actually specified.
+- Removed previously deprecated ``SettingsFrame.SETTINGS_MAX_FRAME_SIZE`` and
+  ``SettingsFrame.SETTINGS_MAX_HEADER_LIST_SIZE``.
+
+3.2.0 (2016-02-02)
+------------------
+
+**API Changes (Backward-compatible)**
+
+- Invalid PING frame bodies now raise ``InvalidFrameError``, not
+  ``ValueError``. Note that ``InvalidFrameError`` is a ``ValueError`` subclass.
+- Invalid RST_STREAM frame bodies now raise ``InvalidFrameError``, not
+  ``ValueError``. Note that ``InvalidFrameError`` is a ``ValueError`` subclass.
+- Canonicalized the names of ``SettingsFrame.SETTINGS_MAX_FRAME_SIZE`` and
+  ``SettingsFrame.SETTINGS_MAX_HEADER_LIST_SIZE`` to match their peers, by
+  adding new properties ``SettingsFrame.MAX_FRAME_SIZE`` and
+  ``SettingsFrame.MAX_HEADER_LIST_SIZE``. The old names are still
+  present, but will be deprecated in 4.0.0.
+
+**Bugfixes**
+
+- The change in ``3.1.0`` that ensured that ``InvalidFrameError`` would be
+  thrown did not affect certain invalid values in ALT_SVC frames. This has been
+  fixed: ``ValueError`` will no longer be thrown from invalid ALT_SVC bodies.
+
+3.1.1 (2016-01-18)
+------------------
+
+**Bugfixes**
+
+- Correctly error when receiving Ping frames that have insufficient data.
+
+3.1.0 (2016-01-13)
+------------------
+
+**API Changes**
+
+- Added new ``InvalidFrameError`` that is thrown instead of ``struct.error``
+  when parsing a frame.
+
+**Bugfixes**
+
+- Fixed error when trying to serialize frames that use Priority information
+  with the defaults for that information.
+- Fixed errors when displaying the repr of frames with non-printable bodies.
+
+3.0.1 (2016-01-08)
+------------------
+
+**Bugfixes**
+
+- Fix issue where unpadded DATA, PUSH_PROMISE and HEADERS frames that had empty
+  bodies would raise ``InvalidPaddingError`` exceptions when parsed.
+
+3.0.0 (2016-01-08)
+------------------
+
+**Backwards Incompatible API Changes**
+
+- Parsing padded frames that have invalid padding sizes now throws an
+  ``InvalidPaddingError``.
+
+2.2.0 (2015-10-15)
+------------------
+
+**API Changes**
+
+- When an unknown frame is encountered, ``parse_frame_header`` now throws a
+  ``ValueError`` subclass: ``UnknownFrameError``. This subclass contains the
+  frame type and the length of the frame body.
+
+2.1.0 (2015-10-06)
+------------------
+
+**API Changes**
+
+- Frames parsed from binary data now carry a ``body_len`` attribute that
+  matches the frame length (minus the frame header).
+
+2.0.0 (2015-09-21)
+------------------
+
+**API Changes**
+
+- Attempting to parse unrecognised frames now throws ``ValueError`` instead of
+  ``KeyError``.  Thanks to @Kriechi!
+- Flags are now validated for correctness, preventing setting flags that
+  ``hyperframe`` does not recognise and that would not serialize. Thanks to
+  @mhils!
+- Frame properties can now be initialized in the constructors. Thanks to @mhils
+  and @Kriechi!
+- Frames that cannot be sent on a stream now have their stream ID defaulted
+  to ``0``. Thanks to @Kriechi!
+
+**Other Changes**
+
+- Frames have a more useful repr. Thanks to @mhils!
+
+1.1.1 (2015-07-20)
+------------------
+
+- Fix a bug where ``FRAME_MAX_LEN`` was one byte too small.
+
+1.1.0 (2015-06-28)
+------------------
+
+- Add ``body_len`` property to frames to enable introspection of the actual
+  frame length. Thanks to @jdecuyper!
+
+1.0.1 (2015-06-27)
+------------------
+
+- Fix bug where the frame header would have an incorrect length added to it.
+
+1.0.0 (2015-04-12)
+------------------
+
+- Initial extraction from hyper.
diff --git a/tools/third_party/hyperframe/LICENSE b/tools/third_party/hyperframe/LICENSE
new file mode 100644
index 0000000..d24c351
--- /dev/null
+++ b/tools/third_party/hyperframe/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cory Benfield
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/tools/third_party/hyperframe/MANIFEST.in b/tools/third_party/hyperframe/MANIFEST.in
new file mode 100644
index 0000000..2f46467
--- /dev/null
+++ b/tools/third_party/hyperframe/MANIFEST.in
@@ -0,0 +1,2 @@
+include README.rst LICENSE CONTRIBUTORS.rst HISTORY.rst
+
diff --git a/tools/third_party/hyperframe/PKG-INFO b/tools/third_party/hyperframe/PKG-INFO
new file mode 100644
index 0000000..393c1e8
--- /dev/null
+++ b/tools/third_party/hyperframe/PKG-INFO
@@ -0,0 +1,230 @@
+Metadata-Version: 1.1
+Name: hyperframe
+Version: 5.1.0
+Summary: HTTP/2 framing layer for Python
+Home-page: http://hyper.rtfd.org
+Author: Cory Benfield
+Author-email: cory@lukasa.co.uk
+License: MIT License
+Description: ======================================
+        hyperframe: Pure-Python HTTP/2 framing
+        ======================================
+        
+        .. image:: https://travis-ci.org/python-hyper/hyperframe.png?branch=master
+            :target: https://travis-ci.org/python-hyper/hyperframe
+        
+        This library contains the HTTP/2 framing code used in the `hyper`_ project. It
+        provides a pure-Python codebase that is capable of decoding a binary stream
+        into HTTP/2 frames.
+        
+        This library is used directly by `hyper`_ and a number of other projects to
+        provide HTTP/2 frame decoding logic.
+        
+        Contributing
+        ============
+        
+        hyperframe welcomes contributions from anyone! Unlike many other projects we
+        are happy to accept cosmetic contributions and small contributions, in addition
+        to large feature requests and changes.
+        
+        Before you contribute (either by opening an issue or filing a pull request),
+        please `read the contribution guidelines`_.
+        
+        .. _read the contribution guidelines: http://hyper.readthedocs.org/en/development/contributing.html
+        
+        License
+        =======
+        
+        hyperframe is made available under the MIT License. For more details, see the
+        ``LICENSE`` file in the repository.
+        
+        Authors
+        =======
+        
+        hyperframe is maintained by Cory Benfield, with contributions from others. For
+        more details about the contributors, please see ``CONTRIBUTORS.rst``.
+        
+        .. _hyper: http://python-hyper.org/
+        
+        
+        Release History
+        ===============
+        
+        6.0.0dev0
+        ---------
+        
+        5.1.0 (2017-04-24)
+        ------------------
+        
+        **API Changes (Backward-compatible)**
+        
+        - Added support for ``DataFrame.data`` being a ``memoryview`` object.
+        
+        5.0.0 (2017-03-07)
+        ------------------
+        
+        **Backwards Incompatible API Changes**
+        
+        - Added support for unknown extension frames. These will be returned in the new
+          ``ExtensionFrame`` object. The flag information for these frames is persisted
+          in ``flag_byte`` if needed.
+        
+        4.0.2 (2017-02-20)
+        ------------------
+        
+        **Bugfixes**
+        
+        - Fixed AltSvc stream association, which was incorrectly set to ``'both'``:
+          should have been ``'either'``.
+        - Fixed a bug where stream IDs on received frames were allowed to be 32-bit,
+          instead of 31-bit.
+        - Fixed a bug with frames that had the ``PADDING`` flag set but zero-length
+          padding, whose flow-controlled length was calculated wrongly.
+        - Miscellaneous performance improvements to serialization and parsing logic.
+        
+        4.0.1 (2016-03-13)
+        ------------------
+        
+        **Bugfixes**
+        
+        - Fixed bug with the repr of ``AltSvcFrame``, where building it could throw
+          exceptions if the frame had been received from the network.
+        
+        4.0.0 (2016-03-13)
+        ------------------
+        
+        **Backwards Incompatible API Changes**
+        
+        - Updated old ALTSVC frame definition to match the newly specified RFC 7838.
+        - Remove BLOCKED frame, which was never actually specified.
+        - Removed previously deprecated ``SettingsFrame.SETTINGS_MAX_FRAME_SIZE`` and
+          ``SettingsFrame.SETTINGS_MAX_HEADER_LIST_SIZE``.
+        
+        3.2.0 (2016-02-02)
+        ------------------
+        
+        **API Changes (Backward-compatible)**
+        
+        - Invalid PING frame bodies now raise ``InvalidFrameError``, not
+          ``ValueError``. Note that ``InvalidFrameError`` is a ``ValueError`` subclass.
+        - Invalid RST_STREAM frame bodies now raise ``InvalidFrameError``, not
+          ``ValueError``. Note that ``InvalidFrameError`` is a ``ValueError`` subclass.
+        - Canonicalized the names of ``SettingsFrame.SETTINGS_MAX_FRAME_SIZE`` and
+          ``SettingsFrame.SETTINGS_MAX_HEADER_LIST_SIZE`` to match their peers, by
+          adding new properties ``SettingsFrame.MAX_FRAME_SIZE`` and
+          ``SettingsFrame.MAX_HEADER_LIST_SIZE``. The old names are still
+          present, but will be deprecated in 4.0.0.
+        
+        **Bugfixes**
+        
+        - The change in ``3.1.0`` that ensured that ``InvalidFrameError`` would be
+          thrown did not affect certain invalid values in ALT_SVC frames. This has been
+          fixed: ``ValueError`` will no longer be thrown from invalid ALT_SVC bodies.
+        
+        3.1.1 (2016-01-18)
+        ------------------
+        
+        **Bugfixes**
+        
+        - Correctly error when receiving Ping frames that have insufficient data.
+        
+        3.1.0 (2016-01-13)
+        ------------------
+        
+        **API Changes**
+        
+        - Added new ``InvalidFrameError`` that is thrown instead of ``struct.error``
+          when parsing a frame.
+        
+        **Bugfixes**
+        
+        - Fixed error when trying to serialize frames that use Priority information
+          with the defaults for that information.
+        - Fixed errors when displaying the repr of frames with non-printable bodies.
+        
+        3.0.1 (2016-01-08)
+        ------------------
+        
+        **Bugfixes**
+        
+        - Fix issue where unpadded DATA, PUSH_PROMISE and HEADERS frames that had empty
+          bodies would raise ``InvalidPaddingError`` exceptions when parsed.
+        
+        3.0.0 (2016-01-08)
+        ------------------
+        
+        **Backwards Incompatible API Changes**
+        
+        - Parsing padded frames that have invalid padding sizes now throws an
+          ``InvalidPaddingError``.
+        
+        2.2.0 (2015-10-15)
+        ------------------
+        
+        **API Changes**
+        
+        - When an unknown frame is encountered, ``parse_frame_header`` now throws a
+          ``ValueError`` subclass: ``UnknownFrameError``. This subclass contains the
+          frame type and the length of the frame body.
+        
+        2.1.0 (2015-10-06)
+        ------------------
+        
+        **API Changes**
+        
+        - Frames parsed from binary data now carry a ``body_len`` attribute that
+          matches the frame length (minus the frame header).
+        
+        2.0.0 (2015-09-21)
+        ------------------
+        
+        **API Changes**
+        
+        - Attempting to parse unrecognised frames now throws ``ValueError`` instead of
+          ``KeyError``.  Thanks to @Kriechi!
+        - Flags are now validated for correctness, preventing setting flags that
+          ``hyperframe`` does not recognise and that would not serialize. Thanks to
+          @mhils!
+        - Frame properties can now be initialized in the constructors. Thanks to @mhils
+          and @Kriechi!
+        - Frames that cannot be sent on a stream now have their stream ID defaulted
+          to ``0``. Thanks to @Kriechi!
+        
+        **Other Changes**
+        
+        - Frames have a more useful repr. Thanks to @mhils!
+        
+        1.1.1 (2015-07-20)
+        ------------------
+        
+        - Fix a bug where ``FRAME_MAX_LEN`` was one byte too small.
+        
+        1.1.0 (2015-06-28)
+        ------------------
+        
+        - Add ``body_len`` property to frames to enable introspection of the actual
+          frame length. Thanks to @jdecuyper!
+        
+        1.0.1 (2015-06-27)
+        ------------------
+        
+        - Fix bug where the frame header would have an incorrect length added to it.
+        
+        1.0.0 (2015-04-12)
+        ------------------
+        
+        - Initial extraction from hyper.
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: Implementation :: CPython
diff --git a/tools/third_party/hyperframe/README.rst b/tools/third_party/hyperframe/README.rst
new file mode 100644
index 0000000..385b39a
--- /dev/null
+++ b/tools/third_party/hyperframe/README.rst
@@ -0,0 +1,39 @@
+======================================
+hyperframe: Pure-Python HTTP/2 framing
+======================================
+
+.. image:: https://travis-ci.org/python-hyper/hyperframe.png?branch=master
+    :target: https://travis-ci.org/python-hyper/hyperframe
+
+This library contains the HTTP/2 framing code used in the `hyper`_ project. It
+provides a pure-Python codebase that is capable of decoding a binary stream
+into HTTP/2 frames.
+
+This library is used directly by `hyper`_ and a number of other projects to
+provide HTTP/2 frame decoding logic.
+
+Contributing
+============
+
+hyperframe welcomes contributions from anyone! Unlike many other projects we
+are happy to accept cosmetic contributions and small contributions, in addition
+to large feature requests and changes.
+
+Before you contribute (either by opening an issue or filing a pull request),
+please `read the contribution guidelines`_.
+
+.. _read the contribution guidelines: http://hyper.readthedocs.org/en/development/contributing.html
+
+License
+=======
+
+hyperframe is made available under the MIT License. For more details, see the
+``LICENSE`` file in the repository.
+
+Authors
+=======
+
+hyperframe is maintained by Cory Benfield, with contributions from others. For
+more details about the contributors, please see ``CONTRIBUTORS.rst``.
+
+.. _hyper: http://python-hyper.org/
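As a quick, illustrative sketch of the framing API the README describes (not part of the vendored files; the payload and setting value are arbitrary), building frames and turning them into wire bytes looks like this:

from hyperframe.frame import DataFrame, SettingsFrame

# A DATA frame on stream 1 carrying a small payload, with END_STREAM set.
data = DataFrame(1, data=b'hello', flags=['END_STREAM'])
wire = data.serialize()          # 9-byte header followed by the body

# SETTINGS frames live on the connection (stream 0 is the default).
settings = SettingsFrame(settings={SettingsFrame.MAX_FRAME_SIZE: 16384})
wire += settings.serialize()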
diff --git a/tools/third_party/hyperframe/hyperframe.egg-info/PKG-INFO b/tools/third_party/hyperframe/hyperframe.egg-info/PKG-INFO
new file mode 100644
index 0000000..393c1e8
--- /dev/null
+++ b/tools/third_party/hyperframe/hyperframe.egg-info/PKG-INFO
@@ -0,0 +1,230 @@
+Metadata-Version: 1.1
+Name: hyperframe
+Version: 5.1.0
+Summary: HTTP/2 framing layer for Python
+Home-page: http://hyper.rtfd.org
+Author: Cory Benfield
+Author-email: cory@lukasa.co.uk
+License: MIT License
+Description: ======================================
+        hyperframe: Pure-Python HTTP/2 framing
+        ======================================
+        
+        .. image:: https://travis-ci.org/python-hyper/hyperframe.png?branch=master
+            :target: https://travis-ci.org/python-hyper/hyperframe
+        
+        This library contains the HTTP/2 framing code used in the `hyper`_ project. It
+        provides a pure-Python codebase that is capable of decoding a binary stream
+        into HTTP/2 frames.
+        
+        This library is used directly by `hyper`_ and a number of other projects to
+        provide HTTP/2 frame decoding logic.
+        
+        Contributing
+        ============
+        
+        hyperframe welcomes contributions from anyone! Unlike many other projects we
+        are happy to accept cosmetic contributions and small contributions, in addition
+        to large feature requests and changes.
+        
+        Before you contribute (either by opening an issue or filing a pull request),
+        please `read the contribution guidelines`_.
+        
+        .. _read the contribution guidelines: http://hyper.readthedocs.org/en/development/contributing.html
+        
+        License
+        =======
+        
+        hyperframe is made available under the MIT License. For more details, see the
+        ``LICENSE`` file in the repository.
+        
+        Authors
+        =======
+        
+        hyperframe is maintained by Cory Benfield, with contributions from others. For
+        more details about the contributors, please see ``CONTRIBUTORS.rst``.
+        
+        .. _hyper: http://python-hyper.org/
+        
+        
+        Release History
+        ===============
+        
+        6.0.0dev0
+        ---------
+        
+        5.1.0 (2017-04-24)
+        ------------------
+        
+        **API Changes (Backward-compatible)**
+        
+        - Added support for ``DataFrame.data`` being a ``memoryview`` object.
+        
+        5.0.0 (2017-03-07)
+        ------------------
+        
+        **Backwards Incompatible API Changes**
+        
+        - Added support for unknown extension frames. These will be returned in the new
+          ``ExtensionFrame`` object. The flag information for these frames is persisted
+          in ``flag_byte`` if needed.
+        
+        4.0.2 (2017-02-20)
+        ------------------
+        
+        **Bugfixes**
+        
+        - Fixed AltSvc stream association, which was incorrectly set to ``'both'``:
+          should have been ``'either'``.
+        - Fixed a bug where stream IDs on received frames were allowed to be 32-bit,
+          instead of 31-bit.
+        - Fixed a bug with frames that had the ``PADDING`` flag set but zero-length
+          padding, whose flow-controlled length was calculated wrongly.
+        - Miscellaneous performance improvements to serialization and parsing logic.
+        
+        4.0.1 (2016-03-13)
+        ------------------
+        
+        **Bugfixes**
+        
+        - Fixed bug with the repr of ``AltSvcFrame``, where building it could throw
+          exceptions if the frame had been received from the network.
+        
+        4.0.0 (2016-03-13)
+        ------------------
+        
+        **Backwards Incompatible API Changes**
+        
+        - Updated old ALTSVC frame definition to match the newly specified RFC 7838.
+        - Remove BLOCKED frame, which was never actually specified.
+        - Removed previously deprecated ``SettingsFrame.SETTINGS_MAX_FRAME_SIZE`` and
+          ``SettingsFrame.SETTINGS_MAX_HEADER_LIST_SIZE``.
+        
+        3.2.0 (2016-02-02)
+        ------------------
+        
+        **API Changes (Backward-compatible)**
+        
+        - Invalid PING frame bodies now raise ``InvalidFrameError``, not
+          ``ValueError``. Note that ``InvalidFrameError`` is a ``ValueError`` subclass.
+        - Invalid RST_STREAM frame bodies now raise ``InvalidFrameError``, not
+          ``ValueError``. Note that ``InvalidFrameError`` is a ``ValueError`` subclass.
+        - Canonicalized the names of ``SettingsFrame.SETTINGS_MAX_FRAME_SIZE`` and
+          ``SettingsFrame.SETTINGS_MAX_HEADER_LIST_SIZE`` to match their peers, by
+          adding new properties ``SettingsFrame.MAX_FRAME_SIZE`` and
+          ``SettingsFrame.MAX_HEADER_LIST_SIZE``. The old names are still
+          present, but will be deprecated in 4.0.0.
+        
+        **Bugfixes**
+        
+        - The change in ``3.1.0`` that ensured that ``InvalidFrameError`` would be
+          thrown did not affect certain invalid values in ALT_SVC frames. This has been
+          fixed: ``ValueError`` will no longer be thrown from invalid ALT_SVC bodies.
+        
+        3.1.1 (2016-01-18)
+        ------------------
+        
+        **Bugfixes**
+        
+        - Correctly error when receiving Ping frames that have insufficient data.
+        
+        3.1.0 (2016-01-13)
+        ------------------
+        
+        **API Changes**
+        
+        - Added new ``InvalidFrameError`` that is thrown instead of ``struct.error``
+          when parsing a frame.
+        
+        **Bugfixes**
+        
+        - Fixed error when trying to serialize frames that use Priority information
+          with the defaults for that information.
+        - Fixed errors when displaying the repr of frames with non-printable bodies.
+        
+        3.0.1 (2016-01-08)
+        ------------------
+        
+        **Bugfixes**
+        
+        - Fix issue where unpadded DATA, PUSH_PROMISE and HEADERS frames that had empty
+          bodies would raise ``InvalidPaddingError`` exceptions when parsed.
+        
+        3.0.0 (2016-01-08)
+        ------------------
+        
+        **Backwards Incompatible API Changes**
+        
+        - Parsing padded frames that have invalid padding sizes now throws an
+          ``InvalidPaddingError``.
+        
+        2.2.0 (2015-10-15)
+        ------------------
+        
+        **API Changes**
+        
+        - When an unknown frame is encountered, ``parse_frame_header`` now throws a
+          ``ValueError`` subclass: ``UnknownFrameError``. This subclass contains the
+          frame type and the length of the frame body.
+        
+        2.1.0 (2015-10-06)
+        ------------------
+        
+        **API Changes**
+        
+        - Frames parsed from binary data now carry a ``body_len`` attribute that
+          matches the frame length (minus the frame header).
+        
+        2.0.0 (2015-09-21)
+        ------------------
+        
+        **API Changes**
+        
+        - Attempting to parse unrecognised frames now throws ``ValueError`` instead of
+          ``KeyError``.  Thanks to @Kriechi!
+        - Flags are now validated for correctness, preventing setting flags that
+          ``hyperframe`` does not recognise and that would not serialize. Thanks to
+          @mhils!
+        - Frame properties can now be initialized in the constructors. Thanks to @mhils
+          and @Kriechi!
+        - Frames that cannot be sent on a stream now have their stream ID defaulted
+          to ``0``. Thanks to @Kriechi!
+        
+        **Other Changes**
+        
+        - Frames have a more useful repr. Thanks to @mhils!
+        
+        1.1.1 (2015-07-20)
+        ------------------
+        
+        - Fix a bug where ``FRAME_MAX_LEN`` was one byte too small.
+        
+        1.1.0 (2015-06-28)
+        ------------------
+        
+        - Add ``body_len`` property to frames to enable introspection of the actual
+          frame length. Thanks to @jdecuyper!
+        
+        1.0.1 (2015-06-27)
+        ------------------
+        
+        - Fix bug where the frame header would have an incorrect length added to it.
+        
+        1.0.0 (2015-04-12)
+        ------------------
+        
+        - Initial extraction from hyper.
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: Implementation :: CPython
diff --git a/tools/third_party/hyperframe/hyperframe.egg-info/SOURCES.txt b/tools/third_party/hyperframe/hyperframe.egg-info/SOURCES.txt
new file mode 100644
index 0000000..0a2bd76
--- /dev/null
+++ b/tools/third_party/hyperframe/hyperframe.egg-info/SOURCES.txt
@@ -0,0 +1,17 @@
+CONTRIBUTORS.rst
+HISTORY.rst
+LICENSE
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+hyperframe/__init__.py
+hyperframe/exceptions.py
+hyperframe/flags.py
+hyperframe/frame.py
+hyperframe.egg-info/PKG-INFO
+hyperframe.egg-info/SOURCES.txt
+hyperframe.egg-info/dependency_links.txt
+hyperframe.egg-info/top_level.txt
+test/test_flags.py
+test/test_frames.py
\ No newline at end of file
diff --git a/tools/third_party/hyperframe/hyperframe.egg-info/dependency_links.txt b/tools/third_party/hyperframe/hyperframe.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/tools/third_party/hyperframe/hyperframe.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/tools/third_party/hyperframe/hyperframe.egg-info/top_level.txt b/tools/third_party/hyperframe/hyperframe.egg-info/top_level.txt
new file mode 100644
index 0000000..b21bb7c
--- /dev/null
+++ b/tools/third_party/hyperframe/hyperframe.egg-info/top_level.txt
@@ -0,0 +1 @@
+hyperframe
diff --git a/tools/third_party/hyperframe/hyperframe/__init__.py b/tools/third_party/hyperframe/hyperframe/__init__.py
new file mode 100644
index 0000000..8fd1cd3
--- /dev/null
+++ b/tools/third_party/hyperframe/hyperframe/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+"""
+hyperframe
+~~~~~~~~~~
+
+A module for providing a pure-Python HTTP/2 framing layer.
+"""
+__version__ = '5.1.0'
diff --git a/tools/third_party/hyperframe/hyperframe/exceptions.py b/tools/third_party/hyperframe/hyperframe/exceptions.py
new file mode 100644
index 0000000..dd30369
--- /dev/null
+++ b/tools/third_party/hyperframe/hyperframe/exceptions.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+"""
+hyperframe/exceptions
+~~~~~~~~~~~~~~~~~~~~~
+
+Defines the exceptions that can be thrown by hyperframe.
+"""
+
+
+class UnknownFrameError(ValueError):
+    """
+    A frame of unknown type was received.
+    """
+    def __init__(self, frame_type, length):
+        #: The type byte of the unknown frame that was received.
+        self.frame_type = frame_type
+
+        #: The length of the data portion of the unknown frame.
+        self.length = length
+
+    def __str__(self):
+        return (
+            "UnknownFrameError: Unknown frame type 0x%X received, "
+            "length %d bytes" % (self.frame_type, self.length)
+        )
+
+
+class InvalidPaddingError(ValueError):
+    """
+    A frame with invalid padding was received.
+    """
+    pass
+
+
+class InvalidFrameError(ValueError):
+    """
+    Parsing a frame failed because the data was not laid out appropriately.
+
+    .. versionadded:: 3.0.2
+    """
+    pass
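A brief, hypothetical sketch of when these exceptions fire (based on the frame code vendored later in this diff; the byte strings are arbitrary):

from hyperframe.frame import Frame, DataFrame
from hyperframe.exceptions import InvalidFrameError, InvalidPaddingError

# A header shorter than nine bytes cannot be unpacked.
try:
    Frame.parse_frame_header(b'\x00\x00\x08\x00\x01\x00\x00\x00')
except InvalidFrameError:
    pass

# A PADDED DATA frame whose declared padding exceeds the body length.
padded = DataFrame(1, flags=['PADDED'])
try:
    padded.parse_body(memoryview(b'\x0aabc'))  # pad length 10, body only 4 bytes
except InvalidPaddingError:
    pass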
diff --git a/tools/third_party/hyperframe/hyperframe/flags.py b/tools/third_party/hyperframe/hyperframe/flags.py
new file mode 100644
index 0000000..05b3501
--- /dev/null
+++ b/tools/third_party/hyperframe/hyperframe/flags.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+"""
+hyperframe/flags
+~~~~~~~~~~~~~~~~
+
+Defines basic Flag and Flags data structures.
+"""
+import collections
+
+
+Flag = collections.namedtuple("Flag", ["name", "bit"])
+
+
+class Flags(collections.MutableSet):
+    """
+    A simple MutableSet implementation that will only accept known flags as
+    elements.
+
+    Will behave like a regular set(), except that a ValueError will be thrown
+    when .add()ing unexpected flags.
+    """
+    def __init__(self, defined_flags):
+        self._valid_flags = set(flag.name for flag in defined_flags)
+        self._flags = set()
+
+    def __contains__(self, x):
+        return self._flags.__contains__(x)
+
+    def __iter__(self):
+        return self._flags.__iter__()
+
+    def __len__(self):
+        return self._flags.__len__()
+
+    def discard(self, value):
+        return self._flags.discard(value)
+
+    def add(self, value):
+        if value not in self._valid_flags:
+            raise ValueError(
+                "Unexpected flag: {}. Valid flags are: {}".format(
+                    value, self._valid_flags
+                )
+            )
+        return self._flags.add(value)
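A tiny, illustrative sketch of the behaviour described in the docstring above (the flag names here are arbitrary examples):

from hyperframe.flags import Flag, Flags

flags = Flags([Flag('END_STREAM', 0x01), Flag('PADDED', 0x08)])
flags.add('END_STREAM')           # known flag: accepted
assert 'END_STREAM' in flags and len(flags) == 1

try:
    flags.add('NOT_A_FLAG')       # unknown flag: rejected
except ValueError:
    pass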
diff --git a/tools/third_party/hyperframe/hyperframe/frame.py b/tools/third_party/hyperframe/hyperframe/frame.py
new file mode 100644
index 0000000..5294768
--- /dev/null
+++ b/tools/third_party/hyperframe/hyperframe/frame.py
@@ -0,0 +1,820 @@
+# -*- coding: utf-8 -*-
+"""
+hyperframe/frame
+~~~~~~~~~~~~~~~~
+
+Defines framing logic for HTTP/2. Provides both classes to represent framed
+data and logic for aiding the connection when it comes to reading from the
+socket.
+"""
+import struct
+import binascii
+
+from .exceptions import (
+    UnknownFrameError, InvalidPaddingError, InvalidFrameError
+)
+from .flags import Flag, Flags
+
+
+# The maximum initial length of a frame. Some frames have shorter maximum
+# lengths.
+FRAME_MAX_LEN = (2 ** 14)
+
+# The maximum allowed length of a frame.
+FRAME_MAX_ALLOWED_LEN = (2 ** 24) - 1
+
+# Stream association enumerations.
+_STREAM_ASSOC_HAS_STREAM = "has-stream"
+_STREAM_ASSOC_NO_STREAM = "no-stream"
+_STREAM_ASSOC_EITHER = "either"
+
+# Structs for packing and unpacking
+_STRUCT_HBBBL = struct.Struct(">HBBBL")
+_STRUCT_LL = struct.Struct(">LL")
+_STRUCT_HL = struct.Struct(">HL")
+_STRUCT_LB = struct.Struct(">LB")
+_STRUCT_L = struct.Struct(">L")
+_STRUCT_H = struct.Struct(">H")
+_STRUCT_B = struct.Struct(">B")
+
+
+class Frame(object):
+    """
+    The base class for all HTTP/2 frames.
+    """
+    #: The flags defined on this type of frame.
+    defined_flags = []
+
+    #: The byte used to define the type of the frame.
+    type = None
+
+    # If 'has-stream', the frame's stream_id must be non-zero. If 'no-stream',
+    # it must be zero. If 'either', it's not checked.
+    stream_association = None
+
+    def __init__(self, stream_id, flags=()):
+        #: The stream identifier for the stream this frame was received on.
+        #: Set to 0 for frames sent on the connection (stream-id 0).
+        self.stream_id = stream_id
+
+        #: The flags set for this frame.
+        self.flags = Flags(self.defined_flags)
+
+        #: The frame length, excluding the nine-byte header.
+        self.body_len = 0
+
+        for flag in flags:
+            self.flags.add(flag)
+
+        if (not self.stream_id and
+           self.stream_association == _STREAM_ASSOC_HAS_STREAM):
+            raise ValueError('Stream ID must be non-zero')
+        if (self.stream_id and
+           self.stream_association == _STREAM_ASSOC_NO_STREAM):
+            raise ValueError('Stream ID must be zero')
+
+    def __repr__(self):
+        flags = ", ".join(self.flags) or "None"
+        body = binascii.hexlify(self.serialize_body()).decode('ascii')
+        if len(body) > 20:
+            body = body[:20] + "..."
+        return (
+            "{type}(Stream: {stream}; Flags: {flags}): {body}"
+        ).format(
+            type=type(self).__name__,
+            stream=self.stream_id,
+            flags=flags,
+            body=body
+        )
+
+    @staticmethod
+    def parse_frame_header(header, strict=False):
+        """
+        Takes a 9-byte frame header and returns a tuple of the appropriate
+        Frame object and the length that needs to be read from the socket.
+
+        This populates the flags field, and determines how long the body is.
+
+        :param strict: Whether to raise an exception when encountering a frame
+            not defined by spec and implemented by hyperframe.
+
+        :raises hyperframe.exceptions.UnknownFrameError: If a frame of unknown
+            type is received.
+
+        .. versionchanged:: 5.0.0
+            Added :param:`strict` to accommodate :class:`ExtensionFrame`
+        """
+        try:
+            fields = _STRUCT_HBBBL.unpack(header)
+        except struct.error:
+            raise InvalidFrameError("Invalid frame header")
+
+        # First 24 bits are frame length.
+        length = (fields[0] << 8) + fields[1]
+        type = fields[2]
+        flags = fields[3]
+        stream_id = fields[4] & 0x7FFFFFFF
+
+        try:
+            frame = FRAMES[type](stream_id)
+        except KeyError:
+            if strict:
+                raise UnknownFrameError(type, length)
+            frame = ExtensionFrame(type=type, stream_id=stream_id)
+
+        frame.parse_flags(flags)
+        return (frame, length)
+
+    def parse_flags(self, flag_byte):
+        for flag, flag_bit in self.defined_flags:
+            if flag_byte & flag_bit:
+                self.flags.add(flag)
+
+        return self.flags
+
+    def serialize(self):
+        """
+        Convert a frame into a bytestring, representing the serialized form of
+        the frame.
+        """
+        body = self.serialize_body()
+        self.body_len = len(body)
+
+        # Build the common frame header.
+        # First, get the flags.
+        flags = 0
+
+        for flag, flag_bit in self.defined_flags:
+            if flag in self.flags:
+                flags |= flag_bit
+
+        header = _STRUCT_HBBBL.pack(
+            (self.body_len >> 8) & 0xFFFF,  # Length spread over top 24 bits
+            self.body_len & 0xFF,
+            self.type,
+            flags,
+            self.stream_id & 0x7FFFFFFF  # Stream ID is 32 bits.
+        )
+
+        return header + body
+
+    def serialize_body(self):
+        raise NotImplementedError()
+
+    def parse_body(self, data):
+        """
+        Given the body of a frame, parses it into frame data. This populates
+        the non-header parts of the frame: that is, it does not populate the
+        stream ID or flags.
+
+        :param data: A memoryview object containing the body data of the frame.
+                     Must not contain *more* data than the length returned by
+                     :meth:`parse_frame_header
+                     <hyperframe.frame.Frame.parse_frame_header>`.
+        """
+        raise NotImplementedError()
+
+
+class Padding(object):
+    """
+    Mixin for frames that contain padding. Defines extra fields that can be
+    used and set by frames that can be padded.
+    """
+    def __init__(self, stream_id, pad_length=0, **kwargs):
+        super(Padding, self).__init__(stream_id, **kwargs)
+
+        #: The length of the padding to use.
+        self.pad_length = pad_length
+
+    def serialize_padding_data(self):
+        if 'PADDED' in self.flags:
+            return _STRUCT_B.pack(self.pad_length)
+        return b''
+
+    def parse_padding_data(self, data):
+        if 'PADDED' in self.flags:
+            try:
+                self.pad_length = struct.unpack('!B', data[:1])[0]
+            except struct.error:
+                raise InvalidFrameError("Invalid Padding data")
+            return 1
+        return 0
+
+    @property
+    def total_padding(self):
+        return self.pad_length
+
+
+class Priority(object):
+    """
+    Mixin for frames that contain priority data. Defines extra fields that can
+    be used and set by frames that contain priority data.
+    """
+    def __init__(self,
+                 stream_id,
+                 depends_on=0x0,
+                 stream_weight=0x0,
+                 exclusive=False,
+                 **kwargs):
+        super(Priority, self).__init__(stream_id, **kwargs)
+
+        #: The stream ID of the stream on which this stream depends.
+        self.depends_on = depends_on
+
+        #: The weight of the stream. This is an integer between 0 and 256.
+        self.stream_weight = stream_weight
+
+        #: Whether the exclusive bit was set.
+        self.exclusive = exclusive
+
+    def serialize_priority_data(self):
+        return _STRUCT_LB.pack(
+            self.depends_on + (0x80000000 if self.exclusive else 0),
+            self.stream_weight
+        )
+
+    def parse_priority_data(self, data):
+        try:
+            self.depends_on, self.stream_weight = _STRUCT_LB.unpack(data[:5])
+        except struct.error:
+            raise InvalidFrameError("Invalid Priority data")
+
+        self.exclusive = True if self.depends_on >> 31 else False
+        self.depends_on &= 0x7FFFFFFF
+        return 5
+
+
+class DataFrame(Padding, Frame):
+    """
+    DATA frames convey arbitrary, variable-length sequences of octets
+    associated with a stream. One or more DATA frames are used, for instance,
+    to carry HTTP request or response payloads.
+    """
+    #: The flags defined for DATA frames.
+    defined_flags = [
+        Flag('END_STREAM', 0x01),
+        Flag('PADDED', 0x08),
+    ]
+
+    #: The type byte for data frames.
+    type = 0x0
+
+    stream_association = _STREAM_ASSOC_HAS_STREAM
+
+    def __init__(self, stream_id, data=b'', **kwargs):
+        super(DataFrame, self).__init__(stream_id, **kwargs)
+
+        #: The data contained on this frame.
+        self.data = data
+
+    def serialize_body(self):
+        padding_data = self.serialize_padding_data()
+        padding = b'\0' * self.total_padding
+        if isinstance(self.data, memoryview):
+            self.data = self.data.tobytes()
+        return b''.join([padding_data, self.data, padding])
+
+    def parse_body(self, data):
+        padding_data_length = self.parse_padding_data(data)
+        self.data = (
+            data[padding_data_length:len(data)-self.total_padding].tobytes()
+        )
+        self.body_len = len(data)
+
+        if self.total_padding and self.total_padding >= self.body_len:
+            raise InvalidPaddingError("Padding is too long.")
+
+    @property
+    def flow_controlled_length(self):
+        """
+        The length of the frame that needs to be accounted for when considering
+        flow control.
+        """
+        padding_len = 0
+        if 'PADDED' in self.flags:
+            # Account for the extra 1-byte padding length field, which is
+            # present even if the padding itself is zero bytes long.
+            padding_len = self.total_padding + 1
+        return len(self.data) + padding_len
+
+
+class PriorityFrame(Priority, Frame):
+    """
+    The PRIORITY frame specifies the sender-advised priority of a stream. It
+    can be sent at any time for an existing stream. This enables
+    reprioritisation of existing streams.
+    """
+    #: The flags defined for PRIORITY frames.
+    defined_flags = []
+
+    #: The type byte defined for PRIORITY frames.
+    type = 0x02
+
+    stream_association = _STREAM_ASSOC_HAS_STREAM
+
+    def serialize_body(self):
+        return self.serialize_priority_data()
+
+    def parse_body(self, data):
+        self.parse_priority_data(data)
+        self.body_len = len(data)
+
+
+class RstStreamFrame(Frame):
+    """
+    The RST_STREAM frame allows for abnormal termination of a stream. When sent
+    by the initiator of a stream, it indicates that they wish to cancel the
+    stream or that an error condition has occurred. When sent by the receiver
+    of a stream, it indicates that either the receiver is rejecting the stream,
+    requesting that the stream be cancelled or that an error condition has
+    occurred.
+    """
+    #: The flags defined for RST_STREAM frames.
+    defined_flags = []
+
+    #: The type byte defined for RST_STREAM frames.
+    type = 0x03
+
+    stream_association = _STREAM_ASSOC_HAS_STREAM
+
+    def __init__(self, stream_id, error_code=0, **kwargs):
+        super(RstStreamFrame, self).__init__(stream_id, **kwargs)
+
+        #: The error code used when resetting the stream.
+        self.error_code = error_code
+
+    def serialize_body(self):
+        return _STRUCT_L.pack(self.error_code)
+
+    def parse_body(self, data):
+        if len(data) != 4:
+            raise InvalidFrameError(
+                "RST_STREAM must have 4 byte body: actual length %s." %
+                len(data)
+            )
+
+        try:
+            self.error_code = _STRUCT_L.unpack(data)[0]
+        except struct.error:  # pragma: no cover
+            raise InvalidFrameError("Invalid RST_STREAM body")
+
+        self.body_len = 4
+
+
+class SettingsFrame(Frame):
+    """
+    The SETTINGS frame conveys configuration parameters that affect how
+    endpoints communicate. The parameters are either constraints on peer
+    behavior or preferences.
+
+    Settings are not negotiated. Settings describe characteristics of the
+    sending peer, which are used by the receiving peer. Different values for
+    the same setting can be advertised by each peer. For example, a client
+    might set a high initial flow control window, whereas a server might set a
+    lower value to conserve resources.
+    """
+    #: The flags defined for SETTINGS frames.
+    defined_flags = [Flag('ACK', 0x01)]
+
+    #: The type byte defined for SETTINGS frames.
+    type = 0x04
+
+    stream_association = _STREAM_ASSOC_NO_STREAM
+
+    # We need to define the known settings, they may as well be class
+    # attributes.
+    #: The byte that signals the SETTINGS_HEADER_TABLE_SIZE setting.
+    HEADER_TABLE_SIZE = 0x01
+    #: The byte that signals the SETTINGS_ENABLE_PUSH setting.
+    ENABLE_PUSH = 0x02
+    #: The byte that signals the SETTINGS_MAX_CONCURRENT_STREAMS setting.
+    MAX_CONCURRENT_STREAMS = 0x03
+    #: The byte that signals the SETTINGS_INITIAL_WINDOW_SIZE setting.
+    INITIAL_WINDOW_SIZE = 0x04
+    #: The byte that signals the SETTINGS_MAX_FRAME_SIZE setting.
+    MAX_FRAME_SIZE = 0x05
+    #: The byte that signals the SETTINGS_MAX_HEADER_LIST_SIZE setting.
+    MAX_HEADER_LIST_SIZE = 0x06
+
+    def __init__(self, stream_id=0, settings=None, **kwargs):
+        super(SettingsFrame, self).__init__(stream_id, **kwargs)
+
+        if settings and "ACK" in kwargs.get("flags", ()):
+            raise ValueError("Settings must be empty if ACK flag is set.")
+
+        #: A dictionary of the setting type byte to the value of the setting.
+        self.settings = settings or {}
+
+    def serialize_body(self):
+        return b''.join([_STRUCT_HL.pack(setting & 0xFF, value)
+                         for setting, value in self.settings.items()])
+
+    def parse_body(self, data):
+        body_len = 0
+        for i in range(0, len(data), 6):
+            try:
+                name, value = _STRUCT_HL.unpack(data[i:i+6])
+            except struct.error:
+                raise InvalidFrameError("Invalid SETTINGS body")
+
+            self.settings[name] = value
+            body_len += 6
+
+        self.body_len = body_len
+
+
+class PushPromiseFrame(Padding, Frame):
+    """
+    The PUSH_PROMISE frame is used to notify the peer endpoint in advance of
+    streams the sender intends to initiate.
+    """
+    #: The flags defined for PUSH_PROMISE frames.
+    defined_flags = [
+        Flag('END_HEADERS', 0x04),
+        Flag('PADDED', 0x08)
+    ]
+
+    #: The type byte defined for PUSH_PROMISE frames.
+    type = 0x05
+
+    stream_association = _STREAM_ASSOC_HAS_STREAM
+
+    def __init__(self, stream_id, promised_stream_id=0, data=b'', **kwargs):
+        super(PushPromiseFrame, self).__init__(stream_id, **kwargs)
+
+        #: The stream ID that is promised by this frame.
+        self.promised_stream_id = promised_stream_id
+
+        #: The HPACK-encoded header block for the simulated request on the new
+        #: stream.
+        self.data = data
+
+    def serialize_body(self):
+        padding_data = self.serialize_padding_data()
+        padding = b'\0' * self.total_padding
+        data = _STRUCT_L.pack(self.promised_stream_id)
+        return b''.join([padding_data, data, self.data, padding])
+
+    def parse_body(self, data):
+        padding_data_length = self.parse_padding_data(data)
+
+        try:
+            self.promised_stream_id = _STRUCT_L.unpack(
+                data[padding_data_length:padding_data_length + 4]
+            )[0]
+        except struct.error:
+            raise InvalidFrameError("Invalid PUSH_PROMISE body")
+
+        self.data = data[padding_data_length + 4:].tobytes()
+        self.body_len = len(data)
+
+        if self.total_padding and self.total_padding >= self.body_len:
+            raise InvalidPaddingError("Padding is too long.")
+
+
+class PingFrame(Frame):
+    """
+    The PING frame is a mechanism for measuring a minimal round-trip time from
+    the sender, as well as determining whether an idle connection is still
+    functional. PING frames can be sent from any endpoint.
+    """
+    #: The flags defined for PING frames.
+    defined_flags = [Flag('ACK', 0x01)]
+
+    #: The type byte defined for PING frames.
+    type = 0x06
+
+    stream_association = _STREAM_ASSOC_NO_STREAM
+
+    def __init__(self, stream_id=0, opaque_data=b'', **kwargs):
+        super(PingFrame, self).__init__(stream_id, **kwargs)
+
+        #: The opaque data sent in this PING frame, as a bytestring.
+        self.opaque_data = opaque_data
+
+    def serialize_body(self):
+        if len(self.opaque_data) > 8:
+            raise InvalidFrameError(
+                "PING frame may not have more than 8 bytes of data, got %s" %
+                self.opaque_data
+            )
+
+        data = self.opaque_data
+        data += b'\x00' * (8 - len(self.opaque_data))
+        return data
+
+    def parse_body(self, data):
+        if len(data) != 8:
+            raise InvalidFrameError(
+                "PING frame must have 8 byte length: got %s" % len(data)
+            )
+
+        self.opaque_data = data.tobytes()
+        self.body_len = 8
+
+
+class GoAwayFrame(Frame):
+    """
+    The GOAWAY frame informs the remote peer to stop creating streams on this
+    connection. It can be sent from the client or the server. Once sent, the
+    sender will ignore frames sent on new streams for the remainder of the
+    connection.
+    """
+    #: The flags defined for GOAWAY frames.
+    defined_flags = []
+
+    #: The type byte defined for GOAWAY frames.
+    type = 0x07
+
+    stream_association = _STREAM_ASSOC_NO_STREAM
+
+    def __init__(self,
+                 stream_id=0,
+                 last_stream_id=0,
+                 error_code=0,
+                 additional_data=b'',
+                 **kwargs):
+        super(GoAwayFrame, self).__init__(stream_id, **kwargs)
+
+        #: The last stream ID definitely seen by the remote peer.
+        self.last_stream_id = last_stream_id
+
+        #: The error code for connection teardown.
+        self.error_code = error_code
+
+        #: Any additional data sent in the GOAWAY.
+        self.additional_data = additional_data
+
+    def serialize_body(self):
+        data = _STRUCT_LL.pack(
+            self.last_stream_id & 0x7FFFFFFF,
+            self.error_code
+        )
+        data += self.additional_data
+
+        return data
+
+    def parse_body(self, data):
+        try:
+            self.last_stream_id, self.error_code = _STRUCT_LL.unpack(
+                data[:8]
+            )
+        except struct.error:
+            raise InvalidFrameError("Invalid GOAWAY body.")
+
+        self.body_len = len(data)
+
+        if len(data) > 8:
+            self.additional_data = data[8:].tobytes()
+
+
+class WindowUpdateFrame(Frame):
+    """
+    The WINDOW_UPDATE frame is used to implement flow control.
+
+    Flow control operates at two levels: on each individual stream and on the
+    entire connection.
+
+    Both types of flow control are hop by hop; that is, only between the two
+    endpoints. Intermediaries do not forward WINDOW_UPDATE frames between
+    dependent connections. However, throttling of data transfer by any receiver
+    can indirectly cause the propagation of flow control information toward the
+    original sender.
+    """
+    #: The flags defined for WINDOW_UPDATE frames.
+    defined_flags = []
+
+    #: The type byte defined for WINDOW_UPDATE frames.
+    type = 0x08
+
+    stream_association = _STREAM_ASSOC_EITHER
+
+    def __init__(self, stream_id, window_increment=0, **kwargs):
+        super(WindowUpdateFrame, self).__init__(stream_id, **kwargs)
+
+        #: The amount the flow control window is to be incremented.
+        self.window_increment = window_increment
+
+    def serialize_body(self):
+        return _STRUCT_L.pack(self.window_increment & 0x7FFFFFFF)
+
+    def parse_body(self, data):
+        try:
+            self.window_increment = _STRUCT_L.unpack(data)[0]
+        except struct.error:
+            raise InvalidFrameError("Invalid WINDOW_UPDATE body")
+
+        self.body_len = 4
+
+
+class HeadersFrame(Padding, Priority, Frame):
+    """
+    The HEADERS frame carries name-value pairs. It is used to open a stream.
+    HEADERS frames can be sent on a stream in the "open" or "half closed
+    (remote)" states.
+
+    The HeadersFrame class is actually basically a data frame in this
+    implementation, because of the requirement to control the sizes of frames.
+    A header block fragment that doesn't fit in an entire HEADERS frame needs
+    to be followed with CONTINUATION frames. From the perspective of the frame
+    building code the header block is an opaque data segment.
+    """
+    #: The flags defined for HEADERS frames.
+    defined_flags = [
+        Flag('END_STREAM', 0x01),
+        Flag('END_HEADERS', 0x04),
+        Flag('PADDED', 0x08),
+        Flag('PRIORITY', 0x20),
+    ]
+
+    #: The type byte defined for HEADERS frames.
+    type = 0x01
+
+    stream_association = _STREAM_ASSOC_HAS_STREAM
+
+    def __init__(self, stream_id, data=b'', **kwargs):
+        super(HeadersFrame, self).__init__(stream_id, **kwargs)
+
+        #: The HPACK-encoded header block.
+        self.data = data
+
+    def serialize_body(self):
+        padding_data = self.serialize_padding_data()
+        padding = b'\0' * self.total_padding
+
+        if 'PRIORITY' in self.flags:
+            priority_data = self.serialize_priority_data()
+        else:
+            priority_data = b''
+
+        return b''.join([padding_data, priority_data, self.data, padding])
+
+    def parse_body(self, data):
+        padding_data_length = self.parse_padding_data(data)
+        data = data[padding_data_length:]
+
+        if 'PRIORITY' in self.flags:
+            priority_data_length = self.parse_priority_data(data)
+        else:
+            priority_data_length = 0
+
+        self.body_len = len(data)
+        self.data = (
+            data[priority_data_length:len(data)-self.total_padding].tobytes()
+        )
+
+        if self.total_padding and self.total_padding >= self.body_len:
+            raise InvalidPaddingError("Padding is too long.")
+
+
+class ContinuationFrame(Frame):
+    """
+    The CONTINUATION frame is used to continue a sequence of header block
+    fragments. Any number of CONTINUATION frames can be sent on an existing
+    stream, as long as the preceding frame on the same stream is one of
+    HEADERS, PUSH_PROMISE or CONTINUATION without the END_HEADERS flag set.
+
+    Much like the HEADERS frame, hyper treats this as an opaque data frame with
+    different flags and a different type.
+    """
+    #: The flags defined for CONTINUATION frames.
+    defined_flags = [Flag('END_HEADERS', 0x04)]
+
+    #: The type byte defined for CONTINUATION frames.
+    type = 0x09
+
+    stream_association = _STREAM_ASSOC_HAS_STREAM
+
+    def __init__(self, stream_id, data=b'', **kwargs):
+        super(ContinuationFrame, self).__init__(stream_id, **kwargs)
+
+        #: The HPACK-encoded header block.
+        self.data = data
+
+    def serialize_body(self):
+        return self.data
+
+    def parse_body(self, data):
+        self.data = data.tobytes()
+        self.body_len = len(data)
+
+
+class AltSvcFrame(Frame):
+    """
+    The ALTSVC frame is used to advertise alternate services that the current
+    host, or a different one, can understand. This frame is standardised as
+    part of RFC 7838.
+
+    This frame does no work to validate that the ALTSVC field parameter is
+    acceptable per the rules of RFC 7838.
+
+    .. note:: If the ``stream_id`` of this frame is nonzero, the origin field
+              must have zero length. Conversely, if the ``stream_id`` of this
+              frame is zero, the origin field must have nonzero length. Put
+              another way, a valid ALTSVC frame has ``stream_id != 0`` XOR
+              ``len(origin) != 0``.
+    """
+    type = 0xA
+
+    stream_association = _STREAM_ASSOC_EITHER
+
+    def __init__(self, stream_id, origin=b'', field=b'', **kwargs):
+        super(AltSvcFrame, self).__init__(stream_id, **kwargs)
+
+        if not isinstance(origin, bytes):
+            raise ValueError("AltSvc origin must be bytestring.")
+        if not isinstance(field, bytes):
+            raise ValueError("AltSvc field must be a bytestring.")
+        self.origin = origin
+        self.field = field
+
+    def serialize_body(self):
+        origin_len = _STRUCT_H.pack(len(self.origin))
+        return b''.join([origin_len, self.origin, self.field])
+
+    def parse_body(self, data):
+        try:
+            origin_len = _STRUCT_H.unpack(data[0:2])[0]
+            self.origin = data[2:2+origin_len].tobytes()
+
+            if len(self.origin) != origin_len:
+                raise InvalidFrameError("Invalid ALTSVC frame body.")
+
+            self.field = data[2+origin_len:].tobytes()
+        except (struct.error, ValueError):
+            raise InvalidFrameError("Invalid ALTSVC frame body.")
+
+        self.body_len = len(data)
+
+
+class ExtensionFrame(Frame):
+    """
+    ExtensionFrame is used to wrap frames which are not natively interpretable
+    by hyperframe.
+
+    Although certain byte prefixes are ordained by specification to have
+    certain contextual meanings, frames with other prefixes are not prohibited,
+    and may be used to communicate arbitrary meaning between HTTP/2 peers.
+
+    Thus, hyperframe, rather than raising an exception when such a frame is
+    encountered, wraps it in a generic frame to be properly acted upon by
+    upstream consumers which might have additional context on how to use it.
+
+    .. versionadded:: 5.0.0
+    """
+
+    stream_association = _STREAM_ASSOC_EITHER
+
+    def __init__(self, type, stream_id, **kwargs):
+        super(ExtensionFrame, self).__init__(stream_id, **kwargs)
+        self.type = type
+        self.flag_byte = None
+
+    def parse_flags(self, flag_byte):
+        """
+        For extension frames, we parse the flags by just storing a flag byte.
+        """
+        self.flag_byte = flag_byte
+
+    def parse_body(self, data):
+        self.body = data.tobytes()
+        self.body_len = len(data)
+
+    def serialize(self):
+        """
+        A broad override of the serialize method that ensures that the data
+        comes back out exactly as it came in. This should not be used in most
+        user code: it exists only as a helper method if frames need to be
+        reconstituted.
+        """
+        # Build the frame header.
+        # First, get the flags.
+        flags = self.flag_byte
+
+        header = _STRUCT_HBBBL.pack(
+            (self.body_len >> 8) & 0xFFFF,  # Length spread over top 24 bits
+            self.body_len & 0xFF,
+            self.type,
+            flags,
+            self.stream_id & 0x7FFFFFFF  # Stream ID is 32 bits.
+        )
+
+        return header + self.body
+
+
+_FRAME_CLASSES = [
+    DataFrame,
+    HeadersFrame,
+    PriorityFrame,
+    RstStreamFrame,
+    SettingsFrame,
+    PushPromiseFrame,
+    PingFrame,
+    GoAwayFrame,
+    WindowUpdateFrame,
+    ContinuationFrame,
+    AltSvcFrame,
+]
+#: FRAMES maps the type byte for each frame to the class used to represent that
+#: frame.
+FRAMES = {cls.type: cls for cls in _FRAME_CLASSES}
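Putting the pieces together, a minimal sketch of reading frames off a byte buffer with this module (mirroring the decode helper in the tests below; the PING payload is arbitrary):

from hyperframe.frame import Frame, PingFrame

def decode_one(buf):
    # Header first: returns an empty frame object plus the body length to read.
    frame, length = Frame.parse_frame_header(buf[:9])
    frame.parse_body(memoryview(buf[9:9 + length]))
    return frame, buf[9 + length:]

wire = PingFrame(opaque_data=b'12345678').serialize()
frame, rest = decode_one(wire)
assert isinstance(frame, PingFrame) and frame.opaque_data == b'12345678' and rest == b''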
diff --git a/tools/third_party/hyperframe/setup.cfg b/tools/third_party/hyperframe/setup.cfg
new file mode 100644
index 0000000..1e3eb36
--- /dev/null
+++ b/tools/third_party/hyperframe/setup.cfg
@@ -0,0 +1,7 @@
+[wheel]
+universal = 1
+
+[egg_info]
+tag_build = 
+tag_date = 0
+
diff --git a/tools/third_party/hyperframe/setup.py b/tools/third_party/hyperframe/setup.py
new file mode 100644
index 0000000..8f13c26
--- /dev/null
+++ b/tools/third_party/hyperframe/setup.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import itertools
+import os
+import re
+import sys
+
+try:
+    from setuptools import setup
+except ImportError:
+    from distutils.core import setup
+
+# Get the version
+version_regex = r'__version__ = ["\']([^"\']*)["\']'
+with open('hyperframe/__init__.py', 'r') as f:
+    text = f.read()
+    match = re.search(version_regex, text)
+
+    if match:
+        version = match.group(1)
+    else:
+        raise RuntimeError("No version number found!")
+
+# Stealing this from Kenneth Reitz
+if sys.argv[-1] == 'publish':
+    os.system('python setup.py sdist upload')
+    sys.exit()
+
+
+packages = ['hyperframe']
+
+setup(
+    name='hyperframe',
+    version=version,
+    description='HTTP/2 framing layer for Python',
+    long_description=open('README.rst').read() + '\n\n' + open('HISTORY.rst').read(),
+    author='Cory Benfield',
+    author_email='cory@lukasa.co.uk',
+    url='http://hyper.rtfd.org',
+    packages=packages,
+    package_data={'': ['LICENSE', 'README.rst', 'CONTRIBUTORS.rst', 'HISTORY.rst']},
+    package_dir={'hyperframe': 'hyperframe'},
+    include_package_data=True,
+    license='MIT License',
+    classifiers=[
+        'Development Status :: 5 - Production/Stable',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: Implementation :: CPython',
+    ],
+)
diff --git a/tools/third_party/hyperframe/test/test_flags.py b/tools/third_party/hyperframe/test/test_flags.py
new file mode 100644
index 0000000..62a6a30
--- /dev/null
+++ b/tools/third_party/hyperframe/test/test_flags.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+from hyperframe.frame import (
+    Flags, Flag,
+)
+import pytest
+
+
+class TestFlags(object):
+    def test_add(self):
+        flags = Flags([Flag("VALID_FLAG", 0x00)])
+        assert not flags
+
+        flags.add("VALID_FLAG")
+        flags.add("VALID_FLAG")
+        assert "VALID_FLAG" in flags
+        assert list(flags) == ["VALID_FLAG"]
+        assert len(flags) == 1
+
+    def test_remove(self):
+        flags = Flags([Flag("VALID_FLAG", 0x00)])
+        flags.add("VALID_FLAG")
+
+        flags.discard("VALID_FLAG")
+        assert "VALID_FLAG" not in flags
+        assert list(flags) == []
+        assert len(flags) == 0
+
+        # discarding elements not in the set should not throw an exception
+        flags.discard("END_STREAM")
+
+    def test_validation(self):
+        flags = Flags([Flag("VALID_FLAG", 0x00)])
+        flags.add("VALID_FLAG")
+        with pytest.raises(ValueError):
+            flags.add("INVALID_FLAG")
diff --git a/tools/third_party/hyperframe/test/test_frames.py b/tools/third_party/hyperframe/test/test_frames.py
new file mode 100644
index 0000000..948349a
--- /dev/null
+++ b/tools/third_party/hyperframe/test/test_frames.py
@@ -0,0 +1,776 @@
+# -*- coding: utf-8 -*-
+from hyperframe.frame import (
+    Frame, Flags, DataFrame, PriorityFrame, RstStreamFrame, SettingsFrame,
+    PushPromiseFrame, PingFrame, GoAwayFrame, WindowUpdateFrame, HeadersFrame,
+    ContinuationFrame, AltSvcFrame, ExtensionFrame
+)
+from hyperframe.exceptions import (
+    UnknownFrameError, InvalidPaddingError, InvalidFrameError
+)
+import pytest
+
+
+def decode_frame(frame_data):
+    f, length = Frame.parse_frame_header(frame_data[:9])
+    f.parse_body(memoryview(frame_data[9:9 + length]))
+    assert 9 + length == len(frame_data)
+    return f
+
+
+class TestGeneralFrameBehaviour(object):
+    def test_base_frame_ignores_flags(self):
+        f = Frame(stream_id=0)
+        flags = f.parse_flags(0xFF)
+        assert not flags
+        assert isinstance(flags, Flags)
+
+    def test_base_frame_cant_serialize(self):
+        f = Frame(stream_id=0)
+        with pytest.raises(NotImplementedError):
+            f.serialize()
+
+    def test_base_frame_cant_parse_body(self):
+        data = b''
+        f = Frame(stream_id=0)
+        with pytest.raises(NotImplementedError):
+            f.parse_body(data)
+
+    def test_parse_frame_header_unknown_type_strict(self):
+        with pytest.raises(UnknownFrameError) as excinfo:
+            Frame.parse_frame_header(
+                b'\x00\x00\x59\xFF\x00\x00\x00\x00\x01',
+                strict=True
+            )
+        exception = excinfo.value
+        assert exception.frame_type == 0xFF
+        assert exception.length == 0x59
+        assert str(exception) == (
+            "UnknownFrameError: Unknown frame type 0xFF received, "
+            "length 89 bytes"
+        )
+
+    def test_parse_frame_header_ignore_first_bit_of_stream_id(self):
+        s = b'\x00\x00\x00\x06\x01\x80\x00\x00\x00'
+        f, _ = Frame.parse_frame_header(s)
+
+        assert f.stream_id == 0
+
+    def test_parse_frame_header_unknown_type(self):
+        f, l = Frame.parse_frame_header(
+            b'\x00\x00\x59\xFF\x00\x00\x00\x00\x01'
+        )
+        assert f.type == 0xFF
+        assert l == 0x59
+        assert isinstance(f, ExtensionFrame)
+        assert f.stream_id == 1
+
+    def test_flags_are_persisted(self):
+        f, l = Frame.parse_frame_header(
+            b'\x00\x00\x59\xFF\x09\x00\x00\x00\x01'
+        )
+        assert f.type == 0xFF
+        assert l == 0x59
+        assert f.flag_byte == 0x09
+
+    def test_parse_body_unknown_type(self):
+        f = decode_frame(
+            b'\x00\x00\x0C\xFF\x00\x00\x00\x00\x01hello world!'
+        )
+        assert f.body == b'hello world!'
+        assert f.body_len == 12
+        assert f.stream_id == 1
+
+    def test_can_round_trip_unknown_frames(self):
+        frame_data = b'\x00\x00\x0C\xFF\x00\x00\x00\x00\x01hello world!'
+        f = decode_frame(frame_data)
+        assert f.serialize() == frame_data
+
+    def test_repr(self, monkeypatch):
+        f = Frame(stream_id=0)
+        monkeypatch.setattr(Frame, "serialize_body", lambda _: b"body")
+        assert repr(f) == "Frame(Stream: 0; Flags: None): 626f6479"
+
+        monkeypatch.setattr(Frame, "serialize_body", lambda _: b"A"*25)
+        assert repr(f) == (
+            "Frame(Stream: 0; Flags: None): {}...".format("41"*10)
+        )
+
+    def test_cannot_parse_invalid_frame_header(self):
+        with pytest.raises(InvalidFrameError):
+            Frame.parse_frame_header(b'\x00\x00\x08\x00\x01\x00\x00\x00')
+
+
+class TestDataFrame(object):
+    payload = b'\x00\x00\x08\x00\x01\x00\x00\x00\x01testdata'
+    payload_with_padding = (
+        b'\x00\x00\x13\x00\x09\x00\x00\x00\x01\x0Atestdata' + b'\0' * 10
+    )
+
+    def test_data_frame_has_correct_flags(self):
+        f = DataFrame(1)
+        flags = f.parse_flags(0xFF)
+        assert flags == set([
+            'END_STREAM', 'PADDED'
+        ])
+
+    @pytest.mark.parametrize('data', [
+        b'testdata',
+        memoryview(b'testdata')
+    ])
+    def test_data_frame_serializes_properly(self, data):
+        f = DataFrame(1)
+        f.flags = set(['END_STREAM'])
+        f.data = data
+
+        s = f.serialize()
+        assert s == self.payload
+
+    def test_data_frame_with_padding_serializes_properly(self):
+        f = DataFrame(1)
+        f.flags = set(['END_STREAM', 'PADDED'])
+        f.data = b'testdata'
+        f.pad_length = 10
+
+        s = f.serialize()
+        assert s == self.payload_with_padding
+
+    def test_data_frame_parses_properly(self):
+        f = decode_frame(self.payload)
+
+        assert isinstance(f, DataFrame)
+        assert f.flags == set(['END_STREAM'])
+        assert f.pad_length == 0
+        assert f.data == b'testdata'
+        assert f.body_len == 8
+
+    def test_data_frame_with_padding_parses_properly(self):
+        f = decode_frame(self.payload_with_padding)
+
+        assert isinstance(f, DataFrame)
+        assert f.flags == set(['END_STREAM', 'PADDED'])
+        assert f.pad_length == 10
+        assert f.data == b'testdata'
+        assert f.body_len == 19
+
+    def test_data_frame_with_invalid_padding_errors(self):
+        with pytest.raises(InvalidFrameError):
+            decode_frame(self.payload_with_padding[:9])
+
+    def test_data_frame_with_padding_calculates_flow_control_len(self):
+        f = DataFrame(1)
+        f.flags = set(['PADDED'])
+        f.data = b'testdata'
+        f.pad_length = 10
+
+        assert f.flow_controlled_length == 19
+
+    def test_data_frame_zero_length_padding_calculates_flow_control_len(self):
+        f = DataFrame(1)
+        f.flags = set(['PADDED'])
+        f.data = b'testdata'
+        f.pad_length = 0
+
+        assert f.flow_controlled_length == len(b'testdata') + 1
+
+    def test_data_frame_without_padding_calculates_flow_control_len(self):
+        f = DataFrame(1)
+        f.data = b'testdata'
+
+        assert f.flow_controlled_length == 8
+
+    def test_data_frame_comes_on_a_stream(self):
+        with pytest.raises(ValueError):
+            DataFrame(0)
+
+    def test_long_data_frame(self):
+        f = DataFrame(1)
+
+        # Use more than 256 bytes of data to force setting higher bits.
+        f.data = b'\x01' * 300
+        data = f.serialize()
+
+        # The top three bytes should be numerically equal to 300. That means
+        # they should read 00 01 2C.
+        # The weird double index trick is to ensure this test behaves equally
+        # on Python 2 and Python 3.
+        assert data[0] == b'\x00'[0]
+        assert data[1] == b'\x01'[0]
+        assert data[2] == b'\x2C'[0]
+
+    def test_body_length_behaves_correctly(self):
+        f = DataFrame(1)
+
+        f.data = b'\x01' * 300
+
+        # Initially the body length is zero. For now this is incidental, but
+        # I'm going to test it to ensure that the behaviour is codified. We
+        # should change this test if we change that.
+        assert f.body_len == 0
+
+        f.serialize()
+        assert f.body_len == 300
+
+    def test_data_frame_with_invalid_padding_fails_to_parse(self):
+        # This frame has a padding length of 6 bytes, but a total length of
+        # only 5.
+        data = b'\x00\x00\x05\x00\x0b\x00\x00\x00\x01\x06\x54\x65\x73\x74'
+
+        with pytest.raises(InvalidPaddingError):
+            decode_frame(data)
+
+    def test_data_frame_with_no_length_parses(self):
+        # Fixes issue with empty data frames raising InvalidPaddingError.
+        f = DataFrame(1)
+        f.data = b''
+        data = f.serialize()
+
+        new_frame = decode_frame(data)
+        assert new_frame.data == b''
+
+
+class TestPriorityFrame(object):
+    payload = b'\x00\x00\x05\x02\x00\x00\x00\x00\x01\x80\x00\x00\x04\x40'
+
+    def test_priority_frame_has_no_flags(self):
+        f = PriorityFrame(1)
+        flags = f.parse_flags(0xFF)
+        assert flags == set()
+        assert isinstance(flags, Flags)
+
+    def test_priority_frame_default_serializes_properly(self):
+        f = PriorityFrame(1)
+
+        assert f.serialize() == (
+            b'\x00\x00\x05\x02\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00'
+        )
+
+    def test_priority_frame_with_all_data_serializes_properly(self):
+        f = PriorityFrame(1)
+        f.depends_on = 0x04
+        f.stream_weight = 64
+        f.exclusive = True
+
+        assert f.serialize() == self.payload
+
+    def test_priority_frame_with_all_data_parses_properly(self):
+        f = decode_frame(self.payload)
+
+        assert isinstance(f, PriorityFrame)
+        assert f.flags == set()
+        assert f.depends_on == 4
+        assert f.stream_weight == 64
+        assert f.exclusive is True
+        assert f.body_len == 5
+
+    def test_priority_frame_comes_on_a_stream(self):
+        with pytest.raises(ValueError):
+            PriorityFrame(0)
+
+    def test_short_priority_frame_errors(self):
+        with pytest.raises(InvalidFrameError):
+            decode_frame(self.payload[:-2])
+
+
+class TestRstStreamFrame(object):
+    def test_rst_stream_frame_has_no_flags(self):
+        f = RstStreamFrame(1)
+        flags = f.parse_flags(0xFF)
+        assert not flags
+        assert isinstance(flags, Flags)
+
+    def test_rst_stream_frame_serializes_properly(self):
+        f = RstStreamFrame(1)
+        f.error_code = 420
+
+        s = f.serialize()
+        assert s == b'\x00\x00\x04\x03\x00\x00\x00\x00\x01\x00\x00\x01\xa4'
+
+    def test_rst_stream_frame_parses_properly(self):
+        s = b'\x00\x00\x04\x03\x00\x00\x00\x00\x01\x00\x00\x01\xa4'
+        f = decode_frame(s)
+
+        assert isinstance(f, RstStreamFrame)
+        assert f.flags == set()
+        assert f.error_code == 420
+        assert f.body_len == 4
+
+    def test_rst_stream_frame_comes_on_a_stream(self):
+        with pytest.raises(ValueError):
+            RstStreamFrame(0)
+
+    def test_rst_stream_frame_must_have_body_length_four(self):
+        f = RstStreamFrame(1)
+        with pytest.raises(ValueError):
+            f.parse_body(b'\x01')
+
+
+class TestSettingsFrame(object):
+    serialized = (
+        b'\x00\x00\x24\x04\x01\x00\x00\x00\x00' +  # Frame header
+        b'\x00\x01\x00\x00\x10\x00' +              # HEADER_TABLE_SIZE
+        b'\x00\x02\x00\x00\x00\x00' +              # ENABLE_PUSH
+        b'\x00\x03\x00\x00\x00\x64' +              # MAX_CONCURRENT_STREAMS
+        b'\x00\x04\x00\x00\xFF\xFF' +              # INITIAL_WINDOW_SIZE
+        b'\x00\x05\x00\x00\x40\x00' +              # MAX_FRAME_SIZE
+        b'\x00\x06\x00\x00\xFF\xFF'                # MAX_HEADER_LIST_SIZE
+    )
+
+    settings = {
+        SettingsFrame.HEADER_TABLE_SIZE: 4096,
+        SettingsFrame.ENABLE_PUSH: 0,
+        SettingsFrame.MAX_CONCURRENT_STREAMS: 100,
+        SettingsFrame.INITIAL_WINDOW_SIZE: 65535,
+        SettingsFrame.MAX_FRAME_SIZE: 16384,
+        SettingsFrame.MAX_HEADER_LIST_SIZE: 65535,
+    }
+
+    def test_settings_frame_has_only_one_flag(self):
+        f = SettingsFrame()
+        flags = f.parse_flags(0xFF)
+        assert flags == set(['ACK'])
+
+    def test_settings_frame_serializes_properly(self):
+        f = SettingsFrame()
+        f.parse_flags(0xFF)
+        f.settings = self.settings
+
+        s = f.serialize()
+        assert s == self.serialized
+
+    def test_settings_frame_with_settings(self):
+        f = SettingsFrame(settings=self.settings)
+        assert f.settings == self.settings
+
+    def test_settings_frame_without_settings(self):
+        f = SettingsFrame()
+        assert f.settings == {}
+
+    def test_settings_frame_with_ack(self):
+        f = SettingsFrame(flags=('ACK',))
+        assert 'ACK' in f.flags
+
+    def test_settings_frame_ack_and_settings(self):
+        with pytest.raises(ValueError):
+            SettingsFrame(settings=self.settings, flags=('ACK',))
+
+    def test_settings_frame_parses_properly(self):
+        f = decode_frame(self.serialized)
+
+        assert isinstance(f, SettingsFrame)
+        assert f.flags == set(['ACK'])
+        assert f.settings == self.settings
+        assert f.body_len == 36
+
+    def test_settings_frames_never_have_streams(self):
+        with pytest.raises(ValueError):
+            SettingsFrame(stream_id=1)
+
+    def test_short_settings_frame_errors(self):
+        with pytest.raises(InvalidFrameError):
+            decode_frame(self.serialized[:-2])
+
+
+class TestPushPromiseFrame(object):
+    def test_push_promise_frame_flags(self):
+        f = PushPromiseFrame(1)
+        flags = f.parse_flags(0xFF)
+
+        assert flags == set(['END_HEADERS', 'PADDED'])
+
+    def test_push_promise_frame_serializes_properly(self):
+        f = PushPromiseFrame(1)
+        f.flags = set(['END_HEADERS'])
+        f.promised_stream_id = 4
+        f.data = b'hello world'
+
+        s = f.serialize()
+        assert s == (
+            b'\x00\x00\x0F\x05\x04\x00\x00\x00\x01' +
+            b'\x00\x00\x00\x04' +
+            b'hello world'
+        )
+
+    def test_push_promise_frame_parses_properly(self):
+        s = (
+            b'\x00\x00\x0F\x05\x04\x00\x00\x00\x01' +
+            b'\x00\x00\x00\x04' +
+            b'hello world'
+        )
+        f = decode_frame(s)
+
+        assert isinstance(f, PushPromiseFrame)
+        assert f.flags == set(['END_HEADERS'])
+        assert f.promised_stream_id == 4
+        assert f.data == b'hello world'
+        assert f.body_len == 15
+
+    def test_push_promise_frame_with_invalid_padding_fails_to_parse(self):
+        # This frame has a padding length of 6 bytes, but a total length of
+        # only 5.
+        data = b'\x00\x00\x05\x05\x08\x00\x00\x00\x01\x06\x54\x65\x73\x74'
+
+        with pytest.raises(InvalidPaddingError):
+            decode_frame(data)
+
+    def test_push_promise_frame_with_no_length_parses(self):
+        # Fixes issue with empty frames raising InvalidPaddingError.
+        f = PushPromiseFrame(1)
+        f.data = b''
+        data = f.serialize()
+
+        new_frame = decode_frame(data)
+        assert new_frame.data == b''
+
+    def test_short_push_promise_errors(self):
+        s = (
+            b'\x00\x00\x0F\x05\x04\x00\x00\x00\x01' +
+            b'\x00\x00\x00'  # One byte short
+        )
+
+        with pytest.raises(InvalidFrameError):
+            decode_frame(s)
+
+
+class TestPingFrame(object):
+    def test_ping_frame_has_only_one_flag(self):
+        f = PingFrame()
+        flags = f.parse_flags(0xFF)
+
+        assert flags == set(['ACK'])
+
+    def test_ping_frame_serializes_properly(self):
+        f = PingFrame()
+        f.parse_flags(0xFF)
+        f.opaque_data = b'\x01\x02'
+
+        s = f.serialize()
+        assert s == (
+            b'\x00\x00\x08\x06\x01\x00\x00\x00\x00\x01\x02\x00\x00\x00\x00\x00'
+            b'\x00'
+        )
+
+    def test_no_more_than_8_octets(self):
+        f = PingFrame()
+        f.opaque_data = b'\x01\x02\x03\x04\x05\x06\x07\x08\x09'
+
+        with pytest.raises(ValueError):
+            f.serialize()
+
+    def test_ping_frame_parses_properly(self):
+        s = (
+            b'\x00\x00\x08\x06\x01\x00\x00\x00\x00\x01\x02\x00\x00\x00\x00\x00'
+            b'\x00'
+        )
+        f = decode_frame(s)
+
+        assert isinstance(f, PingFrame)
+        assert f.flags == set(['ACK'])
+        assert f.opaque_data == b'\x01\x02\x00\x00\x00\x00\x00\x00'
+        assert f.body_len == 8
+
+    def test_ping_frame_never_has_a_stream(self):
+        with pytest.raises(ValueError):
+            PingFrame(stream_id=1)
+
+    def test_ping_frame_has_no_more_than_body_length_8(self):
+        f = PingFrame()
+        with pytest.raises(ValueError):
+            f.parse_body(b'\x01\x02\x03\x04\x05\x06\x07\x08\x09')
+
+    def test_ping_frame_has_no_less_than_body_length_8(self):
+        f = PingFrame()
+        with pytest.raises(ValueError):
+            f.parse_body(b'\x01\x02\x03\x04\x05\x06\x07')
+
+
+class TestGoAwayFrame(object):
+    def test_go_away_has_no_flags(self):
+        f = GoAwayFrame()
+        flags = f.parse_flags(0xFF)
+
+        assert not flags
+        assert isinstance(flags, Flags)
+
+    def test_goaway_serializes_properly(self):
+        f = GoAwayFrame()
+        f.last_stream_id = 64
+        f.error_code = 32
+        f.additional_data = b'hello'
+
+        s = f.serialize()
+        assert s == (
+            b'\x00\x00\x0D\x07\x00\x00\x00\x00\x00' +  # Frame header
+            b'\x00\x00\x00\x40' +                      # Last Stream ID
+            b'\x00\x00\x00\x20' +                      # Error Code
+            b'hello'                                   # Additional data
+        )
+
+    def test_goaway_frame_parses_properly(self):
+        s = (
+            b'\x00\x00\x0D\x07\x00\x00\x00\x00\x00' +  # Frame header
+            b'\x00\x00\x00\x40' +                      # Last Stream ID
+            b'\x00\x00\x00\x20' +                      # Error Code
+            b'hello'                                   # Additional data
+        )
+        f = decode_frame(s)
+
+        assert isinstance(f, GoAwayFrame)
+        assert f.flags == set()
+        assert f.additional_data == b'hello'
+        assert f.body_len == 13
+
+    def test_goaway_frame_never_has_a_stream(self):
+        with pytest.raises(ValueError):
+            GoAwayFrame(stream_id=1)
+
+    def test_short_goaway_frame_errors(self):
+        s = (
+            b'\x00\x00\x0D\x07\x00\x00\x00\x00\x00' +  # Frame header
+            b'\x00\x00\x00\x40' +                      # Last Stream ID
+            b'\x00\x00\x00'                            # short Error Code
+        )
+        with pytest.raises(InvalidFrameError):
+            decode_frame(s)
+
+
+class TestWindowUpdateFrame(object):
+    def test_window_update_has_no_flags(self):
+        f = WindowUpdateFrame(0)
+        flags = f.parse_flags(0xFF)
+
+        assert not flags
+        assert isinstance(flags, Flags)
+
+    def test_window_update_serializes_properly(self):
+        f = WindowUpdateFrame(0)
+        f.window_increment = 512
+
+        s = f.serialize()
+        assert s == b'\x00\x00\x04\x08\x00\x00\x00\x00\x00\x00\x00\x02\x00'
+
+    def test_windowupdate_frame_parses_properly(self):
+        s = b'\x00\x00\x04\x08\x00\x00\x00\x00\x00\x00\x00\x02\x00'
+        f = decode_frame(s)
+
+        assert isinstance(f, WindowUpdateFrame)
+        assert f.flags == set()
+        assert f.window_increment == 512
+        assert f.body_len == 4
+
+    def test_short_windowupdate_frame_errors(self):
+        s = b'\x00\x00\x04\x08\x00\x00\x00\x00\x00\x00\x00\x02'  # -1 byte
+
+        with pytest.raises(InvalidFrameError):
+            decode_frame(s)
+
+
+class TestHeadersFrame(object):
+    def test_headers_frame_flags(self):
+        f = HeadersFrame(1)
+        flags = f.parse_flags(0xFF)
+
+        assert flags == set(['END_STREAM', 'END_HEADERS',
+                             'PADDED', 'PRIORITY'])
+
+    def test_headers_frame_serializes_properly(self):
+        f = HeadersFrame(1)
+        f.flags = set(['END_STREAM', 'END_HEADERS'])
+        f.data = b'hello world'
+
+        s = f.serialize()
+        assert s == (
+            b'\x00\x00\x0B\x01\x05\x00\x00\x00\x01' +
+            b'hello world'
+        )
+
+    def test_headers_frame_parses_properly(self):
+        s = (
+            b'\x00\x00\x0B\x01\x05\x00\x00\x00\x01' +
+            b'hello world'
+        )
+        f = decode_frame(s)
+
+        assert isinstance(f, HeadersFrame)
+        assert f.flags == set(['END_STREAM', 'END_HEADERS'])
+        assert f.data == b'hello world'
+        assert f.body_len == 11
+
+    def test_headers_frame_with_priority_parses_properly(self):
+        # This test also tests that we can receive a HEADERS frame with no
+        # actual headers on it. This is technically possible.
+        s = (
+            b'\x00\x00\x05\x01\x20\x00\x00\x00\x01' +
+            b'\x80\x00\x00\x04\x40'
+        )
+        f = decode_frame(s)
+
+        assert isinstance(f, HeadersFrame)
+        assert f.flags == set(['PRIORITY'])
+        assert f.data == b''
+        assert f.depends_on == 4
+        assert f.stream_weight == 64
+        assert f.exclusive is True
+        assert f.body_len == 5
+
+    def test_headers_frame_with_priority_serializes_properly(self):
+        # This test also checks we can serialize a HEADERS frame with no
+        # actual headers on it. This is technically possible.
+        s = (
+            b'\x00\x00\x05\x01\x20\x00\x00\x00\x01' +
+            b'\x80\x00\x00\x04\x40'
+        )
+        f = HeadersFrame(1)
+        f.flags = set(['PRIORITY'])
+        f.data = b''
+        f.depends_on = 4
+        f.stream_weight = 64
+        f.exclusive = True
+
+        assert f.serialize() == s
+
+    def test_headers_frame_with_invalid_padding_fails_to_parse(self):
+        # This frame has a padding length of 6 bytes, but a total length of
+        # only 5.
+        data = b'\x00\x00\x05\x01\x08\x00\x00\x00\x01\x06\x54\x65\x73\x74'
+
+        with pytest.raises(InvalidPaddingError):
+            decode_frame(data)
+
+    def test_headers_frame_with_no_length_parses(self):
+        # Fixes issue with empty HEADERS frames raising InvalidPaddingError.
+        f = HeadersFrame(1)
+        f.data = b''
+        data = f.serialize()
+
+        new_frame = decode_frame(data)
+        assert new_frame.data == b''
+
+
+class TestContinuationFrame(object):
+    def test_continuation_frame_flags(self):
+        f = ContinuationFrame(1)
+        flags = f.parse_flags(0xFF)
+
+        assert flags == set(['END_HEADERS'])
+
+    def test_continuation_frame_serializes(self):
+        f = ContinuationFrame(1)
+        f.parse_flags(0x04)
+        f.data = b'hello world'
+
+        s = f.serialize()
+        assert s == (
+            b'\x00\x00\x0B\x09\x04\x00\x00\x00\x01' +
+            b'hello world'
+        )
+
+    def test_continuation_frame_parses_properly(self):
+        s = b'\x00\x00\x0B\x09\x04\x00\x00\x00\x01hello world'
+        f = decode_frame(s)
+
+        assert isinstance(f, ContinuationFrame)
+        assert f.flags == set(['END_HEADERS'])
+        assert f.data == b'hello world'
+        assert f.body_len == 11
+
+
+class TestAltSvcFrame(object):
+    payload_with_origin = (
+        b'\x00\x00\x31'  # Length
+        b'\x0A'  # Type
+        b'\x00'  # Flags
+        b'\x00\x00\x00\x00'  # Stream ID
+        b'\x00\x0B'  # Origin len
+        b'example.com'  # Origin
+        b'h2="alt.example.com:8000", h2=":443"'  # Field Value
+    )
+    payload_without_origin = (
+        b'\x00\x00\x13'  # Length
+        b'\x0A'  # Type
+        b'\x00'  # Flags
+        b'\x00\x00\x00\x01'  # Stream ID
+        b'\x00\x00'  # Origin len
+        b''  # Origin
+        b'h2=":8000"; ma=60'  # Field Value
+    )
+    payload_with_origin_and_stream = (
+        b'\x00\x00\x36'  # Length
+        b'\x0A'  # Type
+        b'\x00'  # Flags
+        b'\x00\x00\x00\x01'  # Stream ID
+        b'\x00\x0B'  # Origin len
+        b'example.com'  # Origin
+        b'Alt-Svc: h2=":443"; ma=2592000; persist=1'  # Field Value
+    )
+
+    def test_altsvc_frame_flags(self):
+        f = AltSvcFrame(stream_id=0)
+        flags = f.parse_flags(0xFF)
+
+        assert flags == set()
+
+    def test_altsvc_frame_with_origin_serializes_properly(self):
+        f = AltSvcFrame(stream_id=0)
+        f.origin = b'example.com'
+        f.field = b'h2="alt.example.com:8000", h2=":443"'
+
+        s = f.serialize()
+        assert s == self.payload_with_origin
+
+    def test_altsvc_frame_with_origin_parses_properly(self):
+        f = decode_frame(self.payload_with_origin)
+
+        assert isinstance(f, AltSvcFrame)
+        assert f.origin == b'example.com'
+        assert f.field == b'h2="alt.example.com:8000", h2=":443"'
+        assert f.body_len == 49
+        assert f.stream_id == 0
+
+    def test_altsvc_frame_without_origin_serializes_properly(self):
+        f = AltSvcFrame(stream_id=1, origin=b'', field=b'h2=":8000"; ma=60')
+        s = f.serialize()
+        assert s == self.payload_without_origin
+
+    def test_altsvc_frame_without_origin_parses_properly(self):
+        f = decode_frame(self.payload_without_origin)
+
+        assert isinstance(f, AltSvcFrame)
+        assert f.origin == b''
+        assert f.field == b'h2=":8000"; ma=60'
+        assert f.body_len == 19
+        assert f.stream_id == 1
+
+    def test_altsvc_frame_without_origin_parses_with_good_repr(self):
+        f = decode_frame(self.payload_without_origin)
+
+        assert repr(f) == (
+            "AltSvcFrame(Stream: 1; Flags: None): 000068323d223a383030..."
+        )
+
+    def test_altsvc_frame_with_origin_and_stream_serializes_properly(self):
+        # This frame is not valid, but we allow it to be serialized anyway.
+        f = AltSvcFrame(stream_id=1)
+        f.origin = b'example.com'
+        f.field = b'Alt-Svc: h2=":443"; ma=2592000; persist=1'
+
+        assert f.serialize() == self.payload_with_origin_and_stream
+
+    def test_short_altsvc_frame_errors(self):
+        with pytest.raises(InvalidFrameError):
+            decode_frame(self.payload_with_origin[:12])
+
+        with pytest.raises(InvalidFrameError):
+            decode_frame(self.payload_with_origin[:10])
+
+    def test_altsvc_with_unicode_origin_fails(self):
+        with pytest.raises(ValueError):
+            AltSvcFrame(
+                stream_id=0, origin=u'hello',
+                field=b'h2=":8000"; ma=60'
+            )
+
+    def test_altsvc_with_unicode_field_fails(self):
+        with pytest.raises(ValueError):
+            AltSvcFrame(
+                stream_id=0, origin=b'hello', field=u'h2=":8000"; ma=60'
+            )
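As background for the byte strings used throughout these tests, a minimal round-trip sketch (not part of the patch) mirroring the decode_frame() helper above; HTTP/2 frame headers are 9 bytes (3-byte length, 1-byte type, 1-byte flags, 4-byte stream id):

    from hyperframe.frame import DataFrame, Frame

    f = DataFrame(1)
    f.flags = set(['END_STREAM'])
    f.data = b'testdata'
    wire = f.serialize()

    # Parse the 9-byte header first; it yields an instance of the right
    # frame subclass plus the body length, then the body is parsed.
    parsed, length = Frame.parse_frame_header(wire[:9])
    parsed.parse_body(memoryview(wire[9:9 + length]))

    assert isinstance(parsed, DataFrame)
    assert parsed.data == b'testdata'
    assert parsed.flags == set(['END_STREAM'])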
diff --git a/tools/tox.ini b/tools/tox.ini
index f0a5ffe..3eb12c3 100644
--- a/tools/tox.ini
+++ b/tools/tox.ini
@@ -13,7 +13,6 @@
   pytest-cov
   mock
   hypothesis
-  h2
   pytest-catchlog
 
 commands =
diff --git a/tools/wpt/tox.ini b/tools/wpt/tox.ini
index d52f6f4..e486bb2 100644
--- a/tools/wpt/tox.ini
+++ b/tools/wpt/tox.ini
@@ -21,7 +21,6 @@
      pycodestyle==2.3.1
      pyflakes==1.6.0
      pep8-naming==0.4.1
-     h2
 
 commands =
      flake8 --append-config=../flake8.ini {posargs}
diff --git a/tools/wptrunner/requirements.txt b/tools/wptrunner/requirements.txt
index c4b8dfc..ac9b3be 100644
--- a/tools/wptrunner/requirements.txt
+++ b/tools/wptrunner/requirements.txt
@@ -2,5 +2,4 @@
 mozinfo == 0.10
 mozlog==3.8
 mozdebug == 0.1
-urllib3[secure] == 1.22
-h2 == 3.0.1
\ No newline at end of file
+urllib3[secure] == 1.22
\ No newline at end of file
diff --git a/tools/wptrunner/tox.ini b/tools/wptrunner/tox.ini
index 1ca5d2e..fa7985e 100644
--- a/tools/wptrunner/tox.ini
+++ b/tools/wptrunner/tox.ini
@@ -31,7 +31,6 @@
      pycodestyle==2.3.1
      pyflakes==1.6.0
      pep8-naming==0.4.1
-     h2
 
 commands =
      flake8 --append-config=../flake8.ini
diff --git a/tools/wptrunner/wptrunner/environment.py b/tools/wptrunner/wptrunner/environment.py
index 264febb..309174c 100644
--- a/tools/wptrunner/wptrunner/environment.py
+++ b/tools/wptrunner/wptrunner/environment.py
@@ -137,7 +137,7 @@
             "http": [8000, 8001],
             "https": [8443],
             "ws": [8888],
-            "wss": [8889]
+            "wss": [8889],
         }
 
         if os.path.exists(override_path):
diff --git a/tools/wptserve/setup.py b/tools/wptserve/setup.py
index c9c364b..194c337 100644
--- a/tools/wptserve/setup.py
+++ b/tools/wptserve/setup.py
@@ -1,7 +1,7 @@
 from setuptools import setup
 
 PACKAGE_VERSION = '2.0'
-deps = ["six>=1.8", "h2==3.0.1"]
+deps = ["six>=1.8"]
 
 setup(name='wptserve',
       version=PACKAGE_VERSION,