git-svn-id: http://googleappengine.googlecode.com/svn/trunk/python@193 80f5ef21-4148-0410-bacc-cfb02402ada8
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index 490a34a..3ada141 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -3,6 +3,38 @@
 
 App Engine Python SDK - Release Notes
 
+Version 1.5.3
+=============================
+- We've removed the limit on the size of blob uploads using the Blobstore API.
+- You can now send emails with any attachment extension that is not included on
+  the email attachment extension blacklist.
+- Added a db.get_indexes() method to retrieve an application's indexes and
+  their corresponding states.
+- The dev_appserver has been updated to understand the reduced index
+  requirements of the 1.5.2 datastore query planner changes.
+- The Datastore Admin functionality can now be enabled directly in the Admin
+  Console.
+- Added cas(), the compare-and-set function, to the Memcache API.
+   http://code.google.com/p/googleappengine/issues/detail?id=2139
+- Added a set_default_fetch_deadline() function to the URLFetch API, which
+  sets the URLFetch deadline globally.
+- Added an app_identity API with methods to get the application ID, default
+  hostname, and service accounts for asserting identity on outbound HTTP calls.
+    http://code.google.com/appengine/docs/python/appidentity/overview.html
+- Added an improved HRD migration tool that requires a read-only period
+  proportional to your datastore write rate (rather than to your datastore
+  size, as with the current version). The tool is not yet generally available.
+  If you are interested in being an early adopter, please fill out this form:
+  http://goo.gl/3jrXu
+- Fixed an issue in the Channel API where jsapi was not served with the correct
+  mime type.
+- Fixed an issue that broke use_library when the Python SDK was located in a
+  directory that contained the word 'django'.
+- Fixed an issue where blobs could not be uploaded using HTTPS.
+- Fixed an issue where GQL didn't allow querying for valid kind names
+  containing '.', '-', or ':'; such names can now be quoted as identifiers.
+    http://code.google.com/p/googleappengine/issues/detail?id=2584
+
 Version 1.5.2
 =============================
 - You can now specify the minimum pending latency for instances and the maximum
@@ -96,7 +128,7 @@
   code, unless code download is disabled for the application.
 - Added db.py support for making calls to the datastore asynchronously.
   Available functions are get_async(), put_async(), delete_async(),
-  allocate_ids_async(). Call get_result on the return value of asynchronous	
+  allocate_ids_async(). Call get_result on the return value of asynchronous
   datastore functions to block on the call.
 - Metadata queries can now get all namespaces, kinds, and properties in a given
   range.
diff --git a/VERSION b/VERSION
index 9d4ad65..862b270 100644
--- a/VERSION
+++ b/VERSION
@@ -1,3 +1,3 @@
-release: "1.5.2"
-timestamp: 1308730906
+release: "1.5.3"
+timestamp: 1311108376
 api_versions: ['1']
diff --git a/appcfg.py b/appcfg.py
index e8360c1..8f18e20 100755
--- a/appcfg.py
+++ b/appcfg.py
@@ -47,6 +47,7 @@
   os.path.join(DIR_PATH, 'lib', 'ipaddr'),
   os.path.join(DIR_PATH, 'lib', 'protorpc'),
   os.path.join(DIR_PATH, 'lib', 'webob'),
+  os.path.join(DIR_PATH, 'lib', 'whoosh'),
   os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
   os.path.join(DIR_PATH, 'lib', 'simplejson'),
   os.path.join(DIR_PATH, 'lib', 'graphy'),
diff --git a/bulkload_client.py b/bulkload_client.py
index e8360c1..8f18e20 100755
--- a/bulkload_client.py
+++ b/bulkload_client.py
@@ -47,6 +47,7 @@
   os.path.join(DIR_PATH, 'lib', 'ipaddr'),
   os.path.join(DIR_PATH, 'lib', 'protorpc'),
   os.path.join(DIR_PATH, 'lib', 'webob'),
+  os.path.join(DIR_PATH, 'lib', 'whoosh'),
   os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
   os.path.join(DIR_PATH, 'lib', 'simplejson'),
   os.path.join(DIR_PATH, 'lib', 'graphy'),
diff --git a/bulkloader.py b/bulkloader.py
index e8360c1..8f18e20 100755
--- a/bulkloader.py
+++ b/bulkloader.py
@@ -47,6 +47,7 @@
   os.path.join(DIR_PATH, 'lib', 'ipaddr'),
   os.path.join(DIR_PATH, 'lib', 'protorpc'),
   os.path.join(DIR_PATH, 'lib', 'webob'),
+  os.path.join(DIR_PATH, 'lib', 'whoosh'),
   os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
   os.path.join(DIR_PATH, 'lib', 'simplejson'),
   os.path.join(DIR_PATH, 'lib', 'graphy'),
diff --git a/dev_appserver.py b/dev_appserver.py
index e8360c1..8f18e20 100755
--- a/dev_appserver.py
+++ b/dev_appserver.py
@@ -47,6 +47,7 @@
   os.path.join(DIR_PATH, 'lib', 'ipaddr'),
   os.path.join(DIR_PATH, 'lib', 'protorpc'),
   os.path.join(DIR_PATH, 'lib', 'webob'),
+  os.path.join(DIR_PATH, 'lib', 'whoosh'),
   os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
   os.path.join(DIR_PATH, 'lib', 'simplejson'),
   os.path.join(DIR_PATH, 'lib', 'graphy'),
diff --git a/gen_protorpc.py b/gen_protorpc.py
index e8360c1..8f18e20 100755
--- a/gen_protorpc.py
+++ b/gen_protorpc.py
@@ -47,6 +47,7 @@
   os.path.join(DIR_PATH, 'lib', 'ipaddr'),
   os.path.join(DIR_PATH, 'lib', 'protorpc'),
   os.path.join(DIR_PATH, 'lib', 'webob'),
+  os.path.join(DIR_PATH, 'lib', 'whoosh'),
   os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
   os.path.join(DIR_PATH, 'lib', 'simplejson'),
   os.path.join(DIR_PATH, 'lib', 'graphy'),
diff --git a/google/appengine/api/app_identity/app_identity.py b/google/appengine/api/app_identity/app_identity.py
index d6992ff..a419f92 100644
--- a/google/appengine/api/app_identity/app_identity.py
+++ b/google/appengine/api/app_identity/app_identity.py
@@ -31,12 +31,14 @@
 import os
 
 from google.appengine.api import apiproxy_stub_map
+from google.appengine.api import memcache
 from google.appengine.api.app_identity import app_identity_service_pb
 from google.appengine.runtime import apiproxy_errors
 
 __all__ = ['BackendDeadlineExceeded',
            'BlobSizeTooLarge',
            'InternalError',
+           'InvalidScope',
            'Error',
            'create_rpc',
            'make_sign_blob_call',
@@ -48,6 +50,9 @@
            'get_service_account_name',
            'get_application_id',
            'get_default_version_hostname',
+           'get_access_token',
+           'get_access_token_uncached',
+           'make_get_access_token_call',
           ]
 
 
@@ -55,8 +60,11 @@
 _SIGN_FOR_APP_METHOD_NAME = 'SignForApp'
 _GET_CERTS_METHOD_NAME = 'GetPublicCertificatesForApp'
 _GET_SERVICE_ACCOUNT_NAME_METHOD_NAME = 'GetServiceAccountName'
+_GET_ACCESS_TOKEN_METHOD_NAME = 'GetAccessToken'
 _PARTITION_SEPARATOR = '~'
 _DOMAIN_SEPARATOR = ':'
+_MEMCACHE_KEY_PREFIX = '_ah_app_identity_'
+_MEMCACHE_NAMESPACE = '_ah_'
 
 
 class Error(Exception):
@@ -75,6 +83,10 @@
   """Unspecified internal failure."""
 
 
+class InvalidScope(Error):
+  """Invalid scope."""
+
+
 def _to_app_identity_error(error):
   """Translate an application error to an external Error, if possible.
 
@@ -93,6 +105,8 @@
       BlobSizeTooLarge,
       app_identity_service_pb.AppIdentityServiceError.UNKNOWN_ERROR:
       InternalError,
+      app_identity_service_pb.AppIdentityServiceError.UNKNOWN_SCOPE:
+      InvalidScope,
       }
   if error.application_error in error_map:
     return error_map[error.application_error](error.error_detail)
@@ -148,9 +162,6 @@
   request.set_bytes_to_sign(bytes_to_sign)
   response = app_identity_service_pb.SignForAppResponse()
 
-  if rpc.deadline is not None:
-    request.set_deadline(rpc.deadline)
-
   def signing_for_app_result(rpc):
     """Check success, handle exceptions, and return converted RPC result.
 
@@ -189,9 +200,6 @@
   request = app_identity_service_pb.GetPublicCertificateForAppRequest()
   response = app_identity_service_pb.GetPublicCertificateForAppResponse()
 
-  if rpc.deadline is not None:
-    request.set_deadline(rpc.deadline)
-
   def get_certs_result(rpc):
     """Check success, handle exceptions, and return converted RPC result.
 
@@ -362,3 +370,98 @@
 
 
   return os.getenv('DEFAULT_VERSION_HOSTNAME')
+
+
+def make_get_access_token_call(rpc, scopes):
+  """OAuth2 access token to act on behalf of the application (async, uncached).
+
+  Most developers should use get_access_token instead.
+
+  Args:
+    rpc: RPC object.
+    scopes: The requested API scope string, or a list of strings.
+  Raises:
+    InvalidScope: if the scopes are unspecified or invalid.
+  """
+
+  request = app_identity_service_pb.GetAccessTokenRequest()
+  if not scopes:
+    raise InvalidScope('No scopes specified.')
+  if isinstance(scopes, basestring):
+    request.add_scope(scopes)
+  else:
+    for scope in scopes:
+      request.add_scope(scope)
+  response = app_identity_service_pb.GetAccessTokenResponse()
+
+  def get_access_token_result(rpc):
+    """Check success, handle exceptions, and return converted RPC result.
+
+    This method waits for the RPC if it has not yet finished, and calls the
+    post-call hooks on the first invocation.
+
+    Args:
+      rpc: A UserRPC object.
+
+    Returns:
+      A pair: access token (string) and expiration time (seconds since the epoch).
+    """
+    assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
+    assert rpc.method == _GET_ACCESS_TOKEN_METHOD_NAME, repr(rpc.method)
+    try:
+      rpc.check_success()
+    except apiproxy_errors.ApplicationError, err:
+      raise _to_app_identity_error(err)
+
+    return response.access_token(), response.expiration_time()
+
+
+  rpc.make_call(_GET_ACCESS_TOKEN_METHOD_NAME, request,
+                response, get_access_token_result)
+
+
+def get_access_token_uncached(scopes, deadline=None):
+  """OAuth2 access token to act on behalf of the application (sync, uncached).
+
+  Most developers should use get_access_token instead.
+
+  Args:
+    scopes: The requested API scope string, or a list of strings.
+    deadline: Optional deadline in seconds for the operation; the default
+      is a system-specific deadline (typically 5 seconds).
+  Returns:
+    A pair: access token (string) and expiration time (seconds since the epoch).
+  """
+  rpc = create_rpc(deadline)
+  make_get_access_token_call(rpc, scopes)
+  rpc.wait()
+  return rpc.get_result()
+
+
+def get_access_token(scopes):
+  """OAuth2 access token to act on behalf of the application, cached.
+
+  Generates and caches an OAuth2 access token for the service account of the
+  App Engine application.
+
+  Each application has an associated Google account. This function returns an
+  OAuth2 access token corresponding to the running app. Access tokens are safe
+  to cache and reuse until the returned expiration time; this function does so
+  using memcache.
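+
+  Example (the scope below is illustrative; pass whichever OAuth2 scope or
+  list of scopes the Google API you are calling requires):
+
+    from google.appengine.api import app_identity
+    scope = 'https://www.googleapis.com/auth/devstorage.read_only'
+    token, expires_at = app_identity.get_access_token(scope)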
+
+  Args:
+    scopes: The requested API scope string, or a list of strings.
+  Returns:
+    A pair: access token (string) and expiration time (seconds since the epoch).
+  """
+
+  memcache_key = _MEMCACHE_KEY_PREFIX + str(scopes)
+  memcache_value = memcache.get(memcache_key, namespace=_MEMCACHE_NAMESPACE)
+  if memcache_value:
+    access_token, expires_at = memcache_value
+  else:
+    access_token, expires_at = get_access_token_uncached(scopes)
+
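+    # expires_at is an absolute Unix timestamp, so the entry is cached until
+    # five minutes before the token itself expires.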
+    memcache.add(memcache_key, (access_token, expires_at), expires_at - 300,
+                 namespace=_MEMCACHE_NAMESPACE)
+  return access_token, expires_at
diff --git a/google/appengine/api/app_identity/app_identity_service_pb.py b/google/appengine/api/app_identity/app_identity_service_pb.py
index 61e1b61..5d620d1 100644
--- a/google/appengine/api/app_identity/app_identity_service_pb.py
+++ b/google/appengine/api/app_identity/app_identity_service_pb.py
@@ -47,6 +47,8 @@
 class AppIdentityServiceError(ProtocolBuffer.ProtocolMessage):
 
 
+  SUCCESS      =    0
+  UNKNOWN_SCOPE =    9
   BLOB_TOO_LARGE = 1000
   DEADLINE_EXCEEDED = 1001
   NOT_A_VALID_APP = 1002
@@ -54,6 +56,8 @@
   GAIAMINT_NOT_INITIAILIZED = 1004
 
   _ErrorCode_NAMES = {
+    0: "SUCCESS",
+    9: "UNKNOWN_SCOPE",
     1000: "BLOB_TOO_LARGE",
     1001: "DEADLINE_EXCEEDED",
     1002: "NOT_A_VALID_APP",
@@ -155,7 +159,7 @@
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
   _SERIALIZED_DESCRIPTOR = array.array('B')
-  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WjZhcHBob3N0aW5nL2FwaS9hcHBfaWRlbnRpdHkvYXBwX2lkZW50aXR5X3NlcnZpY2UucHJvdG8KImFwcGhvc3RpbmcuQXBwSWRlbnRpdHlTZXJ2aWNlRXJyb3JzeglFcnJvckNvZGWLAZIBDkJMT0JfVE9PX0xBUkdFmAHoB4wBiwGSARFERUFETElORV9FWENFRURFRJgB6QeMAYsBkgEPTk9UX0FfVkFMSURfQVBQmAHqB4wBiwGSAQ1VTktOT1dOX0VSUk9SmAHrB4wBiwGSARlHQUlBTUlOVF9OT1RfSU5JVElBSUxJWkVEmAHsB4wBdLoBuQgKNmFwcGhvc3RpbmcvYXBpL2FwcF9pZGVudGl0eS9hcHBfaWRlbnRpdHlfc2VydmljZS5wcm90bxIKYXBwaG9zdGluZyKeAQoXQXBwSWRlbnRpdHlTZXJ2aWNlRXJyb3IiggEKCUVycm9yQ29kZRITCg5CTE9CX1RPT19MQVJHRRDoBxIWChFERUFETElORV9FWENFRURFRBDpBxIUCg9OT1RfQV9WQUxJRF9BUFAQ6gcSEgoNVU5LTk9XTl9FUlJPUhDrBxIeChlHQUlBTUlOVF9OT1RfSU5JVElBSUxJWkVEEOwHIioKEVNpZ25Gb3JBcHBSZXF1ZXN0EhUKDWJ5dGVzX3RvX3NpZ24YASABKAwiPwoSU2lnbkZvckFwcFJlc3BvbnNlEhAKCGtleV9uYW1lGAEgASgJEhcKD3NpZ25hdHVyZV9ieXRlcxgCIAEoDCIjCiFHZXRQdWJsaWNDZXJ0aWZpY2F0ZUZvckFwcFJlcXVlc3QiQwoRUHVibGljQ2VydGlmaWNhdGUSEAoIa2V5X25hbWUYASABKAkSHAoUeDUwOV9jZXJ0aWZpY2F0ZV9wZW0YAiABKAkijQEKIkdldFB1YmxpY0NlcnRpZmljYXRlRm9yQXBwUmVzcG9uc2USPgoXcHVibGljX2NlcnRpZmljYXRlX2xpc3QYASADKAsyHS5hcHBob3N0aW5nLlB1YmxpY0NlcnRpZmljYXRlEicKH21heF9jbGllbnRfY2FjaGVfdGltZV9pbl9zZWNvbmQYAiABKAMiHgocR2V0U2VydmljZUFjY291bnROYW1lUmVxdWVzdCI9Ch1HZXRTZXJ2aWNlQWNjb3VudE5hbWVSZXNwb25zZRIcChRzZXJ2aWNlX2FjY291bnRfbmFtZRgBIAEoCTLJAgoOU2lnbmluZ1NlcnZpY2USSwoKU2lnbkZvckFwcBIdLmFwcGhvc3RpbmcuU2lnbkZvckFwcFJlcXVlc3QaHi5hcHBob3N0aW5nLlNpZ25Gb3JBcHBSZXNwb25zZRJ8ChtHZXRQdWJsaWNDZXJ0aWZpY2F0ZXNGb3JBcHASLS5hcHBob3N0aW5nLkdldFB1YmxpY0NlcnRpZmljYXRlRm9yQXBwUmVxdWVzdBouLmFwcGhvc3RpbmcuR2V0UHVibGljQ2VydGlmaWNhdGVGb3JBcHBSZXNwb25zZRJsChVHZXRTZXJ2aWNlQWNjb3VudE5hbWUSKC5hcHBob3N0aW5nLkdldFNlcnZpY2VBY2NvdW50TmFtZVJlcXVlc3QaKS5hcHBob3N0aW5nLkdldFNlcnZpY2VBY2NvdW50TmFtZVJlc3BvbnNlQkAKJGNvbS5nb29nbGUuYXBwZW5naW5lLmFwaS5hcHBpZGVudGl0eSABKAJCFEFwcElkZW50aXR5U2VydmljZVBi"))
+  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WjZhcHBob3N0aW5nL2FwaS9hcHBfaWRlbnRpdHkvYXBwX2lkZW50aXR5X3NlcnZpY2UucHJvdG8KImFwcGhvc3RpbmcuQXBwSWRlbnRpdHlTZXJ2aWNlRXJyb3JzeglFcnJvckNvZGWLAZIBB1NVQ0NFU1OYAQCMAYsBkgENVU5LTk9XTl9TQ09QRZgBCYwBiwGSAQ5CTE9CX1RPT19MQVJHRZgB6AeMAYsBkgERREVBRExJTkVfRVhDRUVERUSYAekHjAGLAZIBD05PVF9BX1ZBTElEX0FQUJgB6geMAYsBkgENVU5LTk9XTl9FUlJPUpgB6weMAYsBkgEZR0FJQU1JTlRfTk9UX0lOSVRJQUlMSVpFRJgB7AeMAXS6AaMKCjZhcHBob3N0aW5nL2FwaS9hcHBfaWRlbnRpdHkvYXBwX2lkZW50aXR5X3NlcnZpY2UucHJvdG8SCmFwcGhvc3RpbmcivgEKF0FwcElkZW50aXR5U2VydmljZUVycm9yIqIBCglFcnJvckNvZGUSCwoHU1VDQ0VTUxAAEhEKDVVOS05PV05fU0NPUEUQCRITCg5CTE9CX1RPT19MQVJHRRDoBxIWChFERUFETElORV9FWENFRURFRBDpBxIUCg9OT1RfQV9WQUxJRF9BUFAQ6gcSEgoNVU5LTk9XTl9FUlJPUhDrBxIeChlHQUlBTUlOVF9OT1RfSU5JVElBSUxJWkVEEOwHIioKEVNpZ25Gb3JBcHBSZXF1ZXN0EhUKDWJ5dGVzX3RvX3NpZ24YASABKAwiPwoSU2lnbkZvckFwcFJlc3BvbnNlEhAKCGtleV9uYW1lGAEgASgJEhcKD3NpZ25hdHVyZV9ieXRlcxgCIAEoDCIjCiFHZXRQdWJsaWNDZXJ0aWZpY2F0ZUZvckFwcFJlcXVlc3QiQwoRUHVibGljQ2VydGlmaWNhdGUSEAoIa2V5X25hbWUYASABKAkSHAoUeDUwOV9jZXJ0aWZpY2F0ZV9wZW0YAiABKAkijQEKIkdldFB1YmxpY0NlcnRpZmljYXRlRm9yQXBwUmVzcG9uc2USPgoXcHVibGljX2NlcnRpZmljYXRlX2xpc3QYASADKAsyHS5hcHBob3N0aW5nLlB1YmxpY0NlcnRpZmljYXRlEicKH21heF9jbGllbnRfY2FjaGVfdGltZV9pbl9zZWNvbmQYAiABKAMiHgocR2V0U2VydmljZUFjY291bnROYW1lUmVxdWVzdCI9Ch1HZXRTZXJ2aWNlQWNjb3VudE5hbWVSZXNwb25zZRIcChRzZXJ2aWNlX2FjY291bnRfbmFtZRgBIAEoCSImChVHZXRBY2Nlc3NUb2tlblJlcXVlc3QSDQoFc2NvcGUYASADKAkiRwoWR2V0QWNjZXNzVG9rZW5SZXNwb25zZRIUCgxhY2Nlc3NfdG9rZW4YASABKAkSFwoPZXhwaXJhdGlvbl90aW1lGAIgASgDMqIDCg5TaWduaW5nU2VydmljZRJLCgpTaWduRm9yQXBwEh0uYXBwaG9zdGluZy5TaWduRm9yQXBwUmVxdWVzdBoeLmFwcGhvc3RpbmcuU2lnbkZvckFwcFJlc3BvbnNlEnwKG0dldFB1YmxpY0NlcnRpZmljYXRlc0ZvckFwcBItLmFwcGhvc3RpbmcuR2V0UHVibGljQ2VydGlmaWNhdGVGb3JBcHBSZXF1ZXN0Gi4uYXBwaG9zdGluZy5HZXRQdWJsaWNDZXJ0aWZpY2F0ZUZvckFwcFJlc3BvbnNlEmwKFUdldFNlcnZpY2VBY2NvdW50TmFtZRIoLmFwcGhvc3RpbmcuR2V0U2VydmljZUFjY291bnROYW1lUmVxdWVzdBopLmFwcGhvc3RpbmcuR2V0U2VydmljZUFjY291bnROYW1lUmVzcG9uc2USVwoOR2V0QWNjZXNzVG9rZW4SIS5hcHBob3N0aW5nLkdldEFjY2Vzc1Rva2VuUmVxdWVzdBoiLmFwcGhvc3RpbmcuR2V0QWNjZXNzVG9rZW5SZXNwb25zZUJACiRjb20uZ29vZ2xlLmFwcGVuZ2luZS5hcGkuYXBwaWRlbnRpdHkgASgCQhRBcHBJZGVudGl0eVNlcnZpY2VQYg=="))
   if _net_proto___parse__python is not None:
     _net_proto___parse__python.RegisterType(
         _SERIALIZED_DESCRIPTOR.tostring())
@@ -1110,6 +1114,303 @@
     _net_proto___parse__python.RegisterType(
         _SERIALIZED_DESCRIPTOR.tostring())
 
+class GetAccessTokenRequest(ProtocolBuffer.ProtocolMessage):
+
+  def __init__(self, contents=None):
+    self.scope_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def scope_size(self): return len(self.scope_)
+  def scope_list(self): return self.scope_
+
+  def scope(self, i):
+    return self.scope_[i]
+
+  def set_scope(self, i, x):
+    self.scope_[i] = x
+
+  def add_scope(self, x):
+    self.scope_.append(x)
+
+  def clear_scope(self):
+    self.scope_ = []
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.scope_size()): self.add_scope(x.scope(i))
+
+  if _net_proto___parse__python is not None:
+    def _CMergeFromString(self, s):
+      _net_proto___parse__python.MergeFromString(self, 'apphosting.GetAccessTokenRequest', s)
+
+  if _net_proto___parse__python is not None:
+    def _CEncode(self):
+      return _net_proto___parse__python.Encode(self, 'apphosting.GetAccessTokenRequest')
+
+  if _net_proto___parse__python is not None:
+    def _CEncodePartial(self):
+      return _net_proto___parse__python.EncodePartial(self, 'apphosting.GetAccessTokenRequest')
+
+  if _net_proto___parse__python is not None:
+    def _CToASCII(self, output_format):
+      return _net_proto___parse__python.ToASCII(self, 'apphosting.GetAccessTokenRequest', output_format)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCII(self, s):
+      _net_proto___parse__python.ParseASCII(self, 'apphosting.GetAccessTokenRequest', s)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCIIIgnoreUnknown(self, s):
+      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.GetAccessTokenRequest', s)
+
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.scope_) != len(x.scope_): return 0
+    for e1, e2 in zip(self.scope_, x.scope_):
+      if e1 != e2: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 1 * len(self.scope_)
+    for i in xrange(len(self.scope_)): n += self.lengthString(len(self.scope_[i]))
+    return n
+
+  def ByteSizePartial(self):
+    n = 0
+    n += 1 * len(self.scope_)
+    for i in xrange(len(self.scope_)): n += self.lengthString(len(self.scope_[i]))
+    return n
+
+  def Clear(self):
+    self.clear_scope()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.scope_)):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.scope_[i])
+
+  def OutputPartial(self, out):
+    for i in xrange(len(self.scope_)):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.scope_[i])
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.add_scope(d.getPrefixedString())
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.scope_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("scope%s: %s\n" % (elm, self.DebugFormatString(e)))
+      cnt+=1
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kscope = 1
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "scope",
+  }, 1)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+  _SERIALIZED_DESCRIPTOR = array.array('B')
+  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WjZhcHBob3N0aW5nL2FwaS9hcHBfaWRlbnRpdHkvYXBwX2lkZW50aXR5X3NlcnZpY2UucHJvdG8KIGFwcGhvc3RpbmcuR2V0QWNjZXNzVG9rZW5SZXF1ZXN0ExoFc2NvcGUgASgCMAk4AxTCASJhcHBob3N0aW5nLkFwcElkZW50aXR5U2VydmljZUVycm9y"))
+  if _net_proto___parse__python is not None:
+    _net_proto___parse__python.RegisterType(
+        _SERIALIZED_DESCRIPTOR.tostring())
+
+class GetAccessTokenResponse(ProtocolBuffer.ProtocolMessage):
+  has_access_token_ = 0
+  access_token_ = ""
+  has_expiration_time_ = 0
+  expiration_time_ = 0
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def access_token(self): return self.access_token_
+
+  def set_access_token(self, x):
+    self.has_access_token_ = 1
+    self.access_token_ = x
+
+  def clear_access_token(self):
+    if self.has_access_token_:
+      self.has_access_token_ = 0
+      self.access_token_ = ""
+
+  def has_access_token(self): return self.has_access_token_
+
+  def expiration_time(self): return self.expiration_time_
+
+  def set_expiration_time(self, x):
+    self.has_expiration_time_ = 1
+    self.expiration_time_ = x
+
+  def clear_expiration_time(self):
+    if self.has_expiration_time_:
+      self.has_expiration_time_ = 0
+      self.expiration_time_ = 0
+
+  def has_expiration_time(self): return self.has_expiration_time_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_access_token()): self.set_access_token(x.access_token())
+    if (x.has_expiration_time()): self.set_expiration_time(x.expiration_time())
+
+  if _net_proto___parse__python is not None:
+    def _CMergeFromString(self, s):
+      _net_proto___parse__python.MergeFromString(self, 'apphosting.GetAccessTokenResponse', s)
+
+  if _net_proto___parse__python is not None:
+    def _CEncode(self):
+      return _net_proto___parse__python.Encode(self, 'apphosting.GetAccessTokenResponse')
+
+  if _net_proto___parse__python is not None:
+    def _CEncodePartial(self):
+      return _net_proto___parse__python.EncodePartial(self, 'apphosting.GetAccessTokenResponse')
+
+  if _net_proto___parse__python is not None:
+    def _CToASCII(self, output_format):
+      return _net_proto___parse__python.ToASCII(self, 'apphosting.GetAccessTokenResponse', output_format)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCII(self, s):
+      _net_proto___parse__python.ParseASCII(self, 'apphosting.GetAccessTokenResponse', s)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCIIIgnoreUnknown(self, s):
+      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.GetAccessTokenResponse', s)
+
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_access_token_ != x.has_access_token_: return 0
+    if self.has_access_token_ and self.access_token_ != x.access_token_: return 0
+    if self.has_expiration_time_ != x.has_expiration_time_: return 0
+    if self.has_expiration_time_ and self.expiration_time_ != x.expiration_time_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    if (self.has_access_token_): n += 1 + self.lengthString(len(self.access_token_))
+    if (self.has_expiration_time_): n += 1 + self.lengthVarInt64(self.expiration_time_)
+    return n
+
+  def ByteSizePartial(self):
+    n = 0
+    if (self.has_access_token_): n += 1 + self.lengthString(len(self.access_token_))
+    if (self.has_expiration_time_): n += 1 + self.lengthVarInt64(self.expiration_time_)
+    return n
+
+  def Clear(self):
+    self.clear_access_token()
+    self.clear_expiration_time()
+
+  def OutputUnchecked(self, out):
+    if (self.has_access_token_):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.access_token_)
+    if (self.has_expiration_time_):
+      out.putVarInt32(16)
+      out.putVarInt64(self.expiration_time_)
+
+  def OutputPartial(self, out):
+    if (self.has_access_token_):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.access_token_)
+    if (self.has_expiration_time_):
+      out.putVarInt32(16)
+      out.putVarInt64(self.expiration_time_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_access_token(d.getPrefixedString())
+        continue
+      if tt == 16:
+        self.set_expiration_time(d.getVarInt64())
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_access_token_: res+=prefix+("access_token: %s\n" % self.DebugFormatString(self.access_token_))
+    if self.has_expiration_time_: res+=prefix+("expiration_time: %s\n" % self.DebugFormatInt64(self.expiration_time_))
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kaccess_token = 1
+  kexpiration_time = 2
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "access_token",
+    2: "expiration_time",
+  }, 2)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+    2: ProtocolBuffer.Encoder.NUMERIC,
+  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+  _SERIALIZED_DESCRIPTOR = array.array('B')
+  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WjZhcHBob3N0aW5nL2FwaS9hcHBfaWRlbnRpdHkvYXBwX2lkZW50aXR5X3NlcnZpY2UucHJvdG8KIWFwcGhvc3RpbmcuR2V0QWNjZXNzVG9rZW5SZXNwb25zZRMaDGFjY2Vzc190b2tlbiABKAIwCTgBFBMaD2V4cGlyYXRpb25fdGltZSACKAAwAzgBFMIBImFwcGhvc3RpbmcuQXBwSWRlbnRpdHlTZXJ2aWNlRXJyb3I="))
+  if _net_proto___parse__python is not None:
+    _net_proto___parse__python.RegisterType(
+        _SERIALIZED_DESCRIPTOR.tostring())
+
 
 
 class _SigningService_ClientBaseStub(_client_stub_base_class):
@@ -1119,6 +1420,7 @@
       '_protorpc_SignForApp', '_full_name_SignForApp',
       '_protorpc_GetPublicCertificatesForApp', '_full_name_GetPublicCertificatesForApp',
       '_protorpc_GetServiceAccountName', '_full_name_GetServiceAccountName',
+      '_protorpc_GetAccessToken', '_full_name_GetAccessToken',
   )
 
   def __init__(self, rpc_stub):
@@ -1136,6 +1438,10 @@
     self._full_name_GetServiceAccountName = self._stub.GetFullMethodName(
         'GetServiceAccountName')
 
+    self._protorpc_GetAccessToken = pywraprpc.RPC()
+    self._full_name_GetAccessToken = self._stub.GetFullMethodName(
+        'GetAccessToken')
+
   def SignForApp(self, request, rpc=None, callback=None, response=None):
     """Make a SignForApp RPC call.
 
@@ -1211,6 +1517,31 @@
                           callback,
                           self._protorpc_GetServiceAccountName)
 
+  def GetAccessToken(self, request, rpc=None, callback=None, response=None):
+    """Make a GetAccessToken RPC call.
+
+    Args:
+      request: a GetAccessTokenRequest instance.
+      rpc: Optional RPC instance to use for the call.
+      callback: Optional final callback. Will be called as
+          callback(rpc, result) when the rpc completes. If None, the
+          call is synchronous.
+      response: Optional ProtocolMessage to be filled in with response.
+
+    Returns:
+      The GetAccessTokenResponse if callback is None. Otherwise, returns None.
+    """
+
+    if response is None:
+      response = GetAccessTokenResponse
+    return self._MakeCall(rpc,
+                          self._full_name_GetAccessToken,
+                          'GetAccessToken',
+                          request,
+                          response,
+                          callback,
+                          self._protorpc_GetAccessToken)
+
 
 class _SigningService_ClientStub(_SigningService_ClientBaseStub):
   __slots__ = ('_params',)
@@ -1309,6 +1640,17 @@
     """
     raise NotImplementedError
 
+
+  def GetAccessToken(self, rpc, request, response):
+    """Handles a GetAccessToken RPC call. You should override this.
+
+    Args:
+      rpc: a Stubby RPC object
+      request: a GetAccessTokenRequest that contains the client request
+      response: a GetAccessTokenResponse that should be modified to send the response
+    """
+    raise NotImplementedError
+
   def _AddMethodAttributes(self):
     """Sets attributes on Python RPC handlers.
 
@@ -1332,6 +1674,12 @@
         GetServiceAccountNameResponse,
         None,
         'none')
+    rpcserver._GetHandlerDecorator(
+        self.GetAccessToken.im_func,
+        GetAccessTokenRequest,
+        GetAccessTokenResponse,
+        None,
+        'none')
 
 
-__all__ = ['AppIdentityServiceError','SignForAppRequest','SignForAppResponse','GetPublicCertificateForAppRequest','PublicCertificate','GetPublicCertificateForAppResponse','GetServiceAccountNameRequest','GetServiceAccountNameResponse','SigningService']
+__all__ = ['AppIdentityServiceError','SignForAppRequest','SignForAppResponse','GetPublicCertificateForAppRequest','PublicCertificate','GetPublicCertificateForAppResponse','GetServiceAccountNameRequest','GetServiceAccountNameResponse','GetAccessTokenRequest','GetAccessTokenResponse','SigningService']
diff --git a/google/appengine/api/app_identity/app_identity_stub.py b/google/appengine/api/app_identity/app_identity_stub.py
index f41372c..22b964e 100644
--- a/google/appengine/api/app_identity/app_identity_stub.py
+++ b/google/appengine/api/app_identity/app_identity_stub.py
@@ -35,6 +35,7 @@
 
 
 import binascii
+import time
 
 try:
   from Crypto.Hash import SHA256
@@ -124,3 +125,16 @@
   def _Dynamic_GetServiceAccountName(self, request, response):
     """Implementation of AppIdentityService::GetServiceAccountName"""
     response.set_service_account_name(APP_SERVICE_ACCOUNT_NAME)
+
+  def _Dynamic_GetAccessToken(self, request, response):
+    """Implementation of AppIdentityService::GetAccessToken.
+
+    This API returns an invalid token, as the dev_appserver does not have
+    access to an actual service account.
+    """
+    response.set_access_token('InvalidToken:%s:%s' %
+                              (':'.join(request.scope_list()),
+                               time.time() % 100))
+
+    response.set_expiration_time(time.time() + 1800)
+
diff --git a/google/appengine/api/appinfo.py b/google/appengine/api/appinfo.py
index eedc8c9..c320e03 100755
--- a/google/appengine/api/appinfo.py
+++ b/google/appengine/api/appinfo.py
@@ -102,6 +102,9 @@
 VERSION_RE_STRING = r'(?!-)[a-z\d\-]{1,%d}' % MAJOR_VERSION_ID_MAX_LEN
 ALTERNATE_HOSTNAME_SEPARATOR = '-dot-'
 
+
+BUILTIN_NAME_PREFIX = 'ah-builtin'
+
 RUNTIME_RE_STRING = r'[a-z][a-z0-9]{0,29}'
 
 API_VERSION_RE_STRING = r'[\w.]{1,32}'
@@ -195,9 +198,13 @@
 
 SUPPORTED_LIBRARIES = {
     'django': ['1.2'],
+    'lxml': ['2.3'],
+    'numpy': ['1.5.1'],
+    'PIL': ['1.1.7'],
     'pycrypto': ['2.3'],
     'yaml': ['3.05'],
-    'webob': ['0.9'],
+    'webapp2': ['2.0.2'],
+    'webob': ['1.0.8'],
 }
 
 
@@ -822,11 +829,16 @@
       - Number of url mappers doesn't exceed MAX_URL_MAPS.
       - Major version does not contain the string -dot-.
       - If api_endpoints are defined, an api_config stanza must be defined.
+      - If the runtime is python27 and threadsafe is set, then no CGI handlers
+        can be used.
+      - The version name doesn't start with BUILTIN_NAME_PREFIX.
 
     Raises:
       MissingURLMapping: if no URLMap object is present in the object.
       TooManyURLMappings: if there are too many URLMap entries.
       MissingApiConfig: if api_endpoints exist without an api_config.
+      ThreadsafeWithCgiHandler: if the runtime is python27, threadsafe is set,
+          and CGI handlers are specified.
     """
     super(AppInfoExternal, self).CheckInitialized()
     if not self.handlers and not self.builtins and not self.includes:
@@ -852,6 +864,11 @@
       raise validation.ValidationError(
           'Version "%s" cannot contain the string "%s"' % (
               self.version, ALTERNATE_HOSTNAME_SEPARATOR))
+    if self.version and self.version.startswith(BUILTIN_NAME_PREFIX):
+      raise validation.ValidationError(
+          ('Version "%s" cannot start with "%s" because it is a '
+           'reserved version name prefix.') % (self.version,
+                                               BUILTIN_NAME_PREFIX))
     if self.handlers:
       api_endpoints = [handler.url for handler in self.handlers
                        if handler.GetHandlerType() == HANDLER_API_ENDPOINT]
@@ -859,6 +876,13 @@
         raise appinfo_errors.MissingApiConfig(
             'An api_endpoint handler was specified, but the required '
             'api_config stanza was not configured.')
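+      # A threadsafe python27 app must route every request to a WSGI
+      # application referenced by module path; a script referenced by file
+      # path (ending in '.py' or containing '/') is a CGI handler and is
+      # rejected below.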
+      if self.threadsafe and self.runtime == 'python27':
+        for handler in self.handlers:
+          if (handler.script and (handler.script.endswith('.py') or
+                                  '/' in handler.script)):
+            raise appinfo_errors.ThreadsafeWithCgiHandler(
+                'Threadsafe cannot be enabled with CGI handler: %s' %
+                handler.script)
 
   def ApplyBackendSettings(self, backend_name):
     """Applies settings from the indicated backend to the AppInfoExternal.
diff --git a/google/appengine/api/appinfo_errors.py b/google/appengine/api/appinfo_errors.py
index 801dae4..d45fd3d 100755
--- a/google/appengine/api/appinfo_errors.py
+++ b/google/appengine/api/appinfo_errors.py
@@ -85,17 +85,26 @@
 class DuplicateBackend(Error):
   """Raised when a backend is found more than once in 'backends'."""
 
+
 class MissingApiConfig(Error):
   """Raised if an api_endpoint handler is configured but no api_config."""
 
+
 class RuntimeDoesNotSupportLibraries(Error):
   """Raised when 'libraries' is used in a runtime that does not support it."""
 
+
 class DuplicateLibrary(Error):
   """Raised when a library is found more than once in 'libraries'."""
 
+
 class InvalidLibraryVersion(Error):
   """Raised when a library uses a version that isn't supported."""
 
+
 class InvalidLibraryName(Error):
   """Raised when a library is specified that isn't supported."""
+
+
+class ThreadsafeWithCgiHandler(Error):
+  """Raised when threadsafe is enabled with a CGI handler specified."""
diff --git a/google/appengine/api/appinfo_includes.py b/google/appengine/api/appinfo_includes.py
index 2cba9cc..3dd6114 100755
--- a/google/appengine/api/appinfo_includes.py
+++ b/google/appengine/api/appinfo_includes.py
@@ -65,6 +65,13 @@
     raise appinfo_errors.TooManyURLMappings(
         'Found more than %d URLMap entries in application configuration' %
         appinfo.MAX_URL_MAPS)
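+  # Same check as AppInfoExternal.CheckInitialized(): a threadsafe python27
+  # app may not use CGI handlers (scripts referenced by file path).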
+  if appyaml.runtime == 'python27' and appyaml.threadsafe:
+    for handler in appyaml.handlers:
+      if (handler.script and (handler.script.endswith('.py') or
+                              '/' in handler.script)):
+        raise appinfo_errors.ThreadsafeWithCgiHandler(
+            'Threadsafe cannot be enabled with CGI handler: %s' %
+            handler.script)
 
   return appyaml
 
diff --git a/google/appengine/api/channel/channel_service_pb.py b/google/appengine/api/channel/channel_service_pb.py
index 891b4b6..cfd229e 100755
--- a/google/appengine/api/channel/channel_service_pb.py
+++ b/google/appengine/api/channel/channel_service_pb.py
@@ -24,8 +24,6 @@
 __pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
                    unusednames=printElemNumber,debug_strs no-special"""
 
-from google.appengine.api.api_base_pb import *
-import google.appengine.api.api_base_pb
 class ChannelServiceError(ProtocolBuffer.ProtocolMessage):
 
 
diff --git a/google/appengine/api/conversion/conversion.py b/google/appengine/api/conversion/conversion.py
index bc73d82..f9e3daa 100644
--- a/google/appengine/api/conversion/conversion.py
+++ b/google/appengine/api/conversion/conversion.py
@@ -30,7 +30,7 @@
 
 
 from google.appengine.api import apiproxy_stub_map
-from google.appengine.api.conversion import conversion_service_pb2
+from google.appengine.api.conversion import conversion_service_pb
 from google.appengine.runtime import apiproxy_errors
 
 
@@ -91,19 +91,19 @@
     error: ConversionApi specific error message.
   """
   error_map = {
-      conversion_service_pb2.ConversionServiceError.TIMEOUT:
+      conversion_service_pb.ConversionServiceError.TIMEOUT:
       BackendDeadlineExceeded,
-      conversion_service_pb2.ConversionServiceError.TRANSIENT_ERROR:
+      conversion_service_pb.ConversionServiceError.TRANSIENT_ERROR:
       TransientError,
-      conversion_service_pb2.ConversionServiceError.INTERNAL_ERROR:
+      conversion_service_pb.ConversionServiceError.INTERNAL_ERROR:
       BackendError,
-      conversion_service_pb2.ConversionServiceError.UNSUPPORTED_CONVERSION:
+      conversion_service_pb.ConversionServiceError.UNSUPPORTED_CONVERSION:
       ConversionUnsupported,
-      conversion_service_pb2.ConversionServiceError.CONVERSION_TOO_LARGE:
+      conversion_service_pb.ConversionServiceError.CONVERSION_TOO_LARGE:
       ConversionTooLarge,
-      conversion_service_pb2.ConversionServiceError.TOO_MANY_CONVERSIONS:
+      conversion_service_pb.ConversionServiceError.TOO_MANY_CONVERSIONS:
       TooManyConversions,
-      conversion_service_pb2.ConversionServiceError.INVALID_REQUEST:
+      conversion_service_pb.ConversionServiceError.INVALID_REQUEST:
       InvalidRequest,
       }
   if error.application_error in error_map:
@@ -116,25 +116,25 @@
   """Translate an error code to an error message, if possible.
 
   Args:
-    error_code: An conversion_service_pb2.ConversionServiceError error code.
+    error_code: A conversion_service_pb.ConversionServiceError error code.
 
   Returns:
     Human readable error message.
   """
   error_map = {
-      conversion_service_pb2.ConversionServiceError.TIMEOUT:
+      conversion_service_pb.ConversionServiceError.TIMEOUT:
       "BackendDeadlineExceeded",
-      conversion_service_pb2.ConversionServiceError.TRANSIENT_ERROR:
+      conversion_service_pb.ConversionServiceError.TRANSIENT_ERROR:
       "TransientError",
-      conversion_service_pb2.ConversionServiceError.INTERNAL_ERROR:
+      conversion_service_pb.ConversionServiceError.INTERNAL_ERROR:
       "BackendError",
-      conversion_service_pb2.ConversionServiceError.UNSUPPORTED_CONVERSION:
+      conversion_service_pb.ConversionServiceError.UNSUPPORTED_CONVERSION:
       "ConversionUnsupported",
-      conversion_service_pb2.ConversionServiceError.CONVERSION_TOO_LARGE:
+      conversion_service_pb.ConversionServiceError.CONVERSION_TOO_LARGE:
       "ConversionTooLarge",
-      conversion_service_pb2.ConversionServiceError.TOO_MANY_CONVERSIONS:
+      conversion_service_pb.ConversionServiceError.TOO_MANY_CONVERSIONS:
       "TooManyConversions",
-      conversion_service_pb2.ConversionServiceError.INVALID_REQUEST:
+      conversion_service_pb.ConversionServiceError.INVALID_REQUEST:
       "InvalidRequest",
       }
   if error_code in error_map:
@@ -160,21 +160,18 @@
       name: name of the asset (string).
 
     Raises:
-      TypeError: if input arguments are not string or data is missing.
+      TypeError: if input arguments are not strings.
     """
-    if mime_type is not None:
-      if not isinstance(mime_type, (str, unicode)):
-        raise TypeError("mime_type %r is not a string" % mime_type)
-    self._mime_type = mime_type
+    if not isinstance(mime_type, basestring):
+      raise TypeError("mime type %r is not a string" % mime_type)
+    self._mime_type = mime_type.lower()
 
-    if data is None:
-      raise TypeError("Asset must have a data field")
-    if not isinstance(data, (str, unicode)):
+    if not isinstance(data, basestring):
       raise TypeError("data %r is not a string" % data)
     self._data = data
 
     if name is not None:
-      if not isinstance(name, (str, unicode)):
+      if not isinstance(name, basestring):
         raise TypeError("name %r is not a string" % name)
     self._name = name
 
@@ -193,19 +190,17 @@
     """The name of the asset (string)."""
     return self._name
 
-  def to_proto(self):
-    """Create and return an AssetInfo protocol buffer.
+  def _fill_proto(self, asset_info_pb):
+    """Fill an AssetInfo protocol buffer with Asset properties.
 
-    Returns:
-      An conversion_service_pb2.AssetInfo protocol buffer.
+    Args:
+      asset_info_pb: An AssetInfo protocol buffer.
     """
-    asset_info_pb = conversion_service_pb2.AssetInfo()
-    if self.mime_type is not None:
-      asset_info_pb.mime_type = self._mime_type
-    asset_info_pb.data = self._data
-    if self.name is not None:
-      asset_info_pb.name = self._name
-    return asset_info_pb
+    if self._mime_type is not None:
+      asset_info_pb.set_mime_type(self._mime_type)
+    asset_info_pb.set_data(self._data)
+    if self._name is not None:
+      asset_info_pb.set_name(self._name)
 
 
 class ConversionRequest(object):
@@ -216,31 +211,30 @@
   for example images in HTML.
   """
 
-  def __init__(self, asset, output_type):
+  def __init__(self, asset, output_mime_type):
     """Create a single conversion.
 
     Args:
       asset: An Asset instance.
-      output_type: output data mime type (string), put into the
-                   output_mime_type field.
+      output_mime_type: output data mime type (string), put into the
+                        output_mime_type field.
 
     Raises:
-      TypeError: if input arguments are not string or data is missing.
+      TypeError: if asset mime type or output_mime_type is not a string.
+      ValueError: if asset mime type or output_mime_type is empty.
     """
-    self.input_assets = []
+    self._assets = []
 
     if not asset.mime_type:
-      raise TypeError("Asset's mime type %r should not be None or empty" %
-                      asset.mime_type)
+      raise ValueError("Asset mime type should not be empty")
 
-    if not output_type:
-      raise TypeError("Output mime type %r should not be None or empty" %
-                      output_type)
-    if not isinstance(output_type, (str, unicode)):
-      raise TypeError("Output mime type %r is not a string" % output_type)
+    if not isinstance(output_mime_type, basestring):
+      raise TypeError("Output mime type %r is not a string" % output_mime_type)
+    if not output_mime_type:
+      raise ValueError("Output mime type should not be empty")
 
     self.add_asset(asset)
-    self.output_mime_type = output_type
+    self._output_mime_type = output_mime_type.lower()
 
   def add_asset(self, asset):
     """Add an asset into the conversion request.
@@ -254,16 +248,18 @@
     if not isinstance(asset, Asset):
       raise TypeError("Input %r is not an Asset instance" % asset)
 
-    self.input_assets.append(asset.to_proto())
+    self._assets.append(asset)
 
-  def to_proto(self):
-    """Create and return a ConversionInput protocol buffer."""
-    conversion = conversion_service_pb2.ConversionInput()
-    for asset in self.input_assets:
-      conversion.input.asset.extend([asset])
-    conversion.output_mime_type = self.output_mime_type
+  def _fill_proto(self, conversion_input_pb):
+    """Fill a ConversionInput protocol buffer with ConversionRequest properties.
 
-    return conversion
+    Args:
+      conversion_input_pb: A ConversionInput protocol buffer.
+    """
+    for asset in self._assets:
+      asset_pb = conversion_input_pb.mutable_input().add_asset()
+      asset._fill_proto(asset_pb)
+    conversion_input_pb.set_output_mime_type(self._output_mime_type)
 
 
 class ConversionOutput(object):
@@ -282,16 +278,16 @@
       AssertionError: if asset_info_proto is not an AssetInfo protocol buffer.
     """
     assert isinstance(conversion_output_proto,
-                      conversion_service_pb2.ConversionOutput)
+                      conversion_service_pb.ConversionOutput)
 
-    self._error_code = conversion_output_proto.error_code
+    self._error_code = conversion_output_proto.error_code()
     self._error_text = "OK"
-    if self._error_code != conversion_service_pb2.ConversionServiceError.OK:
+    if self._error_code != conversion_service_pb.ConversionServiceError.OK:
       self._error_text = _to_error_text(self._error_code)
     self._assets = []
-    for asset_pb in conversion_output_proto.output.asset:
+    for asset_pb in conversion_output_proto.output().asset_list():
       self._assets.append(Asset(
-          asset_pb.mime_type, asset_pb.data, asset_pb.name))
+          asset_pb.mime_type(), asset_pb.data(), asset_pb.name()))
 
   @property
   def error_code(self):
@@ -360,8 +356,8 @@
     TypeError: Input conversion_requests with wrong type.
     See more details in _to_conversion_error function.
   """
-  request = conversion_service_pb2.ConversionRequest()
-  response = conversion_service_pb2.ConversionResponse()
+  request = conversion_service_pb.ConversionRequest()
+  response = conversion_service_pb.ConversionResponse()
 
   try:
     conversion_requests = list(iter(conversion_request))
@@ -373,7 +369,8 @@
 
   for conversion in conversion_requests:
     if isinstance(conversion, ConversionRequest):
-      request.conversion.extend([conversion.to_proto()])
+      conversion_input_pb = request.add_conversion()
+      conversion._fill_proto(conversion_input_pb)
     else:
       raise TypeError("conversion_request must be a ConversionRequest instance "
                       "or a list of ConversionRequest instances")
@@ -404,7 +401,7 @@
     raise _to_conversion_error(e)
 
   results = []
-  for output_pb in rpc.response.result:
+  for output_pb in rpc.response.result_list():
     results.append(ConversionOutput(output_pb))
 
   multiple = rpc.user_data
diff --git a/google/appengine/api/conversion/conversion_service_pb.py b/google/appengine/api/conversion/conversion_service_pb.py
new file mode 100644
index 0000000..eb60d2c
--- /dev/null
+++ b/google/appengine/api/conversion/conversion_service_pb.py
@@ -0,0 +1,1095 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+from google.net.proto import ProtocolBuffer
+import array
+import dummy_thread as thread
+
+__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
+                   unusednames=printElemNumber,debug_strs no-special"""
+
+class ConversionServiceError(ProtocolBuffer.ProtocolMessage):
+
+
+  OK           =    0
+  TIMEOUT      =    1
+  TRANSIENT_ERROR =    2
+  INTERNAL_ERROR =    3
+  UNSUPPORTED_CONVERSION =    4
+  CONVERSION_TOO_LARGE =    5
+  TOO_MANY_CONVERSIONS =    6
+  INVALID_REQUEST =    7
+
+  _ErrorCode_NAMES = {
+    0: "OK",
+    1: "TIMEOUT",
+    2: "TRANSIENT_ERROR",
+    3: "INTERNAL_ERROR",
+    4: "UNSUPPORTED_CONVERSION",
+    5: "CONVERSION_TOO_LARGE",
+    6: "TOO_MANY_CONVERSIONS",
+    7: "INVALID_REQUEST",
+  }
+
+  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
+  ErrorCode_Name = classmethod(ErrorCode_Name)
+
+
+  def __init__(self, contents=None):
+    pass
+    if contents is not None: self.MergeFromString(contents)
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+
+  def Equals(self, x):
+    if x is self: return 1
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    return n
+
+  def ByteSizePartial(self):
+    n = 0
+    return n
+
+  def Clear(self):
+    pass
+
+  def OutputUnchecked(self, out):
+    pass
+
+  def OutputPartial(self, out):
+    pass
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+  }, 0)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class AssetInfo(ProtocolBuffer.ProtocolMessage):
+  has_name_ = 0
+  name_ = ""
+  has_data_ = 0
+  data_ = ""
+  has_mime_type_ = 0
+  mime_type_ = ""
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def name(self): return self.name_
+
+  def set_name(self, x):
+    self.has_name_ = 1
+    self.name_ = x
+
+  def clear_name(self):
+    if self.has_name_:
+      self.has_name_ = 0
+      self.name_ = ""
+
+  def has_name(self): return self.has_name_
+
+  def data(self): return self.data_
+
+  def set_data(self, x):
+    self.has_data_ = 1
+    self.data_ = x
+
+  def clear_data(self):
+    if self.has_data_:
+      self.has_data_ = 0
+      self.data_ = ""
+
+  def has_data(self): return self.has_data_
+
+  def mime_type(self): return self.mime_type_
+
+  def set_mime_type(self, x):
+    self.has_mime_type_ = 1
+    self.mime_type_ = x
+
+  def clear_mime_type(self):
+    if self.has_mime_type_:
+      self.has_mime_type_ = 0
+      self.mime_type_ = ""
+
+  def has_mime_type(self): return self.has_mime_type_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_name()): self.set_name(x.name())
+    if (x.has_data()): self.set_data(x.data())
+    if (x.has_mime_type()): self.set_mime_type(x.mime_type())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_name_ != x.has_name_: return 0
+    if self.has_name_ and self.name_ != x.name_: return 0
+    if self.has_data_ != x.has_data_: return 0
+    if self.has_data_ and self.data_ != x.data_: return 0
+    if self.has_mime_type_ != x.has_mime_type_: return 0
+    if self.has_mime_type_ and self.mime_type_ != x.mime_type_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    if (self.has_name_): n += 1 + self.lengthString(len(self.name_))
+    if (self.has_data_): n += 1 + self.lengthString(len(self.data_))
+    if (self.has_mime_type_): n += 1 + self.lengthString(len(self.mime_type_))
+    return n
+
+  def ByteSizePartial(self):
+    n = 0
+    if (self.has_name_): n += 1 + self.lengthString(len(self.name_))
+    if (self.has_data_): n += 1 + self.lengthString(len(self.data_))
+    if (self.has_mime_type_): n += 1 + self.lengthString(len(self.mime_type_))
+    return n
+
+  def Clear(self):
+    self.clear_name()
+    self.clear_data()
+    self.clear_mime_type()
+
+  def OutputUnchecked(self, out):
+    if (self.has_name_):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.name_)
+    if (self.has_data_):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.data_)
+    if (self.has_mime_type_):
+      out.putVarInt32(26)
+      out.putPrefixedString(self.mime_type_)
+
+  def OutputPartial(self, out):
+    if (self.has_name_):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.name_)
+    if (self.has_data_):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.data_)
+    if (self.has_mime_type_):
+      out.putVarInt32(26)
+      out.putPrefixedString(self.mime_type_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_name(d.getPrefixedString())
+        continue
+      if tt == 18:
+        self.set_data(d.getPrefixedString())
+        continue
+      if tt == 26:
+        self.set_mime_type(d.getPrefixedString())
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
+    if self.has_data_: res+=prefix+("data: %s\n" % self.DebugFormatString(self.data_))
+    if self.has_mime_type_: res+=prefix+("mime_type: %s\n" % self.DebugFormatString(self.mime_type_))
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kname = 1
+  kdata = 2
+  kmime_type = 3
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "name",
+    2: "data",
+    3: "mime_type",
+  }, 3)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+    2: ProtocolBuffer.Encoder.STRING,
+    3: ProtocolBuffer.Encoder.STRING,
+  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class DocumentInfo(ProtocolBuffer.ProtocolMessage):
+
+  def __init__(self, contents=None):
+    self.asset_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def asset_size(self): return len(self.asset_)
+  def asset_list(self): return self.asset_
+
+  def asset(self, i):
+    return self.asset_[i]
+
+  def mutable_asset(self, i):
+    return self.asset_[i]
+
+  def add_asset(self):
+    x = AssetInfo()
+    self.asset_.append(x)
+    return x
+
+  def clear_asset(self):
+    self.asset_ = []
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.asset_size()): self.add_asset().CopyFrom(x.asset(i))
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.asset_) != len(x.asset_): return 0
+    for e1, e2 in zip(self.asset_, x.asset_):
+      if e1 != e2: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    for p in self.asset_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 1 * len(self.asset_)
+    for i in xrange(len(self.asset_)): n += self.lengthString(self.asset_[i].ByteSize())
+    return n
+
+  def ByteSizePartial(self):
+    n = 0
+    n += 1 * len(self.asset_)
+    for i in xrange(len(self.asset_)): n += self.lengthString(self.asset_[i].ByteSizePartial())
+    return n
+
+  def Clear(self):
+    self.clear_asset()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.asset_)):
+      out.putVarInt32(10)
+      out.putVarInt32(self.asset_[i].ByteSize())
+      self.asset_[i].OutputUnchecked(out)
+
+  def OutputPartial(self, out):
+    for i in xrange(len(self.asset_)):
+      out.putVarInt32(10)
+      out.putVarInt32(self.asset_[i].ByteSizePartial())
+      self.asset_[i].OutputPartial(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.add_asset().TryMerge(tmp)
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.asset_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("asset%s <\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+      cnt+=1
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kasset = 1
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "asset",
+  }, 1)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ConversionInput_AuxData(ProtocolBuffer.ProtocolMessage):
+  has_key_ = 0
+  key_ = ""
+  has_value_ = 0
+  value_ = ""
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def key(self): return self.key_
+
+  def set_key(self, x):
+    self.has_key_ = 1
+    self.key_ = x
+
+  def clear_key(self):
+    if self.has_key_:
+      self.has_key_ = 0
+      self.key_ = ""
+
+  def has_key(self): return self.has_key_
+
+  def value(self): return self.value_
+
+  def set_value(self, x):
+    self.has_value_ = 1
+    self.value_ = x
+
+  def clear_value(self):
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = ""
+
+  def has_value(self): return self.has_value_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_key()): self.set_key(x.key())
+    if (x.has_value()): self.set_value(x.value())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_key_ != x.has_key_: return 0
+    if self.has_key_ and self.key_ != x.key_: return 0
+    if self.has_value_ != x.has_value_: return 0
+    if self.has_value_ and self.value_ != x.value_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_key_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: key not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.key_))
+    if (self.has_value_): n += 1 + self.lengthString(len(self.value_))
+    return n + 1
+
+  def ByteSizePartial(self):
+    n = 0
+    if (self.has_key_):
+      n += 1
+      n += self.lengthString(len(self.key_))
+    if (self.has_value_): n += 1 + self.lengthString(len(self.value_))
+    return n
+
+  def Clear(self):
+    self.clear_key()
+    self.clear_value()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putPrefixedString(self.key_)
+    if (self.has_value_):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.value_)
+
+  def OutputPartial(self, out):
+    if (self.has_key_):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.key_)
+    if (self.has_value_):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.value_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_key(d.getPrefixedString())
+        continue
+      if tt == 18:
+        self.set_value(d.getPrefixedString())
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_))
+    if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kkey = 1
+  kvalue = 2
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "key",
+    2: "value",
+  }, 2)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+    2: ProtocolBuffer.Encoder.STRING,
+  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ConversionInput(ProtocolBuffer.ProtocolMessage):
+  has_input_ = 0
+  has_output_mime_type_ = 0
+  output_mime_type_ = ""
+
+  def __init__(self, contents=None):
+    self.input_ = DocumentInfo()
+    self.flag_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def input(self): return self.input_
+
+  def mutable_input(self): self.has_input_ = 1; return self.input_
+
+  def clear_input(self):self.has_input_ = 0; self.input_.Clear()
+
+  def has_input(self): return self.has_input_
+
+  def output_mime_type(self): return self.output_mime_type_
+
+  def set_output_mime_type(self, x):
+    self.has_output_mime_type_ = 1
+    self.output_mime_type_ = x
+
+  def clear_output_mime_type(self):
+    if self.has_output_mime_type_:
+      self.has_output_mime_type_ = 0
+      self.output_mime_type_ = ""
+
+  def has_output_mime_type(self): return self.has_output_mime_type_
+
+  def flag_size(self): return len(self.flag_)
+  def flag_list(self): return self.flag_
+
+  def flag(self, i):
+    return self.flag_[i]
+
+  def mutable_flag(self, i):
+    return self.flag_[i]
+
+  def add_flag(self):
+    x = ConversionInput_AuxData()
+    self.flag_.append(x)
+    return x
+
+  def clear_flag(self):
+    self.flag_ = []
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_input()): self.mutable_input().MergeFrom(x.input())
+    if (x.has_output_mime_type()): self.set_output_mime_type(x.output_mime_type())
+    for i in xrange(x.flag_size()): self.add_flag().CopyFrom(x.flag(i))
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_input_ != x.has_input_: return 0
+    if self.has_input_ and self.input_ != x.input_: return 0
+    if self.has_output_mime_type_ != x.has_output_mime_type_: return 0
+    if self.has_output_mime_type_ and self.output_mime_type_ != x.output_mime_type_: return 0
+    if len(self.flag_) != len(x.flag_): return 0
+    for e1, e2 in zip(self.flag_, x.flag_):
+      if e1 != e2: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_input_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: input not set.')
+    elif not self.input_.IsInitialized(debug_strs): initialized = 0
+    if (not self.has_output_mime_type_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: output_mime_type not set.')
+    for p in self.flag_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(self.input_.ByteSize())
+    n += self.lengthString(len(self.output_mime_type_))
+    n += 1 * len(self.flag_)
+    for i in xrange(len(self.flag_)): n += self.lengthString(self.flag_[i].ByteSize())
+    return n + 2
+
+  def ByteSizePartial(self):
+    n = 0
+    if (self.has_input_):
+      n += 1
+      n += self.lengthString(self.input_.ByteSizePartial())
+    if (self.has_output_mime_type_):
+      n += 1
+      n += self.lengthString(len(self.output_mime_type_))
+    n += 1 * len(self.flag_)
+    for i in xrange(len(self.flag_)): n += self.lengthString(self.flag_[i].ByteSizePartial())
+    return n
+
+  def Clear(self):
+    self.clear_input()
+    self.clear_output_mime_type()
+    self.clear_flag()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putVarInt32(self.input_.ByteSize())
+    self.input_.OutputUnchecked(out)
+    out.putVarInt32(18)
+    out.putPrefixedString(self.output_mime_type_)
+    for i in xrange(len(self.flag_)):
+      out.putVarInt32(26)
+      out.putVarInt32(self.flag_[i].ByteSize())
+      self.flag_[i].OutputUnchecked(out)
+
+  def OutputPartial(self, out):
+    if (self.has_input_):
+      out.putVarInt32(10)
+      out.putVarInt32(self.input_.ByteSizePartial())
+      self.input_.OutputPartial(out)
+    if (self.has_output_mime_type_):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.output_mime_type_)
+    for i in xrange(len(self.flag_)):
+      out.putVarInt32(26)
+      out.putVarInt32(self.flag_[i].ByteSizePartial())
+      self.flag_[i].OutputPartial(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_input().TryMerge(tmp)
+        continue
+      if tt == 18:
+        self.set_output_mime_type(d.getPrefixedString())
+        continue
+      if tt == 26:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.add_flag().TryMerge(tmp)
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_input_:
+      res+=prefix+"input <\n"
+      res+=self.input_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    if self.has_output_mime_type_: res+=prefix+("output_mime_type: %s\n" % self.DebugFormatString(self.output_mime_type_))
+    cnt=0
+    for e in self.flag_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("flag%s <\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+      cnt+=1
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kinput = 1
+  koutput_mime_type = 2
+  kflag = 3
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "input",
+    2: "output_mime_type",
+    3: "flag",
+  }, 3)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+    2: ProtocolBuffer.Encoder.STRING,
+    3: ProtocolBuffer.Encoder.STRING,
+  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ConversionOutput(ProtocolBuffer.ProtocolMessage):
+  has_error_code_ = 0
+  error_code_ = 0
+  has_output_ = 0
+  output_ = None
+
+  def __init__(self, contents=None):
+    self.lazy_init_lock_ = thread.allocate_lock()
+    if contents is not None: self.MergeFromString(contents)
+
+  def error_code(self): return self.error_code_
+
+  def set_error_code(self, x):
+    self.has_error_code_ = 1
+    self.error_code_ = x
+
+  def clear_error_code(self):
+    if self.has_error_code_:
+      self.has_error_code_ = 0
+      self.error_code_ = 0
+
+  def has_error_code(self): return self.has_error_code_
+
+  def output(self):
+    if self.output_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.output_ is None: self.output_ = DocumentInfo()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.output_
+
+  def mutable_output(self): self.has_output_ = 1; return self.output()
+
+  def clear_output(self):
+
+    if self.has_output_:
+      self.has_output_ = 0;
+      if self.output_ is not None: self.output_.Clear()
+
+  def has_output(self): return self.has_output_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_error_code()): self.set_error_code(x.error_code())
+    if (x.has_output()): self.mutable_output().MergeFrom(x.output())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_error_code_ != x.has_error_code_: return 0
+    if self.has_error_code_ and self.error_code_ != x.error_code_: return 0
+    if self.has_output_ != x.has_output_: return 0
+    if self.has_output_ and self.output_ != x.output_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_error_code_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: error_code not set.')
+    if (self.has_output_ and not self.output_.IsInitialized(debug_strs)): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthVarInt64(self.error_code_)
+    if (self.has_output_): n += 1 + self.lengthString(self.output_.ByteSize())
+    return n + 1
+
+  def ByteSizePartial(self):
+    n = 0
+    if (self.has_error_code_):
+      n += 1
+      n += self.lengthVarInt64(self.error_code_)
+    if (self.has_output_): n += 1 + self.lengthString(self.output_.ByteSizePartial())
+    return n
+
+  def Clear(self):
+    self.clear_error_code()
+    self.clear_output()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(8)
+    out.putVarInt32(self.error_code_)
+    if (self.has_output_):
+      out.putVarInt32(18)
+      out.putVarInt32(self.output_.ByteSize())
+      self.output_.OutputUnchecked(out)
+
+  def OutputPartial(self, out):
+    if (self.has_error_code_):
+      out.putVarInt32(8)
+      out.putVarInt32(self.error_code_)
+    if (self.has_output_):
+      out.putVarInt32(18)
+      out.putVarInt32(self.output_.ByteSizePartial())
+      self.output_.OutputPartial(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 8:
+        self.set_error_code(d.getVarInt32())
+        continue
+      if tt == 18:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_output().TryMerge(tmp)
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_error_code_: res+=prefix+("error_code: %s\n" % self.DebugFormatInt32(self.error_code_))
+    if self.has_output_:
+      res+=prefix+"output <\n"
+      res+=self.output_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kerror_code = 1
+  koutput = 2
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "error_code",
+    2: "output",
+  }, 2)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.NUMERIC,
+    2: ProtocolBuffer.Encoder.STRING,
+  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ConversionRequest(ProtocolBuffer.ProtocolMessage):
+
+  def __init__(self, contents=None):
+    self.conversion_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def conversion_size(self): return len(self.conversion_)
+  def conversion_list(self): return self.conversion_
+
+  def conversion(self, i):
+    return self.conversion_[i]
+
+  def mutable_conversion(self, i):
+    return self.conversion_[i]
+
+  def add_conversion(self):
+    x = ConversionInput()
+    self.conversion_.append(x)
+    return x
+
+  def clear_conversion(self):
+    self.conversion_ = []
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.conversion_size()): self.add_conversion().CopyFrom(x.conversion(i))
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.conversion_) != len(x.conversion_): return 0
+    for e1, e2 in zip(self.conversion_, x.conversion_):
+      if e1 != e2: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    for p in self.conversion_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 1 * len(self.conversion_)
+    for i in xrange(len(self.conversion_)): n += self.lengthString(self.conversion_[i].ByteSize())
+    return n
+
+  def ByteSizePartial(self):
+    n = 0
+    n += 1 * len(self.conversion_)
+    for i in xrange(len(self.conversion_)): n += self.lengthString(self.conversion_[i].ByteSizePartial())
+    return n
+
+  def Clear(self):
+    self.clear_conversion()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.conversion_)):
+      out.putVarInt32(10)
+      out.putVarInt32(self.conversion_[i].ByteSize())
+      self.conversion_[i].OutputUnchecked(out)
+
+  def OutputPartial(self, out):
+    for i in xrange(len(self.conversion_)):
+      out.putVarInt32(10)
+      out.putVarInt32(self.conversion_[i].ByteSizePartial())
+      self.conversion_[i].OutputPartial(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.add_conversion().TryMerge(tmp)
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.conversion_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("conversion%s <\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+      cnt+=1
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kconversion = 1
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "conversion",
+  }, 1)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ConversionResponse(ProtocolBuffer.ProtocolMessage):
+
+  def __init__(self, contents=None):
+    self.result_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def result_size(self): return len(self.result_)
+  def result_list(self): return self.result_
+
+  def result(self, i):
+    return self.result_[i]
+
+  def mutable_result(self, i):
+    return self.result_[i]
+
+  def add_result(self):
+    x = ConversionOutput()
+    self.result_.append(x)
+    return x
+
+  def clear_result(self):
+    self.result_ = []
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.result_size()): self.add_result().CopyFrom(x.result(i))
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.result_) != len(x.result_): return 0
+    for e1, e2 in zip(self.result_, x.result_):
+      if e1 != e2: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    for p in self.result_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 1 * len(self.result_)
+    for i in xrange(len(self.result_)): n += self.lengthString(self.result_[i].ByteSize())
+    return n
+
+  def ByteSizePartial(self):
+    n = 0
+    n += 1 * len(self.result_)
+    for i in xrange(len(self.result_)): n += self.lengthString(self.result_[i].ByteSizePartial())
+    return n
+
+  def Clear(self):
+    self.clear_result()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.result_)):
+      out.putVarInt32(10)
+      out.putVarInt32(self.result_[i].ByteSize())
+      self.result_[i].OutputUnchecked(out)
+
+  def OutputPartial(self, out):
+    for i in xrange(len(self.result_)):
+      out.putVarInt32(10)
+      out.putVarInt32(self.result_[i].ByteSizePartial())
+      self.result_[i].OutputPartial(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.add_result().TryMerge(tmp)
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.result_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("result%s <\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+      cnt+=1
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kresult = 1
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "result",
+  }, 1)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+
+__all__ = ['ConversionServiceError','AssetInfo','DocumentInfo','ConversionInput_AuxData','ConversionInput','ConversionOutput','ConversionRequest','ConversionResponse']
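The generated classes above expose the SDK's classic ProtocolBuffer accessors (add_*, mutable_*, set_*, has_*) rather than proto2 attribute access. A minimal sketch of assembling a request with those accessors; the asset name, MIME types, and flag key are illustrative values, not part of the SDK:

# Sketch only: exercises the accessors defined above with placeholder values.
request = ConversionRequest()
conversion = request.add_conversion()           # appends a ConversionInput
asset = conversion.mutable_input().add_asset()  # DocumentInfo -> AssetInfo
asset.set_name('input.html')                    # illustrative asset
asset.set_mime_type('text/html')
asset.set_data('<b>hello</b>')
conversion.set_output_mime_type('application/pdf')
flag = conversion.add_flag()                    # ConversionInput_AuxData entry
flag.set_key('firstPage')                       # hypothetical flag name
flag.set_value('1')
assert request.IsInitialized()                  # required fields are now set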
diff --git a/google/appengine/api/conversion/conversion_service_pb2.py b/google/appengine/api/conversion/conversion_service_pb2.py
deleted file mode 100644
index 3b66a7a..0000000
--- a/google/appengine/api/conversion/conversion_service_pb2.py
+++ /dev/null
@@ -1,548 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-from google.net.proto2.python.public import descriptor
-from google.net.proto2.python.public import message
-from google.net.proto2.python.public import reflection
-from google.net.proto2.proto import descriptor_pb2
-import sys
-try:
-  __import__('google.net.rpc.python.rpc_internals_lite')
-  __import__('google.net.rpc.python.pywraprpc_lite')
-  rpc_internals = sys.modules.get('google.net.rpc.python.rpc_internals_lite')
-  pywraprpc = sys.modules.get('google.net.rpc.python.pywraprpc_lite')
-  _client_stub_base_class = rpc_internals.StubbyRPCBaseStub
-except ImportError:
-  _client_stub_base_class = object
-try:
-  __import__('google.net.rpc.python.rpcserver')
-  rpcserver = sys.modules.get('google.net.rpc.python.rpcserver')
-  _server_stub_base_class = rpcserver.BaseRpcServer
-except ImportError:
-  _server_stub_base_class = object
-
-
-
-
-
-DESCRIPTOR = descriptor.FileDescriptor(
-  name='apphosting/api/conversion/conversion_service.proto',
-  package='apphosting',
-  serialized_pb='\n2apphosting/api/conversion/conversion_service.proto\x12\napphosting\"\xc9\x01\n\x16\x43onversionServiceError\"\xae\x01\n\tErrorCode\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07TIMEOUT\x10\x01\x12\x13\n\x0fTRANSIENT_ERROR\x10\x02\x12\x12\n\x0eINTERNAL_ERROR\x10\x03\x12\x1a\n\x16UNSUPPORTED_CONVERSION\x10\x04\x12\x18\n\x14\x43ONVERSION_TOO_LARGE\x10\x05\x12\x18\n\x14TOO_MANY_CONVERSIONS\x10\x06\x12\x13\n\x0fINVALID_REQUEST\x10\x07\">\n\tAssetInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x42\x02\x08\x01\x12\x11\n\tmime_type\x18\x03 \x01(\t\"4\n\x0c\x44ocumentInfo\x12$\n\x05\x61sset\x18\x01 \x03(\x0b\x32\x15.apphosting.AssetInfo\"\xae\x01\n\x0f\x43onversionInput\x12\'\n\x05input\x18\x01 \x02(\x0b\x32\x18.apphosting.DocumentInfo\x12\x18\n\x10output_mime_type\x18\x02 \x02(\t\x12\x31\n\x04\x66lag\x18\x03 \x03(\x0b\x32#.apphosting.ConversionInput.AuxData\x1a%\n\x07\x41uxData\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x01(\t\"~\n\x10\x43onversionOutput\x12@\n\nerror_code\x18\x01 \x02(\x0e\x32,.apphosting.ConversionServiceError.ErrorCode\x12(\n\x06output\x18\x02 \x01(\x0b\x32\x18.apphosting.DocumentInfo\"D\n\x11\x43onversionRequest\x12/\n\nconversion\x18\x01 \x03(\x0b\x32\x1b.apphosting.ConversionInput\"B\n\x12\x43onversionResponse\x12,\n\x06result\x18\x01 \x03(\x0b\x32\x1c.apphosting.ConversionOutput2]\n\x11\x43onversionService\x12H\n\x07\x43onvert\x12\x1d.apphosting.ConversionRequest\x1a\x1e.apphosting.ConversionResponseB@\n#com.google.appengine.api.conversion\x10\x02 \x02(\x02\x42\x13\x43onversionServicePb')
-
-
-
-_CONVERSIONSERVICEERROR_ERRORCODE = descriptor.EnumDescriptor(
-  name='ErrorCode',
-  full_name='apphosting.ConversionServiceError.ErrorCode',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    descriptor.EnumValueDescriptor(
-      name='OK', index=0, number=0,
-      options=None,
-      type=None),
-    descriptor.EnumValueDescriptor(
-      name='TIMEOUT', index=1, number=1,
-      options=None,
-      type=None),
-    descriptor.EnumValueDescriptor(
-      name='TRANSIENT_ERROR', index=2, number=2,
-      options=None,
-      type=None),
-    descriptor.EnumValueDescriptor(
-      name='INTERNAL_ERROR', index=3, number=3,
-      options=None,
-      type=None),
-    descriptor.EnumValueDescriptor(
-      name='UNSUPPORTED_CONVERSION', index=4, number=4,
-      options=None,
-      type=None),
-    descriptor.EnumValueDescriptor(
-      name='CONVERSION_TOO_LARGE', index=5, number=5,
-      options=None,
-      type=None),
-    descriptor.EnumValueDescriptor(
-      name='TOO_MANY_CONVERSIONS', index=6, number=6,
-      options=None,
-      type=None),
-    descriptor.EnumValueDescriptor(
-      name='INVALID_REQUEST', index=7, number=7,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=94,
-  serialized_end=268,
-)
-
-
-_CONVERSIONSERVICEERROR = descriptor.Descriptor(
-  name='ConversionServiceError',
-  full_name='apphosting.ConversionServiceError',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-    _CONVERSIONSERVICEERROR_ERRORCODE,
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  serialized_start=67,
-  serialized_end=268,
-)
-
-
-_ASSETINFO = descriptor.Descriptor(
-  name='AssetInfo',
-  full_name='apphosting.AssetInfo',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    descriptor.FieldDescriptor(
-      name='name', full_name='apphosting.AssetInfo.name', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=unicode("", "utf-8"),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-    descriptor.FieldDescriptor(
-      name='data', full_name='apphosting.AssetInfo.data', index=1,
-      number=2, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value="",
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\010\001')),
-    descriptor.FieldDescriptor(
-      name='mime_type', full_name='apphosting.AssetInfo.mime_type', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=unicode("", "utf-8"),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  serialized_start=270,
-  serialized_end=332,
-)
-
-
-_DOCUMENTINFO = descriptor.Descriptor(
-  name='DocumentInfo',
-  full_name='apphosting.DocumentInfo',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    descriptor.FieldDescriptor(
-      name='asset', full_name='apphosting.DocumentInfo.asset', index=0,
-      number=1, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  serialized_start=334,
-  serialized_end=386,
-)
-
-
-_CONVERSIONINPUT_AUXDATA = descriptor.Descriptor(
-  name='AuxData',
-  full_name='apphosting.ConversionInput.AuxData',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    descriptor.FieldDescriptor(
-      name='key', full_name='apphosting.ConversionInput.AuxData.key', index=0,
-      number=1, type=9, cpp_type=9, label=2,
-      has_default_value=False, default_value=unicode("", "utf-8"),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-    descriptor.FieldDescriptor(
-      name='value', full_name='apphosting.ConversionInput.AuxData.value', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=unicode("", "utf-8"),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  serialized_start=526,
-  serialized_end=563,
-)
-
-_CONVERSIONINPUT = descriptor.Descriptor(
-  name='ConversionInput',
-  full_name='apphosting.ConversionInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    descriptor.FieldDescriptor(
-      name='input', full_name='apphosting.ConversionInput.input', index=0,
-      number=1, type=11, cpp_type=10, label=2,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-    descriptor.FieldDescriptor(
-      name='output_mime_type', full_name='apphosting.ConversionInput.output_mime_type', index=1,
-      number=2, type=9, cpp_type=9, label=2,
-      has_default_value=False, default_value=unicode("", "utf-8"),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-    descriptor.FieldDescriptor(
-      name='flag', full_name='apphosting.ConversionInput.flag', index=2,
-      number=3, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[_CONVERSIONINPUT_AUXDATA, ],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  serialized_start=389,
-  serialized_end=563,
-)
-
-
-_CONVERSIONOUTPUT = descriptor.Descriptor(
-  name='ConversionOutput',
-  full_name='apphosting.ConversionOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    descriptor.FieldDescriptor(
-      name='error_code', full_name='apphosting.ConversionOutput.error_code', index=0,
-      number=1, type=14, cpp_type=8, label=2,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-    descriptor.FieldDescriptor(
-      name='output', full_name='apphosting.ConversionOutput.output', index=1,
-      number=2, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  serialized_start=565,
-  serialized_end=691,
-)
-
-
-_CONVERSIONREQUEST = descriptor.Descriptor(
-  name='ConversionRequest',
-  full_name='apphosting.ConversionRequest',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    descriptor.FieldDescriptor(
-      name='conversion', full_name='apphosting.ConversionRequest.conversion', index=0,
-      number=1, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  serialized_start=693,
-  serialized_end=761,
-)
-
-
-_CONVERSIONRESPONSE = descriptor.Descriptor(
-  name='ConversionResponse',
-  full_name='apphosting.ConversionResponse',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    descriptor.FieldDescriptor(
-      name='result', full_name='apphosting.ConversionResponse.result', index=0,
-      number=1, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  serialized_start=763,
-  serialized_end=829,
-)
-
-_CONVERSIONSERVICEERROR_ERRORCODE.containing_type = _CONVERSIONSERVICEERROR;
-_DOCUMENTINFO.fields_by_name['asset'].message_type = _ASSETINFO
-_CONVERSIONINPUT_AUXDATA.containing_type = _CONVERSIONINPUT;
-_CONVERSIONINPUT.fields_by_name['input'].message_type = _DOCUMENTINFO
-_CONVERSIONINPUT.fields_by_name['flag'].message_type = _CONVERSIONINPUT_AUXDATA
-_CONVERSIONOUTPUT.fields_by_name['error_code'].enum_type = _CONVERSIONSERVICEERROR_ERRORCODE
-_CONVERSIONOUTPUT.fields_by_name['output'].message_type = _DOCUMENTINFO
-_CONVERSIONREQUEST.fields_by_name['conversion'].message_type = _CONVERSIONINPUT
-_CONVERSIONRESPONSE.fields_by_name['result'].message_type = _CONVERSIONOUTPUT
-DESCRIPTOR.message_types_by_name['ConversionServiceError'] = _CONVERSIONSERVICEERROR
-DESCRIPTOR.message_types_by_name['AssetInfo'] = _ASSETINFO
-DESCRIPTOR.message_types_by_name['DocumentInfo'] = _DOCUMENTINFO
-DESCRIPTOR.message_types_by_name['ConversionInput'] = _CONVERSIONINPUT
-DESCRIPTOR.message_types_by_name['ConversionOutput'] = _CONVERSIONOUTPUT
-DESCRIPTOR.message_types_by_name['ConversionRequest'] = _CONVERSIONREQUEST
-DESCRIPTOR.message_types_by_name['ConversionResponse'] = _CONVERSIONRESPONSE
-
-class ConversionServiceError(message.Message):
-  __metaclass__ = reflection.GeneratedProtocolMessageType
-  DESCRIPTOR = _CONVERSIONSERVICEERROR
-
-
-
-class AssetInfo(message.Message):
-  __metaclass__ = reflection.GeneratedProtocolMessageType
-  DESCRIPTOR = _ASSETINFO
-
-
-
-class DocumentInfo(message.Message):
-  __metaclass__ = reflection.GeneratedProtocolMessageType
-  DESCRIPTOR = _DOCUMENTINFO
-
-
-
-class ConversionInput(message.Message):
-  __metaclass__ = reflection.GeneratedProtocolMessageType
-
-  class AuxData(message.Message):
-    __metaclass__ = reflection.GeneratedProtocolMessageType
-    DESCRIPTOR = _CONVERSIONINPUT_AUXDATA
-
-
-  DESCRIPTOR = _CONVERSIONINPUT
-
-
-
-class ConversionOutput(message.Message):
-  __metaclass__ = reflection.GeneratedProtocolMessageType
-  DESCRIPTOR = _CONVERSIONOUTPUT
-
-
-
-class ConversionRequest(message.Message):
-  __metaclass__ = reflection.GeneratedProtocolMessageType
-  DESCRIPTOR = _CONVERSIONREQUEST
-
-
-
-class ConversionResponse(message.Message):
-  __metaclass__ = reflection.GeneratedProtocolMessageType
-  DESCRIPTOR = _CONVERSIONRESPONSE
-
-
-
-
-
-class _ConversionService_ClientBaseStub(_client_stub_base_class):
-  """Makes Stubby RPC calls to a ConversionService server."""
-
-  __slots__ = (
-      '_protorpc_Convert', '_full_name_Convert',
-  )
-
-  def __init__(self, rpc_stub):
-    self._stub = rpc_stub
-
-    self._protorpc_Convert = pywraprpc.RPC()
-    self._full_name_Convert = self._stub.GetFullMethodName(
-        'Convert')
-
-  def Convert(self, request, rpc=None, callback=None, response=None):
-    """Make a Convert RPC call.
-
-    Args:
-      request: a ConversionRequest instance.
-      rpc: Optional RPC instance to use for the call.
-      callback: Optional final callback. Will be called as
-          callback(rpc, result) when the rpc completes. If None, the
-          call is synchronous.
-      response: Optional ProtocolMessage to be filled in with response.
-
-    Returns:
-      The ConversionResponse if callback is None. Otherwise, returns None.
-    """
-
-    if response is None:
-      response = ConversionResponse
-    return self._MakeCall(rpc,
-                          self._full_name_Convert,
-                          'Convert',
-                          request,
-                          response,
-                          callback,
-                          self._protorpc_Convert)
-
-
-class _ConversionService_ClientStub(_ConversionService_ClientBaseStub):
-  __slots__ = ('_params',)
-  def __init__(self, rpc_stub_parameters, service_name):
-    if service_name is None:
-      service_name = 'ConversionService'
-    _ConversionService_ClientBaseStub.__init__(self, pywraprpc.RPC_GenericStub(service_name, rpc_stub_parameters))
-    self._params = rpc_stub_parameters
-
-
-class _ConversionService_RPC2ClientStub(_ConversionService_ClientBaseStub):
-  __slots__ = ()
-  def __init__(self, server, channel, service_name):
-    if service_name is None:
-      service_name = 'ConversionService'
-    if channel is not None:
-      if channel.version() == 1:
-        raise RuntimeError('Expecting an RPC2 channel to create the stub')
-      _ConversionService_ClientBaseStub.__init__(self, pywraprpc.RPC_GenericStub(service_name, channel))
-    elif server is not None:
-      _ConversionService_ClientBaseStub.__init__(self, pywraprpc.RPC_GenericStub(service_name, pywraprpc.NewClientChannel(server)))
-    else:
-      raise RuntimeError('Invalid argument combination to create a stub')
-
-
-class ConversionService(_server_stub_base_class):
-  """Base class for ConversionService Stubby servers."""
-
-  def __init__(self, *args, **kwargs):
-    """Creates a Stubby RPC server.
-
-    See BaseRpcServer.__init__ in rpcserver.py for detail on arguments.
-    """
-    if _server_stub_base_class is object:
-      raise NotImplementedError('Add //net/rpc/python:rpcserver as a '
-                                'dependency for Stubby server support.')
-    _server_stub_base_class.__init__(self, 'apphosting.ConversionService', *args, **kwargs)
-
-  @staticmethod
-  def NewStub(rpc_stub_parameters, service_name=None):
-    """Creates a new ConversionService Stubby client stub.
-
-    Args:
-      rpc_stub_parameters: an RPC_StubParameter instance.
-      service_name: the service name used by the Stubby server.
-    """
-
-    if _client_stub_base_class is object:
-      raise RuntimeError('Add //net/rpc/python as a dependency to use Stubby')
-    return _ConversionService_ClientStub(rpc_stub_parameters, service_name)
-
-  @staticmethod
-  def NewRPC2Stub(server=None, channel=None, service_name=None):
-    """Creates a new ConversionService Stubby2 client stub.
-
-    Args:
-      server: host:port or bns address.
-      channel: directly use a channel to create a stub. Will ignore server
-          argument if this is specified.
-      service_name: the service name used by the Stubby server.
-    """
-
-    if _client_stub_base_class is object:
-      raise RuntimeError('Add //net/rpc/python as a dependency to use Stubby')
-    return _ConversionService_RPC2ClientStub(server, channel, service_name)
-
-  def Convert(self, rpc, request, response):
-    """Handles a Convert RPC call. You should override this.
-
-    Args:
-      rpc: a Stubby RPC object
-      request: a ConversionRequest that contains the client request
-      response: a ConversionResponse that should be modified to send the response
-    """
-    raise NotImplementedError
-
-  def _AddMethodAttributes(self):
-    """Sets attributes on Python RPC handlers.
-
-    See BaseRpcServer in rpcserver.py for details.
-    """
-    rpcserver._GetHandlerDecorator(
-        self.Convert.im_func,
-        ConversionRequest,
-        ConversionResponse,
-        None,
-        'none')
-
-
diff --git a/google/appengine/api/conversion/conversion_stub.py b/google/appengine/api/conversion/conversion_stub.py
index 81eda06..deaf0eb 100644
--- a/google/appengine/api/conversion/conversion_stub.py
+++ b/google/appengine/api/conversion/conversion_stub.py
@@ -31,7 +31,7 @@
 
 from google.appengine.api import apiproxy_stub
 from google.appengine.api import conversion
-from google.appengine.api.conversion import conversion_service_pb2
+from google.appengine.api.conversion import conversion_service_pb
 from google.appengine.runtime import apiproxy_errors
 
 
@@ -59,48 +59,48 @@
   """
   if not request.IsInitialized():
     raise apiproxy_errors.ApplicationError(
-        conversion_service_pb2.ConversionServiceError.INVALID_REQUEST,
+        conversion_service_pb.ConversionServiceError.INVALID_REQUEST,
         "The conversion request is not initialized correctly")
 
-  if not request.conversion:
+  if not request.conversion_list():
     raise apiproxy_errors.ApplicationError(
-        conversion_service_pb2.ConversionServiceError.INVALID_REQUEST,
+        conversion_service_pb.ConversionServiceError.INVALID_REQUEST,
         "At least one conversion is required in the request")
 
-  if len(request.conversion) > conversion.CONVERSION_MAX_NUM_PER_REQUEST:
+  if request.conversion_size() > conversion.CONVERSION_MAX_NUM_PER_REQUEST:
     raise apiproxy_errors.ApplicationError(
-        conversion_service_pb2.ConversionServiceError.TOO_MANY_CONVERSIONS,
+        conversion_service_pb.ConversionServiceError.TOO_MANY_CONVERSIONS,
         "At most ten conversions are allowed in the request")
 
-  for x in range(0, len(request.conversion)):
-    if (request.conversion[x].ByteSize() >
+  for x in range(0, request.conversion_size()):
+    if (request.conversion(x).ByteSize() >
         conversion.CONVERSION_MAX_DOC_SIZE_BYTES):
       raise apiproxy_errors.ApplicationError(
-          conversion_service_pb2.ConversionServiceError.CONVERSION_TOO_LARGE,
+          conversion_service_pb.ConversionServiceError.CONVERSION_TOO_LARGE,
           "Each conversion should not be over 10MB")
 
-    if not request.conversion[x].input.asset:
+    if not request.conversion(x).input().asset_list():
       raise apiproxy_errors.ApplicationError(
-          conversion_service_pb2.ConversionServiceError.INVALID_REQUEST,
+          conversion_service_pb.ConversionServiceError.INVALID_REQUEST,
           "At least one asset is required in input document")
 
-    for y in range(0, len(request.conversion[x].input.asset)):
-      input_asset = request.conversion[x].input.asset[y]
-      if not input_asset.HasField("data"):
+    for y in range(0, request.conversion(x).input().asset_size()):
+      input_asset = request.conversion(x).input().asset(y)
+      if not input_asset.has_data():
         raise apiproxy_errors.ApplicationError(
-            conversion_service_pb2.ConversionServiceError.INVALID_REQUEST,
+            conversion_service_pb.ConversionServiceError.INVALID_REQUEST,
             "Asset data field must be set in input document")
-      if not input_asset.HasField("mime_type"):
+      if not input_asset.has_mime_type():
         raise apiproxy_errors.ApplicationError(
-            conversion_service_pb2.ConversionServiceError.INVALID_REQUEST,
+            conversion_service_pb.ConversionServiceError.INVALID_REQUEST,
             "Asset mime type field must be set in input document")
 
 
 
-    output_mime_type = request.conversion[x].output_mime_type
+    output_mime_type = request.conversion(x).output_mime_type()
     if output_mime_type not in CONVERTED_FILES_STUB:
       raise apiproxy_errors.ApplicationError(
-          conversion_service_pb2.ConversionServiceError.UNSUPPORTED_CONVERSION,
+          conversion_service_pb.ConversionServiceError.UNSUPPORTED_CONVERSION,
           "Output mime type %s is not supported" % output_mime_type)
 
 
@@ -123,13 +123,13 @@
   def _Dynamic_Convert(self, request, response):
     _validate_conversion_request(request)
 
-    for x in range(0, len(request.conversion)):
-      result = response.result.add()
-      result.error_code = conversion_service_pb2.ConversionServiceError.OK
-      output_mime_type = request.conversion[x].output_mime_type
-      output_asset = result.output.asset.add()
-      output_asset.mime_type = output_mime_type
-      output_asset.data = CONVERTED_FILES_STUB[output_mime_type]
-      first_input_asset = request.conversion[x].input.asset[0]
-      if first_input_asset.HasField("name"):
-        output_asset.name = first_input_asset.name
+    for x in range(0, request.conversion_size()):
+      result = response.add_result()
+      result.set_error_code(conversion_service_pb.ConversionServiceError.OK)
+      output_mime_type = request.conversion(x).output_mime_type()
+      output_asset = result.mutable_output().add_asset()
+      output_asset.set_mime_type(output_mime_type)
+      output_asset.set_data(CONVERTED_FILES_STUB[output_mime_type])
+      first_input_asset = request.conversion(x).input().asset(0)
+      if first_input_asset.has_name():
+        output_asset.set_name(first_input_asset.name())
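The stub edits above are a mechanical translation from the proto2 attribute style of conversion_service_pb2 to the accessor style of conversion_service_pb. The correspondence, written out for the calls that appear in this hunk:

# proto2 style (removed)                ProtocolBuffer style (added)
# len(request.conversion)           ->  request.conversion_size()
# request.conversion[x]             ->  request.conversion(x)
# conversion.input.asset            ->  conversion.input().asset_list()
# input_asset.HasField("data")      ->  input_asset.has_data()
# response.result.add()             ->  response.add_result()
# result.error_code = code          ->  result.set_error_code(code)
# result.output.asset.add()         ->  result.mutable_output().add_asset()
# output_asset.mime_type = mime     ->  output_asset.set_mime_type(mime)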
diff --git a/google/appengine/api/datastore.py b/google/appengine/api/datastore.py
index dde6ac4..8363ade 100755
--- a/google/appengine/api/datastore.py
+++ b/google/appengine/api/datastore.py
@@ -216,12 +216,99 @@
     raise datastore_errors.BadArgumentError(
       'rpc= argument should be None or a UserRPC instance')
   if config is not None:
-    if not isinstance(config, (datastore_rpc.Configuration,
-                               apiproxy_stub_map.UserRPC)):
+    if not (datastore_rpc.Configuration.is_configuration(config) or
+            isinstance(config, apiproxy_stub_map.UserRPC)):
       raise datastore_errors.BadArgumentError(
       'config= argument should be None or a Configuration instance')
   return config
 
+class _BaseIndex(object):
+
+
+  BUILDING, SERVING, DELETING, ERROR = range(4)
+
+
+  ASCENDING = datastore_query.PropertyOrder.ASCENDING
+  DESCENDING = datastore_query.PropertyOrder.DESCENDING
+
+  def __init__(self, index_id, kind, has_ancestor, properties):
+    """Construct a datastore index instance.
+
+    Args:
+      index_id: Required long; Uniquely identifies the index
+      kind: Required string; Specifies the kind of the entities to index
+      has_ancestor: Required boolean; indicates if the index supports a query
+        that filters entities by the entity group parent
+      properties: Required list of (string, int) tuples; The entity properties
+        to index. First item in a tuple is the property name and the second
+        item is the sorting direction (ASCENDING|DESCENDING).
+        The order of the properties is based on the order in the index.
+    """
+    argument_error = datastore_errors.BadArgumentError
+    datastore_types.ValidateInteger(index_id, 'index_id', argument_error)
+    datastore_types.ValidateString(kind, 'kind', argument_error)
+    if not isinstance(properties, (list, tuple)):
+      raise argument_error('properties must be a list or a tuple')
+    for idx, index_property in enumerate(properties):
+      if not isinstance(index_property, (list, tuple)):
+        raise argument_error('property[%d] must be a list or a tuple' % idx)
+      if len(index_property) != 2:
+        raise argument_error('property[%d] length should be 2 but was %d' %
+                        (idx, len(index_property)))
+      datastore_types.ValidateString(index_property[0], 'property name',
+                                     argument_error)
+      _BaseIndex.__ValidateEnum(index_property[1],
+                               (self.ASCENDING, self.DESCENDING),
+                               'sort direction')
+    self.__id = long(index_id)
+    self.__kind = kind
+    self.__has_ancestor = bool(has_ancestor)
+    self.__properties = properties
+
+  @staticmethod
+  def __ValidateEnum(value, accepted_values, name='value',
+                     exception=datastore_errors.BadArgumentError):
+    datastore_types.ValidateInteger(value, name, exception)
+    if not value in accepted_values:
+      raise exception('%s should be one of %s but was %d' %
+                      (name, str(accepted_values), value))
+
+  def _Id(self):
+    """Returns the index id, a long."""
+    return self.__id
+
+  def _Kind(self):
+    """Returns the index kind, a string."""
+    return self.__kind
+
+  def _HasAncestor(self):
+    """Indicates if this is an ancestor index, a boolean."""
+    return self.__has_ancestor
+
+  def _Properties(self):
+    """Returns the index properties. a tuple of
+    (index name as a string, [ASCENDING|DESCENDING]) tuples.
+    """
+    return self.__properties
+
+  def __eq__(self, other):
+    return self.__id == other.__id
+
+  def __ne__(self, other):
+    return self.__id != other.__id
+
+  def __hash__(self):
+    return hash(self.__id)
+
+
+class Index(_BaseIndex):
+  """A datastore index."""
+
+  Id = _BaseIndex._Id
+  Kind = _BaseIndex._Kind
+  HasAncestor = _BaseIndex._HasAncestor
+  Properties = _BaseIndex._Properties
+
 
 class DatastoreAdapter(datastore_rpc.AbstractAdapter):
   """Adapter between datatypes defined here (Entity etc.) and protobufs.
@@ -229,6 +316,20 @@
   See the base class in datastore_rpc.py for more docs.
   """
 
+
+  index_state_mappings = {
+          entity_pb.CompositeIndex.ERROR: Index.ERROR,
+          entity_pb.CompositeIndex.DELETED: Index.DELETING,
+          entity_pb.CompositeIndex.READ_WRITE: Index.SERVING,
+          entity_pb.CompositeIndex.WRITE_ONLY: Index.BUILDING
+      }
+
+
+  index_direction_mappings = {
+          entity_pb.Index_Property.ASCENDING: Index.ASCENDING,
+          entity_pb.Index_Property.DESCENDING: Index.DESCENDING
+      }
+
   def key_to_pb(self, key):
     return key._Key__reference
 
@@ -241,6 +342,16 @@
   def pb_to_entity(self, pb):
     return Entity._FromPb(pb)
 
+  def pb_to_index(self, pb):
+    index_def = pb.definition()
+    properties = [(property.name(),
+          DatastoreAdapter.index_direction_mappings.get(property.direction()))
+          for property in index_def.property_list()]
+    index = Index(pb.id(), index_def.entity_type(), index_def.ancestor(),
+                  properties)
+    state = DatastoreAdapter.index_state_mappings.get(pb.state())
+    return index, state
+
 
 _adapter = DatastoreAdapter()
 _thread_local = threading.local()
@@ -393,7 +504,7 @@
   Returns:
     None or a datastore_rpc.Configuration object.
   """
-  if rpc is None or isinstance(rpc, datastore_rpc.Configuration):
+  if rpc is None or datastore_rpc.Configuration.is_configuration(rpc):
     return rpc
   read_policy = getattr(rpc, 'read_policy', None)
   return datastore_rpc.Configuration(deadline=rpc.deadline,
@@ -515,6 +626,39 @@
   """
   return GetAsync(keys, **kwargs).get_result()
 
+def GetIndexesAsync(**kwargs):
+  """Asynchronously retrieves the application indexes and their states.
+
+  Identical to GetIndexes() except returns an asynchronous object. Call
+  get_result() on the return value to block on the call and get the results.
+  """
+  extra_hook = kwargs.pop('extra_hook', None)
+  config = _GetConfigFromKwargs(kwargs)
+
+  def local_extra_hook(result):
+    if extra_hook:
+      return extra_hook(result)
+    return result
+
+  return _GetConnection().async_get_indexes(config, local_extra_hook)
+
+
+def GetIndexes(**kwargs):
+  """Retrieves the application indexes and their states.
+
+  Args:
+    config: Optional Configuration to use for this request, must be specified
+      as a keyword argument.
+
+  Returns:
+    A list of (Index, Index.[BUILDING|SERVING|DELETING|ERROR]) tuples.
+    An index can be in the following states:
+      Index.BUILDING: Index is being built and therefore can not serve queries
+      Index.SERVING: Index is ready to service queries
+      Index.DELETING: Index is being deleted
+      Index.ERROR: Index encountered an error in the BUILDING state
+  """
+  return GetIndexesAsync(**kwargs).get_result()
 
 def DeleteAsync(keys, **kwargs):
   """Asynchronously deletes one or more entities from the datastore.
@@ -1239,11 +1383,11 @@
         direction = Query.ASCENDING
 
       if (self.__kind is None and
-          (property != datastore_types._KEY_SPECIAL_PROPERTY or
+          (property != datastore_types.KEY_SPECIAL_PROPERTY or
           direction != Query.ASCENDING)):
         raise datastore_errors.BadArgumentError(
             'Only %s ascending orders are supported on kindless queries' %
-            datastore_types._KEY_SPECIAL_PROPERTY)
+            datastore_types.KEY_SPECIAL_PROPERTY)
 
       orderings[i] = (property, direction)
 
@@ -1667,11 +1811,11 @@
             ', '.join(self.INEQUALITY_OPERATORS))
 
     if (self.__kind is None and
-        property != datastore_types._KEY_SPECIAL_PROPERTY and
+        property != datastore_types.KEY_SPECIAL_PROPERTY and
         property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
       raise datastore_errors.BadFilterError(
           'Only %s filters are allowed on kindless queries.' %
-          datastore_types._KEY_SPECIAL_PROPERTY)
+          datastore_types.KEY_SPECIAL_PROPERTY)
 
     if property == datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY:
       if self.__kind:
@@ -1689,12 +1833,12 @@
 
 
 
-      if property == datastore_types._KEY_SPECIAL_PROPERTY:
+      if property == datastore_types.KEY_SPECIAL_PROPERTY:
         for value in values:
           if not isinstance(value, Key):
             raise datastore_errors.BadFilterError(
               '%s filter value must be a Key; received %s (a %s)' %
-              (datastore_types._KEY_SPECIAL_PROPERTY, value, typename(value)))
+              (datastore_types.KEY_SPECIAL_PROPERTY, value, typename(value)))
 
     return match
 
@@ -2172,10 +2316,10 @@
      number of times.
 
     Args:
-    # a function to be run inside the transaction
-    function: callable
-    # positional arguments to pass to the function
-    args: variable number of any type
+      function: a function to be run inside the transaction on all remaining
+        arguments
+      *args: positional arguments for function.
+      **kwargs: keyword arguments for function.
 
   Returns:
     the function's return value, if any
@@ -2183,8 +2327,7 @@
   Raises:
     TransactionFailedError, if the transaction could not be committed.
   """
-  return RunInTransactionCustomRetries(
-      DEFAULT_TRANSACTION_RETRIES, function, *args, **kwargs)
+  return RunInTransactionOptions(None, function, *args, **kwargs)
 
 
 
@@ -2193,6 +2336,29 @@
 def RunInTransactionCustomRetries(retries, function, *args, **kwargs):
   """Runs a function inside a datastore transaction.
 
+     Runs the user-provided function inside a transaction, with a specified
+     number of retries.
+
+    Args:
+      retries: number of retries (not counting the initial try)
+      function: a function to be run inside the transaction on all remaining
+        arguments
+      *args: positional arguments for function.
+      **kwargs: keyword arguments for function.
+
+  Returns:
+    the function's return value, if any
+
+  Raises:
+    TransactionFailedError, if the transaction could not be committed.
+  """
+  options = datastore_rpc.TransactionOptions(retries=retries)
+  return RunInTransactionOptions(options, function, *args, **kwargs)
+
+
+def RunInTransactionOptions(options, function, *args, **kwargs):
+  """Runs a function inside a datastore transaction.
+
   Runs the user-provided function inside a full-featured, ACID datastore
   transaction. Every Put, Get, and Delete call in the function is made within
   the transaction. All entities involved in these calls must belong to the
@@ -2246,12 +2412,12 @@
   Nested transactions are not supported.
 
   Args:
-    # number of retries (not counting the initial try)
-    retries: integer
-    # a function to be run inside the transaction
-    function: callable
-    # positional arguments to pass to the function
-    args: variable number of any type
+    options: TransactionOptions specifying options (number of retries, etc.) for
+      this transaction
+    function: a function to be run inside the transaction on all remaining
+      arguments
+    *args: positional arguments for function.
+    **kwargs: keyword arguments for function.
 
   Returns:
     the function's return value, if any
@@ -2268,9 +2434,9 @@
 
 
 
-  if retries < 0:
-    raise datastore_errors.BadRequestError(
-      'Number of retries should be non-negative number.')
+  retries = datastore_rpc.TransactionOptions.retries(options)
+  if retries is None:
+    retries = DEFAULT_TRANSACTION_RETRIES
 
 
   if IsInTransaction():
@@ -2280,7 +2446,7 @@
   old_connection = _GetConnection()
 
   for i in range(0, retries + 1):
-    new_connection = old_connection.new_transaction()
+    new_connection = old_connection.new_transaction(options)
     _SetConnection(new_connection)
     try:
       ok, result = _DoOneTry(new_connection, function, args, kwargs)
@@ -2300,8 +2466,8 @@
   Args:
     new_connection: The new, transactional, connection object.
     function: The function to call.
-    args: Tuple of positional arguments.
-    kwargs: Dict of keyword arguments.
+    *args: Tuple of positional arguments.
+    **kwargs: Dict of keyword arguments.
   """
 
   try:
@@ -2361,8 +2527,7 @@
 
 
 datastore_rpc._positional(1)
-def Transactional(func=None, require_new=False,
-                  retries=DEFAULT_TRANSACTION_RETRIES):
+def Transactional(_func=None, require_new=False, **kwargs):
   """A decorator that makes sure a function is run in a transaction.
 
   WARNING: Reading from the datastore while in a transaction will not see any
@@ -2370,24 +2535,24 @@
   on seeing all changes made in the calling scope, set require_new=True.
 
   Args:
+    _func: do not use.
     require_new: A bool that indicates the function requires its own transaction
       and cannot share a transaction with the calling scope (nested transactions
       are not currently supported by the datastore).
-    retries: An integer that indicates how many times the function should be
-      tried not including the inital attempt. This value is ignored if using
-      a transaction from the calling scope.
+    **kwargs: TransactionOptions configuration options.
 
   Returns:
     A wrapper for the given function that creates a new transaction if needed.
   """
-  if func is None:
-    return lambda function: Transactional(func=function,
+  if _func is None:
+    return lambda function: Transactional(_func=function,
                                           require_new=require_new,
-                                          retries=retries)
+                                          **kwargs)
+  options = datastore_rpc.TransactionOptions(**kwargs)
   def wrapper(*args, **kwds):
     if not require_new and IsInTransaction():
-      return func(*args, **kwds)
-    return RunInTransactionCustomRetries(retries, func, *args, **kwds)
+      return _func(*args, **kwds)
+    return RunInTransactionOptions(options, _func, *args, **kwds)
   return wrapper
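
As a usage sketch of the reworked decorator, any TransactionOptions keyword arguments are forwarded through **kwargs; only retries is shown here since it is the option exercised above, and the 'Account' kind is hypothetical (all touched entities are assumed to share one entity group, since nested and cross-group transactions are not supported).

    from google.appengine.api import datastore

    @datastore.Transactional(retries=2)
    def apply_credit(account_key, amount):
      account = datastore.Get(account_key)
      account['balance'] = account.get('balance', 0) + amount
      datastore.Put(account)

    # Outside a transaction the wrapper opens one; inside an existing
    # transaction it simply reuses it (unless require_new=True).
    account_key = datastore.Key.from_path('Account', 'alice')
    apply_credit(account_key, 100)
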
 
 
@@ -2441,7 +2606,7 @@
     if property == datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY:
       raise KeyError(property)
 
-    assert property == datastore_types._KEY_SPECIAL_PROPERTY
+    assert property == datastore_types.KEY_SPECIAL_PROPERTY
     return entity.key()
   else:
     return entity[property]
diff --git a/google/appengine/api/datastore_admin.py b/google/appengine/api/datastore_admin.py
index 8dc4ec6..c43da53 100755
--- a/google/appengine/api/datastore_admin.py
+++ b/google/appengine/api/datastore_admin.py
@@ -44,9 +44,6 @@
     list of entity_pb.CompositeIndex
   """
 
-
-
-
   req = api_base_pb.StringProto()
   req.set_value(datastore_types.ResolveAppId(_app))
   resp = datastore_pb.CompositeIndices()
diff --git a/google/appengine/api/datastore_file_stub.py b/google/appengine/api/datastore_file_stub.py
index 4598e1f..a3c1902 100755
--- a/google/appengine/api/datastore_file_stub.py
+++ b/google/appengine/api/datastore_file_stub.py
@@ -47,6 +47,7 @@
 import sys
 import tempfile
 import threading
+import weakref
 
 
 
@@ -56,7 +57,6 @@
 from google.appengine.api import datastore
 from google.appengine.api import datastore_types
 from google.appengine.datastore import datastore_pb
-from google.appengine.datastore import datastore_query
 from google.appengine.datastore import datastore_stub_util
 from google.appengine.runtime import apiproxy_errors
 from google.net.proto import ProtocolBuffer
@@ -333,7 +333,8 @@
     datastore_stub_util.BaseDatastore.__init__(self, require_indexes,
                                                consistency_policy)
     apiproxy_stub.APIProxyStub.__init__(self, service_name)
-    datastore_stub_util.DatastoreStub.__init__(self, self, app_id, trusted)
+    datastore_stub_util.DatastoreStub.__init__(self, weakref.proxy(self),
+                                               app_id, trusted)
 
 
 
@@ -368,7 +369,7 @@
 
 
     self._RegisterPseudoKind(KindPseudoKind())
-    self._RegisterPseudoKind(PropertyPseudoKind(self))
+    self._RegisterPseudoKind(PropertyPseudoKind(weakref.proxy(self)))
     self._RegisterPseudoKind(NamespacePseudoKind())
 
     self.Read()
diff --git a/google/appengine/api/datastore_types.py b/google/appengine/api/datastore_types.py
index 9cced31..57dff8c 100755
--- a/google/appengine/api/datastore_types.py
+++ b/google/appengine/api/datastore_types.py
@@ -91,10 +91,11 @@
 
 
 
-_KEY_SPECIAL_PROPERTY = '__key__'
+KEY_SPECIAL_PROPERTY = '__key__'
+_KEY_SPECIAL_PROPERTY = KEY_SPECIAL_PROPERTY
 _UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY = '__unapplied_log_timestamp_us__'
 _SPECIAL_PROPERTIES = frozenset(
-    [_KEY_SPECIAL_PROPERTY, _UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY])
+    [KEY_SPECIAL_PROPERTY, _UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY])
 
 
 
diff --git a/google/appengine/api/files/crc32c.py b/google/appengine/api/files/crc32c.py
index af7c254..4426849 100644
--- a/google/appengine/api/files/crc32c.py
+++ b/google/appengine/api/files/crc32c.py
@@ -18,7 +18,7 @@
 
 
 
-"""Implementation of CRC-32C checksumming.
+"""Implementation of CRC-32C checksumming as in rfc3720 section B.4.
 
 See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for details on CRC-32C.
 
@@ -101,7 +101,9 @@
 
 
 
-CRC_INIT = 0xffffffffL
+CRC_INIT = 0
+
+_MASK = 0xFFFFFFFFL
 
 
 def crc_update(crc, data):
@@ -120,10 +122,11 @@
   else:
     buf = data
 
+  crc = crc ^ _MASK
   for b in buf:
     table_index = (crc ^ b) & 0xff
-    crc = (CRC_TABLE[table_index] ^ (crc >> 8)) & 0xffffffffL
-  return crc & 0xffffffffL
+    crc = (CRC_TABLE[table_index] ^ (crc >> 8)) & _MASK
+  return crc ^ _MASK
 
 
 def crc_finalize(crc):
@@ -137,7 +140,7 @@
   Returns:
     finalized 32-bit checksum as long
   """
-  return crc ^ 0xffffffffL
+  return crc & _MASK
 
 
 def crc(data):
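
A short sketch of the updated checksum flow: crc_update now performs the pre/post inversion itself, so CRC_INIT is 0 and crc_finalize only masks the result to 32 bits; incremental updates still compose (this assumes crc() is the usual update-then-finalize convenience wrapper).

    from google.appengine.api.files import crc32c

    c = crc32c.crc_update(crc32c.CRC_INIT, 'hello ')   # CRC_INIT is now 0
    c = crc32c.crc_update(c, 'world')                  # incremental update
    checksum = crc32c.crc_finalize(c)                  # 32-bit checksum
    assert checksum == crc32c.crc('hello world')
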
diff --git a/google/appengine/api/files/file.py b/google/appengine/api/files/file.py
index ef57998..9308560 100755
--- a/google/appengine/api/files/file.py
+++ b/google/appengine/api/files/file.py
@@ -42,10 +42,8 @@
            'UnsupportedContentTypeError',
            'UnsupportedOpenModeError',
           'WrongContentTypeError',
-           'WrongKeyOrderError',
            'WrongOpenModeError',
 
-           'ORDERED_KEY_VALUE',
            'RAW',
 
            'delete',
@@ -80,10 +78,6 @@
   """Function argument has invalid value."""
 
 
-class WrongKeyOrderError(Error):
-  """Key order is not ascending."""
-
-
 class FinalizationError(Error):
   """File is in wrong finalization state."""
 
@@ -156,9 +150,6 @@
 RAW = file_service_pb.FileContentType.RAW
 
 
-ORDERED_KEY_VALUE = file_service_pb.FileContentType.ORDERED_KEY_VALUE
-
-
 def _raise_app_error(e):
   """Convert RPC error into api-specific exception."""
   if (e.application_error ==
@@ -168,9 +159,6 @@
         file_service_pb.FileServiceErrors.API_TEMPORARILY_UNAVAILABLE):
     raise ApiTemporaryUnavailableError()
   elif (e.application_error ==
-        file_service_pb.FileServiceErrors.WRONG_KEY_ORDER):
-    raise WrongKeyOrderError()
-  elif (e.application_error ==
         file_service_pb.FileServiceErrors.FINALIZATION_ERROR):
     raise FinalizationError()
   elif (e.application_error ==
@@ -244,65 +232,6 @@
     _raise_app_error(e)
 
 
-
-
-class _ItemsIterator(object):
-  """Iterator over key/value pairs in key/value file."""
-
-  def __init__(self, filename, max_bytes, start_key):
-    """Constructor.
-
-    Args:
-      filename: File name as string.
-      max_bytes: Maximum number of bytes to read, in one batch as integer.
-      start_key: Start key as string.
-    """
-    self._filename = filename
-    self._max_bytes = max_bytes
-    self._start_key = start_key
-
-  def __iter__(self):
-    key = self._start_key
-    while True:
-      request = file_service_pb.ReadKeyValueRequest()
-      response = file_service_pb.ReadKeyValueResponse()
-      request.set_filename(self._filename)
-      request.set_start_key(key)
-      request.set_max_bytes(self._max_bytes)
-      _make_call('ReadKeyValue', request, response)
-
-      if response.truncated_value():
-
-        key = response.data(0).key()
-        value = response.data(0).value()
-        while True:
-          request = file_service_pb.ReadKeyValueRequest()
-          response = file_service_pb.ReadKeyValueResponse()
-          request.set_filename(self._filename)
-          request.set_start_key(key)
-          request.set_max_bytes(self._max_bytes)
-          request.set_value_pos(len(value))
-          _make_call('ReadKeyValue', request, response)
-          value += response.data(0).value()
-          if response.data_size() > 1:
-            for kv in response.data_list():
-              yield (kv.key(), kv.value())
-            break
-          if not response.truncated_value():
-            break
-        yield (key, value)
-      else:
-        if not response.data_size():
-          return
-
-        for kv in response.data_list():
-          yield (kv.key(), kv.value())
-
-      if not response.has_next_key():
-        return
-      key = response.next_key()
-
-
 class _File(object):
   """File object.
 
@@ -315,7 +244,8 @@
 
     Args:
       filename: File's name as string.
-      content_type: File's content type. Either RAW or ORDERED_KEY_VALUE.
+      content_type: File's content type. Value from FileContentType.ContentType
+        enum.
     """
     self._filename = filename
     self._closed = False
@@ -351,8 +281,7 @@
 
     Args:
       data: Data to be written to the file. For RAW files it should be a string
-        or byte sequence. For ORDERED_KEY_VALUE should be a tuple of strings
-        or byte sequences.
+        or byte sequence.
       sequence_key: Sequence key to use for write. Used for RAW files only.
         The Files API ensures that sequence keys are monotonically increasing.
         If a sequence key less than the previous one is used, a
@@ -375,18 +304,6 @@
       if sequence_key:
         request.set_sequence_key(sequence_key)
       self._make_rpc_call_with_retry('Append', request, response)
-    elif self._content_type == ORDERED_KEY_VALUE:
-      if not isinstance(data, tuple):
-        raise InvalidArgumentError('Tuple expected. Got: %s' % type(data))
-      if len(data) != 2:
-        raise InvalidArgumentError(
-            'Tuple of length 2 expected. Got: %s' % len(data))
-      request = file_service_pb.AppendKeyValueRequest()
-      response = file_service_pb.AppendKeyValueResponse()
-      request.set_filename(self._filename)
-      request.set_key(data[0])
-      request.set_value(data[1])
-      self._make_rpc_call_with_retry('AppendKeyValue', request, response)
     else:
       raise UnsupportedContentTypeError(
           'Unsupported content type: %s' % self._content_type)
@@ -445,24 +362,6 @@
     if self._mode != 'r':
       raise WrongOpenModeError('File is opened for write.')
 
-
-
-  def _items(self, max_bytes=900000, start_key=''):
-    """Returns iterator over key values in the file.
-
-    Args:
-      max_bytes: Maximum number of bytes to read in single batch as integer.
-      start_key: Starting key to start reading from.
-
-    Returns:
-      Iterator which yields (key, value) pair, where key and value are strings.
-    """
-    if self._content_type != ORDERED_KEY_VALUE:
-      raise UnsupportedContentTypeError(
-          'Unsupported content type: %s' % self._content_type)
-
-    return _ItemsIterator(self._filename, max_bytes, start_key)
-
   def _open(self):
     request = file_service_pb.OpenRequest()
     response = file_service_pb.OpenResponse()
@@ -502,7 +401,8 @@
   Args:
     filename: A name of the file as string.
     mode: File open mode. Either 'a' or 'r'.
-    content_type: File content type. Either RAW or ORDERED_KEY_VALUE.
+    content_type: File's content type. Value from FileContentType.ContentType
+      enum.
     exclusive_lock: If the file should be exclusively locked. All other
       exclusive lock attempts will fail until the file is correctly closed.
 
@@ -514,7 +414,7 @@
   if not isinstance(filename, basestring):
     raise InvalidArgumentError('Filename should be a string but is %s (%s)' %
                                (filename.__class__, filename))
-  if content_type != RAW and content_type != ORDERED_KEY_VALUE:
+  if content_type != RAW:
     raise InvalidArgumentError('Invalid content type')
 
   f = _File(filename,
@@ -529,13 +429,14 @@
 
   Args:
     filename: File name as string.
-    content_type: File content type. Either RAW or ORDERED_KEY_VALUE.
+    content_type: File's content type. Value from FileContentType.ContentType
+      enum.
   """
   if not filename:
     raise InvalidArgumentError('Filename is empty')
   if not isinstance(filename, basestring):
     raise InvalidArgumentError('Filename should be a string')
-  if content_type != RAW and content_type != ORDERED_KEY_VALUE:
+  if content_type != RAW:
     raise InvalidArgumentError('Invalid content type')
 
   try:
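
With ORDERED_KEY_VALUE removed, RAW is the only accepted content type. A minimal write-path sketch, assuming the blobstore file helpers shipped alongside this module (files.blobstore.create, files.open, files.finalize, files.blobstore.get_blob_key):

    from google.appengine.api import files

    file_name = files.blobstore.create(mime_type='application/octet-stream')
    with files.open(file_name, 'a') as f:     # append mode, RAW content
      f.write('some bytes')
    files.finalize(file_name)
    blob_key = files.blobstore.get_blob_key(file_name)
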
diff --git a/google/appengine/api/files/file_service_pb.py b/google/appengine/api/files/file_service_pb.py
index d77460a..fa36f79 100755
--- a/google/appengine/api/files/file_service_pb.py
+++ b/google/appengine/api/files/file_service_pb.py
@@ -50,7 +50,6 @@
   READ_ONLY    =  103
   EXCLUSIVE_LOCK_FAILED =  104
   SEQUENCE_KEY_OUT_OF_ORDER =  300
-  WRONG_KEY_ORDER =  400
   OUT_OF_BOUNDS =  500
   GLOBS_NOT_SUPPORTED =  600
   FILE_NAME_NOT_SPECIFIED =  701
@@ -84,7 +83,6 @@
     103: "READ_ONLY",
     104: "EXCLUSIVE_LOCK_FAILED",
     300: "SEQUENCE_KEY_OUT_OF_ORDER",
-    400: "WRONG_KEY_ORDER",
     500: "OUT_OF_BOUNDS",
     600: "GLOBS_NOT_SUPPORTED",
     701: "FILE_NAME_NOT_SPECIFIED",
@@ -449,12 +447,12 @@
 
 
   RAW          =    0
-  ORDERED_KEY_VALUE =    2
+  DEPRECATED_1 =    2
   INVALID_TYPE =  127
 
   _ContentType_NAMES = {
     0: "RAW",
-    2: "ORDERED_KEY_VALUE",
+    2: "DEPRECATED_1",
     127: "INVALID_TYPE",
   }
 
@@ -2364,247 +2362,6 @@
 
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
-class AppendKeyValueRequest(ProtocolBuffer.ProtocolMessage):
-  has_filename_ = 0
-  filename_ = ""
-  has_key_ = 0
-  key_ = ""
-  has_value_ = 0
-  value_ = ""
-
-  def __init__(self, contents=None):
-    if contents is not None: self.MergeFromString(contents)
-
-  def filename(self): return self.filename_
-
-  def set_filename(self, x):
-    self.has_filename_ = 1
-    self.filename_ = x
-
-  def clear_filename(self):
-    if self.has_filename_:
-      self.has_filename_ = 0
-      self.filename_ = ""
-
-  def has_filename(self): return self.has_filename_
-
-  def key(self): return self.key_
-
-  def set_key(self, x):
-    self.has_key_ = 1
-    self.key_ = x
-
-  def clear_key(self):
-    if self.has_key_:
-      self.has_key_ = 0
-      self.key_ = ""
-
-  def has_key(self): return self.has_key_
-
-  def value(self): return self.value_
-
-  def set_value(self, x):
-    self.has_value_ = 1
-    self.value_ = x
-
-  def clear_value(self):
-    if self.has_value_:
-      self.has_value_ = 0
-      self.value_ = ""
-
-  def has_value(self): return self.has_value_
-
-
-  def MergeFrom(self, x):
-    assert x is not self
-    if (x.has_filename()): self.set_filename(x.filename())
-    if (x.has_key()): self.set_key(x.key())
-    if (x.has_value()): self.set_value(x.value())
-
-  def Equals(self, x):
-    if x is self: return 1
-    if self.has_filename_ != x.has_filename_: return 0
-    if self.has_filename_ and self.filename_ != x.filename_: return 0
-    if self.has_key_ != x.has_key_: return 0
-    if self.has_key_ and self.key_ != x.key_: return 0
-    if self.has_value_ != x.has_value_: return 0
-    if self.has_value_ and self.value_ != x.value_: return 0
-    return 1
-
-  def IsInitialized(self, debug_strs=None):
-    initialized = 1
-    if (not self.has_filename_):
-      initialized = 0
-      if debug_strs is not None:
-        debug_strs.append('Required field: filename not set.')
-    if (not self.has_key_):
-      initialized = 0
-      if debug_strs is not None:
-        debug_strs.append('Required field: key not set.')
-    if (not self.has_value_):
-      initialized = 0
-      if debug_strs is not None:
-        debug_strs.append('Required field: value not set.')
-    return initialized
-
-  def ByteSize(self):
-    n = 0
-    n += self.lengthString(len(self.filename_))
-    n += self.lengthString(len(self.key_))
-    n += self.lengthString(len(self.value_))
-    return n + 3
-
-  def ByteSizePartial(self):
-    n = 0
-    if (self.has_filename_):
-      n += 1
-      n += self.lengthString(len(self.filename_))
-    if (self.has_key_):
-      n += 1
-      n += self.lengthString(len(self.key_))
-    if (self.has_value_):
-      n += 1
-      n += self.lengthString(len(self.value_))
-    return n
-
-  def Clear(self):
-    self.clear_filename()
-    self.clear_key()
-    self.clear_value()
-
-  def OutputUnchecked(self, out):
-    out.putVarInt32(10)
-    out.putPrefixedString(self.filename_)
-    out.putVarInt32(18)
-    out.putPrefixedString(self.key_)
-    out.putVarInt32(26)
-    out.putPrefixedString(self.value_)
-
-  def OutputPartial(self, out):
-    if (self.has_filename_):
-      out.putVarInt32(10)
-      out.putPrefixedString(self.filename_)
-    if (self.has_key_):
-      out.putVarInt32(18)
-      out.putPrefixedString(self.key_)
-    if (self.has_value_):
-      out.putVarInt32(26)
-      out.putPrefixedString(self.value_)
-
-  def TryMerge(self, d):
-    while d.avail() > 0:
-      tt = d.getVarInt32()
-      if tt == 10:
-        self.set_filename(d.getPrefixedString())
-        continue
-      if tt == 18:
-        self.set_key(d.getPrefixedString())
-        continue
-      if tt == 26:
-        self.set_value(d.getPrefixedString())
-        continue
-
-
-      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
-      d.skipData(tt)
-
-
-  def __str__(self, prefix="", printElemNumber=0):
-    res=""
-    if self.has_filename_: res+=prefix+("filename: %s\n" % self.DebugFormatString(self.filename_))
-    if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_))
-    if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
-    return res
-
-
-  def _BuildTagLookupTable(sparse, maxtag, default=None):
-    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
-
-  kfilename = 1
-  kkey = 2
-  kvalue = 3
-
-  _TEXT = _BuildTagLookupTable({
-    0: "ErrorCode",
-    1: "filename",
-    2: "key",
-    3: "value",
-  }, 3)
-
-  _TYPES = _BuildTagLookupTable({
-    0: ProtocolBuffer.Encoder.NUMERIC,
-    1: ProtocolBuffer.Encoder.STRING,
-    2: ProtocolBuffer.Encoder.STRING,
-    3: ProtocolBuffer.Encoder.STRING,
-  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)
-
-
-  _STYLE = """"""
-  _STYLE_CONTENT_TYPE = """"""
-class AppendKeyValueResponse(ProtocolBuffer.ProtocolMessage):
-
-  def __init__(self, contents=None):
-    pass
-    if contents is not None: self.MergeFromString(contents)
-
-
-  def MergeFrom(self, x):
-    assert x is not self
-
-  def Equals(self, x):
-    if x is self: return 1
-    return 1
-
-  def IsInitialized(self, debug_strs=None):
-    initialized = 1
-    return initialized
-
-  def ByteSize(self):
-    n = 0
-    return n
-
-  def ByteSizePartial(self):
-    n = 0
-    return n
-
-  def Clear(self):
-    pass
-
-  def OutputUnchecked(self, out):
-    pass
-
-  def OutputPartial(self, out):
-    pass
-
-  def TryMerge(self, d):
-    while d.avail() > 0:
-      tt = d.getVarInt32()
-
-
-      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
-      d.skipData(tt)
-
-
-  def __str__(self, prefix="", printElemNumber=0):
-    res=""
-    return res
-
-
-  def _BuildTagLookupTable(sparse, maxtag, default=None):
-    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
-
-
-  _TEXT = _BuildTagLookupTable({
-    0: "ErrorCode",
-  }, 0)
-
-  _TYPES = _BuildTagLookupTable({
-    0: ProtocolBuffer.Encoder.NUMERIC,
-  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)
-
-
-  _STYLE = """"""
-  _STYLE_CONTENT_TYPE = """"""
 class DeleteRequest(ProtocolBuffer.ProtocolMessage):
   has_filename_ = 0
   filename_ = ""
@@ -4109,4 +3866,4 @@
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
 
-__all__ = ['FileServiceErrors','KeyValue','KeyValues','FileContentType','CreateRequest_Parameter','CreateRequest','CreateResponse','OpenRequest','OpenResponse','CloseRequest','CloseResponse','FileStat','StatRequest','StatResponse','AppendRequest','AppendResponse','AppendKeyValueRequest','AppendKeyValueResponse','DeleteRequest','DeleteResponse','ReadRequest','ReadResponse','ReadKeyValueRequest','ReadKeyValueResponse_KeyValue','ReadKeyValueResponse','ShuffleRequest','ShuffleResponse','GetShuffleStatusRequest','GetShuffleStatusResponse']
+__all__ = ['FileServiceErrors','KeyValue','KeyValues','FileContentType','CreateRequest_Parameter','CreateRequest','CreateResponse','OpenRequest','OpenResponse','CloseRequest','CloseResponse','FileStat','StatRequest','StatResponse','AppendRequest','AppendResponse','DeleteRequest','DeleteResponse','ReadRequest','ReadResponse','ReadKeyValueRequest','ReadKeyValueResponse_KeyValue','ReadKeyValueResponse','ShuffleRequest','ShuffleResponse','GetShuffleStatusRequest','GetShuffleStatusResponse']
diff --git a/google/appengine/api/files/records.py b/google/appengine/api/files/records.py
index 14f71c0..26d2578 100644
--- a/google/appengine/api/files/records.py
+++ b/google/appengine/api/files/records.py
@@ -32,7 +32,7 @@
 Each block consists of a sequence of records:
    block := record* trailer?
    record :=
-      checksum: uint32  // crc32c of type and data[]
+      checksum: uint32  // masked crc32c of type and data[]
       length: uint16
       type: uint8       // One of FULL, FIRST, MIDDLE, LAST
       data: uint8[length]
@@ -162,6 +162,31 @@
     raise NotImplementedError()
 
 
+_CRC_MASK_DELTA = 0xa282ead8
+
+def _mask_crc(crc):
+  """Mask crc.
+
+  Args:
+    crc: integer crc.
+  Returns:
+    masked integer crc.
+  """
+  return (((crc >> 15) | (crc << 17)) + _CRC_MASK_DELTA) & 0xFFFFFFFFL
+
+
+def _unmask_crc(masked_crc):
+  """Unmask crc.
+
+  Args:
+    masked_crc: masked integer crc.
+  Returns:
+    original crc.
+  """
+  rot = (masked_crc - _CRC_MASK_DELTA) & 0xFFFFFFFFL
+  return ((rot >> 17) | (rot << 15)) & 0xFFFFFFFFL
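
As a sketch of the masking round trip (the record type value and payload below are placeholders, and crc32c is assumed to be imported by this module as elsewhere in the file), the raw CRC-32C is masked before being packed into the record header and unmasked again by the reader:

    record_type, payload = 1, 'payload bytes'      # placeholder type and data
    c = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
    c = crc32c.crc_update(c, payload)
    crc = crc32c.crc_finalize(c)
    masked = _mask_crc(crc)                         # value stored in the header
    assert _unmask_crc(masked) == crc               # value recovered on read
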
+
+
 class RecordsWriter(object):
   """A writer for records format.
 
@@ -173,7 +198,7 @@
  RecordsWriter pads the last block with zeros when exiting the 'with'
  statement scope, unless _pad_last_block is False.
   """
 
-  def __init__(self, writer):
+  def __init__(self, writer, _pad_last_block=True):
     """Constructor.
 
     Args:
@@ -182,6 +207,7 @@
     self.__writer = writer
     self.__position = 0
     self.__entered = False
+    self.__pad_last_block = _pad_last_block
 
   def __write_record(self, record_type, data):
     """Write single physical record."""
@@ -191,7 +217,8 @@
     crc = crc32c.crc_update(crc, data)
     crc = crc32c.crc_finalize(crc)
 
-    self.__writer.write(struct.pack(HEADER_FORMAT, crc, length, record_type))
+    self.__writer.write(
+        struct.pack(HEADER_FORMAT, _mask_crc(crc), length, record_type))
     self.__writer.write(data)
     self.__position += HEADER_LENGTH + length
 
@@ -236,9 +263,10 @@
     self.close()
 
   def close(self):
-    pad_length = BLOCK_SIZE - self.__position % BLOCK_SIZE
-    if pad_length and pad_length != BLOCK_SIZE:
-      self.__writer.write('\x00' * pad_length)
+    if self.__pad_last_block:
+      pad_length = BLOCK_SIZE - self.__position % BLOCK_SIZE
+      if pad_length and pad_length != BLOCK_SIZE:
+        self.__writer.write('\x00' * pad_length)
 
 
 class RecordsReader(object):
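
A round-trip sketch of the writer and reader with the masked checksums, using in-memory streams in place of real file objects; RecordsWriter.write and RecordsReader.read are assumed to behave as in the rest of this module.

    import StringIO

    buf = StringIO.StringIO()
    with RecordsWriter(buf) as w:      # __exit__ pads the final block by default
      w.write('first record')
      w.write('second record')

    reader = RecordsReader(StringIO.StringIO(buf.getvalue()))
    assert reader.read() == 'first record'
    assert reader.read() == 'second record'
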
@@ -265,7 +293,8 @@
       raise EOFError('Read %s bytes instead of %s' %
                      (len(header), HEADER_LENGTH))
 
-    (crc, length, record_type) = struct.unpack(HEADER_FORMAT, header)
+    (masked_crc, length, record_type) = struct.unpack(HEADER_FORMAT, header)
+    crc = _unmask_crc(masked_crc)
 
     if length + HEADER_LENGTH > block_remaining:
 
diff --git a/google/appengine/api/images/__init__.py b/google/appengine/api/images/__init__.py
index 0abe15d..38b0c10 100755
--- a/google/appengine/api/images/__init__.py
+++ b/google/appengine/api/images/__init__.py
@@ -39,7 +39,10 @@
 
 import struct
 
-import simplejson as json
+try:
+  import json
+except ImportError:
+  import simplejson as json
 
 from google.appengine.api import apiproxy_stub_map
 from google.appengine.api import datastore_types
diff --git a/google/appengine/api/lib_config.py b/google/appengine/api/lib_config.py
index 9265ce9..feeea0b 100755
--- a/google/appengine/api/lib_config.py
+++ b/google/appengine/api/lib_config.py
@@ -84,11 +84,15 @@
 import logging
 import os
 import sys
+import threading
 
 
 DEFAULT_MODNAME = 'appengine_config'
 
 
+
+
+
 class LibConfigRegistry(object):
   """A registry for library configuration values."""
 
@@ -105,6 +109,7 @@
     self._modname = modname
     self._registrations = {}
     self._module = None
+    self._lock = threading.RLock()
 
   def register(self, prefix, mapping):
     """Register a set of configuration names.
@@ -122,10 +127,14 @@
     """
     if not prefix.endswith('_'):
       prefix += '_'
-    handle = self._registrations.get(prefix)
-    if handle is None:
-      handle = ConfigHandle(prefix, self)
-      self._registrations[prefix] = handle
+    self._lock.acquire()
+    try:
+      handle = self._registrations.get(prefix)
+      if handle is None:
+        handle = ConfigHandle(prefix, self)
+        self._registrations[prefix] = handle
+    finally:
+      self._lock.release()
     handle._update_defaults(mapping)
     return handle
 
@@ -146,33 +155,41 @@
     Args:
       import_func: Used for dependency injection.
     """
-    if (self._module is not None and
-        self._module is sys.modules.get(self._modname)):
-      return
+    self._lock.acquire()
     try:
-      import_func(self._modname)
-    except ImportError, err:
-      if str(err) != 'No module named %s' % self._modname:
+      if (self._module is not None and
+          self._module is sys.modules.get(self._modname)):
+        return
+      try:
+        import_func(self._modname)
+      except ImportError, err:
+        if str(err) != 'No module named %s' % self._modname:
 
-        raise
-      self._module = object()
-      sys.modules[self._modname] = self._module
-    else:
-      self._module = sys.modules[self._modname]
+          raise
+        self._module = object()
+        sys.modules[self._modname] = self._module
+      else:
+        self._module = sys.modules[self._modname]
+    finally:
+      self._lock.release()
 
   def reset(self):
     """Drops the imported config module.
 
     If the config module has not been imported then this is a no-op.
     """
-    if self._module is None:
+    self._lock.acquire()
+    try:
+      if self._module is None:
 
-      return
+        return
 
-    self._module = None
-    for handle in self._registrations.itervalues():
+      self._module = None
+      handles = self._registrations.values()
+    finally:
+      self._lock.release()
+    for handle in handles:
       handle._clear_cache()
-      handle._initialized = False
 
   def _pairs(self, prefix):
     """Generate (key, value) pairs from the config module matching prefix.
@@ -184,26 +201,37 @@
       (key, value) pairs where key is the configuration name with
       prefix removed, and value is the corresponding value.
     """
-    mapping = getattr(self._module, '__dict__', None)
-    if not mapping:
-      return
+    self._lock.acquire()
+    try:
+      mapping = getattr(self._module, '__dict__', None)
+      if not mapping:
+        return
+      items = mapping.items()
+    finally:
+      self._lock.release()
     nskip = len(prefix)
-    for key, value in mapping.iteritems():
+    for key, value in items:
       if key.startswith(prefix):
         yield key[nskip:], value
 
   def _dump(self):
     """Print info about all registrations to stdout."""
     self.initialize()
-    if not hasattr(self._module, '__dict__'):
-      print 'Module %s.py does not exist.' % self._modname
-    elif not self._registrations:
-      print 'No registrations for %s.py.' % self._modname
-    else:
-      print 'Registrations in %s.py:' % self._modname
-      print '-'*40
-      for prefix in sorted(self._registrations):
-        self._registrations[prefix]._dump()
+    handles = []
+    self._lock.acquire()
+    try:
+      if not hasattr(self._module, '__dict__'):
+        print 'Module %s.py does not exist.' % self._modname
+      elif not self._registrations:
+        print 'No registrations for %s.py.' % self._modname
+      else:
+        print 'Registrations in %s.py:' % self._modname
+        print '-'*40
+        handles = self._registrations.items()
+    finally:
+      self._lock.release()
+    for _, handle in sorted(handles):
+      handle._dump()
 
 
 class ConfigHandle(object):
@@ -229,6 +257,7 @@
     self._defaults = {}
     self._overrides = {}
     self._registry = registry
+    self._lock = threading.RLock()
 
   def _update_defaults(self, mapping):
     """Update the default mappings.
@@ -236,12 +265,16 @@
     Args:
       mapping: A dict mapping suffix strings to default values.
     """
-    for key, value in mapping.iteritems():
-      if key.startswith('__') and key.endswith('__'):
-        continue
-      self._defaults[key] = value
-    if self._initialized:
-      self._update_configs()
+    self._lock.acquire()
+    try:
+      for key, value in mapping.iteritems():
+        if key.startswith('__') and key.endswith('__'):
+          continue
+        self._defaults[key] = value
+      if self._initialized:
+        self._update_configs()
+    finally:
+      self._lock.release()
 
   def _update_configs(self):
     """Update the configuration values.
@@ -249,40 +282,53 @@
     This clears the cached values, initializes the registry, and loads
     the configuration values from the config module.
     """
-    if self._initialized:
-      self._clear_cache()
-    self._registry.initialize()
-    for key, value in self._registry._pairs(self._prefix):
-      if key not in self._defaults:
-        logging.warn('Configuration "%s" not recognized', self._prefix + key)
-      else:
-        self._overrides[key] = value
-    self._initialized = True
+    self._lock.acquire()
+    try:
+      if self._initialized:
+        self._clear_cache()
+      self._registry.initialize()
+      for key, value in self._registry._pairs(self._prefix):
+        if key not in self._defaults:
+          logging.warn('Configuration "%s" not recognized', self._prefix + key)
+        else:
+          self._overrides[key] = value
+      self._initialized = True
+    finally:
+      self._lock.release()
 
   def _clear_cache(self):
     """Clear the cached values."""
-    for key in self._defaults:
-      try:
-        delattr(self, key)
-      except AttributeError:
-        pass
+    self._lock.acquire()
+    try:
+      self._initialized = False
+      for key in self._defaults:
+        try:
+          delattr(self, key)
+        except AttributeError:
+          pass
+    finally:
+      self._lock.release()
 
   def _dump(self):
     """Print info about this set of registrations to stdout."""
-    print 'Prefix %s:' % self._prefix
-    if self._overrides:
-      print '  Overrides:'
-      for key in sorted(self._overrides):
-        print '    %s = %r' % (key, self._overrides[key])
-    else:
-      print '  No overrides'
-    if self._defaults:
-      print '  Defaults:'
-      for key in sorted(self._defaults):
-        print '    %s = %r' % (key, self._defaults[key])
-    else:
-      print '  No defaults'
-    print '-'*40
+    self._lock.acquire()
+    try:
+      print 'Prefix %s:' % self._prefix
+      if self._overrides:
+        print '  Overrides:'
+        for key in sorted(self._overrides):
+          print '    %s = %r' % (key, self._overrides[key])
+      else:
+        print '  No overrides'
+      if self._defaults:
+        print '  Defaults:'
+        for key in sorted(self._defaults):
+          print '    %s = %r' % (key, self._defaults[key])
+      else:
+        print '  No defaults'
+      print '-'*40
+    finally:
+      self._lock.release()
 
   def __getattr__(self, suffix):
     """Dynamic attribute access.
@@ -300,17 +346,21 @@
     The value returned taken either from the config module or from the
     registered default.
     """
-    if not self._initialized:
-      self._update_configs()
-    if suffix in self._overrides:
-      value = self._overrides[suffix]
-    elif suffix in self._defaults:
-      value = self._defaults[suffix]
-    else:
-      raise AttributeError(suffix)
+    self._lock.acquire()
+    try:
+      if not self._initialized:
+        self._update_configs()
+      if suffix in self._overrides:
+        value = self._overrides[suffix]
+      elif suffix in self._defaults:
+        value = self._defaults[suffix]
+      else:
+        raise AttributeError(suffix)
 
-    setattr(self, suffix, value)
-    return value
+      setattr(self, suffix, value)
+      return value
+    finally:
+      self._lock.release()
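
For context on what the new locks protect, a minimal sketch of the registration and lookup pattern; the 'mylib' prefix and defaults are hypothetical.

    from google.appengine.api import lib_config

    _config = lib_config.register('mylib', {'TIMEOUT': 5.0, 'RETRIES': 3})

    def fetch_with_config():
      # Resolves to the default above or to a mylib_TIMEOUT override defined
      # in the application's appengine_config.py, loaded on first access.
      return _config.TIMEOUT
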
 
 
 
diff --git a/google/appengine/api/logservice/log_service_pb.py b/google/appengine/api/logservice/log_service_pb.py
index 168d688..a5eb9d2 100755
--- a/google/appengine/api/logservice/log_service_pb.py
+++ b/google/appengine/api/logservice/log_service_pb.py
@@ -168,7 +168,7 @@
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
   _SERIALIZED_DESCRIPTOR = array.array('B')
-  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WithcHBob3N0aW5nL2FwaS9sb2dzZXJ2aWNlL2xvZ19zZXJ2aWNlLnByb3RvChdhcHBob3N0aW5nLkZsdXNoUmVxdWVzdBMaBGxvZ3MgASgCMAk4ARS6AesCCithcHBob3N0aW5nL2FwaS9sb2dzZXJ2aWNlL2xvZ19zZXJ2aWNlLnByb3RvEgphcHBob3N0aW5nGh1hcHBob3N0aW5nL2FwaS9hcGlfYmFzZS5wcm90byIcCgxGbHVzaFJlcXVlc3QSDAoEbG9ncxgBIAEoDCIiChBTZXRTdGF0dXNSZXF1ZXN0Eg4KBnN0YXR1cxgBIAIoCTKSAQoKTG9nU2VydmljZRI9CgVGbHVzaBIYLmFwcGhvc3RpbmcuRmx1c2hSZXF1ZXN0GhouYXBwaG9zdGluZy5iYXNlLlZvaWRQcm90bxJFCglTZXRTdGF0dXMSHC5hcHBob3N0aW5nLlNldFN0YXR1c1JlcXVlc3QaGi5hcHBob3N0aW5nLmJhc2UuVm9pZFByb3RvQjoKJGNvbS5nb29nbGUuYXBwaG9zdGluZy5hcGkubG9nc2VydmljZRABIAEoAUIMTG9nU2VydmljZVBi"))
+  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WithcHBob3N0aW5nL2FwaS9sb2dzZXJ2aWNlL2xvZ19zZXJ2aWNlLnByb3RvChdhcHBob3N0aW5nLkZsdXNoUmVxdWVzdBMaBGxvZ3MgASgCMAk4ARS6Ab0MCithcHBob3N0aW5nL2FwaS9sb2dzZXJ2aWNlL2xvZ19zZXJ2aWNlLnByb3RvEgphcHBob3N0aW5nGh1hcHBob3N0aW5nL2FwaS9hcGlfYmFzZS5wcm90byIcCgxGbHVzaFJlcXVlc3QSDAoEbG9ncxgBIAEoDCIiChBTZXRTdGF0dXNSZXF1ZXN0Eg4KBnN0YXR1cxgBIAIoCSIfCglMb2dPZmZzZXQSEgoKcmVxdWVzdF9pZBgBIAEoCSI7CgdMb2dMaW5lEgwKBHRpbWUYASACKAMSDQoFbGV2ZWwYAiACKAUSEwoLbG9nX21lc3NhZ2UYAyACKAki1wUKClJlcXVlc3RMb2cSDgoGYXBwX2lkGAEgAigJEhIKCnZlcnNpb25faWQYAiACKAkSEgoKcmVxdWVzdF9pZBgDIAIoCRIKCgJpcBgEIAIoCRIQCghuaWNrbmFtZRgFIAEoCRISCgpzdGFydF90aW1lGAYgAigDEhAKCGVuZF90aW1lGAcgAigDEg8KB2xhdGVuY3kYCCACKAMSDwoHbWN5Y2xlcxgJIAIoAxIOCgZtZXRob2QYCiACKAkSEAoIcmVzb3VyY2UYCyACKAkSFAoMaHR0cF92ZXJzaW9uGAwgAigJEg4KBnN0YXR1cxgNIAIoBRIVCg1yZXNwb25zZV9zaXplGA4gAigDEhAKCHJlZmVycmVyGA8gASgJEhIKCnVzZXJfYWdlbnQYECABKAkSFQoNdXJsX21hcF9lbnRyeRgRIAIoCRIQCghjb21iaW5lZBgSIAIoCRITCgthcGlfbWN5Y2xlcxgTIAEoAxIMCgRob3N0GBQgASgJEgwKBGNvc3QYFSABKAESFwoPdGFza19xdWV1ZV9uYW1lGBYgASgJEhEKCXRhc2tfbmFtZRgXIAEoCRIbChN3YXNfbG9hZGluZ19yZXF1ZXN0GBggASgIEhQKDHBlbmRpbmdfdGltZRgZIAEoAxIZCg1yZXBsaWNhX2luZGV4GBogASgFOgItMRIWCghmaW5pc2hlZBgbIAEoCDoEdHJ1ZRIRCgljbG9uZV9rZXkYHCABKAwSIQoEbGluZRgdIAMoCzITLmFwcGhvc3RpbmcuTG9nTGluZRITCgtleGl0X3JlYXNvbhgeIAEoBRIeChZ3YXNfdGhyb3R0bGVkX2Zvcl90aW1lGB8gASgIEiIKGndhc190aHJvdHRsZWRfZm9yX3JlcXVlc3RzGCAgASgIEhYKDnRocm90dGxlZF90aW1lGCEgASgDEhMKC3NlcnZlcl9uYW1lGCIgASgMIrgCCg5Mb2dSZWFkUmVxdWVzdBIOCgZhcHBfaWQYASACKAkSEgoKdmVyc2lvbl9pZBgCIAMoCRISCgpzdGFydF90aW1lGAMgASgDEhAKCGVuZF90aW1lGAQgASgDEiUKBm9mZnNldBgFIAEoCzIVLmFwcGhvc3RpbmcuTG9nT2Zmc2V0EhIKCnJlcXVlc3RfaWQYBiADKAkSGQoRbWluaW11bV9sb2dfbGV2ZWwYByABKAUSGgoSaW5jbHVkZV9pbmNvbXBsZXRlGAggASgIEg0KBWNvdW50GAkgASgDEhgKEGluY2x1ZGVfYXBwX2xvZ3MYCiABKAgSFAoMaW5jbHVkZV9ob3N0GAsgASgIEhMKC2luY2x1ZGVfYWxsGAwgASgIEhYKDmNhY2hlX2l0ZXJhdG9yGA0gASgIIl0KD0xvZ1JlYWRSZXNwb25zZRIjCgNsb2cYASADKAsyFi5hcHBob3N0aW5nLlJlcXVlc3RMb2cSJQoGb2Zmc2V0GAIgASgLMhUuYXBwaG9zdGluZy5Mb2dPZmZzZXQykgEKCkxvZ1NlcnZpY2USPQoFRmx1c2gSGC5hcHBob3N0aW5nLkZsdXNoUmVxdWVzdBoaLmFwcGhvc3RpbmcuYmFzZS5Wb2lkUHJvdG8SRQoJU2V0U3RhdHVzEhwuYXBwaG9zdGluZy5TZXRTdGF0dXNSZXF1ZXN0GhouYXBwaG9zdGluZy5iYXNlLlZvaWRQcm90b0I6CiRjb20uZ29vZ2xlLmFwcGhvc3RpbmcuYXBpLmxvZ3NlcnZpY2UQASABKAFCDExvZ1NlcnZpY2VQYg=="))
   if _net_proto___parse__python is not None:
     _net_proto___parse__python.RegisterType(
         _SERIALIZED_DESCRIPTOR.tostring())
@@ -305,6 +305,2456 @@
     _net_proto___parse__python.RegisterType(
         _SERIALIZED_DESCRIPTOR.tostring())
 
+class LogOffset(ProtocolBuffer.ProtocolMessage):
+  has_request_id_ = 0
+  request_id_ = ""
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def request_id(self): return self.request_id_
+
+  def set_request_id(self, x):
+    self.has_request_id_ = 1
+    self.request_id_ = x
+
+  def clear_request_id(self):
+    if self.has_request_id_:
+      self.has_request_id_ = 0
+      self.request_id_ = ""
+
+  def has_request_id(self): return self.has_request_id_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_request_id()): self.set_request_id(x.request_id())
+
+  if _net_proto___parse__python is not None:
+    def _CMergeFromString(self, s):
+      _net_proto___parse__python.MergeFromString(self, 'apphosting.LogOffset', s)
+
+  if _net_proto___parse__python is not None:
+    def _CEncode(self):
+      return _net_proto___parse__python.Encode(self, 'apphosting.LogOffset')
+
+  if _net_proto___parse__python is not None:
+    def _CEncodePartial(self):
+      return _net_proto___parse__python.EncodePartial(self, 'apphosting.LogOffset')
+
+  if _net_proto___parse__python is not None:
+    def _CToASCII(self, output_format):
+      return _net_proto___parse__python.ToASCII(self, 'apphosting.LogOffset', output_format)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCII(self, s):
+      _net_proto___parse__python.ParseASCII(self, 'apphosting.LogOffset', s)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCIIIgnoreUnknown(self, s):
+      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.LogOffset', s)
+
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_request_id_ != x.has_request_id_: return 0
+    if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
+    return n
+
+  def ByteSizePartial(self):
+    n = 0
+    if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
+    return n
+
+  def Clear(self):
+    self.clear_request_id()
+
+  def OutputUnchecked(self, out):
+    if (self.has_request_id_):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.request_id_)
+
+  def OutputPartial(self, out):
+    if (self.has_request_id_):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.request_id_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_request_id(d.getPrefixedString())
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  krequest_id = 1
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "request_id",
+  }, 1)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+  _SERIALIZED_DESCRIPTOR = array.array('B')
+  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WithcHBob3N0aW5nL2FwaS9sb2dzZXJ2aWNlL2xvZ19zZXJ2aWNlLnByb3RvChRhcHBob3N0aW5nLkxvZ09mZnNldBMaCnJlcXVlc3RfaWQgASgCMAk4ARTCARdhcHBob3N0aW5nLkZsdXNoUmVxdWVzdA=="))
+  if _net_proto___parse__python is not None:
+    _net_proto___parse__python.RegisterType(
+        _SERIALIZED_DESCRIPTOR.tostring())
+
+class LogLine(ProtocolBuffer.ProtocolMessage):
+  has_time_ = 0
+  time_ = 0
+  has_level_ = 0
+  level_ = 0
+  has_log_message_ = 0
+  log_message_ = ""
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def time(self): return self.time_
+
+  def set_time(self, x):
+    self.has_time_ = 1
+    self.time_ = x
+
+  def clear_time(self):
+    if self.has_time_:
+      self.has_time_ = 0
+      self.time_ = 0
+
+  def has_time(self): return self.has_time_
+
+  def level(self): return self.level_
+
+  def set_level(self, x):
+    self.has_level_ = 1
+    self.level_ = x
+
+  def clear_level(self):
+    if self.has_level_:
+      self.has_level_ = 0
+      self.level_ = 0
+
+  def has_level(self): return self.has_level_
+
+  def log_message(self): return self.log_message_
+
+  def set_log_message(self, x):
+    self.has_log_message_ = 1
+    self.log_message_ = x
+
+  def clear_log_message(self):
+    if self.has_log_message_:
+      self.has_log_message_ = 0
+      self.log_message_ = ""
+
+  def has_log_message(self): return self.has_log_message_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_time()): self.set_time(x.time())
+    if (x.has_level()): self.set_level(x.level())
+    if (x.has_log_message()): self.set_log_message(x.log_message())
+
+  if _net_proto___parse__python is not None:
+    def _CMergeFromString(self, s):
+      _net_proto___parse__python.MergeFromString(self, 'apphosting.LogLine', s)
+
+  if _net_proto___parse__python is not None:
+    def _CEncode(self):
+      return _net_proto___parse__python.Encode(self, 'apphosting.LogLine')
+
+  if _net_proto___parse__python is not None:
+    def _CEncodePartial(self):
+      return _net_proto___parse__python.EncodePartial(self, 'apphosting.LogLine')
+
+  if _net_proto___parse__python is not None:
+    def _CToASCII(self, output_format):
+      return _net_proto___parse__python.ToASCII(self, 'apphosting.LogLine', output_format)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCII(self, s):
+      _net_proto___parse__python.ParseASCII(self, 'apphosting.LogLine', s)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCIIIgnoreUnknown(self, s):
+      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.LogLine', s)
+
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_time_ != x.has_time_: return 0
+    if self.has_time_ and self.time_ != x.time_: return 0
+    if self.has_level_ != x.has_level_: return 0
+    if self.has_level_ and self.level_ != x.level_: return 0
+    if self.has_log_message_ != x.has_log_message_: return 0
+    if self.has_log_message_ and self.log_message_ != x.log_message_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_time_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: time not set.')
+    if (not self.has_level_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: level not set.')
+    if (not self.has_log_message_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: log_message not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthVarInt64(self.time_)
+    n += self.lengthVarInt64(self.level_)
+    n += self.lengthString(len(self.log_message_))
+    return n + 3
+
+  def ByteSizePartial(self):
+    n = 0
+    if (self.has_time_):
+      n += 1
+      n += self.lengthVarInt64(self.time_)
+    if (self.has_level_):
+      n += 1
+      n += self.lengthVarInt64(self.level_)
+    if (self.has_log_message_):
+      n += 1
+      n += self.lengthString(len(self.log_message_))
+    return n
+
+  def Clear(self):
+    self.clear_time()
+    self.clear_level()
+    self.clear_log_message()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(8)
+    out.putVarInt64(self.time_)
+    out.putVarInt32(16)
+    out.putVarInt32(self.level_)
+    out.putVarInt32(26)
+    out.putPrefixedString(self.log_message_)
+
+  def OutputPartial(self, out):
+    if (self.has_time_):
+      out.putVarInt32(8)
+      out.putVarInt64(self.time_)
+    if (self.has_level_):
+      out.putVarInt32(16)
+      out.putVarInt32(self.level_)
+    if (self.has_log_message_):
+      out.putVarInt32(26)
+      out.putPrefixedString(self.log_message_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 8:
+        self.set_time(d.getVarInt64())
+        continue
+      if tt == 16:
+        self.set_level(d.getVarInt32())
+        continue
+      if tt == 26:
+        self.set_log_message(d.getPrefixedString())
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_time_: res+=prefix+("time: %s\n" % self.DebugFormatInt64(self.time_))
+    if self.has_level_: res+=prefix+("level: %s\n" % self.DebugFormatInt32(self.level_))
+    if self.has_log_message_: res+=prefix+("log_message: %s\n" % self.DebugFormatString(self.log_message_))
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  ktime = 1
+  klevel = 2
+  klog_message = 3
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "time",
+    2: "level",
+    3: "log_message",
+  }, 3)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.NUMERIC,
+    2: ProtocolBuffer.Encoder.NUMERIC,
+    3: ProtocolBuffer.Encoder.STRING,
+  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+  _SERIALIZED_DESCRIPTOR = array.array('B')
+  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WithcHBob3N0aW5nL2FwaS9sb2dzZXJ2aWNlL2xvZ19zZXJ2aWNlLnByb3RvChJhcHBob3N0aW5nLkxvZ0xpbmUTGgR0aW1lIAEoADADOAIUExoFbGV2ZWwgAigAMAU4AhQTGgtsb2dfbWVzc2FnZSADKAIwCTgCFMIBF2FwcGhvc3RpbmcuRmx1c2hSZXF1ZXN0"))
+  if _net_proto___parse__python is not None:
+    _net_proto___parse__python.RegisterType(
+        _SERIALIZED_DESCRIPTOR.tostring())
+
+class RequestLog(ProtocolBuffer.ProtocolMessage):
+  has_app_id_ = 0
+  app_id_ = ""
+  has_version_id_ = 0
+  version_id_ = ""
+  has_request_id_ = 0
+  request_id_ = ""
+  has_ip_ = 0
+  ip_ = ""
+  has_nickname_ = 0
+  nickname_ = ""
+  has_start_time_ = 0
+  start_time_ = 0
+  has_end_time_ = 0
+  end_time_ = 0
+  has_latency_ = 0
+  latency_ = 0
+  has_mcycles_ = 0
+  mcycles_ = 0
+  has_method_ = 0
+  method_ = ""
+  has_resource_ = 0
+  resource_ = ""
+  has_http_version_ = 0
+  http_version_ = ""
+  has_status_ = 0
+  status_ = 0
+  has_response_size_ = 0
+  response_size_ = 0
+  has_referrer_ = 0
+  referrer_ = ""
+  has_user_agent_ = 0
+  user_agent_ = ""
+  has_url_map_entry_ = 0
+  url_map_entry_ = ""
+  has_combined_ = 0
+  combined_ = ""
+  has_api_mcycles_ = 0
+  api_mcycles_ = 0
+  has_host_ = 0
+  host_ = ""
+  has_cost_ = 0
+  cost_ = 0.0
+  has_task_queue_name_ = 0
+  task_queue_name_ = ""
+  has_task_name_ = 0
+  task_name_ = ""
+  has_was_loading_request_ = 0
+  was_loading_request_ = 0
+  has_pending_time_ = 0
+  pending_time_ = 0
+  has_replica_index_ = 0
+  replica_index_ = -1
+  has_finished_ = 0
+  finished_ = 1
+  has_clone_key_ = 0
+  clone_key_ = ""
+  has_exit_reason_ = 0
+  exit_reason_ = 0
+  has_was_throttled_for_time_ = 0
+  was_throttled_for_time_ = 0
+  has_was_throttled_for_requests_ = 0
+  was_throttled_for_requests_ = 0
+  has_throttled_time_ = 0
+  throttled_time_ = 0
+  has_server_name_ = 0
+  server_name_ = ""
+
+  def __init__(self, contents=None):
+    self.line_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def app_id(self): return self.app_id_
+
+  def set_app_id(self, x):
+    self.has_app_id_ = 1
+    self.app_id_ = x
+
+  def clear_app_id(self):
+    if self.has_app_id_:
+      self.has_app_id_ = 0
+      self.app_id_ = ""
+
+  def has_app_id(self): return self.has_app_id_
+
+  def version_id(self): return self.version_id_
+
+  def set_version_id(self, x):
+    self.has_version_id_ = 1
+    self.version_id_ = x
+
+  def clear_version_id(self):
+    if self.has_version_id_:
+      self.has_version_id_ = 0
+      self.version_id_ = ""
+
+  def has_version_id(self): return self.has_version_id_
+
+  def request_id(self): return self.request_id_
+
+  def set_request_id(self, x):
+    self.has_request_id_ = 1
+    self.request_id_ = x
+
+  def clear_request_id(self):
+    if self.has_request_id_:
+      self.has_request_id_ = 0
+      self.request_id_ = ""
+
+  def has_request_id(self): return self.has_request_id_
+
+  def ip(self): return self.ip_
+
+  def set_ip(self, x):
+    self.has_ip_ = 1
+    self.ip_ = x
+
+  def clear_ip(self):
+    if self.has_ip_:
+      self.has_ip_ = 0
+      self.ip_ = ""
+
+  def has_ip(self): return self.has_ip_
+
+  def nickname(self): return self.nickname_
+
+  def set_nickname(self, x):
+    self.has_nickname_ = 1
+    self.nickname_ = x
+
+  def clear_nickname(self):
+    if self.has_nickname_:
+      self.has_nickname_ = 0
+      self.nickname_ = ""
+
+  def has_nickname(self): return self.has_nickname_
+
+  def start_time(self): return self.start_time_
+
+  def set_start_time(self, x):
+    self.has_start_time_ = 1
+    self.start_time_ = x
+
+  def clear_start_time(self):
+    if self.has_start_time_:
+      self.has_start_time_ = 0
+      self.start_time_ = 0
+
+  def has_start_time(self): return self.has_start_time_
+
+  def end_time(self): return self.end_time_
+
+  def set_end_time(self, x):
+    self.has_end_time_ = 1
+    self.end_time_ = x
+
+  def clear_end_time(self):
+    if self.has_end_time_:
+      self.has_end_time_ = 0
+      self.end_time_ = 0
+
+  def has_end_time(self): return self.has_end_time_
+
+  def latency(self): return self.latency_
+
+  def set_latency(self, x):
+    self.has_latency_ = 1
+    self.latency_ = x
+
+  def clear_latency(self):
+    if self.has_latency_:
+      self.has_latency_ = 0
+      self.latency_ = 0
+
+  def has_latency(self): return self.has_latency_
+
+  def mcycles(self): return self.mcycles_
+
+  def set_mcycles(self, x):
+    self.has_mcycles_ = 1
+    self.mcycles_ = x
+
+  def clear_mcycles(self):
+    if self.has_mcycles_:
+      self.has_mcycles_ = 0
+      self.mcycles_ = 0
+
+  def has_mcycles(self): return self.has_mcycles_
+
+  def method(self): return self.method_
+
+  def set_method(self, x):
+    self.has_method_ = 1
+    self.method_ = x
+
+  def clear_method(self):
+    if self.has_method_:
+      self.has_method_ = 0
+      self.method_ = ""
+
+  def has_method(self): return self.has_method_
+
+  def resource(self): return self.resource_
+
+  def set_resource(self, x):
+    self.has_resource_ = 1
+    self.resource_ = x
+
+  def clear_resource(self):
+    if self.has_resource_:
+      self.has_resource_ = 0
+      self.resource_ = ""
+
+  def has_resource(self): return self.has_resource_
+
+  def http_version(self): return self.http_version_
+
+  def set_http_version(self, x):
+    self.has_http_version_ = 1
+    self.http_version_ = x
+
+  def clear_http_version(self):
+    if self.has_http_version_:
+      self.has_http_version_ = 0
+      self.http_version_ = ""
+
+  def has_http_version(self): return self.has_http_version_
+
+  def status(self): return self.status_
+
+  def set_status(self, x):
+    self.has_status_ = 1
+    self.status_ = x
+
+  def clear_status(self):
+    if self.has_status_:
+      self.has_status_ = 0
+      self.status_ = 0
+
+  def has_status(self): return self.has_status_
+
+  def response_size(self): return self.response_size_
+
+  def set_response_size(self, x):
+    self.has_response_size_ = 1
+    self.response_size_ = x
+
+  def clear_response_size(self):
+    if self.has_response_size_:
+      self.has_response_size_ = 0
+      self.response_size_ = 0
+
+  def has_response_size(self): return self.has_response_size_
+
+  def referrer(self): return self.referrer_
+
+  def set_referrer(self, x):
+    self.has_referrer_ = 1
+    self.referrer_ = x
+
+  def clear_referrer(self):
+    if self.has_referrer_:
+      self.has_referrer_ = 0
+      self.referrer_ = ""
+
+  def has_referrer(self): return self.has_referrer_
+
+  def user_agent(self): return self.user_agent_
+
+  def set_user_agent(self, x):
+    self.has_user_agent_ = 1
+    self.user_agent_ = x
+
+  def clear_user_agent(self):
+    if self.has_user_agent_:
+      self.has_user_agent_ = 0
+      self.user_agent_ = ""
+
+  def has_user_agent(self): return self.has_user_agent_
+
+  def url_map_entry(self): return self.url_map_entry_
+
+  def set_url_map_entry(self, x):
+    self.has_url_map_entry_ = 1
+    self.url_map_entry_ = x
+
+  def clear_url_map_entry(self):
+    if self.has_url_map_entry_:
+      self.has_url_map_entry_ = 0
+      self.url_map_entry_ = ""
+
+  def has_url_map_entry(self): return self.has_url_map_entry_
+
+  def combined(self): return self.combined_
+
+  def set_combined(self, x):
+    self.has_combined_ = 1
+    self.combined_ = x
+
+  def clear_combined(self):
+    if self.has_combined_:
+      self.has_combined_ = 0
+      self.combined_ = ""
+
+  def has_combined(self): return self.has_combined_
+
+  def api_mcycles(self): return self.api_mcycles_
+
+  def set_api_mcycles(self, x):
+    self.has_api_mcycles_ = 1
+    self.api_mcycles_ = x
+
+  def clear_api_mcycles(self):
+    if self.has_api_mcycles_:
+      self.has_api_mcycles_ = 0
+      self.api_mcycles_ = 0
+
+  def has_api_mcycles(self): return self.has_api_mcycles_
+
+  def host(self): return self.host_
+
+  def set_host(self, x):
+    self.has_host_ = 1
+    self.host_ = x
+
+  def clear_host(self):
+    if self.has_host_:
+      self.has_host_ = 0
+      self.host_ = ""
+
+  def has_host(self): return self.has_host_
+
+  def cost(self): return self.cost_
+
+  def set_cost(self, x):
+    self.has_cost_ = 1
+    self.cost_ = x
+
+  def clear_cost(self):
+    if self.has_cost_:
+      self.has_cost_ = 0
+      self.cost_ = 0.0
+
+  def has_cost(self): return self.has_cost_
+
+  def task_queue_name(self): return self.task_queue_name_
+
+  def set_task_queue_name(self, x):
+    self.has_task_queue_name_ = 1
+    self.task_queue_name_ = x
+
+  def clear_task_queue_name(self):
+    if self.has_task_queue_name_:
+      self.has_task_queue_name_ = 0
+      self.task_queue_name_ = ""
+
+  def has_task_queue_name(self): return self.has_task_queue_name_
+
+  def task_name(self): return self.task_name_
+
+  def set_task_name(self, x):
+    self.has_task_name_ = 1
+    self.task_name_ = x
+
+  def clear_task_name(self):
+    if self.has_task_name_:
+      self.has_task_name_ = 0
+      self.task_name_ = ""
+
+  def has_task_name(self): return self.has_task_name_
+
+  def was_loading_request(self): return self.was_loading_request_
+
+  def set_was_loading_request(self, x):
+    self.has_was_loading_request_ = 1
+    self.was_loading_request_ = x
+
+  def clear_was_loading_request(self):
+    if self.has_was_loading_request_:
+      self.has_was_loading_request_ = 0
+      self.was_loading_request_ = 0
+
+  def has_was_loading_request(self): return self.has_was_loading_request_
+
+  def pending_time(self): return self.pending_time_
+
+  def set_pending_time(self, x):
+    self.has_pending_time_ = 1
+    self.pending_time_ = x
+
+  def clear_pending_time(self):
+    if self.has_pending_time_:
+      self.has_pending_time_ = 0
+      self.pending_time_ = 0
+
+  def has_pending_time(self): return self.has_pending_time_
+
+  def replica_index(self): return self.replica_index_
+
+  def set_replica_index(self, x):
+    self.has_replica_index_ = 1
+    self.replica_index_ = x
+
+  def clear_replica_index(self):
+    if self.has_replica_index_:
+      self.has_replica_index_ = 0
+      self.replica_index_ = -1
+
+  def has_replica_index(self): return self.has_replica_index_
+
+  def finished(self): return self.finished_
+
+  def set_finished(self, x):
+    self.has_finished_ = 1
+    self.finished_ = x
+
+  def clear_finished(self):
+    if self.has_finished_:
+      self.has_finished_ = 0
+      self.finished_ = 1
+
+  def has_finished(self): return self.has_finished_
+
+  def clone_key(self): return self.clone_key_
+
+  def set_clone_key(self, x):
+    self.has_clone_key_ = 1
+    self.clone_key_ = x
+
+  def clear_clone_key(self):
+    if self.has_clone_key_:
+      self.has_clone_key_ = 0
+      self.clone_key_ = ""
+
+  def has_clone_key(self): return self.has_clone_key_
+
+  def line_size(self): return len(self.line_)
+  def line_list(self): return self.line_
+
+  def line(self, i):
+    return self.line_[i]
+
+  def mutable_line(self, i):
+    return self.line_[i]
+
+  def add_line(self):
+    x = LogLine()
+    self.line_.append(x)
+    return x
+
+  def clear_line(self):
+    self.line_ = []
+
+  def exit_reason(self): return self.exit_reason_
+
+  def set_exit_reason(self, x):
+    self.has_exit_reason_ = 1
+    self.exit_reason_ = x
+
+  def clear_exit_reason(self):
+    if self.has_exit_reason_:
+      self.has_exit_reason_ = 0
+      self.exit_reason_ = 0
+
+  def has_exit_reason(self): return self.has_exit_reason_
+
+  def was_throttled_for_time(self): return self.was_throttled_for_time_
+
+  def set_was_throttled_for_time(self, x):
+    self.has_was_throttled_for_time_ = 1
+    self.was_throttled_for_time_ = x
+
+  def clear_was_throttled_for_time(self):
+    if self.has_was_throttled_for_time_:
+      self.has_was_throttled_for_time_ = 0
+      self.was_throttled_for_time_ = 0
+
+  def has_was_throttled_for_time(self): return self.has_was_throttled_for_time_
+
+  def was_throttled_for_requests(self): return self.was_throttled_for_requests_
+
+  def set_was_throttled_for_requests(self, x):
+    self.has_was_throttled_for_requests_ = 1
+    self.was_throttled_for_requests_ = x
+
+  def clear_was_throttled_for_requests(self):
+    if self.has_was_throttled_for_requests_:
+      self.has_was_throttled_for_requests_ = 0
+      self.was_throttled_for_requests_ = 0
+
+  def has_was_throttled_for_requests(self): return self.has_was_throttled_for_requests_
+
+  def throttled_time(self): return self.throttled_time_
+
+  def set_throttled_time(self, x):
+    self.has_throttled_time_ = 1
+    self.throttled_time_ = x
+
+  def clear_throttled_time(self):
+    if self.has_throttled_time_:
+      self.has_throttled_time_ = 0
+      self.throttled_time_ = 0
+
+  def has_throttled_time(self): return self.has_throttled_time_
+
+  def server_name(self): return self.server_name_
+
+  def set_server_name(self, x):
+    self.has_server_name_ = 1
+    self.server_name_ = x
+
+  def clear_server_name(self):
+    if self.has_server_name_:
+      self.has_server_name_ = 0
+      self.server_name_ = ""
+
+  def has_server_name(self): return self.has_server_name_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_app_id()): self.set_app_id(x.app_id())
+    if (x.has_version_id()): self.set_version_id(x.version_id())
+    if (x.has_request_id()): self.set_request_id(x.request_id())
+    if (x.has_ip()): self.set_ip(x.ip())
+    if (x.has_nickname()): self.set_nickname(x.nickname())
+    if (x.has_start_time()): self.set_start_time(x.start_time())
+    if (x.has_end_time()): self.set_end_time(x.end_time())
+    if (x.has_latency()): self.set_latency(x.latency())
+    if (x.has_mcycles()): self.set_mcycles(x.mcycles())
+    if (x.has_method()): self.set_method(x.method())
+    if (x.has_resource()): self.set_resource(x.resource())
+    if (x.has_http_version()): self.set_http_version(x.http_version())
+    if (x.has_status()): self.set_status(x.status())
+    if (x.has_response_size()): self.set_response_size(x.response_size())
+    if (x.has_referrer()): self.set_referrer(x.referrer())
+    if (x.has_user_agent()): self.set_user_agent(x.user_agent())
+    if (x.has_url_map_entry()): self.set_url_map_entry(x.url_map_entry())
+    if (x.has_combined()): self.set_combined(x.combined())
+    if (x.has_api_mcycles()): self.set_api_mcycles(x.api_mcycles())
+    if (x.has_host()): self.set_host(x.host())
+    if (x.has_cost()): self.set_cost(x.cost())
+    if (x.has_task_queue_name()): self.set_task_queue_name(x.task_queue_name())
+    if (x.has_task_name()): self.set_task_name(x.task_name())
+    if (x.has_was_loading_request()): self.set_was_loading_request(x.was_loading_request())
+    if (x.has_pending_time()): self.set_pending_time(x.pending_time())
+    if (x.has_replica_index()): self.set_replica_index(x.replica_index())
+    if (x.has_finished()): self.set_finished(x.finished())
+    if (x.has_clone_key()): self.set_clone_key(x.clone_key())
+    for i in xrange(x.line_size()): self.add_line().CopyFrom(x.line(i))
+    if (x.has_exit_reason()): self.set_exit_reason(x.exit_reason())
+    if (x.has_was_throttled_for_time()): self.set_was_throttled_for_time(x.was_throttled_for_time())
+    if (x.has_was_throttled_for_requests()): self.set_was_throttled_for_requests(x.was_throttled_for_requests())
+    if (x.has_throttled_time()): self.set_throttled_time(x.throttled_time())
+    if (x.has_server_name()): self.set_server_name(x.server_name())
+
+  if _net_proto___parse__python is not None:
+    def _CMergeFromString(self, s):
+      _net_proto___parse__python.MergeFromString(self, 'apphosting.RequestLog', s)
+
+  if _net_proto___parse__python is not None:
+    def _CEncode(self):
+      return _net_proto___parse__python.Encode(self, 'apphosting.RequestLog')
+
+  if _net_proto___parse__python is not None:
+    def _CEncodePartial(self):
+      return _net_proto___parse__python.EncodePartial(self, 'apphosting.RequestLog')
+
+  if _net_proto___parse__python is not None:
+    def _CToASCII(self, output_format):
+      return _net_proto___parse__python.ToASCII(self, 'apphosting.RequestLog', output_format)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCII(self, s):
+      _net_proto___parse__python.ParseASCII(self, 'apphosting.RequestLog', s)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCIIIgnoreUnknown(self, s):
+      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.RequestLog', s)
+
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_app_id_ != x.has_app_id_: return 0
+    if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
+    if self.has_version_id_ != x.has_version_id_: return 0
+    if self.has_version_id_ and self.version_id_ != x.version_id_: return 0
+    if self.has_request_id_ != x.has_request_id_: return 0
+    if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
+    if self.has_ip_ != x.has_ip_: return 0
+    if self.has_ip_ and self.ip_ != x.ip_: return 0
+    if self.has_nickname_ != x.has_nickname_: return 0
+    if self.has_nickname_ and self.nickname_ != x.nickname_: return 0
+    if self.has_start_time_ != x.has_start_time_: return 0
+    if self.has_start_time_ and self.start_time_ != x.start_time_: return 0
+    if self.has_end_time_ != x.has_end_time_: return 0
+    if self.has_end_time_ and self.end_time_ != x.end_time_: return 0
+    if self.has_latency_ != x.has_latency_: return 0
+    if self.has_latency_ and self.latency_ != x.latency_: return 0
+    if self.has_mcycles_ != x.has_mcycles_: return 0
+    if self.has_mcycles_ and self.mcycles_ != x.mcycles_: return 0
+    if self.has_method_ != x.has_method_: return 0
+    if self.has_method_ and self.method_ != x.method_: return 0
+    if self.has_resource_ != x.has_resource_: return 0
+    if self.has_resource_ and self.resource_ != x.resource_: return 0
+    if self.has_http_version_ != x.has_http_version_: return 0
+    if self.has_http_version_ and self.http_version_ != x.http_version_: return 0
+    if self.has_status_ != x.has_status_: return 0
+    if self.has_status_ and self.status_ != x.status_: return 0
+    if self.has_response_size_ != x.has_response_size_: return 0
+    if self.has_response_size_ and self.response_size_ != x.response_size_: return 0
+    if self.has_referrer_ != x.has_referrer_: return 0
+    if self.has_referrer_ and self.referrer_ != x.referrer_: return 0
+    if self.has_user_agent_ != x.has_user_agent_: return 0
+    if self.has_user_agent_ and self.user_agent_ != x.user_agent_: return 0
+    if self.has_url_map_entry_ != x.has_url_map_entry_: return 0
+    if self.has_url_map_entry_ and self.url_map_entry_ != x.url_map_entry_: return 0
+    if self.has_combined_ != x.has_combined_: return 0
+    if self.has_combined_ and self.combined_ != x.combined_: return 0
+    if self.has_api_mcycles_ != x.has_api_mcycles_: return 0
+    if self.has_api_mcycles_ and self.api_mcycles_ != x.api_mcycles_: return 0
+    if self.has_host_ != x.has_host_: return 0
+    if self.has_host_ and self.host_ != x.host_: return 0
+    if self.has_cost_ != x.has_cost_: return 0
+    if self.has_cost_ and self.cost_ != x.cost_: return 0
+    if self.has_task_queue_name_ != x.has_task_queue_name_: return 0
+    if self.has_task_queue_name_ and self.task_queue_name_ != x.task_queue_name_: return 0
+    if self.has_task_name_ != x.has_task_name_: return 0
+    if self.has_task_name_ and self.task_name_ != x.task_name_: return 0
+    if self.has_was_loading_request_ != x.has_was_loading_request_: return 0
+    if self.has_was_loading_request_ and self.was_loading_request_ != x.was_loading_request_: return 0
+    if self.has_pending_time_ != x.has_pending_time_: return 0
+    if self.has_pending_time_ and self.pending_time_ != x.pending_time_: return 0
+    if self.has_replica_index_ != x.has_replica_index_: return 0
+    if self.has_replica_index_ and self.replica_index_ != x.replica_index_: return 0
+    if self.has_finished_ != x.has_finished_: return 0
+    if self.has_finished_ and self.finished_ != x.finished_: return 0
+    if self.has_clone_key_ != x.has_clone_key_: return 0
+    if self.has_clone_key_ and self.clone_key_ != x.clone_key_: return 0
+    if len(self.line_) != len(x.line_): return 0
+    for e1, e2 in zip(self.line_, x.line_):
+      if e1 != e2: return 0
+    if self.has_exit_reason_ != x.has_exit_reason_: return 0
+    if self.has_exit_reason_ and self.exit_reason_ != x.exit_reason_: return 0
+    if self.has_was_throttled_for_time_ != x.has_was_throttled_for_time_: return 0
+    if self.has_was_throttled_for_time_ and self.was_throttled_for_time_ != x.was_throttled_for_time_: return 0
+    if self.has_was_throttled_for_requests_ != x.has_was_throttled_for_requests_: return 0
+    if self.has_was_throttled_for_requests_ and self.was_throttled_for_requests_ != x.was_throttled_for_requests_: return 0
+    if self.has_throttled_time_ != x.has_throttled_time_: return 0
+    if self.has_throttled_time_ and self.throttled_time_ != x.throttled_time_: return 0
+    if self.has_server_name_ != x.has_server_name_: return 0
+    if self.has_server_name_ and self.server_name_ != x.server_name_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_app_id_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: app_id not set.')
+    if (not self.has_version_id_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: version_id not set.')
+    if (not self.has_request_id_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: request_id not set.')
+    if (not self.has_ip_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: ip not set.')
+    if (not self.has_start_time_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: start_time not set.')
+    if (not self.has_end_time_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: end_time not set.')
+    if (not self.has_latency_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: latency not set.')
+    if (not self.has_mcycles_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: mcycles not set.')
+    if (not self.has_method_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: method not set.')
+    if (not self.has_resource_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: resource not set.')
+    if (not self.has_http_version_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: http_version not set.')
+    if (not self.has_status_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: status not set.')
+    if (not self.has_response_size_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: response_size not set.')
+    if (not self.has_url_map_entry_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: url_map_entry not set.')
+    if (not self.has_combined_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: combined not set.')
+    for p in self.line_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.app_id_))
+    n += self.lengthString(len(self.version_id_))
+    n += self.lengthString(len(self.request_id_))
+    n += self.lengthString(len(self.ip_))
+    if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
+    n += self.lengthVarInt64(self.start_time_)
+    n += self.lengthVarInt64(self.end_time_)
+    n += self.lengthVarInt64(self.latency_)
+    n += self.lengthVarInt64(self.mcycles_)
+    n += self.lengthString(len(self.method_))
+    n += self.lengthString(len(self.resource_))
+    n += self.lengthString(len(self.http_version_))
+    n += self.lengthVarInt64(self.status_)
+    n += self.lengthVarInt64(self.response_size_)
+    if (self.has_referrer_): n += 1 + self.lengthString(len(self.referrer_))
+    if (self.has_user_agent_): n += 2 + self.lengthString(len(self.user_agent_))
+    n += self.lengthString(len(self.url_map_entry_))
+    n += self.lengthString(len(self.combined_))
+    if (self.has_api_mcycles_): n += 2 + self.lengthVarInt64(self.api_mcycles_)
+    if (self.has_host_): n += 2 + self.lengthString(len(self.host_))
+    if (self.has_cost_): n += 10
+    if (self.has_task_queue_name_): n += 2 + self.lengthString(len(self.task_queue_name_))
+    if (self.has_task_name_): n += 2 + self.lengthString(len(self.task_name_))
+    if (self.has_was_loading_request_): n += 3
+    if (self.has_pending_time_): n += 2 + self.lengthVarInt64(self.pending_time_)
+    if (self.has_replica_index_): n += 2 + self.lengthVarInt64(self.replica_index_)
+    if (self.has_finished_): n += 3
+    if (self.has_clone_key_): n += 2 + self.lengthString(len(self.clone_key_))
+    n += 2 * len(self.line_)
+    for i in xrange(len(self.line_)): n += self.lengthString(self.line_[i].ByteSize())
+    if (self.has_exit_reason_): n += 2 + self.lengthVarInt64(self.exit_reason_)
+    if (self.has_was_throttled_for_time_): n += 3
+    if (self.has_was_throttled_for_requests_): n += 3
+    if (self.has_throttled_time_): n += 2 + self.lengthVarInt64(self.throttled_time_)
+    if (self.has_server_name_): n += 2 + self.lengthString(len(self.server_name_))
+    return n + 17
+
+  def ByteSizePartial(self):
+    n = 0
+    if (self.has_app_id_):
+      n += 1
+      n += self.lengthString(len(self.app_id_))
+    if (self.has_version_id_):
+      n += 1
+      n += self.lengthString(len(self.version_id_))
+    if (self.has_request_id_):
+      n += 1
+      n += self.lengthString(len(self.request_id_))
+    if (self.has_ip_):
+      n += 1
+      n += self.lengthString(len(self.ip_))
+    if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
+    if (self.has_start_time_):
+      n += 1
+      n += self.lengthVarInt64(self.start_time_)
+    if (self.has_end_time_):
+      n += 1
+      n += self.lengthVarInt64(self.end_time_)
+    if (self.has_latency_):
+      n += 1
+      n += self.lengthVarInt64(self.latency_)
+    if (self.has_mcycles_):
+      n += 1
+      n += self.lengthVarInt64(self.mcycles_)
+    if (self.has_method_):
+      n += 1
+      n += self.lengthString(len(self.method_))
+    if (self.has_resource_):
+      n += 1
+      n += self.lengthString(len(self.resource_))
+    if (self.has_http_version_):
+      n += 1
+      n += self.lengthString(len(self.http_version_))
+    if (self.has_status_):
+      n += 1
+      n += self.lengthVarInt64(self.status_)
+    if (self.has_response_size_):
+      n += 1
+      n += self.lengthVarInt64(self.response_size_)
+    if (self.has_referrer_): n += 1 + self.lengthString(len(self.referrer_))
+    if (self.has_user_agent_): n += 2 + self.lengthString(len(self.user_agent_))
+    if (self.has_url_map_entry_):
+      n += 2
+      n += self.lengthString(len(self.url_map_entry_))
+    if (self.has_combined_):
+      n += 2
+      n += self.lengthString(len(self.combined_))
+    if (self.has_api_mcycles_): n += 2 + self.lengthVarInt64(self.api_mcycles_)
+    if (self.has_host_): n += 2 + self.lengthString(len(self.host_))
+    if (self.has_cost_): n += 10
+    if (self.has_task_queue_name_): n += 2 + self.lengthString(len(self.task_queue_name_))
+    if (self.has_task_name_): n += 2 + self.lengthString(len(self.task_name_))
+    if (self.has_was_loading_request_): n += 3
+    if (self.has_pending_time_): n += 2 + self.lengthVarInt64(self.pending_time_)
+    if (self.has_replica_index_): n += 2 + self.lengthVarInt64(self.replica_index_)
+    if (self.has_finished_): n += 3
+    if (self.has_clone_key_): n += 2 + self.lengthString(len(self.clone_key_))
+    n += 2 * len(self.line_)
+    for i in xrange(len(self.line_)): n += self.lengthString(self.line_[i].ByteSizePartial())
+    if (self.has_exit_reason_): n += 2 + self.lengthVarInt64(self.exit_reason_)
+    if (self.has_was_throttled_for_time_): n += 3
+    if (self.has_was_throttled_for_requests_): n += 3
+    if (self.has_throttled_time_): n += 2 + self.lengthVarInt64(self.throttled_time_)
+    if (self.has_server_name_): n += 2 + self.lengthString(len(self.server_name_))
+    return n
+
+  def Clear(self):
+    self.clear_app_id()
+    self.clear_version_id()
+    self.clear_request_id()
+    self.clear_ip()
+    self.clear_nickname()
+    self.clear_start_time()
+    self.clear_end_time()
+    self.clear_latency()
+    self.clear_mcycles()
+    self.clear_method()
+    self.clear_resource()
+    self.clear_http_version()
+    self.clear_status()
+    self.clear_response_size()
+    self.clear_referrer()
+    self.clear_user_agent()
+    self.clear_url_map_entry()
+    self.clear_combined()
+    self.clear_api_mcycles()
+    self.clear_host()
+    self.clear_cost()
+    self.clear_task_queue_name()
+    self.clear_task_name()
+    self.clear_was_loading_request()
+    self.clear_pending_time()
+    self.clear_replica_index()
+    self.clear_finished()
+    self.clear_clone_key()
+    self.clear_line()
+    self.clear_exit_reason()
+    self.clear_was_throttled_for_time()
+    self.clear_was_throttled_for_requests()
+    self.clear_throttled_time()
+    self.clear_server_name()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putPrefixedString(self.app_id_)
+    out.putVarInt32(18)
+    out.putPrefixedString(self.version_id_)
+    out.putVarInt32(26)
+    out.putPrefixedString(self.request_id_)
+    out.putVarInt32(34)
+    out.putPrefixedString(self.ip_)
+    if (self.has_nickname_):
+      out.putVarInt32(42)
+      out.putPrefixedString(self.nickname_)
+    out.putVarInt32(48)
+    out.putVarInt64(self.start_time_)
+    out.putVarInt32(56)
+    out.putVarInt64(self.end_time_)
+    out.putVarInt32(64)
+    out.putVarInt64(self.latency_)
+    out.putVarInt32(72)
+    out.putVarInt64(self.mcycles_)
+    out.putVarInt32(82)
+    out.putPrefixedString(self.method_)
+    out.putVarInt32(90)
+    out.putPrefixedString(self.resource_)
+    out.putVarInt32(98)
+    out.putPrefixedString(self.http_version_)
+    out.putVarInt32(104)
+    out.putVarInt32(self.status_)
+    out.putVarInt32(112)
+    out.putVarInt64(self.response_size_)
+    if (self.has_referrer_):
+      out.putVarInt32(122)
+      out.putPrefixedString(self.referrer_)
+    if (self.has_user_agent_):
+      out.putVarInt32(130)
+      out.putPrefixedString(self.user_agent_)
+    out.putVarInt32(138)
+    out.putPrefixedString(self.url_map_entry_)
+    out.putVarInt32(146)
+    out.putPrefixedString(self.combined_)
+    if (self.has_api_mcycles_):
+      out.putVarInt32(152)
+      out.putVarInt64(self.api_mcycles_)
+    if (self.has_host_):
+      out.putVarInt32(162)
+      out.putPrefixedString(self.host_)
+    if (self.has_cost_):
+      out.putVarInt32(169)
+      out.putDouble(self.cost_)
+    if (self.has_task_queue_name_):
+      out.putVarInt32(178)
+      out.putPrefixedString(self.task_queue_name_)
+    if (self.has_task_name_):
+      out.putVarInt32(186)
+      out.putPrefixedString(self.task_name_)
+    if (self.has_was_loading_request_):
+      out.putVarInt32(192)
+      out.putBoolean(self.was_loading_request_)
+    if (self.has_pending_time_):
+      out.putVarInt32(200)
+      out.putVarInt64(self.pending_time_)
+    if (self.has_replica_index_):
+      out.putVarInt32(208)
+      out.putVarInt32(self.replica_index_)
+    if (self.has_finished_):
+      out.putVarInt32(216)
+      out.putBoolean(self.finished_)
+    if (self.has_clone_key_):
+      out.putVarInt32(226)
+      out.putPrefixedString(self.clone_key_)
+    for i in xrange(len(self.line_)):
+      out.putVarInt32(234)
+      out.putVarInt32(self.line_[i].ByteSize())
+      self.line_[i].OutputUnchecked(out)
+    if (self.has_exit_reason_):
+      out.putVarInt32(240)
+      out.putVarInt32(self.exit_reason_)
+    if (self.has_was_throttled_for_time_):
+      out.putVarInt32(248)
+      out.putBoolean(self.was_throttled_for_time_)
+    if (self.has_was_throttled_for_requests_):
+      out.putVarInt32(256)
+      out.putBoolean(self.was_throttled_for_requests_)
+    if (self.has_throttled_time_):
+      out.putVarInt32(264)
+      out.putVarInt64(self.throttled_time_)
+    if (self.has_server_name_):
+      out.putVarInt32(274)
+      out.putPrefixedString(self.server_name_)
+
+  def OutputPartial(self, out):
+    if (self.has_app_id_):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.app_id_)
+    if (self.has_version_id_):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.version_id_)
+    if (self.has_request_id_):
+      out.putVarInt32(26)
+      out.putPrefixedString(self.request_id_)
+    if (self.has_ip_):
+      out.putVarInt32(34)
+      out.putPrefixedString(self.ip_)
+    if (self.has_nickname_):
+      out.putVarInt32(42)
+      out.putPrefixedString(self.nickname_)
+    if (self.has_start_time_):
+      out.putVarInt32(48)
+      out.putVarInt64(self.start_time_)
+    if (self.has_end_time_):
+      out.putVarInt32(56)
+      out.putVarInt64(self.end_time_)
+    if (self.has_latency_):
+      out.putVarInt32(64)
+      out.putVarInt64(self.latency_)
+    if (self.has_mcycles_):
+      out.putVarInt32(72)
+      out.putVarInt64(self.mcycles_)
+    if (self.has_method_):
+      out.putVarInt32(82)
+      out.putPrefixedString(self.method_)
+    if (self.has_resource_):
+      out.putVarInt32(90)
+      out.putPrefixedString(self.resource_)
+    if (self.has_http_version_):
+      out.putVarInt32(98)
+      out.putPrefixedString(self.http_version_)
+    if (self.has_status_):
+      out.putVarInt32(104)
+      out.putVarInt32(self.status_)
+    if (self.has_response_size_):
+      out.putVarInt32(112)
+      out.putVarInt64(self.response_size_)
+    if (self.has_referrer_):
+      out.putVarInt32(122)
+      out.putPrefixedString(self.referrer_)
+    if (self.has_user_agent_):
+      out.putVarInt32(130)
+      out.putPrefixedString(self.user_agent_)
+    if (self.has_url_map_entry_):
+      out.putVarInt32(138)
+      out.putPrefixedString(self.url_map_entry_)
+    if (self.has_combined_):
+      out.putVarInt32(146)
+      out.putPrefixedString(self.combined_)
+    if (self.has_api_mcycles_):
+      out.putVarInt32(152)
+      out.putVarInt64(self.api_mcycles_)
+    if (self.has_host_):
+      out.putVarInt32(162)
+      out.putPrefixedString(self.host_)
+    if (self.has_cost_):
+      out.putVarInt32(169)
+      out.putDouble(self.cost_)
+    if (self.has_task_queue_name_):
+      out.putVarInt32(178)
+      out.putPrefixedString(self.task_queue_name_)
+    if (self.has_task_name_):
+      out.putVarInt32(186)
+      out.putPrefixedString(self.task_name_)
+    if (self.has_was_loading_request_):
+      out.putVarInt32(192)
+      out.putBoolean(self.was_loading_request_)
+    if (self.has_pending_time_):
+      out.putVarInt32(200)
+      out.putVarInt64(self.pending_time_)
+    if (self.has_replica_index_):
+      out.putVarInt32(208)
+      out.putVarInt32(self.replica_index_)
+    if (self.has_finished_):
+      out.putVarInt32(216)
+      out.putBoolean(self.finished_)
+    if (self.has_clone_key_):
+      out.putVarInt32(226)
+      out.putPrefixedString(self.clone_key_)
+    for i in xrange(len(self.line_)):
+      out.putVarInt32(234)
+      out.putVarInt32(self.line_[i].ByteSizePartial())
+      self.line_[i].OutputPartial(out)
+    if (self.has_exit_reason_):
+      out.putVarInt32(240)
+      out.putVarInt32(self.exit_reason_)
+    if (self.has_was_throttled_for_time_):
+      out.putVarInt32(248)
+      out.putBoolean(self.was_throttled_for_time_)
+    if (self.has_was_throttled_for_requests_):
+      out.putVarInt32(256)
+      out.putBoolean(self.was_throttled_for_requests_)
+    if (self.has_throttled_time_):
+      out.putVarInt32(264)
+      out.putVarInt64(self.throttled_time_)
+    if (self.has_server_name_):
+      out.putVarInt32(274)
+      out.putPrefixedString(self.server_name_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_app_id(d.getPrefixedString())
+        continue
+      if tt == 18:
+        self.set_version_id(d.getPrefixedString())
+        continue
+      if tt == 26:
+        self.set_request_id(d.getPrefixedString())
+        continue
+      if tt == 34:
+        self.set_ip(d.getPrefixedString())
+        continue
+      if tt == 42:
+        self.set_nickname(d.getPrefixedString())
+        continue
+      if tt == 48:
+        self.set_start_time(d.getVarInt64())
+        continue
+      if tt == 56:
+        self.set_end_time(d.getVarInt64())
+        continue
+      if tt == 64:
+        self.set_latency(d.getVarInt64())
+        continue
+      if tt == 72:
+        self.set_mcycles(d.getVarInt64())
+        continue
+      if tt == 82:
+        self.set_method(d.getPrefixedString())
+        continue
+      if tt == 90:
+        self.set_resource(d.getPrefixedString())
+        continue
+      if tt == 98:
+        self.set_http_version(d.getPrefixedString())
+        continue
+      if tt == 104:
+        self.set_status(d.getVarInt32())
+        continue
+      if tt == 112:
+        self.set_response_size(d.getVarInt64())
+        continue
+      if tt == 122:
+        self.set_referrer(d.getPrefixedString())
+        continue
+      if tt == 130:
+        self.set_user_agent(d.getPrefixedString())
+        continue
+      if tt == 138:
+        self.set_url_map_entry(d.getPrefixedString())
+        continue
+      if tt == 146:
+        self.set_combined(d.getPrefixedString())
+        continue
+      if tt == 152:
+        self.set_api_mcycles(d.getVarInt64())
+        continue
+      if tt == 162:
+        self.set_host(d.getPrefixedString())
+        continue
+      if tt == 169:
+        self.set_cost(d.getDouble())
+        continue
+      if tt == 178:
+        self.set_task_queue_name(d.getPrefixedString())
+        continue
+      if tt == 186:
+        self.set_task_name(d.getPrefixedString())
+        continue
+      if tt == 192:
+        self.set_was_loading_request(d.getBoolean())
+        continue
+      if tt == 200:
+        self.set_pending_time(d.getVarInt64())
+        continue
+      if tt == 208:
+        self.set_replica_index(d.getVarInt32())
+        continue
+      if tt == 216:
+        self.set_finished(d.getBoolean())
+        continue
+      if tt == 226:
+        self.set_clone_key(d.getPrefixedString())
+        continue
+      if tt == 234:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.add_line().TryMerge(tmp)
+        continue
+      if tt == 240:
+        self.set_exit_reason(d.getVarInt32())
+        continue
+      if tt == 248:
+        self.set_was_throttled_for_time(d.getBoolean())
+        continue
+      if tt == 256:
+        self.set_was_throttled_for_requests(d.getBoolean())
+        continue
+      if tt == 264:
+        self.set_throttled_time(d.getVarInt64())
+        continue
+      if tt == 274:
+        self.set_server_name(d.getPrefixedString())
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
+    if self.has_version_id_: res+=prefix+("version_id: %s\n" % self.DebugFormatString(self.version_id_))
+    if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
+    if self.has_ip_: res+=prefix+("ip: %s\n" % self.DebugFormatString(self.ip_))
+    if self.has_nickname_: res+=prefix+("nickname: %s\n" % self.DebugFormatString(self.nickname_))
+    if self.has_start_time_: res+=prefix+("start_time: %s\n" % self.DebugFormatInt64(self.start_time_))
+    if self.has_end_time_: res+=prefix+("end_time: %s\n" % self.DebugFormatInt64(self.end_time_))
+    if self.has_latency_: res+=prefix+("latency: %s\n" % self.DebugFormatInt64(self.latency_))
+    if self.has_mcycles_: res+=prefix+("mcycles: %s\n" % self.DebugFormatInt64(self.mcycles_))
+    if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatString(self.method_))
+    if self.has_resource_: res+=prefix+("resource: %s\n" % self.DebugFormatString(self.resource_))
+    if self.has_http_version_: res+=prefix+("http_version: %s\n" % self.DebugFormatString(self.http_version_))
+    if self.has_status_: res+=prefix+("status: %s\n" % self.DebugFormatInt32(self.status_))
+    if self.has_response_size_: res+=prefix+("response_size: %s\n" % self.DebugFormatInt64(self.response_size_))
+    if self.has_referrer_: res+=prefix+("referrer: %s\n" % self.DebugFormatString(self.referrer_))
+    if self.has_user_agent_: res+=prefix+("user_agent: %s\n" % self.DebugFormatString(self.user_agent_))
+    if self.has_url_map_entry_: res+=prefix+("url_map_entry: %s\n" % self.DebugFormatString(self.url_map_entry_))
+    if self.has_combined_: res+=prefix+("combined: %s\n" % self.DebugFormatString(self.combined_))
+    if self.has_api_mcycles_: res+=prefix+("api_mcycles: %s\n" % self.DebugFormatInt64(self.api_mcycles_))
+    if self.has_host_: res+=prefix+("host: %s\n" % self.DebugFormatString(self.host_))
+    if self.has_cost_: res+=prefix+("cost: %s\n" % self.DebugFormat(self.cost_))
+    if self.has_task_queue_name_: res+=prefix+("task_queue_name: %s\n" % self.DebugFormatString(self.task_queue_name_))
+    if self.has_task_name_: res+=prefix+("task_name: %s\n" % self.DebugFormatString(self.task_name_))
+    if self.has_was_loading_request_: res+=prefix+("was_loading_request: %s\n" % self.DebugFormatBool(self.was_loading_request_))
+    if self.has_pending_time_: res+=prefix+("pending_time: %s\n" % self.DebugFormatInt64(self.pending_time_))
+    if self.has_replica_index_: res+=prefix+("replica_index: %s\n" % self.DebugFormatInt32(self.replica_index_))
+    if self.has_finished_: res+=prefix+("finished: %s\n" % self.DebugFormatBool(self.finished_))
+    if self.has_clone_key_: res+=prefix+("clone_key: %s\n" % self.DebugFormatString(self.clone_key_))
+    cnt=0
+    for e in self.line_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("line%s <\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+      cnt+=1
+    if self.has_exit_reason_: res+=prefix+("exit_reason: %s\n" % self.DebugFormatInt32(self.exit_reason_))
+    if self.has_was_throttled_for_time_: res+=prefix+("was_throttled_for_time: %s\n" % self.DebugFormatBool(self.was_throttled_for_time_))
+    if self.has_was_throttled_for_requests_: res+=prefix+("was_throttled_for_requests: %s\n" % self.DebugFormatBool(self.was_throttled_for_requests_))
+    if self.has_throttled_time_: res+=prefix+("throttled_time: %s\n" % self.DebugFormatInt64(self.throttled_time_))
+    if self.has_server_name_: res+=prefix+("server_name: %s\n" % self.DebugFormatString(self.server_name_))
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kapp_id = 1
+  kversion_id = 2
+  krequest_id = 3
+  kip = 4
+  knickname = 5
+  kstart_time = 6
+  kend_time = 7
+  klatency = 8
+  kmcycles = 9
+  kmethod = 10
+  kresource = 11
+  khttp_version = 12
+  kstatus = 13
+  kresponse_size = 14
+  kreferrer = 15
+  kuser_agent = 16
+  kurl_map_entry = 17
+  kcombined = 18
+  kapi_mcycles = 19
+  khost = 20
+  kcost = 21
+  ktask_queue_name = 22
+  ktask_name = 23
+  kwas_loading_request = 24
+  kpending_time = 25
+  kreplica_index = 26
+  kfinished = 27
+  kclone_key = 28
+  kline = 29
+  kexit_reason = 30
+  kwas_throttled_for_time = 31
+  kwas_throttled_for_requests = 32
+  kthrottled_time = 33
+  kserver_name = 34
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "app_id",
+    2: "version_id",
+    3: "request_id",
+    4: "ip",
+    5: "nickname",
+    6: "start_time",
+    7: "end_time",
+    8: "latency",
+    9: "mcycles",
+    10: "method",
+    11: "resource",
+    12: "http_version",
+    13: "status",
+    14: "response_size",
+    15: "referrer",
+    16: "user_agent",
+    17: "url_map_entry",
+    18: "combined",
+    19: "api_mcycles",
+    20: "host",
+    21: "cost",
+    22: "task_queue_name",
+    23: "task_name",
+    24: "was_loading_request",
+    25: "pending_time",
+    26: "replica_index",
+    27: "finished",
+    28: "clone_key",
+    29: "line",
+    30: "exit_reason",
+    31: "was_throttled_for_time",
+    32: "was_throttled_for_requests",
+    33: "throttled_time",
+    34: "server_name",
+  }, 34)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+    2: ProtocolBuffer.Encoder.STRING,
+    3: ProtocolBuffer.Encoder.STRING,
+    4: ProtocolBuffer.Encoder.STRING,
+    5: ProtocolBuffer.Encoder.STRING,
+    6: ProtocolBuffer.Encoder.NUMERIC,
+    7: ProtocolBuffer.Encoder.NUMERIC,
+    8: ProtocolBuffer.Encoder.NUMERIC,
+    9: ProtocolBuffer.Encoder.NUMERIC,
+    10: ProtocolBuffer.Encoder.STRING,
+    11: ProtocolBuffer.Encoder.STRING,
+    12: ProtocolBuffer.Encoder.STRING,
+    13: ProtocolBuffer.Encoder.NUMERIC,
+    14: ProtocolBuffer.Encoder.NUMERIC,
+    15: ProtocolBuffer.Encoder.STRING,
+    16: ProtocolBuffer.Encoder.STRING,
+    17: ProtocolBuffer.Encoder.STRING,
+    18: ProtocolBuffer.Encoder.STRING,
+    19: ProtocolBuffer.Encoder.NUMERIC,
+    20: ProtocolBuffer.Encoder.STRING,
+    21: ProtocolBuffer.Encoder.DOUBLE,
+    22: ProtocolBuffer.Encoder.STRING,
+    23: ProtocolBuffer.Encoder.STRING,
+    24: ProtocolBuffer.Encoder.NUMERIC,
+    25: ProtocolBuffer.Encoder.NUMERIC,
+    26: ProtocolBuffer.Encoder.NUMERIC,
+    27: ProtocolBuffer.Encoder.NUMERIC,
+    28: ProtocolBuffer.Encoder.STRING,
+    29: ProtocolBuffer.Encoder.STRING,
+    30: ProtocolBuffer.Encoder.NUMERIC,
+    31: ProtocolBuffer.Encoder.NUMERIC,
+    32: ProtocolBuffer.Encoder.NUMERIC,
+    33: ProtocolBuffer.Encoder.NUMERIC,
+    34: ProtocolBuffer.Encoder.STRING,
+  }, 34, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+  _SERIALIZED_DESCRIPTOR = array.array('B')
+  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WithcHBob3N0aW5nL2FwaS9sb2dzZXJ2aWNlL2xvZ19zZXJ2aWNlLnByb3RvChVhcHBob3N0aW5nLlJlcXVlc3RMb2cTGgZhcHBfaWQgASgCMAk4AhQTGgp2ZXJzaW9uX2lkIAIoAjAJOAIUExoKcmVxdWVzdF9pZCADKAIwCTgCFBMaAmlwIAQoAjAJOAIUExoIbmlja25hbWUgBSgCMAk4ARQTGgpzdGFydF90aW1lIAYoADADOAIUExoIZW5kX3RpbWUgBygAMAM4AhQTGgdsYXRlbmN5IAgoADADOAIUExoHbWN5Y2xlcyAJKAAwAzgCFBMaBm1ldGhvZCAKKAIwCTgCFBMaCHJlc291cmNlIAsoAjAJOAIUExoMaHR0cF92ZXJzaW9uIAwoAjAJOAIUExoGc3RhdHVzIA0oADAFOAIUExoNcmVzcG9uc2Vfc2l6ZSAOKAAwAzgCFBMaCHJlZmVycmVyIA8oAjAJOAEUExoKdXNlcl9hZ2VudCAQKAIwCTgBFBMaDXVybF9tYXBfZW50cnkgESgCMAk4AhQTGghjb21iaW5lZCASKAIwCTgCFBMaC2FwaV9tY3ljbGVzIBMoADADOAEUExoEaG9zdCAUKAIwCTgBFBMaBGNvc3QgFSgBMAE4ARQTGg90YXNrX3F1ZXVlX25hbWUgFigCMAk4ARQTGgl0YXNrX25hbWUgFygCMAk4ARQTGhN3YXNfbG9hZGluZ19yZXF1ZXN0IBgoADAIOAEUExoMcGVuZGluZ190aW1lIBkoADADOAEUExoNcmVwbGljYV9pbmRleCAaKAAwBTgBQgItMaMBqgEHZGVmYXVsdLIBAi0xpAEUExoIZmluaXNoZWQgGygAMAg4AUIEdHJ1ZaMBqgEHZGVmYXVsdLIBBHRydWWkARQTGgljbG9uZV9rZXkgHCgCMAk4ARQTGgRsaW5lIB0oAjALOANKEmFwcGhvc3RpbmcuTG9nTGluZRQTGgtleGl0X3JlYXNvbiAeKAAwBTgBFBMaFndhc190aHJvdHRsZWRfZm9yX3RpbWUgHygAMAg4ARQTGhp3YXNfdGhyb3R0bGVkX2Zvcl9yZXF1ZXN0cyAgKAAwCDgBFBMaDnRocm90dGxlZF90aW1lICEoADADOAEUExoLc2VydmVyX25hbWUgIigCMAk4ARTCARdhcHBob3N0aW5nLkZsdXNoUmVxdWVzdA=="))
+  if _net_proto___parse__python is not None:
+    _net_proto___parse__python.RegisterType(
+        _SERIALIZED_DESCRIPTOR.tostring())
+
+class LogReadRequest(ProtocolBuffer.ProtocolMessage):
+  has_app_id_ = 0
+  app_id_ = ""
+  has_start_time_ = 0
+  start_time_ = 0
+  has_end_time_ = 0
+  end_time_ = 0
+  has_offset_ = 0
+  offset_ = None
+  has_minimum_log_level_ = 0
+  minimum_log_level_ = 0
+  has_include_incomplete_ = 0
+  include_incomplete_ = 0
+  has_count_ = 0
+  count_ = 0
+  has_include_app_logs_ = 0
+  include_app_logs_ = 0
+  has_include_host_ = 0
+  include_host_ = 0
+  has_include_all_ = 0
+  include_all_ = 0
+  has_cache_iterator_ = 0
+  cache_iterator_ = 0
+
+  def __init__(self, contents=None):
+    self.version_id_ = []
+    self.request_id_ = []
+    self.lazy_init_lock_ = thread.allocate_lock()
+    if contents is not None: self.MergeFromString(contents)
+
+  def app_id(self): return self.app_id_
+
+  def set_app_id(self, x):
+    self.has_app_id_ = 1
+    self.app_id_ = x
+
+  def clear_app_id(self):
+    if self.has_app_id_:
+      self.has_app_id_ = 0
+      self.app_id_ = ""
+
+  def has_app_id(self): return self.has_app_id_
+
+  def version_id_size(self): return len(self.version_id_)
+  def version_id_list(self): return self.version_id_
+
+  def version_id(self, i):
+    return self.version_id_[i]
+
+  def set_version_id(self, i, x):
+    self.version_id_[i] = x
+
+  def add_version_id(self, x):
+    self.version_id_.append(x)
+
+  def clear_version_id(self):
+    self.version_id_ = []
+
+  def start_time(self): return self.start_time_
+
+  def set_start_time(self, x):
+    self.has_start_time_ = 1
+    self.start_time_ = x
+
+  def clear_start_time(self):
+    if self.has_start_time_:
+      self.has_start_time_ = 0
+      self.start_time_ = 0
+
+  def has_start_time(self): return self.has_start_time_
+
+  def end_time(self): return self.end_time_
+
+  def set_end_time(self, x):
+    self.has_end_time_ = 1
+    self.end_time_ = x
+
+  def clear_end_time(self):
+    if self.has_end_time_:
+      self.has_end_time_ = 0
+      self.end_time_ = 0
+
+  def has_end_time(self): return self.has_end_time_
+
+  def offset(self):
+    if self.offset_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.offset_ is None: self.offset_ = LogOffset()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.offset_
+
+  def mutable_offset(self): self.has_offset_ = 1; return self.offset()
+
+  def clear_offset(self):
+    if self.has_offset_:
+      self.has_offset_ = 0
+      if self.offset_ is not None: self.offset_.Clear()
+
+  def has_offset(self): return self.has_offset_
+
+  def request_id_size(self): return len(self.request_id_)
+  def request_id_list(self): return self.request_id_
+
+  def request_id(self, i):
+    return self.request_id_[i]
+
+  def set_request_id(self, i, x):
+    self.request_id_[i] = x
+
+  def add_request_id(self, x):
+    self.request_id_.append(x)
+
+  def clear_request_id(self):
+    self.request_id_ = []
+
+  def minimum_log_level(self): return self.minimum_log_level_
+
+  def set_minimum_log_level(self, x):
+    self.has_minimum_log_level_ = 1
+    self.minimum_log_level_ = x
+
+  def clear_minimum_log_level(self):
+    if self.has_minimum_log_level_:
+      self.has_minimum_log_level_ = 0
+      self.minimum_log_level_ = 0
+
+  def has_minimum_log_level(self): return self.has_minimum_log_level_
+
+  def include_incomplete(self): return self.include_incomplete_
+
+  def set_include_incomplete(self, x):
+    self.has_include_incomplete_ = 1
+    self.include_incomplete_ = x
+
+  def clear_include_incomplete(self):
+    if self.has_include_incomplete_:
+      self.has_include_incomplete_ = 0
+      self.include_incomplete_ = 0
+
+  def has_include_incomplete(self): return self.has_include_incomplete_
+
+  def count(self): return self.count_
+
+  def set_count(self, x):
+    self.has_count_ = 1
+    self.count_ = x
+
+  def clear_count(self):
+    if self.has_count_:
+      self.has_count_ = 0
+      self.count_ = 0
+
+  def has_count(self): return self.has_count_
+
+  def include_app_logs(self): return self.include_app_logs_
+
+  def set_include_app_logs(self, x):
+    self.has_include_app_logs_ = 1
+    self.include_app_logs_ = x
+
+  def clear_include_app_logs(self):
+    if self.has_include_app_logs_:
+      self.has_include_app_logs_ = 0
+      self.include_app_logs_ = 0
+
+  def has_include_app_logs(self): return self.has_include_app_logs_
+
+  def include_host(self): return self.include_host_
+
+  def set_include_host(self, x):
+    self.has_include_host_ = 1
+    self.include_host_ = x
+
+  def clear_include_host(self):
+    if self.has_include_host_:
+      self.has_include_host_ = 0
+      self.include_host_ = 0
+
+  def has_include_host(self): return self.has_include_host_
+
+  def include_all(self): return self.include_all_
+
+  def set_include_all(self, x):
+    self.has_include_all_ = 1
+    self.include_all_ = x
+
+  def clear_include_all(self):
+    if self.has_include_all_:
+      self.has_include_all_ = 0
+      self.include_all_ = 0
+
+  def has_include_all(self): return self.has_include_all_
+
+  def cache_iterator(self): return self.cache_iterator_
+
+  def set_cache_iterator(self, x):
+    self.has_cache_iterator_ = 1
+    self.cache_iterator_ = x
+
+  def clear_cache_iterator(self):
+    if self.has_cache_iterator_:
+      self.has_cache_iterator_ = 0
+      self.cache_iterator_ = 0
+
+  def has_cache_iterator(self): return self.has_cache_iterator_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_app_id()): self.set_app_id(x.app_id())
+    for i in xrange(x.version_id_size()): self.add_version_id(x.version_id(i))
+    if (x.has_start_time()): self.set_start_time(x.start_time())
+    if (x.has_end_time()): self.set_end_time(x.end_time())
+    if (x.has_offset()): self.mutable_offset().MergeFrom(x.offset())
+    for i in xrange(x.request_id_size()): self.add_request_id(x.request_id(i))
+    if (x.has_minimum_log_level()): self.set_minimum_log_level(x.minimum_log_level())
+    if (x.has_include_incomplete()): self.set_include_incomplete(x.include_incomplete())
+    if (x.has_count()): self.set_count(x.count())
+    if (x.has_include_app_logs()): self.set_include_app_logs(x.include_app_logs())
+    if (x.has_include_host()): self.set_include_host(x.include_host())
+    if (x.has_include_all()): self.set_include_all(x.include_all())
+    if (x.has_cache_iterator()): self.set_cache_iterator(x.cache_iterator())
+
+  if _net_proto___parse__python is not None:
+    def _CMergeFromString(self, s):
+      _net_proto___parse__python.MergeFromString(self, 'apphosting.LogReadRequest', s)
+
+  if _net_proto___parse__python is not None:
+    def _CEncode(self):
+      return _net_proto___parse__python.Encode(self, 'apphosting.LogReadRequest')
+
+  if _net_proto___parse__python is not None:
+    def _CEncodePartial(self):
+      return _net_proto___parse__python.EncodePartial(self, 'apphosting.LogReadRequest')
+
+  if _net_proto___parse__python is not None:
+    def _CToASCII(self, output_format):
+      return _net_proto___parse__python.ToASCII(self, 'apphosting.LogReadRequest', output_format)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCII(self, s):
+      _net_proto___parse__python.ParseASCII(self, 'apphosting.LogReadRequest', s)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCIIIgnoreUnknown(self, s):
+      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.LogReadRequest', s)
+
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_app_id_ != x.has_app_id_: return 0
+    if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
+    if len(self.version_id_) != len(x.version_id_): return 0
+    for e1, e2 in zip(self.version_id_, x.version_id_):
+      if e1 != e2: return 0
+    if self.has_start_time_ != x.has_start_time_: return 0
+    if self.has_start_time_ and self.start_time_ != x.start_time_: return 0
+    if self.has_end_time_ != x.has_end_time_: return 0
+    if self.has_end_time_ and self.end_time_ != x.end_time_: return 0
+    if self.has_offset_ != x.has_offset_: return 0
+    if self.has_offset_ and self.offset_ != x.offset_: return 0
+    if len(self.request_id_) != len(x.request_id_): return 0
+    for e1, e2 in zip(self.request_id_, x.request_id_):
+      if e1 != e2: return 0
+    if self.has_minimum_log_level_ != x.has_minimum_log_level_: return 0
+    if self.has_minimum_log_level_ and self.minimum_log_level_ != x.minimum_log_level_: return 0
+    if self.has_include_incomplete_ != x.has_include_incomplete_: return 0
+    if self.has_include_incomplete_ and self.include_incomplete_ != x.include_incomplete_: return 0
+    if self.has_count_ != x.has_count_: return 0
+    if self.has_count_ and self.count_ != x.count_: return 0
+    if self.has_include_app_logs_ != x.has_include_app_logs_: return 0
+    if self.has_include_app_logs_ and self.include_app_logs_ != x.include_app_logs_: return 0
+    if self.has_include_host_ != x.has_include_host_: return 0
+    if self.has_include_host_ and self.include_host_ != x.include_host_: return 0
+    if self.has_include_all_ != x.has_include_all_: return 0
+    if self.has_include_all_ and self.include_all_ != x.include_all_: return 0
+    if self.has_cache_iterator_ != x.has_cache_iterator_: return 0
+    if self.has_cache_iterator_ and self.cache_iterator_ != x.cache_iterator_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_app_id_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: app_id not set.')
+    if (self.has_offset_ and not self.offset_.IsInitialized(debug_strs)): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.app_id_))
+    n += 1 * len(self.version_id_)
+    for i in xrange(len(self.version_id_)): n += self.lengthString(len(self.version_id_[i]))
+    if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
+    if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
+    if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSize())
+    n += 1 * len(self.request_id_)
+    for i in xrange(len(self.request_id_)): n += self.lengthString(len(self.request_id_[i]))
+    if (self.has_minimum_log_level_): n += 1 + self.lengthVarInt64(self.minimum_log_level_)
+    if (self.has_include_incomplete_): n += 2
+    if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
+    if (self.has_include_app_logs_): n += 2
+    if (self.has_include_host_): n += 2
+    if (self.has_include_all_): n += 2
+    if (self.has_cache_iterator_): n += 2
+    return n + 1
+
+  def ByteSizePartial(self):
+    n = 0
+    if (self.has_app_id_):
+      n += 1
+      n += self.lengthString(len(self.app_id_))
+    n += 1 * len(self.version_id_)
+    for i in xrange(len(self.version_id_)): n += self.lengthString(len(self.version_id_[i]))
+    if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
+    if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
+    if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSizePartial())
+    n += 1 * len(self.request_id_)
+    for i in xrange(len(self.request_id_)): n += self.lengthString(len(self.request_id_[i]))
+    if (self.has_minimum_log_level_): n += 1 + self.lengthVarInt64(self.minimum_log_level_)
+    if (self.has_include_incomplete_): n += 2
+    if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
+    if (self.has_include_app_logs_): n += 2
+    if (self.has_include_host_): n += 2
+    if (self.has_include_all_): n += 2
+    if (self.has_cache_iterator_): n += 2
+    return n
+
+  def Clear(self):
+    self.clear_app_id()
+    self.clear_version_id()
+    self.clear_start_time()
+    self.clear_end_time()
+    self.clear_offset()
+    self.clear_request_id()
+    self.clear_minimum_log_level()
+    self.clear_include_incomplete()
+    self.clear_count()
+    self.clear_include_app_logs()
+    self.clear_include_host()
+    self.clear_include_all()
+    self.clear_cache_iterator()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putPrefixedString(self.app_id_)
+    for i in xrange(len(self.version_id_)):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.version_id_[i])
+    if (self.has_start_time_):
+      out.putVarInt32(24)
+      out.putVarInt64(self.start_time_)
+    if (self.has_end_time_):
+      out.putVarInt32(32)
+      out.putVarInt64(self.end_time_)
+    if (self.has_offset_):
+      out.putVarInt32(42)
+      out.putVarInt32(self.offset_.ByteSize())
+      self.offset_.OutputUnchecked(out)
+    for i in xrange(len(self.request_id_)):
+      out.putVarInt32(50)
+      out.putPrefixedString(self.request_id_[i])
+    if (self.has_minimum_log_level_):
+      out.putVarInt32(56)
+      out.putVarInt32(self.minimum_log_level_)
+    if (self.has_include_incomplete_):
+      out.putVarInt32(64)
+      out.putBoolean(self.include_incomplete_)
+    if (self.has_count_):
+      out.putVarInt32(72)
+      out.putVarInt64(self.count_)
+    if (self.has_include_app_logs_):
+      out.putVarInt32(80)
+      out.putBoolean(self.include_app_logs_)
+    if (self.has_include_host_):
+      out.putVarInt32(88)
+      out.putBoolean(self.include_host_)
+    if (self.has_include_all_):
+      out.putVarInt32(96)
+      out.putBoolean(self.include_all_)
+    if (self.has_cache_iterator_):
+      out.putVarInt32(104)
+      out.putBoolean(self.cache_iterator_)
+
+  def OutputPartial(self, out):
+    if (self.has_app_id_):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.app_id_)
+    for i in xrange(len(self.version_id_)):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.version_id_[i])
+    if (self.has_start_time_):
+      out.putVarInt32(24)
+      out.putVarInt64(self.start_time_)
+    if (self.has_end_time_):
+      out.putVarInt32(32)
+      out.putVarInt64(self.end_time_)
+    if (self.has_offset_):
+      out.putVarInt32(42)
+      out.putVarInt32(self.offset_.ByteSizePartial())
+      self.offset_.OutputPartial(out)
+    for i in xrange(len(self.request_id_)):
+      out.putVarInt32(50)
+      out.putPrefixedString(self.request_id_[i])
+    if (self.has_minimum_log_level_):
+      out.putVarInt32(56)
+      out.putVarInt32(self.minimum_log_level_)
+    if (self.has_include_incomplete_):
+      out.putVarInt32(64)
+      out.putBoolean(self.include_incomplete_)
+    if (self.has_count_):
+      out.putVarInt32(72)
+      out.putVarInt64(self.count_)
+    if (self.has_include_app_logs_):
+      out.putVarInt32(80)
+      out.putBoolean(self.include_app_logs_)
+    if (self.has_include_host_):
+      out.putVarInt32(88)
+      out.putBoolean(self.include_host_)
+    if (self.has_include_all_):
+      out.putVarInt32(96)
+      out.putBoolean(self.include_all_)
+    if (self.has_cache_iterator_):
+      out.putVarInt32(104)
+      out.putBoolean(self.cache_iterator_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_app_id(d.getPrefixedString())
+        continue
+      if tt == 18:
+        self.add_version_id(d.getPrefixedString())
+        continue
+      if tt == 24:
+        self.set_start_time(d.getVarInt64())
+        continue
+      if tt == 32:
+        self.set_end_time(d.getVarInt64())
+        continue
+      if tt == 42:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_offset().TryMerge(tmp)
+        continue
+      if tt == 50:
+        self.add_request_id(d.getPrefixedString())
+        continue
+      if tt == 56:
+        self.set_minimum_log_level(d.getVarInt32())
+        continue
+      if tt == 64:
+        self.set_include_incomplete(d.getBoolean())
+        continue
+      if tt == 72:
+        self.set_count(d.getVarInt64())
+        continue
+      if tt == 80:
+        self.set_include_app_logs(d.getBoolean())
+        continue
+      if tt == 88:
+        self.set_include_host(d.getBoolean())
+        continue
+      if tt == 96:
+        self.set_include_all(d.getBoolean())
+        continue
+      if tt == 104:
+        self.set_cache_iterator(d.getBoolean())
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
+    cnt=0
+    for e in self.version_id_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("version_id%s: %s\n" % (elm, self.DebugFormatString(e)))
+      cnt+=1
+    if self.has_start_time_: res+=prefix+("start_time: %s\n" % self.DebugFormatInt64(self.start_time_))
+    if self.has_end_time_: res+=prefix+("end_time: %s\n" % self.DebugFormatInt64(self.end_time_))
+    if self.has_offset_:
+      res+=prefix+"offset <\n"
+      res+=self.offset_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    cnt=0
+    for e in self.request_id_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("request_id%s: %s\n" % (elm, self.DebugFormatString(e)))
+      cnt+=1
+    if self.has_minimum_log_level_: res+=prefix+("minimum_log_level: %s\n" % self.DebugFormatInt32(self.minimum_log_level_))
+    if self.has_include_incomplete_: res+=prefix+("include_incomplete: %s\n" % self.DebugFormatBool(self.include_incomplete_))
+    if self.has_count_: res+=prefix+("count: %s\n" % self.DebugFormatInt64(self.count_))
+    if self.has_include_app_logs_: res+=prefix+("include_app_logs: %s\n" % self.DebugFormatBool(self.include_app_logs_))
+    if self.has_include_host_: res+=prefix+("include_host: %s\n" % self.DebugFormatBool(self.include_host_))
+    if self.has_include_all_: res+=prefix+("include_all: %s\n" % self.DebugFormatBool(self.include_all_))
+    if self.has_cache_iterator_: res+=prefix+("cache_iterator: %s\n" % self.DebugFormatBool(self.cache_iterator_))
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  kapp_id = 1
+  kversion_id = 2
+  kstart_time = 3
+  kend_time = 4
+  koffset = 5
+  krequest_id = 6
+  kminimum_log_level = 7
+  kinclude_incomplete = 8
+  kcount = 9
+  kinclude_app_logs = 10
+  kinclude_host = 11
+  kinclude_all = 12
+  kcache_iterator = 13
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "app_id",
+    2: "version_id",
+    3: "start_time",
+    4: "end_time",
+    5: "offset",
+    6: "request_id",
+    7: "minimum_log_level",
+    8: "include_incomplete",
+    9: "count",
+    10: "include_app_logs",
+    11: "include_host",
+    12: "include_all",
+    13: "cache_iterator",
+  }, 13)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+    2: ProtocolBuffer.Encoder.STRING,
+    3: ProtocolBuffer.Encoder.NUMERIC,
+    4: ProtocolBuffer.Encoder.NUMERIC,
+    5: ProtocolBuffer.Encoder.STRING,
+    6: ProtocolBuffer.Encoder.STRING,
+    7: ProtocolBuffer.Encoder.NUMERIC,
+    8: ProtocolBuffer.Encoder.NUMERIC,
+    9: ProtocolBuffer.Encoder.NUMERIC,
+    10: ProtocolBuffer.Encoder.NUMERIC,
+    11: ProtocolBuffer.Encoder.NUMERIC,
+    12: ProtocolBuffer.Encoder.NUMERIC,
+    13: ProtocolBuffer.Encoder.NUMERIC,
+  }, 13, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+  _SERIALIZED_DESCRIPTOR = array.array('B')
+  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WithcHBob3N0aW5nL2FwaS9sb2dzZXJ2aWNlL2xvZ19zZXJ2aWNlLnByb3RvChlhcHBob3N0aW5nLkxvZ1JlYWRSZXF1ZXN0ExoGYXBwX2lkIAEoAjAJOAIUExoKdmVyc2lvbl9pZCACKAIwCTgDFBMaCnN0YXJ0X3RpbWUgAygAMAM4ARQTGghlbmRfdGltZSAEKAAwAzgBFBMaBm9mZnNldCAFKAIwCzgBShRhcHBob3N0aW5nLkxvZ09mZnNldBQTGgpyZXF1ZXN0X2lkIAYoAjAJOAMUExoRbWluaW11bV9sb2dfbGV2ZWwgBygAMAU4ARQTGhJpbmNsdWRlX2luY29tcGxldGUgCCgAMAg4ARQTGgVjb3VudCAJKAAwAzgBFBMaEGluY2x1ZGVfYXBwX2xvZ3MgCigAMAg4ARQTGgxpbmNsdWRlX2hvc3QgCygAMAg4ARQTGgtpbmNsdWRlX2FsbCAMKAAwCDgBFBMaDmNhY2hlX2l0ZXJhdG9yIA0oADAIOAEUwgEXYXBwaG9zdGluZy5GbHVzaFJlcXVlc3Q="))
+  if _net_proto___parse__python is not None:
+    _net_proto___parse__python.RegisterType(
+        _SERIALIZED_DESCRIPTOR.tostring())
+
+class LogReadResponse(ProtocolBuffer.ProtocolMessage):
+  has_offset_ = 0
+  offset_ = None
+
+  def __init__(self, contents=None):
+    self.log_ = []
+    self.lazy_init_lock_ = thread.allocate_lock()
+    if contents is not None: self.MergeFromString(contents)
+
+  def log_size(self): return len(self.log_)
+  def log_list(self): return self.log_
+
+  def log(self, i):
+    return self.log_[i]
+
+  def mutable_log(self, i):
+    return self.log_[i]
+
+  def add_log(self):
+    x = RequestLog()
+    self.log_.append(x)
+    return x
+
+  def clear_log(self):
+    self.log_ = []
+  def offset(self):
+    if self.offset_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.offset_ is None: self.offset_ = LogOffset()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.offset_
+
+  def mutable_offset(self): self.has_offset_ = 1; return self.offset()
+
+  def clear_offset(self):
+
+    if self.has_offset_:
+      self.has_offset_ = 0;
+      if self.offset_ is not None: self.offset_.Clear()
+
+  def has_offset(self): return self.has_offset_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.log_size()): self.add_log().CopyFrom(x.log(i))
+    if (x.has_offset()): self.mutable_offset().MergeFrom(x.offset())
+
+  if _net_proto___parse__python is not None:
+    def _CMergeFromString(self, s):
+      _net_proto___parse__python.MergeFromString(self, 'apphosting.LogReadResponse', s)
+
+  if _net_proto___parse__python is not None:
+    def _CEncode(self):
+      return _net_proto___parse__python.Encode(self, 'apphosting.LogReadResponse')
+
+  if _net_proto___parse__python is not None:
+    def _CEncodePartial(self):
+      return _net_proto___parse__python.EncodePartial(self, 'apphosting.LogReadResponse')
+
+  if _net_proto___parse__python is not None:
+    def _CToASCII(self, output_format):
+      return _net_proto___parse__python.ToASCII(self, 'apphosting.LogReadResponse', output_format)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCII(self, s):
+      _net_proto___parse__python.ParseASCII(self, 'apphosting.LogReadResponse', s)
+
+
+  if _net_proto___parse__python is not None:
+    def ParseASCIIIgnoreUnknown(self, s):
+      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.LogReadResponse', s)
+
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.log_) != len(x.log_): return 0
+    for e1, e2 in zip(self.log_, x.log_):
+      if e1 != e2: return 0
+    if self.has_offset_ != x.has_offset_: return 0
+    if self.has_offset_ and self.offset_ != x.offset_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    for p in self.log_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    if (self.has_offset_ and not self.offset_.IsInitialized(debug_strs)): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 1 * len(self.log_)
+    for i in xrange(len(self.log_)): n += self.lengthString(self.log_[i].ByteSize())
+    if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSize())
+    return n
+
+  def ByteSizePartial(self):
+    n = 0
+    n += 1 * len(self.log_)
+    for i in xrange(len(self.log_)): n += self.lengthString(self.log_[i].ByteSizePartial())
+    if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSizePartial())
+    return n
+
+  def Clear(self):
+    self.clear_log()
+    self.clear_offset()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.log_)):
+      out.putVarInt32(10)
+      out.putVarInt32(self.log_[i].ByteSize())
+      self.log_[i].OutputUnchecked(out)
+    if (self.has_offset_):
+      out.putVarInt32(18)
+      out.putVarInt32(self.offset_.ByteSize())
+      self.offset_.OutputUnchecked(out)
+
+  def OutputPartial(self, out):
+    for i in xrange(len(self.log_)):
+      out.putVarInt32(10)
+      out.putVarInt32(self.log_[i].ByteSizePartial())
+      self.log_[i].OutputPartial(out)
+    if (self.has_offset_):
+      out.putVarInt32(18)
+      out.putVarInt32(self.offset_.ByteSizePartial())
+      self.offset_.OutputPartial(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.add_log().TryMerge(tmp)
+        continue
+      if tt == 18:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_offset().TryMerge(tmp)
+        continue
+
+
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.log_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("log%s <\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+      cnt+=1
+    if self.has_offset_:
+      res+=prefix+"offset <\n"
+      res+=self.offset_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+
+  def _BuildTagLookupTable(sparse, maxtag, default=None):
+    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
+
+  klog = 1
+  koffset = 2
+
+  _TEXT = _BuildTagLookupTable({
+    0: "ErrorCode",
+    1: "log",
+    2: "offset",
+  }, 2)
+
+  _TYPES = _BuildTagLookupTable({
+    0: ProtocolBuffer.Encoder.NUMERIC,
+    1: ProtocolBuffer.Encoder.STRING,
+    2: ProtocolBuffer.Encoder.STRING,
+  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)
+
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+  _SERIALIZED_DESCRIPTOR = array.array('B')
+  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WithcHBob3N0aW5nL2FwaS9sb2dzZXJ2aWNlL2xvZ19zZXJ2aWNlLnByb3RvChphcHBob3N0aW5nLkxvZ1JlYWRSZXNwb25zZRMaA2xvZyABKAIwCzgDShVhcHBob3N0aW5nLlJlcXVlc3RMb2cUExoGb2Zmc2V0IAIoAjALOAFKFGFwcGhvc3RpbmcuTG9nT2Zmc2V0FMIBF2FwcGhvc3RpbmcuRmx1c2hSZXF1ZXN0"))
+  if _net_proto___parse__python is not None:
+    _net_proto___parse__python.RegisterType(
+        _SERIALIZED_DESCRIPTOR.tostring())
+
 
 
 class _LogService_ClientBaseStub(_client_stub_base_class):
@@ -482,4 +2932,4 @@
         'none')
 
 
-__all__ = ['FlushRequest','SetStatusRequest','LogService']
+__all__ = ['FlushRequest','SetStatusRequest','LogOffset','LogLine','RequestLog','LogReadRequest','LogReadResponse','LogService']
diff --git a/google/appengine/api/mail.py b/google/appengine/api/mail.py
index cfd19fd..1b073a8 100755
--- a/google/appengine/api/mail.py
+++ b/google/appengine/api/mail.py
@@ -86,9 +86,11 @@
     'css': 'text/css',
     'csv': 'text/csv',
     'doc': 'application/msword',
+    'docx': 'application/msword',
     'diff': 'text/plain',
     'flac': 'audio/flac',
     'gif': 'image/gif',
+    'gzip': 'application/x-gzip',
     'htm': 'text/html',
     'html': 'text/html',
     'ics': 'text/calendar',
@@ -116,6 +118,7 @@
     'pot': 'text/plain',
     'pps': 'application/vnd.ms-powerpoint',
     'ppt': 'application/vnd.ms-powerpoint',
+    'pptx': 'application/vnd.ms-powerpoint',
     'qt': 'video/quicktime',
     'rmi': 'audio/mid',
     'rss': 'text/rss+xml',
@@ -132,9 +135,45 @@
     'webm': 'video/webm',
     'webp': 'image/webp',
     'xls': 'application/vnd.ms-excel',
+    'xlsx': 'application/vnd.ms-excel',
+    'zip': 'application/zip'
     }
 
-EXTENSION_WHITELIST = frozenset(EXTENSION_MIME_MAP.iterkeys())
+
+
+
+
+EXTENSION_BLACKLIST = [
+    'ade',
+    'adp',
+    'bat',
+    'chm',
+    'cmd',
+    'com',
+    'cpl',
+    'exe',
+    'hta',
+    'ins',
+    'isp',
+    'jse',
+    'lib',
+    'mde',
+    'msc',
+    'msp',
+    'mst',
+    'pif',
+    'scr',
+    'sct',
+    'shb',
+    'sys',
+    'vb',
+    'vbe',
+    'vbs',
+    'vxd',
+    'wsc',
+    'wsf',
+    'wsh',
+    ]
 
 
 HEADER_WHITELIST = frozenset([
@@ -401,13 +440,15 @@
   """
   extension_index = file_name.rfind('.')
   if extension_index == -1:
+    extension = ''
+  else:
+    extension = file_name[extension_index + 1:].lower()
+  if extension in EXTENSION_BLACKLIST:
     raise InvalidAttachmentTypeError(
-        "File '%s' does not have an extension" % file_name)
-  extension = file_name[extension_index + 1:].lower()
+        'Extension %s is not supported.' % extension)
   mime_type = EXTENSION_MIME_MAP.get(extension, None)
   if mime_type is None:
-    raise InvalidAttachmentTypeError(
-        "Extension '%s' is not supported." % extension)
+    mime_type = 'application/octet-stream'
   return mime_type
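
The rewritten resolution above replaces the old whitelist check: a blacklisted
extension is rejected, and any other extension without a known mapping now falls
back to application/octet-stream instead of raising.  A standalone sketch of that
flow (illustrative only; the enclosing SDK function's name is outside this hunk):

# Sketch of the resolution logic shown in the hunk above; mime_map and blacklist
# stand in for the EXTENSION_MIME_MAP and EXTENSION_BLACKLIST tables in this diff.
def resolve_attachment_mime_type(file_name, mime_map, blacklist):
  extension_index = file_name.rfind('.')
  if extension_index == -1:
    extension = ''
  else:
    extension = file_name[extension_index + 1:].lower()
  if extension in blacklist:
    # The SDK raises InvalidAttachmentTypeError here.
    raise ValueError('Extension %s is not supported.' % extension)
  # Unknown but allowed extensions now default to a generic binary type.
  return mime_map.get(extension, 'application/octet-stream')

# e.g. 'report.diff' -> 'text/plain', 'data.bin' -> 'application/octet-stream',
#      'run.exe' -> raises.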
 
 
diff --git a/google/appengine/api/memcache/__init__.py b/google/appengine/api/memcache/__init__.py
index 14fdb32..e0aef9d 100755
--- a/google/appengine/api/memcache/__init__.py
+++ b/google/appengine/api/memcache/__init__.py
@@ -103,6 +103,11 @@
 CAPABILITY = capabilities.CapabilitySet('memcache')
 
 
+def _is_pair(obj):
+  """Helper to test if something is a pair (2-tuple)."""
+  return isinstance(obj, tuple) and len(obj) == 2
+
+
 def _add_name_space(message, namespace=None):
   """Populate the name_space field in a messagecol buffer.
 
@@ -140,9 +145,9 @@
 
   Raises:
     TypeError: If provided key isn't a string or tuple of (int, string)
-      or key_prefix or server_to_user_dict are of the wrong type.
+      or if key_prefix is of the wrong type.
   """
-  if type(key) is types.TupleType:
+  if _is_pair(key):
     key = key[1]
   if not isinstance(key, basestring):
     raise TypeError('Key must be a string instance, received %r' % key)
@@ -162,9 +167,7 @@
     server_key = hashlib.sha1(server_key).hexdigest()
 
   if server_to_user_dict is not None:
-    if not isinstance(server_to_user_dict, dict):
-      raise TypeError('server_to_user_dict must be a dict instance, ' +
-                      'received %r' % key)
+    assert isinstance(server_to_user_dict, dict)
     server_to_user_dict[server_key] = key
 
   return server_key
@@ -271,6 +274,20 @@
   assert False, "Shouldn't get here."
 
 
+def create_rpc(deadline=None, callback=None):
+  """Creates an RPC object for use with the memcache API.
+
+  Args:
+    deadline: Optional deadline in seconds for the operation; the default
+      is a system-specific deadline (typically 5 seconds).
+    callback: Optional callable to invoke on completion.
+
+  Returns:
+    An apiproxy_stub_map.UserRPC object specialized for this service.
+  """
+  return apiproxy_stub_map.UserRPC('memcache', deadline, callback)
+
+
 class Client(object):
   """Memcache client object, through which one invokes all memcache operations.
 
@@ -296,7 +313,7 @@
                unpickler=cPickle.Unpickler,
                pload=None,
                pid=None,
-               make_sync_call=apiproxy_stub_map.MakeSyncCall,
+               make_sync_call=None,
                _app_id=None,
                _num_memcacheg_backends=None):
     """Create a new Client object.
@@ -311,8 +328,7 @@
       unpickler: pickle.Unpickler sub-class to use for unpickling.
       pload: Callable to use for retrieving objects by persistent id.
       pid: Callable to use for determine the persistent id for objects, if any.
-      make_sync_call: Function to use to make an App Engine service call.
-        Used for testing.
+      make_sync_call: Ignored; only for compatibility with an earlier version.
     """
 
 
@@ -328,13 +344,41 @@
     self._pickle_protocol = pickleProtocol
     self._persistent_id = pid
     self._persistent_load = pload
-    self._make_sync_call = make_sync_call
     self._app_id = _app_id
     self._num_memcacheg_backends = _num_memcacheg_backends
+    self._cas_ids = {}
     if _app_id and not _num_memcacheg_backends:
       raise ValueError('If you specify an _app_id, you must also '
                        'provide _num_memcacheg_backends')
 
+  def cas_reset(self):
+    """Clear the remembered CAS ids."""
+    self._cas_ids.clear()
+
+  def _make_async_call(self, rpc, method, request, response,
+                       get_result_hook, user_data):
+    """Internal helper to schedule an asynchronous RPC.
+
+    Args:
+      rpc: None or a UserRPC object.
+      method: Method name, e.g. 'Get'.
+      request: Request protobuf.
+      response: Response protobuf.
+      get_result_hook: None or hook function used to process results
+        (See UserRPC.make_call() for more info).
+      user_data: None or user data for hook function.
+
+    Returns:
+      A UserRPC object; either the one passed in as the first argument,
+      or a new one (if the first argument was None).
+    """
+
+    if rpc is None:
+      rpc = create_rpc()
+    assert rpc.service == 'memcache', repr(rpc.service)
+    rpc.make_call(method, request, response, get_result_hook, user_data)
+    return rpc
+
   def _do_pickle(self, value):
     """Pickles a provided value."""
     pickle_data = cStringIO.StringIO()
@@ -416,14 +460,23 @@
 
       On error, returns None.
     """
+    rpc = self.get_stats_async()
+    return rpc.get_result()
+
+  def get_stats_async(self, rpc=None):
+    """Async version of get_stats()."""
     request = MemcacheStatsRequest()
     self._add_app_id(request)
     response = MemcacheStatsResponse()
+    return self._make_async_call(rpc, 'Stats', request, response,
+                                 self.__get_stats_hook, None)
+
+  def __get_stats_hook(self, rpc):
     try:
-      self._make_sync_call('memcache', 'Stats', request, response)
+      rpc.check_success()
     except apiproxy_errors.Error:
       return None
-
+    response = rpc.response
     if not response.has_stats():
       return {
         STAT_HITS: 0,
@@ -450,16 +503,25 @@
     Returns:
       True on success, False on RPC or server error.
     """
+    rpc = self.flush_all_async()
+    return rpc.get_result()
+
+  def flush_all_async(self, rpc=None):
+    """Async version of flush_all()."""
     request = MemcacheFlushRequest()
     self._add_app_id(request)
     response = MemcacheFlushResponse()
+    return self._make_async_call(rpc, 'FlushAll', request, response,
+                                 self.__flush_all_hook, None)
+
+  def __flush_all_hook(self, rpc):
     try:
-      self._make_sync_call('memcache', 'FlushAll', request, response)
+      rpc.check_success()
     except apiproxy_errors.Error:
       return False
     return True
 
-  def get(self, key, namespace=None):
+  def get(self, key, namespace=None, for_cas=False):
     """Looks up a single key in memcache.
 
     If you have multiple items to load, though, it's much more efficient
@@ -472,28 +534,23 @@
         for details of format.
       namespace: a string specifying an optional namespace to use in
         the request.
+      for_cas: If True, request and store CAS ids on the client (see
+        cas() operation below).
 
     Returns:
       The value of the key, if found in memcache, else None.
     """
-    request = MemcacheGetRequest()
-    self._add_app_id(request)
-    request.add_key(_key_string(key))
-    _add_name_space(request, namespace)
-    response = MemcacheGetResponse()
-    try:
-      self._make_sync_call('memcache', 'Get', request, response)
-    except apiproxy_errors.Error:
-      return None
+    if _is_pair(key):
+      key = key[1]
+    rpc = self.get_multi_async([key], namespace=namespace, for_cas=for_cas)
+    results = rpc.get_result()
+    return results.get(key)
 
-    if not response.item_size():
-      return None
+  def gets(self, key, namespace=None):
+    """An alias for get(..., for_cas=True)."""
+    return self.get(key, namespace=namespace, for_cas=True)
 
-    return _decode_value(response.item(0).value(),
-                         response.item(0).flags(),
-                         self._do_unpickle)
-
-  def get_multi(self, keys, key_prefix='', namespace=None):
+  def get_multi(self, keys, key_prefix='', namespace=None, for_cas=False):
     """Looks up multiple keys from memcache in one operation.
 
     This is the recommended way to do bulk loads.
@@ -508,29 +565,48 @@
         not included in the returned dictionary.
       namespace: a string specifying an optional namespace to use in
         the request.
+      for_cas: If True, request and store CAS ids on the client.
 
     Returns:
       A dictionary of the keys and values that were present in memcache.
       Even if the key_prefix was specified, that key_prefix won't be on
       the keys in the returned dictionary.
     """
+    rpc = self.get_multi_async(keys, key_prefix, namespace, for_cas)
+    return rpc.get_result()
+
+  def get_multi_async(self, keys, key_prefix='', namespace=None,
+                      for_cas=False, rpc=None):
+    """Async version of get_multi()."""
     request = MemcacheGetRequest()
     self._add_app_id(request)
     _add_name_space(request, namespace)
+    if for_cas:
+      request.set_for_cas(True)
     response = MemcacheGetResponse()
     user_key = {}
     for key in keys:
       request.add_key(_key_string(key, key_prefix, user_key))
+
+    return self._make_async_call(rpc, 'Get', request, response,
+                                 self.__get_hook, user_key)
+
+  def __get_hook(self, rpc):
     try:
-      self._make_sync_call('memcache', 'Get', request, response)
+      rpc.check_success()
     except apiproxy_errors.Error:
       return {}
-
+    for_cas = rpc.request.for_cas()
+    response = rpc.response
+    user_key = rpc.user_data
     return_value = {}
     for returned_item in response.item_list():
       value = _decode_value(returned_item.value(), returned_item.flags(),
                             self._do_unpickle)
-      return_value[user_key[returned_item.key()]] = value
+      raw_key = returned_item.key()
+      if for_cas:
+        self._cas_ids[raw_key] = returned_item.cas_id()
+      return_value[user_key[raw_key]] = value
     return return_value
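
With the hook-based plumbing above, each synchronous call is a thin wrapper over a
UserRPC, so a fetch can be overlapped with other work.  A short sketch (key names
and the one-second deadline are illustrative):

# Issue a memcache Get and collect it later; create_rpc() and get_multi_async()
# are the functions added in this diff.
from google.appengine.api import memcache

client = memcache.Client()
rpc = memcache.create_rpc(deadline=1)                        # optional explicit deadline
client.get_multi_async(['k1', 'k2'], for_cas=True, rpc=rpc)  # RPC is sent immediately
# ... do unrelated work while the fetch is in flight ...
values = rpc.get_result()  # dict of keys found; CAS ids are remembered on the client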
 
 
@@ -560,28 +636,14 @@
       This can be used as a boolean value, where a network failure is the
       only bad condition.
     """
-    if not isinstance(seconds, (int, long, float)):
-      raise TypeError('Delete timeout must be a number.')
-    if seconds < 0:
-      raise ValueError('Delete timeout must be non-negative.')
-
-    request = MemcacheDeleteRequest()
-    self._add_app_id(request)
-    _add_name_space(request, namespace)
-    response = MemcacheDeleteResponse()
-
-    delete_item = request.add_item()
-    delete_item.set_key(_key_string(key))
-    delete_item.set_delete_time(int(math.ceil(seconds)))
-    try:
-      self._make_sync_call('memcache', 'Delete', request, response)
-    except apiproxy_errors.Error:
+    rpc = self.delete_multi_async([key], seconds, namespace=namespace)
+    results = rpc.get_result()
+    if not results:
       return DELETE_NETWORK_FAILURE
-    assert response.delete_status_size() == 1, 'Unexpected status size.'
-
-    if response.delete_status(0) == MemcacheDeleteResponse.DELETED:
+    status = results[0]
+    if status == MemcacheDeleteResponse.DELETED:
       return DELETE_SUCCESSFUL
-    elif response.delete_status(0) == MemcacheDeleteResponse.NOT_FOUND:
+    elif status == MemcacheDeleteResponse.NOT_FOUND:
       return DELETE_ITEM_MISSING
     assert False, 'Unexpected deletion status code.'
 
@@ -605,6 +667,20 @@
       True if all operations completed successfully.  False if one
       or more failed to complete.
     """
+    rpc = self.delete_multi_async(keys, seconds, key_prefix, namespace)
+    results = rpc.get_result()
+    return bool(results)
+
+  def delete_multi_async(self, keys, seconds=0, key_prefix='',
+                         namespace=None, rpc=None):
+    """Async version of delete_multi() -- note different return value.
+
+    Returns:
+      A UserRPC instance whose get_result() method returns None if
+      there was a network error, or a list of status values otherwise,
+      where each status corresponds to a key and is either DELETED or
+      NOT_FOUND.
+    """
     if not isinstance(seconds, (int, long, float)):
       raise TypeError('Delete timeout must be a number.')
     if seconds < 0:
@@ -619,11 +695,24 @@
       delete_item = request.add_item()
       delete_item.set_key(_key_string(key, key_prefix=key_prefix))
       delete_item.set_delete_time(int(math.ceil(seconds)))
+
+    return self._make_async_call(rpc, 'Delete', request, response,
+                                 self.__delete_hook, None)
+
+  def __delete_hook(self, rpc):
     try:
-      self._make_sync_call('memcache', 'Delete', request, response)
+      rpc.check_success()
     except apiproxy_errors.Error:
-      return False
-    return True
+      return None
+    return rpc.response.delete_status_list()
+
+
+
+
+
+
+
+
 
   def set(self, key, value, time=0, min_compress_len=0, namespace=None):
     """Sets a key's value, regardless of previous contents in cache.
@@ -692,13 +781,55 @@
     return self._set_with_policy(MemcacheSetRequest.REPLACE,
                                  key, value, time=time, namespace=namespace)
 
+  def cas(self, key, value, time=0, min_compress_len=0, namespace=None):
+    """Compare-And-Set update.
+
+    This requires that the key has previously been successfully
+    fetched with gets() or get(..., for_cas=True), and that no changes
+    have been made to the key since that fetch.  Typical usage is:
+
+      key = ...
+      client = memcache.Client()
+      value = client.gets(key)  # OR client.get(key, for_cas=True)
+      <updated value>
+      ok = client.cas(key, value)
+
+    If two processes run similar code, the first one calling cas()
+    will succeed (ok == True), while the second one will fail (ok ==
+    False).  This can be used to detect race conditions.
+
+    NOTE: some state (the CAS id) is stored on the Client object for
+    each key ever used with gets().  To prevent ever-increasing memory
+    usage, you must use a Client object when using cas(), and the
+    lifetime of your Client object should be limited to that of one
+    incoming HTTP request.  You cannot use the global-function-based
+    API.
+
+    Args:
+      key: Key to set.  See docs on Client for details.
+      value: The new value.
+      time: Optional expiration time, either relative number of seconds
+        from current time (up to 1 month), or an absolute Unix epoch time.
+        By default, items never expire, though items may be evicted due to
+        memory pressure.  Float values will be rounded up to the nearest
+        whole second.
+      min_compress_len: Ignored option for compatibility.
+      namespace: a string specifying an optional namespace to use in
+        the request.
+
+    Returns:
+      True if updated.  False on RPC error or if the CAS id didn't match.
+    """
+    return self._set_with_policy(MemcacheSetRequest.CAS, key, value,
+                                 time, namespace)
+
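
The docstring above sketches the gets()/cas() protocol; a concrete, hedged example
follows (the retry loop and bump_counter() are an application pattern, not part of
the API):

# Optimistic counter increment built on gets()/cas() from this diff.
from google.appengine.api import memcache

def bump_counter(key, retries=10):
  client = memcache.Client()      # cas() needs a Client; the module functions don't expose it
  for _ in xrange(retries):
    counter = client.gets(key)    # fetch and remember the CAS id
    if counter is None:
      client.add(key, 0)          # seed the value if missing, then retry
      continue
    if client.cas(key, counter + 1):
      return True                 # nobody changed the key since gets()
  return False                    # lost the race on every attempt
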
   def _set_with_policy(self, policy, key, value, time=0, namespace=None):
     """Sets a single key with a specified policy.
 
     Helper function for set(), add(), and replace().
 
     Args:
-      policy:  One of MemcacheSetRequest.SET, .ADD, or .REPLACE.
+      policy:  One of MemcacheSetRequest.SET, .ADD, .REPLACE or .CAS.
       key: Key to add, set, or replace.  See docs on Client for details.
       value: Value to set.
       time: Expiration time, defaulting to 0 (never expiring).
@@ -710,29 +841,13 @@
       that failed due to the item not already existing, or an add
       failing due to the item not already existing.
     """
-    if not isinstance(time, (int, long, float)):
-      raise TypeError('Expiration must be a number.')
-    if time < 0:
-      raise ValueError('Expiration must not be negative.')
+    rpc = self._set_multi_async_with_policy(policy, {key: value},
+                                            time, '', namespace)
+    status_dict = rpc.get_result()
+    if not status_dict:
+      return False
+    return status_dict.get(key) == MemcacheSetResponse.STORED
 
-    request = MemcacheSetRequest()
-    self._add_app_id(request)
-    item = request.add_item()
-    item.set_key(_key_string(key))
-    stored_value, flags = _validate_encode_value(value, self._do_pickle)
-    item.set_value(stored_value)
-    item.set_flags(flags)
-    item.set_set_policy(policy)
-    item.set_expiration_time(int(math.ceil(time)))
-    _add_name_space(request, namespace)
-    response = MemcacheSetResponse()
-    try:
-      self._make_sync_call('memcache', 'Set', request, response)
-    except apiproxy_errors.Error:
-      return False
-    if response.set_status_size() != 1:
-      return False
-    return response.set_status(0) == MemcacheSetResponse.STORED
 
 
 
@@ -746,8 +861,9 @@
     reduces the network latency of doing many requests in serial.
 
     Args:
-      policy:  One of MemcacheSetRequest.SET, ADD, or REPLACE.
-      mapping: Dictionary of keys to values.
+      policy:  One of MemcacheSetRequest.SET, .ADD, .REPLACE or .CAS.
+      mapping: Dictionary of keys to values.  If policy == CAS, the
+        values must be (value, cas_id) tuples.
       time: Optional expiration time, either relative number of seconds
         from current time (up to 1 month), or an absolute Unix epoch time.
         By default, items never expire, though items may be evicted due to
@@ -763,6 +879,35 @@
       a list of all input keys is returned; in this case the keys
       may or may not have been updated.
     """
+    rpc = self._set_multi_async_with_policy(policy, mapping, time,
+                                            key_prefix, namespace)
+    status_dict = rpc.get_result()
+    server_keys, user_key = rpc.user_data
+
+    if not status_dict:
+      return user_key.values()
+
+
+    unset_list = []
+    for server_key in server_keys:
+      key = user_key[server_key]
+      set_status = status_dict[key]
+      if set_status != MemcacheSetResponse.STORED:
+        unset_list.append(key)
+
+    return unset_list
+
+
+  def _set_multi_async_with_policy(self, policy, mapping, time=0,
+                                   key_prefix='', namespace=None, rpc=None):
+    """Async version of _set_multi_with_policy() -- note different return.
+
+    Returns:
+      A UserRPC instance whose get_result() method returns None if
+      there was a network error, or a dict mapping (user) keys to
+      status values otherwise, where each status is one of STORED,
+      NOT_STORED, ERROR, or EXISTS.
+    """
     if not isinstance(time, (int, long, float)):
       raise TypeError('Expiration must be a number.')
     if time < 0.0:
@@ -773,6 +918,7 @@
     _add_name_space(request, namespace)
     user_key = {}
     server_keys = []
+    set_cas_id = (policy == MemcacheSetRequest.CAS)
     for key, value in mapping.iteritems():
       server_key = _key_string(key, key_prefix, user_key)
       stored_value, flags = _validate_encode_value(value, self._do_pickle)
@@ -784,22 +930,35 @@
       item.set_flags(flags)
       item.set_set_policy(policy)
       item.set_expiration_time(int(math.ceil(time)))
+      if set_cas_id:
+        cas_id = self._cas_ids.get(server_key)
+
+        if cas_id is not None:
+          item.set_cas_id(cas_id)
+
+
+          item.set_for_cas(True)
 
     response = MemcacheSetResponse()
+
+
+    return self._make_async_call(rpc, 'Set', request, response,
+                                 self.__set_with_policy_hook,
+                                 (server_keys, user_key))
+
+  def __set_with_policy_hook(self, rpc):
     try:
-      self._make_sync_call('memcache', 'Set', request, response)
+      rpc.check_success()
     except apiproxy_errors.Error:
-      return user_key.values()
+      return None
 
+    response = rpc.response
+    server_keys, user_key = rpc.user_data
     assert response.set_status_size() == len(server_keys)
-
-
-    unset_list = []
-    for server_key, set_status in zip(server_keys, response.set_status_list()):
-      if set_status != MemcacheSetResponse.STORED:
-        unset_list.append(user_key[server_key])
-
-    return unset_list
+    status_dict = {}
+    for server_key, status in zip(server_keys, response.set_status_list()):
+      status_dict[user_key[server_key]] = status
+    return status_dict
 
   def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
                 namespace=None):
@@ -825,6 +984,17 @@
                                        time=time, key_prefix=key_prefix,
                                        namespace=namespace)
 
+  def set_multi_async(self, mapping, time=0,  key_prefix='',
+                      min_compress_len=0, namespace=None, rpc=None):
+    """Async version of set_multi() -- note different return value.
+
+    Returns:
+      See _set_multi_async_with_policy().
+    """
+    return self._set_multi_async_with_policy(MemcacheSetRequest.SET, mapping,
+                                             time=time, key_prefix=key_prefix,
+                                             namespace=namespace, rpc=rpc)
+
   def add_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
                 namespace=None):
     """Set multiple keys' values iff items are not already in memcache.
@@ -849,6 +1019,17 @@
                                        time=time, key_prefix=key_prefix,
                                        namespace=namespace)
 
+  def add_multi_async(self, mapping, time=0,  key_prefix='',
+                      min_compress_len=0, namespace=None, rpc=None):
+    """Async version of add_multi() -- note different return value.
+
+    Returns:
+      See _set_multi_async_with_policy().
+    """
+    return self._set_multi_async_with_policy(MemcacheSetRequest.ADD, mapping,
+                                             time=time, key_prefix=key_prefix,
+                                             namespace=namespace, rpc=rpc)
+
   def replace_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
                     namespace=None):
     """Replace multiple keys' values, failing if the items aren't in memcache.
@@ -873,6 +1054,55 @@
                                        time=time, key_prefix=key_prefix,
                                        namespace=namespace)
 
+  def replace_multi_async(self, mapping, time=0,  key_prefix='',
+                          min_compress_len=0, namespace=None, rpc=None):
+    """Async version of replace_multi() -- note different return value.
+
+    Returns:
+      See _set_multi_async_with_policy().
+    """
+    return self._set_multi_async_with_policy(MemcacheSetRequest.REPLACE,
+                                             mapping,
+                                             time=time, key_prefix=key_prefix,
+                                             namespace=namespace, rpc=rpc)
+
+  def cas_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
+                namespace=None):
+    """Compare-And-Set update for multiple keys.
+
+    See cas() docstring for an explanation.
+
+    Args:
+      mapping: Dictionary of keys to values.
+      time: Optional expiration time, either relative number of seconds
+        from current time (up to 1 month), or an absolute Unix epoch time.
+        By default, items never expire, though items may be evicted due to
+        memory pressure.  Float values will be rounded up to the nearest
+        whole second.
+      key_prefix: Prefix to prepend to all keys.
+      min_compress_len: Unimplemented compatibility option.
+      namespace: a string specifying an optional namespace to use in
+        the request.
+
+    Returns:
+      A list of keys whose values were NOT set because the compare
+      failed.  On total success, this list should be empty.
+    """
+    return self._set_multi_with_policy(MemcacheSetRequest.CAS, mapping,
+                                       time=time, key_prefix=key_prefix,
+                                       namespace=namespace)
+
+  def cas_multi_async(self, mapping, time=0,  key_prefix='',
+                      min_compress_len=0, namespace=None, rpc=None):
+    """Async version of cas_multi() -- note different return value.
+
+    Returns:
+      See _set_multi_async_with_policy().
+    """
+    return self._set_multi_async_with_policy(MemcacheSetRequest.CAS, mapping,
+                                             time=time, key_prefix=key_prefix,
+                                             namespace=namespace, rpc=rpc)
+
   def incr(self, key, delta=1, namespace=None, initial_value=None):
     """Atomically increments a key's value.
 
@@ -913,6 +1143,12 @@
     return self._incrdecr(key, False, delta, namespace=namespace,
                           initial_value=initial_value)
 
+  def incr_async(self, key, delta=1, namespace=None, initial_value=None,
+                 rpc=None):
+    """Async version of incr()."""
+    return self._incrdecr_async(key, False, delta, namespace=namespace,
+                                initial_value=initial_value, rpc=rpc)
+
   def decr(self, key, delta=1, namespace=None, initial_value=None):
     """Atomically decrements a key's value.
 
@@ -949,6 +1185,12 @@
     return self._incrdecr(key, True, delta, namespace=namespace,
                           initial_value=initial_value)
 
+  def decr_async(self, key, delta=1, namespace=None, initial_value=None,
+                 rpc=None):
+    """Async version of decr()."""
+    return self._incrdecr_async(key, True, delta, namespace=namespace,
+                                initial_value=initial_value, rpc=rpc)
+
   def _incrdecr(self, key, is_negative, delta, namespace=None,
                 initial_value=None):
     """Increment or decrement a key by a provided delta.
@@ -973,6 +1215,13 @@
       ValueError: If delta is negative.
       TypeError: If delta isn't an int or long.
     """
+    rpc = self._incrdecr_async(key, is_negative, delta, namespace,
+                               initial_value)
+    return rpc.get_result()
+
+  def _incrdecr_async(self, key, is_negative, delta, namespace=None,
+                initial_value=None, rpc=None):
+    """Async version of _incrdecr()."""
     if not isinstance(delta, (int, long)):
       raise TypeError('Delta must be an integer or long, received %r' % delta)
     if delta < 0:
@@ -981,16 +1230,17 @@
 
     if not isinstance(key, basestring):
       try:
-        iter(key)
-        if is_negative:
-          delta = -delta
-        return self.offset_multi(
-            dict((k, delta) for k in key),
-            namespace=namespace,
-            initial_value=initial_value)
+        it = iter(key)
       except TypeError:
 
         pass
+      else:
+        if is_negative:
+          delta = -delta
+        return self.offset_multi_async(dict((k, delta) for k in it),
+                                       namespace=namespace,
+                                       initial_value=initial_value,
+                                       rpc=rpc)
 
 
     request = MemcacheIncrementRequest()
@@ -1006,11 +1256,16 @@
     if initial_value is not None:
       request.set_initial_value(long(initial_value))
 
+    return self._make_async_call(rpc, 'Increment', request, response,
+                                 self.__incrdecr_hook, None)
+
+  def __incrdecr_hook(self, rpc):
     try:
-      self._make_sync_call('memcache', 'Increment', request, response)
+      rpc.check_success()
     except apiproxy_errors.Error:
       return None
 
+    response = rpc.response
     if response.has_new_value():
       return response.new_value()
     return None
@@ -1035,6 +1290,13 @@
       was not an integer type. The values will wrap-around at unsigned 64-bit
       integer-maximum and underflow will be floored at zero.
     """
+    rpc = self.offset_multi_async(mapping, key_prefix,
+                                  namespace, initial_value)
+    return rpc.get_result()
+
+  def offset_multi_async(self, mapping, key_prefix='',
+                         namespace=None, initial_value=None, rpc=None):
+    """Async version of offset_multi()."""
     if initial_value is not None:
       if not isinstance(initial_value, (int, long)):
         raise TypeError('initial_value must be an integer')
@@ -1064,11 +1326,18 @@
       if initial_value is not None:
         item.set_initial_value(initial_value)
 
+
+    return self._make_async_call(rpc, 'BatchIncrement', request, response,
+                                 self.__offset_hook, mapping)
+
+  def __offset_hook(self, rpc):
+    mapping = rpc.user_data
     try:
-      self._make_sync_call('memcache', 'BatchIncrement', request, response)
+      rpc.check_success()
     except apiproxy_errors.Error:
       return dict((k, None) for k in mapping.iterkeys())
 
+    response = rpc.response
     assert response.item_size() == len(mapping)
 
     result_dict = {}
@@ -1094,6 +1363,10 @@
   Use this method if you want to have custom persistent_id() or
   persistent_load() functions associated with your client.
 
+  NOTE: We don't expose the _async methods as module-level functions; they
+  are too obscure.  We also don't expose gets(), cas(), and cas_multi(),
+  because they maintain state on the client object.
+
   Args:
     client_obj: Instance of the memcache.Client object.
   """
diff --git a/google/appengine/api/quota.py b/google/appengine/api/quota.py
index a2b6b80..3ae4930 100755
--- a/google/appengine/api/quota.py
+++ b/google/appengine/api/quota.py
@@ -29,9 +29,9 @@
 
 
 try:
-  from google3.apphosting.runtime import _apphosting_runtime___python__apiproxy
+  from google.appengine.runtime import apiproxy
 except ImportError:
-  _apphosting_runtime___python__apiproxy = None
+  apiproxy = None
 
 def get_request_cpu_usage():
   """Get the amount of CPU used so far for the current request.
@@ -42,8 +42,8 @@
   Does nothing when used in the dev_appserver.
   """
 
-  if _apphosting_runtime___python__apiproxy:
-    return _apphosting_runtime___python__apiproxy.get_request_cpu_usage()
+  if apiproxy:
+    return apiproxy.GetRequestCpuUsage()
 
   return 0
 
@@ -56,8 +56,8 @@
   Does nothing when used in the dev_appserver.
   """
 
-  if _apphosting_runtime___python__apiproxy:
-    return _apphosting_runtime___python__apiproxy.get_request_api_cpu_usage()
+  if apiproxy:
+    return apiproxy.GetRequestApiCpuUsage()
 
   return 0
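
The import rename above is internal; callers keep using the same helpers.  A
minimal sketch (handle_request() is a placeholder for application work):

from google.appengine.api import quota

cpu_before = quota.get_request_cpu_usage()
handle_request()
cpu_spent = quota.get_request_cpu_usage() - cpu_before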
 
diff --git a/google/appengine/api/rdbms.py b/google/appengine/api/rdbms.py
index 3a9a1b6..15123d2 100755
--- a/google/appengine/api/rdbms.py
+++ b/google/appengine/api/rdbms.py
@@ -50,7 +50,20 @@
 
   if 'db' in kwargs and not database:
     database = kwargs.pop('db')
+
+  user = None
+  if 'user' in kwargs:
+    user = kwargs.pop('user')
+
+  password = None
+  if 'password' in kwargs:
+    password = kwargs.pop('password')
+
   if kwargs:
     logging.info('Ignoring extra kwargs to connect(): %r', kwargs)
 
-  return rdbms_apiproxy.connect('unused_address', instance, database=database)
+  return rdbms_apiproxy.connect('unused_address',
+                                instance,
+                                database=database,
+                                user=user,
+                                password=password)
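
The change above forwards DB-API style 'user' and 'password' keyword arguments to
the proxy instead of discarding them.  A hedged sketch (the enclosing connect()
signature is not shown in this hunk, and the instance/database names are
placeholders):

from google.appengine.api import rdbms

conn = rdbms.connect(instance='myinstance',
                     database='guestbook',
                     user='app_user',          # now forwarded to rdbms_apiproxy
                     password='app_password')
cursor = conn.cursor()
cursor.execute('SELECT 1')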
diff --git a/google/appengine/api/runtime/runtime.py b/google/appengine/api/runtime/runtime.py
index 3cea23d..01a25d8 100644
--- a/google/appengine/api/runtime/runtime.py
+++ b/google/appengine/api/runtime/runtime.py
@@ -26,6 +26,10 @@
 
 
 
+from __future__ import with_statement
+
+
+import threading
 
 from google.appengine.api import apiproxy_stub_map
 from google.appengine.api.system import system_service_pb
@@ -66,13 +70,16 @@
   return response
 
 
+__shutdown_mutex = threading.Lock()
 __shutdown_hook = None
 __shuting_down = False
 
 
 def is_shutting_down():
   """Returns true if the server is shutting down."""
-  return __shuting_down
+  with __shutdown_mutex:
+    shutting_down = __shuting_down
+  return shutting_down
 
 
 def set_shutdown_hook(hook):
@@ -97,8 +104,9 @@
   if hook is not None and not callable(hook):
     raise TypeError("hook must be callable, got %s" % hook.__class__)
   global __shutdown_hook
-  old_hook = __shutdown_hook
-  __shutdown_hook = hook
+  with __shutdown_mutex:
+    old_hook = __shutdown_hook
+    __shutdown_hook = hook
   return old_hook
 
 
@@ -106,6 +114,8 @@
 
 
   global __shuting_down
-  __shuting_down = True
-  if __shutdown_hook:
-    __shutdown_hook()
+  with __shutdown_mutex:
+    __shuting_down = True
+    shutdown_hook = __shutdown_hook
+  if shutdown_hook:
+    shutdown_hook()
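
With the mutex in place, registering a hook and polling the flag work the same way
from application code.  A sketch (the import path follows this file's location; the
hook body and process_one_work_item() are placeholders):

import logging

from google.appengine.api.runtime import runtime

def _on_shutdown():
  logging.info('Instance is shutting down; persisting in-flight work.')

previous_hook = runtime.set_shutdown_hook(_on_shutdown)

while not runtime.is_shutting_down():
  process_one_work_item()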
diff --git a/google/appengine/api/search/QueryLexer.py b/google/appengine/api/search/QueryLexer.py
new file mode 100644
index 0000000..8328302
--- /dev/null
+++ b/google/appengine/api/search/QueryLexer.py
@@ -0,0 +1,971 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import sys
+from antlr3 import *
+from antlr3.compat import set, frozenset
+
+
+
+HIDDEN = BaseRecognizer.HIDDEN
+
+
+DOLLAR=35
+GE=16
+LT=13
+LSQUARE=26
+TO=28
+LETTER=34
+CONJUNCTION=4
+NUMBER=8
+UNDERSCORE=36
+LCURLY=27
+INT=23
+NAME_START=32
+NOT=19
+RSQUARE=29
+TEXT=24
+VALUE=12
+AND=17
+EOF=-1
+LPAREN=20
+PHRASE=25
+RESTRICTION=9
+WORD=11
+COLON=33
+DISJUNCTION=5
+RPAREN=21
+SELECTOR=22
+WS=38
+NEGATION=6
+NONE=7
+OR=18
+RCURLY=30
+GT=15
+DIGIT=31
+MISC=37
+LE=14
+STRING=10
+
+
+class QueryLexer(Lexer):
+
+    grammarFileName = "apphosting/api/search/Query.g"
+    antlr_version = version_str_to_tuple("3.1.1")
+    antlr_version_str = "3.1.1"
+
+    def __init__(self, input=None, state=None):
+        if state is None:
+            state = RecognizerSharedState()
+        Lexer.__init__(self, input, state)
+
+        self.dfa5 = self.DFA5(
+            self, 5,
+            eot = self.DFA5_eot,
+            eof = self.DFA5_eof,
+            min = self.DFA5_min,
+            max = self.DFA5_max,
+            accept = self.DFA5_accept,
+            special = self.DFA5_special,
+            transition = self.DFA5_transition
+            )
+
+
+
+
+
+
+
+    def mOR(self, ):
+
+        try:
+            _type = OR
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match("OR")
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mAND(self, ):
+
+        try:
+            _type = AND
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match("AND")
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mNOT(self, ):
+
+        try:
+            _type = NOT
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match("NOT")
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mTO(self, ):
+
+        try:
+            _type = TO
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match("TO")
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mLPAREN(self, ):
+
+        try:
+            _type = LPAREN
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match(40)
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mRPAREN(self, ):
+
+        try:
+            _type = RPAREN
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match(41)
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mLSQUARE(self, ):
+
+        try:
+            _type = LSQUARE
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match(91)
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mRSQUARE(self, ):
+
+        try:
+            _type = RSQUARE
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match(93)
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mLCURLY(self, ):
+
+        try:
+            _type = LCURLY
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match(123)
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mRCURLY(self, ):
+
+        try:
+            _type = RCURLY
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match(125)
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mINT(self, ):
+
+        try:
+            _type = INT
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+
+            cnt1 = 0
+            while True:
+                alt1 = 2
+                LA1_0 = self.input.LA(1)
+
+                if ((48 <= LA1_0 <= 57)) :
+                    alt1 = 1
+
+
+                if alt1 == 1:
+
+                    pass
+                    self.mDIGIT()
+
+
+                else:
+                    if cnt1 >= 1:
+                        break
+
+                    eee = EarlyExitException(1, self.input)
+                    raise eee
+
+                cnt1 += 1
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mSELECTOR(self, ):
+
+        try:
+            _type = SELECTOR
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.mNAME_START()
+
+            while True:
+                alt2 = 2
+                LA2_0 = self.input.LA(1)
+
+                if (LA2_0 == 36 or (48 <= LA2_0 <= 57) or (65 <= LA2_0 <= 90) or LA2_0 == 95 or (97 <= LA2_0 <= 122) or (192 <= LA2_0 <= 214) or (216 <= LA2_0 <= 246) or (248 <= LA2_0 <= 8191) or (12352 <= LA2_0 <= 12687) or (13056 <= LA2_0 <= 13183) or (13312 <= LA2_0 <= 15661) or (19968 <= LA2_0 <= 40959) or (63744 <= LA2_0 <= 64255)) :
+                    alt2 = 1
+
+
+                if alt2 == 1:
+
+                    pass
+                    if self.input.LA(1) == 36 or (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 90) or self.input.LA(1) == 95 or (97 <= self.input.LA(1) <= 122) or (192 <= self.input.LA(1) <= 214) or (216 <= self.input.LA(1) <= 246) or (248 <= self.input.LA(1) <= 8191) or (12352 <= self.input.LA(1) <= 12687) or (13056 <= self.input.LA(1) <= 13183) or (13312 <= self.input.LA(1) <= 15661) or (19968 <= self.input.LA(1) <= 40959) or (63744 <= self.input.LA(1) <= 64255):
+                        self.input.consume()
+                    else:
+                        mse = MismatchedSetException(None, self.input)
+                        self.recover(mse)
+                        raise mse
+
+
+
+                else:
+                    break
+
+
+            self.mCOLON()
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mTEXT(self, ):
+
+        try:
+            _type = TEXT
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+
+            cnt3 = 0
+            while True:
+                alt3 = 2
+                LA3_0 = self.input.LA(1)
+
+                if (LA3_0 == 33 or (35 <= LA3_0 <= 39) or (44 <= LA3_0 <= 57) or LA3_0 == 59 or LA3_0 == 61 or (63 <= LA3_0 <= 90) or LA3_0 == 92 or (94 <= LA3_0 <= 122) or LA3_0 == 126 or (192 <= LA3_0 <= 214) or (216 <= LA3_0 <= 246) or (248 <= LA3_0 <= 8191) or (12352 <= LA3_0 <= 12687) or (13056 <= LA3_0 <= 13183) or (13312 <= LA3_0 <= 15661) or (19968 <= LA3_0 <= 40959) or (63744 <= LA3_0 <= 64255)) :
+                    alt3 = 1
+
+
+                if alt3 == 1:
+
+                    pass
+                    if self.input.LA(1) == 33 or (35 <= self.input.LA(1) <= 39) or (44 <= self.input.LA(1) <= 57) or self.input.LA(1) == 59 or self.input.LA(1) == 61 or (63 <= self.input.LA(1) <= 90) or self.input.LA(1) == 92 or (94 <= self.input.LA(1) <= 122) or self.input.LA(1) == 126 or (192 <= self.input.LA(1) <= 214) or (216 <= self.input.LA(1) <= 246) or (248 <= self.input.LA(1) <= 8191) or (12352 <= self.input.LA(1) <= 12687) or (13056 <= self.input.LA(1) <= 13183) or (13312 <= self.input.LA(1) <= 15661) or (19968 <= self.input.LA(1) <= 40959) or (63744 <= self.input.LA(1) <= 64255):
+                        self.input.consume()
+                    else:
+                        mse = MismatchedSetException(None, self.input)
+                        self.recover(mse)
+                        raise mse
+
+
+
+                else:
+                    if cnt3 >= 1:
+                        break
+
+                    eee = EarlyExitException(3, self.input)
+                    raise eee
+
+                cnt3 += 1
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mPHRASE(self, ):
+
+        try:
+            _type = PHRASE
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            self.match(34)
+
+            while True:
+                alt4 = 2
+                LA4_0 = self.input.LA(1)
+
+                if ((0 <= LA4_0 <= 33) or (35 <= LA4_0 <= 91) or (93 <= LA4_0 <= 65535)) :
+                    alt4 = 1
+
+
+                if alt4 == 1:
+
+                    pass
+                    if (0 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535):
+                        self.input.consume()
+                    else:
+                        mse = MismatchedSetException(None, self.input)
+                        self.recover(mse)
+                        raise mse
+
+
+
+                else:
+                    break
+
+
+            self.match(34)
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mWS(self, ):
+
+        try:
+            _type = WS
+            _channel = DEFAULT_CHANNEL
+
+
+
+            pass
+            if (9 <= self.input.LA(1) <= 10) or self.input.LA(1) == 12 or self.input.LA(1) == 32:
+                self.input.consume()
+            else:
+                mse = MismatchedSetException(None, self.input)
+                self.recover(mse)
+                raise mse
+
+
+            self.skip()
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mLETTER(self, ):
+
+        try:
+
+
+            pass
+            if (65 <= self.input.LA(1) <= 90) or (97 <= self.input.LA(1) <= 122) or (192 <= self.input.LA(1) <= 214) or (216 <= self.input.LA(1) <= 246) or (248 <= self.input.LA(1) <= 8191) or (12352 <= self.input.LA(1) <= 12687) or (13056 <= self.input.LA(1) <= 13183) or (13312 <= self.input.LA(1) <= 15661) or (19968 <= self.input.LA(1) <= 40959) or (63744 <= self.input.LA(1) <= 64255):
+                self.input.consume()
+            else:
+                mse = MismatchedSetException(None, self.input)
+                self.recover(mse)
+                raise mse
+
+
+
+
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mDIGIT(self, ):
+
+        try:
+
+
+            pass
+            self.matchRange(48, 57)
+
+
+
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mMISC(self, ):
+
+        try:
+
+
+            pass
+            if self.input.LA(1) == 33 or self.input.LA(1) == 35 or (37 <= self.input.LA(1) <= 39) or (44 <= self.input.LA(1) <= 47) or self.input.LA(1) == 59 or self.input.LA(1) == 61 or (63 <= self.input.LA(1) <= 64) or self.input.LA(1) == 92 or self.input.LA(1) == 94 or self.input.LA(1) == 96 or self.input.LA(1) == 126:
+                self.input.consume()
+            else:
+                mse = MismatchedSetException(None, self.input)
+                self.recover(mse)
+                raise mse
+
+
+
+
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mUNDERSCORE(self, ):
+
+        try:
+
+
+            pass
+            self.match(95)
+
+
+
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mNAME_START(self, ):
+
+        try:
+
+
+            pass
+            if self.input.LA(1) == 36 or (65 <= self.input.LA(1) <= 90) or self.input.LA(1) == 95 or (97 <= self.input.LA(1) <= 122) or (192 <= self.input.LA(1) <= 214) or (216 <= self.input.LA(1) <= 246) or (248 <= self.input.LA(1) <= 8191) or (12352 <= self.input.LA(1) <= 12687) or (13056 <= self.input.LA(1) <= 13183) or (13312 <= self.input.LA(1) <= 15661) or (19968 <= self.input.LA(1) <= 40959) or (63744 <= self.input.LA(1) <= 64255):
+                self.input.consume()
+            else:
+                mse = MismatchedSetException(None, self.input)
+                self.recover(mse)
+                raise mse
+
+
+
+
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mDOLLAR(self, ):
+
+        try:
+
+
+            pass
+            self.match(36)
+
+
+
+
+        finally:
+
+            pass
+
+
+
+
+
+
+    def mCOLON(self, ):
+
+        try:
+
+
+            pass
+            self.match(58)
+
+
+
+
+        finally:
+
+            pass
+
+
+
+
+
+    def mTokens(self):
+
+        alt5 = 15
+        alt5 = self.dfa5.predict(self.input)
+        if alt5 == 1:
+
+            pass
+            self.mOR()
+
+
+        elif alt5 == 2:
+
+            pass
+            self.mAND()
+
+
+        elif alt5 == 3:
+
+            pass
+            self.mNOT()
+
+
+        elif alt5 == 4:
+
+            pass
+            self.mTO()
+
+
+        elif alt5 == 5:
+
+            pass
+            self.mLPAREN()
+
+
+        elif alt5 == 6:
+
+            pass
+            self.mRPAREN()
+
+
+        elif alt5 == 7:
+
+            pass
+            self.mLSQUARE()
+
+
+        elif alt5 == 8:
+
+            pass
+            self.mRSQUARE()
+
+
+        elif alt5 == 9:
+
+            pass
+            self.mLCURLY()
+
+
+        elif alt5 == 10:
+
+            pass
+            self.mRCURLY()
+
+
+        elif alt5 == 11:
+
+            pass
+            self.mINT()
+
+
+        elif alt5 == 12:
+
+            pass
+            self.mSELECTOR()
+
+
+        elif alt5 == 13:
+
+            pass
+            self.mTEXT()
+
+
+        elif alt5 == 14:
+
+            pass
+            self.mPHRASE()
+
+
+        elif alt5 == 15:
+
+            pass
+            self.mWS()
+
+
+
+
+
+
+
+
+
+    DFA5_eot = DFA.unpack(
+        u"\1\uffff\4\15\6\uffff\1\26\1\15\3\uffff\1\27\1\15\1\uffff\2\15"
+        u"\1\32\2\uffff\1\33\1\34\3\uffff"
+        )
+
+    DFA5_eof = DFA.unpack(
+        u"\35\uffff"
+        )
+
+    DFA5_min = DFA.unpack(
+        u"\1\11\4\44\6\uffff\1\41\1\44\3\uffff\1\41\1\44\1\uffff\2\44\1\41"
+        u"\2\uffff\2\41\3\uffff"
+        )
+
+    DFA5_max = DFA.unpack(
+        u"\5\ufaff\6\uffff\2\ufaff\3\uffff\2\ufaff\1\uffff\3\ufaff\2\uffff"
+        u"\2\ufaff\3\uffff"
+        )
+
+    DFA5_accept = DFA.unpack(
+        u"\5\uffff\1\5\1\6\1\7\1\10\1\11\1\12\2\uffff\1\15\1\16\1\17\2\uffff"
+        u"\1\14\3\uffff\1\13\1\1\2\uffff\1\4\1\2\1\3"
+        )
+
+    DFA5_special = DFA.unpack(
+        u"\35\uffff"
+        )
+
+
+    DFA5_transition = [
+        DFA.unpack(u"\2\17\1\uffff\1\17\23\uffff\1\17\1\15\1\16\1\15\1\14"
+        u"\3\15\1\5\1\6\2\uffff\4\15\12\13\1\uffff\1\15\1\uffff\1\15\1\uffff"
+        u"\2\15\1\2\14\14\1\3\1\1\4\14\1\4\6\14\1\7\1\15\1\10\1\15\1\14\1"
+        u"\15\32\14\1\11\1\uffff\1\12\1\15\101\uffff\27\14\1\uffff\37\14"
+        u"\1\uffff\u1f08\14\u1040\uffff\u0150\14\u0170\uffff\u0080\14\u0080"
+        u"\uffff\u092e\14\u10d2\uffff\u5200\14\u5900\uffff\u0200\14"),
+        DFA.unpack(u"\1\21\13\uffff\12\21\1\22\6\uffff\21\21\1\20\10\21"
+        u"\4\uffff\1\21\1\uffff\32\21\105\uffff\27\21\1\uffff\37\21\1\uffff"
+        u"\u1f08\21\u1040\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff"
+        u"\u092e\21\u10d2\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u"\1\21\13\uffff\12\21\1\22\6\uffff\15\21\1\23\14\21"
+        u"\4\uffff\1\21\1\uffff\32\21\105\uffff\27\21\1\uffff\37\21\1\uffff"
+        u"\u1f08\21\u1040\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff"
+        u"\u092e\21\u10d2\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u"\1\21\13\uffff\12\21\1\22\6\uffff\16\21\1\24\13\21"
+        u"\4\uffff\1\21\1\uffff\32\21\105\uffff\27\21\1\uffff\37\21\1\uffff"
+        u"\u1f08\21\u1040\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff"
+        u"\u092e\21\u10d2\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u"\1\21\13\uffff\12\21\1\22\6\uffff\16\21\1\25\13\21"
+        u"\4\uffff\1\21\1\uffff\32\21\105\uffff\27\21\1\uffff\37\21\1\uffff"
+        u"\u1f08\21\u1040\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff"
+        u"\u092e\21\u10d2\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\15\1\uffff\5\15\4\uffff\4\15\12\13\1\uffff\1\15"
+        u"\1\uffff\1\15\1\uffff\34\15\1\uffff\1\15\1\uffff\35\15\3\uffff"
+        u"\1\15\101\uffff\27\15\1\uffff\37\15\1\uffff\u1f08\15\u1040\uffff"
+        u"\u0150\15\u0170\uffff\u0080\15\u0080\uffff\u092e\15\u10d2\uffff"
+        u"\u5200\15\u5900\uffff\u0200\15"),
+        DFA.unpack(u"\1\21\13\uffff\12\21\1\22\6\uffff\32\21\4\uffff\1\21"
+        u"\1\uffff\32\21\105\uffff\27\21\1\uffff\37\21\1\uffff\u1f08\21\u1040"
+        u"\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff\u092e\21\u10d2"
+        u"\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\15\1\uffff\1\15\1\21\3\15\4\uffff\4\15\12\21\1\22"
+        u"\1\15\1\uffff\1\15\1\uffff\2\15\32\21\1\uffff\1\15\1\uffff\1\15"
+        u"\1\21\1\15\32\21\3\uffff\1\15\101\uffff\27\21\1\uffff\37\21\1\uffff"
+        u"\u1f08\21\u1040\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff"
+        u"\u092e\21\u10d2\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u"\1\21\13\uffff\12\21\1\22\6\uffff\32\21\4\uffff\1\21"
+        u"\1\uffff\32\21\105\uffff\27\21\1\uffff\37\21\1\uffff\u1f08\21\u1040"
+        u"\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff\u092e\21\u10d2"
+        u"\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\21\13\uffff\12\21\1\22\6\uffff\3\21\1\30\26\21\4"
+        u"\uffff\1\21\1\uffff\32\21\105\uffff\27\21\1\uffff\37\21\1\uffff"
+        u"\u1f08\21\u1040\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff"
+        u"\u092e\21\u10d2\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u"\1\21\13\uffff\12\21\1\22\6\uffff\23\21\1\31\6\21\4"
+        u"\uffff\1\21\1\uffff\32\21\105\uffff\27\21\1\uffff\37\21\1\uffff"
+        u"\u1f08\21\u1040\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff"
+        u"\u092e\21\u10d2\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u"\1\15\1\uffff\1\15\1\21\3\15\4\uffff\4\15\12\21\1\22"
+        u"\1\15\1\uffff\1\15\1\uffff\2\15\32\21\1\uffff\1\15\1\uffff\1\15"
+        u"\1\21\1\15\32\21\3\uffff\1\15\101\uffff\27\21\1\uffff\37\21\1\uffff"
+        u"\u1f08\21\u1040\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff"
+        u"\u092e\21\u10d2\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\15\1\uffff\1\15\1\21\3\15\4\uffff\4\15\12\21\1\22"
+        u"\1\15\1\uffff\1\15\1\uffff\2\15\32\21\1\uffff\1\15\1\uffff\1\15"
+        u"\1\21\1\15\32\21\3\uffff\1\15\101\uffff\27\21\1\uffff\37\21\1\uffff"
+        u"\u1f08\21\u1040\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff"
+        u"\u092e\21\u10d2\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u"\1\15\1\uffff\1\15\1\21\3\15\4\uffff\4\15\12\21\1\22"
+        u"\1\15\1\uffff\1\15\1\uffff\2\15\32\21\1\uffff\1\15\1\uffff\1\15"
+        u"\1\21\1\15\32\21\3\uffff\1\15\101\uffff\27\21\1\uffff\37\21\1\uffff"
+        u"\u1f08\21\u1040\uffff\u0150\21\u0170\uffff\u0080\21\u0080\uffff"
+        u"\u092e\21\u10d2\uffff\u5200\21\u5900\uffff\u0200\21"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"")
+    ]
+
+
+
+    DFA5 = DFA
+
+
+
+
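+# main() below lets the generated lexer be run standalone via the antlr3
+# runtime's LexerMain driver.  As an illustrative sketch only (not part of
+# the SDK), the lexer and the QueryParser generated alongside it could be
+# driven together roughly like this:
+#
+#   from antlr3 import ANTLRStringStream, CommonTokenStream
+#   from google.appengine.api.search.QueryLexer import QueryLexer
+#   from google.appengine.api.search.QueryParser import QueryParser
+#
+#   tokens = CommonTokenStream(QueryLexer(ANTLRStringStream(u'field:value')))
+#   tree = QueryParser(tokens).query().tree   # AST rooted at CONJUNCTION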
+def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
+    from antlr3.main import LexerMain
+    main = LexerMain(QueryLexer)
+    main.stdin = stdin
+    main.stdout = stdout
+    main.stderr = stderr
+    main.execute(argv)
+
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/google/appengine/api/search/QueryParser.py b/google/appengine/api/search/QueryParser.py
new file mode 100644
index 0000000..a0de716
--- /dev/null
+++ b/google/appengine/api/search/QueryParser.py
@@ -0,0 +1,2001 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import sys
+from antlr3 import *
+from antlr3.compat import set, frozenset
+
+from antlr3.tree import *
+
+
+
+
+
+
+
+
+
+
+HIDDEN = BaseRecognizer.HIDDEN
+
+
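+# Token type identifiers generated from the Query.g grammar.  The numeric
+# values index into the tokenNames list below and match the values emitted
+# by QueryLexer, so lexer output and parser expectations stay in sync.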
+DOLLAR=35
+GE=16
+LT=13
+LSQUARE=26
+TO=28
+LETTER=34
+CONJUNCTION=4
+NUMBER=8
+UNDERSCORE=36
+LCURLY=27
+INT=23
+NAME_START=32
+NOT=19
+RSQUARE=29
+TEXT=24
+VALUE=12
+AND=17
+EOF=-1
+PHRASE=25
+LPAREN=20
+RESTRICTION=9
+COLON=33
+WORD=11
+DISJUNCTION=5
+RPAREN=21
+WS=38
+SELECTOR=22
+NEGATION=6
+NONE=7
+RCURLY=30
+OR=18
+GT=15
+DIGIT=31
+MISC=37
+LE=14
+STRING=10
+
+
+tokenNames = [
+    "<invalid>", "<EOR>", "<DOWN>", "<UP>",
+    "CONJUNCTION", "DISJUNCTION", "NEGATION", "NONE", "NUMBER", "RESTRICTION",
+    "STRING", "WORD", "VALUE", "LT", "LE", "GT", "GE", "AND", "OR", "NOT",
+    "LPAREN", "RPAREN", "SELECTOR", "INT", "TEXT", "PHRASE", "LSQUARE",
+    "LCURLY", "TO", "RSQUARE", "RCURLY", "DIGIT", "NAME_START", "COLON",
+    "LETTER", "DOLLAR", "UNDERSCORE", "MISC", "WS"
+]
+
+
+
+
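+# QueryParser is the ANTLR-generated recursive-descent parser for the search
+# query grammar.  Each rule method returns a *_return object whose .tree
+# attribute is the AST node built through the CommonTreeAdaptor.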
+class QueryParser(Parser):
+    grammarFileName = "apphosting/api/search/Query.g"
+    antlr_version = version_str_to_tuple("3.1.1")
+    antlr_version_str = "3.1.1"
+    tokenNames = tokenNames
+
+    def __init__(self, input, state=None):
+        if state is None:
+            state = RecognizerSharedState()
+
+        Parser.__init__(self, input, state)
+
+
+        self.dfa12 = self.DFA12(
+            self, 12,
+            eot = self.DFA12_eot,
+            eof = self.DFA12_eof,
+            min = self.DFA12_min,
+            max = self.DFA12_max,
+            accept = self.DFA12_accept,
+            special = self.DFA12_special,
+            transition = self.DFA12_transition
+            )
+
+
+
+
+
+
+
+        self._adaptor = CommonTreeAdaptor()
+
+
+
+    def getTreeAdaptor(self):
+        return self._adaptor
+
+    def setTreeAdaptor(self, adaptor):
+        self._adaptor = adaptor
+
+    adaptor = property(getTreeAdaptor, setTreeAdaptor)
+
+
+
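+    # Helper actions referenced from the grammar: trimLast() drops the last
+    # character of a SELECTOR token (presumably the trailing ':' separating
+    # a field name from its value), and normalizeSpace() is currently a
+    # pass-through hook for phrase whitespace normalization.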
+    def trimLast(self, selector):
+      return selector[:-1]
+
+    def normalizeSpace(self, phrase):
+
+      return phrase
+
+
+    class query_return(ParserRuleReturnScope):
+        def __init__(self):
+            ParserRuleReturnScope.__init__(self)
+
+            self.tree = None
+
+
+
+
+
+
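+    # query : expression EOF -- the entry rule; parses a complete search
+    # query and returns its AST in retval.tree.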
+    def query(self, ):
+
+        retval = self.query_return()
+        retval.start = self.input.LT(1)
+
+        root_0 = None
+
+        EOF2 = None
+        expression1 = None
+
+
+        EOF2_tree = None
+
+        try:
+            try:
+
+
+                pass
+                root_0 = self._adaptor.nil()
+
+                self._state.following.append(self.FOLLOW_expression_in_query131)
+                expression1 = self.expression()
+
+                self._state.following.pop()
+                self._adaptor.addChild(root_0, expression1.tree)
+                EOF2=self.match(self.input, EOF, self.FOLLOW_EOF_in_query133)
+
+                EOF2_tree = self._adaptor.createWithPayload(EOF2)
+                self._adaptor.addChild(root_0, EOF2_tree)
+
+
+
+
+                retval.stop = self.input.LT(-1)
+
+
+                retval.tree = self._adaptor.rulePostProcessing(root_0)
+                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+        finally:
+
+            pass
+
+        return retval
+
+
+
+    class expression_return(ParserRuleReturnScope):
+        def __init__(self):
+            ParserRuleReturnScope.__init__(self)
+
+            self.tree = None
+
+
+
+
+
+
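+    # expression : factor (AND? factor)* -- adjacent factors (with an
+    # optional explicit AND between them) are rewritten into a single
+    # CONJUNCTION node with the factors as children.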
+    def expression(self, ):
+
+        retval = self.expression_return()
+        retval.start = self.input.LT(1)
+
+        root_0 = None
+
+        AND4 = None
+        factor3 = None
+
+        factor5 = None
+
+
+        AND4_tree = None
+        stream_AND = RewriteRuleTokenStream(self._adaptor, "token AND")
+        stream_factor = RewriteRuleSubtreeStream(self._adaptor, "rule factor")
+        try:
+            try:
+
+
+                pass
+                self._state.following.append(self.FOLLOW_factor_in_expression151)
+                factor3 = self.factor()
+
+                self._state.following.pop()
+                stream_factor.add(factor3.tree)
+
+                while True:
+                    alt2 = 2
+                    LA2_0 = self.input.LA(1)
+
+                    if (LA2_0 == AND or (NOT <= LA2_0 <= LPAREN) or (SELECTOR <= LA2_0 <= PHRASE)) :
+                        alt2 = 1
+
+
+                    if alt2 == 1:
+
+                        pass
+
+                        alt1 = 2
+                        LA1_0 = self.input.LA(1)
+
+                        if (LA1_0 == AND) :
+                            alt1 = 1
+                        if alt1 == 1:
+
+                            pass
+                            AND4=self.match(self.input, AND, self.FOLLOW_AND_in_expression154)
+                            stream_AND.add(AND4)
+
+
+
+                        self._state.following.append(self.FOLLOW_factor_in_expression157)
+                        factor5 = self.factor()
+
+                        self._state.following.pop()
+                        stream_factor.add(factor5.tree)
+
+
+                    else:
+                        break
+
+
+
+
+
+
+
+
+
+
+                retval.tree = root_0
+
+                if retval is not None:
+                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                else:
+                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                root_0 = self._adaptor.nil()
+
+
+                root_1 = self._adaptor.nil()
+                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(CONJUNCTION, "CONJUNCTION"), root_1)
+
+
+                if not (stream_factor.hasNext()):
+                    raise RewriteEarlyExitException()
+
+                while stream_factor.hasNext():
+                    self._adaptor.addChild(root_1, stream_factor.nextTree())
+
+
+                stream_factor.reset()
+
+                self._adaptor.addChild(root_0, root_1)
+
+
+
+                retval.tree = root_0
+
+
+
+                retval.stop = self.input.LT(-1)
+
+
+                retval.tree = self._adaptor.rulePostProcessing(root_0)
+                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+        finally:
+
+            pass
+
+        return retval
+
+
+
+    class factor_return(ParserRuleReturnScope):
+        def __init__(self):
+            ParserRuleReturnScope.__init__(self)
+
+            self.tree = None
+
+
+
+
+
+
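+    # factor : term (OR term)* -- OR-separated terms are rewritten into a
+    # DISJUNCTION node with the terms as children.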
+    def factor(self, ):
+
+        retval = self.factor_return()
+        retval.start = self.input.LT(1)
+
+        root_0 = None
+
+        OR7 = None
+        term6 = None
+
+        term8 = None
+
+
+        OR7_tree = None
+        stream_OR = RewriteRuleTokenStream(self._adaptor, "token OR")
+        stream_term = RewriteRuleSubtreeStream(self._adaptor, "rule term")
+        try:
+            try:
+
+
+                pass
+                self._state.following.append(self.FOLLOW_term_in_factor185)
+                term6 = self.term()
+
+                self._state.following.pop()
+                stream_term.add(term6.tree)
+
+                while True:
+                    alt3 = 2
+                    LA3_0 = self.input.LA(1)
+
+                    if (LA3_0 == OR) :
+                        alt3 = 1
+
+
+                    if alt3 == 1:
+
+                        pass
+                        OR7=self.match(self.input, OR, self.FOLLOW_OR_in_factor188)
+                        stream_OR.add(OR7)
+                        self._state.following.append(self.FOLLOW_term_in_factor190)
+                        term8 = self.term()
+
+                        self._state.following.pop()
+                        stream_term.add(term8.tree)
+
+
+                    else:
+                        break
+
+
+
+
+
+
+
+
+
+
+                retval.tree = root_0
+
+                if retval is not None:
+                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                else:
+                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                root_0 = self._adaptor.nil()
+
+
+                root_1 = self._adaptor.nil()
+                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(DISJUNCTION, "DISJUNCTION"), root_1)
+
+
+                if not (stream_term.hasNext()):
+                    raise RewriteEarlyExitException()
+
+                while stream_term.hasNext():
+                    self._adaptor.addChild(root_1, stream_term.nextTree())
+
+
+                stream_term.reset()
+
+                self._adaptor.addChild(root_0, root_1)
+
+
+
+                retval.tree = root_0
+
+
+
+                retval.stop = self.input.LT(-1)
+
+
+                retval.tree = self._adaptor.rulePostProcessing(root_0)
+                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+        finally:
+
+            pass
+
+        return retval
+
+
+
+    class term_return(ParserRuleReturnScope):
+        def __init__(self):
+            ParserRuleReturnScope.__init__(self)
+
+            self.tree = None
+
+
+
+
+
+
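+    # term : NOT primitive | primitive -- a negated primitive is wrapped in
+    # a NEGATION node; otherwise the primitive's tree is passed through.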
+    def term(self, ):
+
+        retval = self.term_return()
+        retval.start = self.input.LT(1)
+
+        root_0 = None
+
+        NOT9 = None
+        primitive10 = None
+
+        primitive11 = None
+
+
+        NOT9_tree = None
+        stream_NOT = RewriteRuleTokenStream(self._adaptor, "token NOT")
+        stream_primitive = RewriteRuleSubtreeStream(self._adaptor, "rule primitive")
+        try:
+            try:
+
+                alt4 = 2
+                LA4_0 = self.input.LA(1)
+
+                if (LA4_0 == NOT) :
+                    alt4 = 1
+                elif (LA4_0 == LPAREN or (SELECTOR <= LA4_0 <= PHRASE)) :
+                    alt4 = 2
+                else:
+                    nvae = NoViableAltException("", 4, 0, self.input)
+
+                    raise nvae
+
+                if alt4 == 1:
+
+                    pass
+                    NOT9=self.match(self.input, NOT, self.FOLLOW_NOT_in_term219)
+                    stream_NOT.add(NOT9)
+                    self._state.following.append(self.FOLLOW_primitive_in_term221)
+                    primitive10 = self.primitive()
+
+                    self._state.following.pop()
+                    stream_primitive.add(primitive10.tree)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(NEGATION, "NEGATION"), root_1)
+
+                    self._adaptor.addChild(root_1, stream_primitive.nextTree())
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt4 == 2:
+
+                    pass
+                    self._state.following.append(self.FOLLOW_primitive_in_term235)
+                    primitive11 = self.primitive()
+
+                    self._state.following.pop()
+                    stream_primitive.add(primitive11.tree)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+                    self._adaptor.addChild(root_0, stream_primitive.nextTree())
+
+
+
+                    retval.tree = root_0
+
+
+                retval.stop = self.input.LT(-1)
+
+
+                retval.tree = self._adaptor.rulePostProcessing(root_0)
+                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+        finally:
+
+            pass
+
+        return retval
+
+
+
+    class primitive_return(ParserRuleReturnScope):
+        def __init__(self):
+            ParserRuleReturnScope.__init__(self)
+
+            self.tree = None
+
+
+
+
+
+
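+    # primitive : selector value | atom | LPAREN expression RPAREN --
+    # a field restriction becomes ^(RESTRICTION selector value), a bare atom
+    # becomes ^(RESTRICTION NONE atom), and a parenthesized expression is
+    # passed through unchanged.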
+    def primitive(self, ):
+
+        retval = self.primitive_return()
+        retval.start = self.input.LT(1)
+
+        root_0 = None
+
+        LPAREN14 = None
+        RPAREN16 = None
+        field = None
+
+        value12 = None
+
+        atom13 = None
+
+        expression15 = None
+
+
+        LPAREN14_tree = None
+        RPAREN16_tree = None
+        stream_RPAREN = RewriteRuleTokenStream(self._adaptor, "token RPAREN")
+        stream_LPAREN = RewriteRuleTokenStream(self._adaptor, "token LPAREN")
+        stream_expression = RewriteRuleSubtreeStream(self._adaptor, "rule expression")
+        stream_selector = RewriteRuleSubtreeStream(self._adaptor, "rule selector")
+        stream_atom = RewriteRuleSubtreeStream(self._adaptor, "rule atom")
+        stream_value = RewriteRuleSubtreeStream(self._adaptor, "rule value")
+        try:
+            try:
+
+                alt5 = 3
+                LA5 = self.input.LA(1)
+                if LA5 == SELECTOR:
+                    alt5 = 1
+                elif LA5 == INT or LA5 == TEXT or LA5 == PHRASE:
+                    alt5 = 2
+                elif LA5 == LPAREN:
+                    alt5 = 3
+                else:
+                    nvae = NoViableAltException("", 5, 0, self.input)
+
+                    raise nvae
+
+                if alt5 == 1:
+
+                    pass
+                    self._state.following.append(self.FOLLOW_selector_in_primitive260)
+                    field = self.selector()
+
+                    self._state.following.pop()
+                    stream_selector.add(field.tree)
+                    self._state.following.append(self.FOLLOW_value_in_primitive262)
+                    value12 = self.value()
+
+                    self._state.following.pop()
+                    stream_value.add(value12.tree)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if field is not None:
+                        stream_field = RewriteRuleSubtreeStream(self._adaptor, "token field", field.tree)
+                    else:
+                        stream_field = RewriteRuleSubtreeStream(self._adaptor, "token field", None)
+
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(RESTRICTION, "RESTRICTION"), root_1)
+
+                    self._adaptor.addChild(root_1, stream_field.nextTree())
+                    self._adaptor.addChild(root_1, stream_value.nextTree())
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt5 == 2:
+
+                    pass
+                    self._state.following.append(self.FOLLOW_atom_in_primitive279)
+                    atom13 = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(atom13.tree)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(RESTRICTION, "RESTRICTION"), root_1)
+
+                    self._adaptor.addChild(root_1, self._adaptor.createFromType(NONE, "NONE"))
+                    self._adaptor.addChild(root_1, stream_atom.nextTree())
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt5 == 3:
+
+                    pass
+                    LPAREN14=self.match(self.input, LPAREN, self.FOLLOW_LPAREN_in_primitive295)
+                    stream_LPAREN.add(LPAREN14)
+                    self._state.following.append(self.FOLLOW_expression_in_primitive297)
+                    expression15 = self.expression()
+
+                    self._state.following.pop()
+                    stream_expression.add(expression15.tree)
+                    RPAREN16=self.match(self.input, RPAREN, self.FOLLOW_RPAREN_in_primitive299)
+                    stream_RPAREN.add(RPAREN16)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+                    self._adaptor.addChild(root_0, stream_expression.nextTree())
+
+
+
+                    retval.tree = root_0
+
+
+                retval.stop = self.input.LT(-1)
+
+
+                retval.tree = self._adaptor.rulePostProcessing(root_0)
+                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+        finally:
+
+            pass
+
+        return retval
+
+
+
+    class value_return(ParserRuleReturnScope):
+        def __init__(self):
+            ParserRuleReturnScope.__init__(self)
+
+            self.tree = None
+
+
+
+
+
+
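+    # value : atom | range -- the right-hand side of a restriction is either
+    # a single atom or a bracketed range.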
+    def value(self, ):
+
+        retval = self.value_return()
+        retval.start = self.input.LT(1)
+
+        root_0 = None
+
+        atom17 = None
+
+        range18 = None
+
+
+        stream_atom = RewriteRuleSubtreeStream(self._adaptor, "rule atom")
+        stream_range = RewriteRuleSubtreeStream(self._adaptor, "rule range")
+        try:
+            try:
+
+                alt6 = 2
+                LA6_0 = self.input.LA(1)
+
+                if ((INT <= LA6_0 <= PHRASE)) :
+                    alt6 = 1
+                elif ((LSQUARE <= LA6_0 <= LCURLY)) :
+                    alt6 = 2
+                else:
+                    nvae = NoViableAltException("", 6, 0, self.input)
+
+                    raise nvae
+
+                if alt6 == 1:
+
+                    pass
+                    self._state.following.append(self.FOLLOW_atom_in_value318)
+                    atom17 = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(atom17.tree)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+                    self._adaptor.addChild(root_0, stream_atom.nextTree())
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt6 == 2:
+
+                    pass
+                    self._state.following.append(self.FOLLOW_range_in_value328)
+                    range18 = self.range()
+
+                    self._state.following.pop()
+                    stream_range.add(range18.tree)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+                    self._adaptor.addChild(root_0, stream_range.nextTree())
+
+
+
+                    retval.tree = root_0
+
+
+                retval.stop = self.input.LT(-1)
+
+
+                retval.tree = self._adaptor.rulePostProcessing(root_0)
+                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+        finally:
+
+            pass
+
+        return retval
+
+
+
+    class selector_return(ParserRuleReturnScope):
+        def __init__(self):
+            ParserRuleReturnScope.__init__(self)
+
+            self.tree = None
+
+
+
+
+
+
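+    # selector : SELECTOR -- matches a field-name token and trims its last
+    # character via trimLast() before adding it to the tree.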
+    def selector(self, ):
+
+        retval = self.selector_return()
+        retval.start = self.input.LT(1)
+
+        root_0 = None
+
+        s = None
+
+        s_tree = None
+
+        try:
+            try:
+
+
+                pass
+                root_0 = self._adaptor.nil()
+
+                s=self.match(self.input, SELECTOR, self.FOLLOW_SELECTOR_in_selector348)
+
+                s_tree = self._adaptor.createWithPayload(s)
+                self._adaptor.addChild(root_0, s_tree)
+
+
+                s.setText(self.trimLast(s.text))
+
+
+
+
+                retval.stop = self.input.LT(-1)
+
+
+                retval.tree = self._adaptor.rulePostProcessing(root_0)
+                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+        finally:
+
+            pass
+
+        return retval
+
+
+
+    class atom_return(ParserRuleReturnScope):
+        def __init__(self):
+            ParserRuleReturnScope.__init__(self)
+
+            self.tree = None
+
+
+
+
+
+
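+    # atom : INT | TEXT | PHRASE -- each alternative is rewritten into a
+    # VALUE node tagged with NUMBER, WORD or STRING respectively; PHRASE
+    # text is passed through normalizeSpace() first.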
+    def atom(self, ):
+
+        retval = self.atom_return()
+        retval.start = self.input.LT(1)
+
+        root_0 = None
+
+        v = None
+        t = None
+        p = None
+
+        v_tree = None
+        t_tree = None
+        p_tree = None
+        stream_INT = RewriteRuleTokenStream(self._adaptor, "token INT")
+        stream_TEXT = RewriteRuleTokenStream(self._adaptor, "token TEXT")
+        stream_PHRASE = RewriteRuleTokenStream(self._adaptor, "token PHRASE")
+
+        try:
+            try:
+
+                alt7 = 3
+                LA7 = self.input.LA(1)
+                if LA7 == INT:
+                    alt7 = 1
+                elif LA7 == TEXT:
+                    alt7 = 2
+                elif LA7 == PHRASE:
+                    alt7 = 3
+                else:
+                    nvae = NoViableAltException("", 7, 0, self.input)
+
+                    raise nvae
+
+                if alt7 == 1:
+
+                    pass
+                    v=self.match(self.input, INT, self.FOLLOW_INT_in_atom370)
+                    stream_INT.add(v)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+                    stream_v = RewriteRuleTokenStream(self._adaptor, "token v", v)
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(VALUE, "VALUE"), root_1)
+
+                    self._adaptor.addChild(root_1, self._adaptor.createFromType(NUMBER, "NUMBER"))
+                    self._adaptor.addChild(root_1, stream_v.nextNode())
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt7 == 2:
+
+                    pass
+                    t=self.match(self.input, TEXT, self.FOLLOW_TEXT_in_atom389)
+                    stream_TEXT.add(t)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+                    stream_t = RewriteRuleTokenStream(self._adaptor, "token t", t)
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(VALUE, "VALUE"), root_1)
+
+                    self._adaptor.addChild(root_1, self._adaptor.createFromType(WORD, "WORD"))
+                    self._adaptor.addChild(root_1, stream_t.nextNode())
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt7 == 3:
+
+                    pass
+                    p=self.match(self.input, PHRASE, self.FOLLOW_PHRASE_in_atom408)
+                    stream_PHRASE.add(p)
+
+                    p.setText(self.normalizeSpace(p.text))
+
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+                    stream_p = RewriteRuleTokenStream(self._adaptor, "token p", p)
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(VALUE, "VALUE"), root_1)
+
+                    self._adaptor.addChild(root_1, self._adaptor.createFromType(STRING, "STRING"))
+                    self._adaptor.addChild(root_1, stream_p.nextNode())
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                retval.stop = self.input.LT(-1)
+
+
+                retval.tree = self._adaptor.rulePostProcessing(root_0)
+                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+        finally:
+
+            pass
+
+        return retval
+
+
+
+    class range_return(ParserRuleReturnScope):
+        def __init__(self):
+            ParserRuleReturnScope.__init__(self)
+
+            self.tree = None
+
+
+
+
+
+
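+    # range : the eight bracketed range forms ("[a TO b]", "{a TO b}" and
+    # the open-ended variants), disambiguated by DFA12.  Per the rewrites
+    # below, square brackets map to inclusive bounds (GE/LE) and curly
+    # braces to exclusive bounds (GT/LT); two-sided ranges are rewritten as
+    # a CONJUNCTION of the lower and upper comparisons.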
+    def range(self, ):
+
+        retval = self.range_return()
+        retval.start = self.input.LT(1)
+
+        root_0 = None
+
+        LSQUARE19 = None
+        LCURLY20 = None
+        TO21 = None
+        RSQUARE23 = None
+        LSQUARE24 = None
+        LCURLY25 = None
+        TO26 = None
+        RCURLY28 = None
+        LSQUARE29 = None
+        TO31 = None
+        RSQUARE32 = None
+        RCURLY33 = None
+        LCURLY34 = None
+        TO36 = None
+        RSQUARE37 = None
+        RCURLY38 = None
+        LSQUARE39 = None
+        TO40 = None
+        RSQUARE41 = None
+        LCURLY42 = None
+        TO43 = None
+        RSQUARE44 = None
+        LSQUARE45 = None
+        TO46 = None
+        RCURLY47 = None
+        LCURLY48 = None
+        TO49 = None
+        RCURLY50 = None
+        l = None
+
+        h = None
+
+        atom22 = None
+
+        atom27 = None
+
+        atom30 = None
+
+        atom35 = None
+
+
+        LSQUARE19_tree = None
+        LCURLY20_tree = None
+        TO21_tree = None
+        RSQUARE23_tree = None
+        LSQUARE24_tree = None
+        LCURLY25_tree = None
+        TO26_tree = None
+        RCURLY28_tree = None
+        LSQUARE29_tree = None
+        TO31_tree = None
+        RSQUARE32_tree = None
+        RCURLY33_tree = None
+        LCURLY34_tree = None
+        TO36_tree = None
+        RSQUARE37_tree = None
+        RCURLY38_tree = None
+        LSQUARE39_tree = None
+        TO40_tree = None
+        RSQUARE41_tree = None
+        LCURLY42_tree = None
+        TO43_tree = None
+        RSQUARE44_tree = None
+        LSQUARE45_tree = None
+        TO46_tree = None
+        RCURLY47_tree = None
+        LCURLY48_tree = None
+        TO49_tree = None
+        RCURLY50_tree = None
+        stream_LCURLY = RewriteRuleTokenStream(self._adaptor, "token LCURLY")
+        stream_LSQUARE = RewriteRuleTokenStream(self._adaptor, "token LSQUARE")
+        stream_RSQUARE = RewriteRuleTokenStream(self._adaptor, "token RSQUARE")
+        stream_TO = RewriteRuleTokenStream(self._adaptor, "token TO")
+        stream_RCURLY = RewriteRuleTokenStream(self._adaptor, "token RCURLY")
+        stream_atom = RewriteRuleSubtreeStream(self._adaptor, "rule atom")
+        try:
+            try:
+
+                alt12 = 8
+                alt12 = self.dfa12.predict(self.input)
+                if alt12 == 1:
+
+                    pass
+
+                    alt8 = 2
+                    LA8_0 = self.input.LA(1)
+
+                    if (LA8_0 == LSQUARE) :
+                        alt8 = 1
+                    elif (LA8_0 == LCURLY) :
+                        alt8 = 2
+                    else:
+                        nvae = NoViableAltException("", 8, 0, self.input)
+
+                        raise nvae
+
+                    if alt8 == 1:
+
+                        pass
+                        LSQUARE19=self.match(self.input, LSQUARE, self.FOLLOW_LSQUARE_in_range436)
+                        stream_LSQUARE.add(LSQUARE19)
+
+
+                    elif alt8 == 2:
+
+                        pass
+                        LCURLY20=self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_range440)
+                        stream_LCURLY.add(LCURLY20)
+
+
+
+                    TO21=self.match(self.input, TO, self.FOLLOW_TO_in_range443)
+                    stream_TO.add(TO21)
+                    self._state.following.append(self.FOLLOW_atom_in_range445)
+                    atom22 = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(atom22.tree)
+                    RSQUARE23=self.match(self.input, RSQUARE, self.FOLLOW_RSQUARE_in_range447)
+                    stream_RSQUARE.add(RSQUARE23)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(LE, "LE"), root_1)
+
+                    self._adaptor.addChild(root_1, stream_atom.nextTree())
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt12 == 2:
+
+                    pass
+
+                    alt9 = 2
+                    LA9_0 = self.input.LA(1)
+
+                    if (LA9_0 == LSQUARE) :
+                        alt9 = 1
+                    elif (LA9_0 == LCURLY) :
+                        alt9 = 2
+                    else:
+                        nvae = NoViableAltException("", 9, 0, self.input)
+
+                        raise nvae
+
+                    if alt9 == 1:
+
+                        pass
+                        LSQUARE24=self.match(self.input, LSQUARE, self.FOLLOW_LSQUARE_in_range462)
+                        stream_LSQUARE.add(LSQUARE24)
+
+
+                    elif alt9 == 2:
+
+                        pass
+                        LCURLY25=self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_range466)
+                        stream_LCURLY.add(LCURLY25)
+
+
+
+                    TO26=self.match(self.input, TO, self.FOLLOW_TO_in_range469)
+                    stream_TO.add(TO26)
+                    self._state.following.append(self.FOLLOW_atom_in_range471)
+                    atom27 = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(atom27.tree)
+                    RCURLY28=self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_range473)
+                    stream_RCURLY.add(RCURLY28)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(LT, "LT"), root_1)
+
+                    self._adaptor.addChild(root_1, stream_atom.nextTree())
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt12 == 3:
+
+                    pass
+                    LSQUARE29=self.match(self.input, LSQUARE, self.FOLLOW_LSQUARE_in_range487)
+                    stream_LSQUARE.add(LSQUARE29)
+                    self._state.following.append(self.FOLLOW_atom_in_range489)
+                    atom30 = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(atom30.tree)
+                    TO31=self.match(self.input, TO, self.FOLLOW_TO_in_range491)
+                    stream_TO.add(TO31)
+
+                    alt10 = 2
+                    LA10_0 = self.input.LA(1)
+
+                    if (LA10_0 == RSQUARE) :
+                        alt10 = 1
+                    elif (LA10_0 == RCURLY) :
+                        alt10 = 2
+                    else:
+                        nvae = NoViableAltException("", 10, 0, self.input)
+
+                        raise nvae
+
+                    if alt10 == 1:
+
+                        pass
+                        RSQUARE32=self.match(self.input, RSQUARE, self.FOLLOW_RSQUARE_in_range494)
+                        stream_RSQUARE.add(RSQUARE32)
+
+
+                    elif alt10 == 2:
+
+                        pass
+                        RCURLY33=self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_range498)
+                        stream_RCURLY.add(RCURLY33)
+
+
+
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(GE, "GE"), root_1)
+
+                    self._adaptor.addChild(root_1, stream_atom.nextTree())
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt12 == 4:
+
+                    pass
+                    LCURLY34=self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_range513)
+                    stream_LCURLY.add(LCURLY34)
+                    self._state.following.append(self.FOLLOW_atom_in_range515)
+                    atom35 = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(atom35.tree)
+                    TO36=self.match(self.input, TO, self.FOLLOW_TO_in_range517)
+                    stream_TO.add(TO36)
+
+                    alt11 = 2
+                    LA11_0 = self.input.LA(1)
+
+                    if (LA11_0 == RSQUARE) :
+                        alt11 = 1
+                    elif (LA11_0 == RCURLY) :
+                        alt11 = 2
+                    else:
+                        nvae = NoViableAltException("", 11, 0, self.input)
+
+                        raise nvae
+
+                    if alt11 == 1:
+
+                        pass
+                        RSQUARE37=self.match(self.input, RSQUARE, self.FOLLOW_RSQUARE_in_range520)
+                        stream_RSQUARE.add(RSQUARE37)
+
+
+                    elif alt11 == 2:
+
+                        pass
+                        RCURLY38=self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_range524)
+                        stream_RCURLY.add(RCURLY38)
+
+
+
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(GT, "GT"), root_1)
+
+                    self._adaptor.addChild(root_1, stream_atom.nextTree())
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt12 == 5:
+
+                    pass
+                    LSQUARE39=self.match(self.input, LSQUARE, self.FOLLOW_LSQUARE_in_range539)
+                    stream_LSQUARE.add(LSQUARE39)
+                    self._state.following.append(self.FOLLOW_atom_in_range543)
+                    l = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(l.tree)
+                    TO40=self.match(self.input, TO, self.FOLLOW_TO_in_range545)
+                    stream_TO.add(TO40)
+                    self._state.following.append(self.FOLLOW_atom_in_range549)
+                    h = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(h.tree)
+                    RSQUARE41=self.match(self.input, RSQUARE, self.FOLLOW_RSQUARE_in_range551)
+                    stream_RSQUARE.add(RSQUARE41)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    if l is not None:
+                        stream_l = RewriteRuleSubtreeStream(self._adaptor, "token l", l.tree)
+                    else:
+                        stream_l = RewriteRuleSubtreeStream(self._adaptor, "token l", None)
+
+
+                    if h is not None:
+                        stream_h = RewriteRuleSubtreeStream(self._adaptor, "token h", h.tree)
+                    else:
+                        stream_h = RewriteRuleSubtreeStream(self._adaptor, "token h", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(CONJUNCTION, "CONJUNCTION"), root_1)
+
+
+                    root_2 = self._adaptor.nil()
+                    root_2 = self._adaptor.becomeRoot(self._adaptor.createFromType(GE, "GE"), root_2)
+
+                    self._adaptor.addChild(root_2, stream_l.nextTree())
+
+                    self._adaptor.addChild(root_1, root_2)
+
+                    root_2 = self._adaptor.nil()
+                    root_2 = self._adaptor.becomeRoot(self._adaptor.createFromType(LE, "LE"), root_2)
+
+                    self._adaptor.addChild(root_2, stream_h.nextTree())
+
+                    self._adaptor.addChild(root_1, root_2)
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt12 == 6:
+
+                    pass
+                    LCURLY42=self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_range577)
+                    stream_LCURLY.add(LCURLY42)
+                    self._state.following.append(self.FOLLOW_atom_in_range581)
+                    l = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(l.tree)
+                    TO43=self.match(self.input, TO, self.FOLLOW_TO_in_range583)
+                    stream_TO.add(TO43)
+                    self._state.following.append(self.FOLLOW_atom_in_range587)
+                    h = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(h.tree)
+                    RSQUARE44=self.match(self.input, RSQUARE, self.FOLLOW_RSQUARE_in_range589)
+                    stream_RSQUARE.add(RSQUARE44)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    if l is not None:
+                        stream_l = RewriteRuleSubtreeStream(self._adaptor, "token l", l.tree)
+                    else:
+                        stream_l = RewriteRuleSubtreeStream(self._adaptor, "token l", None)
+
+
+                    if h is not None:
+                        stream_h = RewriteRuleSubtreeStream(self._adaptor, "token h", h.tree)
+                    else:
+                        stream_h = RewriteRuleSubtreeStream(self._adaptor, "token h", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(CONJUNCTION, "CONJUNCTION"), root_1)
+
+
+                    root_2 = self._adaptor.nil()
+                    root_2 = self._adaptor.becomeRoot(self._adaptor.createFromType(GT, "GT"), root_2)
+
+                    self._adaptor.addChild(root_2, stream_l.nextTree())
+
+                    self._adaptor.addChild(root_1, root_2)
+
+                    root_2 = self._adaptor.nil()
+                    root_2 = self._adaptor.becomeRoot(self._adaptor.createFromType(LE, "LE"), root_2)
+
+                    self._adaptor.addChild(root_2, stream_h.nextTree())
+
+                    self._adaptor.addChild(root_1, root_2)
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt12 == 7:
+
+                    pass
+                    LSQUARE45=self.match(self.input, LSQUARE, self.FOLLOW_LSQUARE_in_range615)
+                    stream_LSQUARE.add(LSQUARE45)
+                    self._state.following.append(self.FOLLOW_atom_in_range619)
+                    l = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(l.tree)
+                    TO46=self.match(self.input, TO, self.FOLLOW_TO_in_range621)
+                    stream_TO.add(TO46)
+                    self._state.following.append(self.FOLLOW_atom_in_range625)
+                    h = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(h.tree)
+                    RCURLY47=self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_range627)
+                    stream_RCURLY.add(RCURLY47)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    if l is not None:
+                        stream_l = RewriteRuleSubtreeStream(self._adaptor, "token l", l.tree)
+                    else:
+                        stream_l = RewriteRuleSubtreeStream(self._adaptor, "token l", None)
+
+
+                    if h is not None:
+                        stream_h = RewriteRuleSubtreeStream(self._adaptor, "token h", h.tree)
+                    else:
+                        stream_h = RewriteRuleSubtreeStream(self._adaptor, "token h", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(CONJUNCTION, "CONJUNCTION"), root_1)
+
+
+                    root_2 = self._adaptor.nil()
+                    root_2 = self._adaptor.becomeRoot(self._adaptor.createFromType(GE, "GE"), root_2)
+
+                    self._adaptor.addChild(root_2, stream_l.nextTree())
+
+                    self._adaptor.addChild(root_1, root_2)
+
+                    root_2 = self._adaptor.nil()
+                    root_2 = self._adaptor.becomeRoot(self._adaptor.createFromType(LT, "LT"), root_2)
+
+                    self._adaptor.addChild(root_2, stream_h.nextTree())
+
+                    self._adaptor.addChild(root_1, root_2)
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                elif alt12 == 8:
+
+                    pass
+                    LCURLY48=self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_range653)
+                    stream_LCURLY.add(LCURLY48)
+                    self._state.following.append(self.FOLLOW_atom_in_range657)
+                    l = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(l.tree)
+                    TO49=self.match(self.input, TO, self.FOLLOW_TO_in_range659)
+                    stream_TO.add(TO49)
+                    self._state.following.append(self.FOLLOW_atom_in_range663)
+                    h = self.atom()
+
+                    self._state.following.pop()
+                    stream_atom.add(h.tree)
+                    RCURLY50=self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_range665)
+                    stream_RCURLY.add(RCURLY50)
+
+
+
+
+
+
+
+
+                    retval.tree = root_0
+
+                    if retval is not None:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
+                    else:
+                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
+
+
+                    if l is not None:
+                        stream_l = RewriteRuleSubtreeStream(self._adaptor, "token l", l.tree)
+                    else:
+                        stream_l = RewriteRuleSubtreeStream(self._adaptor, "token l", None)
+
+
+                    if h is not None:
+                        stream_h = RewriteRuleSubtreeStream(self._adaptor, "token h", h.tree)
+                    else:
+                        stream_h = RewriteRuleSubtreeStream(self._adaptor, "token h", None)
+
+
+                    root_0 = self._adaptor.nil()
+
+
+                    root_1 = self._adaptor.nil()
+                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(CONJUNCTION, "CONJUNCTION"), root_1)
+
+
+                    root_2 = self._adaptor.nil()
+                    root_2 = self._adaptor.becomeRoot(self._adaptor.createFromType(GT, "GT"), root_2)
+
+                    self._adaptor.addChild(root_2, stream_l.nextTree())
+
+                    self._adaptor.addChild(root_1, root_2)
+
+                    root_2 = self._adaptor.nil()
+                    root_2 = self._adaptor.becomeRoot(self._adaptor.createFromType(LT, "LT"), root_2)
+
+                    self._adaptor.addChild(root_2, stream_h.nextTree())
+
+                    self._adaptor.addChild(root_1, root_2)
+
+                    self._adaptor.addChild(root_0, root_1)
+
+
+
+                    retval.tree = root_0
+
+
+                retval.stop = self.input.LT(-1)
+
+
+                retval.tree = self._adaptor.rulePostProcessing(root_0)
+                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+        finally:
+
+            pass
+
+        return retval
+
+
+
+
+
+
+
+
+
+    DFA12_eot = DFA.unpack(
+        u"\35\uffff"
+        )
+
+    DFA12_eof = DFA.unpack(
+        u"\35\uffff"
+        )
+
+    DFA12_min = DFA.unpack(
+        u"\1\32\3\27\6\34\3\35\2\27\3\uffff\3\35\1\uffff\3\35\4\uffff"
+        )
+
+    DFA12_max = DFA.unpack(
+        u"\1\33\2\34\1\31\6\34\5\36\3\uffff\3\36\1\uffff\3\36\4\uffff"
+        )
+
+    DFA12_accept = DFA.unpack(
+        u"\17\uffff\1\2\1\1\1\3\3\uffff\1\4\3\uffff\1\5\1\7\1\6\1\10"
+        )
+
+    DFA12_special = DFA.unpack(
+        u"\35\uffff"
+        )
+
+
+    DFA12_transition = [
+        DFA.unpack(u"\1\1\1\2"),
+        DFA.unpack(u"\1\4\1\5\1\6\2\uffff\1\3"),
+        DFA.unpack(u"\1\7\1\10\1\11\2\uffff\1\3"),
+        DFA.unpack(u"\1\12\1\13\1\14"),
+        DFA.unpack(u"\1\15"),
+        DFA.unpack(u"\1\15"),
+        DFA.unpack(u"\1\15"),
+        DFA.unpack(u"\1\16"),
+        DFA.unpack(u"\1\16"),
+        DFA.unpack(u"\1\16"),
+        DFA.unpack(u"\1\20\1\17"),
+        DFA.unpack(u"\1\20\1\17"),
+        DFA.unpack(u"\1\20\1\17"),
+        DFA.unpack(u"\1\22\1\23\1\24\3\uffff\2\21"),
+        DFA.unpack(u"\1\26\1\27\1\30\3\uffff\2\25"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\31\1\32"),
+        DFA.unpack(u"\1\31\1\32"),
+        DFA.unpack(u"\1\31\1\32"),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\33\1\34"),
+        DFA.unpack(u"\1\33\1\34"),
+        DFA.unpack(u"\1\33\1\34"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"")
+    ]
+
+
+
+    DFA12 = DFA
+
+
+    FOLLOW_expression_in_query131 = frozenset([])
+    FOLLOW_EOF_in_query133 = frozenset([1])
+    FOLLOW_factor_in_expression151 = frozenset([1, 17, 19, 20, 22, 23, 24, 25])
+    FOLLOW_AND_in_expression154 = frozenset([17, 19, 20, 22, 23, 24, 25])
+    FOLLOW_factor_in_expression157 = frozenset([1, 17, 19, 20, 22, 23, 24, 25])
+    FOLLOW_term_in_factor185 = frozenset([1, 18])
+    FOLLOW_OR_in_factor188 = frozenset([17, 19, 20, 22, 23, 24, 25])
+    FOLLOW_term_in_factor190 = frozenset([1, 18])
+    FOLLOW_NOT_in_term219 = frozenset([17, 19, 20, 22, 23, 24, 25])
+    FOLLOW_primitive_in_term221 = frozenset([1])
+    FOLLOW_primitive_in_term235 = frozenset([1])
+    FOLLOW_selector_in_primitive260 = frozenset([23, 24, 25, 26, 27])
+    FOLLOW_value_in_primitive262 = frozenset([1])
+    FOLLOW_atom_in_primitive279 = frozenset([1])
+    FOLLOW_LPAREN_in_primitive295 = frozenset([17, 19, 20, 22, 23, 24, 25])
+    FOLLOW_expression_in_primitive297 = frozenset([21])
+    FOLLOW_RPAREN_in_primitive299 = frozenset([1])
+    FOLLOW_atom_in_value318 = frozenset([1])
+    FOLLOW_range_in_value328 = frozenset([1])
+    FOLLOW_SELECTOR_in_selector348 = frozenset([1])
+    FOLLOW_INT_in_atom370 = frozenset([1])
+    FOLLOW_TEXT_in_atom389 = frozenset([1])
+    FOLLOW_PHRASE_in_atom408 = frozenset([1])
+    FOLLOW_LSQUARE_in_range436 = frozenset([28])
+    FOLLOW_LCURLY_in_range440 = frozenset([28])
+    FOLLOW_TO_in_range443 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range445 = frozenset([29])
+    FOLLOW_RSQUARE_in_range447 = frozenset([1])
+    FOLLOW_LSQUARE_in_range462 = frozenset([28])
+    FOLLOW_LCURLY_in_range466 = frozenset([28])
+    FOLLOW_TO_in_range469 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range471 = frozenset([30])
+    FOLLOW_RCURLY_in_range473 = frozenset([1])
+    FOLLOW_LSQUARE_in_range487 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range489 = frozenset([28])
+    FOLLOW_TO_in_range491 = frozenset([29, 30])
+    FOLLOW_RSQUARE_in_range494 = frozenset([1])
+    FOLLOW_RCURLY_in_range498 = frozenset([1])
+    FOLLOW_LCURLY_in_range513 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range515 = frozenset([28])
+    FOLLOW_TO_in_range517 = frozenset([29, 30])
+    FOLLOW_RSQUARE_in_range520 = frozenset([1])
+    FOLLOW_RCURLY_in_range524 = frozenset([1])
+    FOLLOW_LSQUARE_in_range539 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range543 = frozenset([28])
+    FOLLOW_TO_in_range545 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range549 = frozenset([29])
+    FOLLOW_RSQUARE_in_range551 = frozenset([1])
+    FOLLOW_LCURLY_in_range577 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range581 = frozenset([28])
+    FOLLOW_TO_in_range583 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range587 = frozenset([29])
+    FOLLOW_RSQUARE_in_range589 = frozenset([1])
+    FOLLOW_LSQUARE_in_range615 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range619 = frozenset([28])
+    FOLLOW_TO_in_range621 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range625 = frozenset([30])
+    FOLLOW_RCURLY_in_range627 = frozenset([1])
+    FOLLOW_LCURLY_in_range653 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range657 = frozenset([28])
+    FOLLOW_TO_in_range659 = frozenset([23, 24, 25])
+    FOLLOW_atom_in_range663 = frozenset([30])
+    FOLLOW_RCURLY_in_range665 = frozenset([1])
+
+
+
+def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
+    from antlr3.main import ParserMain
+    main = ParserMain("QueryLexer", QueryParser)
+    main.stdin = stdin
+    main.stdout = stdout
+    main.stderr = stderr
+    main.execute(argv)
+
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/google/appengine/api/search/__init__.py b/google/appengine/api/search/__init__.py
index 789f418..2fa5b46 100644
--- a/google/appengine/api/search/__init__.py
+++ b/google/appengine/api/search/__init__.py
@@ -20,27 +20,22 @@
 
 """Search API module."""
 
-from search_api import *
-
-
-__all__ = [
-    'AtomField',
-    'DateField',
-    'Document',
-    'Error',
-    'Field',
-    'FieldExpression',
-    'HtmlField',
-    'Index',
-    'InternalError',
-    'InvalidRequestError',
-    'OperationResult',
-    'TransientError',
-    'ScorerSpec',
-    'SearchRequest',
-    'SearchResult',
-    'SearchResponse',
-    'SortSpec',
-    'TextField',
-    'list_indexes',
-    ]
+from search import AtomField
+from search import DateField
+from search import Document
+from search import Error
+from search import Field
+from search import FieldExpression
+from search import HtmlField
+from search import Index
+from search import InternalError
+from search import InvalidRequestError
+from search import list_indexes
+from search import OperationResult
+from search import ScorerSpec
+from search import SearchRequest
+from search import SearchResponse
+from search import SearchResult
+from search import SortSpec
+from search import TextField
+from search import TransientError
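
The package now re-exports its public names explicitly from the renamed search
module (see the rename of search_api.py to search.py later in this patch).
A minimal sketch of client code against that public surface, assuming the SDK
is on the import path and using the constructor keywords documented in the
search.py hunks below:

# Sketch only: constructing a document against the re-exported public API.
from google.appengine.api import search

index = search.Index(name='books')
doc = search.Document(
    doc_id='the-odyssey',
    fields=[search.TextField(name='title', value='The Odyssey')])
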
diff --git a/google/appengine/api/search/query_parser.py b/google/appengine/api/search/query_parser.py
new file mode 100644
index 0000000..b837f9f
--- /dev/null
+++ b/google/appengine/api/search/query_parser.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+
+"""Wrapper for QueryParser."""
+
+
+import google
+import antlr3
+
+from google.appengine.api.search import QueryLexer
+from google.appengine.api.search import QueryParser
+
+
+_OPERATOR_MAP = {
+    QueryParser.CONJUNCTION: 'AND',
+    QueryParser.DISJUNCTION: 'OR',
+    QueryParser.NEGATION: 'NOT',
+    QueryParser.NONE: 'None',
+    QueryParser.NUMBER: 'Number',
+    QueryParser.RESTRICTION: 'Restrict',
+    QueryParser.STRING: 'String',
+    QueryParser.WORD: 'Word',
+    QueryParser.VALUE: 'Value',
+    QueryParser.TEXT: 'Text',
+    QueryParser.EOF: 'EOF',
+    QueryParser.LT: '<',
+    QueryParser.LE: '<=',
+    QueryParser.GT: '>',
+    QueryParser.GE: '>=',
+    QueryParser.SELECTOR: 'Selector',
+    QueryParser.PHRASE: 'Phrase',
+    QueryParser.INT: 'Int'}
+
+
+class QueryException(Exception):
+  """An error occurred while parsing the query input string."""
+
+
+class QueryLexerWithErrors(QueryLexer.QueryLexer):
+  """An overridden Lexer that raises exceptions."""
+
+  def emitErrorMessage(self, msg):
+    """Raise an exception if the input fails to parse correctly.
+
+    Overriding the default, which normally just prints a message to
+    stderr.
+
+    Args:
+      msg: the error message
+    Raises:
+      QueryException: always.
+    """
+    raise QueryException(msg)
+
+
+class QueryParserWithErrors(QueryParser.QueryParser):
+  """An overridden Parser that raises exceptions."""
+
+  def emitErrorMessage(self, msg):
+    """Raise an exception if the input fails to parse correctly.
+
+    Overriding the default, which normally just prints a message to
+    stderr.
+
+    Args:
+      msg: the error message
+    Raises:
+      QueryException: always.
+    """
+    raise QueryException(msg)
+
+
+def CreateParser(query):
+  """Creates a Query Parser."""
+  input_string = antlr3.ANTLRStringStream(query)
+  lexer = QueryLexerWithErrors(input_string)
+  tokens = antlr3.CommonTokenStream(lexer)
+  parser = QueryParserWithErrors(tokens)
+  return parser
+
+
+def _Parse(query):
+  parser = CreateParser(query)
+  try:
+    return parser.query()
+  except Exception, e:
+    raise QueryException(e.message)
+
+
+def _TypeToString(node_type):
+  """Converts a node_type to a string."""
+  if node_type in _OPERATOR_MAP.keys():
+    return _OPERATOR_MAP[node_type]
+  return 'unknown operator: ' + str(node_type)
+
+
+def Simplify(parser_return):
+  """Simplifies the output of the parser."""
+  if parser_return.tree:
+    return _SimplifyNode(parser_return.tree)
+  return parser_return
+
+
+def _SimplifyNode(node):
+  """Simplifies the node removing singleton conjunctions and others."""
+  if not node.getType():
+    return _SimplifyNode(node.children[0])
+  elif node.getType() is QueryParser.CONJUNCTION and node.getChildCount() is 1:
+    return _SimplifyNode(node.children[0])
+  elif node.getType() is QueryParser.DISJUNCTION and node.getChildCount() is 1:
+    return _SimplifyNode(node.children[0])
+  elif (node.getType() is QueryParser.RESTRICTION and node.getChildCount() is 2
+        and node.children[0].getType() is QueryParser.NONE):
+    return _SimplifyNode(node.children[1])
+  elif (node.getType() is QueryParser.VALUE and node.getChildCount() is 2 and
+        (node.children[0].getType() is QueryParser.WORD or
+         node.children[0].getType() is QueryParser.STRING or
+         node.children[0].getType() is QueryParser.NUMBER)):
+    return _SimplifyNode(node.children[1])
+  for i, child in enumerate(node.children):
+    node.setChild(i, _SimplifyNode(child))
+  return node
+
+
+def ToString(node):
+  """Translates the node in a parse tree into a query string fragment."""
+  output = ''
+  if node.getChildCount():
+    output += '('
+  if (node.getType() is QueryParser.TEXT or
+      node.getType() is QueryParser.SELECTOR or
+      node.getType() is QueryParser.PHRASE or
+      node.getType() is QueryParser.INT):
+    output += node.getText()
+  else:
+    output += _TypeToString(node.getType())
+  if node.getChildCount():
+    output += ' '
+  output += ' '.join([ToString(child) for child in node.children])
+  if node.getChildCount():
+    output += ')'
+  return output
+
+
+def Parse(query):
+  """Parses a query and simplifies the parse tree."""
+  return Simplify(_Parse(query))
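
query_parser.py wraps the generated ANTLR lexer and parser, converts ANTLR
error messages into QueryException, and simplifies the resulting parse tree.
A minimal sketch of exercising the wrapper, with an illustrative query string
(the exact syntax accepted is defined by the generated QueryParser grammar):

# Sketch only: parse a query and print the simplified tree (Python 2 style,
# matching the SDK). Assumes the SDK packages are importable.
from google.appengine.api.search import query_parser

try:
  tree = query_parser.Parse('title:odyssey AND author:homer')
  print query_parser.ToString(tree)
except query_parser.QueryException, e:
  print 'Query failed to parse: %s' % e
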
diff --git a/google/appengine/api/search/search_api.py b/google/appengine/api/search/search.py
similarity index 93%
rename from google/appengine/api/search/search_api.py
rename to google/appengine/api/search/search.py
index 6197762..cee69b4 100644
--- a/google/appengine/api/search/search_api.py
+++ b/google/appengine/api/search/search.py
@@ -42,6 +42,28 @@
 from google.appengine.runtime import apiproxy_errors
 
 
+__all__ = [
+    'AtomField',
+    'DateField',
+    'Document',
+    'Error',
+    'Field',
+    'FieldExpression',
+    'HtmlField',
+    'Index',
+    'InternalError',
+    'InvalidRequestError',
+    'OperationResult',
+    'TransientError',
+    'ScorerSpec',
+    'SearchRequest',
+    'SearchResult',
+    'SearchResponse',
+    'SortSpec',
+    'TextField',
+    'list_indexes',
+    ]
+
 _MAXIMUM_INDEX_NAME_LENGTH = 100
 _MAXIMUM_FIELD_VALUE_LENGTH = 1024 * 1024
 _MAXIMUM_FIELD_ATOM_LENGTH = 500
@@ -234,7 +256,8 @@
 def _ValidatePrintableAsciiNotReserved(value, name):
   """Raises an exception if value is not printable ASCII string nor reserved.
 
-  Printable ASCII strings starting with '!' are reserved for internal use.
+  Non-space whitespace characters are also excluded. Printable ASCII
+  strings starting with '!' are reserved for internal use.
 
   Args:
     value: The value to validate.
@@ -248,20 +271,29 @@
   """
   for char in value:
     if char not in _ASCII_PRINTABLE:
-      raise ValueError('%s must be printable ASCII: %s' % (name, value))
+      raise ValueError(
+          '%s must be printable ASCII and not non-space whitespace: %s'
+          % (name, value))
   if value.startswith('!'):
     raise ValueError('%s must not start with "!": %s' % (name, value))
   return value
 
 
 def _CheckIndexName(index_name):
-  """Checks index_name is a string which is not too long, and returns it."""
+  """Checks index_name is a string which is not too long, and returns it.
+
+  Index names must be printable ASCII, must not be reserved (start with
+  '!'), and must not include non-space whitespace characters.
+  """
   _ValidateString(index_name, 'index name', _MAXIMUM_INDEX_NAME_LENGTH)
   return _ValidatePrintableAsciiNotReserved(index_name, 'index_name')
 
 
 def _CheckFieldName(name):
-  """Checks field name is not too long, is ASCII printable and not reserved."""
+  """Checks field name is not too long, is ASCII printable and not reserved.
+
+  Non-space whitespace characters are also excluded from field names.
+  """
   _ValidateString(name, 'name', _MAXIMUM_FIELD_NAME_LENGTH)
   name_str = str(name)
   _ValidatePrintableAsciiNotReserved(name, 'field name')
@@ -308,7 +340,11 @@
 
 
 def _CheckDocumentId(doc_id):
-  """Checks doc_id is a valid document identifier, and returns it."""
+  """Checks doc_id is a valid document identifier, and returns it.
+
+  Document ids must be ASCII printable, not include non-space whitespace
+  characters, and not start with '!'.
+  """
   _ValidateString(doc_id, 'doc_id', _MAXIMUM_DOCUMENT_ID_LENGTH)
   _ValidatePrintableAsciiNotReserved(doc_id, 'doc_id')
   return doc_id
@@ -352,7 +388,7 @@
 
 def _Repr(class_instance, ordered_dictionary):
   """Generates an unambiguous representation for instance and ordered dict."""
-  return 'search_api.%s(%s)' % (class_instance.__class__.__name__, ', '.join(
+  return 'search.%s(%s)' % (class_instance.__class__.__name__, ', '.join(
       ["%s='%s'" % (key, value) for (key, value) in ordered_dictionary
        if value is not None and value != []]))
 
@@ -404,7 +440,8 @@
     Args:
       name: The name of the field. Field names must have maximum length
         _MAXIMUM_FIELD_NAME_LENGTH, be ASCII printable and not matched
-        reserved pattern '_[A-Z]*' nor start with '!'.
+        reserved pattern '_[A-Z]*' nor start with '!'. Further, field
+        names cannot contain non-space whitespace characters.
       value: The value of the field which can be a str, unicode or date.
         (optional)
       language: The ISO 693-1 two letter code of the language used in the value.
@@ -582,7 +619,7 @@
 
   def _CopyValueToProtocolBuffer(self, field_value_pb):
     field_value_pb.set_type(document_pb.FieldValue.DATE)
-    field_value_pb.set_date_value(self.value.isoformat())
+    field_value_pb.set_string_value(self.value.isoformat())
 
 
 def _GetValue(value_pb):
@@ -592,8 +629,8 @@
       return value_pb.string_value()
     return None
   if value_pb.type() == document_pb.FieldValue.DATE:
-    if value_pb.has_date_value():
-      return datetime.datetime.strptime(value_pb.date_value(),
+    if value_pb.has_string_value():
+      return datetime.datetime.strptime(value_pb.string_value(),
                                         '%Y-%m-%d').date()
     return None
   raise TypeError('unknown FieldValue type %d' % value_pb.type())
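
With the change above, a DateField value travels through the FieldValue
protocol buffer as an ISO 'YYYY-MM-DD' string rather than a dedicated date
field, and is parsed back with '%Y-%m-%d'. A sketch of building such a field,
assuming DateField accepts the name/value keywords documented for Field:

# Sketch only: the date is serialized via value.isoformat(), e.g. '2011-07-19'.
import datetime
from google.appengine.api import search

published = search.DateField(name='published', value=datetime.date(2011, 7, 19))
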
@@ -644,7 +681,8 @@
 
     Args:
       doc_id: The printable ASCII string identifying the document which does
-        not start with '!' which is reserved.
+        not start with '!', which is reserved. Non-space whitespace characters
+        are also excluded from ids.
       fields: An iterable of Field instances representing the content of the
         document. (optional)
       language: The code of the language used in the field values. Defaults
@@ -708,7 +746,7 @@
 def _CopyDocumentToProtocolBuffer(document, pb):
   """Copies Document to a document_pb.Document protocol buffer."""
   pb.set_storage(document_pb.Document.DISK)
-  pb.set_doc_id(document.doc_id)
+  pb.set_id(document.doc_id)
   if document.language:
     pb.set_language(document.language)
   for field in document.fields:
@@ -724,7 +762,7 @@
   lang = None
   if doc_pb.has_language():
     lang = doc_pb.language()
-  return Document(doc_id=doc_pb.doc_id(), fields=fields,
+  return Document(doc_id=doc_pb.id(), fields=fields,
                   language=lang,
                   order_id=doc_pb.order_id())
 
@@ -890,7 +928,6 @@
   return pb
 
 
-
 class ScorerSpec(object):
   """Specifies how to score a search result.
 
@@ -901,12 +938,11 @@
                limit=5000)
   """
 
-  GENERIC, HIT_COUNT, TIME_STAMP, MATCH_SCORER = ('GENERIC', 'HIT_COUNT',
-                                             'TIME_STAMP', 'MATCH_SCORER')
+  GENERIC, MATCH_SCORER = ('GENERIC', 'MATCH_SCORER')
   _DEFAULT_LIMIT = 1000
   _MAXIMUM_LIMIT = 10000
 
-  _TYPES = frozenset([GENERIC, HIT_COUNT, TIME_STAMP, MATCH_SCORER])
+  _TYPES = frozenset([GENERIC, MATCH_SCORER])
 
   _CONSTRUCTOR_KWARGS = frozenset(['scorer_type', 'limit'])
 
@@ -917,8 +953,6 @@
       scorer_type: The type of scorer to use on search results. Defaults to
         GENERIC.  (optional) The possible types include:
           GENERIC: A generic scorer that uses match scoring and rescoring.
-          HIT_COUNT: A simple scorer that counts hits as the score.
-          TIME_STAMP: A scorer that returns the document timestamp as the score.
           MATCH_SCORER: A scorer that returns a score based on term frequency
           divided by document frequency.
       limit: The limit on the number of documents to score. Defaults to
@@ -962,8 +996,6 @@
 
 _SCORER_TYPE_PB_MAP = {
     ScorerSpec.GENERIC: search_service_pb.ScorerSpec.GENERIC,
-    ScorerSpec.HIT_COUNT: search_service_pb.ScorerSpec.HIT_COUNT,
-    ScorerSpec.TIME_STAMP: search_service_pb.ScorerSpec.TIME_STAMP,
     ScorerSpec.MATCH_SCORER: search_service_pb.ScorerSpec.MATCH_SCORER}
 
 
@@ -1221,7 +1253,7 @@
       or request.returned_expressions):
     field_spec_pb = pb.mutable_field_spec()
     for field in request.returned_fields:
-      field_spec_pb.add_field_name(field)
+      field_spec_pb.add_name(field)
     for snippeted_field in request.snippeted_fields:
       _CopyFieldExpressionToProtocolBuffer(
           FieldExpression(
@@ -1538,7 +1570,8 @@
 
     Args:
       name: The name of the index. An index name must be a printable ASCII
-        string not starting with '!'.
+        string not starting with '!', and not containing any non-space
+        whitespace characters.
       namespace: The namespace of the index name.
       consistency: The consistency mode of the index, either GLOBALLY_CONSISTENT
         or PER_DOCUMENT_CONSISTENT. Defaults to PER_DOCUMENT_CONSISTENT.
@@ -1695,7 +1728,7 @@
     _CopyMetadataToProtocolBuffer(self, params.mutable_index_spec())
     for document_id in doc_ids:
       _CheckDocumentId(document_id)
-      params.add_document_id(document_id)
+      params.add_doc_id(document_id)
 
     try:
       apiproxy_stub_map.MakeSyncCall('search', 'DeleteDocument', request,
@@ -1762,6 +1795,58 @@
         matched_count=response.matched_count(),
         returned_count=response.result_size())
 
+  def list_documents(self, start_doc_id=None, include_start_doc=True,
+                     limit=100, keys_only=False, **kwargs):
+    """List documents in the index, in doc_id order.
+
+    Args:
+      start_doc_id: String containing the document Id from which to start
+        listing documents. By default, starts at the first document Id.
+      include_start_doc: If true, include the document with the Id specified by
+        the start_doc_id parameter.
+      limit: The maximum number of documents to return. Defaults to 100.
+      keys_only: If true, the documents returned only contain their keys.
+
+    Returns:
+      A list of Documents, ordered by Id.
+
+    Raises:
+      TransientError: The request failed but retrying may succeed.
+      InternalError: A problem with the backend was encountered.
+      InvalidRequestError: The request is not well formed.
+      TypeError: An unknown attribute is passed in.
+    """
+    request = search_service_pb.ListDocumentsRequest()
+    if 'app_id' in kwargs:
+      request.set_app_id(kwargs.pop('app_id'))
+
+    if kwargs:
+      raise TypeError('Invalid arguments: %s' % ', '.join(kwargs))
+
+    params = request.mutable_params()
+    _CopyMetadataToProtocolBuffer(self, params.mutable_index_spec())
+
+    if start_doc_id:
+      params.set_start_doc_id(start_doc_id)
+    params.set_include_start_doc(include_start_doc)
+
+    params.set_limit(limit)
+    params.set_keys_only(keys_only)
+
+    response = search_service_pb.ListDocumentsResponse()
+    try:
+      apiproxy_stub_map.MakeSyncCall('search', 'ListDocuments', request,
+                                     response)
+    except apiproxy_errors.ApplicationError, e:
+      raise _ToSearchError(e)
+
+    _CheckStatus(response.status())
+    documents = []
+    for doc_proto in response.document_list():
+      documents.append(_NewDocumentFromPb(doc_proto))
+
+    return documents
+
 
 
 
@@ -1778,7 +1863,7 @@
 
 def _CopyMetadataToProtocolBuffer(index, spec_pb):
   """Copies Index specification to a search_service_pb.IndexSpec."""
-  spec_pb.set_index_name(index.name)
+  spec_pb.set_name(index.name)
   spec_pb.set_namespace(index.namespace)
   spec_pb.set_consistency(_CONSISTENCY_MODES_TO_PB_MAP.get(index.consistency))
 
@@ -1790,4 +1875,4 @@
     return Index(name=spec_pb.index_name(), namespace=spec_pb.namespace(),
                  consistency=consistency)
   else:
-    return Index(name=spec_pb.index_name(), consistency=consistency)
+    return Index(name=spec_pb.name(), consistency=consistency)
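
The new Index.list_documents() method added in this file pages through an
index's documents in doc_id order. A minimal usage sketch, under the
assumption that an index named 'books' already holds documents:

# Sketch only: list up to ten document ids from an existing index.
from google.appengine.api import search

index = search.Index(name='books')
for doc in index.list_documents(limit=10, keys_only=True):
  print doc.doc_id
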
diff --git a/google/appengine/api/search/search_service_pb.py b/google/appengine/api/search/search_service_pb.py
index b751026..31d3187 100644
--- a/google/appengine/api/search/search_service_pb.py
+++ b/google/appengine/api/search/search_service_pb.py
@@ -254,8 +254,8 @@
   def Consistency_Name(cls, x): return cls._Consistency_NAMES.get(x, "")
   Consistency_Name = classmethod(Consistency_Name)
 
-  has_index_name_ = 0
-  index_name_ = ""
+  has_name_ = 0
+  name_ = ""
   has_consistency_ = 0
   consistency_ = 1
   has_namespace_ = 0
@@ -266,18 +266,18 @@
   def __init__(self, contents=None):
     if contents is not None: self.MergeFromString(contents)
 
-  def index_name(self): return self.index_name_
+  def name(self): return self.name_
 
-  def set_index_name(self, x):
-    self.has_index_name_ = 1
-    self.index_name_ = x
+  def set_name(self, x):
+    self.has_name_ = 1
+    self.name_ = x
 
-  def clear_index_name(self):
-    if self.has_index_name_:
-      self.has_index_name_ = 0
-      self.index_name_ = ""
+  def clear_name(self):
+    if self.has_name_:
+      self.has_name_ = 0
+      self.name_ = ""
 
-  def has_index_name(self): return self.has_index_name_
+  def has_name(self): return self.has_name_
 
   def consistency(self): return self.consistency_
 
@@ -321,15 +321,15 @@
 
   def MergeFrom(self, x):
     assert x is not self
-    if (x.has_index_name()): self.set_index_name(x.index_name())
+    if (x.has_name()): self.set_name(x.name())
     if (x.has_consistency()): self.set_consistency(x.consistency())
     if (x.has_namespace()): self.set_namespace(x.namespace())
     if (x.has_version()): self.set_version(x.version())
 
   def Equals(self, x):
     if x is self: return 1
-    if self.has_index_name_ != x.has_index_name_: return 0
-    if self.has_index_name_ and self.index_name_ != x.index_name_: return 0
+    if self.has_name_ != x.has_name_: return 0
+    if self.has_name_ and self.name_ != x.name_: return 0
     if self.has_consistency_ != x.has_consistency_: return 0
     if self.has_consistency_ and self.consistency_ != x.consistency_: return 0
     if self.has_namespace_ != x.has_namespace_: return 0
@@ -340,15 +340,15 @@
 
   def IsInitialized(self, debug_strs=None):
     initialized = 1
-    if (not self.has_index_name_):
+    if (not self.has_name_):
       initialized = 0
       if debug_strs is not None:
-        debug_strs.append('Required field: index_name not set.')
+        debug_strs.append('Required field: name not set.')
     return initialized
 
   def ByteSize(self):
     n = 0
-    n += self.lengthString(len(self.index_name_))
+    n += self.lengthString(len(self.name_))
     if (self.has_consistency_): n += 1 + self.lengthVarInt64(self.consistency_)
     if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_))
     if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
@@ -356,23 +356,23 @@
 
   def ByteSizePartial(self):
     n = 0
-    if (self.has_index_name_):
+    if (self.has_name_):
       n += 1
-      n += self.lengthString(len(self.index_name_))
+      n += self.lengthString(len(self.name_))
     if (self.has_consistency_): n += 1 + self.lengthVarInt64(self.consistency_)
     if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_))
     if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
     return n
 
   def Clear(self):
-    self.clear_index_name()
+    self.clear_name()
     self.clear_consistency()
     self.clear_namespace()
     self.clear_version()
 
   def OutputUnchecked(self, out):
     out.putVarInt32(10)
-    out.putPrefixedString(self.index_name_)
+    out.putPrefixedString(self.name_)
     if (self.has_consistency_):
       out.putVarInt32(16)
       out.putVarInt32(self.consistency_)
@@ -384,9 +384,9 @@
       out.putVarInt32(self.version_)
 
   def OutputPartial(self, out):
-    if (self.has_index_name_):
+    if (self.has_name_):
       out.putVarInt32(10)
-      out.putPrefixedString(self.index_name_)
+      out.putPrefixedString(self.name_)
     if (self.has_consistency_):
       out.putVarInt32(16)
       out.putVarInt32(self.consistency_)
@@ -401,7 +401,7 @@
     while d.avail() > 0:
       tt = d.getVarInt32()
       if tt == 10:
-        self.set_index_name(d.getPrefixedString())
+        self.set_name(d.getPrefixedString())
         continue
       if tt == 16:
         self.set_consistency(d.getVarInt32())
@@ -420,7 +420,7 @@
 
   def __str__(self, prefix="", printElemNumber=0):
     res=""
-    if self.has_index_name_: res+=prefix+("index_name: %s\n" % self.DebugFormatString(self.index_name_))
+    if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
     if self.has_consistency_: res+=prefix+("consistency: %s\n" % self.DebugFormatInt32(self.consistency_))
     if self.has_namespace_: res+=prefix+("namespace: %s\n" % self.DebugFormatString(self.namespace_))
     if self.has_version_: res+=prefix+("version: %s\n" % self.DebugFormatInt32(self.version_))
@@ -430,14 +430,14 @@
   def _BuildTagLookupTable(sparse, maxtag, default=None):
     return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
 
-  kindex_name = 1
+  kname = 1
   kconsistency = 2
   knamespace = 3
   kversion = 4
 
   _TEXT = _BuildTagLookupTable({
     0: "ErrorCode",
-    1: "index_name",
+    1: "name",
     2: "consistency",
     3: "namespace",
     4: "version",
@@ -1065,24 +1065,24 @@
   has_index_spec_ = 0
 
   def __init__(self, contents=None):
-    self.document_id_ = []
+    self.doc_id_ = []
     self.index_spec_ = IndexSpec()
     if contents is not None: self.MergeFromString(contents)
 
-  def document_id_size(self): return len(self.document_id_)
-  def document_id_list(self): return self.document_id_
+  def doc_id_size(self): return len(self.doc_id_)
+  def doc_id_list(self): return self.doc_id_
 
-  def document_id(self, i):
-    return self.document_id_[i]
+  def doc_id(self, i):
+    return self.doc_id_[i]
 
-  def set_document_id(self, i, x):
-    self.document_id_[i] = x
+  def set_doc_id(self, i, x):
+    self.doc_id_[i] = x
 
-  def add_document_id(self, x):
-    self.document_id_.append(x)
+  def add_doc_id(self, x):
+    self.doc_id_.append(x)
 
-  def clear_document_id(self):
-    self.document_id_ = []
+  def clear_doc_id(self):
+    self.doc_id_ = []
 
   def index_spec(self): return self.index_spec_
 
@@ -1095,13 +1095,13 @@
 
   def MergeFrom(self, x):
     assert x is not self
-    for i in xrange(x.document_id_size()): self.add_document_id(x.document_id(i))
+    for i in xrange(x.doc_id_size()): self.add_doc_id(x.doc_id(i))
     if (x.has_index_spec()): self.mutable_index_spec().MergeFrom(x.index_spec())
 
   def Equals(self, x):
     if x is self: return 1
-    if len(self.document_id_) != len(x.document_id_): return 0
-    for e1, e2 in zip(self.document_id_, x.document_id_):
+    if len(self.doc_id_) != len(x.doc_id_): return 0
+    for e1, e2 in zip(self.doc_id_, x.doc_id_):
       if e1 != e2: return 0
     if self.has_index_spec_ != x.has_index_spec_: return 0
     if self.has_index_spec_ and self.index_spec_ != x.index_spec_: return 0
@@ -1118,36 +1118,36 @@
 
   def ByteSize(self):
     n = 0
-    n += 1 * len(self.document_id_)
-    for i in xrange(len(self.document_id_)): n += self.lengthString(len(self.document_id_[i]))
+    n += 1 * len(self.doc_id_)
+    for i in xrange(len(self.doc_id_)): n += self.lengthString(len(self.doc_id_[i]))
     n += self.lengthString(self.index_spec_.ByteSize())
     return n + 1
 
   def ByteSizePartial(self):
     n = 0
-    n += 1 * len(self.document_id_)
-    for i in xrange(len(self.document_id_)): n += self.lengthString(len(self.document_id_[i]))
+    n += 1 * len(self.doc_id_)
+    for i in xrange(len(self.doc_id_)): n += self.lengthString(len(self.doc_id_[i]))
     if (self.has_index_spec_):
       n += 1
       n += self.lengthString(self.index_spec_.ByteSizePartial())
     return n
 
   def Clear(self):
-    self.clear_document_id()
+    self.clear_doc_id()
     self.clear_index_spec()
 
   def OutputUnchecked(self, out):
-    for i in xrange(len(self.document_id_)):
+    for i in xrange(len(self.doc_id_)):
       out.putVarInt32(10)
-      out.putPrefixedString(self.document_id_[i])
+      out.putPrefixedString(self.doc_id_[i])
     out.putVarInt32(18)
     out.putVarInt32(self.index_spec_.ByteSize())
     self.index_spec_.OutputUnchecked(out)
 
   def OutputPartial(self, out):
-    for i in xrange(len(self.document_id_)):
+    for i in xrange(len(self.doc_id_)):
       out.putVarInt32(10)
-      out.putPrefixedString(self.document_id_[i])
+      out.putPrefixedString(self.doc_id_[i])
     if (self.has_index_spec_):
       out.putVarInt32(18)
       out.putVarInt32(self.index_spec_.ByteSizePartial())
@@ -1157,7 +1157,7 @@
     while d.avail() > 0:
       tt = d.getVarInt32()
       if tt == 10:
-        self.add_document_id(d.getPrefixedString())
+        self.add_doc_id(d.getPrefixedString())
         continue
       if tt == 18:
         length = d.getVarInt32()
@@ -1174,10 +1174,10 @@
   def __str__(self, prefix="", printElemNumber=0):
     res=""
     cnt=0
-    for e in self.document_id_:
+    for e in self.doc_id_:
       elm=""
       if printElemNumber: elm="(%d)" % cnt
-      res+=prefix+("document_id%s: %s\n" % (elm, self.DebugFormatString(e)))
+      res+=prefix+("doc_id%s: %s\n" % (elm, self.DebugFormatString(e)))
       cnt+=1
     if self.has_index_spec_:
       res+=prefix+"index_spec <\n"
@@ -1189,12 +1189,12 @@
   def _BuildTagLookupTable(sparse, maxtag, default=None):
     return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
 
-  kdocument_id = 1
+  kdoc_id = 1
   kindex_spec = 2
 
   _TEXT = _BuildTagLookupTable({
     0: "ErrorCode",
-    1: "document_id",
+    1: "doc_id",
     2: "index_spec",
   }, 2)
 
@@ -1457,310 +1457,7 @@
 
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
-class CreateIndexRequest(ProtocolBuffer.ProtocolMessage):
-  has_index_spec_ = 0
-  has_app_id_ = 0
-  app_id_ = ""
-
-  def __init__(self, contents=None):
-    self.index_spec_ = IndexSpec()
-    if contents is not None: self.MergeFromString(contents)
-
-  def index_spec(self): return self.index_spec_
-
-  def mutable_index_spec(self): self.has_index_spec_ = 1; return self.index_spec_
-
-  def clear_index_spec(self):self.has_index_spec_ = 0; self.index_spec_.Clear()
-
-  def has_index_spec(self): return self.has_index_spec_
-
-  def app_id(self): return self.app_id_
-
-  def set_app_id(self, x):
-    self.has_app_id_ = 1
-    self.app_id_ = x
-
-  def clear_app_id(self):
-    if self.has_app_id_:
-      self.has_app_id_ = 0
-      self.app_id_ = ""
-
-  def has_app_id(self): return self.has_app_id_
-
-
-  def MergeFrom(self, x):
-    assert x is not self
-    if (x.has_index_spec()): self.mutable_index_spec().MergeFrom(x.index_spec())
-    if (x.has_app_id()): self.set_app_id(x.app_id())
-
-  def Equals(self, x):
-    if x is self: return 1
-    if self.has_index_spec_ != x.has_index_spec_: return 0
-    if self.has_index_spec_ and self.index_spec_ != x.index_spec_: return 0
-    if self.has_app_id_ != x.has_app_id_: return 0
-    if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
-    return 1
-
-  def IsInitialized(self, debug_strs=None):
-    initialized = 1
-    if (not self.has_index_spec_):
-      initialized = 0
-      if debug_strs is not None:
-        debug_strs.append('Required field: index_spec not set.')
-    elif not self.index_spec_.IsInitialized(debug_strs): initialized = 0
-    return initialized
-
-  def ByteSize(self):
-    n = 0
-    n += self.lengthString(self.index_spec_.ByteSize())
-    if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
-    return n + 1
-
-  def ByteSizePartial(self):
-    n = 0
-    if (self.has_index_spec_):
-      n += 1
-      n += self.lengthString(self.index_spec_.ByteSizePartial())
-    if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
-    return n
-
-  def Clear(self):
-    self.clear_index_spec()
-    self.clear_app_id()
-
-  def OutputUnchecked(self, out):
-    out.putVarInt32(10)
-    out.putVarInt32(self.index_spec_.ByteSize())
-    self.index_spec_.OutputUnchecked(out)
-    if (self.has_app_id_):
-      out.putVarInt32(26)
-      out.putPrefixedString(self.app_id_)
-
-  def OutputPartial(self, out):
-    if (self.has_index_spec_):
-      out.putVarInt32(10)
-      out.putVarInt32(self.index_spec_.ByteSizePartial())
-      self.index_spec_.OutputPartial(out)
-    if (self.has_app_id_):
-      out.putVarInt32(26)
-      out.putPrefixedString(self.app_id_)
-
-  def TryMerge(self, d):
-    while d.avail() > 0:
-      tt = d.getVarInt32()
-      if tt == 10:
-        length = d.getVarInt32()
-        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
-        d.skip(length)
-        self.mutable_index_spec().TryMerge(tmp)
-        continue
-      if tt == 26:
-        self.set_app_id(d.getPrefixedString())
-        continue
-
-
-      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
-      d.skipData(tt)
-
-
-  def __str__(self, prefix="", printElemNumber=0):
-    res=""
-    if self.has_index_spec_:
-      res+=prefix+"index_spec <\n"
-      res+=self.index_spec_.__str__(prefix + "  ", printElemNumber)
-      res+=prefix+">\n"
-    if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
-    return res
-
-
-  def _BuildTagLookupTable(sparse, maxtag, default=None):
-    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
-
-  kindex_spec = 1
-  kapp_id = 3
-
-  _TEXT = _BuildTagLookupTable({
-    0: "ErrorCode",
-    1: "index_spec",
-    3: "app_id",
-  }, 3)
-
-  _TYPES = _BuildTagLookupTable({
-    0: ProtocolBuffer.Encoder.NUMERIC,
-    1: ProtocolBuffer.Encoder.STRING,
-    3: ProtocolBuffer.Encoder.STRING,
-  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)
-
-
-  _STYLE = """"""
-  _STYLE_CONTENT_TYPE = """"""
-class CreateIndexResponse(ProtocolBuffer.ProtocolMessage):
-  has_status_ = 0
-  has_index_spec_ = 0
-  index_spec_ = None
-
-  def __init__(self, contents=None):
-    self.status_ = RequestStatus()
-    self.lazy_init_lock_ = thread.allocate_lock()
-    if contents is not None: self.MergeFromString(contents)
-
-  def status(self): return self.status_
-
-  def mutable_status(self): self.has_status_ = 1; return self.status_
-
-  def clear_status(self):self.has_status_ = 0; self.status_.Clear()
-
-  def has_status(self): return self.has_status_
-
-  def index_spec(self):
-    if self.index_spec_ is None:
-      self.lazy_init_lock_.acquire()
-      try:
-        if self.index_spec_ is None: self.index_spec_ = IndexSpec()
-      finally:
-        self.lazy_init_lock_.release()
-    return self.index_spec_
-
-  def mutable_index_spec(self): self.has_index_spec_ = 1; return self.index_spec()
-
-  def clear_index_spec(self):
-
-    if self.has_index_spec_:
-      self.has_index_spec_ = 0;
-      if self.index_spec_ is not None: self.index_spec_.Clear()
-
-  def has_index_spec(self): return self.has_index_spec_
-
-
-  def MergeFrom(self, x):
-    assert x is not self
-    if (x.has_status()): self.mutable_status().MergeFrom(x.status())
-    if (x.has_index_spec()): self.mutable_index_spec().MergeFrom(x.index_spec())
-
-  def Equals(self, x):
-    if x is self: return 1
-    if self.has_status_ != x.has_status_: return 0
-    if self.has_status_ and self.status_ != x.status_: return 0
-    if self.has_index_spec_ != x.has_index_spec_: return 0
-    if self.has_index_spec_ and self.index_spec_ != x.index_spec_: return 0
-    return 1
-
-  def IsInitialized(self, debug_strs=None):
-    initialized = 1
-    if (not self.has_status_):
-      initialized = 0
-      if debug_strs is not None:
-        debug_strs.append('Required field: status not set.')
-    elif not self.status_.IsInitialized(debug_strs): initialized = 0
-    if (self.has_index_spec_ and not self.index_spec_.IsInitialized(debug_strs)): initialized = 0
-    return initialized
-
-  def ByteSize(self):
-    n = 0
-    n += self.lengthString(self.status_.ByteSize())
-    if (self.has_index_spec_): n += 1 + self.lengthString(self.index_spec_.ByteSize())
-    return n + 1
-
-  def ByteSizePartial(self):
-    n = 0
-    if (self.has_status_):
-      n += 1
-      n += self.lengthString(self.status_.ByteSizePartial())
-    if (self.has_index_spec_): n += 1 + self.lengthString(self.index_spec_.ByteSizePartial())
-    return n
-
-  def Clear(self):
-    self.clear_status()
-    self.clear_index_spec()
-
-  def OutputUnchecked(self, out):
-    out.putVarInt32(10)
-    out.putVarInt32(self.status_.ByteSize())
-    self.status_.OutputUnchecked(out)
-    if (self.has_index_spec_):
-      out.putVarInt32(18)
-      out.putVarInt32(self.index_spec_.ByteSize())
-      self.index_spec_.OutputUnchecked(out)
-
-  def OutputPartial(self, out):
-    if (self.has_status_):
-      out.putVarInt32(10)
-      out.putVarInt32(self.status_.ByteSizePartial())
-      self.status_.OutputPartial(out)
-    if (self.has_index_spec_):
-      out.putVarInt32(18)
-      out.putVarInt32(self.index_spec_.ByteSizePartial())
-      self.index_spec_.OutputPartial(out)
-
-  def TryMerge(self, d):
-    while d.avail() > 0:
-      tt = d.getVarInt32()
-      if tt == 10:
-        length = d.getVarInt32()
-        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
-        d.skip(length)
-        self.mutable_status().TryMerge(tmp)
-        continue
-      if tt == 18:
-        length = d.getVarInt32()
-        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
-        d.skip(length)
-        self.mutable_index_spec().TryMerge(tmp)
-        continue
-
-
-      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
-      d.skipData(tt)
-
-
-  def __str__(self, prefix="", printElemNumber=0):
-    res=""
-    if self.has_status_:
-      res+=prefix+"status <\n"
-      res+=self.status_.__str__(prefix + "  ", printElemNumber)
-      res+=prefix+">\n"
-    if self.has_index_spec_:
-      res+=prefix+"index_spec <\n"
-      res+=self.index_spec_.__str__(prefix + "  ", printElemNumber)
-      res+=prefix+">\n"
-    return res
-
-
-  def _BuildTagLookupTable(sparse, maxtag, default=None):
-    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
-
-  kstatus = 1
-  kindex_spec = 2
-
-  _TEXT = _BuildTagLookupTable({
-    0: "ErrorCode",
-    1: "status",
-    2: "index_spec",
-  }, 2)
-
-  _TYPES = _BuildTagLookupTable({
-    0: ProtocolBuffer.Encoder.NUMERIC,
-    1: ProtocolBuffer.Encoder.STRING,
-    2: ProtocolBuffer.Encoder.STRING,
-  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)
-
-
-  _STYLE = """"""
-  _STYLE_CONTENT_TYPE = """"""
 class ListDocumentsParams(ProtocolBuffer.ProtocolMessage):
-
-
-  FULL_DOCUMENTS =    0
-  KEYS_ONLY    =    1
-
-  _ListMode_NAMES = {
-    0: "FULL_DOCUMENTS",
-    1: "KEYS_ONLY",
-  }
-
-  def ListMode_Name(cls, x): return cls._ListMode_NAMES.get(x, "")
-  ListMode_Name = classmethod(ListMode_Name)
-
   has_index_spec_ = 0
   has_start_doc_id_ = 0
   start_doc_id_ = ""
@@ -1768,8 +1465,8 @@
   include_start_doc_ = 1
   has_limit_ = 0
   limit_ = 100
-  has_mode_ = 0
-  mode_ = 0
+  has_keys_only_ = 0
+  keys_only_ = 0
 
   def __init__(self, contents=None):
     self.index_spec_ = IndexSpec()
@@ -1822,18 +1519,18 @@
 
   def has_limit(self): return self.has_limit_
 
-  def mode(self): return self.mode_
+  def keys_only(self): return self.keys_only_
 
-  def set_mode(self, x):
-    self.has_mode_ = 1
-    self.mode_ = x
+  def set_keys_only(self, x):
+    self.has_keys_only_ = 1
+    self.keys_only_ = x
 
-  def clear_mode(self):
-    if self.has_mode_:
-      self.has_mode_ = 0
-      self.mode_ = 0
+  def clear_keys_only(self):
+    if self.has_keys_only_:
+      self.has_keys_only_ = 0
+      self.keys_only_ = 0
 
-  def has_mode(self): return self.has_mode_
+  def has_keys_only(self): return self.has_keys_only_
 
 
   def MergeFrom(self, x):
@@ -1842,7 +1539,7 @@
     if (x.has_start_doc_id()): self.set_start_doc_id(x.start_doc_id())
     if (x.has_include_start_doc()): self.set_include_start_doc(x.include_start_doc())
     if (x.has_limit()): self.set_limit(x.limit())
-    if (x.has_mode()): self.set_mode(x.mode())
+    if (x.has_keys_only()): self.set_keys_only(x.keys_only())
 
   def Equals(self, x):
     if x is self: return 1
@@ -1854,8 +1551,8 @@
     if self.has_include_start_doc_ and self.include_start_doc_ != x.include_start_doc_: return 0
     if self.has_limit_ != x.has_limit_: return 0
     if self.has_limit_ and self.limit_ != x.limit_: return 0
-    if self.has_mode_ != x.has_mode_: return 0
-    if self.has_mode_ and self.mode_ != x.mode_: return 0
+    if self.has_keys_only_ != x.has_keys_only_: return 0
+    if self.has_keys_only_ and self.keys_only_ != x.keys_only_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -1873,7 +1570,7 @@
     if (self.has_start_doc_id_): n += 1 + self.lengthString(len(self.start_doc_id_))
     if (self.has_include_start_doc_): n += 2
     if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
-    if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
+    if (self.has_keys_only_): n += 2
     return n + 1
 
   def ByteSizePartial(self):
@@ -1884,7 +1581,7 @@
     if (self.has_start_doc_id_): n += 1 + self.lengthString(len(self.start_doc_id_))
     if (self.has_include_start_doc_): n += 2
     if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
-    if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
+    if (self.has_keys_only_): n += 2
     return n
 
   def Clear(self):
@@ -1892,7 +1589,7 @@
     self.clear_start_doc_id()
     self.clear_include_start_doc()
     self.clear_limit()
-    self.clear_mode()
+    self.clear_keys_only()
 
   def OutputUnchecked(self, out):
     out.putVarInt32(10)
@@ -1907,9 +1604,9 @@
     if (self.has_limit_):
       out.putVarInt32(32)
       out.putVarInt32(self.limit_)
-    if (self.has_mode_):
+    if (self.has_keys_only_):
       out.putVarInt32(40)
-      out.putVarInt32(self.mode_)
+      out.putBoolean(self.keys_only_)
 
   def OutputPartial(self, out):
     if (self.has_index_spec_):
@@ -1925,9 +1622,9 @@
     if (self.has_limit_):
       out.putVarInt32(32)
       out.putVarInt32(self.limit_)
-    if (self.has_mode_):
+    if (self.has_keys_only_):
       out.putVarInt32(40)
-      out.putVarInt32(self.mode_)
+      out.putBoolean(self.keys_only_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -1948,7 +1645,7 @@
         self.set_limit(d.getVarInt32())
         continue
       if tt == 40:
-        self.set_mode(d.getVarInt32())
+        self.set_keys_only(d.getBoolean())
         continue
 
 
@@ -1965,7 +1662,7 @@
     if self.has_start_doc_id_: res+=prefix+("start_doc_id: %s\n" % self.DebugFormatString(self.start_doc_id_))
     if self.has_include_start_doc_: res+=prefix+("include_start_doc: %s\n" % self.DebugFormatBool(self.include_start_doc_))
     if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
-    if self.has_mode_: res+=prefix+("mode: %s\n" % self.DebugFormatInt32(self.mode_))
+    if self.has_keys_only_: res+=prefix+("keys_only: %s\n" % self.DebugFormatBool(self.keys_only_))
     return res
 
 
@@ -1976,7 +1673,7 @@
   kstart_doc_id = 2
   kinclude_start_doc = 3
   klimit = 4
-  kmode = 5
+  kkeys_only = 5
 
   _TEXT = _BuildTagLookupTable({
     0: "ErrorCode",
@@ -1984,7 +1681,7 @@
     2: "start_doc_id",
     3: "include_start_doc",
     4: "limit",
-    5: "mode",
+    5: "keys_only",
   }, 5)
 
   _TYPES = _BuildTagLookupTable({
@@ -2296,7 +1993,7 @@
   has_fetch_schema_ = 0
   fetch_schema_ = 0
   has_limit_ = 0
-  limit_ = 100
+  limit_ = 20
   has_namespace_ = 0
   namespace_ = ""
   has_start_index_name_ = 0
@@ -2333,7 +2030,7 @@
   def clear_limit(self):
     if self.has_limit_:
       self.has_limit_ = 0
-      self.limit_ = 100
+      self.limit_ = 20
 
   def has_limit(self): return self.has_limit_
 
@@ -2887,7 +2584,7 @@
   has_sort_expression_ = 0
   sort_expression_ = ""
   has_sort_descending_ = 0
-  sort_descending_ = 0
+  sort_descending_ = 1
   has_default_value_text_ = 0
   default_value_text_ = ""
   has_default_value_numeric_ = 0
@@ -2918,7 +2615,7 @@
   def clear_sort_descending(self):
     if self.has_sort_descending_:
       self.has_sort_descending_ = 0
-      self.sort_descending_ = 0
+      self.sort_descending_ = 1
 
   def has_sort_descending(self): return self.has_sort_descending_
 
@@ -2974,26 +2671,22 @@
       initialized = 0
       if debug_strs is not None:
         debug_strs.append('Required field: sort_expression not set.')
-    if (not self.has_sort_descending_):
-      initialized = 0
-      if debug_strs is not None:
-        debug_strs.append('Required field: sort_descending not set.')
     return initialized
 
   def ByteSize(self):
     n = 0
     n += self.lengthString(len(self.sort_expression_))
+    if (self.has_sort_descending_): n += 2
     if (self.has_default_value_text_): n += 1 + self.lengthString(len(self.default_value_text_))
     if (self.has_default_value_numeric_): n += 9
-    return n + 3
+    return n + 1
 
   def ByteSizePartial(self):
     n = 0
     if (self.has_sort_expression_):
       n += 1
       n += self.lengthString(len(self.sort_expression_))
-    if (self.has_sort_descending_):
-      n += 2
+    if (self.has_sort_descending_): n += 2
     if (self.has_default_value_text_): n += 1 + self.lengthString(len(self.default_value_text_))
     if (self.has_default_value_numeric_): n += 9
     return n
@@ -3007,8 +2700,9 @@
   def OutputUnchecked(self, out):
     out.putVarInt32(10)
     out.putPrefixedString(self.sort_expression_)
-    out.putVarInt32(16)
-    out.putBoolean(self.sort_descending_)
+    if (self.has_sort_descending_):
+      out.putVarInt32(16)
+      out.putBoolean(self.sort_descending_)
     if (self.has_default_value_text_):
       out.putVarInt32(34)
       out.putPrefixedString(self.default_value_text_)
@@ -3091,15 +2785,11 @@
 
 
   GENERIC      =    0
-  HIT_COUNT    =    1
-  TIME_STAMP   =    2
-  MATCH_SCORER =    3
+  MATCH_SCORER =    2
 
   _Scorer_NAMES = {
     0: "GENERIC",
-    1: "HIT_COUNT",
-    2: "TIME_STAMP",
-    3: "MATCH_SCORER",
+    2: "MATCH_SCORER",
   }
 
   def Scorer_Name(cls, x): return cls._Scorer_NAMES.get(x, "")
@@ -3108,9 +2798,9 @@
   has_scorer_ = 0
   scorer_ = 0
   has_limit_ = 0
-  limit_ = 0
-  has_escorer_parameters_ = 0
-  escorer_parameters_ = ""
+  limit_ = 1000
+  has_match_scorer_parameters_ = 0
+  match_scorer_parameters_ = ""
 
   def __init__(self, contents=None):
     if contents is not None: self.MergeFromString(contents)
@@ -3137,29 +2827,29 @@
   def clear_limit(self):
     if self.has_limit_:
       self.has_limit_ = 0
-      self.limit_ = 0
+      self.limit_ = 1000
 
   def has_limit(self): return self.has_limit_
 
-  def escorer_parameters(self): return self.escorer_parameters_
+  def match_scorer_parameters(self): return self.match_scorer_parameters_
 
-  def set_escorer_parameters(self, x):
-    self.has_escorer_parameters_ = 1
-    self.escorer_parameters_ = x
+  def set_match_scorer_parameters(self, x):
+    self.has_match_scorer_parameters_ = 1
+    self.match_scorer_parameters_ = x
 
-  def clear_escorer_parameters(self):
-    if self.has_escorer_parameters_:
-      self.has_escorer_parameters_ = 0
-      self.escorer_parameters_ = ""
+  def clear_match_scorer_parameters(self):
+    if self.has_match_scorer_parameters_:
+      self.has_match_scorer_parameters_ = 0
+      self.match_scorer_parameters_ = ""
 
-  def has_escorer_parameters(self): return self.has_escorer_parameters_
+  def has_match_scorer_parameters(self): return self.has_match_scorer_parameters_
 
 
   def MergeFrom(self, x):
     assert x is not self
     if (x.has_scorer()): self.set_scorer(x.scorer())
     if (x.has_limit()): self.set_limit(x.limit())
-    if (x.has_escorer_parameters()): self.set_escorer_parameters(x.escorer_parameters())
+    if (x.has_match_scorer_parameters()): self.set_match_scorer_parameters(x.match_scorer_parameters())
 
   def Equals(self, x):
     if x is self: return 1
@@ -3167,8 +2857,8 @@
     if self.has_scorer_ and self.scorer_ != x.scorer_: return 0
     if self.has_limit_ != x.has_limit_: return 0
     if self.has_limit_ and self.limit_ != x.limit_: return 0
-    if self.has_escorer_parameters_ != x.has_escorer_parameters_: return 0
-    if self.has_escorer_parameters_ and self.escorer_parameters_ != x.escorer_parameters_: return 0
+    if self.has_match_scorer_parameters_ != x.has_match_scorer_parameters_: return 0
+    if self.has_match_scorer_parameters_ and self.match_scorer_parameters_ != x.match_scorer_parameters_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -3179,20 +2869,20 @@
     n = 0
     if (self.has_scorer_): n += 1 + self.lengthVarInt64(self.scorer_)
     if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
-    if (self.has_escorer_parameters_): n += 1 + self.lengthString(len(self.escorer_parameters_))
+    if (self.has_match_scorer_parameters_): n += 1 + self.lengthString(len(self.match_scorer_parameters_))
     return n
 
   def ByteSizePartial(self):
     n = 0
     if (self.has_scorer_): n += 1 + self.lengthVarInt64(self.scorer_)
     if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
-    if (self.has_escorer_parameters_): n += 1 + self.lengthString(len(self.escorer_parameters_))
+    if (self.has_match_scorer_parameters_): n += 1 + self.lengthString(len(self.match_scorer_parameters_))
     return n
 
   def Clear(self):
     self.clear_scorer()
     self.clear_limit()
-    self.clear_escorer_parameters()
+    self.clear_match_scorer_parameters()
 
   def OutputUnchecked(self, out):
     if (self.has_scorer_):
@@ -3201,9 +2891,9 @@
     if (self.has_limit_):
       out.putVarInt32(16)
       out.putVarInt32(self.limit_)
-    if (self.has_escorer_parameters_):
+    if (self.has_match_scorer_parameters_):
       out.putVarInt32(74)
-      out.putPrefixedString(self.escorer_parameters_)
+      out.putPrefixedString(self.match_scorer_parameters_)
 
   def OutputPartial(self, out):
     if (self.has_scorer_):
@@ -3212,9 +2902,9 @@
     if (self.has_limit_):
       out.putVarInt32(16)
       out.putVarInt32(self.limit_)
-    if (self.has_escorer_parameters_):
+    if (self.has_match_scorer_parameters_):
       out.putVarInt32(74)
-      out.putPrefixedString(self.escorer_parameters_)
+      out.putPrefixedString(self.match_scorer_parameters_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -3226,7 +2916,7 @@
         self.set_limit(d.getVarInt32())
         continue
       if tt == 74:
-        self.set_escorer_parameters(d.getPrefixedString())
+        self.set_match_scorer_parameters(d.getPrefixedString())
         continue
 
 
@@ -3238,7 +2928,7 @@
     res=""
     if self.has_scorer_: res+=prefix+("scorer: %s\n" % self.DebugFormatInt32(self.scorer_))
     if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
-    if self.has_escorer_parameters_: res+=prefix+("escorer_parameters: %s\n" % self.DebugFormatString(self.escorer_parameters_))
+    if self.has_match_scorer_parameters_: res+=prefix+("match_scorer_parameters: %s\n" % self.DebugFormatString(self.match_scorer_parameters_))
     return res
 
 
@@ -3247,13 +2937,13 @@
 
   kscorer = 1
   klimit = 2
-  kescorer_parameters = 9
+  kmatch_scorer_parameters = 9
 
   _TEXT = _BuildTagLookupTable({
     0: "ErrorCode",
     1: "scorer",
     2: "limit",
-    9: "escorer_parameters",
+    9: "match_scorer_parameters",
   }, 9)
 
   _TYPES = _BuildTagLookupTable({
@@ -3386,24 +3076,24 @@
 class FieldSpec(ProtocolBuffer.ProtocolMessage):
 
   def __init__(self, contents=None):
-    self.field_name_ = []
+    self.name_ = []
     self.expression_ = []
     if contents is not None: self.MergeFromString(contents)
 
-  def field_name_size(self): return len(self.field_name_)
-  def field_name_list(self): return self.field_name_
+  def name_size(self): return len(self.name_)
+  def name_list(self): return self.name_
 
-  def field_name(self, i):
-    return self.field_name_[i]
+  def name(self, i):
+    return self.name_[i]
 
-  def set_field_name(self, i, x):
-    self.field_name_[i] = x
+  def set_name(self, i, x):
+    self.name_[i] = x
 
-  def add_field_name(self, x):
-    self.field_name_.append(x)
+  def add_name(self, x):
+    self.name_.append(x)
 
-  def clear_field_name(self):
-    self.field_name_ = []
+  def clear_name(self):
+    self.name_ = []
 
   def expression_size(self): return len(self.expression_)
   def expression_list(self): return self.expression_
@@ -3424,13 +3114,13 @@
 
   def MergeFrom(self, x):
     assert x is not self
-    for i in xrange(x.field_name_size()): self.add_field_name(x.field_name(i))
+    for i in xrange(x.name_size()): self.add_name(x.name(i))
     for i in xrange(x.expression_size()): self.add_expression().CopyFrom(x.expression(i))
 
   def Equals(self, x):
     if x is self: return 1
-    if len(self.field_name_) != len(x.field_name_): return 0
-    for e1, e2 in zip(self.field_name_, x.field_name_):
+    if len(self.name_) != len(x.name_): return 0
+    for e1, e2 in zip(self.name_, x.name_):
       if e1 != e2: return 0
     if len(self.expression_) != len(x.expression_): return 0
     for e1, e2 in zip(self.expression_, x.expression_):
@@ -3445,37 +3135,37 @@
 
   def ByteSize(self):
     n = 0
-    n += 1 * len(self.field_name_)
-    for i in xrange(len(self.field_name_)): n += self.lengthString(len(self.field_name_[i]))
+    n += 1 * len(self.name_)
+    for i in xrange(len(self.name_)): n += self.lengthString(len(self.name_[i]))
     n += 2 * len(self.expression_)
     for i in xrange(len(self.expression_)): n += self.expression_[i].ByteSize()
     return n
 
   def ByteSizePartial(self):
     n = 0
-    n += 1 * len(self.field_name_)
-    for i in xrange(len(self.field_name_)): n += self.lengthString(len(self.field_name_[i]))
+    n += 1 * len(self.name_)
+    for i in xrange(len(self.name_)): n += self.lengthString(len(self.name_[i]))
     n += 2 * len(self.expression_)
     for i in xrange(len(self.expression_)): n += self.expression_[i].ByteSizePartial()
     return n
 
   def Clear(self):
-    self.clear_field_name()
+    self.clear_name()
     self.clear_expression()
 
   def OutputUnchecked(self, out):
-    for i in xrange(len(self.field_name_)):
+    for i in xrange(len(self.name_)):
       out.putVarInt32(10)
-      out.putPrefixedString(self.field_name_[i])
+      out.putPrefixedString(self.name_[i])
     for i in xrange(len(self.expression_)):
       out.putVarInt32(19)
       self.expression_[i].OutputUnchecked(out)
       out.putVarInt32(20)
 
   def OutputPartial(self, out):
-    for i in xrange(len(self.field_name_)):
+    for i in xrange(len(self.name_)):
       out.putVarInt32(10)
-      out.putPrefixedString(self.field_name_[i])
+      out.putPrefixedString(self.name_[i])
     for i in xrange(len(self.expression_)):
       out.putVarInt32(19)
       self.expression_[i].OutputPartial(out)
@@ -3485,7 +3175,7 @@
     while d.avail() > 0:
       tt = d.getVarInt32()
       if tt == 10:
-        self.add_field_name(d.getPrefixedString())
+        self.add_name(d.getPrefixedString())
         continue
       if tt == 19:
         self.add_expression().TryMerge(d)
@@ -3499,10 +3189,10 @@
   def __str__(self, prefix="", printElemNumber=0):
     res=""
     cnt=0
-    for e in self.field_name_:
+    for e in self.name_:
       elm=""
       if printElemNumber: elm="(%d)" % cnt
-      res+=prefix+("field_name%s: %s\n" % (elm, self.DebugFormatString(e)))
+      res+=prefix+("name%s: %s\n" % (elm, self.DebugFormatString(e)))
       cnt+=1
     cnt=0
     for e in self.expression_:
@@ -3518,14 +3208,14 @@
   def _BuildTagLookupTable(sparse, maxtag, default=None):
     return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
 
-  kfield_name = 1
+  kname = 1
   kExpressionGroup = 2
   kExpressionname = 3
   kExpressionexpression = 4
 
   _TEXT = _BuildTagLookupTable({
     0: "ErrorCode",
-    1: "field_name",
+    1: "name",
     2: "Expression",
     3: "name",
     4: "expression",
@@ -3568,9 +3258,9 @@
   has_cursor_type_ = 0
   cursor_type_ = 0
   has_limit_ = 0
-  limit_ = 0
+  limit_ = 20
   has_matched_count_accuracy_ = 0
-  matched_count_accuracy_ = 0
+  matched_count_accuracy_ = 100
   has_scorer_spec_ = 0
   scorer_spec_ = None
   has_field_spec_ = 0
@@ -3651,7 +3341,7 @@
   def clear_limit(self):
     if self.has_limit_:
       self.has_limit_ = 0
-      self.limit_ = 0
+      self.limit_ = 20
 
   def has_limit(self): return self.has_limit_
 
@@ -3664,7 +3354,7 @@
   def clear_matched_count_accuracy(self):
     if self.has_matched_count_accuracy_:
       self.has_matched_count_accuracy_ = 0
-      self.matched_count_accuracy_ = 0
+      self.matched_count_accuracy_ = 100
 
   def has_matched_count_accuracy(self): return self.has_matched_count_accuracy_
 
@@ -3772,10 +3462,6 @@
       initialized = 0
       if debug_strs is not None:
         debug_strs.append('Required field: query not set.')
-    if (not self.has_limit_):
-      initialized = 0
-      if debug_strs is not None:
-        debug_strs.append('Required field: limit not set.')
     for p in self.sort_spec_:
       if not p.IsInitialized(debug_strs): initialized=0
     if (self.has_scorer_spec_ and not self.scorer_spec_.IsInitialized(debug_strs)): initialized = 0
@@ -3789,13 +3475,13 @@
     if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
     if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
     if (self.has_cursor_type_): n += 1 + self.lengthVarInt64(self.cursor_type_)
-    n += self.lengthVarInt64(self.limit_)
+    if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
     if (self.has_matched_count_accuracy_): n += 1 + self.lengthVarInt64(self.matched_count_accuracy_)
     n += 1 * len(self.sort_spec_)
     for i in xrange(len(self.sort_spec_)): n += self.lengthString(self.sort_spec_[i].ByteSize())
     if (self.has_scorer_spec_): n += 1 + self.lengthString(self.scorer_spec_.ByteSize())
     if (self.has_field_spec_): n += 1 + self.lengthString(self.field_spec_.ByteSize())
-    return n + 3
+    return n + 2
 
   def ByteSizePartial(self):
     n = 0
@@ -3808,9 +3494,7 @@
     if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
     if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
     if (self.has_cursor_type_): n += 1 + self.lengthVarInt64(self.cursor_type_)
-    if (self.has_limit_):
-      n += 1
-      n += self.lengthVarInt64(self.limit_)
+    if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
     if (self.has_matched_count_accuracy_): n += 1 + self.lengthVarInt64(self.matched_count_accuracy_)
     n += 1 * len(self.sort_spec_)
     for i in xrange(len(self.sort_spec_)): n += self.lengthString(self.sort_spec_[i].ByteSizePartial())
@@ -3842,8 +3526,9 @@
     if (self.has_cursor_type_):
       out.putVarInt32(40)
       out.putVarInt32(self.cursor_type_)
-    out.putVarInt32(48)
-    out.putVarInt32(self.limit_)
+    if (self.has_limit_):
+      out.putVarInt32(48)
+      out.putVarInt32(self.limit_)
     if (self.has_matched_count_accuracy_):
       out.putVarInt32(56)
       out.putVarInt32(self.matched_count_accuracy_)
@@ -4624,4 +4309,4 @@
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
 
-__all__ = ['SearchServiceError','RequestStatus','IndexSpec','IndexMetadata','IndexDocumentParams','IndexDocumentRequest','IndexDocumentResponse','DeleteDocumentParams','DeleteDocumentRequest','DeleteDocumentResponse','CreateIndexRequest','CreateIndexResponse','ListDocumentsParams','ListDocumentsRequest','ListDocumentsResponse','ListIndexesParams','ListIndexesRequest','ListIndexesResponse','SortSpec','ScorerSpec','FieldSpec','FieldSpec_Expression','SearchParams','SearchRequest','SearchResult','SearchResponse']
+__all__ = ['SearchServiceError','RequestStatus','IndexSpec','IndexMetadata','IndexDocumentParams','IndexDocumentRequest','IndexDocumentResponse','DeleteDocumentParams','DeleteDocumentRequest','DeleteDocumentResponse','ListDocumentsParams','ListDocumentsRequest','ListDocumentsResponse','ListIndexesParams','ListIndexesRequest','ListIndexesResponse','SortSpec','ScorerSpec','FieldSpec','FieldSpec_Expression','SearchParams','SearchRequest','SearchResult','SearchResponse']
diff --git a/google/appengine/api/search/simple_search_stub.py b/google/appengine/api/search/simple_search_stub.py
index f7c9452..444e943 100644
--- a/google/appengine/api/search/simple_search_stub.py
+++ b/google/appengine/api/search/simple_search_stub.py
@@ -29,21 +29,32 @@
 
 
 
+import bisect
+import copy
 import random
 import string
 import urllib
 
+from whoosh import analysis
+
 from google.appengine.datastore import document_pb
 from google.appengine.api import apiproxy_stub
-from google.appengine.api.search import search_api
+from google.appengine.api.search import query_parser
+from google.appengine.api.search import QueryParser
+from google.appengine.api.search import search
 from google.appengine.api.search import search_service_pb
 from google.appengine.runtime import apiproxy_errors
 
 __all__ = ['IndexConsistencyError',
+           'Number',
+           'Posting',
+           'PostingList',
+           'Quote',
+           'RamInvertedIndex',
            'SearchServiceStub',
            'SimpleIndex',
-           'RamInvertedIndex',
-           'SimpleTokenizer'
+           'Token',
+           'WhooshTokenizer',
           ]
 
 
@@ -53,27 +64,219 @@
 
 def _Repr(class_instance, ordered_dictionary):
   """Generates an unambiguous representation for instance and ordered dict."""
-  return 'search_api.%s(%s)' % (class_instance.__class__.__name__, ', '.join(
+  return 'search.%s(%s)' % (class_instance.__class__.__name__, ', '.join(
       ["%s='%s'" % (key, value) for (key, value) in ordered_dictionary
        if value is not None and value != []]))
 
 
-class SimpleTokenizer(object):
-  """A simple tokenizer that breaks up string on white space characters."""
+class Token(object):
+  """Represents a token, usually a word, extracted from some document field."""
+
+  _CONSTRUCTOR_KWARGS = frozenset(['chars', 'position', 'field_name'])
+
+  def __init__(self, **kwargs):
+    """Initializer.
+
+    Args:
+      chars: The string representation of the token.
+      position: The position of the token in the sequence from the document
+        field.
+      field_name: The name of the field the token occurred in.
+
+    Raises:
+      TypeError: If an unknown argument is passed.
+    """
+    args_diff = set(kwargs.iterkeys()) - self._CONSTRUCTOR_KWARGS
+    if args_diff:
+      raise TypeError('Invalid arguments: %s' % ', '.join(args_diff))
+
+    self._chars = kwargs.get('chars')
+    self._position = kwargs.get('position')
+    self._field_name = kwargs.get('field_name')
+
+  @property
+  def chars(self):
+    """Returns a list of fields of the document."""
+    if self._field_name:
+      return self._field_name + ':' + str(self._chars)
+    return str(self._chars)
+
+  @property
+  def position(self):
+    """Returns a list of fields of the document."""
+    return self._position
+
+  def RestrictField(self, field_name):
+    """Creates a copy of this Token and sets field_name."""
+    return Token(chars=self.chars, position=self.position,
+                 field_name=field_name)
+
+  def __repr__(self):
+    return _Repr(self, [('chars', self.chars), ('position', self.position)])
+
+  def __eq__(self, other):
+    return self.chars == other.chars
+
+  def __hash__(self):
+    return hash(self.chars)
+
+
+class Quote(Token):
+  """Represents a single or double quote in a document field or query."""
+
+  def __init__(self, **kwargs):
+    Token.__init__(self, **kwargs)
+
+
+class Number(Token):
+  """Represents a number in a document field or query."""
+
+  def __init__(self, **kwargs):
+    Token.__init__(self, **kwargs)
+
+
+class Posting(object):
+  """Represents a occurrences of some token at positions in a document."""
+
+  _CONSTRUCTOR_KWARGS = frozenset(['doc_id'])
+
+  def __init__(self, **kwargs):
+    """Initializer.
+
+    Args:
+      doc_id: The identifier of the document with token occurrences.
+
+    Raises:
+      TypeError: If an unknown argument is passed.
+    """
+    args_diff = set(kwargs.iterkeys()) - self._CONSTRUCTOR_KWARGS
+    if args_diff:
+      raise TypeError('Invalid arguments: %s' % ', '.join(args_diff))
+
+    self._doc_id = kwargs.get('doc_id')
+    self._positions = []
+
+  @property
+  def doc_id(self):
+    """Return id of the document that the token occurred in."""
+    return self._doc_id
+
+  def AddPosition(self, position):
+    """Adds the position in token sequence to occurrences for token."""
+    pos = bisect.bisect_left(self._positions, position)
+    if pos < len(self._positions) and self._positions[pos] == position:
+      return
+    self._positions.insert(pos, position)
+
+  def RemovePosition(self, position):
+    """Removes the position in token sequence from occurrences for token."""
+    pos = bisect.bisect_left(self._positions, position)
+    if pos < len(self._positions) and self._positions[pos] == position:
+      del self._positions[pos]
+
+  def __cmp__(self, other):
+    if not isinstance(other, Posting):
+      return -2
+    return cmp(self.doc_id, other.doc_id)
+
+  @property
+  def positions(self):
+    return self._positions
+
+  def __repr__(self):
+    return _Repr(self, [('doc_id', self.doc_id), ('positions', self.positions)])
+
+
+class WhooshTokenizer(object):
+  """A wrapper around whoosh tokenizer pipeline."""
 
   def __init__(self, split_restricts=True):
+    self._tokenizer = analysis.RegexTokenizer() | analysis.LowercaseFilter()
     self._split_restricts = split_restricts
 
-  def Tokenize(self, content):
+  def TokenizeText(self, text, token_position=0):
+    """Tokenizes the text into a sequence of Tokens."""
+    return self._TokenizeForType(field_type=document_pb.FieldValue.TEXT,
+                                value=text, token_position=token_position)
+
+  def TokenizeValue(self, field_value, token_position=0):
+    """Tokenizes a document_pb.FieldValue into a sequence of Tokens."""
+    return self._TokenizeForType(field_type=field_value.type(),
+                                value=field_value.string_value(),
+                                token_position=token_position)
+
+  def _TokenizeForType(self, field_type, value, token_position=0):
+    """Tokenizes value into a sequence of Tokens."""
+    if field_type is document_pb.FieldValue.NUMBER:
+      return [Token(chars=value, position=token_position)]
+
     tokens = []
-    for token in content.lower().split():
+    token_strings = []
+    if not self._split_restricts:
+      token_strings = value.lower().split()
+    else:
+      token_strings = [token.text for token in self._tokenizer(unicode(value))]
+    for token in token_strings:
       if ':' in token and self._split_restricts:
-        tokens.extend(token.split(':'))
+        for subtoken in token.split(':'):
+          tokens.append(Token(chars=subtoken, position=token_position))
+          token_position += 1
+      elif '"' in token:
+        for subtoken in token.split('"'):
+          if not subtoken:
+            tokens.append(Quote(chars='"', position=token_position))
+          else:
+            tokens.append(Token(chars=subtoken, position=token_position))
+          token_position += 1
       else:
-        tokens.append(token)
+        tokens.append(Token(chars=token, position=token_position))
+        token_position += 1
     return tokens
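
# Illustrative sketch, not part of the patch: a simplified stand-in for
# WhooshTokenizer._TokenizeForType above, showing how 'field:value' restricts
# and embedded double quotes are split into separate tokens with running
# positions. It splits on whitespace instead of using the whoosh pipeline.
def tokenize_sketch(value, split_restricts=True):
  tokens = []  # list of (chars, position) pairs
  position = 0
  for word in value.lower().split():
    if ':' in word and split_restricts:
      for part in word.split(':'):
        tokens.append((part, position))
        position += 1
    elif '"' in word:
      for part in word.split('"'):
        # An empty part marks where a quote character was.
        tokens.append(('"' if not part else part, position))
        position += 1
    else:
      tokens.append((word, position))
      position += 1
  return tokens

# tokenize_sketch('title:"hello world"') ->
#   [('title', 0), ('"hello', 1), ('world', 2), ('"', 3)]
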
 
 
+class PostingList(object):
+  """Represents ordered positions of some token in document.
+
+  A PostingList consists of a document id and a sequence of positions
+  that the same token occurs in the document.
+  """
+
+  def __init__(self):
+    self._postings = []
+
+  def Add(self, doc_id, position):
+    """Adds the token position for the given doc_id."""
+    posting = Posting(doc_id=doc_id)
+    pos = bisect.bisect_left(self._postings, posting)
+    if pos < len(self._postings) and self._postings[
+        pos].doc_id == posting.doc_id:
+      posting = self._postings[pos]
+    else:
+      self._postings.insert(pos, posting)
+    posting.AddPosition(position)
+
+  def Remove(self, doc_id, position):
+    """Removes the token position for the given doc_id."""
+    posting = Posting(doc_id=doc_id)
+    pos = bisect.bisect_left(self._postings, posting)
+    if pos < len(self._postings) and self._postings[
+        pos].doc_id == posting.doc_id:
+      posting = self._postings[pos]
+      posting.RemovePosition(position)
+      if not posting.positions:
+        del self._postings[pos]
+
+  @property
+  def postings(self):
+    return self._postings
+
+  def __iter__(self):
+    return iter(self._postings)
+
+  def __repr__(self):
+    return _Repr(self, [('postings', self.postings)])
+
+
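
# Illustrative sketch, not part of the patch: the bisect pattern behind
# PostingList.Add and Posting.AddPosition above. A sorted list of doc_ids plus
# a dict of position lists stands in for the Posting objects; bisect keeps
# both doc_ids and positions ordered without re-sorting.
import bisect

def add_posting(doc_ids, positions_by_doc, doc_id, position):
  i = bisect.bisect_left(doc_ids, doc_id)
  if i == len(doc_ids) or doc_ids[i] != doc_id:
    doc_ids.insert(i, doc_id)            # first occurrence in this document
    positions_by_doc[doc_id] = []
  positions = positions_by_doc[doc_id]
  j = bisect.bisect_left(positions, position)
  if j == len(positions) or positions[j] != position:
    positions.insert(j, position)        # record each position only once

# doc_ids, positions = [], {}
# add_posting(doc_ids, positions, 'doc2', 5)
# add_posting(doc_ids, positions, 'doc1', 3)
# doc_ids == ['doc1', 'doc2'] and positions == {'doc1': [3], 'doc2': [5]}
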
 class RamInvertedIndex(object):
   """A simple RAM-resident inverted file over documents."""
 
@@ -83,48 +286,49 @@
 
   def AddDocument(self, document):
     """Adds a document into the index."""
-    doc_id = document.doc_id()
+    doc_id = document.id()
+    token_position = 0
     for field in document.field_list():
-      self._AddTokens(doc_id, field.name(), field.value().string_value())
+      self._AddTokens(doc_id, field.name(), field.value(),
+                      token_position)
 
   def RemoveDocument(self, document):
     """Removes a document from the index."""
-    doc_id = document.doc_id()
+    doc_id = document.id()
     for field in document.field_list():
-      self._RemoveTokens(doc_id, field.name(), field.value().string_value())
+      self._RemoveTokens(doc_id, field.name(), field.value())
 
-  def _AddTokens(self, doc_id, field_name, field_value):
+  def _AddTokens(self, doc_id, field_name, field_value, token_position):
     """Adds token occurrences for a given doc's field value."""
-    for token in self._tokenizer.Tokenize(field_value):
+    for token in self._tokenizer.TokenizeValue(field_value, token_position):
       self._AddToken(doc_id, token)
-      self._AddToken(doc_id, field_name + ':' + token)
+      self._AddToken(doc_id, token.RestrictField(field_name))
 
   def _RemoveTokens(self, doc_id, field_name, field_value):
     """Removes tokens occurrences for a given doc's field value."""
-    for token in self._tokenizer.Tokenize(field_value):
+    for token in self._tokenizer.TokenizeValue(field_value=field_value):
       self._RemoveToken(doc_id, token)
-      self._RemoveToken(doc_id, field_name + ':' + token)
+      self._RemoveToken(doc_id, token.RestrictField(field_name))
 
   def _AddToken(self, doc_id, token):
     """Adds a token occurrence for a document."""
-    doc_ids = self._inverted_index.get(token)
-    if doc_ids is None:
-      self._inverted_index[token] = doc_ids = set([])
-    doc_ids.add(doc_id)
+    postings = self._inverted_index.get(token)
+    if postings is None:
+      self._inverted_index[token] = postings = PostingList()
+    postings.Add(doc_id, token.position)
 
   def _RemoveToken(self, doc_id, token):
     """Removes a token occurrence for a document."""
     if token in self._inverted_index:
-      doc_ids = self._inverted_index[token]
-      if doc_id in doc_ids:
-        doc_ids.remove(doc_id)
-        if not doc_ids:
-          del self._inverted_index[token]
+      postings = self._inverted_index[token]
+      postings.Remove(doc_id, token.position)
+      if not postings.postings:
+        del self._inverted_index[token]
 
-  def GetDocsForToken(self, token):
-    """Returns all documents which contain the token."""
+  def GetPostingsForToken(self, token):
+    """Returns all document postings which for the token."""
     if token in self._inverted_index:
-      return self._inverted_index[token]
+      return self._inverted_index[token].postings
     return []
 
   def __repr__(self):
@@ -137,8 +341,8 @@
   def __init__(self, index_spec):
     self._index_spec = index_spec
     self._documents = {}
-    self._parser = SimpleTokenizer(split_restricts=False)
-    self._inverted_index = RamInvertedIndex(SimpleTokenizer())
+    self._parser = WhooshTokenizer(split_restricts=False)
+    self._inverted_index = RamInvertedIndex(WhooshTokenizer())
 
   @property
   def IndexSpec(self):
@@ -148,7 +352,7 @@
   def IndexDocuments(self, documents, response):
     """Indexes an iterable DocumentPb.Document."""
     for document in documents:
-      doc_id = document.doc_id()
+      doc_id = document.id()
       if doc_id in self._documents:
         old_document = self._documents[doc_id]
         self._inverted_index.RemoveDocument(old_document)
@@ -167,30 +371,124 @@
       delete_status = response.add_status()
       delete_status.set_status(search_service_pb.SearchServiceError.OK)
 
-  def _DocumentsForDocIds(self, doc_ids):
-    """Returns the documents for the given doc_ids."""
+  def _DocumentsForPostings(self, postings):
+    """Returns the documents for the given postings."""
     docs = []
-    for doc_id in doc_ids:
-      if doc_id in self._documents:
-        docs.append(self._documents[doc_id])
+    for posting in postings:
+      if posting.doc_id in self._documents:
+        docs.append(self._documents[posting.doc_id])
     return docs
 
+  def _FilterSpecialTokens(self, tokens):
+    """Returns a filted set of tokens not including special characters."""
+    return [token for token in tokens if not isinstance(token, Quote)]
+
+  def _PhraseOccurs(self, doc_id, phrase, position_posting, next_position=None):
+    """Checks phrase occurs for doc_id looking at next_position in phrase."""
+    if not phrase:
+      return True
+    token = phrase[0]
+    for posting in position_posting[token.position]:
+      if posting.doc_id == doc_id:
+        for position in posting.positions:
+          if next_position is None or position == next_position:
+            if self._PhraseOccurs(doc_id, phrase[1:], position_posting,
+                                  position + 1):
+              return True
+          if next_position is not None and position > next_position:
+            return False
+    return False
+
+  def _RestrictPhrase(self, phrase, postings, position_posting):
+    """Restricts postings to those where phrase occurs."""
+    return [posting for posting in postings if
+            self._PhraseOccurs(posting.doc_id, phrase, position_posting)]
+
+  def _PostingsForToken(self, token):
+    """Returns the postings for the token."""
+    return self._inverted_index.GetPostingsForToken(token)
+
+  def _SplitPhrase(self, phrase):
+    """Returns the list of tokens for the phrase."""
+    phrase = phrase[1:len(phrase) - 1]
+    return self._parser.TokenizeText(phrase)
+
+  def _MakeToken(self, value):
+    """Makes a token from the given value."""
+    return self._parser.TokenizeText(value)[0]
+
+  def _AddFieldToTokens(self, field, tokens):
+    """Adds the field restriction to each Token in tokens."""
+    if field:
+      return [token.RestrictField(field) for token in tokens]
+    return tokens
+
+  def _EvaluatePhrase(self, node, field=None):
+    """Evaluates the phrase node returning matching postings."""
+    tokens = self._SplitPhrase(node.getText())
+    tokens = self._AddFieldToTokens(field, tokens)
+    position_posting = {}
+    token = tokens[0]
+    postings = self._PostingsForToken(token)
+    position_posting[token.position] = postings
+    if len(tokens) > 1:
+      for token in tokens[1:]:
+        next_postings = self._PostingsForToken(token)
+        position_posting[token.position] = next_postings
+        postings = [posting for posting in postings if posting in
+                    next_postings]
+        if not postings:
+          break
+    return self._RestrictPhrase(tokens, postings, position_posting)
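
# Illustrative sketch, not part of the patch: what _RestrictPhrase checks for
# a single document, stripped of the posting bookkeeping. Given the positions
# at which each phrase token occurs in the document, the phrase matches when
# some chain of consecutive positions exists.
def phrase_occurs(positions_per_token, next_position=None):
  if not positions_per_token:
    return True
  first, rest = positions_per_token[0], positions_per_token[1:]
  for position in first:
    if next_position is None or position == next_position:
      if phrase_occurs(rest, position + 1):
        return True
  return False

# 'quick' at positions [2, 7] and 'brown' at [8]: 7 -> 8 is consecutive.
# phrase_occurs([[2, 7], [8]]) == True
# phrase_occurs([[2], [8]]) == False
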
+
+  def _PostingsForFieldToken(self, field, value):
+    """Returns postings for the value occurring in the given field."""
+    token = field + ':' + str(value)
+    token = self._MakeToken(token)
+    return self._PostingsForToken(token)
+
+  def _Evaluate(self, node):
+    """Translates the node in a parse tree into a query string fragment."""
+    if node.getType() is QueryParser.CONJUNCTION:
+      postings = self._Evaluate(node.children[0])
+      for child in node.children[1:]:
+        next_postings = self._Evaluate(child)
+        postings = [posting for posting in postings if posting in next_postings]
+        if not postings:
+          break
+      return postings
+    if node.getType() is QueryParser.DISJUNCTION:
+      postings = []
+      for child in node.children:
+        postings.extend(self._Evaluate(child))
+      return postings
+    if node.getType() is QueryParser.RESTRICTION:
+      field_name = node.children[0].getText()
+
+      child = node.children[1]
+      if child.getType() is QueryParser.PHRASE:
+        return self._EvaluatePhrase(node=child, field=field_name)
+      return self._PostingsForFieldToken(field_name, child.getText())
+    if node.getType() is QueryParser.PHRASE:
+      return self._EvaluatePhrase(node)
+    if (node.getType() is QueryParser.TEXT or
+        node.getType() is QueryParser.SELECTOR or
+        node.getType() is QueryParser.INT):
+      token = node.getText()
+      token = self._MakeToken(token)
+      return self._PostingsForToken(token)
+
+    return []
+
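
# Illustrative sketch, not part of the patch: the boolean part of _Evaluate in
# miniature. Conjunctions intersect the postings of their children (preserving
# order, as the list comprehension above does) and disjunctions concatenate
# them; leaf nodes would look their token up in the inverted index.
def evaluate_and(children_postings):
  postings = children_postings[0]
  for next_postings in children_postings[1:]:
    postings = [p for p in postings if p in next_postings]
    if not postings:
      break
  return postings

def evaluate_or(children_postings):
  postings = []
  for child in children_postings:
    postings.extend(child)
  return postings

# evaluate_and([['doc1', 'doc2'], ['doc2', 'doc3']]) == ['doc2']
# evaluate_or([['doc1'], ['doc2', 'doc3']]) == ['doc1', 'doc2', 'doc3']
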
   def Search(self, search_request):
     """Searches the simple index for ."""
     query = urllib.unquote(search_request.query())
-    tokens = self._parser.Tokenize(query)
-    if not tokens:
-      return self._documents.values()
-    else:
-      token = tokens[0]
-      doc_ids = self._inverted_index.GetDocsForToken(token)
-      if len(tokens) > 1:
-        for token in tokens[1]:
-          next_doc_ids = self._inverted_index.GetDocsForToken(token)
-          doc_ids = [doc_id for doc_id in doc_ids if doc_id in next_doc_ids]
-          if not doc_ids:
-            break
-      return self._DocumentsForDocIds(doc_ids)
+    query = query.strip()
+    if not query:
+      return copy.copy(self._documents.values())
+    query_tree = query_parser.Parse(query)
+    postings = self._Evaluate(query_tree)
+    return self._DocumentsForPostings(postings)
 
   def __repr__(self):
     return _Repr(self, [('_index_spec', self._index_spec),
@@ -223,12 +521,12 @@
     status.set_status(search_service_pb.SearchServiceError.INVALID_REQUEST)
     status.set_error_detail('no index for %r' % index_spec)
 
-  def _GetOrCreateIndex(self, index_spec, create=True):
-    index = self.__indexes.get(index_spec.index_name())
+  def _GetIndex(self, index_spec, create=False):
+    index = self.__indexes.get(index_spec.name())
     if index is None:
       if create:
         index = SimpleIndex(index_spec)
-        self.__indexes[index_spec.index_name()] = index
+        self.__indexes[index_spec.name()] = index
       else:
         return None
     elif index.IndexSpec.consistency() != index_spec.consistency():
@@ -236,30 +534,6 @@
                                   ' different consistency mode')
     return index
 
-  def _GetIndex(self, index_spec):
-    return self._GetOrCreateIndex(index_spec=index_spec, create=False)
-
-  def _Dynamic_CreateIndex(self, request, response):
-    """A local implementation of SearchService.CreateIndex RPC.
-
-    Create an index based on a supplied IndexSpec.
-
-    Args:
-      request: A search_service_pb.CreateIndexRequest.
-      response: An search_service_pb.CreateIndexResponse.
-    """
-    index_spec = request.index_spec()
-    index = None
-    try:
-      index = self._GetOrCreateIndex(index_spec)
-    except IndexConsistencyError, exception:
-      self._InvalidRequest(response.mutable_status(), exception)
-      return
-    spec_pb = response.mutable_index_spec()
-    spec_pb.MergeFrom(index.IndexSpec)
-    response.mutable_status().set_status(
-        search_service_pb.SearchServiceError.OK)
-
   def _Dynamic_IndexDocument(self, request, response):
     """A local implementation of SearchService.IndexDocument RPC.
 
@@ -271,7 +545,7 @@
     """
     params = request.params()
     try:
-      index = self._GetOrCreateIndex(params.index_spec())
+      index = self._GetIndex(params.index_spec(), create=True)
       index.IndexDocuments(params.document_list(), response)
     except IndexConsistencyError, exception:
       self._InvalidRequest(response.add_status(), exception)
@@ -290,7 +564,7 @@
       if index is None:
         self._UnknownIndex(response.add_status(), index_spec)
         return
-      index.DeleteDocuments(params.document_id_list(), response)
+      index.DeleteDocuments(params.doc_id_list(), response)
     except IndexConsistencyError, exception:
       self._InvalidRequest(response.add_status(), exception)
 
@@ -310,10 +584,11 @@
 
       for _ in xrange(random.randint(0, 2) * random.randint(5, 15)):
         new_index_spec = response.add_index_metadata().mutable_index_spec()
-        new_index_spec.set_index_name(
-            ''.join(random.choice(string.printable)
+        new_index_spec.set_name(
+            random.choice(list(search._ASCII_PRINTABLE - set('!'))) +
+            ''.join(random.choice(list(search._ASCII_PRINTABLE))
                     for _ in xrange(random.randint(
-                        1, search_api._MAXIMUM_INDEX_NAME_LENGTH))))
+                        0, search._MAXIMUM_INDEX_NAME_LENGTH))))
         new_index_spec.set_consistency(random.choice([
             search_service_pb.IndexSpec.GLOBAL,
             search_service_pb.IndexSpec.PER_DOCUMENT]))
@@ -326,15 +601,20 @@
     for index in self.__indexes.values():
       index_spec = index.IndexSpec
       new_index_spec = response.add_index_metadata().mutable_index_spec()
-      new_index_spec.set_index_name(index_spec.index_name())
+      new_index_spec.set_name(index_spec.name())
       new_index_spec.set_consistency(index_spec.consistency())
     response.mutable_status().set_status(
         search_service_pb.SearchServiceError.OK)
 
   def _RandomSearchResponse(self, request, response):
 
-    if random.random() < 0.1:
+    random.seed()
+    if random.random() < 0.03:
       raise apiproxy_errors.ResponseTooLargeError()
+    response.mutable_status().set_status(
+        random.choice([search_service_pb.SearchServiceError.OK] * 30 +
+                      [search_service_pb.SearchServiceError.TRANSIENT_ERROR] +
+                      [search_service_pb.SearchServiceError.INTERNAL_ERROR]))
 
     params = request.params()
     random.seed(params.query())
@@ -364,7 +644,7 @@
       result = response.add_result()
       doc = result.mutable_document()
       doc_id = RandomText(string.letters + string.digits, 8, 10)
-      doc.set_doc_id(doc_id)
+      doc.set_id(doc_id)
       random.seed(doc_id)
       for _ in params.sort_spec_list():
         result.add_score(random.random())
@@ -394,10 +674,6 @@
         value.set_type(document_pb.FieldValue.TEXT)
         value.set_string_value(RandomText(string.printable, 0, 100))
 
-    response.mutable_status().set_status(
-        random.choice([search_service_pb.SearchServiceError.OK] * 10 +
-                      [search_service_pb.SearchServiceError.TRANSIENT_ERROR] +
-                      [search_service_pb.SearchServiceError.INTERNAL_ERROR]))
     response.set_matched_count(matched_count)
 
   def _Dynamic_Search(self, request, response):
diff --git a/google/appengine/api/taskqueue/taskqueue_service_pb.py b/google/appengine/api/taskqueue/taskqueue_service_pb.py
index dc992b8..b68b17a 100755
--- a/google/appengine/api/taskqueue/taskqueue_service_pb.py
+++ b/google/appengine/api/taskqueue/taskqueue_service_pb.py
@@ -6044,6 +6044,8 @@
   payload_ = None
   has_retry_parameters_ = 0
   retry_parameters_ = None
+  has_first_try_usec_ = 0
+  first_try_usec_ = 0
 
   def __init__(self, contents=None):
     self.header_ = []
@@ -6259,6 +6261,19 @@
 
   def has_retry_parameters(self): return self.has_retry_parameters_
 
+  def first_try_usec(self): return self.first_try_usec_
+
+  def set_first_try_usec(self, x):
+    self.has_first_try_usec_ = 1
+    self.first_try_usec_ = x
+
+  def clear_first_try_usec(self):
+    if self.has_first_try_usec_:
+      self.has_first_try_usec_ = 0
+      self.first_try_usec_ = 0
+
+  def has_first_try_usec(self): return self.has_first_try_usec_
+
 
   def MergeFrom(self, x):
     assert x is not self
@@ -6276,6 +6291,7 @@
     if (x.has_description()): self.set_description(x.description())
     if (x.has_payload()): self.mutable_payload().MergeFrom(x.payload())
     if (x.has_retry_parameters()): self.mutable_retry_parameters().MergeFrom(x.retry_parameters())
+    if (x.has_first_try_usec()): self.set_first_try_usec(x.first_try_usec())
 
   def Equals(self, x):
     if x is self: return 1
@@ -6308,6 +6324,8 @@
     if self.has_payload_ and self.payload_ != x.payload_: return 0
     if self.has_retry_parameters_ != x.has_retry_parameters_: return 0
     if self.has_retry_parameters_ and self.retry_parameters_ != x.retry_parameters_: return 0
+    if self.has_first_try_usec_ != x.has_first_try_usec_: return 0
+    if self.has_first_try_usec_ and self.first_try_usec_ != x.first_try_usec_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -6349,6 +6367,7 @@
     if (self.has_description_): n += 2 + self.lengthString(len(self.description_))
     if (self.has_payload_): n += 2 + self.lengthString(self.payload_.ByteSize())
     if (self.has_retry_parameters_): n += 2 + self.lengthString(self.retry_parameters_.ByteSize())
+    if (self.has_first_try_usec_): n += 2 + self.lengthVarInt64(self.first_try_usec_)
     return n + 3
 
   def ByteSizePartial(self):
@@ -6374,6 +6393,7 @@
     if (self.has_description_): n += 2 + self.lengthString(len(self.description_))
     if (self.has_payload_): n += 2 + self.lengthString(self.payload_.ByteSizePartial())
     if (self.has_retry_parameters_): n += 2 + self.lengthString(self.retry_parameters_.ByteSizePartial())
+    if (self.has_first_try_usec_): n += 2 + self.lengthVarInt64(self.first_try_usec_)
     return n
 
   def Clear(self):
@@ -6391,6 +6411,7 @@
     self.clear_description()
     self.clear_payload()
     self.clear_retry_parameters()
+    self.clear_first_try_usec()
 
   def OutputUnchecked(self, out):
     out.putVarInt32(18)
@@ -6437,6 +6458,9 @@
       out.putVarInt32(186)
       out.putVarInt32(self.retry_parameters_.ByteSize())
       self.retry_parameters_.OutputUnchecked(out)
+    if (self.has_first_try_usec_):
+      out.putVarInt32(192)
+      out.putVarInt64(self.first_try_usec_)
 
   def OutputPartial(self, out):
     if (self.has_task_name_):
@@ -6486,6 +6510,9 @@
       out.putVarInt32(186)
       out.putVarInt32(self.retry_parameters_.ByteSizePartial())
       self.retry_parameters_.OutputPartial(out)
+    if (self.has_first_try_usec_):
+      out.putVarInt32(192)
+      out.putVarInt64(self.first_try_usec_)
 
   def TryMerge(self, d):
     while 1:
@@ -6539,6 +6566,9 @@
         d.skip(length)
         self.mutable_retry_parameters().TryMerge(tmp)
         continue
+      if tt == 192:
+        self.set_first_try_usec(d.getVarInt64())
+        continue
 
 
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
@@ -6580,6 +6610,7 @@
       res+=prefix+"retry_parameters <\n"
       res+=self.retry_parameters_.__str__(prefix + "  ", printElemNumber)
       res+=prefix+">\n"
+    if self.has_first_try_usec_: res+=prefix+("first_try_usec: %s\n" % self.DebugFormatInt64(self.first_try_usec_))
     return res
 
 class TaskQueueQueryTasksResponse(ProtocolBuffer.ProtocolMessage):
@@ -6700,6 +6731,7 @@
   kTaskdescription = 21
   kTaskpayload = 22
   kTaskretry_parameters = 23
+  kTaskfirst_try_usec = 24
 
   _TEXT = _BuildTagLookupTable({
     0: "ErrorCode",
@@ -6726,7 +6758,8 @@
     21: "description",
     22: "payload",
     23: "retry_parameters",
-  }, 23)
+    24: "first_try_usec",
+  }, 24)
 
   _TYPES = _BuildTagLookupTable({
     0: ProtocolBuffer.Encoder.NUMERIC,
@@ -6753,7 +6786,8 @@
     21: ProtocolBuffer.Encoder.STRING,
     22: ProtocolBuffer.Encoder.STRING,
     23: ProtocolBuffer.Encoder.STRING,
-  }, 23, ProtocolBuffer.Encoder.MAX_TYPE)
+    24: ProtocolBuffer.Encoder.NUMERIC,
+  }, 24, ProtocolBuffer.Encoder.MAX_TYPE)
 
 
   _STYLE = """"""
diff --git a/google/appengine/api/taskqueue/taskqueue_stub.py b/google/appengine/api/taskqueue/taskqueue_stub.py
index 3e216ff..e39f5d1 100755
--- a/google/appengine/api/taskqueue/taskqueue_stub.py
+++ b/google/appengine/api/taskqueue/taskqueue_stub.py
@@ -45,6 +45,7 @@
 import base64
 import bisect
 import calendar
+import cgi
 import datetime
 import httplib
 import logging
@@ -55,6 +56,7 @@
 import time
 
 import taskqueue_service_pb
+import taskqueue
 
 from google.appengine.api import api_base_pb
 from google.appengine.api import apiproxy_stub
@@ -857,6 +859,78 @@
     self._queues[request.queue_name()].ModifyTaskLease_Rpc(request, response)
 
 
+class Retry(object):
+  """Task retry caclulator class.
+
+  Determines if and when a task should next be run.
+  """
+
+
+
+  _default_params = taskqueue_service_pb.TaskQueueRetryParameters()
+  _default_params.set_min_backoff_sec(0.001)
+  _default_params.set_max_backoff_sec(3600)
+  _default_params.set_max_doublings(100000)
+
+  def __init__(self, task, queue):
+    """Constructor.
+
+    Args:
+      task: A taskqueue_service_pb.TaskQueueQueryTasksResponse_Task instance.
+          May be None.
+      queue: A _Queue instance. May be None.
+    """
+    if task is not None and task.has_retry_parameters():
+      self._params = task.retry_parameters()
+    elif queue is not None and queue.retry_parameters is not None:
+      self._params = queue.retry_parameters
+    else:
+      self._params = self._default_params
+
+  def CanRetry(self, retry_count, age_usec):
+    """Computes whether a task can be retried.
+
+    Args:
+      retry_count: An integer specifying which retry this is.
+      age_usec: An integer specifying the microseconds since the first try.
+
+    Returns:
+     True if a task is eligible for retrying.
+    """
+    if self._params.has_retry_limit() and self._params.has_age_limit_sec():
+      return (self._params.retry_limit() >= retry_count or
+              self._params.age_limit_sec() >= _UsecToSec(age_usec))
+
+    if self._params.has_retry_limit():
+      return self._params.retry_limit() >= retry_count
+
+    if self._params.has_age_limit_sec():
+      return self._params.age_limit_sec() >= _UsecToSec(age_usec)
+
+    return True
+
+  def CalculateBackoffUsec(self, retry_count):
+    """Calculates time before the specified retry.
+
+    Args:
+      retry_count: An integer specifying which retry this is.
+
+    Returns:
+      The number of microseconds before a task should be retried.
+    """
+    exponent = min(retry_count - 1, self._params.max_doublings())
+    linear_steps = retry_count - exponent
+    min_backoff_usec = _SecToUsec(self._params.min_backoff_sec())
+    max_backoff_usec = _SecToUsec(self._params.max_backoff_sec())
+    backoff_usec = min_backoff_usec
+    if exponent > 0:
+      backoff_usec *= (2 ** (min(1023, exponent)))
+    if linear_steps > 1:
+      backoff_usec *= linear_steps
+
+    return int(min(max_backoff_usec, backoff_usec))
+
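
# Illustrative sketch, not part of the patch: Retry.CalculateBackoffUsec above
# rewritten in plain seconds with a worked example. Backoff doubles from
# min_backoff_sec for the first max_doublings retries and then grows linearly,
# capped at max_backoff_sec. The parameter values below are only examples.
def backoff_sec(retry_count, min_backoff_sec=1.0, max_backoff_sec=3600.0,
                max_doublings=3):
  exponent = min(retry_count - 1, max_doublings)
  linear_steps = retry_count - exponent
  backoff = min_backoff_sec
  if exponent > 0:
    backoff *= 2 ** min(1023, exponent)
  if linear_steps > 1:
    backoff *= linear_steps
  return min(max_backoff_sec, backoff)

# With min_backoff_sec=1 and max_doublings=3, successive retries wait
# 1, 2, 4, 8, 16, 24, 32, ... seconds (doubling first, then linear growth).
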
+
 class _Queue(object):
   """A Taskqueue Queue.
 
@@ -1052,7 +1126,18 @@
 
     leased_tasks = self._sorted_by_eta[:max_tasks]
     self._sorted_by_eta = self._sorted_by_eta[max_tasks:]
+    tasks_to_delete = []
     for _, name, task in leased_tasks:
+      retry = Retry(task, self)
+      if not retry.CanRetry(task.retry_count() + 1, 0):
+        logging.warning(
+            'Task %s in queue %s cannot be leased again after %d leases.',
+            task.task_name(), self.queue_name, task.retry_count())
+        tasks_to_delete.append(task)
+
+        self._PostponeTaskInsertOnly(task, task.eta_usec())
+        continue
+
 
       self._PostponeTaskInsertOnly(
           task, now_eta_usec + _SecToUsec(lease_seconds))
@@ -1068,6 +1153,10 @@
 
       task_response.set_body(task.body())
 
+
+    for task in tasks_to_delete:
+      self._DeleteNoAcquireLock(task.task_name())
+
   @_WithLock
   def ModifyTaskLease_Rpc(self, request, response):
     """Implementation of the ModifyTaskLease RPC.
@@ -1637,14 +1726,25 @@
     now = time.time()
     queue, task = self._group.GetNextPushTask()
     while task and _UsecToSec(task.eta_usec()) <= now:
+      if task.retry_count() == 0:
+        task.set_first_try_usec(_SecToUsec(now))
       if self.task_executor.ExecuteTask(task, queue):
         queue.Delete(task.task_name())
       else:
-        logging.warning(
-            'Task %s failed to execute. This task will retry in %.1f seconds',
-            task.task_name(), self.default_retry_seconds)
-        queue.PostponeTask(task, _SecToUsec(
-            now + self.default_retry_seconds))
+        retry = Retry(task, queue)
+        age_usec = _SecToUsec(now) - task.first_try_usec()
+        if retry.CanRetry(task.retry_count() + 1, age_usec):
+          retry_usec = retry.CalculateBackoffUsec(task.retry_count() + 1)
+          logging.warning(
+              'Task %s failed to execute. This task will retry in %.3f seconds',
+              task.task_name(), _UsecToSec(retry_usec))
+          queue.PostponeTask(task, _SecToUsec(now) + retry_usec)
+        else:
+          logging.warning(
+              'Task %s failed to execute. The task has no remaining retries. '
+              'Failing permanently after %d retries and %d seconds',
+              task.task_name(), task.retry_count(), _UsecToSec(age_usec))
+          queue.Delete(task.task_name())
       queue, task = self._group.GetNextPushTask()
 
     if task:
@@ -2139,3 +2239,55 @@
     """
 
     self._GetGroup().ModifyTaskLease_Rpc(request, response)
+
+
+
+
+
+  def get_filtered_tasks(self, url=None, name=None, queue_names=None):
+    """Get the tasks in the task queue with filters.
+
+    Args:
+      url: A URL that all returned tasks should point at.
+      name: The name of all returned tasks.
+      queue_names: A list of queue names to retrieve tasks from. If left
+        blank, this defaults to all available queues.
+
+    Returns:
+      A list of taskqueue.Task objects.
+    """
+    all_queue_names = [queue['name'] for queue in self.GetQueues()]
+
+
+    if isinstance(queue_names, basestring):
+      queue_names = [queue_names]
+
+
+    if queue_names is None:
+      queue_names = all_queue_names
+
+
+    task_dicts = []
+    for queue_name in queue_names:
+      if queue_name in all_queue_names:
+        for task in self.GetTasks(queue_name):
+          if url is not None and task['url'] != url:
+            continue
+          if name is not None and task['name'] != name:
+            continue
+          task_dicts.append(task)
+
+    tasks = []
+    for task in task_dicts:
+
+      decoded_body = base64.b64decode(task['body'])
+      if decoded_body:
+        task['params'] = cgi.parse_qs(decoded_body)
+
+      task['eta'] = datetime.datetime.strptime(task['eta'], '%Y/%m/%d %H:%M:%S')
+
+      task_object = taskqueue.Task(name=task['name'], method=task['method'],
+                                   url=task['url'], headers=task['headers'],
+                                   params=task.get('params'), eta=task['eta'])
+      tasks.append(task_object)
+    return tasks
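
# Illustrative usage sketch, not part of the patch: pulling tasks back out of
# the dev_appserver stub with the new get_filtered_tasks() helper. Assumes the
# taskqueue stub has been registered (for example by the SDK testbed) and that
# a queue named 'default' exists; both are assumptions of this example.
from google.appengine.api import apiproxy_stub_map

def pending_task_urls():
  stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
  tasks = stub.get_filtered_tasks(queue_names=['default'])
  return [task.url for task in tasks]
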
diff --git a/google/appengine/api/urlfetch.py b/google/appengine/api/urlfetch.py
index 40ee8c3..ad55b61 100755
--- a/google/appengine/api/urlfetch.py
+++ b/google/appengine/api/urlfetch.py
@@ -33,6 +33,7 @@
 
 
 import os
+import threading
 import UserDict
 import urllib2
 import urlparse
@@ -53,7 +54,6 @@
 PUT = 4
 DELETE = 5
 
-
 _URL_STRING_MAP = {
     'GET': GET,
     'POST': POST,
@@ -62,9 +62,10 @@
     'DELETE': DELETE,
 }
 
-
 _VALID_METHODS = frozenset(_URL_STRING_MAP.values())
 
+_thread_local_settings = threading.local()
+
 
 class _CaselessDict(UserDict.IterableUserDict):
   """Case insensitive dictionary.
@@ -212,6 +213,8 @@
   Returns:
     An apiproxy_stub_map.UserRPC object specialized for this service.
   """
+  if deadline is None:
+    deadline = get_default_fetch_deadline()
   return apiproxy_stub_map.UserRPC('urlfetch', deadline, callback)
 
 
@@ -396,3 +399,21 @@
     self.final_url = response_proto.finalurl() or None
     for header_proto in response_proto.header_list():
       self.headers[header_proto.key()] = header_proto.value()
+
+
+def get_default_fetch_deadline():
+  """Get the default value for create_rpc()'s deadline parameter."""
+  return getattr(_thread_local_settings, "default_fetch_deadline", None)
+
+
+def set_default_fetch_deadline(value):
+  """Set the default value for create_rpc()'s deadline parameter.
+
+  This setting is thread-specific (i.e. it's stored in a thread local).
+  This function doesn't do any range or type checking of the value.  The
+  default is None.
+
+  See also: create_rpc(), fetch()
+
+  """
+  _thread_local_settings.default_fetch_deadline = value
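
# Illustrative usage sketch, not part of the patch: raising the default
# URLFetch deadline for the current thread before issuing requests. fetch()
# builds its RPC through create_rpc(), which now falls back to this default
# when no explicit deadline is given. The URL below is a placeholder.
from google.appengine.api import urlfetch

def fetch_slow_service():
  urlfetch.set_default_fetch_deadline(30)  # seconds, stored thread-locally
  return urlfetch.fetch('http://example.com/slow-endpoint')
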
diff --git a/google/appengine/datastore/datastore_index.py b/google/appengine/datastore/datastore_index.py
index febad19..642b111 100755
--- a/google/appengine/datastore/datastore_index.py
+++ b/google/appengine/datastore/datastore_index.py
@@ -84,7 +84,7 @@
 class Index(validation.Validated):
   """Individual index definition.
 
-  Order of the properties properties determins a given indixes sort priority.
+  The order of the properties determines a given index's sort priority.
 
   Attributes:
     kind: Datastore kind that index belongs to.
@@ -248,14 +248,14 @@
 
 
 
-  if datastore_types._KEY_SPECIAL_PROPERTY in eq_properties:
+  if datastore_types.KEY_SPECIAL_PROPERTY in eq_properties:
     orders = []
 
 
 
   new_orders = []
   for o in orders:
-    if o.property() == datastore_types._KEY_SPECIAL_PROPERTY:
+    if o.property() == datastore_types.KEY_SPECIAL_PROPERTY:
       new_orders.append(o)
       break
     new_orders.append(o)
@@ -276,7 +276,7 @@
 
 
   has_key_desc_order = False
-  if orders and orders[-1].property() == datastore_types._KEY_SPECIAL_PROPERTY:
+  if orders and orders[-1].property() == datastore_types.KEY_SPECIAL_PROPERTY:
     if orders[-1].direction() == ASCENDING:
       orders = orders[:-1]
     else:
@@ -290,11 +290,11 @@
   if not has_key_desc_order:
     for f in filters:
       if (f.op() in INEQUALITY_OPERATORS and
-          f.property(0).name() != datastore_types._KEY_SPECIAL_PROPERTY):
+          f.property(0).name() != datastore_types.KEY_SPECIAL_PROPERTY):
         break
     else:
       filters = [f for f in filters
-          if f.property(0).name() != datastore_types._KEY_SPECIAL_PROPERTY]
+          if f.property(0).name() != datastore_types.KEY_SPECIAL_PROPERTY]
 
   return (filters, orders)
 
@@ -476,6 +476,67 @@
 
   return (required, kind, ancestor, tuple(props), len(eq_filters))
 
+def MinimalCompositeIndexForQuery(query, index_defs):
+  """Computes the minimal composite index for this query.
+
+  Unlike datastore_index.CompositeIndexForQuery, this function takes into
+  account indexes that already exist in the system.
+
+  Args:
+    query: the datastore_pb.Query to compute suggestions for
+    index_defs: a list of datastore_index.Index objects that
+      already exist.
+
+  Returns:
+    None if no index is needed, otherwise the minimal index in the form
+    (is_most_efficient, kind, ancestor, properties), where is_most_efficient
+    is a bool denoting whether the suggested index is the most efficient (i.e.
+    the one returned by datastore_index.CompositeIndexForQuery).
+  """
+
+  required, kind, ancestor, props, num_eq = CompositeIndexForQuery(query)
+
+  if not required:
+    return None
+
+
+  postfix = props[num_eq:]
+  eq_props = set(prop[0] for prop in props[:num_eq])
+  prefix_remaining = eq_props.copy()
+  ancestor_remaining = ancestor
+
+  for definition in index_defs:
+    if (kind != definition.kind or
+
+        (not ancestor and definition.ancestor)):
+      continue
+
+    _, _, index_props = IndexToKey(definition)
+
+    if index_props[-len(postfix):] != postfix:
+      continue
+
+
+    index_eq_props = set(prop[0] for prop in index_props[:-len(postfix)])
+    if index_eq_props - eq_props:
+      continue
+
+
+    prefix_remaining -= index_eq_props
+    if definition.ancestor:
+      ancestor_remaining = False
+
+    if not (prefix_remaining or ancestor_remaining):
+      return None
+
+  minimal_props = tuple((prop, datastore_pb.Query_Order.ASCENDING)
+                   for prop in sorted(prefix_remaining)) + postfix
+
+  return (minimal_props == props and ancestor_remaining == ancestor,
+          kind,
+          ancestor_remaining,
+          minimal_props)
+
 
 def IndexYamlForQuery(kind, ancestor, props):
   """Return the composite index definition YAML needed for a query.
diff --git a/google/appengine/datastore/datastore_pb.py b/google/appengine/datastore/datastore_pb.py
index d173c7e..b46d73e 100644
--- a/google/appengine/datastore/datastore_pb.py
+++ b/google/appengine/datastore/datastore_pb.py
@@ -4683,6 +4683,8 @@
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
 class TouchRequest(ProtocolBuffer.ProtocolMessage):
+  has_force_ = 0
+  force_ = 0
 
   def __init__(self, contents=None):
     self.key_ = []
@@ -4721,11 +4723,25 @@
 
   def clear_composite_index(self):
     self.composite_index_ = []
+  def force(self): return self.force_
+
+  def set_force(self, x):
+    self.has_force_ = 1
+    self.force_ = x
+
+  def clear_force(self):
+    if self.has_force_:
+      self.has_force_ = 0
+      self.force_ = 0
+
+  def has_force(self): return self.has_force_
+
 
   def MergeFrom(self, x):
     assert x is not self
     for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
     for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
+    if (x.has_force()): self.set_force(x.force())
 
   def Equals(self, x):
     if x is self: return 1
@@ -4735,6 +4751,8 @@
     if len(self.composite_index_) != len(x.composite_index_): return 0
     for e1, e2 in zip(self.composite_index_, x.composite_index_):
       if e1 != e2: return 0
+    if self.has_force_ != x.has_force_: return 0
+    if self.has_force_ and self.force_ != x.force_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -4751,6 +4769,7 @@
     for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
     n += 1 * len(self.composite_index_)
     for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
+    if (self.has_force_): n += 2
     return n
 
   def ByteSizePartial(self):
@@ -4759,11 +4778,13 @@
     for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSizePartial())
     n += 1 * len(self.composite_index_)
     for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSizePartial())
+    if (self.has_force_): n += 2
     return n
 
   def Clear(self):
     self.clear_key()
     self.clear_composite_index()
+    self.clear_force()
 
   def OutputUnchecked(self, out):
     for i in xrange(len(self.key_)):
@@ -4774,6 +4795,9 @@
       out.putVarInt32(18)
       out.putVarInt32(self.composite_index_[i].ByteSize())
       self.composite_index_[i].OutputUnchecked(out)
+    if (self.has_force_):
+      out.putVarInt32(24)
+      out.putBoolean(self.force_)
 
   def OutputPartial(self, out):
     for i in xrange(len(self.key_)):
@@ -4784,6 +4808,9 @@
       out.putVarInt32(18)
       out.putVarInt32(self.composite_index_[i].ByteSizePartial())
       self.composite_index_[i].OutputPartial(out)
+    if (self.has_force_):
+      out.putVarInt32(24)
+      out.putBoolean(self.force_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -4800,6 +4827,9 @@
         d.skip(length)
         self.add_composite_index().TryMerge(tmp)
         continue
+      if tt == 24:
+        self.set_force(d.getBoolean())
+        continue
 
 
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
@@ -4824,6 +4854,7 @@
       res+=e.__str__(prefix + "  ", printElemNumber)
       res+=prefix+">\n"
       cnt+=1
+    if self.has_force_: res+=prefix+("force: %s\n" % self.DebugFormatBool(self.force_))
     return res
 
 
@@ -4832,18 +4863,21 @@
 
   kkey = 1
   kcomposite_index = 2
+  kforce = 3
 
   _TEXT = _BuildTagLookupTable({
     0: "ErrorCode",
     1: "key",
     2: "composite_index",
-  }, 2)
+    3: "force",
+  }, 3)
 
   _TYPES = _BuildTagLookupTable({
     0: ProtocolBuffer.Encoder.NUMERIC,
     1: ProtocolBuffer.Encoder.STRING,
     2: ProtocolBuffer.Encoder.STRING,
-  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)
+    3: ProtocolBuffer.Encoder.NUMERIC,
+  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)
 
 
   _STYLE = """"""
@@ -6549,6 +6583,8 @@
 class BeginTransactionRequest(ProtocolBuffer.ProtocolMessage):
   has_app_ = 0
   app_ = ""
+  has_allow_multiple_eg_ = 0
+  allow_multiple_eg_ = 0
 
   def __init__(self, contents=None):
     if contents is not None: self.MergeFromString(contents)
@@ -6566,15 +6602,31 @@
 
   def has_app(self): return self.has_app_
 
+  def allow_multiple_eg(self): return self.allow_multiple_eg_
+
+  def set_allow_multiple_eg(self, x):
+    self.has_allow_multiple_eg_ = 1
+    self.allow_multiple_eg_ = x
+
+  def clear_allow_multiple_eg(self):
+    if self.has_allow_multiple_eg_:
+      self.has_allow_multiple_eg_ = 0
+      self.allow_multiple_eg_ = 0
+
+  def has_allow_multiple_eg(self): return self.has_allow_multiple_eg_
+
 
   def MergeFrom(self, x):
     assert x is not self
     if (x.has_app()): self.set_app(x.app())
+    if (x.has_allow_multiple_eg()): self.set_allow_multiple_eg(x.allow_multiple_eg())
 
   def Equals(self, x):
     if x is self: return 1
     if self.has_app_ != x.has_app_: return 0
     if self.has_app_ and self.app_ != x.app_: return 0
+    if self.has_allow_multiple_eg_ != x.has_allow_multiple_eg_: return 0
+    if self.has_allow_multiple_eg_ and self.allow_multiple_eg_ != x.allow_multiple_eg_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -6588,6 +6640,7 @@
   def ByteSize(self):
     n = 0
     n += self.lengthString(len(self.app_))
+    if (self.has_allow_multiple_eg_): n += 2
     return n + 1
 
   def ByteSizePartial(self):
@@ -6595,19 +6648,27 @@
     if (self.has_app_):
       n += 1
       n += self.lengthString(len(self.app_))
+    if (self.has_allow_multiple_eg_): n += 2
     return n
 
   def Clear(self):
     self.clear_app()
+    self.clear_allow_multiple_eg()
 
   def OutputUnchecked(self, out):
     out.putVarInt32(10)
     out.putPrefixedString(self.app_)
+    if (self.has_allow_multiple_eg_):
+      out.putVarInt32(16)
+      out.putBoolean(self.allow_multiple_eg_)
 
   def OutputPartial(self, out):
     if (self.has_app_):
       out.putVarInt32(10)
       out.putPrefixedString(self.app_)
+    if (self.has_allow_multiple_eg_):
+      out.putVarInt32(16)
+      out.putBoolean(self.allow_multiple_eg_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -6615,6 +6676,9 @@
       if tt == 10:
         self.set_app(d.getPrefixedString())
         continue
+      if tt == 16:
+        self.set_allow_multiple_eg(d.getBoolean())
+        continue
 
 
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
@@ -6624,6 +6688,7 @@
   def __str__(self, prefix="", printElemNumber=0):
     res=""
     if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
+    if self.has_allow_multiple_eg_: res+=prefix+("allow_multiple_eg: %s\n" % self.DebugFormatBool(self.allow_multiple_eg_))
     return res
 
 
@@ -6631,16 +6696,19 @@
     return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
 
   kapp = 1
+  kallow_multiple_eg = 2
 
   _TEXT = _BuildTagLookupTable({
     0: "ErrorCode",
     1: "app",
-  }, 1)
+    2: "allow_multiple_eg",
+  }, 2)
 
   _TYPES = _BuildTagLookupTable({
     0: ProtocolBuffer.Encoder.NUMERIC,
     1: ProtocolBuffer.Encoder.STRING,
-  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)
+    2: ProtocolBuffer.Encoder.NUMERIC,
+  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)
 
 
   _STYLE = """"""
diff --git a/google/appengine/datastore/datastore_query.py b/google/appengine/datastore/datastore_query.py
index fdafb33..b403846 100755
--- a/google/appengine/datastore/datastore_query.py
+++ b/google/appengine/datastore/datastore_query.py
@@ -39,6 +39,7 @@
            'Batcher',
            'CompositeFilter',
            'CompositeOrder',
+           'CorrelationFilter',
            'Cursor',
            'FetchOptions',
            'FilterPredicate',
@@ -55,7 +56,6 @@
 
 import base64
 import pickle
-import heapq
 import collections
 
 from google.appengine.datastore import entity_pb
@@ -131,8 +131,8 @@
           datastore_types.PropertyValueToKeyValue(prop.value()))
 
 
-  if datastore_types._KEY_SPECIAL_PROPERTY in value_map:
-    value_map[datastore_types._KEY_SPECIAL_PROPERTY] = [
+  if datastore_types.KEY_SPECIAL_PROPERTY in value_map:
+    value_map[datastore_types.KEY_SPECIAL_PROPERTY] = [
         datastore_types.ReferenceToKeyValue(entity.key())]
 
   return value_map
@@ -194,7 +194,7 @@
 
     Args:
       key_value_map: the comparable value map from which to remove
-        values.
+        values. Does not need to contain values for all filtered properties.
 
     Returns:
       A value that evaluates to False if every value in a single list was
@@ -241,10 +241,16 @@
     return False
 
   def _prune(self, value_map):
+
+
+
+
+    if self._get_prop_name() not in value_map:
+      return True
     values = [value for value in value_map[self._get_prop_name()]
               if self._apply_to_value(value)]
     value_map[self._get_prop_name()] = values
-    return values
+    return bool(values)
 
 
 class PropertyFilter(_SinglePropertyFilter):
@@ -258,6 +264,9 @@
       '=': datastore_pb.Query_Filter.EQUAL,
       }
 
+  _OPERATORS_INVERSE = dict((value, key)
+                            for key, value in _OPERATORS.iteritems())
+
   _OPERATORS_TO_PYTHON_OPERATOR = {
       datastore_pb.Query_Filter.LESS_THAN: '<',
       datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL: '<=',
@@ -299,6 +308,22 @@
     self._filter.set_op(self._OPERATORS[op])
     self._filter.add_property().CopyFrom(value)
 
+  @property
+  def op(self):
+    raw_op = self._filter.op()
+    return self._OPERATORS_INVERSE.get(raw_op, str(raw_op))
+
+  @property
+  def value(self):
+
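+    # The raw Property protocol buffer that holds the filter's comparison
+    # value.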
+    return self._filter.property(0)
+
+  def __repr__(self):
+    prop = self.value
+    name = prop.name()
+    value = datastore_types.FromPropertyPb(prop)
+    return '%s(%r, <%r, %r>)' % (self.__class__.__name__, self.op, name, value)
+
   def _get_prop_name(self):
     return self._filter.property(0).name()
 
@@ -568,6 +593,106 @@
         'Pickling of %r is unsupported.' % self)
 
 
+class CorrelationFilter(FilterPredicate):
+  """A filter that isolates correlated values and applies a sub-filter on them.
+
+  This filter assumes that every property used by the sub-filter should be
+  grouped before being passed to the sub-filter. The default grouping puts
+  each value in its own group. Consider:
+    e = {a: [1, 2], b: [2, 1, 3], c: 3}
+
+  A correlation filter with a sub-filter that operates on (a, b) will be tested
+  against the following 3 sets of values:
+    {a: 1, b: 2}
+    {a: 2, b: 1}
+    {b: 3}
+
+  In this case CorrelationFilter('a = 2 AND b = 2') won't match this entity but
+  CorrelationFilter('a = 2 AND b = 1') will. To apply an uncorrelated filter on
+  c, the filter must be applied in parallel to the correlation filter. For
+  example:
+    CompositeFilter(AND, [CorrelationFilter('a = 2 AND b = 1'), 'c = 3'])
+
+  If 'c = 3' was included in the correlation filter, c would be grouped as well.
+  This would result in the following values:
+    {a: 1, b: 2, c: 3}
+    {a: 2, b: 1}
+    {b: 3}
+
+  If any set of correlated values match the sub-filter then the entity matches
+  the correlation filter.
+  """
+
+  def __init__(self, subfilter):
+    """Constructor.
+
+    Args:
+      subfilter: A FilterPredicate to apply to the correlated values
+    """
+    self._subfilter = subfilter
+
+  @property
+  def subfilter(self):
+    return self._subfilter
+
+  def __repr__(self):
+    return '%s(%r)' % (self.__class__.__name__, self.subfilter)
+
+  def _apply(self, value_map):
+
+
+    base_map = dict((prop, []) for prop in self._get_prop_names())
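+    # base_map is a template holding an empty value list for every property
+    # the sub-filter uses; each correlated value map starts as a copy of it.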
+
+
+    value_maps = []
+    for prop in base_map:
+
+      grouped = self._group_values(prop, value_map[prop])
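+      # Values that share an index across properties are treated as
+      # correlated: the i-th group for each property lands in the i-th map.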
+
+      while len(value_maps) < len(grouped):
+        value_maps.append(base_map.copy())
+
+      for value, map in zip(grouped, value_maps):
+        map[prop] = value
+
+    return self._apply_correlated(value_maps)
+
+  def _apply_correlated(self, value_maps):
+    """Applies sub-filter to the correlated value maps.
+
+    The default implementation matches when any value_map in value_maps
+    matches the sub-filter.
+
+    Args:
+      value_maps: A list of correlated value_maps.
+    Returns:
+      True if any of the correlated value maps matches the sub-filter.
+    """
+
+    for map in value_maps:
+      if self._subfilter._apply(map):
+        return True
+    return False
+
+  def _group_values(self, prop, values):
+    """A function that groups the given values.
+
+    Override this function to introduce custom grouping logic. The default
+    implementation assumes each value belongs in its own group.
+
+    Args:
+      prop: The name of the property whose values are being grouped.
+      values: A list of opaque values.
+
+    Returns:
+      A list of lists of grouped values.
+    """
+    return [[value] for value in values]
+
+  def _get_prop_names(self):
+    return self._subfilter._get_prop_names()
+
+
 class CompositeFilter(FilterPredicate):
   """An immutable filter predicate that combines other predicates.
 
@@ -596,22 +721,22 @@
     """
     if not op in self._OPERATORS:
       raise datastore_errors.BadArgumentError('unknown operator (%s)' % (op,))
-    if not filters or not isinstance(filters, list):
+    if not filters or not isinstance(filters, (list, tuple)):
       raise datastore_errors.BadArgumentError(
           'filters argument should be a non-empty list (%r)' % (filters,))
 
     super(CompositeFilter, self).__init__()
     self._op = op
-    self._filters = []
+    flattened = []
 
 
     for f in filters:
       if isinstance(f, CompositeFilter) and f._op == self._op:
 
 
-        self._filters.extend(f._filters)
+        flattened.extend(f._filters)
       elif isinstance(f, FilterPredicate):
-        self._filters.append(f)
+        flattened.append(f)
       else:
         raise datastore_errors.BadArgumentError(
             'filters argument must be a list of FilterPredicates, found (%r)' %
@@ -619,8 +744,8 @@
 
 
     if op == self.AND:
-      filters = self._filters
-      self._filters = []
+      filters = flattened
+      flattened = []
       ineq_map = {}
 
       for f in filters:
@@ -629,17 +754,35 @@
           name = f._get_prop_name()
           index = ineq_map.get(name)
           if index is not None:
-            range_filter = self._filters[index]
-            self._filters[index] = range_filter.intersect(f)
+            range_filter = flattened[index]
+            flattened[index] = range_filter.intersect(f)
           else:
             if isinstance(f, PropertyFilter):
               range_filter = _PropertyRangeFilter.from_property_filter(f)
             else:
               range_filter = f
-            ineq_map[name] = len(self._filters)
-            self._filters.append(range_filter)
+            ineq_map[name] = len(flattened)
+            flattened.append(range_filter)
         else:
-          self._filters.append(f)
+          flattened.append(f)
+
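+    # Keep the flattened filters in a tuple so the predicate stays immutable
+    # even though they are now exposed through the 'filters' property.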
+    self._filters = tuple(flattened)
+
+  @property
+  def op(self):
+    return self._op
+
+  @property
+  def filters(self):
+    return self._filters
+
+  def __repr__(self):
+    op = self.op
+    if op == self.AND:
+      op = 'AND'
+    else:
+      op = str(op)
+    return '%s(%s, %r)' % (self.__class__.__name__, op, list(self.filters))
 
   def _get_prop_names(self):
     names = set()
@@ -722,7 +865,7 @@
     self._keys = key_value_set
 
   def _get_prop_name(self):
-    return datastore_types._KEY_SPECIAL_PROPERTY
+    return datastore_types.KEY_SPECIAL_PROPERTY
 
   def _apply_to_value(self, value):
     return value not in self._keys
@@ -752,6 +895,14 @@
   always used.
   """
 
+  def reversed(self):
+    """Constructs an order representing the reverse of the current order.
+
+    Returns:
+      A new order representing the reverse direction.
+    """
+    raise NotImplementedError
+
   def _key(self, lhs_value_map):
     """Creates a key for the given value map."""
     raise NotImplementedError
@@ -789,7 +940,7 @@
       the current order.
     """
     names = self._get_prop_names()
-    names.add(datastore_types._KEY_SPECIAL_PROPERTY)
+    names.add(datastore_types.KEY_SPECIAL_PROPERTY)
     if filter_predicate is not None:
       names |= filter_predicate._get_prop_names()
 
@@ -797,7 +948,7 @@
     if filter_predicate is not None:
       filter_predicate._prune(value_map)
     return (self._key(value_map),
-            value_map[datastore_types._KEY_SPECIAL_PROPERTY])
+            value_map[datastore_types.KEY_SPECIAL_PROPERTY])
 
   def cmp(self, lhs, rhs, filter_predicate=None):
     """Compares the given values taking into account any filters.
@@ -834,9 +985,9 @@
 
 
 
-    lhs_key = (lhs_value_map.get(datastore_types._KEY_SPECIAL_PROPERTY) or
+    lhs_key = (lhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
                datastore_types.ReferenceToKeyValue(lhs.key()))
-    rhs_key = (rhs_value_map.get(datastore_types._KEY_SPECIAL_PROPERTY) or
+    rhs_key = (rhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
                datastore_types.ReferenceToKeyValue(rhs.key()))
 
     return cmp(lhs_key, rhs_key)
@@ -893,6 +1044,31 @@
     self.__order.set_property(prop.encode('utf-8'))
     self.__order.set_direction(direction)
 
+  @property
+  def prop(self):
+    return self.__order.property()
+
+  @property
+  def direction(self):
+    return self.__order.direction()
+
+  def __repr__(self):
+    name = self.prop
+    direction = self.direction
+    extra = ''
+    if direction == self.DESCENDING:
+      extra = ', DESCENDING'
+    name = repr(name).encode('utf-8')[1:-1]
+    return '%s(<%s>%s)' % (self.__class__.__name__, name, extra)
+
+  def reversed(self):
+    if self.__order.direction() == self.ASCENDING:
+      return PropertyOrder(self.__order.property().decode('utf-8'),
+                           self.DESCENDING)
+    else:
+      return PropertyOrder(self.__order.property().decode('utf-8'),
+                           self.ASCENDING)
+
   def _get_prop_names(self):
     return set([self.__order.property()])
 
@@ -956,20 +1132,31 @@
     Args:
       orders: A list of Orders which are applied in order.
     """
-    if not isinstance(orders, list):
+    if not isinstance(orders, (list, tuple)):
       raise datastore_errors.BadArgumentError(
-          'orders argument should be list (%r)' % (orders,))
+          'orders argument should be list or tuple (%r)' % (orders,))
 
     super(CompositeOrder, self).__init__()
-    self._orders = []
+    flattened = []
     for order in orders:
       if isinstance(order, CompositeOrder):
-        self._orders.extend(order._orders)
+        flattened.extend(order._orders)
       elif isinstance(order, Order):
-        self._orders.append(order)
+        flattened.append(order)
       else:
         raise datastore_errors.BadArgumentError(
             'orders argument should only contain Order (%r)' % (order,))
+    self._orders = tuple(flattened)
+
+  @property
+  def orders(self):
+    return self._orders
+
+  def __repr__(self):
+    return '%s(%r)' % (self.__class__.__name__, list(self.orders))
+
+  def reversed(self):
+    return CompositeOrder([order.reversed() for order in self._orders])
 
   def _get_prop_names(self):
     names = set()
@@ -1207,6 +1394,24 @@
     else:
       self.__compiled_cursor = datastore_pb.CompiledCursor()
 
+  def __repr__(self):
+    arg = self.to_websafe_string()
+    if arg:
+      arg = '<%s>' % arg
+    return '%s(%s)' % (self.__class__.__name__, arg)
+
+  def reversed(self):
+    """Creates a cursor for use in a query with a reversed sort order."""
+    for pos in self.__compiled_cursor.position_list():
+      if pos.has_start_key():
+        raise datastore_errors.BadRequestError('Cursor cannot be reversed.')
+
+    rev_pb = datastore_pb.CompiledCursor()
+    rev_pb.CopyFrom(self.__compiled_cursor)
+    for pos in rev_pb.position_list():
+      pos.set_start_inclusive(not pos.start_inclusive())
+    return Cursor(_cursor_pb=rev_pb)
+
   def to_bytes(self):
     """Serialize cursor as a byte string."""
     return self.__compiled_cursor.Encode()
@@ -1361,8 +1566,14 @@
         raise datastore_errors.BadArgumentError(
             'ancestor argument should match namespace ("%r" != "%r")' %
             (ancestor.name_space(), namespace))
+
+      pb = entity_pb.Reference()
+      pb.CopyFrom(ancestor)
+      ancestor = pb
+      self.__ancestor = ancestor
       self.__path = ancestor.path().element_list()
     else:
+      self.__ancestor = None
       self.__path = None
 
     super(_QueryKeyFilter, self).__init__()
@@ -1371,6 +1582,23 @@
         datastore_types.ResolveNamespace(namespace).encode('utf-8'))
     self.__kind = kind and kind.encode('utf-8')
 
+  @property
+  def app(self):
+    return self.__app
+
+  @property
+  def namespace(self):
+    return self.__namespace
+
+  @property
+  def kind(self):
+    return self.__kind
+
+  @property
+  def ancestor(self):
+
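+    # None, or a private entity_pb.Reference copy of the ancestor key made in
+    # the constructor.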
+    return self.__ancestor
+
   def __call__(self, entity_or_reference):
     """Apply the filter.
 
@@ -1403,12 +1631,9 @@
     datastore_types.SetNamespace(pb, self.__namespace)
     if self.__kind is not None:
       pb.set_kind(self.__kind)
-    if self.__path:
+    if self.__ancestor:
       ancestor = pb.mutable_ancestor()
-      ancestor.set_app(pb.app())
-      datastore_types.SetNamespace(ancestor, self.__namespace)
-      for elm in self.__path:
-        ancestor.mutable_path().add_element().CopyFrom(elm)
+      ancestor.CopyFrom(self.__ancestor)
 
     return pb
 
@@ -1470,7 +1695,7 @@
       namespace: Optional namespace to query, derived from the environment if
         not specified.
       kind: Optional kind to query.
-      ancestor: Optional ancestor to query.
+      ancestor: Optional ancestor to query, an entity_pb.Reference.
       filter_predicate: Optional FilterPredicate by which to restrict the query.
       order: Optional Order in which to return results.
 
@@ -1498,12 +1723,57 @@
     self._order = order
     self._filter_predicate = filter_predicate
 
+  @property
+  def app(self):
+    return self._key_filter.app
+
+  @property
+  def namespace(self):
+    return self._key_filter.namespace
+
+  @property
+  def kind(self):
+    return self._key_filter.kind
+
+  @property
+  def ancestor(self):
+    return self._key_filter.ancestor
+
+  @property
+  def filter_predicate(self):
+    return self._filter_predicate
+
+  @property
+  def order(self):
+    return self._order
+
+  def __repr__(self):
+    args = []
+    args.append('app=%r' % self.app)
+    ns = self.namespace
+    if ns:
+      args.append('namespace=%r' % ns)
+    kind = self.kind
+    if kind is not None:
+      args.append('kind=%r' % kind)
+    ancestor = self.ancestor
+    if ancestor is not None:
+      websafe = base64.urlsafe_b64encode(ancestor.Encode())
+      args.append('ancestor=<%s>' % websafe)
+    filter_predicate = self.filter_predicate
+    if filter_predicate is not None:
+      args.append('filter_predicate=%r' % filter_predicate)
+    order = self.order
+    if order is not None:
+      args.append('order=%r' % order)
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
+
   def run_async(self, conn, query_options=None):
     if not isinstance(conn, datastore_rpc.BaseConnection):
       raise datastore_errors.BadArgumentError(
           'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
 
-    if not isinstance(query_options, QueryOptions):
+    if not QueryOptions.is_configuration(query_options):
 
 
       query_options = QueryOptions(config=query_options)
@@ -1711,7 +1981,7 @@
       raise datastore_errors.BadArgumentError(
           'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
 
-    if not isinstance(query_options, QueryOptions):
+    if not QueryOptions.is_configuration(query_options):
 
 
       query_options = QueryOptions(config=query_options)
@@ -1992,9 +2262,7 @@
     fetch_options, next_batch = self._make_next_batch(fetch_options)
     req = self._to_pb(fetch_options)
 
-
-    config = datastore_rpc.Configuration.merge(self.__query_options,
-                                               fetch_options)
+    config = self.__query_options.merge(fetch_options)
     return next_batch._make_query_result_rpc_call(
         'Next', config, req)
 
@@ -2053,7 +2321,7 @@
         yaml = datastore_index.IndexYamlForQuery(
             *datastore_index.CompositeIndexForQuery(rpc.request)[1:-1])
         raise datastore_errors.NeedIndexError(
-            str(exc) + '\nThis query needs this index:\n' + yaml)
+            str(exc) + '\nThe suggested index for this query is:\n' + yaml)
       raise
     query_result = rpc.response
     if query_result.has_compiled_query():
@@ -2063,12 +2331,7 @@
     self.__end_cursor = Cursor._from_query_result(query_result)
     self._skipped_results = query_result.skipped_results()
 
-
-
-    if (query_result.more_results() and
-        (isinstance(rpc.request, datastore_pb.Query) or
-         query_result.skipped_results() or
-         query_result.result_size())):
+    if query_result.more_results():
       self.__datastore_cursor = query_result.cursor()
       self.__more_results = True
     else:
diff --git a/google/appengine/datastore/datastore_rpc.py b/google/appengine/datastore/datastore_rpc.py
index 7735582..b18db16 100755
--- a/google/appengine/datastore/datastore_rpc.py
+++ b/google/appengine/datastore/datastore_rpc.py
@@ -44,6 +44,7 @@
            'IdentityAdapter',
            'MultiRpc',
            'TransactionalConnection',
+           'TransactionOptions',
           ]
 
 
@@ -51,7 +52,6 @@
 
 import collections
 import logging
-import os
 
 
 from google.appengine.datastore import entity_pb
@@ -120,6 +120,11 @@
     """Turn an entity_pb.EntityProto into a user-level entity."""
     raise NotImplementedError
 
+  def pb_to_index(self, pb):
+   """Turn an entity_pb.CompositeIndex into a user-level Index
+   representation."""
+   raise NotImplementedError
+
   def pb_to_query_result(self, pb, keys_only=False):
     """Turn an entity_pb.EntityProto into a user-level query result."""
     if keys_only:
@@ -163,6 +168,9 @@
   def entity_to_pb(self, entity):
     return entity
 
+  def pb_to_index(self, pb):
+    return pb
+
 
 class ConfigOption(object):
   """A descriptor for a Configuration option.
@@ -203,13 +211,16 @@
       datastore_errors.BadArgumentError if a given argument is not a
       configuration object.
     """
+    name = self.validator.__name__
     for config in args:
-      if isinstance(config, self._cls):
-        if self.validator.__name__ in config._values:
-          return config._values[self.validator.__name__]
-      elif config is not None and not isinstance(config, BaseConfiguration):
+
+      if isinstance(config, (type(None), apiproxy_stub_map.UserRPC)):
+        pass
+      elif not isinstance(config, BaseConfiguration):
         raise datastore_errors.BadArgumentError(
             'invalid config argument (%r)' % (config,))
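+      # Only return a value that was stored for this exact option descriptor;
+      # a different configuration class could define an option with the same
+      # name.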
+      elif name in config._values and self is config._options[name]:
+        return config._values[name]
     return None
 
 
@@ -222,6 +233,10 @@
   """
 
   def __new__(metaclass, classname, bases, classDict):
+    if classname == '_MergedConfiguration':
+
+      return type.__new__(metaclass, classname, bases, classDict)
+
     classDict['__slots__'] = ['_values']
     cls = type.__new__(metaclass, classname, bases, classDict)
     if object not in bases:
@@ -236,6 +251,8 @@
     return cls
 
 
+
+
 class BaseConfiguration(object):
   """A base class for a configuration object.
 
@@ -285,10 +302,12 @@
         return config
 
       for key, value in config._values.iteritems():
-        kwargs.setdefault(key, value)
+
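+        # Inherit a default only if this class (or one of its bases) actually
+        # defines the option; values belonging to unrelated configuration
+        # classes are dropped.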
+        if issubclass(cls, config._options[key]._cls):
+          kwargs.setdefault(key, value)
     else:
       raise datastore_errors.BadArgumentError(
-        'config argument should be Configuration (%r)' % (config,))
+          'config argument should be Configuration (%r)' % (config,))
 
     obj = super(BaseConfiguration, cls).__new__(cls)
     obj._values = {}
@@ -304,11 +323,9 @@
   def __eq__(self, other):
     if self is other:
       return True
-
-    if (not isinstance(other, self.__class__) and
-        not isinstance(self, other.__class__)):
+    if not isinstance(other, BaseConfiguration):
       return NotImplemented
-    return self._values == other._values
+    return self._options == other._options and self._values == other._values
 
   def __ne__(self, other):
     equal = self.__eq__(other)
@@ -316,11 +333,21 @@
       return equal
     return not equal
 
+  def __hash__(self):
+    return (hash(frozenset(self._values.iteritems())) ^
+            hash(frozenset(self._options.iteritems())))
+
+  def __repr__(self):
+    args = []
+    for key_value in sorted(self._values.iteritems()):
+      args.append('%s=%r' % key_value)
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
+
   def __is_stronger(self, **kwargs):
     """Internal helper to ask whether a configuration is stronger than another.
 
-    A configuration is stronger when every value it contains is equal to or
-    missing from the values in the kwargs.
+    A configuration is stronger when it contains every name/value pair in
+    kwargs.
 
     Example: a configuration with:
       (deadline=5, on_configuration=None, read_policy=EVENTUAL_CONSISTENCY)
@@ -344,6 +371,23 @@
         return False
     return True
 
+  @classmethod
+  def is_configuration(cls, obj):
+    """True if configuration obj handles all options of this class.
+
+    Use this method rather than isinstance(obj, cls) to test if a
+    configuration object handles the options of cls (is_configuration
+    is handled specially for results of merge which may handle the options
+    of unrelated configuration classes).
+
+    Args:
+      obj: the object to test.
+    """
+    return isinstance(obj, BaseConfiguration) and obj._is_configuration(cls)
+
+  def _is_configuration(self, cls):
+    return isinstance(self, cls)
+
   def merge(self, config):
     """Merge two configurations.
 
@@ -355,8 +399,13 @@
         be omitted).
 
     Returns:
-      Either a new Configuration object or (if it would be equivalent)
+      Either a new configuration object or (if it would be equivalent)
       self or the config argument unchanged, but never None.
+
+    Raises:
+      BadArgumentError if self and config are of configuration classes
+      with conflicting options (i.e. the same option name defined in
+      two different configuration classes).
     """
     if config is None or config is self:
 
@@ -364,22 +413,90 @@
 
 
 
-    if isinstance(config, self.__class__):
-      for key in self._values:
-        if key not in config._values:
-          break
-      else:
-        return config
-
-    if self.__is_stronger(**config._values):
-      return self
+    if not (isinstance(config, _MergedConfiguration) or
+            isinstance(self, _MergedConfiguration)):
 
 
-    obj = type(self)()
-    obj._values = self._values.copy()
-    obj._values.update(config._values)
+
+      if isinstance(config, self.__class__):
+        for key in self._values:
+          if key not in config._values:
+            break
+        else:
+          return config
+      if isinstance(self, config.__class__):
+        if self.__is_stronger(**config._values):
+          return self
+
+
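+      # _quick_merge resolves the merge within a single class hierarchy by
+      # copying values; if neither class subclasses the other, fall through
+      # to the _MergedConfiguration wrapper below.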
+      def _quick_merge(obj):
+        obj._values = self._values.copy()
+        obj._values.update(config._values)
+        return obj
+
+      if isinstance(config, self.__class__):
+        return _quick_merge(type(config)())
+      if isinstance(self, config.__class__):
+        return _quick_merge(type(self)())
+
+
+    return _MergedConfiguration(config, self)
+
+
+class _MergedConfiguration(BaseConfiguration):
+  """Helper class to handle merges of configurations.
+
+  Instances of _MergedConfiguration are in some sense "subclasses" of the
+  argument configurations, i.e.:
+  - they handle exactly the configuration options of the argument configurations
+  - the value of these options is taken in priority order from the arguments
+  - isinstance is true on this configuration if it is true on any of the
+    argument configurations
+  This class raises an exception if two argument configurations have an option
+  with the same name but coming from a different configuration class.
+  """
+  __slots__ = ['_values', '_configs', '_options', '_classes']
+
+  def __new__(cls, *configs):
+    obj = super(BaseConfiguration, cls).__new__(cls)
+    obj._configs = configs
+
+
+    obj._options = {}
+    for config in configs:
+      for name, option in config._options.iteritems():
+        if name in obj._options:
+          if option is not obj._options[name]:
+            error = ("merge conflict on '%s' from '%s' and '%s'" %
+                     (name, option._cls.__name__,
+                      obj._options[name]._cls.__name__))
+            raise datastore_errors.BadArgumentError(error)
+        obj._options[name] = option
+
+    obj._values = {}
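+    # Values are copied from the last argument to the first, so configurations
+    # earlier in the argument list win when several set the same option.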
+    for config in reversed(configs):
+      for name, value in config._values.iteritems():
+        obj._values[name] = value
+
     return obj
 
+  def __repr__(self):
+    return '%s%r' % (self.__class__.__name__, tuple(self._configs))
+
+  def _is_configuration(self, cls):
+    for config in self._configs:
+      if config._is_configuration(cls):
+        return True
+    return False
+
+  def __getattr__(self, name):
+    if name in self._options:
+      if name in self._values:
+        return self._values[name]
+      else:
+        return None
+    raise AttributeError("Configuration has no attribute '%s'" % (name,))
+
 
 class Configuration(BaseConfiguration):
   """Configuration parameters for datastore RPCs.
@@ -802,7 +919,7 @@
 
     if config is None:
       config = Configuration()
-    elif not isinstance(config, Configuration):
+    elif not Configuration.is_configuration(config):
       raise datastore_errors.BadArgumentError(
         'invalid config argument (%r)' % (config,))
     self.__config = config
@@ -928,15 +1045,6 @@
 
 
 
-  def _check_entity_group(self, key_pbs):
-    pass
-
-  def _update_entity_group(self, key_pbs):
-    pass
-
-  def _get_transaction(self, request=None):
-    return None
-
   def create_rpc(self, config=None):
     """Create an RPC object using the configuration parameters.
 
@@ -1239,7 +1347,6 @@
 
     def make_get_call(req, pbs, user_data=None):
       req.key_list().extend(pbs)
-      self._check_entity_group(req.key_list())
       self._set_request_transaction(req)
       resp = datastore_pb.GetResponse()
       return self.make_rpc_call(config, 'Get', req, resp,
@@ -1249,7 +1356,7 @@
     self._set_request_read_policy(base_req, config)
 
 
-    if isinstance(config, apiproxy_stub_map.UserRPC):
+    if isinstance(config, apiproxy_stub_map.UserRPC) or len(keys) <= 1:
       pbs = [self.__adapter.key_to_pb(key) for key in keys]
       return make_get_call(base_req, pbs, extra_hook)
 
@@ -1305,6 +1412,40 @@
       entities = rpc.user_data(entities)
     return entities
 
+  def get_indexes(self):
+    """Synchronous get indexes operation.
+
+    Returns:
+      A list of user-level index representations.
+    """
+    return self.async_get_indexes(None).get_result()
+
+  def async_get_indexes(self, config, extra_hook=None, _app=None):
+    """Asynchronous get indexes operation.
+
+    Args:
+      config: A Configuration object or None.  Defaults are taken from
+        the connection's default configuration.
+      extra_hook: Optional function to be called once the RPC has completed.
+
+    Returns:
+      A MultiRpc object.
+    """
+    req = api_base_pb.StringProto()
+    req.set_value(datastore_types.ResolveAppId(_app))
+    resp = datastore_pb.CompositeIndices()
+    return self.make_rpc_call(config, 'GetIndices', req, resp,
+                                self.__get_indexes_hook, extra_hook)
+
+  def __get_indexes_hook(self, rpc):
+    """Internal method used as get_result_hook for Get operation."""
+    self.check_rpc_success(rpc)
+    indexes = [self.__adapter.pb_to_index(index)
+               for index in rpc.response.index_list()]
+    if rpc.user_data:
+      indexes = rpc.user_data(indexes)
+    return indexes
+
   def put(self, entities):
     """Synchronous Put operation.
 
@@ -1339,7 +1480,6 @@
 
     def make_put_call(req, pbs, user_data=None):
       req.entity_list().extend(pbs)
-      self._check_entity_group(e.key() for e in req.entity_list())
       self._set_request_transaction(req)
       resp = datastore_pb.PutResponse()
       return self.make_rpc_call(config, 'Put', req, resp,
@@ -1347,16 +1487,14 @@
 
 
     base_req = datastore_pb.PutRequest()
+    if Configuration.force_writes(config, self.__config):
+      base_req.set_force(True)
 
 
-    if isinstance(config, apiproxy_stub_map.UserRPC):
-      if self.__config.force_writes:
-        base_req.set_force(True)
+    if isinstance(config, apiproxy_stub_map.UserRPC) or len(entities) <= 1:
       pbs = [self.__adapter.entity_to_pb(entity) for entity in entities]
       return make_put_call(base_req, pbs, extra_hook)
 
-    if Configuration.force_writes(config, self.__config):
-      base_req.set_force(True)
     base_size = self._get_base_size(base_req)
     max_count = (Configuration.max_put_entities(config, self.__config) or
                  self.MAX_PUT_ENTITIES)
@@ -1383,7 +1521,6 @@
   def __put_hook(self, rpc):
     """Internal method used as get_result_hook for Put operation."""
     self.check_rpc_success(rpc)
-    self._update_entity_group(rpc.response.key_list())
     keys = [self.__adapter.pb_to_key(pb)
             for pb in rpc.response.key_list()]
 
@@ -1418,7 +1555,6 @@
 
     def make_delete_call(req, pbs, user_data=None):
       req.key_list().extend(pbs)
-      self._check_entity_group(req.key_list())
       self._set_request_transaction(req)
       resp = datastore_pb.DeleteResponse()
       return self.make_rpc_call(config, 'Delete', req, resp,
@@ -1426,16 +1562,14 @@
 
 
     base_req = datastore_pb.DeleteRequest()
+    if Configuration.force_writes(config, self.__config):
+      base_req.set_force(True)
 
 
-    if isinstance(config, apiproxy_stub_map.UserRPC):
-      if self.__config.force_writes:
-        base_req.set_force(True)
+    if isinstance(config, apiproxy_stub_map.UserRPC) or len(keys) <= 1:
       pbs = [self.__adapter.key_to_pb(key) for key in keys]
       return make_delete_call(base_req, pbs, extra_hook)
 
-    if Configuration.force_writes(config, self.__config):
-      base_req.set_force(True)
     base_size = self._get_base_size(base_req)
     max_count = (Configuration.max_delete_keys(config, self.__config) or
                  self.MAX_DELETE_KEYS)
@@ -1487,7 +1621,7 @@
     """Asynchronous BeginTransaction operation.
 
     Args:
-      config: A Configuration object or None.  Defaults are taken from
+      config: A configuration object or None.  Defaults are taken from
         the connection's default configuration.
       app: Application ID.
 
@@ -1500,6 +1634,8 @@
         (app,))
     req = datastore_pb.BeginTransactionRequest()
     req.set_app(app)
+    if TransactionOptions.allow_multiple_entity_groups(config, self.__config):
+      req.set_allow_multiple_eg(True)
     resp = datastore_pb.Transaction()
     rpc = self.make_rpc_call(config, 'BeginTransaction', req, resp,
                              self.__begin_transaction_hook)
@@ -1535,20 +1671,19 @@
 
 
 
-  def new_transaction(self):
+  def new_transaction(self, config=None):
     """Create a new transactional connection based on this one.
 
     This is different from, and usually preferred over, the
     begin_transaction() method; new_transaction() returns a new
-    TransactionalConnection object which will begin the transaction
-    lazily.  This is necessary because the low-level
-    begin_transaction() method needs the app id which will be gleaned
-    from the transaction's entity group, which in turn is gleaned from
-    the first key used in the transaction.
-    """
+    TransactionalConnection object.
 
-    return TransactionalConnection(adapter=self.__adapter,
-                                   config=self.__config)
+    Args:
+      config: A configuration object for the new connection, merged
+        with this connection's config.
+    """
+    config = self.__config.merge(config)
+    return TransactionalConnection(adapter=self.__adapter, config=config)
 
 
 
@@ -1623,6 +1758,47 @@
     return pair
 
 
+class TransactionOptions(Configuration):
+  """An immutable class that contains options for a transaction."""
+
+  @ConfigOption
+  def allow_multiple_entity_groups(value):
+    """If the transaction can cover multiple entity groups.
+
+    Raises: datastore_errors.BadArgumentError if value is not a bool.
+    """
+    if not isinstance(value, bool):
+      raise datastore_errors.BadArgumentError(
+          'allow_multiple_entity_groups argument should be bool (%r)' %
+          (value,))
+    return value
+
+  @ConfigOption
+  def retries(value):
+    """How many retries to attempt on the transaction.
+
+    Raises: datastore_errors.BadArgumentError if value is not an integer or
+      is less than zero.
+    """
+    datastore_types.ValidateInteger(value,
+                                    'retries',
+                                    datastore_errors.BadArgumentError,
+                                    zero_ok=True)
+    return value
+
+  @ConfigOption
+  def app(value):
+    """The application in which to perform the transaction.
+
+    Raises: datastore_errors.BadArgumentError if value is not a string
+      or is the empty string.
+    """
+    datastore_types.ValidateString(value,
+                                   'app',
+                                   datastore_errors.BadArgumentError)
+    return value
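+
+  # Illustrative sketch only (not part of the original change; 'connection'
+  # stands for any BaseConnection): transaction options are built with
+  # keyword arguments and handed to new_transaction(), which merges them into
+  # the connection's configuration, e.g.
+  #   options = TransactionOptions(allow_multiple_entity_groups=True,
+  #                                retries=3)
+  #   txn_conn = connection.new_transaction(config=options)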
+
+
 class TransactionalConnection(BaseConnection):
   """A connection specific to one transaction.
 
@@ -1643,45 +1819,27 @@
         default IdentityAdapter.
       config: Optional Configuration object.
       transaction: Optional datastore_db.Transaction object.
-      entity_group: Optional user-level key to be used as entity group
-        constraining the transaction.  If specified, must be a
-        top-level key.
+      entity_group: Deprecated, do not use.
     """
     super(TransactionalConnection, self).__init__(adapter=adapter,
                                                   config=config)
     self.__adapter = self.adapter
-    if transaction is not None:
+    if transaction is None:
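+      # Kick off BeginTransaction asynchronously; the 'transaction' property
+      # blocks on this RPC the first time the handle is actually needed.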
+      app = TransactionOptions.app(self.config)
+      app = datastore_types.ResolveAppId(app)
+      self.__transaction_rpc = self.async_begin_transaction(None, app)
+    else:
       if not isinstance(transaction, datastore_pb.Transaction):
         raise datastore_errors.BadArgumentError(
           'Invalid transaction (%r)' % (transaction,))
-    self.__transaction = transaction
-    self.__entity_group_pb = None
-    if entity_group is not None:
-      self.__entity_group_pb = self.__adapter.key_to_pb(entity_group)
-      if self.__entity_group_pb.path().element_list()[1:]:
-        raise datastore_errors.BadArgumentError(
-          'Entity group must be a toplevel key')
-      if transaction is not None:
-        if self.__entity_group_pb.app() != transaction.app():
-          raise datastore_errors.BadArgumentError(
-            'Entity group app (%s) does not match transaction app (%s)' %
-            (self.__entity_group_pb.app(), transaction.app()))
+      self.__transaction = transaction
+      self.__transaction_rpc = None
     self.__finished = False
 
   def _get_base_size(self, base_req):
     """Internal helper: return size in bytes plus room for transaction."""
-    trans = self.__transaction
-    if trans is None:
-
-
-
-
-      incr_size = 1000
-    else:
-
-      incr_size = trans.lengthString(trans.ByteSize()) + 1
     return (super(TransactionalConnection, self)._get_base_size(base_req) +
-            incr_size)
+            self.transaction.lengthString(self.transaction.ByteSize()) + 1)
 
   @property
   def finished(self):
@@ -1689,12 +1847,11 @@
 
   @property
   def transaction(self):
+    if self.__transaction_rpc is not None:
+      self.__transaction = self.__transaction_rpc.get_result()
+      self.__transaction_rpc = None
     return self.__transaction
 
-  @property
-  def entity_group(self):
-    return self.adapter.pb_to_key(self.__entity_group_pb)
-
   def _set_request_transaction(self, request):
     """Set the current transaction on a request.
 
@@ -1708,133 +1865,12 @@
     Returns:
       A datastore_pb.Transaction object or None.
     """
-    transaction = self._get_transaction(request)
-    request.mutable_transaction().CopyFrom(transaction)
-    return transaction
-
-  def _check_entity_group(self, key_pbs):
-    """Check that a list of keys are consistent with the entity group.
-
-    This also updates the connection's entity group if necessary.
-
-    Args:
-      key_pbs: A list of entity_pb.Reference objects.
-
-    Raises:
-      datastore_errors.BadRequestError if one or more of the keys
-      refers to a different top-level key than the the connection's
-      entity group.
-    """
-    base_entity_group_pb = self.__entity_group_pb
-    for ref in key_pbs:
-      entity_group_pb = ref
-      if entity_group_pb.path().element_list()[1:]:
-        entity_group_pb = self.__adapter.new_key_pb()
-        entity_group_pb.CopyFrom(ref)
-        del entity_group_pb.path().element_list()[1:]
-      if base_entity_group_pb is None:
-
-
-
-
-
-        base_entity_group_pb = entity_group_pb
-      else:
-        pb1 = entity_group_pb.path().element(0)
-        ok = (entity_group_pb == base_entity_group_pb)
-        if ok:
-
-
-
-
-          ok = (entity_group_pb is base_entity_group_pb or
-                pb1.id() or pb1.name())
-        if not ok:
-          pb0 = base_entity_group_pb.path().element(0)
-          def helper(pb):
-            if pb.name():
-              return 'name=%r' % pb.name()
-            else:
-              return 'id=%r' % pb.id()
-          raise datastore_errors.BadRequestError(
-              'Cannot operate on different entity groups in a transaction: '
-              '(kind=%r, %s) and (kind=%r, %s).' %
-              (pb0.type(), helper(pb0), pb1.type(), helper(pb1)))
-    self.__entity_group_pb = base_entity_group_pb
-
-  def _update_entity_group(self, key_pbs):
-    """Patch up the entity group if we wrote an entity with an incomplete key.
-
-    This should be called after a put() which could have assigned a
-    key to an entity with an incomplete key.
-
-    Args:
-      key_pbs: A list of entity_pb.Reference objects.
-    """
-    if self.__entity_group_pb is None:
-      assert not key_pbs
-      return
-
-    pb = self.__entity_group_pb.path().element(0)
-    if pb.id() or pb.name():
-      return
-    if not key_pbs:
-      return
-    ref = key_pbs[0]
-    assert not ref.path().element_list()[1:]
-    self.__entity_group_pb = ref
-
-  def _get_transaction(self, request=None):
-    """Get the transaction object for the current connection.
-
-    This may send an RPC to get the transaction object and block
-    waiting for it to complete.
-
-    Args:
-      request: Optional request protobuf object.  This is only used
-        if it is a Query object; it is then used to extract the ancestor
-        key for purposes of checking or setting the entity group.
-
-    Returns:
-      A datastore_pb.Transaction object.
-
-    Raises:
-      datastore_errors.BadRequestError if the transaction is already
-      finished, or if the request argument represents an ancestor-less
-      query, or if the ancestor does not match the connection's entity
-      group.
-    """
-
     if self.__finished:
       raise datastore_errors.BadRequestError(
-        'Cannot start a new operation in a finished transaction.')
-    key_pbs = None
-    if isinstance(request, datastore_pb.Query):
-      ok = request.has_ancestor()
-      if ok:
-        ref = request.ancestor()
-        path = ref.path()
-        ok = path.element_size()
-        if ok:
-          elem = path.element(ok - 1)
-          ok = elem.id() or elem.name()
-      if not ok:
-        raise datastore_errors.BadRequestError(
-          'Only ancestor queries are allowed inside a transaction.')
-      key_pbs = [ref]
-    if key_pbs is not None:
-      self._check_entity_group(key_pbs)
-
-    if self.__transaction is not None:
-      return self.__transaction
-    app = None
-    if self.__entity_group_pb is not None:
-      app = self.__entity_group_pb.app()
-    if app is None:
-
-      app = os.getenv('APPLICATION_ID')
-    self.__transaction = self.begin_transaction(app)
-    return self.__transaction
+          'Cannot start a new operation in a finished transaction.')
+    transaction = self.transaction
+    request.mutable_transaction().CopyFrom(transaction)
+    return transaction
 
   def _end_transaction(self):
     """Finish the current transaction.
@@ -1857,7 +1893,7 @@
 
     self.wait_for_all_pending_rpcs()
     assert not self.get_pending_rpcs()
-    transaction = self.__transaction
+    transaction = self.transaction
     self.__finished = True
     self.__transaction = None
     return transaction
diff --git a/google/appengine/datastore/datastore_stub_util.py b/google/appengine/datastore/datastore_stub_util.py
index 04f0529..d6d659b 100644
--- a/google/appengine/datastore/datastore_stub_util.py
+++ b/google/appengine/datastore/datastore_stub_util.py
@@ -75,8 +75,10 @@
     }
 
 
+
 _SCATTER_PROPORTION = 32768
 
+
 def _GetScatterProperty(entity_proto):
   """Gets the scatter property for an object.
 
@@ -118,6 +120,7 @@
     '__scatter__' : (False, True, _GetScatterProperty)
     }
 
+
 def GetInvisibleSpecialPropertyNames():
   """Gets the names of all non user-visible special properties."""
   invisible_names = []
@@ -127,6 +130,7 @@
       invisible_names.append(name)
   return invisible_names
 
+
 def _PrepareSpecialProperties(entity_proto, is_load):
   """Computes special properties for loading or storing.
   Strips other special properties."""
@@ -282,7 +286,7 @@
     max_query_components: limit on query complexity
   """
 
-  key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY
+  key_prop_name = datastore_types.KEY_SPECIAL_PROPERTY
   unapplied_log_timestamp_us_name = (
       datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY)
 
@@ -462,7 +466,7 @@
 
   remaining_filters = []
   key_range = ValueRange()
-  key_prop = datastore_types._KEY_SPECIAL_PROPERTY
+  key_prop = datastore_types.KEY_SPECIAL_PROPERTY
   for f in filters:
     op = f.op()
     if not (f.property_size() == 1 and
@@ -481,7 +485,7 @@
   remaining_orders = []
   for o in orders:
     if not (o.direction() == datastore_pb.Query_Order.ASCENDING and
-            o.property() == datastore_types._KEY_SPECIAL_PROPERTY):
+            o.property() == datastore_types.KEY_SPECIAL_PROPERTY):
       remaining_orders.append(o)
     else:
       break
@@ -611,6 +615,7 @@
 
   return key_range
 
+
 def _PropertyKeyToString(key, default_property):
   """Extract property name from __property__ key.
 
@@ -1021,7 +1026,8 @@
       Check(self._entity_group == entity_group,
             'Transactions cannot span entity groups')
     else:
-      Check(self._app == reference.app())
+      Check(self._app == reference.app(),
+            'Transactions cannot span applications')
       self._entity_group = entity_group
 
   def _CheckOrSetSnapshot(self, reference):
@@ -1443,7 +1449,7 @@
     transaction.set_handle(id(txn))
     return transaction
 
-  def GetTxn(self, transaction, request_trusted=False, request_app=None):
+  def GetTxn(self, transaction, request_trusted, request_app):
     """Gets the LiveTxn object associated with the given transaction.
 
     Args:
@@ -1472,6 +1478,19 @@
     finally:
       self._meta_data_lock.release()
 
+  def Flush(self):
+    """Applies all outstanding transactions."""
+    for meta_data in self._meta_data.itervalues():
+      if not meta_data._apply_queue:
+        continue
+
+
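+      # Take the group's write lock so the queued transactions can be applied
+      # without racing concurrent writers.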
+      meta_data._write_lock.acquire()
+      try:
+        meta_data.CatchUp()
+      finally:
+        meta_data._write_lock.release()
+
   def _GetMetaData(self, entity_group):
     """Safely gets the EntityGroupMetaData object for the given entity_group.
     """
@@ -1763,6 +1782,12 @@
     self._require_indexes = require_indexes
     self._pseudo_kinds = {}
 
+  def __del__(self):
+
+
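+    # Best-effort cleanup: apply any outstanding transactions and persist the
+    # stub's data before the datastore object is collected.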
+    self.Flush()
+    self.Write()
+
   def Clear(self):
     """Clears out all stored values."""
 
@@ -1808,7 +1833,7 @@
 
       Check(raw_query.kind() not in self._pseudo_kinds,
             'transactional queries on "%s" not allowed' % raw_query.kind())
-      txn = self.GetTxn(raw_query.transaction())
+      txn = self.GetTxn(raw_query.transaction(), trusted, calling_app)
       return txn.GetQueryCursor(raw_query, filters, orders)
 
     if raw_query.has_ancestor() and raw_query.kind() not in self._pseudo_kinds:
@@ -1862,7 +1887,7 @@
     if transaction:
 
       Check(len(grouped_keys) == 1, 'Transactions cannot span entity groups')
-      txn = self.GetTxn(transaction)
+      txn = self.GetTxn(transaction, trusted, calling_app)
 
       return [txn.Get(key) for key, _ in grouped_keys.values()[0]]
     else:
@@ -2019,43 +2044,28 @@
     Args:
       query: the datastore_pb.Query to check
     """
-
-
     if query.kind() in self._pseudo_kinds or not self._require_indexes:
       return
 
+    minimal_index = datastore_index.MinimalCompositeIndexForQuery(query,
+        (datastore_index.ProtoToIndexDefinition(index)
+        for index in self.GetIndexes(query.app())
+        if index.state() == datastore_pb.CompositeIndex.READ_WRITE))
+    if minimal_index is not None:
+      msg = ('This query requires a composite index that is not defined. '
+          'You must update the index.yaml file in your application root.')
+      if not minimal_index[0]:
 
-    required, kind, ancestor, props, num_eq_filters = datastore_index.CompositeIndexForQuery(query)
-
-    if not required:
-      return
-
-    indexes = self.GetIndexes(query.app())
-    eq_filters_set = set(props[:num_eq_filters])
-    remaining_filters = props[num_eq_filters:]
-    required_key = kind, ancestor, props
-    for index in indexes:
-      definition = datastore_index.ProtoToIndexDefinition(index)
-      index_key = datastore_index.IndexToKey(definition)
-      if required_key == index_key:
-        break
-      if num_eq_filters > 1 and (kind, ancestor) == index_key[:2]:
-
-        this_props = index_key[2]
-        this_eq_filters_set = set(this_props[:num_eq_filters])
-        this_remaining_filters = this_props[num_eq_filters:]
-        if (eq_filters_set == this_eq_filters_set and
-            remaining_filters == this_remaining_filters):
-          break
-    else:
-
-      raise apiproxy_errors.ApplicationError(
-          datastore_pb.Error.NEED_INDEX,
-          "This query requires a composite index that is not defined. "
-          "You must update the index.yaml file in your application root.")
+        yaml = datastore_index.IndexYamlForQuery(*minimal_index[1:])
+        msg += '\nThe following index is the minimum index required:\n' + yaml
+      raise apiproxy_errors.ApplicationError(datastore_pb.Error.NEED_INDEX, msg)
 
 
 
+  def Write(self):
+    """Writes the datastore to disk."""
+    raise NotImplementedError
+
   def _GetQueryCursor(self, query, filters, orders):
     """Runs the given datastore_pb.Query and returns a QueryCursor for it.
 
@@ -2214,7 +2224,7 @@
       return
 
     transaction = request.add_request_list()[0].transaction()
-    txn = self._datastore.GetTxn(transaction)
+    txn = self._datastore.GetTxn(transaction, self._trusted, self._app_id)
     new_actions = []
     for add_request in request.add_request_list():
 
diff --git a/google/appengine/datastore/document_pb.py b/google/appengine/datastore/document_pb.py
index 34b69f7..9826e39 100644
--- a/google/appengine/datastore/document_pb.py
+++ b/google/appengine/datastore/document_pb.py
@@ -31,12 +31,14 @@
   HTML         =    1
   ATOM         =    2
   DATE         =    3
+  NUMBER       =    4
 
   _ContentType_NAMES = {
     0: "TEXT",
     1: "HTML",
     2: "ATOM",
     3: "DATE",
+    4: "NUMBER",
   }
 
   def ContentType_Name(cls, x): return cls._ContentType_NAMES.get(x, "")
@@ -45,11 +47,9 @@
   has_type_ = 0
   type_ = 0
   has_language_ = 0
-  language_ = ""
+  language_ = "en"
   has_string_value_ = 0
   string_value_ = ""
-  has_date_value_ = 0
-  date_value_ = ""
 
   def __init__(self, contents=None):
     if contents is not None: self.MergeFromString(contents)
@@ -76,7 +76,7 @@
   def clear_language(self):
     if self.has_language_:
       self.has_language_ = 0
-      self.language_ = ""
+      self.language_ = "en"
 
   def has_language(self): return self.has_language_
 
@@ -93,26 +93,12 @@
 
   def has_string_value(self): return self.has_string_value_
 
-  def date_value(self): return self.date_value_
-
-  def set_date_value(self, x):
-    self.has_date_value_ = 1
-    self.date_value_ = x
-
-  def clear_date_value(self):
-    if self.has_date_value_:
-      self.has_date_value_ = 0
-      self.date_value_ = ""
-
-  def has_date_value(self): return self.has_date_value_
-
 
   def MergeFrom(self, x):
     assert x is not self
     if (x.has_type()): self.set_type(x.type())
     if (x.has_language()): self.set_language(x.language())
     if (x.has_string_value()): self.set_string_value(x.string_value())
-    if (x.has_date_value()): self.set_date_value(x.date_value())
 
   def Equals(self, x):
     if x is self: return 1
@@ -122,8 +108,6 @@
     if self.has_language_ and self.language_ != x.language_: return 0
     if self.has_string_value_ != x.has_string_value_: return 0
     if self.has_string_value_ and self.string_value_ != x.string_value_: return 0
-    if self.has_date_value_ != x.has_date_value_: return 0
-    if self.has_date_value_ and self.date_value_ != x.date_value_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -135,7 +119,6 @@
     if (self.has_type_): n += 1 + self.lengthVarInt64(self.type_)
     if (self.has_language_): n += 1 + self.lengthString(len(self.language_))
     if (self.has_string_value_): n += 1 + self.lengthString(len(self.string_value_))
-    if (self.has_date_value_): n += 1 + self.lengthString(len(self.date_value_))
     return n
 
   def ByteSizePartial(self):
@@ -143,14 +126,12 @@
     if (self.has_type_): n += 1 + self.lengthVarInt64(self.type_)
     if (self.has_language_): n += 1 + self.lengthString(len(self.language_))
     if (self.has_string_value_): n += 1 + self.lengthString(len(self.string_value_))
-    if (self.has_date_value_): n += 1 + self.lengthString(len(self.date_value_))
     return n
 
   def Clear(self):
     self.clear_type()
     self.clear_language()
     self.clear_string_value()
-    self.clear_date_value()
 
   def OutputUnchecked(self, out):
     if (self.has_type_):
@@ -162,9 +143,6 @@
     if (self.has_string_value_):
       out.putVarInt32(26)
       out.putPrefixedString(self.string_value_)
-    if (self.has_date_value_):
-      out.putVarInt32(34)
-      out.putPrefixedString(self.date_value_)
 
   def OutputPartial(self, out):
     if (self.has_type_):
@@ -176,9 +154,6 @@
     if (self.has_string_value_):
       out.putVarInt32(26)
       out.putPrefixedString(self.string_value_)
-    if (self.has_date_value_):
-      out.putVarInt32(34)
-      out.putPrefixedString(self.date_value_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -192,9 +167,6 @@
       if tt == 26:
         self.set_string_value(d.getPrefixedString())
         continue
-      if tt == 34:
-        self.set_date_value(d.getPrefixedString())
-        continue
 
 
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
@@ -206,7 +178,6 @@
     if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatInt32(self.type_))
     if self.has_language_: res+=prefix+("language: %s\n" % self.DebugFormatString(self.language_))
     if self.has_string_value_: res+=prefix+("string_value: %s\n" % self.DebugFormatString(self.string_value_))
-    if self.has_date_value_: res+=prefix+("date_value: %s\n" % self.DebugFormatString(self.date_value_))
     return res
 
 
@@ -216,23 +187,20 @@
   ktype = 1
   klanguage = 2
   kstring_value = 3
-  kdate_value = 4
 
   _TEXT = _BuildTagLookupTable({
     0: "ErrorCode",
     1: "type",
     2: "language",
     3: "string_value",
-    4: "date_value",
-  }, 4)
+  }, 3)
 
   _TYPES = _BuildTagLookupTable({
     0: ProtocolBuffer.Encoder.NUMERIC,
     1: ProtocolBuffer.Encoder.NUMERIC,
     2: ProtocolBuffer.Encoder.STRING,
     3: ProtocolBuffer.Encoder.STRING,
-    4: ProtocolBuffer.Encoder.STRING,
-  }, 4, ProtocolBuffer.Encoder.MAX_TYPE)
+  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)
 
 
   _STYLE = """"""
@@ -533,10 +501,10 @@
   def Storage_Name(cls, x): return cls._Storage_NAMES.get(x, "")
   Storage_Name = classmethod(Storage_Name)
 
-  has_doc_id_ = 0
-  doc_id_ = ""
+  has_id_ = 0
+  id_ = ""
   has_language_ = 0
-  language_ = ""
+  language_ = "en"
   has_order_id_ = 0
   order_id_ = 0
   has_storage_ = 0
@@ -546,18 +514,18 @@
     self.field_ = []
     if contents is not None: self.MergeFromString(contents)
 
-  def doc_id(self): return self.doc_id_
+  def id(self): return self.id_
 
-  def set_doc_id(self, x):
-    self.has_doc_id_ = 1
-    self.doc_id_ = x
+  def set_id(self, x):
+    self.has_id_ = 1
+    self.id_ = x
 
-  def clear_doc_id(self):
-    if self.has_doc_id_:
-      self.has_doc_id_ = 0
-      self.doc_id_ = ""
+  def clear_id(self):
+    if self.has_id_:
+      self.has_id_ = 0
+      self.id_ = ""
 
-  def has_doc_id(self): return self.has_doc_id_
+  def has_id(self): return self.has_id_
 
   def language(self): return self.language_
 
@@ -568,7 +536,7 @@
   def clear_language(self):
     if self.has_language_:
       self.has_language_ = 0
-      self.language_ = ""
+      self.language_ = "en"
 
   def has_language(self): return self.has_language_
 
@@ -617,7 +585,7 @@
 
   def MergeFrom(self, x):
     assert x is not self
-    if (x.has_doc_id()): self.set_doc_id(x.doc_id())
+    if (x.has_id()): self.set_id(x.id())
     if (x.has_language()): self.set_language(x.language())
     for i in xrange(x.field_size()): self.add_field().CopyFrom(x.field(i))
     if (x.has_order_id()): self.set_order_id(x.order_id())
@@ -625,8 +593,8 @@
 
   def Equals(self, x):
     if x is self: return 1
-    if self.has_doc_id_ != x.has_doc_id_: return 0
-    if self.has_doc_id_ and self.doc_id_ != x.doc_id_: return 0
+    if self.has_id_ != x.has_id_: return 0
+    if self.has_id_ and self.id_ != x.id_: return 0
     if self.has_language_ != x.has_language_: return 0
     if self.has_language_ and self.language_ != x.language_: return 0
     if len(self.field_) != len(x.field_): return 0
@@ -640,17 +608,17 @@
 
   def IsInitialized(self, debug_strs=None):
     initialized = 1
-    if (not self.has_doc_id_):
+    if (not self.has_id_):
       initialized = 0
       if debug_strs is not None:
-        debug_strs.append('Required field: doc_id not set.')
+        debug_strs.append('Required field: id not set.')
     for p in self.field_:
       if not p.IsInitialized(debug_strs): initialized=0
     return initialized
 
   def ByteSize(self):
     n = 0
-    n += self.lengthString(len(self.doc_id_))
+    n += self.lengthString(len(self.id_))
     if (self.has_language_): n += 1 + self.lengthString(len(self.language_))
     n += 1 * len(self.field_)
     for i in xrange(len(self.field_)): n += self.lengthString(self.field_[i].ByteSize())
@@ -660,9 +628,9 @@
 
   def ByteSizePartial(self):
     n = 0
-    if (self.has_doc_id_):
+    if (self.has_id_):
       n += 1
-      n += self.lengthString(len(self.doc_id_))
+      n += self.lengthString(len(self.id_))
     if (self.has_language_): n += 1 + self.lengthString(len(self.language_))
     n += 1 * len(self.field_)
     for i in xrange(len(self.field_)): n += self.lengthString(self.field_[i].ByteSizePartial())
@@ -671,7 +639,7 @@
     return n
 
   def Clear(self):
-    self.clear_doc_id()
+    self.clear_id()
     self.clear_language()
     self.clear_field()
     self.clear_order_id()
@@ -679,7 +647,7 @@
 
   def OutputUnchecked(self, out):
     out.putVarInt32(10)
-    out.putPrefixedString(self.doc_id_)
+    out.putPrefixedString(self.id_)
     if (self.has_language_):
       out.putVarInt32(18)
       out.putPrefixedString(self.language_)
@@ -695,9 +663,9 @@
       out.putVarInt32(self.storage_)
 
   def OutputPartial(self, out):
-    if (self.has_doc_id_):
+    if (self.has_id_):
       out.putVarInt32(10)
-      out.putPrefixedString(self.doc_id_)
+      out.putPrefixedString(self.id_)
     if (self.has_language_):
       out.putVarInt32(18)
       out.putPrefixedString(self.language_)
@@ -716,7 +684,7 @@
     while d.avail() > 0:
       tt = d.getVarInt32()
       if tt == 10:
-        self.set_doc_id(d.getPrefixedString())
+        self.set_id(d.getPrefixedString())
         continue
       if tt == 18:
         self.set_language(d.getPrefixedString())
@@ -741,7 +709,7 @@
 
   def __str__(self, prefix="", printElemNumber=0):
     res=""
-    if self.has_doc_id_: res+=prefix+("doc_id: %s\n" % self.DebugFormatString(self.doc_id_))
+    if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatString(self.id_))
     if self.has_language_: res+=prefix+("language: %s\n" % self.DebugFormatString(self.language_))
     cnt=0
     for e in self.field_:
@@ -759,7 +727,7 @@
   def _BuildTagLookupTable(sparse, maxtag, default=None):
     return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
 
-  kdoc_id = 1
+  kid = 1
   klanguage = 2
   kfield = 3
   korder_id = 4
@@ -767,7 +735,7 @@
 
   _TEXT = _BuildTagLookupTable({
     0: "ErrorCode",
-    1: "doc_id",
+    1: "id",
     2: "language",
     3: "field",
     4: "order_id",
diff --git a/google/appengine/dist/_library.py b/google/appengine/dist/_library.py
index 44dbca7..57fde4d 100755
--- a/google/appengine/dist/_library.py
+++ b/google/appengine/dist/_library.py
@@ -308,8 +308,8 @@
 
     sys.path[:] = [dirname
                    for dirname in sys.path
-                   if not (dirname.startswith(PYTHON_LIB) and
-                           'django' in dirname)]
+                   if not dirname.startswith(os.path.join(
+                       PYTHON_LIB, 'lib', 'django'))]
 
 
 
diff --git a/google/appengine/ext/admin/__init__.py b/google/appengine/ext/admin/__init__.py
index d421c44..2176759 100755
--- a/google/appengine/ext/admin/__init__.py
+++ b/google/appengine/ext/admin/__init__.py
@@ -29,6 +29,7 @@
 
 
 import cgi
+import collections
 import csv
 import cStringIO
 import datetime
@@ -50,6 +51,7 @@
 
 
 
+
 try:
   from google.appengine.cron import groctimespecification
   from google.appengine.api import croninfo
@@ -142,6 +144,7 @@
         'request': self.request,
         'home_path': base_path + DefaultPageHandler.PATH,
         'datastore_path': base_path + DatastoreQueryHandler.PATH,
+        'datastore_indexes': base_path + DatastoreGetIndexesHandler.PATH,
         'datastore_edit_path': base_path + DatastoreEditHandler.PATH,
         'datastore_batch_edit_path': base_path + DatastoreBatchEditHandler.PATH,
         'datastore_stats_path': base_path + DatastoreStatsHandler.PATH,
@@ -916,6 +919,33 @@
     self.redirect(next)
 
 
+class DatastoreGetIndexesHandler(BaseRequestHandler):
+  """Our main request handler that displays indexes"""
+
+  PATH = '/datastore_indexes'
+
+  def get(self):
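+    # Group index descriptions by kind: datastore.GetIndexes() is iterated
+    # below as (index, state) pairs, and each entry is filed under its kind
+    # so the template can render one section per kind.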
+    indexes = collections.defaultdict(list)
+    for index, state in datastore.GetIndexes():
+      properties = []
+      for property_name, sort_direction in index.Properties():
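+        # sort_direction is expected to be 1 (ascending) or 2 (descending);
+        # subtracting 1 picks the matching arrow entity and direction label.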
+        properties.append({
+          'name': property_name,
+          'sort_symbol': ('&#x25b2;', '&#x25bc;')[sort_direction - 1],
+          'sort_direction': ('ASCENDING', 'DESCENDING')[sort_direction - 1]
+        })
+      kind = str(index.Kind())
+      kind_indexes = indexes[kind]
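+      # The numeric index state is assumed here to map 0..3 onto the
+      # BUILDING/SERVING/DELETING/ERROR labels shown in the status column.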
+      kind_indexes.append({
+        'id': str(index.Id()),
+        'status': ('BUILDING', 'SERVING', 'DELETING', 'ERROR')[state],
+        'has_ancestor': bool(index.HasAncestor()),
+        'properties': properties
+      })
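+    # Render with kinds sorted alphabetically so the listing order is stable.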
+    self.generate('datastore_indexes.html',
+                  {'request': self.request, 'indexes': sorted(indexes.items())})
+
+
 class DatastoreRequestHandler(BaseRequestHandler):
   """The base request handler for our datastore admin pages.
 
@@ -1810,6 +1840,7 @@
 
 def main():
   handlers = [
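+      # Register the new index-listing page alongside the other admin pages;
+      # like them, its PATH is matched as a suffix of the request path.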
+      ('.*' + DatastoreGetIndexesHandler.PATH, DatastoreGetIndexesHandler),
       ('.*' + DatastoreQueryHandler.PATH, DatastoreQueryHandler),
       ('.*' + DatastoreEditHandler.PATH, DatastoreEditHandler),
       ('.*' + DatastoreBatchEditHandler.PATH, DatastoreBatchEditHandler),
diff --git a/google/appengine/ext/admin/datastore_stats_generator.py b/google/appengine/ext/admin/datastore_stats_generator.py
index 1004f59..fbd9796 100644
--- a/google/appengine/ext/admin/datastore_stats_generator.py
+++ b/google/appengine/ext/admin/datastore_stats_generator.py
@@ -249,7 +249,8 @@
       count = 0
 
 
-    self.__Increment(self.namespace_stats, count,
+    self.__Increment(
+        self.namespace_stats, count,
         (stats.NamespaceGlobalStat, 'total_entity_usage', namespace), size)
 
   def __Increment(self, stats_dict, count, stat_key, size, **kwds):
diff --git a/google/appengine/ext/admin/templates/base.html b/google/appengine/ext/admin/templates/base.html
index 4a27535..dd42103 100644
--- a/google/appengine/ext/admin/templates/base.html
+++ b/google/appengine/ext/admin/templates/base.html
@@ -40,6 +40,7 @@
 
             <ul id="menu">
               <li><a href="{{ datastore_path }}">Datastore Viewer</a></li>
+              <li><a href="{{ datastore_indexes }}">Datastore Indexes</a></li>
               <li><a href="{{ datastore_stats_path }}">Datastore Stats</a></li>
               <li><a href="{{ interactive_path }}">Interactive Console</a></li>
               <li><a href="{{ memcache_path }}">Memcache Viewer</a></li>
diff --git a/google/appengine/ext/admin/templates/css/datastore_indexes.css b/google/appengine/ext/admin/templates/css/datastore_indexes.css
new file mode 100644
index 0000000..382cbaa
--- /dev/null
+++ b/google/appengine/ext/admin/templates/css/datastore_indexes.css
@@ -0,0 +1,57 @@
+.ae-datastore-index-name {
+  font-size: 1.2em;
+  font-weight:bold;
+}
+
+.ae-datastore-index-status {
+  border:1px solid #c0dfbf;
+  background:#f3f7f3;
+  margin:0 25px 0 0;
+  padding:3px
+}
+
+#ae-datastore-index-status-col {
+  width:15%
+}
+
+.ae-datastore-index-status-Building {
+  border-color:#edebcd;
+  background:#fefdec
+}
+
+.ae-datastore-index-status-Deleting {
+  border-color:#ccc;
+  background:#eee
+}
+
+.ae-datastore-index-status-Error {
+  border-color:#ffd3b4;
+  background:#ffeae0
+}
+
+.ae-datastore-index-defs {
+  padding-left:20px
+}
+
+.ae-datastore-index-defs-row {
+  border-top:1px solid #ddd
+}
+
+.ae-datastore-index-defs .ae-unimportant {
+  font-size:.8em
+}
+
+.ae-unimportant {
+  color: #666;
+  font-size: 80%;
+}
+
+.ae-nowrap {
+  white-space: nowrap;
+}
+
+.ae-field-hint {
+  margin-top:.2em;
+  color:#666667;
+  font-size:.85em;
+}
diff --git a/google/appengine/ext/admin/templates/datastore_indexes.html b/google/appengine/ext/admin/templates/datastore_indexes.html
new file mode 100644
index 0000000..9153fb7
--- /dev/null
+++ b/google/appengine/ext/admin/templates/datastore_indexes.html
@@ -0,0 +1,67 @@
+{% extends "base.html" %}
+
+{% block title %}{{ application_name }} Development Console - Datastore Indexes{% endblock %}
+
+{% block head %}
+  <style type="text/css">{% include "css/ae.css" %}</style>
+  <style type="text/css">{% include "css/datastore.css" %}</style>
+  <style type="text/css">{% include "css/datastore_indexes.css" %}</style>
+  <script type="text/javascript">
+  //<![CDATA[
+
+  //]]>
+  </script>
+{% endblock %}
+{% block body %}
+  {% if indexes %}
+    <p>
+    Below are the indexes for this application.
+    Indexes are managed in an index.yaml file.<br/>
+    Learn more about
+    <a href="http://code.google.com/appengine/kb/general.html#indexes" target="_blank">indexes</a>.
+    </p>
+    <table id="ah-indexes" class="ae-table">
+      <colgroup>
+      <col>
+        <col id="ae-datastore-index-status-col">
+      </colgroup>
+      <thead>
+        <tr>
+          <th>Entity and Indexes</th>
+          <th>Status</th>
+        </tr>
+      </thead>
+      <tbody>
+      {% for kind_indexes in indexes %}
+        <tr class="{% if forloop.counter|divisibleby:2 %}ae-even{% else %}ae-odd{% endif %}">
+          <td colspan="2" class="ae-datastore-index-name">{{ kind_indexes.0 }}</td>
+        </tr>
+        {% for kind_index in kind_indexes.1 %}
+          <tr class="ae-datastore-index-defs-row {% if forloop.parentloop.counter|divisibleby:2 %}ae-even{% else %}ae-odd{% endif %}"/>
+            <td class="ae-datastore-index-defs" valign="top">
+              &nbsp;&nbsp;&nbsp;
+              {% for property in kind_index.properties %}
+                {{ property.name }}
+                <span class="ae-unimportant" title="{{ property.sort_direction }}">{{ property.sort_symbol }}</span>
+              {% endfor %}&nbsp;
+              {% if kind_index.has_ancestor %}
+                <div class="ae-unimportant">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Includes ancestors</div>
+              {% endif %}
+            </td>
+            <td>
+              <div class="ae-datastore-index-status">
+                <strong>{{ kind_index.status }}</strong>
+                <div class="ae-nowrap ae-field-hint"></div>
+              </div>
+            </td>
+          </tr>
+        {% endfor %}
+      {% endfor %}
+      </tbody>
+    </table>
+  {% else %}
+    <p id="no_kind" style="font-size: medium">
+      Datastore contains no indexes.
+    </p>
+  {% endif %}
+{% endblock %}
diff --git a/google/appengine/ext/appstats/static/appstats_js.js b/google/appengine/ext/appstats/static/appstats_js.js
index 90fa86b..ff2d952 100755
--- a/google/appengine/ext/appstats/static/appstats_js.js
+++ b/google/appengine/ext/appstats/static/appstats_js.js
@@ -1,80 +1,79 @@
 /* Copyright 2008-10 Google Inc. All Rights Reserved. */ (function(){function e(a){throw a;}
-var i=void 0,m=null,n,o=this,aa=function(a,b){var c=a.split("."),d=o;!(c[0]in d)&&d.execScript&&d.execScript("var "+c[0]);for(var f;c.length&&(f=c.shift());)!c.length&&b!==i?d[f]=b:d=d[f]?d[f]:d[f]={}},ba=function(){},ca=function(a){a.Q=function(){return a.ac||(a.ac=new a)}},da=function(a){var b=typeof a;if(b=="object")if(a){if(a instanceof Array)return"array";else if(a instanceof Object)return b;var c=Object.prototype.toString.call(a);if(c=="[object Window]")return"object";if(c=="[object Array]"||
+var i=void 0,m=null,n,o=this,aa=function(a,b){var c=a.split("."),d=o;!(c[0]in d)&&d.execScript&&d.execScript("var "+c[0]);for(var f;c.length&&(f=c.shift());)!c.length&&b!==i?d[f]=b:d=d[f]?d[f]:d[f]={}},ba=function(){},ca=function(a){a.Q=function(){return a.$b||(a.$b=new a)}},da=function(a){var b=typeof a;if(b=="object")if(a){if(a instanceof Array)return"array";else if(a instanceof Object)return b;var c=Object.prototype.toString.call(a);if(c=="[object Window]")return"object";if(c=="[object Array]"||
 typeof a.length=="number"&&typeof a.splice!="undefined"&&typeof a.propertyIsEnumerable!="undefined"&&!a.propertyIsEnumerable("splice"))return"array";if(c=="[object Function]"||typeof a.call!="undefined"&&typeof a.propertyIsEnumerable!="undefined"&&!a.propertyIsEnumerable("call"))return"function"}else return"null";else if(b=="function"&&typeof a.call=="undefined")return"object";return b},ea=function(a){return da(a)=="array"},fa=function(a){var b=da(a);return b=="array"||b=="object"&&typeof a.length==
 "number"},p=function(a){return typeof a=="string"},q=function(a){return da(a)=="function"},ga=function(a){a=da(a);return a=="object"||a=="array"||a=="function"},s=function(a){return a[ha]||(a[ha]=++ia)},ha="closure_uid_"+Math.floor(Math.random()*2147483648).toString(36),ia=0,ja=function(a,b){var c=Array.prototype.slice.call(arguments,1);return function(){var b=Array.prototype.slice.call(arguments);b.unshift.apply(b,c);return a.apply(this,b)}},t=function(a,b){function c(){}c.prototype=b.prototype;
 a.c=b.prototype;a.prototype=new c;a.prototype.constructor=a};var ka=function(a){this.stack=Error().stack||"";if(a)this.message=String(a)};t(ka,Error);ka.prototype.name="CustomError";var la=function(a,b){for(var c=1;c<arguments.length;c++)var d=String(arguments[c]).replace(/\$/g,"$$$$"),a=a.replace(/\%s/,d);return a},ma=function(a){return a.replace(/^[\s\xa0]+|[\s\xa0]+$/g,"")},sa=function(a){if(!na.test(a))return a;a.indexOf("&")!=-1&&(a=a.replace(oa,"&amp;"));a.indexOf("<")!=-1&&(a=a.replace(pa,"&lt;"));a.indexOf(">")!=-1&&(a=a.replace(qa,"&gt;"));a.indexOf('"')!=-1&&(a=a.replace(ra,"&quot;"));return a},oa=/&/g,pa=/</g,qa=/>/g,ra=/\"/g,na=/[&<>\"]/,ua=function(a,b){for(var c=
-0,d=ma(String(a)).split("."),f=ma(String(b)).split("."),g=Math.max(d.length,f.length),h=0;c==0&&h<g;h++){var j=d[h]||"",k=f[h]||"",l=RegExp("(\\d*)(\\D*)","g"),M=RegExp("(\\d*)(\\D*)","g");do{var r=l.exec(j)||["","",""],I=M.exec(k)||["","",""];if(r[0].length==0&&I[0].length==0)break;c=ta(r[1].length==0?0:parseInt(r[1],10),I[1].length==0?0:parseInt(I[1],10))||ta(r[2].length==0,I[2].length==0)||ta(r[2],I[2])}while(c==0)}return c},ta=function(a,b){if(a<b)return-1;else if(a>b)return 1;return 0};var va=function(a,b){b.unshift(a);ka.call(this,la.apply(m,b));b.shift();this.mc=a};t(va,ka);va.prototype.name="AssertionError";var wa=function(a,b,c){if(!a){var d=Array.prototype.slice.call(arguments,2),f="Assertion failed";if(b){f+=": "+b;var g=d}e(new va(""+f,g||[]))}};var u=Array.prototype,xa=u.indexOf?function(a,b,c){wa(a.length!=m);return u.indexOf.call(a,b,c)}:function(a,b,c){c=c==m?0:c<0?Math.max(0,a.length+c):c;if(p(a))return!p(b)||b.length!=1?-1:a.indexOf(b,c);for(;c<a.length;c++)if(c in a&&a[c]===b)return c;return-1},ya=u.forEach?function(a,b,c){wa(a.length!=m);u.forEach.call(a,b,c)}:function(a,b,c){for(var d=a.length,f=p(a)?a.split(""):a,g=0;g<d;g++)g in f&&b.call(c,f[g],g,a)},za=u.every?function(a,b,c){wa(a.length!=m);return u.every.call(a,b,c)}:function(a,
+0,d=ma(String(a)).split("."),f=ma(String(b)).split("."),g=Math.max(d.length,f.length),h=0;c==0&&h<g;h++){var j=d[h]||"",k=f[h]||"",l=RegExp("(\\d*)(\\D*)","g"),L=RegExp("(\\d*)(\\D*)","g");do{var r=l.exec(j)||["","",""],I=L.exec(k)||["","",""];if(r[0].length==0&&I[0].length==0)break;c=ta(r[1].length==0?0:parseInt(r[1],10),I[1].length==0?0:parseInt(I[1],10))||ta(r[2].length==0,I[2].length==0)||ta(r[2],I[2])}while(c==0)}return c},ta=function(a,b){if(a<b)return-1;else if(a>b)return 1;return 0};var va=function(a,b){b.unshift(a);ka.call(this,la.apply(m,b));b.shift();this.lc=a};t(va,ka);va.prototype.name="AssertionError";var wa=function(a,b,c){if(!a){var d=Array.prototype.slice.call(arguments,2),f="Assertion failed";if(b){f+=": "+b;var g=d}e(new va(""+f,g||[]))}};var u=Array.prototype,xa=u.indexOf?function(a,b,c){wa(a.length!=m);return u.indexOf.call(a,b,c)}:function(a,b,c){c=c==m?0:c<0?Math.max(0,a.length+c):c;if(p(a))return!p(b)||b.length!=1?-1:a.indexOf(b,c);for(;c<a.length;c++)if(c in a&&a[c]===b)return c;return-1},ya=u.forEach?function(a,b,c){wa(a.length!=m);u.forEach.call(a,b,c)}:function(a,b,c){for(var d=a.length,f=p(a)?a.split(""):a,g=0;g<d;g++)g in f&&b.call(c,f[g],g,a)},za=u.every?function(a,b,c){wa(a.length!=m);return u.every.call(a,b,c)}:function(a,
 b,c){for(var d=a.length,f=p(a)?a.split(""):a,g=0;g<d;g++)if(g in f&&!b.call(c,f[g],g,a))return!1;return!0},Aa=function(a,b){return xa(a,b)>=0},Ba=function(a,b){var c=xa(a,b);c>=0&&(wa(a.length!=m),u.splice.call(a,c,1))},Ca=function(a){return u.concat.apply(u,arguments)},Da=function(a){if(ea(a))return Ca(a);else{for(var b=[],c=0,d=a.length;c<d;c++)b[c]=a[c];return b}},Fa=function(a,b,c,d){wa(a.length!=m);u.splice.apply(a,Ea(arguments,1))},Ea=function(a,b,c){wa(a.length!=m);return arguments.length<=
-2?u.slice.call(a,b):u.slice.call(a,b,c)};var Ga=function(a,b){for(var c in a)b.call(i,a[c],c,a)},Ha=function(a,b){b in a&&delete a[b]},Ia=function(a,b,c){b in a&&e(Error('The object already contains the key "'+b+'"'));a[b]=c},Ja=function(a){var b={},c;for(c in a)b[a[c]]=c;return b},Ka=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"],La=function(a,b){for(var c,d,f=1;f<arguments.length;f++){d=arguments[f];for(c in d)a[c]=d[c];for(var g=0;g<Ka.length;g++)c=Ka[g],Object.prototype.hasOwnProperty.call(d,
-c)&&(a[c]=d[c])}};var Ma,Na,Oa,Pa,Qa=function(){return o.navigator?o.navigator.userAgent:m};Pa=Oa=Na=Ma=!1;var Ra;if(Ra=Qa()){var Sa=o.navigator;Ma=Ra.indexOf("Opera")==0;Na=!Ma&&Ra.indexOf("MSIE")!=-1;Oa=!Ma&&Ra.indexOf("WebKit")!=-1;Pa=!Ma&&!Oa&&Sa.product=="Gecko"}var Ta=Ma,v=Na,w=Pa,x=Oa,Ua=o.navigator,Va=(Ua&&Ua.platform||"").indexOf("Mac")!=-1,Wa;
-a:{var Xa="",Ya;if(Ta&&o.opera)var Za=o.opera.version,Xa=typeof Za=="function"?Za():Za;else if(w?Ya=/rv\:([^\);]+)(\)|;)/:v?Ya=/MSIE\s+([^\);]+)(\)|;)/:x&&(Ya=/WebKit\/(\S+)/),Ya)var $a=Ya.exec(Qa()),Xa=$a?$a[1]:"";if(v){var ab,bb=o.document;ab=bb?bb.documentMode:i;if(ab>parseFloat(Xa)){Wa=String(ab);break a}}Wa=Xa}var cb=Wa,db={},y=function(a){return db[a]||(db[a]=ua(cb,a)>=0)},eb={},fb=function(){return eb[9]||(eb[9]=v&&document.documentMode&&document.documentMode>=9)};var gb,hb=!v||fb();!w&&!v||v&&fb()||w&&y("1.9.1");var ib=v&&!y("9");var jb=function(a){return(a=a.className)&&typeof a.split=="function"?a.split(/\s+/):[]},z=function(a,b){var c=jb(a),d=Ea(arguments,1),f;f=c;for(var g=0,h=0;h<d.length;h++)Aa(f,d[h])||(f.push(d[h]),g++);f=g==d.length;a.className=c.join(" ");return f},kb=function(a,b){var c=jb(a),d=Ea(arguments,1),f;f=c;for(var g=0,h=0;h<f.length;h++)Aa(d,f[h])&&(Fa(f,h--,1),g++);f=g==d.length;a.className=c.join(" ");return f};var nb=function(a){return a?new lb(mb(a)):gb||(gb=new lb)},ob=function(a){return p(a)?document.getElementById(a):a},pb=function(a,b,c){c=c||document;a=a&&a!="*"?a.toUpperCase():"";if(c.querySelectorAll&&c.querySelector&&(!x||document.compatMode=="CSS1Compat"||y("528"))&&(a||b))return c.querySelectorAll(a+(b?"."+b:""));if(b&&c.getElementsByClassName)if(c=c.getElementsByClassName(b),a){for(var d={},f=0,g=0,h;h=c[g];g++)a==h.nodeName&&(d[f++]=h);d.length=f;return d}else return c;c=c.getElementsByTagName(a||
-"*");if(b){d={};for(g=f=0;h=c[g];g++)a=h.className,typeof a.split=="function"&&Aa(a.split(/\s+/),b)&&(d[f++]=h);d.length=f;return d}else return c},rb=function(a,b){Ga(b,function(b,d){d=="style"?a.style.cssText=b:d=="class"?a.className=b:d=="for"?a.htmlFor=b:d in qb?a.setAttribute(qb[d],b):a[d]=b})},qb={cellpadding:"cellPadding",cellspacing:"cellSpacing",colspan:"colSpan",rowspan:"rowSpan",valign:"vAlign",height:"height",width:"width",usemap:"useMap",frameborder:"frameBorder",maxlength:"maxLength",
-type:"type"},tb=function(a,b,c){return sb(document,arguments)},sb=function(a,b){var c=b[0],d=b[1];if(!hb&&d&&(d.name||d.type)){c=["<",c];d.name&&c.push(' name="',sa(d.name),'"');if(d.type){c.push(' type="',sa(d.type),'"');var f={};La(f,d);d=f;delete d.type}c.push(">");c=c.join("")}c=a.createElement(c);if(d)p(d)?c.className=d:ea(d)?z.apply(m,[c].concat(d)):rb(c,d);b.length>2&&ub(a,c,b);return c},ub=function(a,b,c){function d(c){c&&b.appendChild(p(c)?a.createTextNode(c):c)}for(var f=2;f<c.length;f++){var g=
-c[f];if(fa(g)&&!(ga(g)&&g.nodeType>0)){var h;a:{if(g&&typeof g.length=="number")if(ga(g)){h=typeof g.item=="function"||typeof g.item=="string";break a}else if(q(g)){h=typeof g.item=="function";break a}h=!1}ya(h?Da(g):g,d)}else d(g)}},vb=function(a){a&&a.parentNode&&a.parentNode.removeChild(a)},wb=function(a){for(;a&&a.nodeType!=1;)a=a.nextSibling;return a},xb=function(a,b){if(a.contains&&b.nodeType==1)return a==b||a.contains(b);if(typeof a.compareDocumentPosition!="undefined")return a==b||Boolean(a.compareDocumentPosition(b)&
-16);for(;b&&a!=b;)b=b.parentNode;return b==a},mb=function(a){return a.nodeType==9?a:a.ownerDocument||a.document},yb=function(a,b){if("textContent"in a)a.textContent=b;else if(a.firstChild&&a.firstChild.nodeType==3){for(;a.lastChild!=a.firstChild;)a.removeChild(a.lastChild);a.firstChild.data=b}else{for(var c;c=a.firstChild;)a.removeChild(c);a.appendChild(mb(a).createTextNode(b))}},zb={SCRIPT:1,STYLE:1,HEAD:1,IFRAME:1,OBJECT:1},Ab={IMG:" ",BR:"\n"},Bb=function(a){var b=a.getAttributeNode("tabindex");
-return b&&b.specified?(a=a.tabIndex,typeof a=="number"&&a>=0&&a<32768):!1},Cb=function(a,b,c){if(!(a.nodeName in zb))if(a.nodeType==3)c?b.push(String(a.nodeValue).replace(/(\r\n|\r|\n)/g,"")):b.push(a.nodeValue);else if(a.nodeName in Ab)b.push(Ab[a.nodeName]);else for(a=a.firstChild;a;)Cb(a,b,c),a=a.nextSibling},lb=function(a){this.I=a||o.document||document};n=lb.prototype;n.Ma=nb;n.a=function(a){return p(a)?this.I.getElementById(a):a};n.l=function(a,b,c){return sb(this.I,arguments)};
-n.createElement=function(a){return this.I.createElement(a)};n.createTextNode=function(a){return this.I.createTextNode(a)};n.appendChild=function(a,b){a.appendChild(b)};n.contains=xb;var Db=new Function("a","return a");var Eb,Fb=!v||fb(),Gb=v&&!y("8");var A=function(){};A.prototype.bb=!1;A.prototype.A=function(){if(!this.bb)this.bb=!0,this.f()};A.prototype.f=function(){this.hc&&Hb.apply(m,this.hc)};var Hb=function(a){for(var b=0,c=arguments.length;b<c;++b){var d=arguments[b];fa(d)?Hb.apply(m,d):d&&typeof d.A=="function"&&d.A()}};var B=function(a,b){this.type=a;this.currentTarget=this.target=b};t(B,A);n=B.prototype;n.f=function(){delete this.type;delete this.target;delete this.currentTarget};n.da=!1;n.xa=!0;n.stopPropagation=function(){this.da=!0};n.preventDefault=function(){this.xa=!1};var C=function(a,b){a&&this.wa(a,b)};t(C,B);var Ib=[1,4,2];n=C.prototype;n.target=m;n.relatedTarget=m;n.offsetX=0;n.offsetY=0;n.clientX=0;n.clientY=0;n.screenX=0;n.screenY=0;n.button=0;n.keyCode=0;n.charCode=0;n.ctrlKey=!1;n.altKey=!1;n.shiftKey=!1;n.metaKey=!1;n.Xb=!1;n.L=m;
-n.wa=function(a,b){var c=this.type=a.type;B.call(this,c);this.target=a.target||a.srcElement;this.currentTarget=b;var d=a.relatedTarget;if(d){if(w){var f;a:{try{Db(d.nodeName);f=!0;break a}catch(g){}f=!1}f||(d=m)}}else if(c=="mouseover")d=a.fromElement;else if(c=="mouseout")d=a.toElement;this.relatedTarget=d;this.offsetX=a.offsetX!==i?a.offsetX:a.layerX;this.offsetY=a.offsetY!==i?a.offsetY:a.layerY;this.clientX=a.clientX!==i?a.clientX:a.pageX;this.clientY=a.clientY!==i?a.clientY:a.pageY;this.screenX=
-a.screenX||0;this.screenY=a.screenY||0;this.button=a.button;this.keyCode=a.keyCode||0;this.charCode=a.charCode||(c=="keypress"?a.keyCode:0);this.ctrlKey=a.ctrlKey;this.altKey=a.altKey;this.shiftKey=a.shiftKey;this.metaKey=a.metaKey;this.Xb=Va?a.metaKey:a.ctrlKey;this.state=a.state;this.L=a;delete this.xa;delete this.da};var Jb=function(a){return Fb?a.L.button==0:a.type=="click"?!0:!!(a.L.button&Ib[0])};
-C.prototype.stopPropagation=function(){C.c.stopPropagation.call(this);this.L.stopPropagation?this.L.stopPropagation():this.L.cancelBubble=!0};C.prototype.preventDefault=function(){C.c.preventDefault.call(this);var a=this.L;if(a.preventDefault)a.preventDefault();else if(a.returnValue=!1,Gb)try{if(a.ctrlKey||a.keyCode>=112&&a.keyCode<=123)a.keyCode=-1}catch(b){}};C.prototype.f=function(){C.c.f.call(this);this.relatedTarget=this.currentTarget=this.target=this.L=m};var D=function(a,b){this.Db=b;this.ca=[];a>this.Db&&e(Error("[goog.structs.SimplePool] Initial cannot be greater than max"));for(var c=0;c<a;c++)this.ca.push(this.M?this.M():{})};t(D,A);D.prototype.M=m;D.prototype.Eb=m;D.prototype.getObject=function(){return this.ca.length?this.ca.pop():this.M?this.M():{}};var Lb=function(a,b){a.ca.length<a.Db?a.ca.push(b):Kb(a,b)},Kb=function(a,b){if(a.Eb)a.Eb(b);else if(ga(b))if(q(b.A))b.A();else for(var c in b)delete b[c]};
-D.prototype.f=function(){D.c.f.call(this);for(var a=this.ca;a.length;)Kb(this,a.pop());delete this.ca};var Mb,Nb=(Mb="ScriptEngine"in o&&o.ScriptEngine()=="JScript")?o.ScriptEngineMajorVersion()+"."+o.ScriptEngineMinorVersion()+"."+o.ScriptEngineBuildVersion():"0";var Ob=function(){},Pb=0;n=Ob.prototype;n.key=0;n.ba=!1;n.Cb=!1;n.wa=function(a,b,c,d,f,g){q(a)?this.Ab=!0:a&&a.handleEvent&&q(a.handleEvent)?this.Ab=!1:e(Error("Invalid listener argument"));this.ha=a;this.sb=b;this.src=c;this.type=d;this.capture=!!f;this.Ga=g;this.Cb=!1;this.key=++Pb;this.ba=!1};n.handleEvent=function(a){return this.Ab?this.ha.call(this.Ga||this.src,a):this.ha.handleEvent.call(this.ha,a)};var Qb,Rb,Sb,Tb,Ub,Vb,Wb,Xb,Yb,Zb,$b;
-(function(){function a(){return{J:0,G:0}}function b(){return[]}function c(){var a=function(b){return h.call(a.src,a.key,b)};return a}function d(){return new Ob}function f(){return new C}var g=Mb&&!(ua(Nb,"5.7")>=0),h;Vb=function(a){h=a};if(g){Qb=function(){return j.getObject()};Rb=function(a){Lb(j,a)};Sb=function(){return k.getObject()};Tb=function(a){Lb(k,a)};Ub=function(){return l.getObject()};Wb=function(){Lb(l,c())};Xb=function(){return M.getObject()};Yb=function(a){Lb(M,a)};Zb=function(){return r.getObject()};
-$b=function(a){Lb(r,a)};var j=new D(0,600);j.M=a;var k=new D(0,600);k.M=b;var l=new D(0,600);l.M=c;var M=new D(0,600);M.M=d;var r=new D(0,600);r.M=f}else Qb=a,Rb=ba,Sb=b,Tb=ba,Ub=c,Wb=ba,Xb=d,Yb=ba,Zb=f,$b=ba})();var ac={},E={},F={},bc={},G=function(a,b,c,d,f){if(b)if(ea(b)){for(var g=0;g<b.length;g++)G(a,b[g],c,d,f);return m}else{var d=!!d,h=E;b in h||(h[b]=Qb());h=h[b];d in h||(h[d]=Qb(),h.J++);var h=h[d],j=s(a),k;h.G++;if(h[j]){k=h[j];for(g=0;g<k.length;g++)if(h=k[g],h.ha==c&&h.Ga==f){if(h.ba)break;return k[g].key}}else k=h[j]=Sb(),h.J++;g=Ub();g.src=a;h=Xb();h.wa(c,g,a,b,d,f);c=h.key;g.key=c;k.push(h);ac[c]=h;F[j]||(F[j]=Sb());F[j].push(h);a.addEventListener?(a==o||!a.rb)&&a.addEventListener(b,g,d):a.attachEvent(b in
-bc?bc[b]:bc[b]="on"+b,g);return c}else e(Error("Invalid event type"))},cc=function(a,b,c,d,f){if(ea(b))for(var g=0;g<b.length;g++)cc(a,b[g],c,d,f);else if(d=!!d,a=dc(a,b,d))for(g=0;g<a.length;g++)if(a[g].ha==c&&a[g].capture==d&&a[g].Ga==f){H(a[g].key);break}},H=function(a){if(ac[a]){var b=ac[a];if(!b.ba){var c=b.src,d=b.type,f=b.sb,g=b.capture;c.removeEventListener?(c==o||!c.rb)&&c.removeEventListener(d,f,g):c.detachEvent&&c.detachEvent(d in bc?bc[d]:bc[d]="on"+d,f);c=s(c);f=E[d][g][c];if(F[c]){var h=
-F[c];Ba(h,b);h.length==0&&delete F[c]}b.ba=!0;f.yb=!0;ec(d,g,c,f);delete ac[a]}}},ec=function(a,b,c,d){if(!d.Na&&d.yb){for(var f=0,g=0;f<d.length;f++)if(d[f].ba){var h=d[f].sb;h.src=m;Wb(h);Yb(d[f])}else f!=g&&(d[g]=d[f]),g++;d.length=g;d.yb=!1;g==0&&(Tb(d),delete E[a][b][c],E[a][b].J--,E[a][b].J==0&&(Rb(E[a][b]),delete E[a][b],E[a].J--),E[a].J==0&&(Rb(E[a]),delete E[a]))}},fc=function(a){var b,c=0,d=b==m;b=!!b;if(a==m)Ga(F,function(a){for(var f=a.length-1;f>=0;f--){var g=a[f];if(d||b==g.capture)H(g.key),
-c++}});else if(a=s(a),F[a])for(var a=F[a],f=a.length-1;f>=0;f--){var g=a[f];if(d||b==g.capture)H(g.key),c++}},dc=function(a,b,c){var d=E;return b in d&&(d=d[b],c in d&&(d=d[c],a=s(a),d[a]))?d[a]:m},hc=function(a,b,c,d,f){var g=1,b=s(b);if(a[b]){a.G--;a=a[b];a.Na?a.Na++:a.Na=1;try{for(var h=a.length,j=0;j<h;j++){var k=a[j];k&&!k.ba&&(g&=gc(k,f)!==!1)}}finally{a.Na--,ec(c,d,b,a)}}return Boolean(g)},gc=function(a,b){var c=a.handleEvent(b);a.Cb&&H(a.key);return c};
-Vb(function(a,b){if(!ac[a])return!0;var c=ac[a],d=c.type,f=E;if(!(d in f))return!0;var f=f[d],g,h;Eb===i&&(Eb=v&&!o.addEventListener);if(Eb){var j;if(!(j=b))a:{j="window.event".split(".");for(var k=o;g=j.shift();)if(k[g]!=m)k=k[g];else{j=m;break a}j=k}g=j;j=!0 in f;k=!1 in f;if(j){if(g.keyCode<0||g.returnValue!=i)return!0;a:{var l=!1;if(g.keyCode==0)try{g.keyCode=-1;break a}catch(M){l=!0}if(l||g.returnValue==i)g.returnValue=!0}}l=Zb();l.wa(g,this);g=!0;try{if(j){for(var r=Sb(),I=l.currentTarget;I;I=
-I.parentNode)r.push(I);h=f[!0];h.G=h.J;for(var P=r.length-1;!l.da&&P>=0&&h.G;P--)l.currentTarget=r[P],g&=hc(h,r[P],d,!0,l);if(k){h=f[!1];h.G=h.J;for(P=0;!l.da&&P<r.length&&h.G;P++)l.currentTarget=r[P],g&=hc(h,r[P],d,!1,l)}}else g=gc(c,l)}finally{if(r)r.length=0,Tb(r);l.A();$b(l)}return g}d=new C(b,this);try{g=gc(c,d)}finally{d.A()}return g});var ic=function(a){this.vb=a};t(ic,A);
-var jc=new D(0,100),kc=[],J=function(a,b,c,d){ea(c)||(kc[0]=c,c=kc);for(var f=0;f<c.length;f++){var g=a,h=G(b,c[f],d||a,!1,a.vb||a);g.w?g.w[h]=!0:g.U?(g.w=jc.getObject(),g.w[g.U]=!0,g.U=m,g.w[h]=!0):g.U=h}return a},K=function(a,b,c,d,f,g){if(a.U||a.w)if(ea(c))for(var h=0;h<c.length;h++)K(a,b,c[h],d,f,g);else{a:{d=d||a;g=g||a.vb||a;f=!!f;if(b=dc(b,c,f))for(c=0;c<b.length;c++)if(!b[c].ba&&b[c].ha==d&&b[c].capture==f&&b[c].Ga==g){b=b[c];break a}b=m}if(b)if(b=b.key,H(b),a.w)Ha(a.w,b);else if(a.U==b)a.U=
-m}return a},lc=function(a){if(a.w){for(var b in a.w)H(b),delete a.w[b];Lb(jc,a.w);a.w=m}else a.U&&H(a.U)};ic.prototype.f=function(){ic.c.f.call(this);lc(this)};ic.prototype.handleEvent=function(){e(Error("EventHandler.handleEvent not implemented"))};var mc=function(){};t(mc,A);n=mc.prototype;n.rb=!0;n.Ha=m;n.cb=function(a){this.Ha=a};n.addEventListener=function(a,b,c,d){G(this,a,b,c,d)};n.removeEventListener=function(a,b,c,d){cc(this,a,b,c,d)};
-n.dispatchEvent=function(a){var b=a.type||a,c=E;if(b in c){if(p(a))a=new B(a,this);else if(a instanceof B)a.target=a.target||this;else{var d=a,a=new B(b,this);La(a,d)}var d=1,f,c=c[b],b=!0 in c,g;if(b){f=[];for(g=this;g;g=g.Ha)f.push(g);g=c[!0];g.G=g.J;for(var h=f.length-1;!a.da&&h>=0&&g.G;h--)a.currentTarget=f[h],d&=hc(g,f[h],a.type,!0,a)&&a.xa!=!1}if(!1 in c)if(g=c[!1],g.G=g.J,b)for(h=0;!a.da&&h<f.length&&g.G;h++)a.currentTarget=f[h],d&=hc(g,f[h],a.type,!1,a)&&a.xa!=!1;else for(f=this;!a.da&&f&&
-g.G;f=f.Ha)a.currentTarget=f,d&=hc(g,f,a.type,!1,a)&&a.xa!=!1;a=Boolean(d)}else a=!0;return a};n.f=function(){mc.c.f.call(this);fc(this);this.Ha=m};var L=function(a,b){a.style.display=b?"":"none"},nc=w?"MozUserSelect":x?"WebkitUserSelect":m,oc=function(a,b,c){c=!c?a.getElementsByTagName("*"):m;if(nc){if(b=b?"none":"",a.style[nc]=b,c)for(var a=0,d;d=c[a];a++)d.style[nc]=b}else if(v||Ta)if(b=b?"on":"",a.setAttribute("unselectable",b),c)for(a=0;d=c[a];a++)d.setAttribute("unselectable",b)};var pc=function(){};ca(pc);pc.prototype.$b=0;pc.Q();var N=function(a){this.q=a||nb();this.ua=qc};t(N,mc);N.prototype.Zb=pc.Q();var qc=m,rc=function(a,b){switch(a){case 1:return b?"disable":"enable";case 2:return b?"highlight":"unhighlight";case 4:return b?"activate":"deactivate";case 8:return b?"select":"unselect";case 16:return b?"check":"uncheck";case 32:return b?"focus":"blur";case 64:return b?"open":"close"}e(Error("Invalid component state"))};n=N.prototype;n.ia=m;n.e=!1;n.d=m;n.ua=m;n.Mb=m;n.o=m;n.p=m;n.t=m;n.mb=!1;
-var sc=function(a){return a.ia||(a.ia=":"+(a.Zb.$b++).toString(36))},tc=function(a,b){a.o&&a.o.t&&(Ha(a.o.t,a.ia),Ia(a.o.t,b,a));a.ia=b};N.prototype.a=function(){return this.d};var uc=function(a){return a.fa||(a.fa=new ic(a))},wc=function(a,b){a==b&&e(Error("Unable to set parent component"));b&&a.o&&a.ia&&vc(a.o,a.ia)&&a.o!=b&&e(Error("Unable to set parent component"));a.o=b;N.c.cb.call(a,b)};n=N.prototype;n.getParent=function(){return this.o};
-n.cb=function(a){this.o&&this.o!=a&&e(Error("Method not supported"));N.c.cb.call(this,a)};n.Ma=function(){return this.q};n.l=function(){this.d=this.q.createElement("div")};n.K=function(a){if(this.e)e(Error("Component already rendered"));else if(a&&this.aa(a)){this.mb=!0;if(!this.q||this.q.I!=mb(a))this.q=nb(a);this.Va(a);this.r()}else e(Error("Invalid element to decorate"))};n.aa=function(){return!0};n.Va=function(a){this.d=a};n.r=function(){this.e=!0;xc(this,function(a){!a.e&&a.a()&&a.r()})};
-n.X=function(){xc(this,function(a){a.e&&a.X()});this.fa&&lc(this.fa);this.e=!1};n.f=function(){N.c.f.call(this);this.e&&this.X();this.fa&&(this.fa.A(),delete this.fa);xc(this,function(a){a.A()});!this.mb&&this.d&&vb(this.d);this.o=this.Mb=this.d=this.t=this.p=m};n.Fa=function(a,b){this.Sa(a,yc(this),b)};
-n.Sa=function(a,b,c){a.e&&(c||!this.e)&&e(Error("Component already rendered"));(b<0||b>yc(this))&&e(Error("Child component index out of bounds"));if(!this.t||!this.p)this.t={},this.p=[];a.getParent()==this?(this.t[sc(a)]=a,Ba(this.p,a)):Ia(this.t,sc(a),a);wc(a,this);Fa(this.p,b,0,a);a.e&&this.e&&a.getParent()==this?(c=this.C(),c.insertBefore(a.a(),c.childNodes[b]||m)):c?(this.d||this.l(),c=O(this,b+1),b=this.C(),c=c?c.d:m,a.e&&e(Error("Component already rendered")),a.d||a.l(),b?b.insertBefore(a.d,
-c||m):a.q.I.body.appendChild(a.d),(!a.o||a.o.e)&&a.r()):this.e&&!a.e&&a.d&&a.r()};n.C=function(){return this.d};var zc=function(a){if(a.ua==m){var b;a:{b=a.e?a.d:a.q.I.body;var c=mb(b);if(c.defaultView&&c.defaultView.getComputedStyle&&(b=c.defaultView.getComputedStyle(b,m))){b=b.direction||b.getPropertyValue("direction");break a}b=""}a.ua="rtl"==(b||((a.e?a.d:a.q.I.body).currentStyle?(a.e?a.d:a.q.I.body).currentStyle.direction:m)||(a.e?a.d:a.q.I.body).style.direction)}return a.ua};
-N.prototype.ra=function(a){this.e&&e(Error("Component already rendered"));this.ua=a};var yc=function(a){return a.p?a.p.length:0},vc=function(a,b){return a.t&&b?(b in a.t?a.t[b]:i)||m:m},O=function(a,b){return a.p?a.p[b]||m:m},xc=function(a,b,c){a.p&&ya(a.p,b,c)},Ac=function(a,b){return a.p&&b?xa(a.p,b):-1};N.prototype.removeChild=function(a,b){if(a){var c=p(a)?a:sc(a),a=vc(this,c);c&&a&&(Ha(this.t,c),Ba(this.p,a),b&&(a.X(),a.d&&vb(a.d)),wc(a,m))}a||e(Error("Child is not in parent component"));return a};var Bc=function(a,b){a.setAttribute("role",b);a.nc=b};var Dc=function(a,b,c,d,f){if(!v&&(!x||!y("525")))return!0;if(Va&&f)return Cc(a);if(f&&!d)return!1;if(!c&&(b==17||b==18))return!1;if(v&&d&&b==a)return!1;switch(a){case 13:return!(v&&fb());case 27:return!x}return Cc(a)},Cc=function(a){if(a>=48&&a<=57)return!0;if(a>=96&&a<=106)return!0;if(a>=65&&a<=90)return!0;if(x&&a==0)return!0;switch(a){case 32:case 63:case 107:case 109:case 110:case 111:case 186:case 189:case 187:case 188:case 190:case 191:case 192:case 222:case 219:case 220:case 221:return!0;default:return!1}};var Q=function(a,b){a&&Ec(this,a,b)};t(Q,mc);n=Q.prototype;n.d=m;n.Ka=m;n.Xa=m;n.La=m;n.T=-1;n.S=-1;
-var Fc={3:13,12:144,63232:38,63233:40,63234:37,63235:39,63236:112,63237:113,63238:114,63239:115,63240:116,63241:117,63242:118,63243:119,63244:120,63245:121,63246:122,63247:123,63248:44,63272:46,63273:36,63275:35,63276:33,63277:34,63289:144,63302:45},Gc={Up:38,Down:40,Left:37,Right:39,Enter:13,F1:112,F2:113,F3:114,F4:115,F5:116,F6:117,F7:118,F8:119,F9:120,F10:121,F11:122,F12:123,"U+007F":46,Home:36,End:35,PageUp:33,PageDown:34,Insert:45},Hc={61:187,59:186},Ic=v||x&&y("525");
-Q.prototype.Rb=function(a){if(x&&(this.T==17&&!a.ctrlKey||this.T==18&&!a.altKey))this.S=this.T=-1;Ic&&!Dc(a.keyCode,this.T,a.shiftKey,a.ctrlKey,a.altKey)?this.handleEvent(a):this.S=w&&a.keyCode in Hc?Hc[a.keyCode]:a.keyCode};Q.prototype.Sb=function(){this.S=this.T=-1};
-Q.prototype.handleEvent=function(a){var b=a.L,c,d;v&&a.type=="keypress"?(c=this.S,d=c!=13&&c!=27?b.keyCode:0):x&&a.type=="keypress"?(c=this.S,d=b.charCode>=0&&b.charCode<63232&&Cc(c)?b.charCode:0):Ta?(c=this.S,d=Cc(c)?b.keyCode:0):(c=b.keyCode||this.S,d=b.charCode||0,Va&&d==63&&!c&&(c=191));var f=c,g=b.keyIdentifier;c?c>=63232&&c in Fc?f=Fc[c]:c==25&&a.shiftKey&&(f=9):g&&g in Gc&&(f=Gc[g]);a=f==this.T;this.T=f;b=new Jc(f,d,a,b);try{this.dispatchEvent(b)}finally{b.A()}};Q.prototype.a=function(){return this.d};
-var Ec=function(a,b,c){a.La&&a.detach();a.d=b;a.Ka=G(a.d,"keypress",a,c);a.Xa=G(a.d,"keydown",a.Rb,c,a);a.La=G(a.d,"keyup",a.Sb,c,a)};Q.prototype.detach=function(){if(this.Ka)H(this.Ka),H(this.Xa),H(this.La),this.La=this.Xa=this.Ka=m;this.d=m;this.S=this.T=-1};Q.prototype.f=function(){Q.c.f.call(this);this.detach()};var Jc=function(a,b,c,d){d&&this.wa(d,i);this.type="key";this.keyCode=a;this.charCode=b;this.repeat=c};t(Jc,C);var Lc=function(a,b){a||e(Error("Invalid class name "+a));q(b)||e(Error("Invalid decorator function "+b));Kc[a]=b},Mc={},Kc={};var R=function(){},Nc;ca(R);n=R.prototype;n.ga=function(){};n.l=function(a){return a.Ma().l("div",this.va(a).join(" "),a.za)};n.C=function(a){return a};n.ta=function(a,b,c){if(a=a.a?a.a():a)if(v&&!y("7")){var d=Oc(jb(a),b);d.push(b);ja(c?z:kb,a).apply(m,d)}else c?z(a,b):kb(a,b)};n.aa=function(){return!0};
-n.K=function(a,b){b.id&&tc(a,b.id);var c=this.C(b);a.za=c&&c.firstChild?c.firstChild.nextSibling?Da(c.childNodes):c.firstChild:m;var d=0,f=this.m(),g=this.m(),h=!1,j=!1,c=!1,k=jb(b);ya(k,function(a){if(!h&&a==f)h=!0,g==f&&(j=!0);else if(!j&&a==g)j=!0;else{var b=d;if(!this.pb)this.Ja||Pc(this),this.pb=Ja(this.Ja);a=parseInt(this.pb[a],10);d=b|(isNaN(a)?0:a)}},this);a.i=d;h||(k.push(f),g==f&&(j=!0));j||k.push(g);var l=a.D;l&&k.push.apply(k,l);if(v&&!y("7")){var M=Oc(k);M.length>0&&(k.push.apply(k,M),
-c=!0)}if(!h||!j||l||c)b.className=k.join(" ");return b};n.Oa=function(a){zc(a)&&this.ra(a.a(),!0);a.isEnabled()&&this.na(a,a.H())};n.Ba=function(a,b){oc(a,!b,!v&&!Ta)};n.ra=function(a,b){this.ta(a,this.m()+"-rtl",b)};n.W=function(a){var b;return a.s&32&&(b=a.k())?Bb(b):!1};n.na=function(a,b){var c;if(a.s&32&&(c=a.k())){if(!b&&a.i&32){try{c.blur()}catch(d){}a.i&32&&a.oa(m)}if(Bb(c)!=b)b?c.tabIndex=0:c.removeAttribute("tabIndex")}};n.ka=function(a,b){L(a,b)};
-n.v=function(a,b,c){var d=a.a();if(d){var f=Qc(this,b);f&&this.ta(a,f,c);Nc||(Nc={1:"disabled",4:"pressed",8:"selected",16:"checked",64:"expanded"});(a=Nc[b])&&d.setAttribute("aria-"+a,c)}};n.k=function(a){return a.a()};n.m=function(){return"goog-control"};n.va=function(a){var b=this.m(),c=[b],d=this.m();d!=b&&c.push(d);b=a.i;for(d=[];b;){var f=b&-b;d.push(Qc(this,f));b&=~f}c.push.apply(c,d);(a=a.D)&&c.push.apply(c,a);v&&!y("7")&&c.push.apply(c,Oc(c));return c};
-var Oc=function(a,b){var c=[];b&&(a=a.concat([b]));ya([],function(d){za(d,ja(Aa,a))&&(!b||Aa(d,b))&&c.push(d.join("_"))});return c},Qc=function(a,b){a.Ja||Pc(a);return a.Ja[b]},Pc=function(a){var b=a.m();a.Ja={1:b+"-disabled",2:b+"-hover",4:b+"-active",8:b+"-selected",16:b+"-checked",32:b+"-focused",64:b+"-open"}};var S=function(a,b,c){N.call(this,c);if(!b){for(var b=this.constructor,d;b;){d=s(b);if(d=Mc[d])break;b=b.c?b.c.constructor:m}b=d?q(d.Q)?d.Q():new d:m}this.b=b;this.za=a};t(S,N);n=S.prototype;n.za=m;n.i=0;n.s=39;n.Yb=255;n.Da=0;n.n=!0;n.D=m;n.la=!0;n.Aa=!1;n.k=function(){return this.b.k(this)};n.Ca=function(){return this.u||(this.u=new Q)};n.xb=function(){return this.b};
-n.ta=function(a,b){if(b){if(a)this.D?Aa(this.D,a)||this.D.push(a):this.D=[a],this.b.ta(this,a,!0)}else if(a&&this.D){Ba(this.D,a);if(this.D.length==0)this.D=m;this.b.ta(this,a,!1)}};n.l=function(){var a=this.b.l(this);this.d=a;var b=this.b.ga();b&&Bc(a,b);this.Aa||this.b.Ba(a,!1);this.H()||this.b.ka(a,!1)};n.C=function(){return this.b.C(this.a())};n.aa=function(a){return this.b.aa(a)};
-n.Va=function(a){this.d=a=this.b.K(this,a);var b=this.b.ga();b&&Bc(a,b);this.Aa||this.b.Ba(a,!1);this.n=a.style.display!="none"};n.r=function(){S.c.r.call(this);this.b.Oa(this);if(this.s&-2&&(this.la&&Rc(this,!0),this.s&32)){var a=this.k();if(a){var b=this.Ca();Ec(b,a);J(J(J(uc(this),b,"key",this.O),a,"focus",this.qa),a,"blur",this.oa)}}};
-var Rc=function(a,b){var c=uc(a),d=a.a();b?(J(J(J(J(c,d,"mouseover",a.$a),d,"mousedown",a.ma),d,"mouseup",a.ab),d,"mouseout",a.Za),v&&J(c,d,"dblclick",a.tb)):(K(K(K(K(c,d,"mouseover",a.$a),d,"mousedown",a.ma),d,"mouseup",a.ab),d,"mouseout",a.Za),v&&K(c,d,"dblclick",a.tb))};n=S.prototype;n.X=function(){S.c.X.call(this);this.u&&this.u.detach();this.H()&&this.isEnabled()&&this.b.na(this,!1)};n.f=function(){S.c.f.call(this);this.u&&(this.u.A(),delete this.u);delete this.b;this.D=this.za=m};
-n.ra=function(a){S.c.ra.call(this,a);var b=this.a();b&&this.b.ra(b,a)};n.Ba=function(a){this.Aa=a;var b=this.a();b&&this.b.Ba(b,a)};n.H=function(){return this.n};n.ka=function(a,b){if(b||this.n!=a&&this.dispatchEvent(a?"show":"hide")){var c=this.a();c&&this.b.ka(c,a);this.isEnabled()&&this.b.na(this,a);this.n=a;return!0}return!1};n.isEnabled=function(){return!(this.i&1)};
-n.sa=function(a){var b=this.getParent();if((!b||typeof b.isEnabled!="function"||b.isEnabled())&&T(this,1,!a))a||(this.setActive(!1),this.B(!1)),this.H()&&this.b.na(this,a),this.v(1,!a)};n.B=function(a){T(this,2,a)&&this.v(2,a)};n.setActive=function(a){T(this,4,a)&&this.v(4,a)};var Sc=function(a,b){T(a,8,b)&&a.v(8,b)},Tc=function(a,b){T(a,64,b)&&a.v(64,b)};S.prototype.v=function(a,b){if(this.s&a&&b!=!!(this.i&a))this.b.v(this,a,b),this.i=b?this.i|a:this.i&~a};
-var Uc=function(a,b,c){a.e&&a.i&b&&!c&&e(Error("Component already rendered"));!c&&a.i&b&&a.v(b,!1);a.s=c?a.s|b:a.s&~b},U=function(a,b){return!!(a.Yb&b)&&!!(a.s&b)},T=function(a,b,c){return!!(a.s&b)&&!!(a.i&b)!=c&&(!(a.Da&b)||a.dispatchEvent(rc(b,c)))&&!a.bb};n=S.prototype;n.$a=function(a){(!a.relatedTarget||!xb(this.a(),a.relatedTarget))&&this.dispatchEvent("enter")&&this.isEnabled()&&U(this,2)&&this.B(!0)};
-n.Za=function(a){if((!a.relatedTarget||!xb(this.a(),a.relatedTarget))&&this.dispatchEvent("leave"))U(this,4)&&this.setActive(!1),U(this,2)&&this.B(!1)};n.ma=function(a){if(this.isEnabled()&&(U(this,2)&&this.B(!0),Jb(a)&&(!x||!Va||!a.ctrlKey)))U(this,4)&&this.setActive(!0),this.b.W(this)&&this.k().focus();!this.Aa&&Jb(a)&&(!x||!Va||!a.ctrlKey)&&a.preventDefault()};n.ab=function(a){this.isEnabled()&&(U(this,2)&&this.B(!0),this.i&4&&Vc(this,a)&&U(this,4)&&this.setActive(!1))};
-n.tb=function(a){this.isEnabled()&&Vc(this,a)};var Vc=function(a,b){if(U(a,16)){var c=!(a.i&16);T(a,16,c)&&a.v(16,c)}U(a,8)&&Sc(a,!0);U(a,64)&&Tc(a,!(a.i&64));c=new B("action",a);if(b)for(var d=["altKey","ctrlKey","metaKey","shiftKey","platformModifierKey"],f,g=0;f=d[g];g++)c[f]=b[f];return a.dispatchEvent(c)};S.prototype.qa=function(){U(this,32)&&T(this,32,!0)&&this.v(32,!0)};S.prototype.oa=function(){U(this,4)&&this.setActive(!1);U(this,32)&&T(this,32,!1)&&this.v(32,!1)};
-S.prototype.O=function(a){return this.H()&&this.isEnabled()&&this.kb(a)?(a.preventDefault(),a.stopPropagation(),!0):!1};S.prototype.kb=function(a){return a.keyCode==13&&Vc(this,a)};q(S)||e(Error("Invalid component class "+S));q(R)||e(Error("Invalid renderer class "+R));var Wc=s(S);Mc[Wc]=R;Lc("goog-control",function(){return new S(m)});var Xc=function(){};t(Xc,R);ca(Xc);Xc.prototype.l=function(a){return a.Ma().l("div",this.m())};Xc.prototype.K=function(a,b){if(b.tagName=="HR"){var c=b,b=this.l(a);c.parentNode&&c.parentNode.insertBefore(b,c);vb(c)}else z(b,this.m());return b};Xc.prototype.m=function(){return"goog-menuseparator"};var Yc=function(a,b){S.call(this,m,a||Xc.Q(),b);Uc(this,1,!1);Uc(this,2,!1);Uc(this,4,!1);Uc(this,32,!1);this.i=1};t(Yc,S);Yc.prototype.r=function(){Yc.c.r.call(this);Bc(this.a(),"separator")};Lc("goog-menuseparator",function(){return new Yc});var V=function(){};ca(V);V.prototype.ga=function(){};var Zc=function(a,b){if(a)a.tabIndex=b?0:-1};n=V.prototype;n.l=function(a){return a.Ma().l("div",this.va(a).join(" "))};n.C=function(a){return a};n.aa=function(a){return a.tagName=="DIV"};n.K=function(a,b){b.id&&tc(a,b.id);var c=this.m(),d=!1,f=jb(b);f&&ya(f,function(b){b==c?d=!0:b&&this.Wa(a,b,c)},this);d||z(b,c);$c(a,this.C(b));return b};
-n.Wa=function(a,b,c){b==c+"-disabled"?a.sa(!1):b==c+"-horizontal"?ad(a,"horizontal"):b==c+"-vertical"&&ad(a,"vertical")};var $c=function(a,b){if(b)for(var c=b.firstChild,d;c&&c.parentNode==b;){d=c.nextSibling;if(c.nodeType==1){var f;a:{f=i;for(var g=jb(c),h=0,j=g.length;h<j;h++)if(f=g[h]in Kc?Kc[g[h]]():m)break a;f=m}if(f)f.d=c,a.isEnabled()||f.sa(!1),a.Fa(f),f.K(c)}else(!c.nodeValue||ma(c.nodeValue)=="")&&b.removeChild(c);c=d}};
-V.prototype.Oa=function(a){a=a.a();oc(a,!0,w);if(v)a.hideFocus=!0;var b=this.ga();b&&Bc(a,b)};V.prototype.k=function(a){return a.a()};V.prototype.m=function(){return"goog-container"};V.prototype.va=function(a){var b=this.m(),c=[b,a.P=="horizontal"?b+"-horizontal":b+"-vertical"];a.isEnabled()||c.push(b+"-disabled");return c};var W=function(a,b,c){N.call(this,c);this.b=b||V.Q();this.P=a||"vertical"};t(W,N);n=W.prototype;n.Pa=m;n.u=m;n.b=m;n.P=m;n.n=!0;n.$=!0;n.Ya=!0;n.j=-1;n.g=m;n.ea=!1;n.Qb=!1;n.Pb=!0;n.N=m;n.k=function(){return this.Pa||this.b.k(this)};n.Ca=function(){return this.u||(this.u=new Q(this.k()))};n.xb=function(){return this.b};n.l=function(){this.d=this.b.l(this)};n.C=function(){return this.b.C(this.a())};n.aa=function(a){return this.b.aa(a)};
-n.Va=function(a){this.d=this.b.K(this,a);if(a.style.display=="none")this.n=!1};n.r=function(){W.c.r.call(this);xc(this,function(a){a.e&&bd(this,a)},this);var a=this.a();this.b.Oa(this);this.ka(this.n,!0);J(J(J(J(J(J(J(J(uc(this),this,"enter",this.Ib),this,"highlight",this.Jb),this,"unhighlight",this.Lb),this,"open",this.Kb),this,"close",this.Gb),a,"mousedown",this.ma),mb(a),"mouseup",this.Hb),a,["mousedown","mouseup","mouseover","mouseout"],this.Fb);this.W()&&cd(this,!0)};
-var cd=function(a,b){var c=uc(a),d=a.k();b?J(J(J(c,d,"focus",a.qa),d,"blur",a.oa),a.Ca(),"key",a.O):K(K(K(c,d,"focus",a.qa),d,"blur",a.oa),a.Ca(),"key",a.O)};n=W.prototype;n.X=function(){dd(this,-1);this.g&&Tc(this.g,!1);this.ea=!1;W.c.X.call(this)};n.f=function(){W.c.f.call(this);if(this.u)this.u.A(),this.u=m;this.b=this.g=this.N=this.Pa=m};n.Ib=function(){return!0};
-n.Jb=function(a){var b=Ac(this,a.target);if(b>-1&&b!=this.j){var c=O(this,this.j);c&&c.B(!1);this.j=b;c=O(this,this.j);this.ea&&c.setActive(!0);this.Pb&&this.g&&c!=this.g&&(c.s&64?Tc(c,!0):Tc(this.g,!1))}this.a().setAttribute("aria-activedescendant",a.target.a().id)};n.Lb=function(a){if(a.target==O(this,this.j))this.j=-1;this.a().setAttribute("aria-activedescendant","")};n.Kb=function(a){if((a=a.target)&&a!=this.g&&a.getParent()==this)this.g&&Tc(this.g,!1),this.g=a};
-n.Gb=function(a){if(a.target==this.g)this.g=m};n.ma=function(a){if(this.$)this.ea=!0;var b=this.k();b&&Bb(b)?b.focus():a.preventDefault()};n.Hb=function(){this.ea=!1};n.Fb=function(a){var b;a:{b=a.target;if(this.N)for(var c=this.a();b&&b!==c;){var d=b.id;if(d in this.N){b=this.N[d];break a}b=b.parentNode}b=m}if(b)switch(a.type){case "mousedown":b.ma(a);break;case "mouseup":b.ab(a);break;case "mouseover":b.$a(a);break;case "mouseout":b.Za(a)}};n.qa=function(){};
-n.oa=function(){dd(this,-1);this.ea=!1;this.g&&Tc(this.g,!1)};n.O=function(a){return this.isEnabled()&&this.H()&&(yc(this)!=0||this.Pa)&&this.kb(a)?(a.preventDefault(),a.stopPropagation(),!0):!1};
-n.kb=function(a){var b=O(this,this.j);if(b&&typeof b.O=="function"&&b.O(a))return!0;if(this.g&&this.g!=b&&typeof this.g.O=="function"&&this.g.O(a))return!0;if(a.shiftKey||a.ctrlKey||a.metaKey||a.altKey)return!1;switch(a.keyCode){case 27:if(this.W())this.k().blur();else return!1;break;case 36:ed(this);break;case 35:fd(this);break;case 38:if(this.P=="vertical")gd(this);else return!1;break;case 37:if(this.P=="horizontal")zc(this)?hd(this):gd(this);else return!1;break;case 40:if(this.P=="vertical")hd(this);
-else return!1;break;case 39:if(this.P=="horizontal")zc(this)?gd(this):hd(this);else return!1;break;default:return!1}return!0};var bd=function(a,b){var c=b.a(),c=c.id||(c.id=sc(b));if(!a.N)a.N={};a.N[c]=b};W.prototype.Fa=function(a,b){W.c.Fa.call(this,a,b)};W.prototype.Sa=function(a,b,c){a.Da|=2;a.Da|=64;(this.W()||!this.Qb)&&Uc(a,32,!1);a.e&&!1!=a.la&&Rc(a,!1);a.la=!1;W.c.Sa.call(this,a,b,c);c&&this.e&&bd(this,a);b<=this.j&&this.j++};
-W.prototype.removeChild=function(a,b){if(a=p(a)?vc(this,a):a){var c=Ac(this,a);c!=-1&&(c==this.j?a.B(!1):c<this.j&&this.j--);(c=a.a())&&c.id&&Ha(this.N,c.id)}c=a=W.c.removeChild.call(this,a,b);c.e&&!0!=c.la&&Rc(c,!0);c.la=!0;return a};var ad=function(a,b){a.a()&&e(Error("Component already rendered"));a.P=b};n=W.prototype;n.H=function(){return this.n};
-n.ka=function(a,b){if(b||this.n!=a&&this.dispatchEvent(a?"show":"hide")){this.n=a;var c=this.a();c&&(L(c,a),this.W()&&Zc(this.k(),this.$&&this.n),b||this.dispatchEvent(this.n?"aftershow":"afterhide"));return!0}return!1};n.isEnabled=function(){return this.$};n.sa=function(a){if(this.$!=a&&this.dispatchEvent(a?"enable":"disable"))a?(this.$=!0,xc(this,function(a){a.qb?delete a.qb:a.sa(!0)})):(xc(this,function(a){a.isEnabled()?a.sa(!1):a.qb=!0}),this.ea=this.$=!1),this.W()&&Zc(this.k(),a&&this.n)};
-n.W=function(){return this.Ya};n.na=function(a){a!=this.Ya&&this.e&&cd(this,a);this.Ya=a;this.$&&this.n&&Zc(this.k(),a)};var dd=function(a,b){var c=O(a,b);c?c.B(!0):a.j>-1&&O(a,a.j).B(!1)};W.prototype.B=function(a){dd(this,Ac(this,a))};
-var ed=function(a){id(a,function(a,c){return(a+1)%c},yc(a)-1)},fd=function(a){id(a,function(a,c){a--;return a<0?c-1:a},0)},hd=function(a){id(a,function(a,c){return(a+1)%c},a.j)},gd=function(a){id(a,function(a,c){a--;return a<0?c-1:a},a.j)},id=function(a,b,c){for(var c=c<0?Ac(a,a.g):c,d=yc(a),c=b.call(a,c,d),f=0;f<=d;){var g=O(a,c);if(g&&g.H()&&g.isEnabled()&&g.s&2){a.Ua(c);break}f++;c=b.call(a,c,d)}};W.prototype.Ua=function(a){dd(this,a)};var jd=function(){};t(jd,R);ca(jd);n=jd.prototype;n.m=function(){return"goog-tab"};n.ga=function(){return"tab"};n.l=function(a){var b=jd.c.l.call(this,a);(a=a.Qa())&&this.Ta(b,a);return b};n.K=function(a,b){var b=jd.c.K.call(this,a,b),c=this.Qa(b);if(c)a.nb=c;if(a.i&8&&(c=a.getParent())&&q(c.Z))a.v(8,!1),c.Z(a);return b};n.Qa=function(a){return a.title||""};n.Ta=function(a,b){if(a)a.title=b||""};var kd=function(a,b,c){S.call(this,a,b||jd.Q(),c);Uc(this,8,!0);this.Da|=9};t(kd,S);kd.prototype.Qa=function(){return this.nb};kd.prototype.Ta=function(a){this.xb().Ta(this.a(),a);this.nb=a};Lc("goog-tab",function(){return new kd(m)});var X=function(){};t(X,V);ca(X);X.prototype.m=function(){return"goog-tab-bar"};X.prototype.ga=function(){return"tablist"};X.prototype.Wa=function(a,b,c){if(!this.wb)this.Ia||ld(this),this.wb=Ja(this.Ia);var d=this.wb[b];d?(ad(a,md(d)),a.ob=d):X.c.Wa.call(this,a,b,c)};X.prototype.va=function(a){var b=X.c.va.call(this,a);this.Ia||ld(this);b.push(this.Ia[a.ob]);return b};var ld=function(a){var b=a.m();a.Ia={top:b+"-top",bottom:b+"-bottom",start:b+"-start",end:b+"-end"}};var Y=function(a,b,c){a=a||"top";ad(this,md(a));this.ob=a;W.call(this,this.P,b||X.Q(),c);nd(this)};t(Y,W);n=Y.prototype;n.Tb=!0;n.F=m;n.r=function(){Y.c.r.call(this);nd(this)};n.f=function(){Y.c.f.call(this);this.F=m};n.removeChild=function(a,b){od(this,a);return Y.c.removeChild.call(this,a,b)};n.Ua=function(a){Y.c.Ua.call(this,a);this.Tb&&this.Z(O(this,a))};n.Z=function(a){a?Sc(a,!0):this.F&&Sc(this.F,!1)};
-var od=function(a,b){if(b&&b==a.F){for(var c=Ac(a,b),d=c-1;b=O(a,d);d--)if(b.H()&&b.isEnabled()){a.Z(b);return}for(c+=1;b=O(a,c);c++)if(b.H()&&b.isEnabled()){a.Z(b);return}a.Z(m)}};n=Y.prototype;n.dc=function(a){this.F&&this.F!=a.target&&Sc(this.F,!1);this.F=a.target};n.ec=function(a){if(a.target==this.F)this.F=m};n.bc=function(a){od(this,a.target)};n.cc=function(a){od(this,a.target)};n.qa=function(){O(this,this.j)||this.B(this.F||O(this,0))};
-var nd=function(a){J(J(J(J(uc(a),a,"select",a.dc),a,"unselect",a.ec),a,"disable",a.bc),a,"hide",a.cc)},md=function(a){return a=="start"||a=="end"?"vertical":"horizontal"};Lc("goog-tab-bar",function(){return new Y});var Z=function(a,b,c,d,f){function g(a){if(a)a.tabIndex=0,G(a,"click",h.Nb,!1,h),G(a,"keydown",h.Ob,!1,h)}this.q=f||nb();this.Y=this.q.a(a)||m;this.pa=this.q.a(d||m);this.Ea=(this.Ra=q(b)?b:m)||!b?m:this.q.a(b);this.h=c==!0;var h=this;g(this.Y);g(this.pa);this.R(this.h)};t(Z,mc);n=Z.prototype;n.f=function(){this.Y&&fc(this.Y);this.pa&&fc(this.pa);Z.c.f.call(this)};n.C=function(){return this.Ea};
-n.R=function(a){if(this.Ea)L(this.Ea,a);else if(a&&this.Ra)this.Ea=this.Ra();if(this.pa)L(this.Y,!a),L(this.pa,a);else if(this.Y){var b=this.Y;a?z(b,"goog-zippy-expanded"):kb(b,"goog-zippy-expanded");b=this.Y;!a?z(b,"goog-zippy-collapsed"):kb(b,"goog-zippy-collapsed")}this.h=a;this.dispatchEvent(new pd("toggle",this,this.h))};n.Ob=function(a){if(a.keyCode==13||a.keyCode==32)this.R(!this.h),a.preventDefault(),a.stopPropagation()};n.Nb=function(){this.R(!this.h)};
-var pd=function(a,b,c){B.call(this,a,b);this.lc=c};t(pd,B);var rd=function(a,b){this.lb=[];for(var c=ob(a),c=pb("span","ae-zippy",c),d=0,f;f=c[d];d++)this.lb.push(new Z(f,f.parentNode.parentNode.parentNode.nextElementSibling!=i?f.parentNode.parentNode.parentNode.nextElementSibling:wb(f.parentNode.parentNode.parentNode.nextSibling),!1));this.fc=new qd(this.lb,ob(b))};rd.prototype.jc=function(){return this.fc};rd.prototype.kc=function(){return this.lb};
-var qd=function(a,b){this.ya=a;if(this.ya.length)for(var c=0,d;d=this.ya[c];c++)G(d,"toggle",this.Vb,!1,this);this.jb=0;this.h=!1;c="ae-toggle ae-plus ae-action";this.ya.length||(c+=" ae-disabled");this.V=tb("span",{className:c},"Expand All");G(this.V,"click",this.Ub,!1,this);b&&b.appendChild(this.V)};qd.prototype.Ub=function(){this.ya.length&&this.R(!this.h)};
-qd.prototype.Vb=function(a){a=a.currentTarget;a.h?this.jb+=1:this.jb-=1;if(a.h!=this.h)if(a.h)this.h=!0,sd(this,!0);else if(this.jb==0)this.h=!1,sd(this,!1)};qd.prototype.R=function(a){this.h=a;for(var a=0,b;b=this.ya[a];a++)b.h!=this.h&&b.R(this.h);sd(this)};
-var sd=function(a,b){(b!==i?b:a.h)?(kb(a.V,"ae-plus"),z(a.V,"ae-minus"),yb(a.V,"Collapse All")):(kb(a.V,"ae-minus"),z(a.V,"ae-plus"),yb(a.V,"Expand All"))},td=function(a){this.Wb=a;this.Bb={};var b,c=tb("div",{},b=tb("div",{id:"ae-stats-details-tabs",className:"goog-tab-bar goog-tab-bar-top"}),tb("div",{className:"goog-tab-bar-clear"}),a=tb("div",{id:"ae-stats-details-tabs-content",className:"goog-tab-content"})),d=new Y;d.K(b);G(d,"select",this.zb,!1,this);G(d,"unselect",this.zb,!1,this);b=0;for(var f;f=
-this.Wb[b];b++)if(f=ob("ae-stats-details-"+f)){var g=pb("h2",m,f)[0],h;h=g;var j=i;ib&&"innerText"in h?j=h.innerText.replace(/(\r\n|\r|\n)/g,"\n"):(j=[],Cb(h,j,!0),j=j.join(""));j=j.replace(/ \xAD /g," ").replace(/\xAD/g,"");j=j.replace(/\u200B/g,"");ib||(j=j.replace(/ +/g," "));j!=" "&&(j=j.replace(/^\s*/,""));h=j;vb(g);g=new kd(h);this.Bb[s(g)]=f;d.Fa(g,!0);a.appendChild(f);b==0?d.Z(g):L(f,!1)}ob("bd").appendChild(c)};td.prototype.zb=function(a){var b=this.Bb[s(a.target)];L(b,a.type=="select")};
-aa("ae.Stats.Details.Tabs",td);aa("goog.ui.Zippy",Z);Z.prototype.setExpanded=Z.prototype.R;aa("ae.Stats.MakeZippys",rd);rd.prototype.getExpandCollapse=rd.prototype.jc;rd.prototype.getZippys=rd.prototype.kc;qd.prototype.setExpanded=qd.prototype.R;var $=function(){this.eb=[];this.ib=[]},ud=[[5,0.2,1],[6,0.2,1.2],[5,0.25,1.25],[6,0.25,1.5],[4,0.5,2],[5,0.5,2.5],[6,0.5,3],[4,1,4],[5,1,5],[6,1,6],[4,2,8],[5,2,10]],vd=function(a){if(a<=0)return[2,0.5,1];for(var b=1;a<1;)a*=10,b/=10;for(;a>=10;)a/=10,b*=10;for(var c=0;c<ud.length;c++)if(a<=ud[c][2])return[ud[c][0],ud[c][1]*b,ud[c][2]*b];return[5,2*b,10*b]};$.prototype.hb="stats/static/pix.gif";$.prototype.z="ae-stats-gantt-";$.prototype.gb=0;$.prototype.write=function(a){this.ib.push(a)};
-var wd=function(a,b,c,d){a.write('<tr class="'+a.z+'axisrow"><td width="20%"></td><td>');a.write('<div class="'+a.z+'axis">');for(var f=0;f<=b;f++)a.write('<img class="'+a.z+'tick" src="'+a.hb+'" alt="" '),a.write('style="left:'+f*c*d+'%"\n>'),a.write('<span class="'+a.z+'scale" style="left:'+f*c*d+'%">'),a.write("&nbsp;"+f*c+"</span>");a.write("</div></td></tr>\n")};
-$.prototype.ic=function(){this.ib=[];var a=vd(this.gb),b=a[0],c=a[1],a=100/a[2];this.write('<table class="'+this.z+'table">\n');wd(this,b,c,a);for(var d=0;d<this.eb.length;d++){var f=this.eb[d];this.write('<tr class="'+this.z+'datarow"><td width="20%">');f.label.length>0&&(f.ja.length>0&&this.write('<a class="'+this.z+'link" href="'+f.ja+'">'),this.write(f.label),f.ja.length>0&&this.write("</a>"));this.write("</td>\n<td>");this.write('<div class="'+this.z+'container">');f.ja.length>0&&this.write('<a class="'+
-this.z+'link" href="'+f.ja+'"\n>');this.write('<img class="'+this.z+'bar" src="'+this.hb+'" alt="" ');this.write('style="left:'+f.start*a+"%;width:"+f.duration*a+'%;min-width:1px"\n>');f.fb>0&&(this.write('<img class="'+this.z+'extra" src="'+this.hb+'" alt="" '),this.write('style="left:'+f.start*a+"%;width:"+f.fb*a+'%"\n>'));f.ub.length>0&&(this.write('<span class="'+this.z+'inline" style="left:'+(f.start+Math.max(f.duration,f.fb))*a+'%">&nbsp;'),this.write(f.ub),this.write("</span>"));f.ja.length>
-0&&this.write("</a>");this.write("</div></td></tr>\n")}wd(this,b,c,a);this.write("</table>\n");return this.ib.join("")};$.prototype.gc=function(a,b,c,d,f,g){this.gb=Math.max(this.gb,Math.max(b+c,b+d));this.eb.push({label:a,start:b,duration:c,fb:d,ub:f,ja:g})};aa("Gantt",$);$.prototype.add_bar=$.prototype.gc;$.prototype.draw=$.prototype.ic;})();
+2?u.slice.call(a,b):u.slice.call(a,b,c)};var Ga=function(a,b){for(var c in a)b.call(i,a[c],c,a)},Ha=function(a,b,c){b in a&&e(Error('The object already contains the key "'+b+'"'));a[b]=c},Ia=function(a){var b={},c;for(c in a)b[a[c]]=c;return b},Ja="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(","),Ka=function(a,b){for(var c,d,f=1;f<arguments.length;f++){d=arguments[f];for(c in d)a[c]=d[c];for(var g=0;g<Ja.length;g++)c=Ja[g],Object.prototype.hasOwnProperty.call(d,c)&&(a[c]=d[c])}};var La,Ma,Na,Oa,Pa=function(){return o.navigator?o.navigator.userAgent:m};Oa=Na=Ma=La=!1;var Qa;if(Qa=Pa()){var Ra=o.navigator;La=Qa.indexOf("Opera")==0;Ma=!La&&Qa.indexOf("MSIE")!=-1;Na=!La&&Qa.indexOf("WebKit")!=-1;Oa=!La&&!Na&&Ra.product=="Gecko"}var Sa=La,v=Ma,w=Oa,x=Na,Ta=o.navigator,Ua=(Ta&&Ta.platform||"").indexOf("Mac")!=-1,Va;
+a:{var Wa="",Xa;if(Sa&&o.opera)var Ya=o.opera.version,Wa=typeof Ya=="function"?Ya():Ya;else if(w?Xa=/rv\:([^\);]+)(\)|;)/:v?Xa=/MSIE\s+([^\);]+)(\)|;)/:x&&(Xa=/WebKit\/(\S+)/),Xa)var Za=Xa.exec(Pa()),Wa=Za?Za[1]:"";if(v){var $a,ab=o.document;$a=ab?ab.documentMode:i;if($a>parseFloat(Wa)){Va=String($a);break a}}Va=Wa}var bb=Va,cb={},y=function(a){return cb[a]||(cb[a]=ua(bb,a)>=0)},db={},eb=function(){return db[9]||(db[9]=v&&document.documentMode&&document.documentMode>=9)};var fb,gb=!v||eb();!w&&!v||v&&eb()||w&&y("1.9.1");var hb=v&&!y("9");var ib=function(a){return(a=a.className)&&typeof a.split=="function"?a.split(/\s+/):[]},z=function(a,b){var c=ib(a),d=Ea(arguments,1),f;f=c;for(var g=0,h=0;h<d.length;h++)Aa(f,d[h])||(f.push(d[h]),g++);f=g==d.length;a.className=c.join(" ");return f},jb=function(a,b){var c=ib(a),d=Ea(arguments,1),f;f=c;for(var g=0,h=0;h<f.length;h++)Aa(d,f[h])&&(Fa(f,h--,1),g++);f=g==d.length;a.className=c.join(" ");return f};var mb=function(a){return a?new kb(lb(a)):fb||(fb=new kb)},nb=function(a){return p(a)?document.getElementById(a):a},ob=function(a,b,c){c=c||document;a=a&&a!="*"?a.toUpperCase():"";if(c.querySelectorAll&&c.querySelector&&(!x||document.compatMode=="CSS1Compat"||y("528"))&&(a||b))return c.querySelectorAll(a+(b?"."+b:""));if(b&&c.getElementsByClassName)if(c=c.getElementsByClassName(b),a){for(var d={},f=0,g=0,h;h=c[g];g++)a==h.nodeName&&(d[f++]=h);d.length=f;return d}else return c;c=c.getElementsByTagName(a||
+"*");if(b){d={};for(g=f=0;h=c[g];g++)a=h.className,typeof a.split=="function"&&Aa(a.split(/\s+/),b)&&(d[f++]=h);d.length=f;return d}else return c},qb=function(a,b){Ga(b,function(b,d){d=="style"?a.style.cssText=b:d=="class"?a.className=b:d=="for"?a.htmlFor=b:d in pb?a.setAttribute(pb[d],b):d.lastIndexOf("aria-",0)==0?a.setAttribute(d,b):a[d]=b})},pb={cellpadding:"cellPadding",cellspacing:"cellSpacing",colspan:"colSpan",rowspan:"rowSpan",valign:"vAlign",height:"height",width:"width",usemap:"useMap",
+frameborder:"frameBorder",maxlength:"maxLength",type:"type"},sb=function(a,b,c){return rb(document,arguments)},rb=function(a,b){var c=b[0],d=b[1];if(!gb&&d&&(d.name||d.type)){c=["<",c];d.name&&c.push(' name="',sa(d.name),'"');if(d.type){c.push(' type="',sa(d.type),'"');var f={};Ka(f,d);d=f;delete d.type}c.push(">");c=c.join("")}c=a.createElement(c);if(d)p(d)?c.className=d:ea(d)?z.apply(m,[c].concat(d)):qb(c,d);b.length>2&&tb(a,c,b);return c},tb=function(a,b,c){function d(c){c&&b.appendChild(p(c)?
+a.createTextNode(c):c)}for(var f=2;f<c.length;f++){var g=c[f];if(fa(g)&&!(ga(g)&&g.nodeType>0)){var h;a:{if(g&&typeof g.length=="number")if(ga(g)){h=typeof g.item=="function"||typeof g.item=="string";break a}else if(q(g)){h=typeof g.item=="function";break a}h=!1}ya(h?Da(g):g,d)}else d(g)}},ub=function(a){a&&a.parentNode&&a.parentNode.removeChild(a)},vb=function(a){for(;a&&a.nodeType!=1;)a=a.nextSibling;return a},wb=function(a,b){if(a.contains&&b.nodeType==1)return a==b||a.contains(b);if(typeof a.compareDocumentPosition!=
+"undefined")return a==b||Boolean(a.compareDocumentPosition(b)&16);for(;b&&a!=b;)b=b.parentNode;return b==a},lb=function(a){return a.nodeType==9?a:a.ownerDocument||a.document},xb=function(a,b){if("textContent"in a)a.textContent=b;else if(a.firstChild&&a.firstChild.nodeType==3){for(;a.lastChild!=a.firstChild;)a.removeChild(a.lastChild);a.firstChild.data=b}else{for(var c;c=a.firstChild;)a.removeChild(c);a.appendChild(lb(a).createTextNode(b))}},yb={SCRIPT:1,STYLE:1,HEAD:1,IFRAME:1,OBJECT:1},zb={IMG:" ",
+BR:"\n"},Ab=function(a){var b=a.getAttributeNode("tabindex");return b&&b.specified?(a=a.tabIndex,typeof a=="number"&&a>=0&&a<32768):!1},Bb=function(a,b,c){if(!(a.nodeName in yb))if(a.nodeType==3)c?b.push(String(a.nodeValue).replace(/(\r\n|\r|\n)/g,"")):b.push(a.nodeValue);else if(a.nodeName in zb)b.push(zb[a.nodeName]);else for(a=a.firstChild;a;)Bb(a,b,c),a=a.nextSibling},kb=function(a){this.H=a||o.document||document};n=kb.prototype;n.Ka=mb;n.a=function(a){return p(a)?this.H.getElementById(a):a};
+n.l=function(a,b,c){return rb(this.H,arguments)};n.createElement=function(a){return this.H.createElement(a)};n.createTextNode=function(a){return this.H.createTextNode(a)};n.appendChild=function(a,b){a.appendChild(b)};n.contains=wb;var Cb=new Function("a","return a");var Db,Eb=!v||eb(),Fb=v&&!y("8");var A=function(){};A.prototype.ab=!1;A.prototype.z=function(){if(!this.ab)this.ab=!0,this.f()};A.prototype.f=function(){this.gc&&Gb.apply(m,this.gc)};var Gb=function(a){for(var b=0,c=arguments.length;b<c;++b){var d=arguments[b];fa(d)?Gb.apply(m,d):d&&typeof d.z=="function"&&d.z()}};var B=function(a,b){this.type=a;this.currentTarget=this.target=b};t(B,A);n=B.prototype;n.f=function(){delete this.type;delete this.target;delete this.currentTarget};n.ba=!1;n.va=!0;n.stopPropagation=function(){this.ba=!0};n.preventDefault=function(){this.va=!1};var C=function(a,b){a&&this.ua(a,b)};t(C,B);var Hb=[1,4,2];n=C.prototype;n.target=m;n.relatedTarget=m;n.offsetX=0;n.offsetY=0;n.clientX=0;n.clientY=0;n.screenX=0;n.screenY=0;n.button=0;n.keyCode=0;n.charCode=0;n.ctrlKey=!1;n.altKey=!1;n.shiftKey=!1;n.metaKey=!1;n.Wb=!1;n.L=m;
+n.ua=function(a,b){var c=this.type=a.type;B.call(this,c);this.target=a.target||a.srcElement;this.currentTarget=b;var d=a.relatedTarget;if(d){if(w){var f;a:{try{Cb(d.nodeName);f=!0;break a}catch(g){}f=!1}f||(d=m)}}else if(c=="mouseover")d=a.fromElement;else if(c=="mouseout")d=a.toElement;this.relatedTarget=d;this.offsetX=a.offsetX!==i?a.offsetX:a.layerX;this.offsetY=a.offsetY!==i?a.offsetY:a.layerY;this.clientX=a.clientX!==i?a.clientX:a.pageX;this.clientY=a.clientY!==i?a.clientY:a.pageY;this.screenX=
+a.screenX||0;this.screenY=a.screenY||0;this.button=a.button;this.keyCode=a.keyCode||0;this.charCode=a.charCode||(c=="keypress"?a.keyCode:0);this.ctrlKey=a.ctrlKey;this.altKey=a.altKey;this.shiftKey=a.shiftKey;this.metaKey=a.metaKey;this.Wb=Ua?a.metaKey:a.ctrlKey;this.state=a.state;this.L=a;delete this.va;delete this.ba};var Ib=function(a){return Eb?a.L.button==0:a.type=="click"?!0:!!(a.L.button&Hb[0])};
+C.prototype.stopPropagation=function(){C.c.stopPropagation.call(this);this.L.stopPropagation?this.L.stopPropagation():this.L.cancelBubble=!0};C.prototype.preventDefault=function(){C.c.preventDefault.call(this);var a=this.L;if(a.preventDefault)a.preventDefault();else if(a.returnValue=!1,Fb)try{if(a.ctrlKey||a.keyCode>=112&&a.keyCode<=123)a.keyCode=-1}catch(b){}};C.prototype.f=function(){C.c.f.call(this);this.relatedTarget=this.currentTarget=this.target=this.L=m};var D=function(a,b){this.Cb=b;this.aa=[];a>this.Cb&&e(Error("[goog.structs.SimplePool] Initial cannot be greater than max"));for(var c=0;c<a;c++)this.aa.push(this.M?this.M():{})};t(D,A);D.prototype.M=m;D.prototype.Db=m;D.prototype.getObject=function(){return this.aa.length?this.aa.pop():this.M?this.M():{}};var Kb=function(a,b){a.aa.length<a.Cb?a.aa.push(b):Jb(a,b)},Jb=function(a,b){if(a.Db)a.Db(b);else if(ga(b))if(q(b.z))b.z();else for(var c in b)delete b[c]};
+D.prototype.f=function(){D.c.f.call(this);for(var a=this.aa;a.length;)Jb(this,a.pop());delete this.aa};var Lb,Mb=(Lb="ScriptEngine"in o&&o.ScriptEngine()=="JScript")?o.ScriptEngineMajorVersion()+"."+o.ScriptEngineMinorVersion()+"."+o.ScriptEngineBuildVersion():"0";var Nb=function(){},Ob=0;n=Nb.prototype;n.key=0;n.$=!1;n.Ab=!1;n.ua=function(a,b,c,d,f,g){q(a)?this.yb=!0:a&&a.handleEvent&&q(a.handleEvent)?this.yb=!1:e(Error("Invalid listener argument"));this.fa=a;this.rb=b;this.src=c;this.type=d;this.capture=!!f;this.Fa=g;this.Ab=!1;this.key=++Ob;this.$=!1};n.handleEvent=function(a){return this.yb?this.fa.call(this.Fa||this.src,a):this.fa.handleEvent.call(this.fa,a)};var Pb,Qb,Rb,Sb,Tb,Ub,Vb,Wb,Xb,Yb,Zb;
+(function(){function a(){return{I:0,F:0}}function b(){return[]}function c(){var a=function(b){b=h.call(a.src,a.key,b);if(!b)return b};return a}function d(){return new Nb}function f(){return new C}var g=Lb&&!(ua(Mb,"5.7")>=0),h;Ub=function(a){h=a};if(g){Pb=function(){return j.getObject()};Qb=function(a){Kb(j,a)};Rb=function(){return k.getObject()};Sb=function(a){Kb(k,a)};Tb=function(){return l.getObject()};Vb=function(){Kb(l,c())};Wb=function(){return L.getObject()};Xb=function(a){Kb(L,a)};Yb=function(){return r.getObject()};
+Zb=function(a){Kb(r,a)};var j=new D(0,600);j.M=a;var k=new D(0,600);k.M=b;var l=new D(0,600);l.M=c;var L=new D(0,600);L.M=d;var r=new D(0,600);r.M=f}else Pb=a,Qb=ba,Rb=b,Sb=ba,Tb=c,Vb=ba,Wb=d,Xb=ba,Yb=f,Zb=ba})();var $b={},E={},F={},ac={},G=function(a,b,c,d,f){if(b)if(ea(b)){for(var g=0;g<b.length;g++)G(a,b[g],c,d,f);return m}else{var d=!!d,h=E;b in h||(h[b]=Pb());h=h[b];d in h||(h[d]=Pb(),h.I++);var h=h[d],j=s(a),k;h.F++;if(h[j]){k=h[j];for(g=0;g<k.length;g++)if(h=k[g],h.fa==c&&h.Fa==f){if(h.$)break;return k[g].key}}else k=h[j]=Rb(),h.I++;g=Tb();g.src=a;h=Wb();h.ua(c,g,a,b,d,f);c=h.key;g.key=c;k.push(h);$b[c]=h;F[j]||(F[j]=Rb());F[j].push(h);a.addEventListener?(a==o||!a.qb)&&a.addEventListener(b,g,d):a.attachEvent(b in
+ac?ac[b]:ac[b]="on"+b,g);return c}else e(Error("Invalid event type"))},bc=function(a,b,c,d,f){if(ea(b))for(var g=0;g<b.length;g++)bc(a,b[g],c,d,f);else if(d=!!d,a=cc(a,b,d))for(g=0;g<a.length;g++)if(a[g].fa==c&&a[g].capture==d&&a[g].Fa==f){H(a[g].key);break}},H=function(a){if(!$b[a])return!1;var b=$b[a];if(b.$)return!1;var c=b.src,d=b.type,f=b.rb,g=b.capture;c.removeEventListener?(c==o||!c.qb)&&c.removeEventListener(d,f,g):c.detachEvent&&c.detachEvent(d in ac?ac[d]:ac[d]="on"+d,f);c=s(c);f=E[d][g][c];
+if(F[c]){var h=F[c];Ba(h,b);h.length==0&&delete F[c]}b.$=!0;f.wb=!0;dc(d,g,c,f);delete $b[a];return!0},dc=function(a,b,c,d){if(!d.La&&d.wb){for(var f=0,g=0;f<d.length;f++)if(d[f].$){var h=d[f].rb;h.src=m;Vb(h);Xb(d[f])}else f!=g&&(d[g]=d[f]),g++;d.length=g;d.wb=!1;g==0&&(Sb(d),delete E[a][b][c],E[a][b].I--,E[a][b].I==0&&(Qb(E[a][b]),delete E[a][b],E[a].I--),E[a].I==0&&(Qb(E[a]),delete E[a]))}},ec=function(a){var b,c=0,d=b==m;b=!!b;if(a==m)Ga(F,function(a){for(var f=a.length-1;f>=0;f--){var g=a[f];
+if(d||b==g.capture)H(g.key),c++}});else if(a=s(a),F[a])for(var a=F[a],f=a.length-1;f>=0;f--){var g=a[f];if(d||b==g.capture)H(g.key),c++}},cc=function(a,b,c){var d=E;return b in d&&(d=d[b],c in d&&(d=d[c],a=s(a),d[a]))?d[a]:m},gc=function(a,b,c,d,f){var g=1,b=s(b);if(a[b]){a.F--;a=a[b];a.La?a.La++:a.La=1;try{for(var h=a.length,j=0;j<h;j++){var k=a[j];k&&!k.$&&(g&=fc(k,f)!==!1)}}finally{a.La--,dc(c,d,b,a)}}return Boolean(g)},fc=function(a,b){var c=a.handleEvent(b);a.Ab&&H(a.key);return c};
+Ub(function(a,b){if(!$b[a])return!0;var c=$b[a],d=c.type,f=E;if(!(d in f))return!0;var f=f[d],g,h;Db===i&&(Db=v&&!o.addEventListener);if(Db){var j;if(!(j=b))a:{j="window.event".split(".");for(var k=o;g=j.shift();)if(k[g]!=m)k=k[g];else{j=m;break a}j=k}g=j;j=!0 in f;k=!1 in f;if(j){if(g.keyCode<0||g.returnValue!=i)return!0;a:{var l=!1;if(g.keyCode==0)try{g.keyCode=-1;break a}catch(L){l=!0}if(l||g.returnValue==i)g.returnValue=!0}}l=Yb();l.ua(g,this);g=!0;try{if(j){for(var r=Rb(),I=l.currentTarget;I;I=
+I.parentNode)r.push(I);h=f[!0];h.F=h.I;for(var O=r.length-1;!l.ba&&O>=0&&h.F;O--)l.currentTarget=r[O],g&=gc(h,r[O],d,!0,l);if(k){h=f[!1];h.F=h.I;for(O=0;!l.ba&&O<r.length&&h.F;O++)l.currentTarget=r[O],g&=gc(h,r[O],d,!1,l)}}else g=fc(c,l)}finally{if(r)r.length=0,Sb(r);l.z();Zb(l)}return g}d=new C(b,this);try{g=fc(c,d)}finally{d.z()}return g});var hc=function(a){this.Bb=a;this.Ma=[]};t(hc,A);var ic=[],J=function(a,b,c,d){ea(c)||(ic[0]=c,c=ic);for(var f=0;f<c.length;f++)a.Ma.push(G(b,c[f],d||a,!1,a.Bb||a));return a},K=function(a,b,c,d,f,g){if(ea(c))for(var h=0;h<c.length;h++)K(a,b,c[h],d,f,g);else{a:{d=d||a;g=g||a.Bb||a;f=!!f;if(b=cc(b,c,f))for(c=0;c<b.length;c++)if(!b[c].$&&b[c].fa==d&&b[c].capture==f&&b[c].Fa==g){b=b[c];break a}b=m}if(b)b=b.key,H(b),Ba(a.Ma,b)}return a},jc=function(a){ya(a.Ma,H);a.Ma.length=0};
+hc.prototype.f=function(){hc.c.f.call(this);jc(this)};hc.prototype.handleEvent=function(){e(Error("EventHandler.handleEvent not implemented"))};var kc=function(){};t(kc,A);n=kc.prototype;n.qb=!0;n.Ea=m;n.bb=function(a){this.Ea=a};n.addEventListener=function(a,b,c,d){G(this,a,b,c,d)};n.removeEventListener=function(a,b,c,d){bc(this,a,b,c,d)};
+n.dispatchEvent=function(a){var b=a.type||a,c=E;if(b in c){if(p(a))a=new B(a,this);else if(a instanceof B)a.target=a.target||this;else{var d=a,a=new B(b,this);Ka(a,d)}var d=1,f,c=c[b],b=!0 in c,g;if(b){f=[];for(g=this;g;g=g.Ea)f.push(g);g=c[!0];g.F=g.I;for(var h=f.length-1;!a.ba&&h>=0&&g.F;h--)a.currentTarget=f[h],d&=gc(g,f[h],a.type,!0,a)&&a.va!=!1}if(!1 in c)if(g=c[!1],g.F=g.I,b)for(h=0;!a.ba&&h<f.length&&g.F;h++)a.currentTarget=f[h],d&=gc(g,f[h],a.type,!1,a)&&a.va!=!1;else for(f=this;!a.ba&&f&&
+g.F;f=f.Ea)a.currentTarget=f,d&=gc(g,f,a.type,!1,a)&&a.va!=!1;a=Boolean(d)}else a=!0;return a};n.f=function(){kc.c.f.call(this);ec(this);this.Ea=m};var M=function(a,b){a.style.display=b?"":"none"},lc=w?"MozUserSelect":x?"WebkitUserSelect":m,mc=function(a,b,c){c=!c?a.getElementsByTagName("*"):m;if(lc){if(b=b?"none":"",a.style[lc]=b,c)for(var a=0,d;d=c[a];a++)d.style[lc]=b}else if(v||Sa)if(b=b?"on":"",a.setAttribute("unselectable",b),c)for(a=0;d=c[a];a++)d.setAttribute("unselectable",b)};var nc=function(){};ca(nc);nc.prototype.Zb=0;nc.Q();var N=function(a){this.q=a||mb();this.sa=oc};t(N,kc);N.prototype.Yb=nc.Q();var oc=m,pc=function(a,b){switch(a){case 1:return b?"disable":"enable";case 2:return b?"highlight":"unhighlight";case 4:return b?"activate":"deactivate";case 8:return b?"select":"unselect";case 16:return b?"check":"uncheck";case 32:return b?"focus":"blur";case 64:return b?"open":"close"}e(Error("Invalid component state"))};n=N.prototype;n.ga=m;n.e=!1;n.d=m;n.sa=m;n.Lb=m;n.o=m;n.p=m;n.t=m;n.lb=!1;
+var qc=function(a){return a.ga||(a.ga=":"+(a.Yb.Zb++).toString(36))},rc=function(a,b){if(a.o&&a.o.t){var c=a.o.t,d=a.ga;d in c&&delete c[d];Ha(a.o.t,b,a)}a.ga=b};N.prototype.a=function(){return this.d};var sc=function(a){return a.da||(a.da=new hc(a))},uc=function(a,b){a==b&&e(Error("Unable to set parent component"));b&&a.o&&a.ga&&tc(a.o,a.ga)&&a.o!=b&&e(Error("Unable to set parent component"));a.o=b;N.c.bb.call(a,b)};n=N.prototype;n.getParent=function(){return this.o};
+n.bb=function(a){this.o&&this.o!=a&&e(Error("Method not supported"));N.c.bb.call(this,a)};n.Ka=function(){return this.q};n.l=function(){this.d=this.q.createElement("div")};n.J=function(a){if(this.e)e(Error("Component already rendered"));else if(a&&this.Z(a)){this.lb=!0;if(!this.q||this.q.H!=lb(a))this.q=mb(a);this.Ua(a);this.r()}else e(Error("Invalid element to decorate"))};n.Z=function(){return!0};n.Ua=function(a){this.d=a};n.r=function(){this.e=!0;vc(this,function(a){!a.e&&a.a()&&a.r()})};
+n.V=function(){vc(this,function(a){a.e&&a.V()});this.da&&jc(this.da);this.e=!1};n.f=function(){N.c.f.call(this);this.e&&this.V();this.da&&(this.da.z(),delete this.da);vc(this,function(a){a.z()});!this.lb&&this.d&&ub(this.d);this.o=this.Lb=this.d=this.t=this.p=m};n.Da=function(a,b){this.Ra(a,wc(this),b)};
+n.Ra=function(a,b,c){a.e&&(c||!this.e)&&e(Error("Component already rendered"));(b<0||b>wc(this))&&e(Error("Child component index out of bounds"));if(!this.t||!this.p)this.t={},this.p=[];a.getParent()==this?(this.t[qc(a)]=a,Ba(this.p,a)):Ha(this.t,qc(a),a);uc(a,this);Fa(this.p,b,0,a);a.e&&this.e&&a.getParent()==this?(c=this.B(),c.insertBefore(a.a(),c.childNodes[b]||m)):c?(this.d||this.l(),c=P(this,b+1),b=this.B(),c=c?c.d:m,a.e&&e(Error("Component already rendered")),a.d||a.l(),b?b.insertBefore(a.d,
+c||m):a.q.H.body.appendChild(a.d),(!a.o||a.o.e)&&a.r()):this.e&&!a.e&&a.d&&a.r()};n.B=function(){return this.d};var xc=function(a){if(a.sa==m){var b;a:{b=a.e?a.d:a.q.H.body;var c=lb(b);if(c.defaultView&&c.defaultView.getComputedStyle&&(b=c.defaultView.getComputedStyle(b,m))){b=b.direction||b.getPropertyValue("direction");break a}b=""}a.sa="rtl"==(b||((a.e?a.d:a.q.H.body).currentStyle?(a.e?a.d:a.q.H.body).currentStyle.direction:m)||(a.e?a.d:a.q.H.body).style.direction)}return a.sa};
+N.prototype.pa=function(a){this.e&&e(Error("Component already rendered"));this.sa=a};var wc=function(a){return a.p?a.p.length:0},tc=function(a,b){return a.t&&b?(b in a.t?a.t[b]:i)||m:m},P=function(a,b){return a.p?a.p[b]||m:m},vc=function(a,b,c){a.p&&ya(a.p,b,c)},yc=function(a,b){return a.p&&b?xa(a.p,b):-1};
+N.prototype.removeChild=function(a,b){if(a){var c=p(a)?a:qc(a),a=tc(this,c);if(c&&a){var d=this.t;c in d&&delete d[c];Ba(this.p,a);b&&(a.V(),a.d&&ub(a.d));uc(a,m)}}a||e(Error("Child is not in parent component"));return a};var zc=function(a,b){a.setAttribute("role",b);a.mc=b};var Bc=function(a,b,c,d,f){if(!v&&(!x||!y("525")))return!0;if(Ua&&f)return Ac(a);if(f&&!d)return!1;if(!c&&(b==17||b==18))return!1;if(v&&d&&b==a)return!1;switch(a){case 13:return!(v&&eb());case 27:return!x}return Ac(a)},Ac=function(a){if(a>=48&&a<=57)return!0;if(a>=96&&a<=106)return!0;if(a>=65&&a<=90)return!0;if(x&&a==0)return!0;switch(a){case 32:case 63:case 107:case 109:case 110:case 111:case 186:case 189:case 187:case 188:case 190:case 191:case 192:case 222:case 219:case 220:case 221:return!0;default:return!1}};var Q=function(a,b){a&&Cc(this,a,b)};t(Q,kc);n=Q.prototype;n.d=m;n.Ia=m;n.Wa=m;n.Ja=m;n.S=-1;n.R=-1;
+var Dc={3:13,12:144,63232:38,63233:40,63234:37,63235:39,63236:112,63237:113,63238:114,63239:115,63240:116,63241:117,63242:118,63243:119,63244:120,63245:121,63246:122,63247:123,63248:44,63272:46,63273:36,63275:35,63276:33,63277:34,63289:144,63302:45},Ec={Up:38,Down:40,Left:37,Right:39,Enter:13,F1:112,F2:113,F3:114,F4:115,F5:116,F6:117,F7:118,F8:119,F9:120,F10:121,F11:122,F12:123,"U+007F":46,Home:36,End:35,PageUp:33,PageDown:34,Insert:45},Fc={61:187,59:186},Gc=v||x&&y("525");
+Q.prototype.Qb=function(a){if(x&&(this.S==17&&!a.ctrlKey||this.S==18&&!a.altKey))this.R=this.S=-1;Gc&&!Bc(a.keyCode,this.S,a.shiftKey,a.ctrlKey,a.altKey)?this.handleEvent(a):this.R=w&&a.keyCode in Fc?Fc[a.keyCode]:a.keyCode};Q.prototype.Rb=function(){this.R=this.S=-1};
+Q.prototype.handleEvent=function(a){var b=a.L,c,d;v&&a.type=="keypress"?(c=this.R,d=c!=13&&c!=27?b.keyCode:0):x&&a.type=="keypress"?(c=this.R,d=b.charCode>=0&&b.charCode<63232&&Ac(c)?b.charCode:0):Sa?(c=this.R,d=Ac(c)?b.keyCode:0):(c=b.keyCode||this.R,d=b.charCode||0,Ua&&d==63&&!c&&(c=191));var f=c,g=b.keyIdentifier;c?c>=63232&&c in Dc?f=Dc[c]:c==25&&a.shiftKey&&(f=9):g&&g in Ec&&(f=Ec[g]);a=f==this.S;this.S=f;b=new Hc(f,d,a,b);try{this.dispatchEvent(b)}finally{b.z()}};Q.prototype.a=function(){return this.d};
+var Cc=function(a,b,c){a.Ja&&a.detach();a.d=b;a.Ia=G(a.d,"keypress",a,c);a.Wa=G(a.d,"keydown",a.Qb,c,a);a.Ja=G(a.d,"keyup",a.Rb,c,a)};Q.prototype.detach=function(){if(this.Ia)H(this.Ia),H(this.Wa),H(this.Ja),this.Ja=this.Wa=this.Ia=m;this.d=m;this.R=this.S=-1};Q.prototype.f=function(){Q.c.f.call(this);this.detach()};var Hc=function(a,b,c,d){d&&this.ua(d,i);this.type="key";this.keyCode=a;this.charCode=b;this.repeat=c};t(Hc,C);var Jc=function(a,b){a||e(Error("Invalid class name "+a));q(b)||e(Error("Invalid decorator function "+b));Ic[a]=b},Kc={},Ic={};var R=function(){},Lc;ca(R);n=R.prototype;n.ea=function(){};n.l=function(a){return a.Ka().l("div",this.ta(a).join(" "),a.xa)};n.B=function(a){return a};n.ra=function(a,b,c){if(a=a.a?a.a():a)if(v&&!y("7")){var d=Mc(ib(a),b);d.push(b);ja(c?z:jb,a).apply(m,d)}else c?z(a,b):jb(a,b)};n.Z=function(){return!0};
+n.J=function(a,b){b.id&&rc(a,b.id);var c=this.B(b);a.xa=c&&c.firstChild?c.firstChild.nextSibling?Da(c.childNodes):c.firstChild:m;var d=0,f=this.m(),g=this.m(),h=!1,j=!1,c=!1,k=ib(b);ya(k,function(a){if(!h&&a==f)h=!0,g==f&&(j=!0);else if(!j&&a==g)j=!0;else{var b=d;if(!this.ob)this.Ha||Nc(this),this.ob=Ia(this.Ha);a=parseInt(this.ob[a],10);d=b|(isNaN(a)?0:a)}},this);a.h=d;h||(k.push(f),g==f&&(j=!0));j||k.push(g);var l=a.C;l&&k.push.apply(k,l);if(v&&!y("7")){var L=Mc(k);L.length>0&&(k.push.apply(k,L),
+c=!0)}if(!h||!j||l||c)b.className=k.join(" ");return b};n.Na=function(a){xc(a)&&this.pa(a.a(),!0);a.isEnabled()&&this.la(a,a.G())};n.za=function(a,b){mc(a,!b,!v&&!Sa)};n.pa=function(a,b){this.ra(a,this.m()+"-rtl",b)};n.U=function(a){var b;return a.s&32&&(b=a.k())?Ab(b):!1};n.la=function(a,b){var c;if(a.s&32&&(c=a.k())){if(!b&&a.h&32){try{c.blur()}catch(d){}a.h&32&&a.ma(m)}if(Ab(c)!=b)b?c.tabIndex=0:c.removeAttribute("tabIndex")}};n.ia=function(a,b){M(a,b)};
+n.v=function(a,b,c){var d=a.a();if(d){var f=Oc(this,b);f&&this.ra(a,f,c);Lc||(Lc={1:"disabled",4:"pressed",8:"selected",16:"checked",64:"expanded"});(a=Lc[b])&&d.setAttribute("aria-"+a,c)}};n.k=function(a){return a.a()};n.m=function(){return"goog-control"};n.ta=function(a){var b=this.m(),c=[b],d=this.m();d!=b&&c.push(d);b=a.h;for(d=[];b;){var f=b&-b;d.push(Oc(this,f));b&=~f}c.push.apply(c,d);(a=a.C)&&c.push.apply(c,a);v&&!y("7")&&c.push.apply(c,Mc(c));return c};
+var Mc=function(a,b){var c=[];b&&(a=a.concat([b]));ya([],function(d){za(d,ja(Aa,a))&&(!b||Aa(d,b))&&c.push(d.join("_"))});return c},Oc=function(a,b){a.Ha||Nc(a);return a.Ha[b]},Nc=function(a){var b=a.m();a.Ha={1:b+"-disabled",2:b+"-hover",4:b+"-active",8:b+"-selected",16:b+"-checked",32:b+"-focused",64:b+"-open"}};var S=function(a,b,c){N.call(this,c);if(!b){for(var b=this.constructor,d;b;){d=s(b);if(d=Kc[d])break;b=b.c?b.c.constructor:m}b=d?q(d.Q)?d.Q():new d:m}this.b=b;this.xa=a};t(S,N);n=S.prototype;n.xa=m;n.h=0;n.s=39;n.Xb=255;n.Ba=0;n.n=!0;n.C=m;n.ja=!0;n.ya=!1;n.k=function(){return this.b.k(this)};n.Aa=function(){return this.u||(this.u=new Q)};n.vb=function(){return this.b};
+n.ra=function(a,b){if(b){if(a)this.C?Aa(this.C,a)||this.C.push(a):this.C=[a],this.b.ra(this,a,!0)}else if(a&&this.C){Ba(this.C,a);if(this.C.length==0)this.C=m;this.b.ra(this,a,!1)}};n.l=function(){var a=this.b.l(this);this.d=a;var b=this.b.ea();b&&zc(a,b);this.ya||this.b.za(a,!1);this.G()||this.b.ia(a,!1)};n.B=function(){return this.b.B(this.a())};n.Z=function(a){return this.b.Z(a)};
+n.Ua=function(a){this.d=a=this.b.J(this,a);var b=this.b.ea();b&&zc(a,b);this.ya||this.b.za(a,!1);this.n=a.style.display!="none"};n.r=function(){S.c.r.call(this);this.b.Na(this);if(this.s&-2&&(this.ja&&Pc(this,!0),this.s&32)){var a=this.k();if(a){var b=this.Aa();Cc(b,a);J(J(J(sc(this),b,"key",this.O),a,"focus",this.oa),a,"blur",this.ma)}}};
+var Pc=function(a,b){var c=sc(a),d=a.a();b?(J(J(J(J(c,d,"mouseover",a.Za),d,"mousedown",a.ka),d,"mouseup",a.$a),d,"mouseout",a.Ya),v&&J(c,d,"dblclick",a.sb)):(K(K(K(K(c,d,"mouseover",a.Za),d,"mousedown",a.ka),d,"mouseup",a.$a),d,"mouseout",a.Ya),v&&K(c,d,"dblclick",a.sb))};n=S.prototype;n.V=function(){S.c.V.call(this);this.u&&this.u.detach();this.G()&&this.isEnabled()&&this.b.la(this,!1)};n.f=function(){S.c.f.call(this);this.u&&(this.u.z(),delete this.u);delete this.b;this.C=this.xa=m};
+n.pa=function(a){S.c.pa.call(this,a);var b=this.a();b&&this.b.pa(b,a)};n.za=function(a){this.ya=a;var b=this.a();b&&this.b.za(b,a)};n.G=function(){return this.n};n.ia=function(a,b){if(b||this.n!=a&&this.dispatchEvent(a?"show":"hide")){var c=this.a();c&&this.b.ia(c,a);this.isEnabled()&&this.b.la(this,a);this.n=a;return!0}return!1};n.isEnabled=function(){return!(this.h&1)};
+n.qa=function(a){var b=this.getParent();if((!b||typeof b.isEnabled!="function"||b.isEnabled())&&T(this,1,!a))a||(this.setActive(!1),this.A(!1)),this.G()&&this.b.la(this,a),this.v(1,!a)};n.A=function(a){T(this,2,a)&&this.v(2,a)};n.setActive=function(a){T(this,4,a)&&this.v(4,a)};var Qc=function(a,b){T(a,8,b)&&a.v(8,b)},Rc=function(a,b){T(a,64,b)&&a.v(64,b)};S.prototype.v=function(a,b){if(this.s&a&&b!=!!(this.h&a))this.b.v(this,a,b),this.h=b?this.h|a:this.h&~a};
+var Sc=function(a,b,c){a.e&&a.h&b&&!c&&e(Error("Component already rendered"));!c&&a.h&b&&a.v(b,!1);a.s=c?a.s|b:a.s&~b},U=function(a,b){return!!(a.Xb&b)&&!!(a.s&b)},T=function(a,b,c){return!!(a.s&b)&&!!(a.h&b)!=c&&(!(a.Ba&b)||a.dispatchEvent(pc(b,c)))&&!a.ab};n=S.prototype;n.Za=function(a){(!a.relatedTarget||!wb(this.a(),a.relatedTarget))&&this.dispatchEvent("enter")&&this.isEnabled()&&U(this,2)&&this.A(!0)};
+n.Ya=function(a){if((!a.relatedTarget||!wb(this.a(),a.relatedTarget))&&this.dispatchEvent("leave"))U(this,4)&&this.setActive(!1),U(this,2)&&this.A(!1)};n.ka=function(a){if(this.isEnabled()&&(U(this,2)&&this.A(!0),Ib(a)&&(!x||!Ua||!a.ctrlKey)))U(this,4)&&this.setActive(!0),this.b.U(this)&&this.k().focus();!this.ya&&Ib(a)&&(!x||!Ua||!a.ctrlKey)&&a.preventDefault()};n.$a=function(a){this.isEnabled()&&(U(this,2)&&this.A(!0),this.h&4&&Tc(this,a)&&U(this,4)&&this.setActive(!1))};
+n.sb=function(a){this.isEnabled()&&Tc(this,a)};var Tc=function(a,b){if(U(a,16)){var c=!(a.h&16);T(a,16,c)&&a.v(16,c)}U(a,8)&&Qc(a,!0);U(a,64)&&Rc(a,!(a.h&64));c=new B("action",a);if(b)for(var d=["altKey","ctrlKey","metaKey","shiftKey","platformModifierKey"],f,g=0;f=d[g];g++)c[f]=b[f];return a.dispatchEvent(c)};S.prototype.oa=function(){U(this,32)&&T(this,32,!0)&&this.v(32,!0)};S.prototype.ma=function(){U(this,4)&&this.setActive(!1);U(this,32)&&T(this,32,!1)&&this.v(32,!1)};
+S.prototype.O=function(a){return this.G()&&this.isEnabled()&&this.jb(a)?(a.preventDefault(),a.stopPropagation(),!0):!1};S.prototype.jb=function(a){return a.keyCode==13&&Tc(this,a)};q(S)||e(Error("Invalid component class "+S));q(R)||e(Error("Invalid renderer class "+R));var Uc=s(S);Kc[Uc]=R;Jc("goog-control",function(){return new S(m)});var Vc=function(){};t(Vc,R);ca(Vc);Vc.prototype.l=function(a){return a.Ka().l("div",this.m())};Vc.prototype.J=function(a,b){if(b.tagName=="HR"){var c=b,b=this.l(a);c.parentNode&&c.parentNode.insertBefore(b,c);ub(c)}else z(b,this.m());return b};Vc.prototype.m=function(){return"goog-menuseparator"};var Wc=function(a,b){S.call(this,m,a||Vc.Q(),b);Sc(this,1,!1);Sc(this,2,!1);Sc(this,4,!1);Sc(this,32,!1);this.h=1};t(Wc,S);Wc.prototype.r=function(){Wc.c.r.call(this);zc(this.a(),"separator")};Jc("goog-menuseparator",function(){return new Wc});var V=function(){};ca(V);V.prototype.ea=function(){};var Xc=function(a,b){if(a)a.tabIndex=b?0:-1};n=V.prototype;n.l=function(a){return a.Ka().l("div",this.ta(a).join(" "))};n.B=function(a){return a};n.Z=function(a){return a.tagName=="DIV"};n.J=function(a,b){b.id&&rc(a,b.id);var c=this.m(),d=!1,f=ib(b);f&&ya(f,function(b){b==c?d=!0:b&&this.Va(a,b,c)},this);d||z(b,c);Yc(a,this.B(b));return b};
+n.Va=function(a,b,c){b==c+"-disabled"?a.qa(!1):b==c+"-horizontal"?Zc(a,"horizontal"):b==c+"-vertical"&&Zc(a,"vertical")};var Yc=function(a,b){if(b)for(var c=b.firstChild,d;c&&c.parentNode==b;){d=c.nextSibling;if(c.nodeType==1){var f;a:{f=i;for(var g=ib(c),h=0,j=g.length;h<j;h++)if(f=g[h]in Ic?Ic[g[h]]():m)break a;f=m}if(f)f.d=c,a.isEnabled()||f.qa(!1),a.Da(f),f.J(c)}else(!c.nodeValue||ma(c.nodeValue)=="")&&b.removeChild(c);c=d}};
+V.prototype.Na=function(a){a=a.a();mc(a,!0,w);if(v)a.hideFocus=!0;var b=this.ea();b&&zc(a,b)};V.prototype.k=function(a){return a.a()};V.prototype.m=function(){return"goog-container"};V.prototype.ta=function(a){var b=this.m(),c=[b,a.P=="horizontal"?b+"-horizontal":b+"-vertical"];a.isEnabled()||c.push(b+"-disabled");return c};var W=function(a,b,c){N.call(this,c);this.b=b||V.Q();this.P=a||"vertical"};t(W,N);n=W.prototype;n.Oa=m;n.u=m;n.b=m;n.P=m;n.n=!0;n.X=!0;n.Xa=!0;n.i=-1;n.g=m;n.ca=!1;n.Pb=!1;n.Ob=!0;n.N=m;n.k=function(){return this.Oa||this.b.k(this)};n.Aa=function(){return this.u||(this.u=new Q(this.k()))};n.vb=function(){return this.b};n.l=function(){this.d=this.b.l(this)};n.B=function(){return this.b.B(this.a())};n.Z=function(a){return this.b.Z(a)};
+n.Ua=function(a){this.d=this.b.J(this,a);if(a.style.display=="none")this.n=!1};n.r=function(){W.c.r.call(this);vc(this,function(a){a.e&&$c(this,a)},this);var a=this.a();this.b.Na(this);this.ia(this.n,!0);J(J(J(J(J(J(J(J(sc(this),this,"enter",this.Hb),this,"highlight",this.Ib),this,"unhighlight",this.Kb),this,"open",this.Jb),this,"close",this.Fb),a,"mousedown",this.ka),lb(a),"mouseup",this.Gb),a,["mousedown","mouseup","mouseover","mouseout"],this.Eb);this.U()&&ad(this,!0)};
+var ad=function(a,b){var c=sc(a),d=a.k();b?J(J(J(c,d,"focus",a.oa),d,"blur",a.ma),a.Aa(),"key",a.O):K(K(K(c,d,"focus",a.oa),d,"blur",a.ma),a.Aa(),"key",a.O)};n=W.prototype;n.V=function(){bd(this,-1);this.g&&Rc(this.g,!1);this.ca=!1;W.c.V.call(this)};n.f=function(){W.c.f.call(this);if(this.u)this.u.z(),this.u=m;this.b=this.g=this.N=this.Oa=m};n.Hb=function(){return!0};
+n.Ib=function(a){var b=yc(this,a.target);if(b>-1&&b!=this.i){var c=P(this,this.i);c&&c.A(!1);this.i=b;c=P(this,this.i);this.ca&&c.setActive(!0);this.Ob&&this.g&&c!=this.g&&(c.s&64?Rc(c,!0):Rc(this.g,!1))}this.a().setAttribute("aria-activedescendant",a.target.a().id)};n.Kb=function(a){if(a.target==P(this,this.i))this.i=-1;this.a().setAttribute("aria-activedescendant","")};n.Jb=function(a){if((a=a.target)&&a!=this.g&&a.getParent()==this)this.g&&Rc(this.g,!1),this.g=a};
+n.Fb=function(a){if(a.target==this.g)this.g=m};n.ka=function(a){if(this.X)this.ca=!0;var b=this.k();b&&Ab(b)?b.focus():a.preventDefault()};n.Gb=function(){this.ca=!1};n.Eb=function(a){var b;a:{b=a.target;if(this.N)for(var c=this.a();b&&b!==c;){var d=b.id;if(d in this.N){b=this.N[d];break a}b=b.parentNode}b=m}if(b)switch(a.type){case "mousedown":b.ka(a);break;case "mouseup":b.$a(a);break;case "mouseover":b.Za(a);break;case "mouseout":b.Ya(a)}};n.oa=function(){};
+n.ma=function(){bd(this,-1);this.ca=!1;this.g&&Rc(this.g,!1)};n.O=function(a){return this.isEnabled()&&this.G()&&(wc(this)!=0||this.Oa)&&this.jb(a)?(a.preventDefault(),a.stopPropagation(),!0):!1};
+n.jb=function(a){var b=P(this,this.i);if(b&&typeof b.O=="function"&&b.O(a))return!0;if(this.g&&this.g!=b&&typeof this.g.O=="function"&&this.g.O(a))return!0;if(a.shiftKey||a.ctrlKey||a.metaKey||a.altKey)return!1;switch(a.keyCode){case 27:if(this.U())this.k().blur();else return!1;break;case 36:cd(this);break;case 35:dd(this);break;case 38:if(this.P=="vertical")ed(this);else return!1;break;case 37:if(this.P=="horizontal")xc(this)?fd(this):ed(this);else return!1;break;case 40:if(this.P=="vertical")fd(this);
+else return!1;break;case 39:if(this.P=="horizontal")xc(this)?ed(this):fd(this);else return!1;break;default:return!1}return!0};var $c=function(a,b){var c=b.a(),c=c.id||(c.id=qc(b));if(!a.N)a.N={};a.N[c]=b};W.prototype.Da=function(a,b){W.c.Da.call(this,a,b)};W.prototype.Ra=function(a,b,c){a.Ba|=2;a.Ba|=64;(this.U()||!this.Pb)&&Sc(a,32,!1);a.e&&!1!=a.ja&&Pc(a,!1);a.ja=!1;W.c.Ra.call(this,a,b,c);c&&this.e&&$c(this,a);b<=this.i&&this.i++};
+W.prototype.removeChild=function(a,b){if(a=p(a)?tc(this,a):a){var c=yc(this,a);c!=-1&&(c==this.i?a.A(!1):c<this.i&&this.i--);var d=a.a();if(d&&d.id)c=this.N,d=d.id,d in c&&delete c[d]}c=a=W.c.removeChild.call(this,a,b);c.e&&!0!=c.ja&&Pc(c,!0);c.ja=!0;return a};var Zc=function(a,b){a.a()&&e(Error("Component already rendered"));a.P=b};n=W.prototype;n.G=function(){return this.n};
+n.ia=function(a,b){if(b||this.n!=a&&this.dispatchEvent(a?"show":"hide")){this.n=a;var c=this.a();c&&(M(c,a),this.U()&&Xc(this.k(),this.X&&this.n),b||this.dispatchEvent(this.n?"aftershow":"afterhide"));return!0}return!1};n.isEnabled=function(){return this.X};n.qa=function(a){if(this.X!=a&&this.dispatchEvent(a?"enable":"disable"))a?(this.X=!0,vc(this,function(a){a.pb?delete a.pb:a.qa(!0)})):(vc(this,function(a){a.isEnabled()?a.qa(!1):a.pb=!0}),this.ca=this.X=!1),this.U()&&Xc(this.k(),a&&this.n)};
+n.U=function(){return this.Xa};n.la=function(a){a!=this.Xa&&this.e&&ad(this,a);this.Xa=a;this.X&&this.n&&Xc(this.k(),a)};var bd=function(a,b){var c=P(a,b);c?c.A(!0):a.i>-1&&P(a,a.i).A(!1)};W.prototype.A=function(a){bd(this,yc(this,a))};
+var cd=function(a){gd(a,function(a,c){return(a+1)%c},wc(a)-1)},dd=function(a){gd(a,function(a,c){a--;return a<0?c-1:a},0)},fd=function(a){gd(a,function(a,c){return(a+1)%c},a.i)},ed=function(a){gd(a,function(a,c){a--;return a<0?c-1:a},a.i)},gd=function(a,b,c){for(var c=c<0?yc(a,a.g):c,d=wc(a),c=b.call(a,c,d),f=0;f<=d;){var g=P(a,c);if(g&&g.G()&&g.isEnabled()&&g.s&2){a.Ta(c);break}f++;c=b.call(a,c,d)}};W.prototype.Ta=function(a){bd(this,a)};var hd=function(){};t(hd,R);ca(hd);n=hd.prototype;n.m=function(){return"goog-tab"};n.ea=function(){return"tab"};n.l=function(a){var b=hd.c.l.call(this,a);(a=a.Pa())&&this.Sa(b,a);return b};n.J=function(a,b){var b=hd.c.J.call(this,a,b),c=this.Pa(b);if(c)a.mb=c;if(a.h&8&&(c=a.getParent())&&q(c.W))a.v(8,!1),c.W(a);return b};n.Pa=function(a){return a.title||""};n.Sa=function(a,b){if(a)a.title=b||""};var id=function(a,b,c){S.call(this,a,b||hd.Q(),c);Sc(this,8,!0);this.Ba|=9};t(id,S);id.prototype.Pa=function(){return this.mb};id.prototype.Sa=function(a){this.vb().Sa(this.a(),a);this.mb=a};Jc("goog-tab",function(){return new id(m)});var X=function(){};t(X,V);ca(X);X.prototype.m=function(){return"goog-tab-bar"};X.prototype.ea=function(){return"tablist"};X.prototype.Va=function(a,b,c){if(!this.ub)this.Ga||jd(this),this.ub=Ia(this.Ga);var d=this.ub[b];d?(Zc(a,kd(d)),a.nb=d):X.c.Va.call(this,a,b,c)};X.prototype.ta=function(a){var b=X.c.ta.call(this,a);this.Ga||jd(this);b.push(this.Ga[a.nb]);return b};var jd=function(a){var b=a.m();a.Ga={top:b+"-top",bottom:b+"-bottom",start:b+"-start",end:b+"-end"}};var Y=function(a,b,c){a=a||"top";Zc(this,kd(a));this.nb=a;W.call(this,this.P,b||X.Q(),c);ld(this)};t(Y,W);n=Y.prototype;n.Sb=!0;n.D=m;n.r=function(){Y.c.r.call(this);ld(this)};n.f=function(){Y.c.f.call(this);this.D=m};n.removeChild=function(a,b){md(this,a);return Y.c.removeChild.call(this,a,b)};n.Ta=function(a){Y.c.Ta.call(this,a);this.Sb&&this.W(P(this,a))};n.W=function(a){a?Qc(a,!0):this.D&&Qc(this.D,!1)};
+var md=function(a,b){if(b&&b==a.D){for(var c=yc(a,b),d=c-1;b=P(a,d);d--)if(b.G()&&b.isEnabled()){a.W(b);return}for(c+=1;b=P(a,c);c++)if(b.G()&&b.isEnabled()){a.W(b);return}a.W(m)}};n=Y.prototype;n.cc=function(a){this.D&&this.D!=a.target&&Qc(this.D,!1);this.D=a.target};n.dc=function(a){if(a.target==this.D)this.D=m};n.ac=function(a){md(this,a.target)};n.bc=function(a){md(this,a.target)};n.oa=function(){P(this,this.i)||this.A(this.D||P(this,0))};
+var ld=function(a){J(J(J(J(sc(a),a,"select",a.cc),a,"unselect",a.dc),a,"disable",a.ac),a,"hide",a.bc)},kd=function(a){return a=="start"||a=="end"?"vertical":"horizontal"};Jc("goog-tab-bar",function(){return new Y});var Z=function(a,b,c,d,f){function g(a){if(a)a.tabIndex=0,G(a,"click",h.Mb,!1,h),G(a,"keydown",h.Nb,!1,h)}this.q=f||mb();this.K=this.q.a(a)||m;this.na=this.q.a(d||m);this.Ca=(this.Qa=q(b)?b:m)||!b?m:this.q.a(b);this.j=c==!0;var h=this;g(this.K);g(this.na);this.Y(this.j)};t(Z,kc);n=Z.prototype;n.f=function(){this.K&&ec(this.K);this.na&&ec(this.na);Z.c.f.call(this)};n.B=function(){return this.Ca};n.toggle=function(){this.Y(!this.j)};
+n.Y=function(a){if(this.Ca)M(this.Ca,a);else if(a&&this.Qa)this.Ca=this.Qa();if(this.na)M(this.K,!a),M(this.na,a);else{if(this.K){var b=this.K;a?z(b,"goog-zippy-expanded"):jb(b,"goog-zippy-expanded");b=this.K;!a?z(b,"goog-zippy-collapsed"):jb(b,"goog-zippy-collapsed")}this.K&&this.K.setAttribute("aria-expanded",a)}this.j=a;this.dispatchEvent(new nd("toggle",this,this.j))};n.Nb=function(a){if(a.keyCode==13||a.keyCode==32)this.toggle(),a.preventDefault(),a.stopPropagation()};n.Mb=function(){this.toggle()};
+var nd=function(a,b,c){B.call(this,a,b);this.kc=c};t(nd,B);var pd=function(a,b){this.kb=[];for(var c=nb(a),c=ob("span","ae-zippy",c),d=0,f;f=c[d];d++)this.kb.push(new Z(f,f.parentNode.parentNode.parentNode.nextElementSibling!=i?f.parentNode.parentNode.parentNode.nextElementSibling:vb(f.parentNode.parentNode.parentNode.nextSibling),!1));this.ec=new od(this.kb,nb(b))};pd.prototype.ic=function(){return this.ec};pd.prototype.jc=function(){return this.kb};
+var od=function(a,b){this.wa=a;if(this.wa.length)for(var c=0,d;d=this.wa[c];c++)G(d,"toggle",this.Ub,!1,this);this.ib=0;this.j=!1;c="ae-toggle ae-plus ae-action";this.wa.length||(c+=" ae-disabled");this.T=sb("span",{className:c},"Expand All");G(this.T,"click",this.Tb,!1,this);b&&b.appendChild(this.T)};od.prototype.Tb=function(){this.wa.length&&this.Y(!this.j)};
+od.prototype.Ub=function(a){a=a.currentTarget;a.j?this.ib+=1:this.ib-=1;if(a.j!=this.j)if(a.j)this.j=!0,qd(this,!0);else if(this.ib==0)this.j=!1,qd(this,!1)};od.prototype.Y=function(a){this.j=a;for(var a=0,b;b=this.wa[a];a++)b.j!=this.j&&b.Y(this.j);qd(this)};
+var qd=function(a,b){(b!==i?b:a.j)?(jb(a.T,"ae-plus"),z(a.T,"ae-minus"),xb(a.T,"Collapse All")):(jb(a.T,"ae-minus"),z(a.T,"ae-plus"),xb(a.T,"Expand All"))},rd=function(a){this.Vb=a;this.zb={};var b,c=sb("div",{},b=sb("div",{id:"ae-stats-details-tabs",className:"goog-tab-bar goog-tab-bar-top"}),sb("div",{className:"goog-tab-bar-clear"}),a=sb("div",{id:"ae-stats-details-tabs-content",className:"goog-tab-content"})),d=new Y;d.J(b);G(d,"select",this.xb,!1,this);G(d,"unselect",this.xb,!1,this);b=0;for(var f;f=
+this.Vb[b];b++)if(f=nb("ae-stats-details-"+f)){var g=ob("h2",m,f)[0],h;h=g;var j=i;hb&&"innerText"in h?j=h.innerText.replace(/(\r\n|\r|\n)/g,"\n"):(j=[],Bb(h,j,!0),j=j.join(""));j=j.replace(/ \xAD /g," ").replace(/\xAD/g,"");j=j.replace(/\u200B/g,"");hb||(j=j.replace(/ +/g," "));j!=" "&&(j=j.replace(/^\s*/,""));h=j;ub(g);g=new id(h);this.zb[s(g)]=f;d.Da(g,!0);a.appendChild(f);b==0?d.W(g):M(f,!1)}nb("bd").appendChild(c)};rd.prototype.xb=function(a){var b=this.zb[s(a.target)];M(b,a.type=="select")};
+aa("ae.Stats.Details.Tabs",rd);aa("goog.ui.Zippy",Z);Z.prototype.setExpanded=Z.prototype.Y;aa("ae.Stats.MakeZippys",pd);pd.prototype.getExpandCollapse=pd.prototype.ic;pd.prototype.getZippys=pd.prototype.jc;od.prototype.setExpanded=od.prototype.Y;var $=function(){this.cb=[];this.hb=[]},sd=[[5,0.2,1],[6,0.2,1.2],[5,0.25,1.25],[6,0.25,1.5],[4,0.5,2],[5,0.5,2.5],[6,0.5,3],[4,1,4],[5,1,5],[6,1,6],[4,2,8],[5,2,10]],td=function(a){if(a<=0)return[2,0.5,1];for(var b=1;a<1;)a*=10,b/=10;for(;a>=10;)a/=10,b*=10;for(var c=0;c<sd.length;c++)if(a<=sd[c][2])return[sd[c][0],sd[c][1]*b,sd[c][2]*b];return[5,2*b,10*b]};$.prototype.gb="stats/static/pix.gif";$.prototype.w="ae-stats-gantt-";$.prototype.fb=0;$.prototype.write=function(a){this.hb.push(a)};
+var ud=function(a,b,c,d){a.write('<tr class="'+a.w+'axisrow"><td width="20%"></td><td>');a.write('<div class="'+a.w+'axis">');for(var f=0;f<=b;f++)a.write('<img class="'+a.w+'tick" src="'+a.gb+'" alt="" '),a.write('style="left:'+f*c*d+'%"\n>'),a.write('<span class="'+a.w+'scale" style="left:'+f*c*d+'%">'),a.write("&nbsp;"+f*c+"</span>");a.write("</div></td></tr>\n")};
+$.prototype.hc=function(){this.hb=[];var a=td(this.fb),b=a[0],c=a[1],a=100/a[2];this.write('<table class="'+this.w+'table">\n');ud(this,b,c,a);for(var d=0;d<this.cb.length;d++){var f=this.cb[d];this.write('<tr class="'+this.w+'datarow"><td width="20%">');f.label.length>0&&(f.ha.length>0&&this.write('<a class="'+this.w+'link" href="'+f.ha+'">'),this.write(f.label),f.ha.length>0&&this.write("</a>"));this.write("</td>\n<td>");this.write('<div class="'+this.w+'container">');f.ha.length>0&&this.write('<a class="'+
+this.w+'link" href="'+f.ha+'"\n>');this.write('<img class="'+this.w+'bar" src="'+this.gb+'" alt="" ');this.write('style="left:'+f.start*a+"%;width:"+f.duration*a+'%;min-width:1px"\n>');f.eb>0&&(this.write('<img class="'+this.w+'extra" src="'+this.gb+'" alt="" '),this.write('style="left:'+f.start*a+"%;width:"+f.eb*a+'%"\n>'));f.tb.length>0&&(this.write('<span class="'+this.w+'inline" style="left:'+(f.start+Math.max(f.duration,f.eb))*a+'%">&nbsp;'),this.write(f.tb),this.write("</span>"));f.ha.length>
+0&&this.write("</a>");this.write("</div></td></tr>\n")}ud(this,b,c,a);this.write("</table>\n");return this.hb.join("")};$.prototype.fc=function(a,b,c,d,f,g){this.fb=Math.max(this.fb,Math.max(b+c,b+d));this.cb.push({label:a,start:b,duration:c,eb:d,tb:f,ha:g})};aa("Gantt",$);$.prototype.add_bar=$.prototype.fc;$.prototype.draw=$.prototype.hc;})();
diff --git a/google/appengine/ext/datastore_admin/static/css/compiled.css b/google/appengine/ext/datastore_admin/static/css/compiled.css
index 99ef4a7..b2a639e 100755
--- a/google/appengine/ext/datastore_admin/static/css/compiled.css
+++ b/google/appengine/ext/datastore_admin/static/css/compiled.css
@@ -1,2 +1,2 @@
 /* Copyright 2011 Google Inc. All Rights Reserved. */
-html,body,div,h1,h2,h3,h4,h5,h6,p,img,dl,dt,dd,ol,ul,li,table,caption,tbody,tfoot,thead,tr,th,td,form,fieldset,embed,object,applet{margin:0;padding:0;border:0;}body{font-size:62.5%;font-family:Arial,sans-serif;color:#000;background:#fff}a{color:#00c}a:active{color:#f00}a:visited{color:#551a8b}table{border-collapse:collapse;border-width:0;empty-cells:show}ul{padding:0 0 1em 1em}ol{padding:0 0 1em 1.3em}li{line-height:1.5em;padding:0 0 .5em 0}p{padding:0 0 1em 0}h1,h2,h3,h4,h5{padding:0 0 1em 0}h1,h2{font-size:1.3em}h3{font-size:1.1em}h4,h5,table{font-size:1em}sup,sub{font-size:.7em}input,select,textarea,option{font-family:inherit;font-size:inherit}.g-doc,.g-doc-1024,.g-doc-800{font-size:130%}.g-doc{width:100%;text-align:left}.g-section{width:100%;vertical-align:top;display:inline-block}*:first-child+html .g-section{display:block}* html .g-section{overflow:hidden}@-moz-document url-prefix(){.g-section{overflow:hidden}}@-moz-document url-prefix(){.g-section,tt:default{overflow:visible}}.g-section,.g-unit{zoom:1}.g-split .g-unit{text-align:right}.g-split .g-first{text-align:left}.g-doc-1024{width:73.074em;min-width:950px;margin:0 auto;text-align:left}* html .g-doc-1024{width:71.313em}*+html .g-doc-1024{width:71.313em}.g-doc-800{width:57.69em;min-width:750px;margin:0 auto;text-align:left}* html .g-doc-800{width:56.3em}*+html .g-doc-800{width:56.3em}.g-tpl-160 .g-unit,.g-unit .g-tpl-160 .g-unit,.g-unit .g-unit .g-tpl-160 .g-unit,.g-unit .g-unit .g-unit .g-tpl-160 .g-unit{margin:0 0 0 160px;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-160 .g-first,.g-unit .g-unit .g-tpl-160 .g-first,.g-unit .g-tpl-160 .g-first,.g-tpl-160 .g-first{margin:0;width:160px;float:left}.g-tpl-160-alt .g-unit,.g-unit .g-tpl-160-alt .g-unit,.g-unit .g-unit .g-tpl-160-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-160-alt .g-unit{margin:0 160px 0 0;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-160-alt .g-first,.g-unit .g-unit .g-tpl-160-alt .g-first,.g-unit .g-tpl-160-alt .g-first,.g-tpl-160-alt .g-first{margin:0;width:160px;float:right}.g-tpl-180 .g-unit,.g-unit .g-tpl-180 .g-unit,.g-unit .g-unit .g-tpl-180 .g-unit,.g-unit .g-unit .g-unit .g-tpl-180 .g-unit{margin:0 0 0 180px;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-180 .g-first,.g-unit .g-unit .g-tpl-180 .g-first,.g-unit .g-tpl-180 .g-first,.g-tpl-180 .g-first{margin:0;width:180px;float:left}.g-tpl-180-alt .g-unit,.g-unit .g-tpl-180-alt .g-unit,.g-unit .g-unit .g-tpl-180-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-180-alt .g-unit{margin:0 180px 0 0;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-180-alt .g-first,.g-unit .g-unit .g-tpl-180-alt .g-first,.g-unit .g-tpl-180-alt .g-first,.g-tpl-180-alt .g-first{margin:0;width:180px;float:right}.g-tpl-300 .g-unit,.g-unit .g-tpl-300 .g-unit,.g-unit .g-unit .g-tpl-300 .g-unit,.g-unit .g-unit .g-unit .g-tpl-300 .g-unit{margin:0 0 0 300px;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-300 .g-first,.g-unit .g-unit .g-tpl-300 .g-first,.g-unit .g-tpl-300 .g-first,.g-tpl-300 .g-first{margin:0;width:300px;float:left}.g-tpl-300-alt .g-unit,.g-unit .g-tpl-300-alt .g-unit,.g-unit .g-unit .g-tpl-300-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-300-alt .g-unit{margin:0 300px 0 0;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-300-alt .g-first,.g-unit .g-unit .g-tpl-300-alt .g-first,.g-unit .g-tpl-300-alt .g-first,.g-tpl-300-alt .g-first{margin:0;width:300px;float:right}.g-tpl-25-75 .g-unit,.g-unit .g-tpl-25-75 .g-unit,.g-unit .g-unit .g-tpl-25-75 .g-unit,.g-unit .g-unit .g-unit .g-tpl-25-75 
.g-unit{width:74.999%;float:right;margin:0}.g-unit .g-unit .g-unit .g-tpl-25-75 .g-first,.g-unit .g-unit .g-tpl-25-75 .g-first,.g-unit .g-tpl-25-75 .g-first,.g-tpl-25-75 .g-first{width:24.999%;float:left;margin:0}.g-tpl-25-75-alt .g-unit,.g-unit .g-tpl-25-75-alt .g-unit,.g-unit .g-unit .g-tpl-25-75-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-25-75-alt .g-unit{width:24.999%;float:left;margin:0}.g-unit .g-unit .g-unit .g-tpl-25-75-alt .g-first,.g-unit .g-unit .g-tpl-25-75-alt .g-first,.g-unit .g-tpl-25-75-alt .g-first,.g-tpl-25-75-alt .g-first{width:74.999%;float:right;margin:0}.g-tpl-75-25 .g-unit,.g-unit .g-tpl-75-25 .g-unit,.g-unit .g-unit .g-tpl-75-25 .g-unit,.g-unit .g-unit .g-unit .g-tpl-75-25 .g-unit{width:24.999%;float:right;margin:0}.g-unit .g-unit .g-unit .g-tpl-75-25 .g-first,.g-unit .g-unit .g-tpl-75-25 .g-first,.g-unit .g-tpl-75-25 .g-first,.g-tpl-75-25 .g-first{width:74.999%;float:left;margin:0}.g-tpl-75-25-alt .g-unit,.g-unit .g-tpl-75-25-alt .g-unit,.g-unit .g-unit .g-tpl-75-25-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-75-25-alt .g-unit{width:74.999%;float:left;margin:0}.g-unit .g-unit .g-unit .g-tpl-75-25-alt .g-first,.g-unit .g-unit .g-tpl-75-25-alt .g-first,.g-unit .g-tpl-75-25-alt .g-first,.g-tpl-75-25-alt .g-first{width:24.999%;float:right;margin:0}.g-tpl-33-67 .g-unit,.g-unit .g-tpl-33-67 .g-unit,.g-unit .g-unit .g-tpl-33-67 .g-unit,.g-unit .g-unit .g-unit .g-tpl-33-67 .g-unit{width:66.999%;float:right;margin:0}.g-unit .g-unit .g-unit .g-tpl-33-67 .g-first,.g-unit .g-unit .g-tpl-33-67 .g-first,.g-unit .g-tpl-33-67 .g-first,.g-tpl-33-67 .g-first{width:32.999%;float:left;margin:0}.g-tpl-33-67-alt .g-unit,.g-unit .g-tpl-33-67-alt .g-unit,.g-unit .g-unit .g-tpl-33-67-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-33-67-alt .g-unit{width:32.999%;float:left;margin:0}.g-unit .g-unit .g-unit .g-tpl-33-67-alt .g-first,.g-unit .g-unit .g-tpl-33-67-alt .g-first,.g-unit .g-tpl-33-67-alt .g-first,.g-tpl-33-67-alt .g-first{width:66.999%;float:right;margin:0}.g-tpl-67-33 .g-unit,.g-unit .g-tpl-67-33 .g-unit,.g-unit .g-unit .g-tpl-67-33 .g-unit,.g-unit .g-unit .g-unit .g-tpl-67-33 .g-unit{width:32.999%;float:right;margin:0}.g-unit .g-unit .g-unit .g-tpl-67-33 .g-first,.g-unit .g-unit .g-tpl-67-33 .g-first,.g-unit .g-tpl-67-33 .g-first,.g-tpl-67-33 .g-first{width:66.999%;float:left;margin:0}.g-tpl-67-33-alt .g-unit,.g-unit .g-tpl-67-33-alt .g-unit,.g-unit .g-unit .g-tpl-67-33-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-67-33-alt .g-unit{width:66.999%;float:left;margin:0}.g-unit .g-unit .g-unit .g-tpl-67-33-alt .g-first,.g-unit .g-unit .g-tpl-67-33-alt .g-first,.g-unit .g-tpl-67-33-alt .g-first,.g-tpl-67-33-alt .g-first{width:32.999%;float:right;margin:0}.g-tpl-50-50 .g-unit,.g-unit .g-tpl-50-50 .g-unit,.g-unit .g-unit .g-tpl-50-50 .g-unit,.g-unit .g-unit .g-unit .g-tpl-50-50 .g-unit{width:49.999%;float:right;margin:0}.g-unit .g-unit .g-unit .g-tpl-50-50 .g-first,.g-unit .g-unit .g-tpl-50-50 .g-first,.g-unit .g-tpl-50-50 .g-first,.g-tpl-50-50 .g-first{width:49.999%;float:left;margin:0}.g-tpl-50-50-alt .g-unit,.g-unit .g-tpl-50-50-alt .g-unit,.g-unit .g-unit .g-tpl-50-50-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-50-50-alt .g-unit{width:49.999%;float:left;margin:0}.g-unit .g-unit .g-unit .g-tpl-50-50-alt .g-first,.g-unit .g-unit .g-tpl-50-50-alt .g-first,.g-unit .g-tpl-50-50-alt .g-first,.g-tpl-50-50-alt .g-first{width:49.999%;float:right;margin:0}.g-tpl-nest{width:auto}.g-tpl-nest .g-section{display:inline}.g-tpl-nest .g-unit,.g-unit .g-tpl-nest .g-unit,.g-unit .g-unit .g-tpl-nest 
.g-unit,.g-unit .g-unit .g-unit .g-tpl-nest .g-unit{float:left;width:auto;margin:0}.g-tpl-nest-alt .g-unit,.g-unit .g-tpl-nest-alt .g-unit,.g-unit .g-unit .g-tpl-nest-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-nest-alt .g-unit{float:right;width:auto;margin:0}.goog-button{border-width:1px;border-style:solid;border-color:#bbb #999 #999 #bbb;border-radius:2px;-webkit-border-radius:2px;-moz-border-radius:2px;font:normal normal normal 13px/13px Arial,sans-serif;color:#000;text-align:middle;text-decoration:none;text-shadow:0 1px 1px rgba(255,255,255,1);background:#eee;background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#ddd));background:-moz-linear-gradient(top,#fff,#ddd);filter:progid:DXImageTransform.Microsoft.Gradient(EndColorstr='#dddddd',StartColorstr='#ffffff',GradientType=0);cursor:pointer;margin:0;display:inline;display:-moz-inline-box;display:inline-block;*overflow:visible;padding:4px 8px 5px}a.goog-button,span.goog-button,div.goog-button{padding:4px 8px 5px}.goog-button:visited{color:#000}.goog-button{*display:inline}.goog-button:focus,.goog-button:hover{border-color:#000}.goog-button:active,.goog-button-active{color:#000;background-color:#bbb;border-color:#999 #bbb #bbb #999;background-image:-webkit-gradient(linear,0 0,0 100%,from(#ddd),to(#fff));background-image:-moz-linear-gradient(top,#ddd,#fff);filter:progid:DXImageTransform.Microsoft.Gradient(EndColorstr='#ffffff',StartColorstr='#dddddd',GradientType=0)}.goog-button[disabled],.goog-button[disabled]:active,.goog-button[disabled]:hover{color:#666;border-color:#ddd;background-color:#f3f3f3;background-image:none;text-shadow:none;cursor:auto}.goog-button{padding:5px 8px 4px\9}.goog-button{*padding:4px 7px 2px}html>body input.goog-button,x:-moz-any-link,x:default,html>body button.goog-button,x:-moz-any-link,x:default{padding-top:3px;padding-bottom:2px}a.goog-button,x:-moz-any-link,x:default,span.goog-button,x:-moz-any-link,x:default,div.goog-button,x:-moz-any-link,x:default{padding:4px 8px 5px}.goog-button-fixed{padding-left:0!important;padding-right:0!important;width:100%}button.goog-button-icon-c{padding-top:1px;padding-bottom:1px}button.goog-button-icon-c{padding-top:3px\9;padding-bottom:2px\9}button.goog-button-icon-c{*padding-top:0;*padding-bottom:0}html>body button.goog-button-icon-c,x:-moz-any-link,x:default{padding-top:1px;padding-bottom:1px}.goog-button-icon{display:block;margin:0 auto;height:18px;width:18px}html>body .goog-inline-block{display:-moz-inline-box;display:inline-block;}.goog-inline-block{position:relative;display:inline-block}* html .goog-inline-block{display:inline}*:first-child+html .goog-inline-block{display:inline}.goog-custom-button{margin:0 2px 2px;border:0;padding:0;font:normal Tahoma,Arial,sans-serif;color:#000;text-decoration:none;list-style:none;vertical-align:middle;cursor:pointer;outline:none;background:#eee;background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#ddd));background:-moz-linear-gradient(top,#fff,#ddd);filter:progid:DXImageTransform.Microsoft.Gradient(EndColorstr='#dddddd',StartColorstr='#ffffff',GradientType=0)}.goog-custom-button-outer-box,.goog-custom-button-inner-box{border-style:solid;border-color:#bbb #999 #999 #bbb;vertical-align:top}.goog-custom-button-outer-box{margin:0;border-width:1px 0;padding:0}.goog-custom-button-inner-box{margin:0 -1px;border-width:0 1px;padding:3px 4px}* html .goog-custom-button-inner-box{left:-1px}* html .goog-custom-button-rtl .goog-custom-button-outer-box{left:-1px}* html .goog-custom-button-rtl 
.goog-custom-button-inner-box{left:0}*:first-child+html .goog-custom-button-inner-box{left:-1px}*:first-child+html .goog-custom-button-collapse-right .goog-custom-button-inner-box{border-left-width:2px}*:first-child+html .goog-custom-button-collapse-left .goog-custom-button-inner-box{border-right-width:2px}*:first-child+html .goog-custom-button-collapse-right.goog-custom-button-collapse-left .goog-custom-button-inner-box{border-width:0 1px}*:first-child+html .goog-custom-button-rtl .goog-custom-button-inner-box{left:1px}::root .goog-custom-button,::root .goog-custom-button-outer-box{line-height:0}::root .goog-custom-button-inner-box{line-height:normal}.goog-custom-button-disabled{background-image:none!important;opacity:0.4;-moz-opacity:0.4;filter:alpha(opacity=40)}.goog-custom-button-disabled .goog-custom-button-outer-box,.goog-custom-button-disabled .goog-custom-button-inner-box{color:#333!important;border-color:#999!important}* html .goog-custom-button-disabled{margin:2px 1px!important;padding:0 1px!important}*:first-child+html .goog-custom-button-disabled{margin:2px 1px!important;padding:0 1px!important}.goog-custom-button-hover .goog-custom-button-outer-box,.goog-custom-button-hover .goog-custom-button-inner-box{border-color:#000!important;}.goog-custom-button-active,.goog-custom-button-checked{background-color:#bbb;background-position:bottom left;background-image:-webkit-gradient(linear,0 0,0 100%,from(#ddd),to(#fff));background:-moz-linear-gradient(top,#ddd,#fff);filter:progid:DXImageTransform.Microsoft.Gradient(EndColorstr='#ffffff',StartColorstr='#dddddd',GradientType=0)}.goog-custom-button-focused .goog-custom-button-outer-box,.goog-custom-button-focused .goog-custom-button-inner-box,.goog-custom-button-focused.goog-custom-button-collapse-left .goog-custom-button-inner-box,.goog-custom-button-focused.goog-custom-button-collapse-left.goog-custom-button-checked .goog-custom-button-inner-box{border-color:#000}.goog-custom-button-collapse-right,.goog-custom-button-collapse-right .goog-custom-button-outer-box,.goog-custom-button-collapse-right .goog-custom-button-inner-box{margin-right:0}.goog-custom-button-collapse-left,.goog-custom-button-collapse-left .goog-custom-button-outer-box,.goog-custom-button-collapse-left .goog-custom-button-inner-box{margin-left:0}.goog-custom-button-collapse-left .goog-custom-button-inner-box{border-left:1px solid #fff}.goog-custom-button-collapse-left.goog-custom-button-checked .goog-custom-button-inner-box{border-left:1px solid #ddd}* html .goog-custom-button-collapse-left .goog-custom-button-inner-box{left:0}*:first-child+html .goog-custom-button-collapse-left .goog-custom-button-inner-box{left:0}.goog-date-picker th,.goog-date-picker td{font-family:arial,sans-serif;text-align:center}.goog-date-picker th{font-size:.9em;font-weight:bold;color:#666667;background-color:#c3d9ff}.goog-date-picker td{vertical-align:middle;padding:2px 3px}.goog-date-picker{-moz-user-focus:normal;-moz-user-select:none;position:absolute;border:1px solid gray;float:left;font-family:arial,sans-serif;padding-left:1px;background:white}.goog-date-picker-menu{position:absolute;background:threedface;border:1px solid gray;-moz-user-focus:normal}.goog-date-picker-menu ul{list-style:none;margin:0;padding:0}.goog-date-picker-menu ul li{cursor:default}.goog-date-picker-menu-selected{background-color:#aaccee}.goog-date-picker td div{float:left}.goog-date-picker button{padding:0;margin:1px;border:1px outset gray}.goog-date-picker-week{padding:1px 3px}.goog-date-picker-wday{padding:1px 
3px}.goog-date-picker-today-cont{text-align:left!important}.goog-date-picker-none-cont{text-align:right!important}.goog-date-picker-head td{text-align:center}.goog-date-picker-month{width:12ex}.goog-date-picker-year{width:6ex}.goog-date-picker table{border-collapse:collapse}.goog-date-picker-selected{background-color:#aaccee!important;color:blue!important}.goog-date-picker-today{font-weight:bold!important}.goog-date-picker-other-month{-moz-opacity:0.3;filter:Alpha(Opacity=30)}.sat,.sun{background:#eee}#button1,#button2{display:block;width:60px;text-align:center;margin:10px;padding:10px;font:normal .8em arial,sans-serif;border:1px solid #000}.goog-menu{position:absolute;color:#000;border:1px solid #b5b6b5;background-color:#f3f3f7;cursor:default;font:normal small arial,helvetica,sans-serif;margin:0;padding:0;outline:none}.goog-menuitem{padding:2px 5px;margin:0;list-style:none}.goog-menuitem-highlight{background-color:#4279a5;color:#fff}.goog-menuitem-disabled{color:#999}.goog-option{padding-left:15px!important}.goog-option-selected{background-image:url(/img/check.gif);background-position:4px 50%;background-repeat:no-repeat}.goog-menuseparator{position:relative;margin:2px 0;border-top:1px solid #999;padding:0;outline:none}.goog-submenu{position:relative}.goog-submenu-arrow{position:absolute;display:block;width:11px;height:11px;right:3px;top:4px;background-image:url(/img/menu-arrows.gif);background-repeat:no-repeat;background-position:0 0;font-size:1px}.goog-menuitem-highlight .goog-submenu-arrow{background-position:0 -11px}.goog-menuitem-disabled .goog-submenu-arrow{display:none}.goog-menu-filter{margin:2px;border:1px solid silver;background:white;overflow:hidden}.goog-menu-filter div{color:gray;position:absolute;padding:1px}.goog-menu-filter input{margin:0;border:0;background:transparent;width:100%}.goog-menuitem-partially-checked{background-image:url(/img/check-outline.gif);background-position:4px 50%;background-repeat:no-repeat}.goog-menuitem-fully-checked{background-image:url(/img/check.gif);background-position:4px 50%;background-repeat:no-repeat}.goog-menu-button{margin:0 2px 2px 2px;border:0;padding:0;font:normal Tahoma,Arial,sans-serif;color:#000;background:#ddd url("/img/button-bg.gif") repeat-x top left;text-decoration:none;list-style:none;vertical-align:middle;cursor:pointer;outline:none}.goog-menu-button-outer-box,.goog-menu-button-inner-box{border-style:solid;border-color:#aaa;vertical-align:middle}.goog-menu-button-outer-box{margin:0;border-width:1px 0;padding:0}.goog-menu-button-inner-box{margin:0 -1px;border-width:0 1px;padding:0 4px 2px 4px}* html .goog-menu-button-inner-box{left:-1px}* html .goog-menu-button-rtl .goog-menu-button-outer-box{left:-1px}* html .goog-menu-button-rtl .goog-menu-button-inner-box{left:0}*:first-child+html .goog-menu-button-inner-box{left:-1px}*:first-child+html .goog-menu-button-rtl .goog-menu-button-inner-box{left:1px}::root .goog-menu-button,::root .goog-menu-button-outer-box,::root .goog-menu-button-inner-box{line-height:0}::root .goog-menu-button-caption,::root .goog-menu-button-dropdown{line-height:normal}.goog-menu-button-disabled{background-image:none!important;opacity:0.4;-moz-opacity:0.4;filter:alpha(opacity=40)}.goog-menu-button-disabled .goog-menu-button-outer-box,.goog-menu-button-disabled .goog-menu-button-inner-box,.goog-menu-button-disabled .goog-menu-button-caption,.goog-menu-button-disabled .goog-menu-button-dropdown{color:#333!important;border-color:#999!important}* html .goog-menu-button-disabled{margin:2px 1px!important;padding:0 
1px!important}*:first-child+html .goog-menu-button-disabled{margin:2px 1px!important;padding:0 1px!important}.goog-menu-button-hover .goog-menu-button-outer-box,.goog-menu-button-hover .goog-menu-button-inner-box{border-color:#9cf #69e #69e #7af!important;}.goog-menu-button-active,.goog-menu-button-open{background-color:#bbb;background-position:bottom left}.goog-menu-button-focused .goog-menu-button-outer-box,.goog-menu-button-focused .goog-menu-button-inner-box{border-color:#3366cc}.goog-menu-button-caption{padding:0 4px 0 0;vertical-align:middle}.goog-menu-button-rtl .goog-menu-button-caption{padding:0 0 0 4px}.goog-menu-button-dropdown{width:7px;background:url(/img/toolbar_icons.gif) no-repeat -176px;vertical-align:middle}.goog-flat-menu-button{margin:0 2px;padding:1px 4px;font:normal 95% Tahoma,Arial,sans-serif;color:#333;text-decoration:none;list-style:none;vertical-align:middle;cursor:pointer;outline:none;-moz-outline:none;border-width:1px;border-style:solid;border-color:#c9c9c9;background-color:#fff}.goog-flat-menu-button-disabled *{color:#999;border-color:#ccc;cursor:default}.goog-flat-menu-button-hover,.goog-flat-menu-button-hover{border-color:#9cf #69e #69e #7af!important;}.goog-flat-menu-button-active{background-color:#bbb;background-position:bottom left}.goog-flat-menu-button-focused{border-color:#3366cc}.goog-flat-menu-button-caption{padding-right:10px;vertical-align:middle}.goog-flat-menu-button-dropdown{width:7px;background:url(/img/toolbar_icons.gif) no-repeat -176px;vertical-align:middle}h1{font-size:1.8em}.g-doc{width:auto;margin:0 10px}.g-doc-1024{margin-left:10px}#ae-logo{background:url(//www.google.com/images/logos/app_engine_logo_sm.gif) 0 0 no-repeat;display:block;width:178px;height:30px;margin:4px 0 0 0}.ae-ir span{position:absolute;display:block;width:0;height:0;overflow:hidden}.ae-noscript{position:absolute;left:-5000px}#ae-lhs-nav{border-right:3px solid #e5ecf9}.ae-notification{margin-bottom:.6em;text-align:center}.ae-notification strong{display:block;width:55%;margin:0 auto;text-align:center;padding:.6em;background-color:#fff1a8;font-weight:bold}.ae-alert{font-weight:bold;background:url(/img/icn/warning.png) no-repeat;margin-bottom:.5em;padding-left:1.8em}.ae-info{background:url(/img/icn/icn-info.gif) no-repeat;margin-bottom:.5em;padding-left:1.8em}.ae-promo{padding:.5em .8em;margin:.6em 0;background-color:#fffbe8;border:1px solid #fff1a9;text-align:left}.ae-promo strong{position:relative;top:.3em}.ae-alert-text,.ae-warning-text{background-color:transparent;background-position:right 1px;padding:0 18px 0 0}.ae-alert-text{color:#c00}.ae-warning-text{color:#f90}.ae-alert-c span{display:inline-block}.ae-message{border:1px solid #e5ecf9;background-color:#f6f9ff;margin-bottom:1em;padding:.5em}.ae-errorbox{border:1px solid #f00;background-color:#fee;margin-bottom:1em;padding:1em}#bd .ae-errorbox ul{padding-bottom:0}.ae-form dt{font-weight:bold}.ae-form dt em,.ae-field-hint{margin-top:.2em;color:#666667;font-size:.85em}.ae-field-yyyymmdd,.ae-field-hhmmss{width:6em}.ae-field-hint-hhmmss{margin-left:2.3em}.ae-form label{display:block;margin:0 0 .2em 0;font-weight:bold}.ae-radio{margin-bottom:.3em}.ae-radio label{display:inline}.ae-form dd,.ae-input-row{margin-bottom:.6em}.ae-input-row-group{border:1px solid #fff1a9;background:#fffbe8;padding:8px}.ae-btn-row{margin-top:1.4em;margin-bottom:1em}.ae-btn-row-note{padding:5px 0 6px 0}.ae-btn-row-note span{padding-left:18px;padding-right:.5em;background:transparent url(/img/icn/icn-info.gif) 0 0 
no-repeat}.ae-btn-primary{font-weight:bold}form .ae-cancel{margin-left:.5em}.ae-submit-inline{margin-left:.8em}.ae-radio-bullet{width:20px;float:left}.ae-label-hanging-indent{margin-left:5px}.ae-divider{margin:0 .6em 0 .5em}.ae-nowrap{white-space:nowrap}.ae-pre-wrap{white-space:pre-wrap;white-space:-moz-pre-wrap;white-space:-pre-wrap;white-space:-o-pre-wrap;word-wrap:break-word;_white-space:pre;}wbr:after{content:"\00200B"}a button{text-decoration:none}.ae-alert ul{margin-bottom:.75em;margin-top:.25em;line-height:1.5em}.ae-alert h4{color:#000;font-weight:bold;padding:0 0 .5em}.ae-form-simple-list{list-style-type:none;padding:0;margin-bottom:1em}.ae-form-simple-list li{padding:.3em 0 .5em .5em;border-bottom:1px solid #c3d9ff}div.ae-datastore-index-to-delete,div.ae-datastore-index-to-build{color:#aaa}#hd p{padding:0}#hd li{display:inline}ul{padding:0 0 1em 1.2em}#ae-userinfo{text-align:right;white-space:nowrap;}#ae-userinfo ul{padding-bottom:0;padding-top:5px}#ae-appbar-lrg{margin:0 0 1.25em 0;padding:.25em .5em;background-color:#e5ecf9;border-top:1px solid #36c}#ae-appbar-lrg h1{font-size:1.2em;padding:0}#ae-appbar-lrg h1 span{font-size:80%;font-weight:normal}#ae-appbar-lrg form{display:inline;padding-right:.1em;margin-right:.5em}#ae-appbar-lrg strong{white-space:nowrap}#ae-appbar-sml{margin:0 0 1.25em 0;height:8px;padding:0 .5em;background:#e5ecf9}.ae-rounded-sml{border-radius:3px;-moz-border-radius:3px;-webkit-border-radius:3px}#ae-appbar-lrg a{margin-top:.3em}a.ae-ext-link,a span.ae-ext-link{background:url(/img/icn/icn-open-in-new-window.png) no-repeat right;padding-right:18px;margin-right:8px}.ae-no-pad{padding-left:1em}.ae-message h4{margin-bottom:.3em;padding-bottom:0}#ft{text-align:center;margin:2.5em 0 1em;padding-top:.5em;border-top:2px solid #c3d9ff}#bd h3{font-weight:bold;font-size:1.4em}#bd h3 .ae-apps-switch{font-weight:normal;font-size:.7em;margin-left:2em}#bd p{padding:0 0 1em 0}#ae-content{padding-left:1em}.ae-unimportant{color:#666}.ae-new-usr td{border-top:1px solid #ccccce;background-color:#ffe}.ae-error-td td{border:2px solid #f00;background-color:#fee}.ae-delete{cursor:pointer;border:none;background:transparent;}.ae-btn-large{background:#039 url(/img/icn/button_back.png) repeat-x;color:#fff;font-weight:bold;font-size:1.2em;padding:.5em;border:2px outset #000;cursor:pointer}.ae-breadcrumb{margin:0 0 1em}.ae-disabled,a.ae-disabled,a.ae-disabled:hover,a.ae-disabled:active{color:#666!important;text-decoration:none!important;cursor:default!important;opacity:.4!important;-moz-opacity:.4!important;filter:alpha(opacity=40)!important}input.ae-readonly{border:2px solid transparent;border-left:0;background-color:transparent}span.ae-text-input-clone{padding:5px 5px 5px 0}.ae-loading{opacity:.4;-moz-opacity:.4;filter:alpha(opacity=40)}.ae-tip{margin:1em 0;background:url(/img/tip.png) top left no-repeat;padding:2px 0 0 25px}sup.ae-new-sup{color:red}.ae-action{color:#00c;cursor:pointer;text-decoration:underline}.ae-toggle{padding-left:16px;background-position:left center;background-repeat:no-repeat;cursor:pointer}.ae-minus{background-image:url(/img/wgt/minus.gif)}.ae-plus{background-image:url(/img/wgt/plus.gif)}.ae-print{background-image:url(/img/print.gif);padding-left:19px}.ae-currency,.ae-table thead th.ae-currency{text-align:right;white-space:nowrap}#ae-loading{font-size:1.2em;position:absolute;text-align:center;top:0;width:100%}#ae-loading div{margin:0 auto;background:#fff1a9;width:5em;font-weight:bold;padding:4px 
10px;-moz-border-radius-bottomleft:3px;-moz-border-radius-bottomright:3px;-webkit-border-radius-bottomleft:3px;-webkit-border-radius-bottomright:3px}.ae-occlude{filter:alpha(opacity=0);position:absolute}.g-tpl-66-34 .g-unit,.g-unit .g-tpl-66-34 .g-unit,.g-unit .g-unit .g-tpl-66-34 .g-unit,.g-unit .g-unit .g-unit .g-tpl-66-34 .g-unit{display:inline;margin:0;width:33.999%;float:right}.g-unit .g-unit .g-unit .g-tpl-66-34 .g-first,.g-unit .g-unit .g-tpl-66-34 .g-first,.g-unit .g-tpl-66-34 .g-first,.g-tpl-66-34 .g-first{display:inline;margin:0;width:65.999%;float:left}.ae-ie6-c{_margin-right:-2000px;_position:relative;_width:100%;background:#fff}h2.ae-section-header{background:#e5ecf9;padding:.2em .4em;margin-bottom:.5em}.ae-field-span{padding:3px 0}select{font:13px/13px Arial,sans-serif;color:#000;border-width:1px;border-style:solid;border-color:#bbb #999 #999 #bbb;-webkit-border-radius:2px;-moz-border-radius:2px;background:#eee;background:-moz-linear-gradient(top,#fff,#ddd);filter:progid:DXImageTransform.Microsoft.Gradient(EndColorstr='#dddddd',StartColorstr='#ffffff',GradientType=0);cursor:pointer;padding:2px 1px;margin:0}select:hover{border-color:#000}select[disabled],select[disabled]:active{color:#666;border-color:#ddd;background-color:#f3f3f3;background-image:none;text-shadow:none;cursor:auto}.ae-table-plain{border-collapse:collapse;width:100%}.ae-table{border:1px solid #c5d7ef;border-collapse:collapse;width:100%}#bd h2.ae-table-title{background:#e5ecf9;margin:0;color:#000;font-size:1em;padding:3px 0 3px 5px;border-left:1px solid #c5d7ef;border-right:1px solid #c5d7ef;border-top:1px solid #c5d7ef}.ae-table-caption,.ae-table caption{border:1px solid #c5d7ef;background:#e5ecf9;-moz-margin-start:-1px}.ae-table caption{padding:3px 5px;text-align:left}.ae-table th,.ae-table td{background-color:#fff;padding:.35em 1em .25em .35em;margin:0}.ae-table thead th{font-weight:bold;text-align:left;background:#c5d7ef;vertical-align:bottom}.ae-table thead th .ae-no-bold{font-weight:normal}.ae-table tfoot tr td{border-top:1px solid #c5d7ef;background-color:#e5ecf9}.ae-table td{border-top:1px solid #c5d7ef;border-bottom:1px solid #c5d7ef}.ae-even>td,.ae-even th,.ae-even-top td,.ae-even-tween td,.ae-even-bottom td,ol.ae-even{background-color:#e9e9e9;border-top:1px solid #c5d7ef;border-bottom:1px solid #c5d7ef}.ae-even-top td{border-bottom:0}.ae-even-bottom td{border-top:0}.ae-even-tween td{border:0}.ae-table .ae-tween td{border:0}.ae-table .ae-tween-top td{border-bottom:0}.ae-table .ae-tween-bottom td{border-top:0}#bd .ae-table .cbc{width:1.5em;padding-right:0}.ae-table #ae-live td{background-color:#ffeac0}.ae-table-fixed{table-layout:fixed}.ae-table-fixed td,.ae-table-nowrap{overflow:hidden;white-space:nowrap}.ae-paginate strong{margin:0 .5em}tfoot .ae-paginate{text-align:right}.ae-table-caption .ae-paginate,.ae-table-caption .ae-orderby{padding:2px 5px}.modal-dialog{background:#c1d9ff;border:1px solid #3a5774;color:#000;padding:4px;position:absolute;font-size:1.3em;-moz-box-shadow:0 1px 4px #333;-webkit-box-shadow:0 1px 4px #333;box-shadow:0 1px 4px #333}.modal-dialog a,.modal-dialog a:link,.modal-dialog a:visited{color:#06c;cursor:pointer}.modal-dialog-bg{background:#666;left:0;position:absolute;top:0}.modal-dialog-title{background:#e0edfe;color:#000;cursor:pointer;font-size:120%;font-weight:bold;padding:8px 15px 8px 8px;position:relative;_zoom:1;}.modal-dialog-title-close{background:#e0edfe url(https://ssl.gstatic.com/editor/editortoolbar.png) no-repeat -528px 
0;cursor:default;height:15px;position:absolute;right:10px;top:8px;width:15px;vertical-align:middle}.modal-dialog-buttons,.modal-dialog-content{background-color:#fff;padding:8px}.modal-dialog-buttons button{margin-right:.75em}.goog-buttonset-default{font-weight:bold}.goog-tab{position:relative;border:1px solid #8ac;padding:4px 9px;color:#000;background:#e5ecf9;border-top-left-radius:2px;border-top-right-radius:2px;-moz-border-radius-topleft:2px;-webkit-border-top-left-radius:2px;-moz-border-radius-topright:2px;-webkit-border-top-right-radius:2px}.goog-tab-bar-top .goog-tab{margin:1px 4px 0 0;border-bottom:0;float:left}.goog-tab-bar-bottom .goog-tab{margin:0 4px 1px 0;border-top:0;float:left}.goog-tab-bar-start .goog-tab{margin:0 0 4px 1px;border-right:0}.goog-tab-bar-end .goog-tab{margin:0 1px 4px 0;border-left:0}.goog-tab-hover{text-decoration:underline;cursor:pointer}.goog-tab-disabled{color:#fff;background:#ccc;border-color:#ccc}.goog-tab-selected{background:#fff!important;color:black;font-weight:bold}.goog-tab-bar-top .goog-tab-selected{top:1px;margin-top:0;padding-bottom:5px}.goog-tab-bar-bottom .goog-tab-selected{top:-1px;margin-bottom:0;padding-top:5px}.goog-tab-bar-start .goog-tab-selected{left:1px;margin-left:0;padding-right:9px}.goog-tab-bar-end .goog-tab-selected{left:-1px;margin-right:0;padding-left:9px}.goog-tab-content{padding:.1em .8em .8em .8em;border:1px solid #8ac;border-top:none}.goog-tab-bar{position:relative;margin:0 0 0 5px;border:0;padding:0;list-style:none;cursor:default;outline:none}.goog-tab-bar-clear{border-top:1px solid #8ac;clear:both;height:0;overflow:hidden}.goog-tab-bar-start{float:left}.goog-tab-bar-end{float:right}* html .goog-tab-bar-start{margin-right:-3px}* html .goog-tab-bar-end{margin-left:-3px}#ae-nav ul{list-style-type:none;margin:0;padding:1em 0}#ae-nav ul li{padding-left:.5em}#ae-nav .ae-nav-selected{color:#000;display:block;font-weight:bold;background-color:#e5ecf9;margin-right:-1px;border-top-left-radius:4px;-moz-border-radius-topleft:4px;-webkit-border-top-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomleft:4px;-webkit-border-bottom-left-radius:4px}#ae-nav .ae-nav-bold{font-weight:bold}#ae-nav ul li span.ae-nav-disabled{color:#666}#ae-nav ul ul{margin:0;padding:0 0 0 .5em}#ae-nav ul ul li{padding-left:.5em}#ae-nav ul li a,#ae-nav ul li span,#ae-nav ul ul li a{padding-left:.5em}#ae-nav li a:link,#ae-nav li a:visited{color:#00c}.ae-nav-group{padding:.5em;margin:0 .75em 0 0;background-color:#fffbe8;border:1px solid #fff1a9}.ae-nav-group h4{font-weight:bold;padding:auto auto .5em .5em;padding-left:.4em;margin-bottom:.5em;padding-bottom:0}.ae-nav-group ul{margin:0 0 .5em 0;padding:0 0 0 1.3em;list-style-type:none}.ae-nav-group ul li{padding-bottom:.5em}.ae-nav-group li a:link,.ae-nav-group li a:visited{color:#00c}.ae-nav-group li a:hover{color:#00c}@media print{body{font-size:13px;width:8.5in;background:#fff}table,.ae-table-fixed{table-layout:automatic}tr{display:table-row!important}.g-doc-1024{width:8.5in}#ae-appbar-lrg,.ae-table-caption,.ae-table-nowrap,.ae-nowrap,th,td{overflow:visible!important;white-space:normal!important;background:#fff!important}.ae-print,.ae-toggle{display:none}#ae-lhs-nav-c{display:none}#ae-content{margin:0;padding:0}.goog-zippy-collapsed,.goog-zippy-expanded{background:none!important;padding:0!important}}#ae-admin-dev-table{margin:0 0 1em 0}.ae-admin-dev-tip,.ae-admin-dev-tip.ae-tip{margin:-0.31em 0 
2.77em}#ae-sms-countryselect{margin-right:.5em}#ae-admin-enable-form{margin-bottom:1em}#ae-admin-services-c{margin-top:2em}#ae-admin-services{padding:0 0 0 3em;margin-bottom:1em;font-weight:bold}#ae-admin-logs-table-c{_margin-right:-2000px;_position:relative;_width:100%;background:#fff}#ae-admin-logs-table{margin:0;padding:0}#ae-admin-logs-filters{padding:3px 0 3px 5px}#ae-admin-logs-pagination{padding:6px 5px 0 0;text-align:right;width:45%}#ae-admin-logs-pagination span.ae-disabled{color:#666;background-color:transparent}#ae-admin-logs-table td{white-space:nowrap}#ae-storage-content div.ae-alert{padding-bottom:5px}#ae-billing-form-c{_margin-right:-3000px;_position:relative;_width:100%}.ae-rounded-top-small{-moz-border-radius-topleft:3px;-webkit-border-top-left-radius:3px;-moz-border-radius-topright:3px;-webkit-border-top-right-radius:3px}.ae-progress-content{height:400px}#ae-billing-tos{text-align:left;width:100%;margin-bottom:.5em}.ae-billing-budget-section{margin-bottom:1.5em}.ae-billing-budget-section .g-unit,.g-unit .ae-billing-budget-section .g-unit,.g-unit .g-unit .ae-billing-budget-section .g-unit{margin:0 0 0 11em;width:auto;float:none}.g-unit .g-unit .ae-billing-budget-section .g-first,.g-unit .ae-billing-budget-section .g-first,.ae-billing-budget-section .g-first{margin:0;width:11em;float:left}#ae-billing-form .ae-btn-row{margin-left:11em}#ae-billing-form .ae-btn-row .ae-info{margin-top:10px}#ae-billing-checkout{width:150px;float:left}#ae-billing-alloc-table{border:1px solid #c5d7ef;border-bottom:none;width:100%;margin-top:.5em}#ae-billing-alloc-table th,#ae-billing-alloc-table td{padding:.35em 1em .25em .35em;border-bottom:1px solid #c5d7ef;color:#000;white-space:nowrap}.ae-billing-resource{background-color:transparent;font-weight:normal}#ae-billing-alloc-table tr th span{font-weight:normal}#ae-billing-alloc-table tr{vertical-align:baseline}#ae-billing-alloc-table th{white-space:nowrap}#ae-billing-alloc-table .ae-editable span.ae-text-input-clone,#ae-billing-alloc-table .ae-readonly input{display:none}#ae-billing-alloc-table .ae-readonly span.ae-text-input-clone,#ae-billing-alloc-table .ae-editable input{display:inline}#ae-billing-alloc-table td span.ae-billing-warn-note,#ae-billing-table-errors .ae-billing-warn-note{margin:0;background-repeat:no-repeat;display:inline-block;background-image:url(/img/icn/warning.png);text-align:right;padding-left:16px;padding-right:.1em;height:16px;font-weight:bold}#ae-billing-alloc-table td span.ae-billing-warn-note span,#ae-billing-table-errors .ae-billing-warn-note span{vertical-align:super;font-size:80%}#ae-billing-alloc-table td span.ae-billing-error-hidden,#ae-billing-table-errors .ae-billing-error-hidden{display:none}.ae-billing-percent{font-size:80%;color:#666;margin-left:3px}#ae-billing-week-info{margin-top:5px;line-height:1.4}#ae-billing-table-errors{margin-top:.3em}#ae-billing-allocation-noscript{margin-top:1.5em}#ae-billing-allocation-custom-opts{margin-left:2.2em}#ae-billing-settings h2{font-size:1em;display:inline}#ae-billing-settings p{padding:.3em 0 .5em}#ae-billing-settings-table{margin:.4em 0 .5em}#ae-settings-resource-col{width:19%}#ae-settings-budget-col{width:11%}#ae-billing-settings-table .ae-settings-budget-col{padding-right:2em}.ae-table th.ae-settings-unit-cell,.ae-table td.ae-settings-unit-cell,.ae-table th.ae-total-unit-cell,.ae-table 
td.ae-total-unit-cell{padding-left:1.2em}#ae-settings-unit-col{width:18%}#ae-settings-paid-col{width:15%}#ae-settings-free-col{width:15%}#ae-settings-total-col{width:22%}.ae-billing-inline-link{margin-left:.5em}.ae-billing-settings-section{margin-bottom:2em}.ae-billing-settings-formbutton{margin-top:.5em}#ae-billing-budget-setup-checkout{margin-bottom:0}#ae-billing-vat-c .ae-field-hint{width:85%}#ae-billing-checkout-note{margin-top:.8em}.ae-table thead th.ae-currency-th{text-align:right}#ae-billing-logs-date{width:15%}#ae-billing-logs-admin{width:15%}#ae-billing-logs-event{width:54%}#ae-billing-logs-amount{text-align:right;width:8%}#ae-billing-logs-balance{text-align:right;width:8%}#ae-billing-history-expand .ae-action{margin-left:1em}.ae-table .ae-billing-usage-report{width:100%;*width:auto;margin:0 0 1em 0}.ae-table .ae-billing-usage-report th,.ae-billing-charges th{color:#666;border-top:0}.ae-table .ae-billing-usage-report th,.ae-table .ae-billing-usage-report td,.ae-billing-charges th,.ae-billing-charges td{background-color:transparent;padding:.4em 0;border-bottom:1px solid #ddd}.ae-table .ae-billing-usage-report tfoot td,.ae-billing-charges tfoot td{border-bottom:none}.ae-billing-report-resource{width:30%}.ae-billing-report-used{width:20%}.ae-billing-report-free{width:20%}.ae-billing-report-paid{width:15%}.ae-billing-report-charge{width:15%}.ae-billing-change-resource{width:85%}.ae-billing-change-budget{width:15%}#ae-billing-always-on-label{display:inline}#ae-billing-budget-buffer-label{display:inline}.ae-billing-charges{width:50%}.ae-billing-charges-charge{text-align:right}.goog-zippy-expanded{background-image:url(/img/wgt/minus.gif);cursor:pointer;background-repeat:no-repeat;padding-left:17px}.goog-zippy-collapsed{background-image:url(/img/wgt/plus.gif);cursor:pointer;background-repeat:no-repeat;padding-left:17px}#ae-admin-logs-pagination{width:auto}.ae-usage-cycle-note{color:#555}#ae-createapp-start{background-color:#c6d5f1;padding:1em;padding-bottom:2em;text-align:center}#ae-admin-app_id_alias-check,#ae-createapp-id-check{margin:0 0 0 1em}#ae-admin-app_id_alias-message{display:block;margin:.4em 0}#ae-createapp-id-content{width:100%}#ae-createapp-id-content td{vertical-align:top}#ae-createapp-id-td{white-space:nowrap;width:1%}#ae-createapp-id-td #ae-createapp-id-error{position:absolute;width:24em;padding-left:1em;white-space:normal}#ae-createapp-id-error-td{padding-left:1em}#ae-admin-dev-invite label{float:left;width:3.6em;position:relative;top:.3em}#ae-admin-dev-invite .ae-radio{margin-left:3.6em}#ae-admin-dev-invite .ae-radio label{float:none;width:auto;font-weight:normal;position:static}#ae-admin-dev-invite .goog-button{margin-left:3.6em}#ae-admin-dev-invite .ae-field-hint{margin-left:4.2em}#ae-admin-dev-invite .ae-radio .ae-field-hint{margin-left:0}.ae-you{color:#008000}#ae-authdomain-opts{margin-bottom:1em}#ae-authdomain-content .ae-input-text,#ae-authdomain-content .ae-field-hint{margin:.3em 0 .4em 2.5em}#ae-authdomain-opts a{margin-left:1em}#ae-authdomain-opts-hint{margin-top:.2em;color:#666667;font-size:.85em}#ae-authdomain-content #ae-authdomain-desc .ae-field-hint{margin-left:0}#ae-storage-opts{margin-bottom:1em}#ae-storage-content .ae-input-text,#ae-storage-content .ae-field-hint{margin:.3em 0 .4em 2.5em}#ae-storage-opts a{margin-left:1em}#ae-storage-opts-hint{margin-top:.2em;color:#666667;font-size:.85em}#ae-storage-content #ae-storage-desc .ae-field-hint{margin-left:0}#ae-dash .g-section{margin:0 0 1em}#ae-dash * .g-section{margin:0}#ae-dash-quota 
.ae-alert{padding-left:1.5em}.ae-dash-email-disabled{background:url(/img/icn/exclamation_circle.png) no-repeat;margin-top:.5em;margin-bottom:.5em;min-height:16px;padding-left:1.5em}#ae-dash-email-disabled-footnote{padding-left:1.5em;margin:5px 0 0;font-weight:normal}#ae-dash-graph-c{border:1px solid #c5d7ef;padding:5px 0}#ae-dash-graph-change{margin:0 0 0 5px}#ae-dash-graph-img{padding:5px;margin-top:.5em;background-color:#fff;display:block}#ae-dash-graph-nodata{text-align:center}#ae-dash .ae-logs-severity{margin-right:.5em}#ae-dash .g-c{padding:0 0 0 .1em}#ae-dash .g-tpl-50-50 .g-unit .g-c{padding:0 0 0 1em}#ae-dash .g-tpl-50-50 .g-first .g-c{padding:0 1em 0 .1em}.ae-quota-warnings{background-color:#fffbe8;margin:0;padding:.5em .5em 0;text-align:left}.ae-quota-warnings div{padding:0 0 .5em}#ae-dash-quota-refresh-info{font-size:85%}#ae-dash #ae-dash-quota-bar-col,#ae-dash .ae-dash-quota-bar{width:100px}#ae-dash-quotadetails #ae-dash-quota-bar-col,#ae-dash-quotadetails .ae-dash-quota-bar{width:200px}#ae-dash-quota-percent-col{width:3.5em}#ae-dash-quota-cost-col{width:15%}#ae-dash-quota-alert-col{width:1%}#ae-dash .ae-dash-quota-alert-td{padding:0}.ae-dash-quota-alert-td a{display:block;width:16px;height:16px}#ae-dash .ae-dash-quota-alert-td .ae-alert{display:block;width:16px;height:16px;margin:0;padding:0}#ae-dash .ae-dash-quota-alert-td .ae-dash-email-disabled{display:block;width:16px;height:16px;margin:0;padding:0}#ae-dash-quota tbody th{font-weight:normal}#ae-dash-quota caption{padding:0}#ae-dash-quota caption .g-c{padding:3px}.ae-dash-quota-bar{float:left;background-color:#c0c0c0;height:13px;margin:.1em 0 0 0;position:relative}.ae-dash-quota-bar-free{background:url(/img/free_marker.png) top left no-repeat;width:7px;height:13px;position:absolute;top:0;left:0}#ae-dash-quota-footnote{margin:5px 0 0;font-weight:normal}.ae-quota-warning{background-color:#f90}.ae-quota-alert{background-color:#c00}.ae-quota-normal{background-color:#0b0}.ae-quota-alert-text{color:#c00}.ae-favicon-text{font-size:.85em}#ae-dash-popular{width:97%}#ae-dash-popular-reqsec-col{width:6.5em}#ae-dash-popular-req-col{width:7em}#ae-dash-popular-cpu-avg-col{width:9.5em}#ae-dash-popular-cpu-percent-col{width:7em}#ae-dash-popular .ae-unimportant{font-size:80%}#ae-dash-popular .ae-nowrap,#ae-dash-errors .ae-nowrap{margin-right:5px;overflow:hidden}#ae-dash-popular th span,#ae-dash-errors th span{font-size:.8em;font-weight:normal;display:block}#ae-dash-errors caption .g-unit{width:9em}#ae-dash-errors-count-col{width:5em}#ae-dash-errors-percent-col{width:7em}#ae-dash-graph-chart-type{float:left;margin-right:1em}#ae-apps-all strong.ae-disabled{color:#000;background:#eee}.ae-quota-resource{width:30%}.ae-quota-safety-limit{width:10%}#ae-quota-details h3{padding-bottom:0;margin-bottom:.25em}#ae-quota-details table{margin-bottom:1.75em}#ae-quota-details table.ae-quota-requests{margin-bottom:.5em}#ae-quota-refresh-note p{text-align:right;padding-top:.5em;padding-bottom:0;margin-bottom:0}#ae-quota-first-api.g-section{padding-bottom:0;margin-bottom:.25em}#ae-instances-summary-table,#ae-instances-details-table{margin-bottom:1em}.ae-instances-details-availability-image{float:left;margin-right:.5em}.ae-instances-small-link{font-size:80%}.ae-appbar-superuser-message strong{color:red}#ae-backends-table tr{vertical-align:baseline}.ae-backends-class-reminder{font-size:80%;color:#666;margin-left:3px}#ae-datastore-explorer-c{_margin-right:-3000px;_position:relative;_width:100%}#ae-datastore-explorer form dt{margin:1em 0 0 
0}#ae-datastore-explorer #ae-datastore-explorer-labels{margin:0 0 3px}#ae-datastore-explorer-header .ae-action{margin-left:1em}#ae-datastore-explorer .id{white-space:nowrap}#ae-datastore-explorer caption{text-align:right;padding:5px}#ae-datastore-explorer-submit{margin-top:5px}#ae-datastore-explorer-namespace{margin-top:7px;margin-right:5px}#ae-datastore-explorer-gql-spacer{margin-top:22px}h4 #ae-datastore-explorer-gql-label{font-weight:normal}#ae-datastore-form em{font-style:normal;font-weight:normal;margin:0 0 0 .2em;color:#666}#ae-datastore-form dt{font-weight:bold}#ae-datastore-form dd{margin:.4em 0 .3em 1.5em;overflow:auto;zoom:1}#ae-datastore-form dd em{width:4em;float:left}#ae-datastore-form dd.ae-last{margin-bottom:1em}#ae-datastore-explorer-tabs-content{margin-bottom:1em}#ae-datastore-explorer-list .ae-label-row,#ae-datastore-explorer-new .ae-label-row{float:left;padding-top:.2em}#ae-datastore-explorer-list .ae-input-row,#ae-datastore-explorer-list .ae-btn-row,#ae-datastore-explorer-new .ae-input-row,#ae-datastore-explorer-new .ae-btn-row{margin-left:6em}#ae-datastore-explorer-list .ae-btn-row,#ae-datastore-explorer-new .ae-btn-row{margin-bottom:0}.ae-datastore-index-name{font-size:1.2em;font-weight:bold}.ae-table .ae-datastore-index-defs{padding-left:20px}.ae-datastore-index-defs-row{border-top:1px solid #ddd}.ae-datastore-index-defs .ae-unimportant{font-size:.8em}.ae-datastore-index-status{border:1px solid #c0dfbf;background:#f3f7f3;margin:0 25px 0 0;padding:3px}#ae-datastore-index-status-col{width:15%}.ae-datastore-index-status-Building{border-color:#edebcd;background:#fefdec}.ae-datastore-index-status-Deleting{border-color:#ccc;background:#eee}.ae-datastore-index-status-Error{border-color:#ffd3b4;background:#ffeae0}.ae-datastore-pathlink{font-size:.9em}#ae-datastore-stats-top-level-c{padding-bottom:1em;margin-bottom:1em;border-bottom:1px solid #e5ecf9}#ae-datastore-stats-top-level{width:100%}#ae-datastore-stats-piecharts-c{margin-bottom:1em}.ae-datastore-stats-piechart-label{font-size:.85em;font-weight:normal;text-align:center;padding:0}#ae-datastore-stats-property-type{width:65%}#ae-datastore-stats-size-all{width:35%}#ae-datastore-stats-property-name{width:60%}#ae-datastore-stats-type{width:10%}#ae-datastore-stats-size-entity{width:30%}#ae-datastore-blob-filter-form{margin-bottom:1em}#ae-datastore-blob-query-filter-label{padding-right:.5em}#ae-datastore-blob-filter-contents{padding-top:.5em}#ae-datastore-blob-date-after,#ae-datastore-blob-date-before{float:left}#ae-datastore-blob-date-after{margin-right:1em}#ae-datastore-blob-order label{font-weight:normal}#ae-datastore-blob-col-check{width:2%}#ae-datastore-blob-col-file{width:45%}#ae-datastore-blob-col-type{width:14%}#ae-datastore-blob-col-size{width:16%}#ae-blobstore-col-date{width:18%}#ae-blob-detail-filename{padding-bottom:0}#ae-blob-detail-filename span{font-weight:normal}#ae-blob-detail-key{font-size:85%}#ae-blob-detail-preview{margin-top:1em}#ae-blob-detail-dl{text-align:right}#ae-domain-admins-list li{margin-bottom:.3em}#ae-domain-admins-list button{margin-left:.5em}#ae-new-app-dialog-c{width:500px}#ae-new-app-dialog-c .g-section{margin-bottom:1em}#dombilling-tt-setup-note{border:1px solid #ccc;padding:1em;background:#efe}#dombilling-tt-setup-error{padding:0.5em;background:#fee}p.light-note{color:#555}.ae-bottom-message{margin-top:1em}#domusage-apptable{border-top:1px solid #ccc;border-left:1px solid #ccc}#domusage-apptable td,#domusage-apptable th{border-right:1px solid #ccc;border-bottom:1px solid #ccc;padding:2px 
6px}#domusage-apptable td.users{text-align:right}#domusage-apptable td.cost{text-align:right}#domusage-apptable td.total-label{text-align:right;border-top:2px solid black;padding:1em 0.25em;border-right:0}#domusage-apptable td.total-cost{font-weight:bold;text-align:right;border-top:2px solid black;padding:1em 0.25em}#domusage-apptable td a{text-decoration:none}#domsettings-form div.ae-radio{margin-left:1.7em}#domsettings-form div.ae-radio input{margin-left:-1.47em;float:left}#ae-logs-c{_margin-right:-2000px;_position:relative;_width:100%;background:#fff}#ae-logs{background-color:#c5d7ef;padding:1px;line-height:1.65}#ae-logs .ae-table-caption{border:0}#ae-logs-c ol,#ae-logs-c li{list-style:none;padding:0;margin:0}#ae-logs-c li li{margin:0 0 0 3px;padding:0 0 0 17px}.ae-log-noerror{padding-left:23px}#ae-logs-form .goog-inline-block{margin-top:0}.ae-logs-reqlog .snippet{margin:.1em}.ae-logs-applog .snippet{color:#666}.ae-logs-severity{display:block;float:left;height:1.2em;width:1.2em;line-height:1.2;text-align:center;text-transform:capitalize;font-weight:bold;border-radius:2px;-moz-border-radius:2px;-webkit-border-radius:2px}.ae-logs-severity-4{background-color:#f22;color:#000}.ae-logs-severity-3{background-color:#f90;color:#000}.ae-logs-severity-2{background-color:#fd0}.ae-logs-severity-1{background-color:#3c0;color:#000}.ae-logs-severity-0{background-color:#09f;color:#000}#ae-logs-legend{margin:1em 0 0 0}#ae-logs-legend ul{list-style:none;margin:0;padding:0}#ae-logs-legend li,#ae-logs-legend strong{float:left;margin:0 1em 0 0}#ae-logs-legend li span{margin-right:.3em}.ae-logs-timestamp{padding:0 5px;font-size:85%}#ae-logs-form-c{margin-bottom:5px;padding-bottom:.5em;padding-left:1em}#ae-logs-form{padding:.3em 0 0}#ae-logs-form .ae-label-row{float:left;padding-top:.2em;margin-right:0.539em}#ae-logs-form .ae-input-row,#ae-logs-form .ae-btn-row{margin-left:4em}#ae-logs-form .ae-btn-row{margin-bottom:0}#ae-logs-requests-c{margin-bottom:.1em}#ae-logs-requests-c input{margin:0}#ae-logs-requests-all-label{margin-right:0.539em}#ae-logs-form-options{margin-top:8px}#ae-logs-tip{margin:.2em 0}#ae-logs-expand{margin-right:.2em}#ae-logs-severity-level-label{margin-top:.3em;display:block}#ae-logs-filter-hint-labels-list{margin:2px 0}#ae-logs-filter-hint-labels-list span{position:absolute}#ae-logs-filter-hint-labels-list ul{margin-left:5.5em;padding:0}#ae-logs-filter-hint-labels-list li{float:left;margin-right:.4em;line-height:1.2}.ae-toggle .ae-logs-getdetails,.ae-toggle pre{display:none}.ae-log-expanded .ae-toggle pre{display:block}#ae-logs-c .ae-log .ae-toggle{cursor:default;background:none;padding-left:0}#ae-logs-c .ae-log .ae-toggle h5{cursor:pointer;background-position:0 .55em;background-repeat:no-repeat;padding-left:17px}.ae-log .ae-plus h5{background-image:url(/img/wgt/plus.gif)}.ae-log .ae-minus h5{background-image:url(/img/wgt/minus.gif)}.ae-log{overflow:hidden;background-color:#fff;padding:.3em 0;line-height:1.65;border-bottom:1px solid #c5d7ef}.ae-log .ae-even{background-color:#e9e9e9;border:0}.ae-log h5{font-weight:normal;white-space:nowrap;padding:.4em 0 0 0}.ae-log span,.ae-log strong{margin:0 .3em}.ae-log .ae-logs-snippet{color:#666}.ae-log pre,.ae-logs-expanded{padding:.3em 0 .5em 1.5em;margin:0;font-family:"Courier New"}.ae-log .file{font-weight:bold}.ae-log.ae-log-expanded .file{white-space:pre-wrap;word-wrap:break-word}.ae-logs-app .ae-logs-req{display:none}.ae-logs-req .ae-app,.ae-logs-both 
.ae-app{padding-left:1em}#ae-dos-blacklist-rejects-table{text-align:left}#ae-dash-quota-percent-col{width:3.5em}.ae-cron-status-ok{color:#008000;font-size:90%;font-weight:bold}.ae-cron-status-error{color:#a03;font-size:90%;font-weight:bold}#ae-cronjobs-table .ae-table td{vertical-align:top}#ae-tasks-table td{vertical-align:top}#ae-tasks-quota{margin:0 0 1em 0}#ae-tasks-quota .ae-dash-quota-bar{width:150px}#ae-tasks-quota #ae-dash-quota-bar-col,#ae-tasks-quota .ae-dash-quota-bar{width:200px}.ae-tasks-paused-row{color:#666;font-style:italic;font-weight:bold}#ae-tasks-quota .ae-quota-safety-limit{width:30%}#ae-tasks-table{margin-top:1em}#ae-tasks-queuecontrols{margin-top:1em;margin-bottom:1em}#ae-tasks-delete-col{width:1em}#ae-tasks-eta-col,#ae-tasks-creation-col{width:11em}#ae-tasks-actions-col{width:7em}#ae-tasks-retry-col{width:4em}#ae-tasks-body-col{width:6em}#ae-tasks-headers-col{width:7em}.ae-tasks-hex-column,.ae-tasks-ascii-column{width:16em}#ae-tasks-table .ae-tasks-arrow{text-align:center}
\ No newline at end of file
+html,body,div,h1,h2,h3,h4,h5,h6,p,img,dl,dt,dd,ol,ul,li,table,caption,tbody,tfoot,thead,tr,th,td,form,fieldset,embed,object,applet{margin:0;padding:0;border:0;}body{font-size:62.5%;font-family:Arial,sans-serif;color:#000;background:#fff}a{color:#00c}a:active{color:#f00}a:visited{color:#551a8b}table{border-collapse:collapse;border-width:0;empty-cells:show}ul{padding:0 0 1em 1em}ol{padding:0 0 1em 1.3em}li{line-height:1.5em;padding:0 0 .5em 0}p{padding:0 0 1em 0}h1,h2,h3,h4,h5{padding:0 0 1em 0}h1,h2{font-size:1.3em}h3{font-size:1.1em}h4,h5,table{font-size:1em}sup,sub{font-size:.7em}input,select,textarea,option{font-family:inherit;font-size:inherit}.g-doc,.g-doc-1024,.g-doc-800{font-size:130%}.g-doc{width:100%;text-align:left}.g-section{width:100%;vertical-align:top;display:inline-block}*:first-child+html .g-section{display:block}* html .g-section{overflow:hidden}@-moz-document url-prefix(){.g-section{overflow:hidden}}@-moz-document url-prefix(){.g-section,tt:default{overflow:visible}}.g-section,.g-unit{zoom:1}.g-split .g-unit{text-align:right}.g-split .g-first{text-align:left}.g-doc-1024{width:73.074em;min-width:950px;margin:0 auto;text-align:left}* html .g-doc-1024{width:71.313em}*+html .g-doc-1024{width:71.313em}.g-doc-800{width:57.69em;min-width:750px;margin:0 auto;text-align:left}* html .g-doc-800{width:56.3em}*+html .g-doc-800{width:56.3em}.g-tpl-160 .g-unit,.g-unit .g-tpl-160 .g-unit,.g-unit .g-unit .g-tpl-160 .g-unit,.g-unit .g-unit .g-unit .g-tpl-160 .g-unit{margin:0 0 0 160px;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-160 .g-first,.g-unit .g-unit .g-tpl-160 .g-first,.g-unit .g-tpl-160 .g-first,.g-tpl-160 .g-first{margin:0;width:160px;float:left}.g-tpl-160-alt .g-unit,.g-unit .g-tpl-160-alt .g-unit,.g-unit .g-unit .g-tpl-160-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-160-alt .g-unit{margin:0 160px 0 0;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-160-alt .g-first,.g-unit .g-unit .g-tpl-160-alt .g-first,.g-unit .g-tpl-160-alt .g-first,.g-tpl-160-alt .g-first{margin:0;width:160px;float:right}.g-tpl-180 .g-unit,.g-unit .g-tpl-180 .g-unit,.g-unit .g-unit .g-tpl-180 .g-unit,.g-unit .g-unit .g-unit .g-tpl-180 .g-unit{margin:0 0 0 180px;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-180 .g-first,.g-unit .g-unit .g-tpl-180 .g-first,.g-unit .g-tpl-180 .g-first,.g-tpl-180 .g-first{margin:0;width:180px;float:left}.g-tpl-180-alt .g-unit,.g-unit .g-tpl-180-alt .g-unit,.g-unit .g-unit .g-tpl-180-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-180-alt .g-unit{margin:0 180px 0 0;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-180-alt .g-first,.g-unit .g-unit .g-tpl-180-alt .g-first,.g-unit .g-tpl-180-alt .g-first,.g-tpl-180-alt .g-first{margin:0;width:180px;float:right}.g-tpl-300 .g-unit,.g-unit .g-tpl-300 .g-unit,.g-unit .g-unit .g-tpl-300 .g-unit,.g-unit .g-unit .g-unit .g-tpl-300 .g-unit{margin:0 0 0 300px;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-300 .g-first,.g-unit .g-unit .g-tpl-300 .g-first,.g-unit .g-tpl-300 .g-first,.g-tpl-300 .g-first{margin:0;width:300px;float:left}.g-tpl-300-alt .g-unit,.g-unit .g-tpl-300-alt .g-unit,.g-unit .g-unit .g-tpl-300-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-300-alt .g-unit{margin:0 300px 0 0;width:auto;float:none}.g-unit .g-unit .g-unit .g-tpl-300-alt .g-first,.g-unit .g-unit .g-tpl-300-alt .g-first,.g-unit .g-tpl-300-alt .g-first,.g-tpl-300-alt .g-first{margin:0;width:300px;float:right}.g-tpl-25-75 .g-unit,.g-unit .g-tpl-25-75 .g-unit,.g-unit .g-unit .g-tpl-25-75 .g-unit,.g-unit .g-unit .g-unit .g-tpl-25-75 
.g-unit{width:74.999%;float:right;margin:0}.g-unit .g-unit .g-unit .g-tpl-25-75 .g-first,.g-unit .g-unit .g-tpl-25-75 .g-first,.g-unit .g-tpl-25-75 .g-first,.g-tpl-25-75 .g-first{width:24.999%;float:left;margin:0}.g-tpl-25-75-alt .g-unit,.g-unit .g-tpl-25-75-alt .g-unit,.g-unit .g-unit .g-tpl-25-75-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-25-75-alt .g-unit{width:24.999%;float:left;margin:0}.g-unit .g-unit .g-unit .g-tpl-25-75-alt .g-first,.g-unit .g-unit .g-tpl-25-75-alt .g-first,.g-unit .g-tpl-25-75-alt .g-first,.g-tpl-25-75-alt .g-first{width:74.999%;float:right;margin:0}.g-tpl-75-25 .g-unit,.g-unit .g-tpl-75-25 .g-unit,.g-unit .g-unit .g-tpl-75-25 .g-unit,.g-unit .g-unit .g-unit .g-tpl-75-25 .g-unit{width:24.999%;float:right;margin:0}.g-unit .g-unit .g-unit .g-tpl-75-25 .g-first,.g-unit .g-unit .g-tpl-75-25 .g-first,.g-unit .g-tpl-75-25 .g-first,.g-tpl-75-25 .g-first{width:74.999%;float:left;margin:0}.g-tpl-75-25-alt .g-unit,.g-unit .g-tpl-75-25-alt .g-unit,.g-unit .g-unit .g-tpl-75-25-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-75-25-alt .g-unit{width:74.999%;float:left;margin:0}.g-unit .g-unit .g-unit .g-tpl-75-25-alt .g-first,.g-unit .g-unit .g-tpl-75-25-alt .g-first,.g-unit .g-tpl-75-25-alt .g-first,.g-tpl-75-25-alt .g-first{width:24.999%;float:right;margin:0}.g-tpl-33-67 .g-unit,.g-unit .g-tpl-33-67 .g-unit,.g-unit .g-unit .g-tpl-33-67 .g-unit,.g-unit .g-unit .g-unit .g-tpl-33-67 .g-unit{width:66.999%;float:right;margin:0}.g-unit .g-unit .g-unit .g-tpl-33-67 .g-first,.g-unit .g-unit .g-tpl-33-67 .g-first,.g-unit .g-tpl-33-67 .g-first,.g-tpl-33-67 .g-first{width:32.999%;float:left;margin:0}.g-tpl-33-67-alt .g-unit,.g-unit .g-tpl-33-67-alt .g-unit,.g-unit .g-unit .g-tpl-33-67-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-33-67-alt .g-unit{width:32.999%;float:left;margin:0}.g-unit .g-unit .g-unit .g-tpl-33-67-alt .g-first,.g-unit .g-unit .g-tpl-33-67-alt .g-first,.g-unit .g-tpl-33-67-alt .g-first,.g-tpl-33-67-alt .g-first{width:66.999%;float:right;margin:0}.g-tpl-67-33 .g-unit,.g-unit .g-tpl-67-33 .g-unit,.g-unit .g-unit .g-tpl-67-33 .g-unit,.g-unit .g-unit .g-unit .g-tpl-67-33 .g-unit{width:32.999%;float:right;margin:0}.g-unit .g-unit .g-unit .g-tpl-67-33 .g-first,.g-unit .g-unit .g-tpl-67-33 .g-first,.g-unit .g-tpl-67-33 .g-first,.g-tpl-67-33 .g-first{width:66.999%;float:left;margin:0}.g-tpl-67-33-alt .g-unit,.g-unit .g-tpl-67-33-alt .g-unit,.g-unit .g-unit .g-tpl-67-33-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-67-33-alt .g-unit{width:66.999%;float:left;margin:0}.g-unit .g-unit .g-unit .g-tpl-67-33-alt .g-first,.g-unit .g-unit .g-tpl-67-33-alt .g-first,.g-unit .g-tpl-67-33-alt .g-first,.g-tpl-67-33-alt .g-first{width:32.999%;float:right;margin:0}.g-tpl-50-50 .g-unit,.g-unit .g-tpl-50-50 .g-unit,.g-unit .g-unit .g-tpl-50-50 .g-unit,.g-unit .g-unit .g-unit .g-tpl-50-50 .g-unit{width:49.999%;float:right;margin:0}.g-unit .g-unit .g-unit .g-tpl-50-50 .g-first,.g-unit .g-unit .g-tpl-50-50 .g-first,.g-unit .g-tpl-50-50 .g-first,.g-tpl-50-50 .g-first{width:49.999%;float:left;margin:0}.g-tpl-50-50-alt .g-unit,.g-unit .g-tpl-50-50-alt .g-unit,.g-unit .g-unit .g-tpl-50-50-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-50-50-alt .g-unit{width:49.999%;float:left;margin:0}.g-unit .g-unit .g-unit .g-tpl-50-50-alt .g-first,.g-unit .g-unit .g-tpl-50-50-alt .g-first,.g-unit .g-tpl-50-50-alt .g-first,.g-tpl-50-50-alt .g-first{width:49.999%;float:right;margin:0}.g-tpl-nest{width:auto}.g-tpl-nest .g-section{display:inline}.g-tpl-nest .g-unit,.g-unit .g-tpl-nest .g-unit,.g-unit .g-unit .g-tpl-nest 
.g-unit,.g-unit .g-unit .g-unit .g-tpl-nest .g-unit{float:left;width:auto;margin:0}.g-tpl-nest-alt .g-unit,.g-unit .g-tpl-nest-alt .g-unit,.g-unit .g-unit .g-tpl-nest-alt .g-unit,.g-unit .g-unit .g-unit .g-tpl-nest-alt .g-unit{float:right;width:auto;margin:0}.goog-button{border-width:1px;border-style:solid;border-color:#bbb #999 #999 #bbb;border-radius:2px;-webkit-border-radius:2px;-moz-border-radius:2px;font:normal normal normal 13px/13px Arial,sans-serif;color:#000;text-align:middle;text-decoration:none;text-shadow:0 1px 1px rgba(255,255,255,1);background:#eee;background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#ddd));background:-moz-linear-gradient(top,#fff,#ddd);filter:progid:DXImageTransform.Microsoft.Gradient(EndColorstr='#dddddd',StartColorstr='#ffffff',GradientType=0);cursor:pointer;margin:0;display:inline;display:-moz-inline-box;display:inline-block;*overflow:visible;padding:4px 8px 5px}a.goog-button,span.goog-button,div.goog-button{padding:4px 8px 5px}.goog-button:visited{color:#000}.goog-button{*display:inline}.goog-button:focus,.goog-button:hover{border-color:#000}.goog-button:active,.goog-button-active{color:#000;background-color:#bbb;border-color:#999 #bbb #bbb #999;background-image:-webkit-gradient(linear,0 0,0 100%,from(#ddd),to(#fff));background-image:-moz-linear-gradient(top,#ddd,#fff);filter:progid:DXImageTransform.Microsoft.Gradient(EndColorstr='#ffffff',StartColorstr='#dddddd',GradientType=0)}.goog-button[disabled],.goog-button[disabled]:active,.goog-button[disabled]:hover{color:#666;border-color:#ddd;background-color:#f3f3f3;background-image:none;text-shadow:none;cursor:auto}.goog-button{padding:5px 8px 4px\9}.goog-button{*padding:4px 7px 2px}html>body input.goog-button,x:-moz-any-link,x:default,html>body button.goog-button,x:-moz-any-link,x:default{padding-top:3px;padding-bottom:2px}a.goog-button,x:-moz-any-link,x:default,span.goog-button,x:-moz-any-link,x:default,div.goog-button,x:-moz-any-link,x:default{padding:4px 8px 5px}.goog-button-fixed{padding-left:0!important;padding-right:0!important;width:100%}button.goog-button-icon-c{padding-top:1px;padding-bottom:1px}button.goog-button-icon-c{padding-top:3px\9;padding-bottom:2px\9}button.goog-button-icon-c{*padding-top:0;*padding-bottom:0}html>body button.goog-button-icon-c,x:-moz-any-link,x:default{padding-top:1px;padding-bottom:1px}.goog-button-icon{display:block;margin:0 auto;height:18px;width:18px}html>body .goog-inline-block{display:-moz-inline-box;display:inline-block;}.goog-inline-block{position:relative;display:inline-block}* html .goog-inline-block{display:inline}*:first-child+html .goog-inline-block{display:inline}.goog-custom-button{margin:0 2px 2px;border:0;padding:0;font:normal Tahoma,Arial,sans-serif;color:#000;text-decoration:none;list-style:none;vertical-align:middle;cursor:pointer;outline:none;background:#eee;background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#ddd));background:-moz-linear-gradient(top,#fff,#ddd);filter:progid:DXImageTransform.Microsoft.Gradient(EndColorstr='#dddddd',StartColorstr='#ffffff',GradientType=0)}.goog-custom-button-outer-box,.goog-custom-button-inner-box{border-style:solid;border-color:#bbb #999 #999 #bbb;vertical-align:top}.goog-custom-button-outer-box{margin:0;border-width:1px 0;padding:0}.goog-custom-button-inner-box{margin:0 -1px;border-width:0 1px;padding:3px 4px}* html .goog-custom-button-inner-box{left:-1px}* html .goog-custom-button-rtl .goog-custom-button-outer-box{left:-1px}* html .goog-custom-button-rtl 
.goog-custom-button-inner-box{left:0}*:first-child+html .goog-custom-button-inner-box{left:-1px}*:first-child+html .goog-custom-button-collapse-right .goog-custom-button-inner-box{border-left-width:2px}*:first-child+html .goog-custom-button-collapse-left .goog-custom-button-inner-box{border-right-width:2px}*:first-child+html .goog-custom-button-collapse-right.goog-custom-button-collapse-left .goog-custom-button-inner-box{border-width:0 1px}*:first-child+html .goog-custom-button-rtl .goog-custom-button-inner-box{left:1px}::root .goog-custom-button,::root .goog-custom-button-outer-box{line-height:0}::root .goog-custom-button-inner-box{line-height:normal}.goog-custom-button-disabled{background-image:none!important;opacity:0.4;-moz-opacity:0.4;filter:alpha(opacity=40)}.goog-custom-button-disabled .goog-custom-button-outer-box,.goog-custom-button-disabled .goog-custom-button-inner-box{color:#333!important;border-color:#999!important}* html .goog-custom-button-disabled{margin:2px 1px!important;padding:0 1px!important}*:first-child+html .goog-custom-button-disabled{margin:2px 1px!important;padding:0 1px!important}.goog-custom-button-hover .goog-custom-button-outer-box,.goog-custom-button-hover .goog-custom-button-inner-box{border-color:#000!important;}.goog-custom-button-active,.goog-custom-button-checked{background-color:#bbb;background-position:bottom left;background-image:-webkit-gradient(linear,0 0,0 100%,from(#ddd),to(#fff));background:-moz-linear-gradient(top,#ddd,#fff);filter:progid:DXImageTransform.Microsoft.Gradient(EndColorstr='#ffffff',StartColorstr='#dddddd',GradientType=0)}.goog-custom-button-focused .goog-custom-button-outer-box,.goog-custom-button-focused .goog-custom-button-inner-box,.goog-custom-button-focused.goog-custom-button-collapse-left .goog-custom-button-inner-box,.goog-custom-button-focused.goog-custom-button-collapse-left.goog-custom-button-checked .goog-custom-button-inner-box{border-color:#000}.goog-custom-button-collapse-right,.goog-custom-button-collapse-right .goog-custom-button-outer-box,.goog-custom-button-collapse-right .goog-custom-button-inner-box{margin-right:0}.goog-custom-button-collapse-left,.goog-custom-button-collapse-left .goog-custom-button-outer-box,.goog-custom-button-collapse-left .goog-custom-button-inner-box{margin-left:0}.goog-custom-button-collapse-left .goog-custom-button-inner-box{border-left:1px solid #fff}.goog-custom-button-collapse-left.goog-custom-button-checked .goog-custom-button-inner-box{border-left:1px solid #ddd}* html .goog-custom-button-collapse-left .goog-custom-button-inner-box{left:0}*:first-child+html .goog-custom-button-collapse-left .goog-custom-button-inner-box{left:0}.goog-date-picker th,.goog-date-picker td{font-family:arial,sans-serif;text-align:center}.goog-date-picker th{font-size:.9em;font-weight:bold;color:#666667;background-color:#c3d9ff}.goog-date-picker td{vertical-align:middle;padding:2px 3px}.goog-date-picker{-moz-user-focus:normal;-moz-user-select:none;position:absolute;border:1px solid gray;float:left;font-family:arial,sans-serif;padding-left:1px;background:white}.goog-date-picker-menu{position:absolute;background:threedface;border:1px solid gray;-moz-user-focus:normal}.goog-date-picker-menu ul{list-style:none;margin:0;padding:0}.goog-date-picker-menu ul li{cursor:default}.goog-date-picker-menu-selected{background-color:#aaccee}.goog-date-picker td div{float:left}.goog-date-picker button{padding:0;margin:1px;border:1px outset gray}.goog-date-picker-week{padding:1px 3px}.goog-date-picker-wday{padding:1px 
3px}.goog-date-picker-today-cont{text-align:left!important}.goog-date-picker-none-cont{text-align:right!important}.goog-date-picker-head td{text-align:center}.goog-date-picker-month{width:12ex}.goog-date-picker-year{width:6ex}.goog-date-picker table{border-collapse:collapse}.goog-date-picker-selected{background-color:#aaccee!important;color:blue!important}.goog-date-picker-today{font-weight:bold!important}.goog-date-picker-other-month{-moz-opacity:0.3;filter:Alpha(Opacity=30)}.sat,.sun{background:#eee}#button1,#button2{display:block;width:60px;text-align:center;margin:10px;padding:10px;font:normal .8em arial,sans-serif;border:1px solid #000}.goog-menu{position:absolute;color:#000;border:1px solid #b5b6b5;background-color:#f3f3f7;cursor:default;font:normal small arial,helvetica,sans-serif;margin:0;padding:0;outline:none}.goog-menuitem{padding:2px 5px;margin:0;list-style:none}.goog-menuitem-highlight{background-color:#4279a5;color:#fff}.goog-menuitem-disabled{color:#999}.goog-option{padding-left:15px!important}.goog-option-selected{background-image:url(/img/check.gif);background-position:4px 50%;background-repeat:no-repeat}.goog-menuseparator{position:relative;margin:2px 0;border-top:1px solid #999;padding:0;outline:none}.goog-submenu{position:relative}.goog-submenu-arrow{position:absolute;display:block;width:11px;height:11px;right:3px;top:4px;background-image:url(/img/menu-arrows.gif);background-repeat:no-repeat;background-position:0 0;font-size:1px}.goog-menuitem-highlight .goog-submenu-arrow{background-position:0 -11px}.goog-menuitem-disabled .goog-submenu-arrow{display:none}.goog-menu-filter{margin:2px;border:1px solid silver;background:white;overflow:hidden}.goog-menu-filter div{color:gray;position:absolute;padding:1px}.goog-menu-filter input{margin:0;border:0;background:transparent;width:100%}.goog-menuitem-partially-checked{background-image:url(/img/check-outline.gif);background-position:4px 50%;background-repeat:no-repeat}.goog-menuitem-fully-checked{background-image:url(/img/check.gif);background-position:4px 50%;background-repeat:no-repeat}.goog-menu-button{margin:0 2px 2px 2px;border:0;padding:0;font:normal Tahoma,Arial,sans-serif;color:#000;background:#ddd url("/img/button-bg.gif") repeat-x top left;text-decoration:none;list-style:none;vertical-align:middle;cursor:pointer;outline:none}.goog-menu-button-outer-box,.goog-menu-button-inner-box{border-style:solid;border-color:#aaa;vertical-align:middle}.goog-menu-button-outer-box{margin:0;border-width:1px 0;padding:0}.goog-menu-button-inner-box{margin:0 -1px;border-width:0 1px;padding:0 4px 2px 4px}* html .goog-menu-button-inner-box{left:-1px}* html .goog-menu-button-rtl .goog-menu-button-outer-box{left:-1px}* html .goog-menu-button-rtl .goog-menu-button-inner-box{left:0}*:first-child+html .goog-menu-button-inner-box{left:-1px}*:first-child+html .goog-menu-button-rtl .goog-menu-button-inner-box{left:1px}::root .goog-menu-button,::root .goog-menu-button-outer-box,::root .goog-menu-button-inner-box{line-height:0}::root .goog-menu-button-caption,::root .goog-menu-button-dropdown{line-height:normal}.goog-menu-button-disabled{background-image:none!important;opacity:0.4;-moz-opacity:0.4;filter:alpha(opacity=40)}.goog-menu-button-disabled .goog-menu-button-outer-box,.goog-menu-button-disabled .goog-menu-button-inner-box,.goog-menu-button-disabled .goog-menu-button-caption,.goog-menu-button-disabled .goog-menu-button-dropdown{color:#333!important;border-color:#999!important}* html .goog-menu-button-disabled{margin:2px 1px!important;padding:0 
1px!important}*:first-child+html .goog-menu-button-disabled{margin:2px 1px!important;padding:0 1px!important}.goog-menu-button-hover .goog-menu-button-outer-box,.goog-menu-button-hover .goog-menu-button-inner-box{border-color:#9cf #69e #69e #7af!important;}.goog-menu-button-active,.goog-menu-button-open{background-color:#bbb;background-position:bottom left}.goog-menu-button-focused .goog-menu-button-outer-box,.goog-menu-button-focused .goog-menu-button-inner-box{border-color:#3366cc}.goog-menu-button-caption{padding:0 4px 0 0;vertical-align:middle}.goog-menu-button-rtl .goog-menu-button-caption{padding:0 0 0 4px}.goog-menu-button-dropdown{width:7px;background:url(/img/toolbar_icons.gif) no-repeat -176px;vertical-align:middle}.goog-flat-menu-button{margin:0 2px;padding:1px 4px;font:normal 95% Tahoma,Arial,sans-serif;color:#333;text-decoration:none;list-style:none;vertical-align:middle;cursor:pointer;outline:none;-moz-outline:none;border-width:1px;border-style:solid;border-color:#c9c9c9;background-color:#fff}.goog-flat-menu-button-disabled *{color:#999;border-color:#ccc;cursor:default}.goog-flat-menu-button-hover,.goog-flat-menu-button-hover{border-color:#9cf #69e #69e #7af!important;}.goog-flat-menu-button-active{background-color:#bbb;background-position:bottom left}.goog-flat-menu-button-focused{border-color:#3366cc}.goog-flat-menu-button-caption{padding-right:10px;vertical-align:middle}.goog-flat-menu-button-dropdown{width:7px;background:url(/img/toolbar_icons.gif) no-repeat -176px;vertical-align:middle}h1{font-size:1.8em}.g-doc{width:auto;margin:0 10px}.g-doc-1024{margin-left:10px}#ae-logo{background:url(//www.google.com/images/logos/app_engine_logo_sm.gif) 0 0 no-repeat;display:block;width:178px;height:30px;margin:4px 0 0 0}.ae-ir span{position:absolute;display:block;width:0;height:0;overflow:hidden}.ae-noscript{position:absolute;left:-5000px}#ae-lhs-nav{border-right:3px solid #e5ecf9}.ae-notification{margin-bottom:.6em;text-align:center}.ae-notification strong{display:block;width:55%;margin:0 auto;text-align:center;padding:.6em;background-color:#fff1a8;font-weight:bold}.ae-alert{font-weight:bold;background:url(/img/icn/warning.png) no-repeat;margin-bottom:.5em;padding-left:1.8em}.ae-info{background:url(/img/icn/icn-info.gif) no-repeat;margin-bottom:.5em;padding-left:1.8em}.ae-promo{padding:.5em .8em;margin:.6em 0;background-color:#fffbe8;border:1px solid #fff1a9;text-align:left}.ae-promo strong{position:relative;top:.3em}.ae-alert-text,.ae-warning-text{background-color:transparent;background-position:right 1px;padding:0 18px 0 0}.ae-alert-text{color:#c00}.ae-warning-text{color:#f90}.ae-alert-c span{display:inline-block}.ae-message{border:1px solid #e5ecf9;background-color:#f6f9ff;margin-bottom:1em;padding:.5em}.ae-errorbox{border:1px solid #f00;background-color:#fee;margin-bottom:1em;padding:1em}#bd .ae-errorbox ul{padding-bottom:0}.ae-form dt{font-weight:bold}.ae-form dt em,.ae-field-hint{margin-top:.2em;color:#666667;font-size:.85em}.ae-field-yyyymmdd,.ae-field-hhmmss{width:6em}.ae-field-hint-hhmmss{margin-left:2.3em}.ae-form label{display:block;margin:0 0 .2em 0;font-weight:bold}.ae-radio{margin-bottom:.3em}.ae-radio label{display:inline}.ae-form dd,.ae-input-row{margin-bottom:.6em}.ae-input-row-group{border:1px solid #fff1a9;background:#fffbe8;padding:8px}.ae-btn-row{margin-top:1.4em;margin-bottom:1em}.ae-btn-row-note{padding:5px 0 6px 0}.ae-btn-row-note span{padding-left:18px;padding-right:.5em;background:transparent url(/img/icn/icn-info.gif) 0 0 
no-repeat}.ae-btn-primary{font-weight:bold}form .ae-cancel{margin-left:.5em}.ae-submit-inline{margin-left:.8em}.ae-radio-bullet{width:20px;float:left}.ae-label-hanging-indent{margin-left:5px}.ae-divider{margin:0 .6em 0 .5em}.ae-nowrap{white-space:nowrap}.ae-pre-wrap{white-space:pre-wrap;white-space:-moz-pre-wrap;white-space:-pre-wrap;white-space:-o-pre-wrap;word-wrap:break-word;_white-space:pre;}wbr:after{content:"\00200B"}a button{text-decoration:none}.ae-alert ul{margin-bottom:.75em;margin-top:.25em;line-height:1.5em}.ae-alert h4{color:#000;font-weight:bold;padding:0 0 .5em}.ae-form-simple-list{list-style-type:none;padding:0;margin-bottom:1em}.ae-form-simple-list li{padding:.3em 0 .5em .5em;border-bottom:1px solid #c3d9ff}div.ae-datastore-index-to-delete,div.ae-datastore-index-to-build{color:#aaa}#hd p{padding:0}#hd li{display:inline}ul{padding:0 0 1em 1.2em}#ae-userinfo{text-align:right;white-space:nowrap;}#ae-userinfo ul{padding-bottom:0;padding-top:5px}#ae-appbar-lrg{margin:0 0 1.25em 0;padding:.25em .5em;background-color:#e5ecf9;border-top:1px solid #36c}#ae-appbar-lrg h1{font-size:1.2em;padding:0}#ae-appbar-lrg h1 span{font-size:80%;font-weight:normal}#ae-appbar-lrg form{display:inline;padding-right:.1em;margin-right:.5em}#ae-appbar-lrg strong{white-space:nowrap}#ae-appbar-sml{margin:0 0 1.25em 0;height:8px;padding:0 .5em;background:#e5ecf9}.ae-rounded-sml{border-radius:3px;-moz-border-radius:3px;-webkit-border-radius:3px}#ae-appbar-lrg a{margin-top:.3em}a.ae-ext-link,a span.ae-ext-link{background:url(/img/icn/icn-open-in-new-window.png) no-repeat right;padding-right:18px;margin-right:8px}.ae-no-pad{padding-left:1em}.ae-message h4{margin-bottom:.3em;padding-bottom:0}#ft{text-align:center;margin:2.5em 0 1em;padding-top:.5em;border-top:2px solid #c3d9ff}#bd h3{font-weight:bold;font-size:1.4em}#bd h3 .ae-apps-switch{font-weight:normal;font-size:.7em;margin-left:2em}#bd p{padding:0 0 1em 0}#ae-content{padding-left:1em}.ae-unimportant{color:#666}.ae-new-usr td{border-top:1px solid #ccccce;background-color:#ffe}.ae-error-td td{border:2px solid #f00;background-color:#fee}.ae-delete{cursor:pointer;border:none;background:transparent;}.ae-btn-large{background:#039 url(/img/icn/button_back.png) repeat-x;color:#fff;font-weight:bold;font-size:1.2em;padding:.5em;border:2px outset #000;cursor:pointer}.ae-breadcrumb{margin:0 0 1em}.ae-disabled,a.ae-disabled,a.ae-disabled:hover,a.ae-disabled:active{color:#666!important;text-decoration:none!important;cursor:default!important;opacity:.4!important;-moz-opacity:.4!important;filter:alpha(opacity=40)!important}input.ae-readonly{border:2px solid transparent;border-left:0;background-color:transparent}span.ae-text-input-clone{padding:5px 5px 5px 0}.ae-loading{opacity:.4;-moz-opacity:.4;filter:alpha(opacity=40)}.ae-tip{margin:1em 0;background:url(/img/tip.png) top left no-repeat;padding:2px 0 0 25px}sup.ae-new-sup{color:red}.ae-action{color:#00c;cursor:pointer;text-decoration:underline}.ae-toggle{padding-left:16px;background-position:left center;background-repeat:no-repeat;cursor:pointer}.ae-minus{background-image:url(/img/wgt/minus.gif)}.ae-plus{background-image:url(/img/wgt/plus.gif)}.ae-print{background-image:url(/img/print.gif);padding-left:19px}.ae-currency,.ae-table thead th.ae-currency{text-align:right;white-space:nowrap}#ae-loading{font-size:1.2em;position:absolute;text-align:center;top:0;width:100%}#ae-loading div{margin:0 auto;background:#fff1a9;width:5em;font-weight:bold;padding:4px 
10px;-moz-border-radius-bottomleft:3px;-moz-border-radius-bottomright:3px;-webkit-border-radius-bottomleft:3px;-webkit-border-radius-bottomright:3px}.ae-occlude{filter:alpha(opacity=0);position:absolute}.g-tpl-66-34 .g-unit,.g-unit .g-tpl-66-34 .g-unit,.g-unit .g-unit .g-tpl-66-34 .g-unit,.g-unit .g-unit .g-unit .g-tpl-66-34 .g-unit{display:inline;margin:0;width:33.999%;float:right}.g-unit .g-unit .g-unit .g-tpl-66-34 .g-first,.g-unit .g-unit .g-tpl-66-34 .g-first,.g-unit .g-tpl-66-34 .g-first,.g-tpl-66-34 .g-first{display:inline;margin:0;width:65.999%;float:left}.ae-ie6-c{_margin-right:-2000px;_position:relative;_width:100%;background:#fff}h2.ae-section-header{background:#e5ecf9;padding:.2em .4em;margin-bottom:.5em}.ae-field-span{padding:3px 0}select{font:13px/13px Arial,sans-serif;color:#000;border-width:1px;border-style:solid;border-color:#bbb #999 #999 #bbb;-webkit-border-radius:2px;-moz-border-radius:2px;background:#eee;background:-moz-linear-gradient(top,#fff,#ddd);filter:progid:DXImageTransform.Microsoft.Gradient(EndColorstr='#dddddd',StartColorstr='#ffffff',GradientType=0);cursor:pointer;padding:2px 1px;margin:0}select:hover{border-color:#000}select[disabled],select[disabled]:active{color:#666;border-color:#ddd;background-color:#f3f3f3;background-image:none;text-shadow:none;cursor:auto}.ae-table-plain{border-collapse:collapse;width:100%}.ae-table{border:1px solid #c5d7ef;border-collapse:collapse;width:100%}#bd h2.ae-table-title{background:#e5ecf9;margin:0;color:#000;font-size:1em;padding:3px 0 3px 5px;border-left:1px solid #c5d7ef;border-right:1px solid #c5d7ef;border-top:1px solid #c5d7ef}.ae-table-caption,.ae-table caption{border:1px solid #c5d7ef;background:#e5ecf9;-moz-margin-start:-1px}.ae-table caption{padding:3px 5px;text-align:left}.ae-table th,.ae-table td{background-color:#fff;padding:.35em 1em .25em .35em;margin:0}.ae-table thead th{font-weight:bold;text-align:left;background:#c5d7ef;vertical-align:bottom}.ae-table thead th .ae-no-bold{font-weight:normal}.ae-table tfoot tr td{border-top:1px solid #c5d7ef;background-color:#e5ecf9}.ae-table td{border-top:1px solid #c5d7ef;border-bottom:1px solid #c5d7ef}.ae-even>td,.ae-even th,.ae-even-top td,.ae-even-tween td,.ae-even-bottom td,ol.ae-even{background-color:#e9e9e9;border-top:1px solid #c5d7ef;border-bottom:1px solid #c5d7ef}.ae-even-top td{border-bottom:0}.ae-even-bottom td{border-top:0}.ae-even-tween td{border:0}.ae-table .ae-tween td{border:0}.ae-table .ae-tween-top td{border-bottom:0}.ae-table .ae-tween-bottom td{border-top:0}#bd .ae-table .cbc{width:1.5em;padding-right:0}.ae-table #ae-live td{background-color:#ffeac0}.ae-table-fixed{table-layout:fixed}.ae-table-fixed td,.ae-table-nowrap{overflow:hidden;white-space:nowrap}.ae-paginate strong{margin:0 .5em}tfoot .ae-paginate{text-align:right}.ae-table-caption .ae-paginate,.ae-table-caption .ae-orderby{padding:2px 5px}.modal-dialog{background:#c1d9ff;border:1px solid #3a5774;color:#000;padding:4px;position:absolute;font-size:1.3em;-moz-box-shadow:0 1px 4px #333;-webkit-box-shadow:0 1px 4px #333;box-shadow:0 1px 4px #333}.modal-dialog a,.modal-dialog a:link,.modal-dialog a:visited{color:#06c;cursor:pointer}.modal-dialog-bg{background:#666;left:0;position:absolute;top:0}.modal-dialog-title{background:#e0edfe;color:#000;cursor:pointer;font-size:120%;font-weight:bold;padding:8px 15px 8px 8px;position:relative;_zoom:1;}.modal-dialog-title-close{background:#e0edfe url(https://ssl.gstatic.com/editor/editortoolbar.png) no-repeat -528px 
0;cursor:default;height:15px;position:absolute;right:10px;top:8px;width:15px;vertical-align:middle}.modal-dialog-buttons,.modal-dialog-content{background-color:#fff;padding:8px}.modal-dialog-buttons button{margin-right:.75em}.goog-buttonset-default{font-weight:bold}.goog-tab{position:relative;border:1px solid #8ac;padding:4px 9px;color:#000;background:#e5ecf9;border-top-left-radius:2px;border-top-right-radius:2px;-moz-border-radius-topleft:2px;-webkit-border-top-left-radius:2px;-moz-border-radius-topright:2px;-webkit-border-top-right-radius:2px}.goog-tab-bar-top .goog-tab{margin:1px 4px 0 0;border-bottom:0;float:left}.goog-tab-bar-bottom .goog-tab{margin:0 4px 1px 0;border-top:0;float:left}.goog-tab-bar-start .goog-tab{margin:0 0 4px 1px;border-right:0}.goog-tab-bar-end .goog-tab{margin:0 1px 4px 0;border-left:0}.goog-tab-hover{text-decoration:underline;cursor:pointer}.goog-tab-disabled{color:#fff;background:#ccc;border-color:#ccc}.goog-tab-selected{background:#fff!important;color:black;font-weight:bold}.goog-tab-bar-top .goog-tab-selected{top:1px;margin-top:0;padding-bottom:5px}.goog-tab-bar-bottom .goog-tab-selected{top:-1px;margin-bottom:0;padding-top:5px}.goog-tab-bar-start .goog-tab-selected{left:1px;margin-left:0;padding-right:9px}.goog-tab-bar-end .goog-tab-selected{left:-1px;margin-right:0;padding-left:9px}.goog-tab-content{padding:.1em .8em .8em .8em;border:1px solid #8ac;border-top:none}.goog-tab-bar{position:relative;margin:0 0 0 5px;border:0;padding:0;list-style:none;cursor:default;outline:none}.goog-tab-bar-clear{border-top:1px solid #8ac;clear:both;height:0;overflow:hidden}.goog-tab-bar-start{float:left}.goog-tab-bar-end{float:right}* html .goog-tab-bar-start{margin-right:-3px}* html .goog-tab-bar-end{margin-left:-3px}#ae-nav ul{list-style-type:none;margin:0;padding:1em 0}#ae-nav ul li{padding-left:.5em}#ae-nav .ae-nav-selected{color:#000;display:block;font-weight:bold;background-color:#e5ecf9;margin-right:-1px;border-top-left-radius:4px;-moz-border-radius-topleft:4px;-webkit-border-top-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomleft:4px;-webkit-border-bottom-left-radius:4px}#ae-nav .ae-nav-bold{font-weight:bold}#ae-nav ul li span.ae-nav-disabled{color:#666}#ae-nav ul ul{margin:0;padding:0 0 0 .5em}#ae-nav ul ul li{padding-left:.5em}#ae-nav ul li a,#ae-nav ul li span,#ae-nav ul ul li a{padding-left:.5em}#ae-nav li a:link,#ae-nav li a:visited{color:#00c}.ae-nav-group{padding:.5em;margin:0 .75em 0 0;background-color:#fffbe8;border:1px solid #fff1a9}.ae-nav-group h4{font-weight:bold;padding:auto auto .5em .5em;padding-left:.4em;margin-bottom:.5em;padding-bottom:0}.ae-nav-group ul{margin:0 0 .5em 0;padding:0 0 0 1.3em;list-style-type:none}.ae-nav-group ul li{padding-bottom:.5em}.ae-nav-group li a:link,.ae-nav-group li a:visited{color:#00c}.ae-nav-group li a:hover{color:#00c}@media print{body{font-size:13px;width:8.5in;background:#fff}table,.ae-table-fixed{table-layout:automatic}tr{display:table-row!important}.g-doc-1024{width:8.5in}#ae-appbar-lrg,.ae-table-caption,.ae-table-nowrap,.ae-nowrap,th,td{overflow:visible!important;white-space:normal!important;background:#fff!important}.ae-print,.ae-toggle{display:none}#ae-lhs-nav-c{display:none}#ae-content{margin:0;padding:0}.goog-zippy-collapsed,.goog-zippy-expanded{background:none!important;padding:0!important}}#ae-admin-dev-table{margin:0 0 1em 0}.ae-admin-dev-tip,.ae-admin-dev-tip.ae-tip{margin:-0.31em 0 
2.77em}#ae-sms-countryselect{margin-right:.5em}#ae-admin-enable-form{margin-bottom:1em}#ae-admin-services-c{margin-top:2em}#ae-admin-services{padding:0 0 0 3em;margin-bottom:1em;font-weight:bold}#ae-admin-logs-table-c{_margin-right:-2000px;_position:relative;_width:100%;background:#fff}#ae-admin-logs-table{margin:0;padding:0}#ae-admin-logs-filters{padding:3px 0 3px 5px}#ae-admin-logs-pagination{padding:6px 5px 0 0;text-align:right;width:45%}#ae-admin-logs-pagination span.ae-disabled{color:#666;background-color:transparent}#ae-admin-logs-table td{white-space:nowrap}#ae-storage-content div.ae-alert{padding-bottom:5px}#ae-admin-performance-form input[type=text]{width:2em}.ae-admin-performance-value{font-weight:normal}.ae-admin-performance-static-value{color:#666}.goog-slider-horizontal,.goog-twothumbslider-horizontal{position:relative;width:502px;height:7px;display:block;outline:0;margin:1.0em 0 0.9em 3em}.ae-slider-rail:before{position:relative;top:-0.462em;float:left;content:'Min';margin:0 0 0 -3em;color:#999}.ae-slider-rail{position:absolute;background-color:#d9d9d9;top:0;right:8px;bottom:0;left:8px;border:solid 1px;border-color:#a6a6a6 #b3b3b3 #bfbfbf;border-radius:5px}.ae-slider-rail:after{position:relative;top:-0.462em;float:right;content:'Max';margin:0 -3em 0 0;color:#999}.goog-slider-horizontal .goog-slider-thumb,.goog-twothumbslider-horizontal .goog-twothumbslider-value-thumb,.goog-twothumbslider-horizontal .goog-twothumbslider-extent-thumb{position:absolute;width:17px;height:17px;background:transparent url(/img/slider_thumb-down.png) no-repeat;outline:0}.goog-slider-horizontal .goog-slider-thumb{top:-5px}.goog-twothumbslider-horizontal .goog-twothumbslider-value-thumb{top:-11px}.goog-twothumbslider-horizontal .goog-twothumbslider-extent-thumb{top:2px;background-image:url(/img/slider_thumb-up.png)}.ae-admin-performance-scale{position:relative;display:inline-block;width:502px;margin:0 0 2.7em 3em}.ae-admin-performance-scale .ae-admin-performance-scale-start{position:absolute;display:inline-block;top:0;width:100%;text-align:left}.ae-admin-performance-scale .ae-admin-performance-scale-mid{position:absolute;display:inline-block;top:0;width:100%;text-align:center}.ae-admin-performance-scale .ae-admin-performance-scale-end{position:absolute;display:inline-block;top:0;width:100%;text-align:right}.ae-absolute-container{display:inline-block;width:100%}#ae-billing-form-c{_margin-right:-3000px;_position:relative;_width:100%}.ae-rounded-top-small{-moz-border-radius-topleft:3px;-webkit-border-top-left-radius:3px;-moz-border-radius-topright:3px;-webkit-border-top-right-radius:3px}.ae-progress-content{height:400px}#ae-billing-tos{text-align:left;width:100%;margin-bottom:.5em}.ae-billing-budget-section{margin-bottom:1.5em}.ae-billing-budget-section .g-unit,.g-unit .ae-billing-budget-section .g-unit,.g-unit .g-unit .ae-billing-budget-section .g-unit{margin:0 0 0 11em;width:auto;float:none}.g-unit .g-unit .ae-billing-budget-section .g-first,.g-unit .ae-billing-budget-section .g-first,.ae-billing-budget-section .g-first{margin:0;width:11em;float:left}#ae-billing-form .ae-btn-row{margin-left:11em}#ae-billing-form .ae-btn-row .ae-info{margin-top:10px}#ae-billing-checkout{width:150px;float:left}#ae-billing-alloc-table{border:1px solid #c5d7ef;border-bottom:none;width:100%;margin-top:.5em}#ae-billing-alloc-table th,#ae-billing-alloc-table td{padding:.35em 1em .25em .35em;border-bottom:1px solid 
#c5d7ef;color:#000;white-space:nowrap}.ae-billing-resource{background-color:transparent;font-weight:normal}#ae-billing-alloc-table tr th span{font-weight:normal}#ae-billing-alloc-table tr{vertical-align:baseline}#ae-billing-alloc-table th{white-space:nowrap}#ae-billing-alloc-table .ae-editable span.ae-text-input-clone,#ae-billing-alloc-table .ae-readonly input{display:none}#ae-billing-alloc-table .ae-readonly span.ae-text-input-clone,#ae-billing-alloc-table .ae-editable input{display:inline}#ae-billing-alloc-table td span.ae-billing-warn-note,#ae-billing-table-errors .ae-billing-warn-note{margin:0;background-repeat:no-repeat;display:inline-block;background-image:url(/img/icn/warning.png);text-align:right;padding-left:16px;padding-right:.1em;height:16px;font-weight:bold}#ae-billing-alloc-table td span.ae-billing-warn-note span,#ae-billing-table-errors .ae-billing-warn-note span{vertical-align:super;font-size:80%}#ae-billing-alloc-table td span.ae-billing-error-hidden,#ae-billing-table-errors .ae-billing-error-hidden{display:none}.ae-billing-percent{font-size:80%;color:#666;margin-left:3px}#ae-billing-week-info{margin-top:5px;line-height:1.4}#ae-billing-table-errors{margin-top:.3em}#ae-billing-allocation-noscript{margin-top:1.5em}#ae-billing-allocation-custom-opts{margin-left:2.2em}#ae-billing-settings h2{font-size:1em;display:inline}#ae-billing-settings p{padding:.3em 0 .5em}#ae-billing-settings-table{margin:.4em 0 .5em}#ae-settings-resource-col{width:19%}#ae-settings-budget-col{width:11%}#ae-billing-settings-table .ae-settings-budget-col{padding-right:2em}.ae-table th.ae-settings-unit-cell,.ae-table td.ae-settings-unit-cell,.ae-table th.ae-total-unit-cell,.ae-table td.ae-total-unit-cell{padding-left:1.2em}#ae-settings-unit-col{width:18%}#ae-settings-paid-col{width:15%}#ae-settings-free-col{width:15%}#ae-settings-total-col{width:22%}.ae-billing-inline-link{margin-left:.5em}.ae-billing-settings-section{margin-bottom:2em}.ae-billing-settings-formbutton{margin-top:.5em}#ae-billing-budget-setup-checkout{margin-bottom:0}#ae-billing-vat-c .ae-field-hint{width:85%}#ae-billing-checkout-note{margin-top:.8em}.ae-table thead th.ae-currency-th{text-align:right}#ae-billing-logs-date{width:15%}#ae-billing-logs-admin{width:15%}#ae-billing-logs-event{width:54%}#ae-billing-logs-amount{text-align:right;width:8%}#ae-billing-logs-balance{text-align:right;width:8%}#ae-billing-history-expand .ae-action{margin-left:1em}.ae-table .ae-billing-usage-report{width:100%;*width:auto;margin:0 0 1em 0}.ae-table .ae-billing-usage-report th,.ae-billing-charges th{color:#666;border-top:0}.ae-table .ae-billing-usage-report th,.ae-table .ae-billing-usage-report td,.ae-billing-charges th,.ae-billing-charges td{background-color:transparent;padding:.4em 0;border-bottom:1px solid #ddd}.ae-table .ae-billing-usage-report tfoot td,.ae-billing-charges tfoot td{border-bottom:none}.ae-billing-report-resource{width:30%}.ae-billing-report-used{width:20%}.ae-billing-report-free{width:20%}.ae-billing-report-paid{width:15%}.ae-billing-report-charge{width:15%}.ae-billing-change-resource{width:85%}.ae-billing-change-budget{width:15%}#ae-billing-always-on-label{display:inline}#ae-billing-budget-buffer-label{display:inline}.ae-billing-charges{width:50%}.ae-billing-charges-charge{text-align:right}.ae-billing-usage-report-container{padding:1em 1em 0 
1em}#ae-billing-new-usage{background-color:#f6f9ff}.goog-zippy-expanded{background-image:url(/img/wgt/minus.gif);cursor:pointer;background-repeat:no-repeat;padding-left:17px}.goog-zippy-collapsed{background-image:url(/img/wgt/plus.gif);cursor:pointer;background-repeat:no-repeat;padding-left:17px}#ae-admin-logs-pagination{width:auto}.ae-usage-cycle-note{color:#555}#ae-createapp-start{background-color:#c6d5f1;padding:1em;padding-bottom:2em;text-align:center}#ae-admin-app_id_alias-check,#ae-createapp-id-check{margin:0 0 0 1em}#ae-admin-app_id_alias-message{display:block;margin:.4em 0}#ae-createapp-id-content{width:100%}#ae-createapp-id-content td{vertical-align:top}#ae-createapp-id-td{white-space:nowrap;width:1%}#ae-createapp-id-td #ae-createapp-id-error{position:absolute;width:24em;padding-left:1em;white-space:normal}#ae-createapp-id-error-td{padding-left:1em}#ae-admin-dev-invite label{float:left;width:3.6em;position:relative;top:.3em}#ae-admin-dev-invite .ae-radio{margin-left:3.6em}#ae-admin-dev-invite .ae-radio label{float:none;width:auto;font-weight:normal;position:static}#ae-admin-dev-invite .goog-button{margin-left:3.6em}#ae-admin-dev-invite .ae-field-hint{margin-left:4.2em}#ae-admin-dev-invite .ae-radio .ae-field-hint{margin-left:0}.ae-you{color:#008000}#ae-authdomain-opts{margin-bottom:1em}#ae-authdomain-content .ae-input-text,#ae-authdomain-content .ae-field-hint{margin:.3em 0 .4em 2.5em}#ae-authdomain-opts a{margin-left:1em}#ae-authdomain-opts-hint{margin-top:.2em;color:#666667;font-size:.85em}#ae-authdomain-content #ae-authdomain-desc .ae-field-hint{margin-left:0}#ae-storage-opts{margin-bottom:1em}#ae-storage-content .ae-input-text,#ae-storage-content .ae-field-hint{margin:.3em 0 .4em 2.5em}#ae-storage-opts a{margin-left:1em}#ae-storage-opts-hint{margin-top:.2em;color:#666667;font-size:.85em}#ae-storage-content #ae-storage-desc .ae-field-hint{margin-left:0}#ae-dash .g-section{margin:0 0 1em}#ae-dash * .g-section{margin:0}#ae-dash-quota .ae-alert{padding-left:1.5em}.ae-dash-email-disabled{background:url(/img/icn/exclamation_circle.png) no-repeat;margin-top:.5em;margin-bottom:.5em;min-height:16px;padding-left:1.5em}#ae-dash-email-disabled-footnote{padding-left:1.5em;margin:5px 0 0;font-weight:normal}#ae-dash-graph-c{border:1px solid #c5d7ef;padding:5px 0}#ae-dash-graph-change{margin:0 0 0 5px}#ae-dash-graph-img{padding:5px;margin-top:.5em;background-color:#fff;display:block}#ae-dash-graph-nodata{text-align:center}#ae-dash .ae-logs-severity{margin-right:.5em}#ae-dash .g-c{padding:0 0 0 .1em}#ae-dash .g-tpl-50-50 .g-unit .g-c{padding:0 0 0 1em}#ae-dash .g-tpl-50-50 .g-first .g-c{padding:0 1em 0 .1em}.ae-quota-warnings{background-color:#fffbe8;margin:0;padding:.5em .5em 0;text-align:left}.ae-quota-warnings div{padding:0 0 .5em}#ae-dash-quota-refresh-info{font-size:85%}#ae-dash #ae-dash-quota-bar-col,#ae-dash .ae-dash-quota-bar{width:100px}#ae-dash-quotadetails #ae-dash-quota-bar-col,#ae-dash-quotadetails .ae-dash-quota-bar{width:200px}#ae-dash-quota-percent-col{width:3.5em}#ae-dash-quota-cost-col{width:15%}#ae-dash-quota-alert-col{width:1%}#ae-dash .ae-dash-quota-alert-td{padding:0}.ae-dash-quota-alert-td a{display:block;width:16px;height:16px}#ae-dash .ae-dash-quota-alert-td .ae-alert{display:block;width:16px;height:16px;margin:0;padding:0}#ae-dash .ae-dash-quota-alert-td .ae-dash-email-disabled{display:block;width:16px;height:16px;margin:0;padding:0}#ae-dash-quota tbody th{font-weight:normal}#ae-dash-quota caption{padding:0}#ae-dash-quota caption 
.g-c{padding:3px}.ae-dash-quota-bar{float:left;background-color:#c0c0c0;height:13px;margin:.1em 0 0 0;position:relative}.ae-dash-quota-bar-free{background:url(/img/free_marker.png) top left no-repeat;width:7px;height:13px;position:absolute;top:0;left:0}#ae-dash-quota-footnote{margin:5px 0 0;font-weight:normal}.ae-quota-warning{background-color:#f90}.ae-quota-alert{background-color:#c00}.ae-quota-normal{background-color:#0b0}.ae-quota-alert-text{color:#c00}.ae-favicon-text{font-size:.85em}#ae-dash-popular{width:97%}#ae-dash-popular-reqsec-col{width:6.5em}#ae-dash-popular-req-col{width:7em}#ae-dash-popular-cpu-avg-col{width:9.5em}#ae-dash-popular-cpu-percent-col{width:7em}#ae-dash-popular .ae-unimportant{font-size:80%}#ae-dash-popular .ae-nowrap,#ae-dash-errors .ae-nowrap{margin-right:5px;overflow:hidden}#ae-dash-popular th span,#ae-dash-errors th span{font-size:.8em;font-weight:normal;display:block}#ae-dash-errors caption .g-unit{width:9em}#ae-dash-errors-count-col{width:5em}#ae-dash-errors-percent-col{width:7em}#ae-dash-graph-chart-type{float:left;margin-right:1em}#ae-apps-all strong.ae-disabled{color:#000;background:#eee}.ae-quota-resource{width:30%}.ae-quota-safety-limit{width:10%}#ae-quota-details h3{padding-bottom:0;margin-bottom:.25em}#ae-quota-details table{margin-bottom:1.75em}#ae-quota-details table.ae-quota-requests{margin-bottom:.5em}#ae-quota-refresh-note p{text-align:right;padding-top:.5em;padding-bottom:0;margin-bottom:0}#ae-quota-first-api.g-section{padding-bottom:0;margin-bottom:.25em}#ae-instances-summary-table,#ae-instances-details-table{margin-bottom:1em}.ae-instances-details-availability-image{float:left;margin-right:.5em}.ae-instances-small-link{font-size:80%}.ae-appbar-superuser-message strong{color:red}#ae-backends-table tr{vertical-align:baseline}.ae-backends-class-reminder{font-size:80%;color:#666;margin-left:3px}#ae-datastore-explorer-c{_margin-right:-3000px;_position:relative;_width:100%}#ae-datastore-explorer form dt{margin:1em 0 0 0}#ae-datastore-explorer #ae-datastore-explorer-labels{margin:0 0 3px}#ae-datastore-explorer-header .ae-action{margin-left:1em}#ae-datastore-explorer .id{white-space:nowrap}#ae-datastore-explorer caption{text-align:right;padding:5px}#ae-datastore-explorer-submit{margin-top:5px}#ae-datastore-explorer-namespace{margin-top:7px;margin-right:5px}#ae-datastore-explorer-gql-spacer{margin-top:22px}h4 #ae-datastore-explorer-gql-label{font-weight:normal}#ae-datastore-form em{font-style:normal;font-weight:normal;margin:0 0 0 .2em;color:#666}#ae-datastore-form dt{font-weight:bold}#ae-datastore-form dd{margin:.4em 0 .3em 1.5em;overflow:auto;zoom:1}#ae-datastore-form dd em{width:4em;float:left}#ae-datastore-form dd.ae-last{margin-bottom:1em}#ae-datastore-explorer-tabs-content{margin-bottom:1em}#ae-datastore-explorer-list .ae-label-row,#ae-datastore-explorer-new .ae-label-row{float:left;padding-top:.2em}#ae-datastore-explorer-list .ae-input-row,#ae-datastore-explorer-list .ae-btn-row,#ae-datastore-explorer-new .ae-input-row,#ae-datastore-explorer-new .ae-btn-row{margin-left:6em}#ae-datastore-explorer-list .ae-btn-row,#ae-datastore-explorer-new .ae-btn-row{margin-bottom:0}.ae-datastore-index-name{font-size:1.2em;font-weight:bold}.ae-table .ae-datastore-index-defs{padding-left:20px}.ae-datastore-index-defs-row{border-top:1px solid #ddd}.ae-datastore-index-defs .ae-unimportant{font-size:.8em}.ae-datastore-index-status{border:1px solid #c0dfbf;background:#f3f7f3;margin:0 25px 0 
0;padding:3px}#ae-datastore-index-status-col{width:15%}.ae-datastore-index-status-Building{border-color:#edebcd;background:#fefdec}.ae-datastore-index-status-Deleting{border-color:#ccc;background:#eee}.ae-datastore-index-status-Error{border-color:#ffd3b4;background:#ffeae0}.ae-datastore-pathlink{font-size:.9em}#ae-datastore-stats-top-level-c{padding-bottom:1em;margin-bottom:1em;border-bottom:1px solid #e5ecf9}#ae-datastore-stats-top-level{width:100%}#ae-datastore-stats-piecharts-c{margin-bottom:1em}.ae-datastore-stats-piechart-label{font-size:.85em;font-weight:normal;text-align:center;padding:0}#ae-datastore-stats-property-type{width:65%}#ae-datastore-stats-size-all{width:35%}#ae-datastore-stats-property-name{width:60%}#ae-datastore-stats-type{width:10%}#ae-datastore-stats-size-entity{width:30%}#ae-datastore-blob-filter-form{margin-bottom:1em}#ae-datastore-blob-query-filter-label{padding-right:.5em}#ae-datastore-blob-filter-contents{padding-top:.5em}#ae-datastore-blob-date-after,#ae-datastore-blob-date-before{float:left}#ae-datastore-blob-date-after{margin-right:1em}#ae-datastore-blob-order label{font-weight:normal}#ae-datastore-blob-col-check{width:2%}#ae-datastore-blob-col-file{width:45%}#ae-datastore-blob-col-type{width:14%}#ae-datastore-blob-col-size{width:16%}#ae-blobstore-col-date{width:18%}#ae-blob-detail-filename{padding-bottom:0}#ae-blob-detail-filename span{font-weight:normal}#ae-blob-detail-key{font-size:85%}#ae-blob-detail-preview{margin-top:1em}#ae-blob-detail-dl{text-align:right}#ae-domain-admins-list li{margin-bottom:.3em}#ae-domain-admins-list button{margin-left:.5em}#ae-new-app-dialog-c{width:500px}#ae-new-app-dialog-c .g-section{margin-bottom:1em}#dombilling-tt-setup-note{border:1px solid #ccc;padding:1em;background:#efe}#dombilling-tt-setup-error{padding:0.5em;background:#fee}p.light-note{color:#555}.ae-bottom-message{margin-top:1em}#domusage-apptable{border-top:1px solid #ccc;border-left:1px solid #ccc}#domusage-apptable td,#domusage-apptable th{border-right:1px solid #ccc;border-bottom:1px solid #ccc;padding:2px 6px}#domusage-apptable td.users{text-align:right}#domusage-apptable td.cost{text-align:right}#domusage-apptable td.total-label{text-align:right;border-top:2px solid black;padding:1em 0.25em;border-right:0}#domusage-apptable td.total-cost{font-weight:bold;text-align:right;border-top:2px solid black;padding:1em 0.25em}#domusage-apptable td a{text-decoration:none}#domsettings-form div.ae-radio{margin-left:1.7em}#domsettings-form div.ae-radio input{margin-left:-1.47em;float:left}#ae-logs-c{_margin-right:-2000px;_position:relative;_width:100%;background:#fff}#ae-logs{background-color:#c5d7ef;padding:1px;line-height:1.65}#ae-logs .ae-table-caption{border:0}#ae-logs-c ol,#ae-logs-c li{list-style:none;padding:0;margin:0}#ae-logs-c li li{margin:0 0 0 3px;padding:0 0 0 17px}.ae-log-noerror{padding-left:23px}#ae-logs-form .goog-inline-block{margin-top:0}.ae-logs-reqlog .snippet{margin:.1em}.ae-logs-applog .snippet{color:#666}.ae-logs-severity{display:block;float:left;height:1.2em;width:1.2em;line-height:1.2;text-align:center;text-transform:capitalize;font-weight:bold;border-radius:2px;-moz-border-radius:2px;-webkit-border-radius:2px}.ae-logs-severity-4{background-color:#f22;color:#000}.ae-logs-severity-3{background-color:#f90;color:#000}.ae-logs-severity-2{background-color:#fd0}.ae-logs-severity-1{background-color:#3c0;color:#000}.ae-logs-severity-0{background-color:#09f;color:#000}#ae-logs-legend{margin:1em 0 0 0}#ae-logs-legend 
ul{list-style:none;margin:0;padding:0}#ae-logs-legend li,#ae-logs-legend strong{float:left;margin:0 1em 0 0}#ae-logs-legend li span{margin-right:.3em}.ae-logs-timestamp{padding:0 5px;font-size:85%}#ae-logs-form-c{margin-bottom:5px;padding-bottom:.5em;padding-left:1em}#ae-logs-form{padding:.3em 0 0}#ae-logs-form .ae-label-row{float:left;padding-top:.2em;margin-right:0.539em}#ae-logs-form .ae-input-row,#ae-logs-form .ae-btn-row{margin-left:4em}#ae-logs-form .ae-btn-row{margin-bottom:0}#ae-logs-requests-c{margin-bottom:.1em}#ae-logs-requests-c input{margin:0}#ae-logs-requests-all-label{margin-right:0.539em}#ae-logs-form-options{margin-top:8px}#ae-logs-tip{margin:.2em 0}#ae-logs-expand{margin-right:.2em}#ae-logs-severity-level-label{margin-top:.3em;display:block}#ae-logs-filter-hint-labels-list{margin:2px 0}#ae-logs-filter-hint-labels-list span{position:absolute}#ae-logs-filter-hint-labels-list ul{margin-left:5.5em;padding:0}#ae-logs-filter-hint-labels-list li{float:left;margin-right:.4em;line-height:1.2}.ae-toggle .ae-logs-getdetails,.ae-toggle pre{display:none}.ae-log-expanded .ae-toggle pre{display:block}#ae-logs-c .ae-log .ae-toggle{cursor:default;background:none;padding-left:0}#ae-logs-c .ae-log .ae-toggle h5{cursor:pointer;background-position:0 .55em;background-repeat:no-repeat;padding-left:17px}.ae-log .ae-plus h5{background-image:url(/img/wgt/plus.gif)}.ae-log .ae-minus h5{background-image:url(/img/wgt/minus.gif)}.ae-log{overflow:hidden;background-color:#fff;padding:.3em 0;line-height:1.65;border-bottom:1px solid #c5d7ef}.ae-log .ae-even{background-color:#e9e9e9;border:0}.ae-log h5{font-weight:normal;white-space:nowrap;padding:.4em 0 0 0}.ae-log span,.ae-log strong{margin:0 .3em}.ae-log .ae-logs-snippet{color:#666}.ae-log pre,.ae-logs-expanded{padding:.3em 0 .5em 1.5em;margin:0;font-family:"Courier New"}.ae-log .file{font-weight:bold}.ae-log.ae-log-expanded .file{white-space:pre-wrap;word-wrap:break-word}.ae-logs-app .ae-logs-req{display:none}.ae-logs-req .ae-app,.ae-logs-both .ae-app{padding-left:1em}#ae-dos-blacklist-rejects-table{text-align:left}#ae-dash-quota-percent-col{width:3.5em}.ae-cron-status-ok{color:#008000;font-size:90%;font-weight:bold}.ae-cron-status-error{color:#a03;font-size:90%;font-weight:bold}#ae-cronjobs-table .ae-table td{vertical-align:top}#ae-tasks-table td{vertical-align:top}#ae-tasks-quota{margin:0 0 1em 0}#ae-tasks-quota .ae-dash-quota-bar{width:150px}#ae-tasks-quota #ae-dash-quota-bar-col,#ae-tasks-quota .ae-dash-quota-bar{width:200px}.ae-tasks-paused-row{color:#666;font-style:italic;font-weight:bold}#ae-tasks-quota .ae-quota-safety-limit{width:30%}#ae-tasks-table{margin-top:1em}#ae-tasks-queuecontrols{margin-top:1em;margin-bottom:1em}#ae-tasks-delete-col{width:1em}#ae-tasks-eta-col,#ae-tasks-creation-col{width:11em}#ae-tasks-actions-col{width:7em}#ae-tasks-retry-col{width:4em}#ae-tasks-body-col{width:6em}#ae-tasks-headers-col{width:7em}.ae-tasks-hex-column,.ae-tasks-ascii-column{width:16em}#ae-tasks-table .ae-tasks-arrow{text-align:center}
\ No newline at end of file
diff --git a/google/appengine/ext/datastore_admin/static/js/compiled.js b/google/appengine/ext/datastore_admin/static/js/compiled.js
index 8082c93..244320a 100755
--- a/google/appengine/ext/datastore_admin/static/js/compiled.js
+++ b/google/appengine/ext/datastore_admin/static/js/compiled.js
@@ -2,17 +2,17 @@
 var r="push",s="length",da="propertyIsEnumerable",t="prototype",u="replace",x="split",y="indexOf",z="target",A="call",ea="keyCode",fa="handleEvent",C="type",D="apply",ga="name",E,F=this,G=function(){},H=function(a){var b=typeof a;if(b=="object")if(a){if(a instanceof Array)return"array";else if(a instanceof Object)return b;var c=Object[t].toString[A](a);if(c=="[object Window]")return"object";if(c=="[object Array]"||typeof a[s]=="number"&&typeof a.splice!="undefined"&&typeof a[da]!="undefined"&&!a[da]("splice"))return"array";
 if(c=="[object Function]"||typeof a[A]!="undefined"&&typeof a[da]!="undefined"&&!a[da]("call"))return"function"}else return"null";else if(b=="function"&&typeof a[A]=="undefined")return"object";return b},ha=function(a){var b=H(a);return b=="array"||b=="object"&&typeof a[s]=="number"},I=function(a){return typeof a=="string"},ia=function(a){return H(a)=="function"},ja=function(a){a=H(a);return a=="object"||a=="array"||a=="function"},J="closure_uid_"+Math.floor(Math.random()*2147483648).toString(36),
 ka=0,K=function(a,b){function c(){}c.prototype=b[t];a.B=b[t];a.prototype=new c};var la=function(a){this.stack=k().stack||"";if(a)this.message=o(a)};K(la,k);la[t].name="CustomError";var ma=function(a,b){for(var c=1;c<arguments[s];c++)var e=o(arguments[c])[u](/\$/g,"$$$$"),a=a[u](/\%s/,e);return a},sa=function(a,b){if(b)return a[u](na,"&amp;")[u](oa,"&lt;")[u](pa,"&gt;")[u](qa,"&quot;");else{if(!ra.test(a))return a;a[y]("&")!=-1&&(a=a[u](na,"&amp;"));a[y]("<")!=-1&&(a=a[u](oa,"&lt;"));a[y](">")!=-1&&(a=a[u](pa,"&gt;"));a[y]('"')!=-1&&(a=a[u](qa,"&quot;"));return a}},na=/&/g,oa=/</g,pa=/>/g,qa=/\"/g,ra=/[&<>\"]/,ua=function(a,b){for(var c=0,e=o(a)[u](/^[\s\xa0]+|[\s\xa0]+$/g,"")[x]("."),
-g=o(b)[u](/^[\s\xa0]+|[\s\xa0]+$/g,"")[x]("."),d=Math.max(e[s],g[s]),f=0;c==0&&f<d;f++){var j=e[f]||"",l=g[f]||"",m=RegExp("(\\d*)(\\D*)","g"),B=RegExp("(\\d*)(\\D*)","g");do{var n=m.exec(j)||["","",""],w=B.exec(l)||["","",""];if(n[0][s]==0&&w[0][s]==0)break;var c=n[1][s]==0?0:parseInt(n[1],10),v=w[1][s]==0?0:parseInt(w[1],10),c=ta(c,v)||ta(n[2][s]==0,w[2][s]==0)||ta(n[2],w[2])}while(c==0)}return c},ta=function(a,b){if(a<b)return-1;else if(a>b)return 1;return 0};var va=function(a,b){b.unshift(a);la[A](this,ma[D](i,b));b.shift();this.Q=a};K(va,la);va[t].name="AssertionError";var wa=function(a,b,c){if(!a){var e=Array[t].slice[A](arguments,2),g="Assertion failed";if(b){g+=": "+b;var d=e}throw new va(""+g,d||[]);}return a};var L=Array[t],xa=L[y]?function(a,b,c){wa(a[s]!=i);return L[y][A](a,b,c)}:function(a,b,c){c=c==i?0:c<0?Math.max(0,a[s]+c):c;if(I(a))return!I(b)||b[s]!=1?-1:a[y](b,c);for(;c<a[s];c++)if(c in a&&a[c]===b)return c;return-1},ya=L.forEach?function(a,b,c){wa(a[s]!=i);L.forEach[A](a,b,c)}:function(a,b,c){for(var e=a[s],g=I(a)?a[x](""):a,d=0;d<e;d++)d in g&&b[A](c,g[d],d,a)},za=function(a){return L.concat[D](L,arguments)},Aa=function(a){if(H(a)=="array")return za(a);else{for(var b=[],c=0,e=a[s];c<e;c++)b[c]=
-a[c];return b}},Ba=function(a,b,c){wa(a[s]!=i);return arguments[s]<=2?L.slice[A](a,b):L.slice[A](a,b,c)};var Ca=function(a,b,c){for(var e in a)b[A](c,a[e],e,a)},Da=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"],Ea=function(a,b){for(var c,e,g=1;g<arguments[s];g++){e=arguments[g];for(c in e)a[c]=e[c];for(var d=0;d<Da[s];d++)c=Da[d],Object[t].hasOwnProperty[A](e,c)&&(a[c]=e[c])}};var M,Fa,Ga,Ha,Ia,Ja=function(){return F.navigator?F.navigator.userAgent:i},Ka=function(){return F.navigator};Ha=Ga=Fa=M=!1;var N;if(N=Ja()){var La=Ka();M=N[y]("Opera")==0;Fa=!M&&N[y]("MSIE")!=-1;(Ga=!M&&N[y]("WebKit")!=-1)&&N[y]("Mobile");Ha=!M&&!Ga&&La.product=="Gecko"}var Ma=M,O=Fa,Na=Ha,Oa=Ga,Pa=Ka(),Qa=Pa&&Pa.platform||"";Ia=Qa[y]("Mac")!=-1;Qa[y]("Win");Qa[y]("Linux");Ka()&&(Ka().appVersion||"")[y]("X11");var Ra;
+g=o(b)[u](/^[\s\xa0]+|[\s\xa0]+$/g,"")[x]("."),d=Math.max(e[s],g[s]),f=0;c==0&&f<d;f++){var j=e[f]||"",l=g[f]||"",m=RegExp("(\\d*)(\\D*)","g"),B=RegExp("(\\d*)(\\D*)","g");do{var n=m.exec(j)||["","",""],w=B.exec(l)||["","",""];if(n[0][s]==0&&w[0][s]==0)break;var c=n[1][s]==0?0:parseInt(n[1],10),v=w[1][s]==0?0:parseInt(w[1],10),c=ta(c,v)||ta(n[2][s]==0,w[2][s]==0)||ta(n[2],w[2])}while(c==0)}return c},ta=function(a,b){if(a<b)return-1;else if(a>b)return 1;return 0};Math.random();var va=function(a,b){b.unshift(a);la[A](this,ma[D](i,b));b.shift();this.Q=a};K(va,la);va[t].name="AssertionError";var wa=function(a,b,c){if(!a){var e=Array[t].slice[A](arguments,2),g="Assertion failed";if(b){g+=": "+b;var d=e}throw new va(""+g,d||[]);}return a};var L=Array[t],xa=L[y]?function(a,b,c){wa(a[s]!=i);return L[y][A](a,b,c)}:function(a,b,c){c=c==i?0:c<0?Math.max(0,a[s]+c):c;if(I(a))return!I(b)||b[s]!=1?-1:a[y](b,c);for(;c<a[s];c++)if(c in a&&a[c]===b)return c;return-1},ya=L.forEach?function(a,b,c){wa(a[s]!=i);L.forEach[A](a,b,c)}:function(a,b,c){for(var e=a[s],g=I(a)?a[x](""):a,d=0;d<e;d++)d in g&&b[A](c,g[d],d,a)},za=function(a){return L.concat[D](L,arguments)},Aa=function(a){if(H(a)=="array")return za(a);else{for(var b=[],c=0,e=a[s];c<e;c++)b[c]=
+a[c];return b}},Ba=function(a,b,c){wa(a[s]!=i);return arguments[s]<=2?L.slice[A](a,b):L.slice[A](a,b,c)};var Ca=function(a,b,c){for(var e in a)b[A](c,a[e],e,a)},Da="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(","),Ea=function(a,b){for(var c,e,g=1;g<arguments[s];g++){e=arguments[g];for(c in e)a[c]=e[c];for(var d=0;d<Da[s];d++)c=Da[d],Object[t].hasOwnProperty[A](e,c)&&(a[c]=e[c])}};var M,Fa,Ga,Ha,Ia,Ja=function(){return F.navigator?F.navigator.userAgent:i},Ka=function(){return F.navigator};Ha=Ga=Fa=M=!1;var N;if(N=Ja()){var La=Ka();M=N[y]("Opera")==0;Fa=!M&&N[y]("MSIE")!=-1;(Ga=!M&&N[y]("WebKit")!=-1)&&N[y]("Mobile");Ha=!M&&!Ga&&La.product=="Gecko"}var Ma=M,O=Fa,Na=Ha,Oa=Ga,Pa=Ka(),Qa=Pa&&Pa.platform||"";Ia=Qa[y]("Mac")!=-1;Qa[y]("Win");Qa[y]("Linux");Ka()&&(Ka().appVersion||"")[y]("X11");var Ra;
 a:{var Sa="",P;if(Ma&&F.opera)var Ta=F.opera.version,Sa=typeof Ta=="function"?Ta():Ta;else if(Na?P=/rv\:([^\);]+)(\)|;)/:O?P=/MSIE\s+([^\);]+)(\)|;)/:Oa&&(P=/WebKit\/(\S+)/),P)var Ua=P.exec(Ja()),Sa=Ua?Ua[1]:"";if(O){var Va,Wa=F.document;Va=Wa?Wa.documentMode:h;if(Va>parseFloat(Sa)){Ra=o(Va);break a}}Ra=Sa}var Xa=Ra,Ya={},Za=function(a){return Ya[a]||(Ya[a]=ua(Xa,a)>=0)},$a={},ab=function(a){return $a[a]||($a[a]=O&&p.documentMode&&p.documentMode>=a)};var bb=!O||ab(9);!Na&&!O||O&&ab(9)||Na&&Za("1.9.1");O&&Za("9");var cb=function(a,b){var c;c=(c=a.className)&&typeof c[x]=="function"?c[x](/\s+/):[];var e=Ba(arguments,1),g;g=c;for(var d=0,f=0;f<e[s];f++)xa(g,e[f])>=0||(g[r](e[f]),d++);g=d==e[s];a.className=c.join(" ");return g};var db=function(a,b,c,e){var a=e||a,g=b&&b!="*"?b.toUpperCase():"";if(a.querySelectorAll&&a.querySelector&&(!Oa||p.compatMode=="CSS1Compat"||Za("528"))&&(g||c))return a.querySelectorAll(g+(c?"."+c:""));if(c&&a.getElementsByClassName)if(b=a.getElementsByClassName(c),g){for(var a={},d=e=0,f;f=b[d];d++)g==f.nodeName&&(a[e++]=f);aa(a,e);return a}else return b;b=a.getElementsByTagName(g||"*");if(c){a={};for(d=e=0;f=b[d];d++){var g=f.className,j;if(j=typeof g[x]=="function")g=g[x](/\s+/),j=xa(g,c)>=0;j&&
-(a[e++]=f)}aa(a,e);return a}else return b},fb=function(a,b){Ca(b,function(b,e){e=="style"?a.style.cssText=b:e=="class"?a.className=b:e=="for"?a.htmlFor=b:e in eb?a.setAttribute(eb[e],b):a[e]=b})},eb={cellpadding:"cellPadding",cellspacing:"cellSpacing",colspan:"colSpan",rowspan:"rowSpan",valign:"vAlign",height:"height",width:"width",usemap:"useMap",frameborder:"frameBorder",maxlength:"maxLength",type:"type"},hb=function(a,b,c,e){function g(c){c&&b.appendChild(I(c)?a.createTextNode(c):c)}for(;e<c[s];e++){var d=
-c[e];ha(d)&&!(ja(d)&&d.nodeType>0)?ya(gb(d)?Aa(d):d,g):g(d)}},ib=function(a,b,c){var e=p,g=arguments,d=g[0],f=g[1];if(!bb&&f&&(f[ga]||f[C])){d=["<",d];f[ga]&&d[r](' name="',sa(f[ga]),'"');if(f[C]){d[r](' type="',sa(f[C]),'"');var j={};Ea(j,f);f=j;delete f[C]}d[r](">");d=d.join("")}d=e.createElement(d);if(f)I(f)?d.className=f:H(f)=="array"?cb[D](i,[d].concat(f)):fb(d,f);g[s]>2&&hb(e,d,g,2);return d},gb=function(a){if(a&&typeof a[s]=="number")if(ja(a))return typeof a.item=="function"||typeof a.item==
-"string";else if(ia(a))return typeof a.item=="function";return!1};var jb=new Function("a","return a");var kb;!O||ab(9);O&&Za("8");var Q=function(){};Q[t].H=!1;Q[t].f=function(){if(!this.H)this.H=!0,this.h()};Q[t].h=function(){this.P&&lb[D](i,this.P)};var lb=function(a){for(var b=0,c=arguments[s];b<c;++b){var e=arguments[b];ha(e)?lb[D](i,e):e&&typeof e.f=="function"&&e.f()}};var R=function(a,b){this.type=a;ca(this,b);q(this,this[z])};K(R,Q);R[t].h=function(){delete this[C];delete this[z];delete this.currentTarget};R[t].r=!1;R[t].O=!0;var S=function(a,b){a&&this.o(a,b)};K(S,R);E=S[t];ca(E,i);E.relatedTarget=i;E.offsetX=0;E.offsetY=0;E.clientX=0;E.clientY=0;E.screenX=0;E.screenY=0;E.button=0;E.keyCode=0;E.charCode=0;E.ctrlKey=!1;E.altKey=!1;E.shiftKey=!1;E.metaKey=!1;E.N=!1;E.C=i;
+(a[e++]=f)}aa(a,e);return a}else return b},fb=function(a,b){Ca(b,function(b,e){e=="style"?a.style.cssText=b:e=="class"?a.className=b:e=="for"?a.htmlFor=b:e in eb?a.setAttribute(eb[e],b):e.lastIndexOf("aria-",0)==0?a.setAttribute(e,b):a[e]=b})},eb={cellpadding:"cellPadding",cellspacing:"cellSpacing",colspan:"colSpan",rowspan:"rowSpan",valign:"vAlign",height:"height",width:"width",usemap:"useMap",frameborder:"frameBorder",maxlength:"maxLength",type:"type"},hb=function(a,b,c,e){function g(c){c&&b.appendChild(I(c)?
+a.createTextNode(c):c)}for(;e<c[s];e++){var d=c[e];ha(d)&&!(ja(d)&&d.nodeType>0)?ya(gb(d)?Aa(d):d,g):g(d)}},ib=function(a,b,c){var e=p,g=arguments,d=g[0],f=g[1];if(!bb&&f&&(f[ga]||f[C])){d=["<",d];f[ga]&&d[r](' name="',sa(f[ga]),'"');if(f[C]){d[r](' type="',sa(f[C]),'"');var j={};Ea(j,f);f=j;delete f[C]}d[r](">");d=d.join("")}d=e.createElement(d);if(f)I(f)?d.className=f:H(f)=="array"?cb[D](i,[d].concat(f)):fb(d,f);g[s]>2&&hb(e,d,g,2);return d},gb=function(a){if(a&&typeof a[s]=="number")if(ja(a))return typeof a.item==
+"function"||typeof a.item=="string";else if(ia(a))return typeof a.item=="function";return!1};var jb=new Function("a","return a");var kb;!O||ab(9);O&&Za("8");var Q=function(){};Q[t].H=!1;Q[t].f=function(){if(!this.H)this.H=!0,this.h()};Q[t].h=function(){this.P&&lb[D](i,this.P)};var lb=function(a){for(var b=0,c=arguments[s];b<c;++b){var e=arguments[b];ha(e)?lb[D](i,e):e&&typeof e.f=="function"&&e.f()}};var R=function(a,b){this.type=a;ca(this,b);q(this,this[z])};K(R,Q);R[t].h=function(){delete this[C];delete this[z];delete this.currentTarget};R[t].r=!1;R[t].O=!0;var S=function(a,b){a&&this.o(a,b)};K(S,R);E=S[t];ca(E,i);E.relatedTarget=i;E.offsetX=0;E.offsetY=0;E.clientX=0;E.clientY=0;E.screenX=0;E.screenY=0;E.button=0;E.keyCode=0;E.charCode=0;E.ctrlKey=!1;E.altKey=!1;E.shiftKey=!1;E.metaKey=!1;E.N=!1;E.C=i;
 E.o=function(a,b){var c=this.type=a[C];R[A](this,c);ca(this,a[z]||a.srcElement);q(this,b);var e=a.relatedTarget;if(e){if(Na){var g;a:{try{jb(e.nodeName);g=!0;break a}catch(d){}g=!1}g||(e=i)}}else if(c=="mouseover")e=a.fromElement;else if(c=="mouseout")e=a.toElement;this.relatedTarget=e;this.offsetX=a.offsetX!==h?a.offsetX:a.layerX;this.offsetY=a.offsetY!==h?a.offsetY:a.layerY;this.clientX=a.clientX!==h?a.clientX:a.pageX;this.clientY=a.clientY!==h?a.clientY:a.pageY;this.screenX=a.screenX||0;this.screenY=
 a.screenY||0;this.button=a.button;this.keyCode=a[ea]||0;this.charCode=a.charCode||(c=="keypress"?a[ea]:0);this.ctrlKey=a.ctrlKey;this.altKey=a.altKey;this.shiftKey=a.shiftKey;this.metaKey=a.metaKey;this.N=Ia?a.metaKey:a.ctrlKey;this.state=a.state;this.C=a;delete this.O;delete this.r};E.h=function(){S.B.h[A](this);this.C=i;ca(this,i);q(this,i);this.relatedTarget=i};var T=function(a,b){this.D=b;this.b=[];this.M(a)};K(T,Q);E=T[t];E.s=i;E.G=i;E.k=function(a){this.s=a};E.i=function(){return this.b[s]?this.b.pop():this.F()};E.j=function(a){this.b[s]<this.D?this.b[r](a):this.A(a)};E.M=function(a){if(a>this.D)throw k("[goog.structs.SimplePool] Initial cannot be greater than max");for(var b=0;b<a;b++)this.b[r](this.F())};E.F=function(){return this.s?this.s():{}};E.A=function(a){if(this.G)this.G(a);else if(ja(a))if(ia(a.f))a.f();else for(var b in a)delete a[b]};
 E.h=function(){T.B.h[A](this);for(var a=this.b;a[s];)this.A(a.pop());delete this.b};var mb,nb=(mb="ScriptEngine"in F&&F.ScriptEngine()=="JScript")?F.ScriptEngineMajorVersion()+"."+F.ScriptEngineMinorVersion()+"."+F.ScriptEngineBuildVersion():"0";var ob=function(){},pb=0;E=ob[t];E.c=0;E.e=!1;E.v=!1;E.o=function(a,b,c,e,g,d){if(ia(a))this.z=!0;else if(a&&a[fa]&&ia(a[fa]))this.z=!1;else throw k("Invalid listener argument");this.n=a;this.u=b;this.src=c;this.type=e;this.J=!!g;this.t=d;this.v=!1;this.c=++pb;this.e=!1};E.handleEvent=function(a){return this.z?this.n[A](this.t||this.src,a):this.n[fa][A](this.n,a)};var qb,rb,U,sb,tb,ub,vb,wb,xb,yb,zb;
-(function(){function a(){return{a:0,d:0}}function b(){return[]}function c(){var a=function(b){return f[A](a.src,a.c,b)};return a}function e(){return new ob}function g(){return new S}var d=mb&&!(ua(nb,"5.7")>=0),f;ub=function(a){f=a};if(d){qb=function(){return j.i()};rb=function(a){j.j(a)};U=function(){return l.i()};sb=function(a){l.j(a)};tb=function(){return m.i()};vb=function(){m.j(c())};wb=function(){return B.i()};xb=function(a){B.j(a)};yb=function(){return n.i()};zb=function(a){n.j(a)};var j=new T(0,
-600);j.k(a);var l=new T(0,600);l.k(b);var m=new T(0,600);m.k(c);var B=new T(0,600);B.k(e);var n=new T(0,600);n.k(g)}else qb=a,rb=G,U=b,sb=G,tb=c,vb=G,wb=e,xb=G,yb=g,zb=G})();var V={},W={},X={},Y={},Ab=function(a,b,c,e,g){if(b)if(H(b)=="array"){for(var d=0;d<b[s];d++)Ab(a,b[d],c,e,g);return i}else{var e=!!e,f=W;b in f||(f[b]=qb());f=f[b];e in f||(f[e]=qb(),f.a++);var f=f[e],j=a[J]||(a[J]=++ka),l;f.d++;if(f[j]){l=f[j];for(d=0;d<l[s];d++)if(f=l[d],f.n==c&&f.t==g){if(f.e)break;return l[d].c}}else l=f[j]=U(),f.a++;d=tb();d.src=a;f=wb();f.o(c,d,a,b,e,g);c=f.c;d.c=c;l[r](f);V[c]=f;X[j]||(X[j]=U());X[j][r](f);a.addEventListener?(a==F||!a.I)&&a.addEventListener(b,d,e):a.attachEvent(b in
+(function(){function a(){return{a:0,d:0}}function b(){return[]}function c(){var a=function(b){b=f[A](a.src,a.c,b);if(!b)return b};return a}function e(){return new ob}function g(){return new S}var d=mb&&!(ua(nb,"5.7")>=0),f;ub=function(a){f=a};if(d){qb=function(){return j.i()};rb=function(a){j.j(a)};U=function(){return l.i()};sb=function(a){l.j(a)};tb=function(){return m.i()};vb=function(){m.j(c())};wb=function(){return B.i()};xb=function(a){B.j(a)};yb=function(){return n.i()};zb=function(a){n.j(a)};
+var j=new T(0,600);j.k(a);var l=new T(0,600);l.k(b);var m=new T(0,600);m.k(c);var B=new T(0,600);B.k(e);var n=new T(0,600);n.k(g)}else qb=a,rb=G,U=b,sb=G,tb=c,vb=G,wb=e,xb=G,yb=g,zb=G})();var V={},W={},X={},Y={},Ab=function(a,b,c,e,g){if(b)if(H(b)=="array"){for(var d=0;d<b[s];d++)Ab(a,b[d],c,e,g);return i}else{var e=!!e,f=W;b in f||(f[b]=qb());f=f[b];e in f||(f[e]=qb(),f.a++);var f=f[e],j=a[J]||(a[J]=++ka),l;f.d++;if(f[j]){l=f[j];for(d=0;d<l[s];d++)if(f=l[d],f.n==c&&f.t==g){if(f.e)break;return l[d].c}}else l=f[j]=U(),f.a++;d=tb();d.src=a;f=wb();f.o(c,d,a,b,e,g);c=f.c;d.c=c;l[r](f);V[c]=f;X[j]||(X[j]=U());X[j][r](f);a.addEventListener?(a==F||!a.I)&&a.addEventListener(b,d,e):a.attachEvent(b in
 Y?Y[b]:Y[b]="on"+b,d);return c}else throw k("Invalid event type");},Bb=function(a,b,c,e){if(!e.p&&e.w){for(var g=0,d=0;g<e[s];g++)if(e[g].e){var f=e[g].u;f.src=i;vb(f);xb(e[g])}else g!=d&&(e[d]=e[g]),d++;aa(e,d);e.w=!1;d==0&&(sb(e),delete W[a][b][c],W[a][b].a--,W[a][b].a==0&&(rb(W[a][b]),delete W[a][b],W[a].a--),W[a].a==0&&(rb(W[a]),delete W[a]))}},Db=function(a,b,c,e,g){var d=1,b=b[J]||(b[J]=++ka);if(a[b]){a.d--;a=a[b];a.p?a.p++:a.p=1;try{for(var f=a[s],j=0;j<f;j++){var l=a[j];l&&!l.e&&(d&=Cb(l,
 g)!==!1)}}finally{a.p--,Bb(c,e,b,a)}}return Boolean(d)},Cb=function(a,b){var c=a[fa](b);if(a.v){var e=a.c;if(V[e]){var g=V[e];if(!g.e){var d=g.src,f=g[C],j=g.u,l=g.J;d.removeEventListener?(d==F||!d.I)&&d.removeEventListener(f,j,l):d.detachEvent&&d.detachEvent(f in Y?Y[f]:Y[f]="on"+f,j);d=d[J]||(d[J]=++ka);j=W[f][l][d];if(X[d]){var m=X[d],B=xa(m,g);B>=0&&(wa(m[s]!=i),L.splice[A](m,B,1));m[s]==0&&delete X[d]}g.e=!0;j.w=!0;Bb(f,l,d,j);delete V[e]}}}return c};
 ub(function(a,b){if(!V[a])return!0;var c=V[a],e=c[C],g=W;if(!(e in g))return!0;var g=g[e],d,f;kb===h&&(kb=O&&!F.addEventListener);if(kb){var j;if(!(j=b))a:{j="window.event"[x](".");for(var l=F;d=j.shift();)if(l[d]!=i)l=l[d];else{j=i;break a}j=l}d=j;j=!0 in g;l=!1 in g;if(j){if(d[ea]<0||d.returnValue!=h)return!0;a:{var m=!1;if(d[ea]==0)try{d.keyCode=-1;break a}catch(B){m=!0}if(m||d.returnValue==h)d.returnValue=!0}}m=yb();m.o(d,this);d=!0;try{if(j){for(var n=U(),w=m.currentTarget;w;w=w.parentNode)n[r](w);
diff --git a/google/appengine/ext/db/__init__.py b/google/appengine/ext/db/__init__.py
index 56f6c2b..7b4fc94 100755
--- a/google/appengine/ext/db/__init__.py
+++ b/google/appengine/ext/db/__init__.py
@@ -324,11 +324,11 @@
   """
 
 
-  result = Query().ancestor(model_instance);
+  result = Query().ancestor(model_instance)
 
-  result.filter(datastore_types._KEY_SPECIAL_PROPERTY + ' >',
-                model_instance.key());
-  return result;
+  result.filter(datastore_types.KEY_SPECIAL_PROPERTY + ' >',
+                model_instance.key())
+  return result
 
 
 def model_to_protobuf(model_instance, _entity_class=datastore.Entity):
@@ -722,6 +722,15 @@
     return self.data_type
 
 
+class Index(datastore._BaseIndex):
+  """A datastore index."""
+
+  id = datastore._BaseIndex._Id
+  kind = datastore._BaseIndex._Kind
+  has_ancestor = datastore._BaseIndex._HasAncestor
+  properties = datastore._BaseIndex._Properties
+
+
 class Model(object):
   """Model is the superclass of all object entities in the datastore.
 
@@ -1647,6 +1656,39 @@
     return KEY_RANGE_EMPTY
 
 
+def get_indexes_async(**kwargs):
+  """Asynchronously retrieves the application indexes and their states.
+
+  Identical to get_indexes() except it returns an asynchronous object. Call
+  get_result() on the return value to block on the call and get the results.
+  """
+  config = datastore._GetConfigFromKwargs(kwargs)
+
+  def extra_hook(indexes):
+    return [(Index(index.Id(), index.Kind(), index.HasAncestor(),
+                  index.Properties()), state) for index, state in indexes]
+
+  return datastore.GetIndexesAsync(config=config, extra_hook=extra_hook)
+
+
+def get_indexes(**kwargs):
+  """Retrieves the application indexes and their states.
+
+  Args:
+    config: datastore_rpc.Configuration to use for this request, must be
+      specified as a keyword argument.
+
+  Returns:
+    A list of (Index, Index.[BUILDING|SERVING|DELETING|ERROR]) tuples.
+    An index can be in the following states:
+      Index.BUILDING: Index is being built and therefore cannot serve queries
+      Index.SERVING: Index is ready to service queries
+      Index.DELETING: Index is being deleted
+      Index.ERROR: Index encountered an error in the BUILDING state
+  """
+  return get_indexes_async(**kwargs).get_result()
+
+
 class Expando(Model):
   """Dynamically expandable model.
 
@@ -2385,10 +2427,10 @@
       operator = '=='
 
     if self._model_class is None:
-      if prop != datastore_types._KEY_SPECIAL_PROPERTY:
+      if prop != datastore_types.KEY_SPECIAL_PROPERTY:
         raise BadQueryError(
             'Only %s filters are allowed on kindless queries.' %
-            datastore_types._KEY_SPECIAL_PROPERTY)
+            datastore_types.KEY_SPECIAL_PROPERTY)
     elif prop in self._model_class._unindexed_properties:
       raise PropertyError('Property \'%s\' is not indexed' % prop)
 
@@ -2439,11 +2481,11 @@
       order = datastore.Query.ASCENDING
 
     if self._model_class is None:
-      if (property != datastore_types._KEY_SPECIAL_PROPERTY or
+      if (property != datastore_types.KEY_SPECIAL_PROPERTY or
           order != datastore.Query.ASCENDING):
         raise BadQueryError(
             'Only %s ascending orders are supported on kindless queries' %
-            datastore_types._KEY_SPECIAL_PROPERTY)
+            datastore_types.KEY_SPECIAL_PROPERTY)
     else:
 
       if not issubclass(self._model_class, Expando):
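A minimal usage sketch for the db.get_indexes() / db.get_indexes_async() calls added above (illustrative only, not part of the patch; it assumes the state constants named in the docstring, such as Index.BUILDING, are exposed on db.Index, and that the accessors aliased from datastore._BaseIndex are called as methods, with properties() yielding (name, direction) pairs):

    from google.appengine.ext import db

    # Blocking form: a list of (Index, state) tuples for this application.
    for index, state in db.get_indexes():
        if state == db.Index.BUILDING:  # assumed constant, per the docstring
            continue
        print 'kind=%r has_ancestor=%r' % (index.kind(), index.has_ancestor())
        for prop_name, direction in index.properties():
            print '  %s %s' % (prop_name, direction)

    # Asynchronous form: issue the RPC, do other work, then block on the result.
    future = db.get_indexes_async()
    indexes_and_states = future.get_result()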
diff --git a/google/appengine/ext/go/__init__.py b/google/appengine/ext/go/__init__.py
index 3f029d6..ee93431 100644
--- a/google/appengine/ext/go/__init__.py
+++ b/google/appengine/ext/go/__init__.py
@@ -79,6 +79,8 @@
 HEADER_MAP = {
     'APPLICATION_ID': 'X-AppEngine-Inbound-AppId',
     'CONTENT_TYPE': 'Content-Type',
+    'CURRENT_VERSION_ID': 'X-AppEngine-Inbound-Version-Id',
+    'HTTP_HOST': 'X-AppEngine-Default-Version-Hostname',
     'REMOTE_ADDR': 'X-AppEngine-Remote-Addr',
     'USER_EMAIL': 'X-AppEngine-Inbound-User-Email',
     'USER_ID': 'X-AppEngine-Inbound-User-Id',
@@ -230,25 +232,35 @@
 
 
 
-def find_go_files_mtime(basedir):
+def find_app_files(basedir):
   if not basedir.endswith(os.path.sep):
     basedir = basedir + os.path.sep
-  files, dirs, mtime = [], [basedir], 0
+  files, dirs = {}, [basedir]
   while dirs:
     dname = dirs.pop()
     for entry in os.listdir(dname):
       ename = os.path.join(dname, entry)
-      if (APP_CONFIG.skip_files.match(ename) or
-          APP_CONFIG.nobuild_files.match(ename)):
+      if APP_CONFIG.skip_files.match(ename):
         continue
       s = os.stat(ename)
       if stat.S_ISDIR(s[stat.ST_MODE]):
         dirs.append(ename)
         continue
-      if not ename.endswith('.go'):
-        continue
-      files.append(ename[len(basedir):])
-      mtime = max(mtime, s[stat.ST_MTIME])
+      files[ename[len(basedir):]] = s[stat.ST_MTIME]
+  return files
+
+
+
+
+def find_go_files_mtime(app_files):
+  files, mtime = [], 0
+  for f, mt in app_files.items():
+    if not f.endswith('.go'):
+      continue
+    if APP_CONFIG.nobuild_files.match(f):
+      continue
+    files.append(f)
+    mtime = max(mtime, mt)
   return files, mtime
 
 
@@ -280,6 +292,7 @@
   def __init__(self, root_path):
     self.root_path = root_path
     self.proc = None
+    self.proc_start = 0
     self.goroot = os.path.join(
 
         up(__file__, 5),
@@ -295,22 +308,40 @@
     if not self.arch:
       raise Exception('bad goroot: no compiler found')
 
+    atexit.register(self.cleanup)
+
+  def cleanup(self):
+    if self.proc:
+      os.kill(self.proc.pid, signal.SIGTERM)
+
   def make_and_run(self):
-    go_files, go_mtime = find_go_files_mtime(self.root_path)
+    app_files = find_app_files(self.root_path)
+    go_files, go_mtime = find_go_files_mtime(app_files)
     if not go_files:
       raise Exception('no .go files in %s', self.root_path)
-    app_name, app_mtime = os.path.join(GAB_WORK_DIR, GO_APP_NAME), 0
+    app_mtime = max(app_files.values())
+    bin_name, bin_mtime = os.path.join(GAB_WORK_DIR, GO_APP_NAME), 0
     try:
-      app_mtime = os.stat(app_name)[stat.ST_MTIME]
+      bin_mtime = os.stat(bin_name)[stat.ST_MTIME]
     except:
       pass
 
-    if go_mtime >= app_mtime:
-      if self.proc:
-        os.kill(self.proc.pid, signal.SIGTERM)
-        self.proc.wait()
-        self.proc = None
-      self.build(go_files, app_name)
+
+
+
+    rebuild, restart = False, False
+    if go_mtime >= bin_mtime:
+      rebuild, restart = True, True
+    elif app_mtime > self.proc_start:
+      restart = True
+
+    if restart and self.proc:
+      os.kill(self.proc.pid, signal.SIGTERM)
+      self.proc.wait()
+      self.proc = None
+    if rebuild:
+      self.build(go_files)
+
 
     if not self.proc or self.proc.poll() is not None:
       logging.info('running ' + GO_APP_NAME)
@@ -319,13 +350,14 @@
           'PWD': self.root_path,
           'TZ': 'UTC',
       }
-      self.proc = subprocess.Popen([app_name,
+      self.proc_start = app_mtime
+      self.proc = subprocess.Popen([bin_name,
           '-addr_http', 'unix:' + SOCKET_HTTP,
           '-addr_api', 'unix:' + SOCKET_API],
           cwd=self.root_path, env=env)
       wait_until_go_app_ready(self.proc.pid)
 
-  def build(self, go_files, app_name):
+  def build(self, go_files):
     logging.info('building ' + GO_APP_NAME)
     if not os.path.exists(GAB_WORK_DIR):
       os.makedirs(GAB_WORK_DIR)
diff --git a/google/appengine/ext/gql/__init__.py b/google/appengine/ext/gql/__init__.py
index 791c006..ffb0833 100755
--- a/google/appengine/ext/gql/__init__.py
+++ b/google/appengine/ext/gql/__init__.py
@@ -197,6 +197,7 @@
     \*|
     -?\d+(?:\.\d+)?|
     \w+|
+    (?:"[^"\s]+")+|
     \(|\)|
     \S+
     """, re.VERBOSE | re.IGNORECASE)
@@ -802,6 +803,10 @@
   __ordinal_regex = re.compile(r':(\d+)$')
   __named_regex = re.compile(r':(\w+)$')
   __identifier_regex = re.compile(r'(\w+)$')
+
+
+
+  __quoted_identifier_regex = re.compile(r'((?:"[^"\s]+")+)$')
   __conditions_regex = re.compile(r'(<=|>=|!=|=|<|>|is|in)$', re.IGNORECASE)
   __number_regex = re.compile(r'(\d+)$')
   __cast_regex = re.compile(
@@ -921,7 +926,7 @@
       True if parsing completed okay.
     """
     if self.__Accept('FROM'):
-      kind = self.__AcceptRegex(self.__identifier_regex)
+      kind = self.__Identifier()
       if kind:
         self._entity = kind
       else:
@@ -946,7 +951,7 @@
 
   def __FilterList(self):
     """Consume the filter list (remainder of the WHERE clause)."""
-    identifier = self.__AcceptRegex(self.__identifier_regex)
+    identifier = self.__Identifier()
     if not identifier:
       self.__Error('Invalid WHERE Identifier')
       return False
@@ -1077,6 +1082,26 @@
     return self.__AddProcessedParameterFilter(identifier, condition,
                                               'nop', [parameter])
 
+  def __Identifier(self):
+    """Consume an identifier and return it.
+
+    Returns:
+      The identifier string. If quoted, the surrounding quotes are stripped.
+    """
+    logging.log(LOG_LEVEL, 'Try Identifier')
+    identifier = self.__AcceptRegex(self.__identifier_regex)
+    if not identifier:
+
+
+      identifier = self.__AcceptRegex(self.__quoted_identifier_regex)
+      if identifier:
+
+
+
+
+        identifier = identifier[1:-1].replace('""', '"')
+    return identifier
+
   def __Reference(self):
     """Consume a parameter reference and return it.
 
@@ -1130,6 +1155,8 @@
 
 
 
+
+
       literal = self.__AcceptRegex(self.__quoted_string_regex)
       if literal:
         literal = literal[1:-1].replace("''", "'")
@@ -1199,7 +1226,7 @@
     """Consume variables and sort order for ORDER BY clause."""
 
 
-    identifier = self.__AcceptRegex(self.__identifier_regex)
+    identifier = self.__Identifier()
     if identifier:
       if self.__Accept('DESC'):
         self.__orderings.append((identifier, datastore.Query.DESCENDING))
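The quoted-identifier support added to the tokenizer and __Identifier() above lets kind and property names containing characters such as '.', '-' or ':' appear in GQL by wrapping them in double quotes, with embedded double quotes doubled (mirroring the existing single-quote handling for string literals). A short sketch of what this enables, assuming the usual db.GqlQuery entry point into this parser and hypothetical kind/property names:

    from google.appengine.ext import db

    # 'my-module:Event.v2' and 'event-type' are hypothetical names that a bare
    # identifier cannot express; __Identifier() strips the surrounding quotes.
    query = db.GqlQuery(
        'SELECT * FROM "my-module:Event.v2" WHERE "event-type" = :1', 'click')
    results = query.fetch(10)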
diff --git a/google/appengine/ext/mapreduce/control.py b/google/appengine/ext/mapreduce/control.py
index 432a3f8..3c38ce0 100755
--- a/google/appengine/ext/mapreduce/control.py
+++ b/google/appengine/ext/mapreduce/control.py
@@ -54,7 +54,7 @@
               shard_count=_DEFAULT_SHARD_COUNT,
               output_writer_spec=None,
               mapreduce_parameters=None,
-              base_path=base_handler._DEFAULT_BASE_PATH,
+              base_path=None,
               queue_name="default",
               eta=None,
               countdown=None,
@@ -89,6 +89,8 @@
   """
   if not shard_count:
     shard_count = _DEFAULT_SHARD_COUNT
+  if base_path is None:
+    base_path = base_handler._DEFAULT_BASE_PATH
   mapper_spec = model.MapperSpec(handler_spec,
                                  reader_spec,
                                  mapper_parameters,
diff --git a/google/appengine/ext/mapreduce/handlers.py b/google/appengine/ext/mapreduce/handlers.py
index 1976e98..a496179 100755
--- a/google/appengine/ext/mapreduce/handlers.py
+++ b/google/appengine/ext/mapreduce/handlers.py
@@ -812,7 +812,7 @@
   @classmethod
   def _start_map(cls, name, mapper_spec,
                  mapreduce_params,
-                 base_path="/mapreduce",
+                 base_path=None,
                  queue_name="default",
                  eta=None,
                  countdown=None,
diff --git a/google/appengine/ext/mapreduce/input_readers.py b/google/appengine/ext/mapreduce/input_readers.py
index 6084c8a..558888d 100755
--- a/google/appengine/ext/mapreduce/input_readers.py
+++ b/google/appengine/ext/mapreduce/input_readers.py
@@ -263,6 +263,9 @@
       if self._current_key_range is None:
         if self._key_ranges:
           self._current_key_range = self._key_ranges.pop()
+
+
+          continue
         else:
           break
 
@@ -325,9 +328,7 @@
   @classmethod
   def _choose_split_points(cls, random_keys, shard_count):
     """Returns the best split points given a random set of db.Keys."""
-    if len(random_keys) < shard_count:
-      return sorted(random_keys)
-
+    assert len(random_keys) >= shard_count
     index_stride = len(random_keys) / float(shard_count)
     return [sorted(random_keys)[int(round(index_stride * i))]
             for i in range(1, shard_count)]
@@ -337,7 +338,13 @@
   @classmethod
   def _split_input_from_namespace(cls, app, namespace, entity_kind_name,
                                   shard_count):
-    """Return KeyRange objects. Helper for _split_input_from_params."""
+    """Return KeyRange objects. Helper for _split_input_from_params.
+
+    If there are not enough Entities to make all of the given shards, the
+    returned list of KeyRanges will include Nones. The returned list will
+    contain KeyRanges ordered lexicographically with any Nones appearing at the
+    end.
+    """
 
     raw_entity_kind = util.get_short_name(entity_kind_name)
 
@@ -353,10 +360,11 @@
                                keys_only=True)
     ds_query.Order("__scatter__")
     random_keys = ds_query.Get(shard_count * cls._OVERSAMPLING_FACTOR)
-    if not random_keys:
+    if not random_keys or len(random_keys) < shard_count:
 
 
-      return [key_range.KeyRange(namespace=namespace, _app=app)]
+      return ([key_range.KeyRange(namespace=namespace, _app=app)] +
+          [None] * (shard_count - 1))
     else:
       random_keys = cls._choose_split_points(random_keys, shard_count)
 
@@ -411,6 +419,7 @@
     for i, k_range in enumerate(key_ranges):
       shared_ranges[i % shard_count].append(k_range)
     batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
+
     return [cls(entity_kind_name,
                 key_ranges=key_ranges,
                 ns_range=None,
@@ -453,9 +462,7 @@
 
     Tries as best as it can to split the whole query result set into equal
     shards. Due to difficulty of making the perfect split, resulting shards'
-    sizes might differ significantly from each other. The actual number of
-    shards might also be less then requested (even 1), though it is never
-    greater.
+    sizes might differ significantly from each other.
 
     Args:
       mapper_spec: MapperSpec with params containing 'entity_kind'.
@@ -466,7 +473,10 @@
         to specify the number of entities to process in each batch.
 
     Returns:
-      A list of InputReader objects of length <= number_of_shards.
+      A list of InputReader objects. If the query results are empty, then the
+      empty list will be returned. Otherwise, the list will always have a length
+      equal to number_of_shards but may be padded with Nones if there are too
+      few results for effective sharding.
     """
     params = mapper_spec.params
     entity_kind_name = params[cls.ENTITY_KIND_PARAM]
@@ -520,7 +530,12 @@
     if self._key_ranges is None:
       key_ranges_json = None
     else:
-      key_ranges_json = [k.to_json() for k in self._key_ranges]
+      key_ranges_json = []
+      for k in self._key_ranges:
+        if k:
+          key_ranges_json.append(k.to_json())
+        else:
+          key_ranges_json.append(None)
 
     if self._ns_range is None:
       namespace_range_json = None
@@ -552,8 +567,12 @@
     if json[cls.KEY_RANGE_PARAM] is None:
       key_ranges = None
     else:
-      key_ranges = [key_range.KeyRange.from_json(k)
-                    for k in json[cls.KEY_RANGE_PARAM]]
+      key_ranges = []
+      for k in json[cls.KEY_RANGE_PARAM]:
+        if k:
+          key_ranges.append(key_range.KeyRange.from_json(k))
+        else:
+          key_ranges.append(None)
 
     if json[cls.NAMESPACE_RANGE_PARAM] is None:
       ns_range = None
@@ -1290,15 +1309,21 @@
                                   shard_count):
     key_ranges = super(ConsistentKeyReader, cls)._split_input_from_namespace(
         app, namespace, entity_kind_name, shard_count)
+    assert len(key_ranges) == shard_count
 
 
 
 
-    if key_ranges:
+    try:
+      last_key_range_index = key_ranges.index(None) - 1
+    except ValueError:
+      last_key_range_index = shard_count - 1
+
+    if last_key_range_index != -1:
       key_ranges[0].key_start = None
       key_ranges[0].include_start = False
-      key_ranges[-1].key_end = None
-      key_ranges[-1].include_end = False
+      key_ranges[last_key_range_index].key_end = None
+      key_ranges[last_key_range_index].include_end = False
     return key_ranges
 
   @classmethod
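The index(None) lookup above simply finds the last real KeyRange so that its end can be left open; a standalone sketch of that lookup, assuming a padded list of four entries:

    from google.appengine.ext import key_range

    key_ranges = [key_range.KeyRange(), key_range.KeyRange(), None, None]
    try:
      last_key_range_index = key_ranges.index(None) - 1   # last real range
    except ValueError:
      last_key_range_index = len(key_ranges) - 1          # no padding at all
    assert last_key_range_index == 1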
@@ -1457,12 +1482,13 @@
     return repr(self.ns_range)
 
 
-
-
 class RecordsReader(InputReader):
   """Reader to read a list of Files API file in records format.
 
-  All files are read in a single shard consequently.
+  The number of input shards can be specified by the SHARDS_PARAM mapper
+  parameter. Input files cannot be split, so there will be at most one shard
+  per file. The number of shards is not reduced based on the number of input
+  files, so the number of shards requested always equals the number returned.
   """
 
   FILE_PARAM = "file"
@@ -1544,6 +1570,7 @@
       A list of InputReaders.
     """
     params = mapper_spec.params
+    shard_count = mapper_spec.shard_count
 
     if cls.FILES_PARAM in params:
       filenames = params[cls.FILES_PARAM]
@@ -1552,7 +1579,14 @@
     else:
       filenames = [params[cls.FILE_PARAM]]
 
-    return [RecordsReader(filenames, 0)]
+    batch_list = [[] for _ in xrange(shard_count)]
+    for index, filename in enumerate(filenames):
+
+      batch_list[index % shard_count].append(filename)
+
+
+    batch_list.sort(reverse=True, key=lambda x: len(x))
+    return [RecordsReader(batch, 0) for batch in batch_list]
 
   @classmethod
   def validate(cls, mapper_spec):
@@ -1574,4 +1608,7 @@
           (cls.FILES_PARAM, cls.FILE_PARAM))
 
   def __str__(self):
-    return "%s:%s" % (self._filenames, self._reader.tell())
+    position = 0
+    if self._reader:
+      position = self._reader.tell()
+    return "%s:%s" % (self._filenames, position)
diff --git a/google/appengine/ext/mapreduce/mapper_pipeline.py b/google/appengine/ext/mapreduce/mapper_pipeline.py
index 701d7cd..0b68dd0 100644
--- a/google/appengine/ext/mapreduce/mapper_pipeline.py
+++ b/google/appengine/ext/mapreduce/mapper_pipeline.py
@@ -18,6 +18,18 @@
 
 
 
+
+
+
+
+
+
+
+
+
+
+
+
 """Pipelines for mapreduce library."""
 
 
@@ -74,7 +86,7 @@
         output_writer_spec=output_writer_spec,
         )
     self.fill(self.outputs.job_id, mapreduce_id)
-    self.set_status(console_url="%s/details/detail?job_id=%s" % (
+    self.set_status(console_url="%s/detail?job_id=%s" % (
         (base_handler._DEFAULT_BASE_PATH, mapreduce_id)))
 
   def callback(self):
diff --git a/google/appengine/ext/mapreduce/mapreduce_pipeline.py b/google/appengine/ext/mapreduce/mapreduce_pipeline.py
index 9a3ee0c..4b3d3b7 100644
--- a/google/appengine/ext/mapreduce/mapreduce_pipeline.py
+++ b/google/appengine/ext/mapreduce/mapreduce_pipeline.py
@@ -18,6 +18,18 @@
 
 
 
+
+
+
+
+
+
+
+
+
+
+
+
 """Pipelines for mapreduce library."""
 
 from __future__ import with_statement
@@ -82,7 +94,7 @@
     reader_spec: specification of reduce function.
     output_writer_spec: specification of output write to use with reduce
       function.
-    parmas: mapper parameters to use as dict.
+    params: mapper parameters to use as dict.
     filenames: list of filenames to reduce.
 
   Returns:
diff --git a/google/appengine/ext/mapreduce/shuffler.py b/google/appengine/ext/mapreduce/shuffler.py
index 9beec49..41b2b5e 100644
--- a/google/appengine/ext/mapreduce/shuffler.py
+++ b/google/appengine/ext/mapreduce/shuffler.py
@@ -18,6 +18,18 @@
 
 
 
+
+
+
+
+
+
+
+
+
+
+
+
 """Mapreduce shuffler implementation."""
 
 from __future__ import with_statement
@@ -106,17 +118,16 @@
   l = len(records)
   proto_records = [None] * l
 
-
-  logging.info("parsing")
+  logging.debug("Parsing")
   for i in range(l):
     proto = file_service_pb.KeyValue()
     proto.ParseFromString(records[i])
     proto_records[i] = proto
 
-  logging.info("sorting")
+  logging.debug("Sorting")
   proto_records.sort(cmp=_compare_keys)
 
-  logging.info("writing")
+  logging.debug("Writing")
   blob_file_name = (ctx.mapreduce_spec.name + "-" +
                     ctx.mapreduce_id + "-output")
   output_path = files.blobstore.create(
@@ -125,8 +136,9 @@
     for proto in proto_records:
       pool.append(proto.Encode())
 
-  logging.info("finalizing")
+  logging.debug("Finalizing")
   files.finalize(output_path)
+  time.sleep(1)
   output_path = files.blobstore.get_file_name(
       files.blobstore.get_blob_key(output_path))
 
diff --git a/google/appengine/ext/mapreduce/static/base.css b/google/appengine/ext/mapreduce/static/base.css
index 0fca75b..95d237f 100644
--- a/google/appengine/ext/mapreduce/static/base.css
+++ b/google/appengine/ext/mapreduce/static/base.css
@@ -48,10 +48,6 @@
   margin: 0.3em;
 }
 
-.editable-input > label:after {
-  content: ': ';
-}
-
 #launch-control {
   margin-bottom: 0.5em;
 }
@@ -72,16 +68,6 @@
 }
 
 /* Shared */
-.param-key:after {
-  content: ': ';
-}
-.user-param-key:after {
-  content: ': ';
-}
-.param-aux:before {
-  content: ' ';
-}
-
 .status-table {
   margin: 5px;
   border-collapse: collapse;
diff --git a/google/appengine/ext/mapreduce/static/status.js b/google/appengine/ext/mapreduce/static/status.js
index 7115de8..6147bd1 100644
--- a/google/appengine/ext/mapreduce/static/status.js
+++ b/google/appengine/ext/mapreduce/static/status.js
@@ -18,7 +18,7 @@
 
 // Sets the status butter, optionally indicating if it's an error message.
 function setButter(message, error) {
-  var butter = $("#butter");
+  var butter = getButterBar();
   // Prevent flicker on butter update by hiding it first.
   butter.css('display', 'none');
   if (error) {
@@ -30,6 +30,16 @@
   $(document).scrollTop(0);
 }
 
+// Hides the butter bar.
+function hideButter() {
+  getButterBar().css('display', 'none');
+}
+
+// Fetches the butter bar dom element.
+function getButterBar() {
+  return $('#butter');
+}
+
 // Given an AJAX error message (which is empty or null on success) and a
 // data payload containing JSON, parses the data payload and returns the object.
 // Server-side errors and AJAX errors will be brought to the user's attention
@@ -71,11 +81,17 @@
   });
 }
 
-// Return the list of job records.
+// Returns the list of job records and notifies the user that the content
+// is being fetched.
 function listJobs(cursor, resultFunc) {
+  // If the user is paging then they scrolled down so let's
+  // help them by scrolling the window back to the top.
+  var jumpToTop = !!cursor;
+  cursor = cursor ? cursor : '';
+  setButter('Loading');
   $.ajax({
     type: 'GET',
-    url: 'command/list_jobs',
+    url: 'command/list_jobs?cursor=' + cursor,
     dataType: 'text',
     error: function(request, textStatus) {
       getResponseDataJson(textStatus);
@@ -84,6 +100,10 @@
       var response = getResponseDataJson(null, data);
       if (response) {
         resultFunc(response.jobs, response.cursor);
+        if (jumpToTop) {
+          window.scrollTo(0, 0);
+        }
+        hideButter();  // Hide the loading message.
       }
     }
   });
@@ -222,7 +242,7 @@
 // Retrieves the mapreduce_id from the query string. Assumes that it is
 // the only querystring parameter.
 function getJobId() {
-  var index = window.location.search.lastIndexOf("=");
+  var index = window.location.search.lastIndexOf('=');
   if (index == -1) {
     return '';
   }
@@ -238,7 +258,7 @@
   body.empty();
 
   if (!jobs || (jobs && jobs.length == 0)) {
-    $('<td colspan="8">').text("No job records found.").appendTo(body);
+    $('<td colspan="8">').text('No job records found.').appendTo(body);
     return;
   }
 
@@ -380,7 +400,7 @@
         return false;
       })
       .css('display', 'none')
-      .appendTo("#launch-container");
+      .appendTo('#launch-container');
 
     // Fixed job config values.
     $.each(FIXED_JOB_PARAMS, function(unused, key) {
@@ -390,6 +410,7 @@
         // Name is up in the page title so doesn't need to be shown again.
         $('<p class="job-static-param">')
           .append($('<span class="param-key">').text(getNiceParamKey(key)))
+          .append($('<span>').text(': '))
           .append($('<span class="param-value">').text(value))
           .appendTo(jobForm);
       }
@@ -414,18 +435,19 @@
         // Deal with the case in which the value is an object rather than
         // just the default value string.
         var prettyKey = key;
-        if (value && value["human_name"]) {
-          prettyKey = value["human_name"];
+        if (value && value['human_name']) {
+          prettyKey = value['human_name'];
         }
 
-        if (value && value["default_value"]) {
-          value = value["default_value"];
+        if (value && value['default_value']) {
+          value = value['default_value'];
         }
 
         $('<label>')
           .attr('for', paramId)
           .text(prettyKey)
           .appendTo(paramP);
+        $('<span>').text(': ').appendTo(paramP);
         $('<input type="text">')
           .attr('id', paramId)
           .attr('name', prefix + key)
@@ -435,8 +457,8 @@
       });
     }
 
-    addParameters(config.params, "params.");
-    addParameters(config.mapper_params, "mapper_params.");
+    addParameters(config.params, 'params.');
+    addParameters(config.mapper_params, 'mapper_params.');
 
     $('<input type="submit">')
       .attr('value', 'Run')
@@ -478,11 +500,13 @@
 
   $('<li>')
     .append($('<span class="param-key">').text('Elapsed time'))
+    .append($('<span>').text(': '))
     .append($('<span class="param-value">').text(getElapsedTimeString(
           detail.start_timestamp_ms, detail.updated_timestamp_ms)))
     .appendTo(jobParams);
   $('<li>')
     .append($('<span class="param-key">').text('Start time'))
+    .append($('<span>').text(': '))
     .append($('<span class="param-value">').text(getLocalTimestring(
           detail.start_timestamp_ms)))
     .appendTo(jobParams);
@@ -495,7 +519,8 @@
 
     $('<li>')
       .append($('<span class="param-key">').text(getNiceParamKey(key)))
-      .append($('<span class="param-value">').text("" + value))
+      .append($('<span>').text(': '))
+      .append($('<span class="param-value">').text('' + value))
       .appendTo(jobParams);
   });
 
@@ -505,7 +530,8 @@
     $.each(sortedKeys, function(index, key) {
       var value = detail.mapper_spec.mapper_params[key];
       $('<li>')
-        .append($('<span class="user-param-key"">').text(key))
+        .append($('<span class="user-param-key">').text(key))
+        .append($('<span>').text(': '))
         .append($('<span class="param-value">').html("" + value))
         .appendTo(jobParams);
     });
@@ -532,7 +558,9 @@
     var avgRate = Math.round(100.0 * value / (runtimeMs / 1000.0)) / 100.0;
     $('<li>')
       .append($('<span class="param-key">').html(getNiceParamKey(key)))
+      .append($('<span>').text(': '))
       .append($('<span class="param-value">').html(value))
+      .append($('<span>').text(' '))
       .append($('<span class="param-aux">').text('(' + avgRate + '/sec avg.)'))
       .appendTo(aggregatedCounters);
   });
@@ -597,7 +625,7 @@
 function initDetail() {
   var jobId = getJobId();
   if (!jobId) {
-    setButter("Could not find job ID in query string.", true);
+    setButter('Could not find job ID in query string.', true);
     return;
   }
   getJobDetail(jobId, initJobDetail);
diff --git a/google/appengine/ext/remote_api/remote_api_services.py b/google/appengine/ext/remote_api/remote_api_services.py
index 5176527..2f1436b 100755
--- a/google/appengine/ext/remote_api/remote_api_services.py
+++ b/google/appengine/ext/remote_api/remote_api_services.py
@@ -88,8 +88,6 @@
                   file_service_pb.CloseResponse),
         'Append': (file_service_pb.AppendRequest,
                    file_service_pb.AppendResponse),
-        'AppendKeyValue': (file_service_pb.AppendKeyValueRequest,
-                           file_service_pb.AppendKeyValueResponse),
         'Stat': (file_service_pb.StatRequest,
                  file_service_pb.StatResponse),
         'Delete': (file_service_pb.DeleteRequest,
diff --git a/google/appengine/ext/remote_api/remote_api_stub.py b/google/appengine/ext/remote_api/remote_api_stub.py
index ab99adf..e283281 100755
--- a/google/appengine/ext/remote_api/remote_api_stub.py
+++ b/google/appengine/ext/remote_api/remote_api_stub.py
@@ -451,7 +451,7 @@
             'Transaction %d not found.' % (txid,))
 
       txdata = self.__transactions[txid]
-      assert (txdata[txid].thread_id ==
+      assert (txdata.thread_id ==
           thread.get_ident()), "Transactions are single-threaded."
       del self.__transactions[txid]
     finally:
diff --git a/google/appengine/ext/webapp/__init__.py b/google/appengine/ext/webapp/__init__.py
index ea1cae7..c1ac324 100755
--- a/google/appengine/ext/webapp/__init__.py
+++ b/google/appengine/ext/webapp/__init__.py
@@ -18,14 +18,23 @@
 
 
 
+
+
+
 """An extremely simple WSGI web application framework.
 
-This module exports three primary classes: Request, Response, and
-RequestHandler. You implement a web application by subclassing RequestHandler.
-As WSGI requests come in, they are passed to instances of your RequestHandlers.
-The RequestHandler class provides access to the easy-to-use Request and
-Response objects so you can interpret the request and write the response with
-no knowledge of the esoteric WSGI semantics.  Here is a simple example:
+This module is an alias for the webapp2 module, i.e. the following are
+equivalent:
+
+1. from google.appengine.ext import webapp
+2. import webapp2 as webapp
+
+It exports three primary classes: Request, Response, and RequestHandler. You
+implement a web application by subclassing RequestHandler. As WSGI requests come
+in, they are passed to instances of your RequestHandlers. The RequestHandler
+class provides access to the easy-to-use Request and Response objects so you can
+interpret the request and write the response with no knowledge of the esoteric
+WSGI semantics.  Here is a simple example:
 
   from google.appengine.ext import webapp
   import wsgiref.simple_server
@@ -47,10 +56,6 @@
     ('/hello', HelloPage)
   ], debug=True)
 
-  server = wsgiref.simple_server.make_server('', 8080, application)
-  print 'Serving on port 8080...'
-  server.serve_forever()
-
 The WSGIApplication class maps URI regular expressions to your RequestHandler
 classes.  It is a WSGI-compatible application object, so you can use it in
 conjunction with wsgiref to make your web application into, e.g., a CGI
@@ -61,772 +66,13 @@
 """
 
 
-import cgi
-import StringIO
-import logging
 import os
-import re
-import sys
-import traceback
-import urlparse
-import webob
-import wsgiref.handlers
-import wsgiref.headers
-import wsgiref.util
 
 from google.appengine.api import lib_config
 
 
-
-wsgiref.handlers.BaseHandler.os_environ = {}
-
-
-RE_FIND_GROUPS = re.compile('\(.*?\)')
-_CHARSET_RE = re.compile(r';\s*charset=([^;\s]*)', re.I)
-
-class Error(Exception):
-  """Base of all exceptions in the webapp module."""
-  pass
-
-
-class CannotReversePattern(Error):
-  """Thrown when a url_pattern cannot be reversed."""
-  pass
-
-
-class NoUrlFoundError(Error):
-  """Thrown when RequestHandler.get_url() fails."""
-  pass
-
-
-class Request(webob.Request):
-  """Abstraction for an HTTP request.
-
-  Properties:
-    uri: the complete URI requested by the user
-    scheme: 'http' or 'https'
-    host: the host, including the port
-    path: the path up to the ';' or '?' in the URL
-    parameters: the part of the URL between the ';' and the '?', if any
-    query: the part of the URL after the '?'
-
-  You can access parsed query and POST values with the get() method; do not
-  parse the query string yourself.
-  """
-
-
-
-
-
-  request_body_tempfile_limit = 0
-
-  uri = property(lambda self: self.url)
-  query = property(lambda self: self.query_string)
-
-  def __init__(self, environ):
-    """Constructs a Request object from a WSGI environment.
-
-    If the charset isn't specified in the Content-Type header, defaults
-    to UTF-8.
-
-    Args:
-      environ: A WSGI-compliant environment dictionary.
-    """
-    match = _CHARSET_RE.search(environ.get('CONTENT_TYPE', ''))
-    if match:
-      charset = match.group(1).lower()
-    else:
-      charset = 'utf-8'
-
-    webob.Request.__init__(self, environ, charset=charset,
-                           unicode_errors= 'ignore', decode_param_names=True)
-
-  def get(self, argument_name, default_value='', allow_multiple=False):
-    """Returns the query or POST argument with the given name.
-
-    We parse the query string and POST payload lazily, so this will be a
-    slower operation on the first call.
-
-    Args:
-      argument_name: the name of the query or POST argument
-      default_value: the value to return if the given argument is not present
-      allow_multiple: return a list of values with the given name (deprecated)
-
-    Returns:
-      If allow_multiple is False (which it is by default), we return the first
-      value with the given name given in the request. If it is True, we always
-      return a list.
-    """
-    param_value = self.get_all(argument_name)
-    if allow_multiple:
-      logging.warning('allow_multiple is a deprecated param, please use the '
-                      'Request.get_all() method instead.')
-    if len(param_value) > 0:
-      if allow_multiple:
-        return param_value
-      return param_value[0]
-    else:
-      if allow_multiple and not default_value:
-        return []
-      return default_value
-
-  def get_all(self, argument_name, default_value=None):
-    """Returns a list of query or POST arguments with the given name.
-
-    We parse the query string and POST payload lazily, so this will be a
-    slower operation on the first call.
-
-    Args:
-      argument_name: the name of the query or POST argument
-      default_value: the value to return if the given argument is not present,
-        None may not be used as a default, if it is then an empty list will be
-        returned instead.
-
-    Returns:
-      A (possibly empty) list of values.
-    """
-    if self.charset:
-      argument_name = argument_name.encode(self.charset)
-
-    if default_value is None:
-      default_value = []
-
-    param_value = self.params.getall(argument_name)
-
-    if param_value is None or len(param_value) == 0:
-      return default_value
-
-    for i in xrange(len(param_value)):
-      if isinstance(param_value[i], cgi.FieldStorage):
-        param_value[i] = param_value[i].value
-
-    return param_value
-
-  def arguments(self):
-    """Returns a list of the arguments provided in the query and/or POST.
-
-    The return value is a list of strings.
-    """
-    return list(set(self.params.keys()))
-
-  def get_range(self, name, min_value=None, max_value=None, default=0):
-    """Parses the given int argument, limiting it to the given range.
-
-    Args:
-      name: the name of the argument
-      min_value: the minimum int value of the argument (if any)
-      max_value: the maximum int value of the argument (if any)
-      default: the default value of the argument if it is not given
-
-    Returns:
-      An int within the given range for the argument
-    """
-    value = self.get(name, default)
-    if value is None:
-      return value
-    try:
-      value = int(value)
-    except ValueError:
-      value = default
-    if value is not None:
-      if max_value is not None:
-        value = min(value, max_value)
-      if min_value is not None:
-        value = max(value, min_value)
-    return value
-
-
-class Response(object):
-  """Abstraction for an HTTP response.
-
-  Properties:
-    out: file pointer for the output stream
-    headers: wsgiref.headers.Headers instance representing the output headers
-  """
-  def __init__(self):
-    """Constructs a response with the default settings."""
-
-
-    self.out = StringIO.StringIO()
-    self.__wsgi_headers = []
-    self.headers = wsgiref.headers.Headers(self.__wsgi_headers)
-    self.headers['Content-Type'] = 'text/html; charset=utf-8'
-    self.headers['Cache-Control'] = 'no-cache'
-
-    self.set_status(200)
-
-  @property
-  def status(self):
-    """Returns current request status code."""
-    return self.__status[0]
-
-  @property
-  def status_message(self):
-    """Returns current request status message."""
-    return self.__status[1]
-
-  def set_status(self, code, message=None):
-    """Sets the HTTP status code of this response.
-
-    Args:
-      message: the HTTP status string to use
-
-    If no status string is given, we use the default from the HTTP/1.1
-    specification.
-    """
-    if not message:
-      message = Response.http_status_message(code)
-    self.__status = (code, message)
-
-  def has_error(self):
-    """Indicates whether the response was an error response."""
-    return self.__status[0] >= 400
-
-  def clear(self):
-    """Clears all data written to the output stream so that it is empty."""
-    self.out.seek(0)
-    self.out.truncate(0)
-
-  def wsgi_write(self, start_response):
-    """Writes this response using WSGI semantics with the given WSGI function.
-
-    Args:
-      start_response: the WSGI-compatible start_response function
-    """
-    body = self.out.getvalue()
-    if isinstance(body, unicode):
-
-
-      body = body.encode('utf-8')
-    elif self.headers.get('Content-Type', '').endswith('; charset=utf-8'):
-
-
-      try:
-
-
-        body.decode('utf-8')
-      except UnicodeError, e:
-        logging.warning('Response written is not UTF-8: %s', e)
-
-    if (self.headers.get('Cache-Control') == 'no-cache' and
-        not self.headers.get('Expires')):
-      self.headers['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
-    self.headers['Content-Length'] = str(len(body))
-
-
-    new_headers = []
-    for header, value in self.__wsgi_headers:
-      if not isinstance(value, basestring):
-        value = unicode(value)
-      if ('\n' in header or '\r' in header or
-          '\n' in value or '\r' in value):
-        logging.warning('Replacing newline in header: %s', repr((header,value)))
-        value = value.replace('\n','').replace('\r','')
-        header = header.replace('\n','').replace('\r','')
-      new_headers.append((header, value))
-    self.__wsgi_headers = new_headers
-
-    write = start_response('%d %s' % self.__status, self.__wsgi_headers)
-    write(body)
-    self.out.close()
-
-  def http_status_message(code):
-    """Returns the default HTTP status message for the given code.
-
-    Args:
-      code: the HTTP code for which we want a message
-    """
-    if not Response.__HTTP_STATUS_MESSAGES.has_key(code):
-      raise Error('Invalid HTTP status code: %d' % code)
-    return Response.__HTTP_STATUS_MESSAGES[code]
-  http_status_message = staticmethod(http_status_message)
-
-  __HTTP_STATUS_MESSAGES = {
-    100: 'Continue',
-    101: 'Switching Protocols',
-    200: 'OK',
-    201: 'Created',
-    202: 'Accepted',
-    203: 'Non-Authoritative Information',
-    204: 'No Content',
-    205: 'Reset Content',
-    206: 'Partial Content',
-    300: 'Multiple Choices',
-    301: 'Moved Permanently',
-    302: 'Moved Temporarily',
-    303: 'See Other',
-    304: 'Not Modified',
-    305: 'Use Proxy',
-    306: 'Unused',
-    307: 'Temporary Redirect',
-    400: 'Bad Request',
-    401: 'Unauthorized',
-    402: 'Payment Required',
-    403: 'Forbidden',
-    404: 'Not Found',
-    405: 'Method Not Allowed',
-    406: 'Not Acceptable',
-    407: 'Proxy Authentication Required',
-    408: 'Request Time-out',
-    409: 'Conflict',
-    410: 'Gone',
-    411: 'Length Required',
-    412: 'Precondition Failed',
-    413: 'Request Entity Too Large',
-    414: 'Request-URI Too Large',
-    415: 'Unsupported Media Type',
-    416: 'Requested Range Not Satisfiable',
-    417: 'Expectation Failed',
-    500: 'Internal Server Error',
-    501: 'Not Implemented',
-    502: 'Bad Gateway',
-    503: 'Service Unavailable',
-    504: 'Gateway Time-out',
-    505: 'HTTP Version not supported'
-  }
-
-
-class RequestHandler(object):
-  """Our base HTTP request handler. Clients should subclass this class.
-
-  Subclasses should override get(), post(), head(), options(), etc to handle
-  different HTTP methods.
-  """
-  def initialize(self, request, response):
-    """Initializes this request handler with the given Request and Response."""
-    self.request = request
-    self.response = response
-
-  def get(self, *args):
-    """Handler method for GET requests."""
-    self.error(405)
-
-  def post(self, *args):
-    """Handler method for POST requests."""
-    self.error(405)
-
-  def head(self, *args):
-    """Handler method for HEAD requests."""
-    self.error(405)
-
-  def options(self, *args):
-    """Handler method for OPTIONS requests."""
-    self.error(405)
-
-  def put(self, *args):
-    """Handler method for PUT requests."""
-    self.error(405)
-
-  def delete(self, *args):
-    """Handler method for DELETE requests."""
-    self.error(405)
-
-  def trace(self, *args):
-    """Handler method for TRACE requests."""
-    self.error(405)
-
-  def error(self, code):
-    """Clears the response output stream and sets the given HTTP error code.
-
-    Args:
-      code: the HTTP status error code (e.g., 501)
-    """
-    self.response.set_status(code)
-    self.response.clear()
-
-  def redirect(self, uri, permanent=False):
-    """Issues an HTTP redirect to the given relative URL.
-
-    Args:
-      uri: a relative or absolute URI (e.g., '../flowers.html')
-      permanent: if true, we use a 301 redirect instead of a 302 redirect
-    """
-    if permanent:
-      self.response.set_status(301)
-    else:
-      self.response.set_status(302)
-    absolute_url = urlparse.urljoin(self.request.uri, uri)
-    self.response.headers['Location'] = str(absolute_url)
-    self.response.clear()
-
-  def handle_exception(self, exception, debug_mode):
-    """Called if this handler throws an exception during execution.
-
-    The default behavior is to call self.error(500) and print a stack trace
-    if debug_mode is True.
-
-    Args:
-      exception: the exception that was thrown
-      debug_mode: True if the web application is running in debug mode
-    """
-    self.error(500)
-    logging.exception(exception)
-    if debug_mode:
-      lines = ''.join(traceback.format_exception(*sys.exc_info()))
-      self.response.clear()
-      self.response.out.write('<pre>%s</pre>' % (cgi.escape(lines, quote=True)))
-
-  @classmethod
-  def new_factory(cls, *args, **kwargs):
-    """Create new request handler factory.
-
-    Use factory method to create reusable request handlers that just
-    require a few configuration parameters to construct.  Also useful
-    for injecting shared state between multiple request handler
-    instances without relying on global variables.  For example, to
-    create a set of post handlers that will do simple text transformations
-    you can write:
-
-      class ChangeTextHandler(webapp.RequestHandler):
-
-        def __init__(self, transform):
-          self.transform = transform
-
-        def post(self):
-          response_text = self.transform(
-              self.request.request.body_file.getvalue())
-          self.response.out.write(response_text)
-
-      application = webapp.WSGIApplication(
-          [('/to_lower', ChangeTextHandler.new_factory(str.lower)),
-           ('/to_upper', ChangeTextHandler.new_factory(str.upper)),
-          ],
-          debug=True)
-
-    Text POSTed to /to_lower will be lower cased.
-    Text POSTed to /to_upper will be upper cased.
-    """
-    def new_instance():
-      return cls(*args, **kwargs)
-    new_instance.__name__ = cls.__name__ + 'Factory'
-    return new_instance
-
-  @classmethod
-  def get_url(cls, *args, **kargs):
-    """Returns the url for the given handler.
-
-    The default implementation uses the patterns passed to the active
-    WSGIApplication to create a url. However, it is different from Django's
-    urlresolvers.reverse() in the following ways:
-      - It does not try to resolve handlers via module loading
-      - It does not support named arguments
-      - It performs some post-prosessing on the url to remove some regex
-        operators.
-      - It will try to fill in the left-most missing arguments with the args
-        used in the active request.
-
-    Args:
-      args: Parameters for the url pattern's groups.
-      kwargs: Optionally contains 'implicit_args' that can either be a boolean
-              or a tuple. When it is True, it will use the arguments to the
-              active request as implicit arguments. When it is False (default),
-              it will not use any implicit arguments. When it is a tuple, it
-              will use the tuple as the implicit arguments.
-              the left-most args if some are missing from args.
-
-    Returns:
-      The url for this handler/args combination.
-
-    Raises:
-      NoUrlFoundError: No url pattern for this handler has the same
-        number of args that were passed in.
-    """
-
-
-    app = WSGIApplication.active_instance
-    pattern_map = app._pattern_map
-
-    implicit_args = kargs.get('implicit_args', ())
-    if implicit_args == True:
-      implicit_args = app.current_request_args
-
-
-
-    min_params = len(args)
-
-    for pattern_tuple in pattern_map.get(cls, ()):
-      num_params_in_pattern = pattern_tuple[1]
-      if num_params_in_pattern < min_params:
-        continue
-
-      try:
-
-        num_implicit_args = max(0, num_params_in_pattern - len(args))
-        merged_args = implicit_args[:num_implicit_args] + args
-
-        url = _reverse_url_pattern(pattern_tuple[0], *merged_args)
-
-
-
-        url = url.replace('\\', '')
-        url = url.replace('?', '')
-        return url
-      except CannotReversePattern:
-        continue
-
-    logging.warning('get_url failed for Handler name: %r, Args: %r',
-                    cls.__name__, args)
-    raise NoUrlFoundError
-
-
-def _reverse_url_pattern(url_pattern, *args):
-  """Turns a regex that matches a url back into a url by replacing
-  the url pattern's groups with the given args. Removes '^' and '$'
-  from the result.
-
-  Args:
-    url_pattern: A pattern used to match a URL.
-    args: list of values corresponding to groups in url_pattern.
-
-  Returns:
-    A string with url_pattern's groups filled in values from args.
-
-  Raises:
-     CannotReversePattern if either there aren't enough args to fill
-     url_pattern's groups, or if any arg isn't matched by the regular
-     expression fragment in the corresponding group.
-  """
-
-  group_index = [0]
-
-  def expand_group(match):
-    group = match.group(1)
-    try:
-
-      value = str(args[group_index[0]])
-      group_index[0] += 1
-    except IndexError:
-      raise CannotReversePattern('Not enough arguments in url tag')
-
-    if not re.match(group + '$', value):
-      raise CannotReversePattern("Value %r doesn't match (%r)" % (value, group))
-    return value
-
-  result = re.sub(r'\(([^)]+)\)', expand_group, url_pattern.pattern)
-  result = result.replace('^', '')
-  result = result.replace('$', '')
-  return result
-
-
-class RedirectHandler(RequestHandler):
-  """Simple redirection handler.
-
-  Easily configure URLs to redirect to alternate targets.  For example,
-  to configure a web application so that the root URL is always redirected
-  to the /home path, do:
-
-    application = webapp.WSGIApplication(
-        [('/', webapp.RedirectHandler.new_factory('/home', permanent=True)),
-         ('/home', HomeHandler),
-        ],
-        debug=True)
-
-  Handler also useful for setting up obsolete URLs to redirect to new paths.
-  """
-
-  def __init__(self, path, permanent=False):
-    """Constructor.
-
-    Do not use directly.  Configure using new_factory method.
-
-    Args:
-      path: Path to redirect to.
-      permanent: if true, we use a 301 redirect instead of a 302 redirect.
-    """
-    self.path = path
-    self.permanent = permanent
-
-  def get(self):
-    self.redirect(self.path, permanent=self.permanent)
-
-
-class WSGIApplication(object):
-  """Wraps a set of webapp RequestHandlers in a WSGI-compatible application.
-
-  To use this class, pass a list of (URI regular expression, RequestHandler)
-  pairs to the constructor, and pass the class instance to a WSGI handler.
-  See the example in the module comments for details.
-
-  The URL mapping is first-match based on the list ordering.
-  """
-
-  REQUEST_CLASS = Request
-  RESPONSE_CLASS = Response
-
-  def __init__(self, url_mapping, debug=False):
-    """Initializes this application with the given URL mapping.
-
-    Args:
-      url_mapping: list of (URI regular expression, RequestHandler) pairs
-                   (e.g., [('/', ReqHan)])
-      debug: if true, we send Python stack traces to the browser on errors
-    """
-    self._init_url_mappings(url_mapping)
-    self.__debug = debug
-
-
-    WSGIApplication.active_instance = self
-    self.current_request_args = ()
-
-  def __call__(self, environ, start_response):
-    """Called by WSGI when a request comes in."""
-    request = self.REQUEST_CLASS(environ)
-    response = self.RESPONSE_CLASS()
-
-
-    WSGIApplication.active_instance = self
-
-
-    handler = None
-    groups = ()
-    for regexp, handler_class in self._url_mapping:
-      match = regexp.match(request.path)
-      if match:
-        handler = handler_class()
-
-
-        handler.initialize(request, response)
-        groups = match.groups()
-        break
-
-
-    self.current_request_args = groups
-
-
-    if handler:
-      try:
-        method = environ['REQUEST_METHOD']
-        if method == 'GET':
-          handler.get(*groups)
-        elif method == 'POST':
-          handler.post(*groups)
-        elif method == 'HEAD':
-          handler.head(*groups)
-        elif method == 'OPTIONS':
-          handler.options(*groups)
-        elif method == 'PUT':
-          handler.put(*groups)
-        elif method == 'DELETE':
-          handler.delete(*groups)
-        elif method == 'TRACE':
-          handler.trace(*groups)
-        else:
-          handler.error(501)
-      except Exception, e:
-        handler.handle_exception(e, self.__debug)
-    else:
-      response.set_status(404)
-
-
-    response.wsgi_write(start_response)
-    return ['']
-
-  def _init_url_mappings(self, handler_tuples):
-    """Initializes the maps needed for mapping urls to handlers and handlers
-    to urls.
-
-    Args:
-      handler_tuples: list of (URI, RequestHandler) pairs.
-    """
-
-
-
-
-
-    handler_map = {}
-
-    pattern_map = {}
-
-    url_mapping = []
-
-
-    for regexp, handler in handler_tuples:
-
-      try:
-        handler_name = handler.__name__
-      except AttributeError:
-        pass
-      else:
-        handler_map[handler_name] = handler
-
-
-      if not regexp.startswith('^'):
-        regexp = '^' + regexp
-      if not regexp.endswith('$'):
-        regexp += '$'
-
-      if regexp == '^/form$':
-        logging.warning('The URL "/form" is reserved and will not be matched.')
-
-      compiled = re.compile(regexp)
-      url_mapping.append((compiled, handler))
-
-
-      num_groups = len(RE_FIND_GROUPS.findall(regexp))
-      handler_patterns = pattern_map.setdefault(handler, [])
-      handler_patterns.append((compiled, num_groups))
-
-    self._handler_map = handler_map
-    self._pattern_map = pattern_map
-    self._url_mapping = url_mapping
-
-  def get_registered_handler_by_name(self, handler_name):
-    """Returns the handler given the handler's name.
-
-    This uses the application's url mapping.
-
-    Args:
-      handler_name: The __name__ of a handler to return.
-
-    Returns:
-      The handler with the given name.
-
-    Raises:
-      KeyError: If the handler name is not found in the parent application.
-    """
-    try:
-      return self._handler_map[handler_name]
-    except:
-      logging.error('Handler does not map to any urls: %s', handler_name)
-      raise
-
-
-def _django_setup():
-  """Imports and configures Django.
-
-  This can be overridden by defining a function named
-  webapp_django_setup() in the app's appengine_config.py file (see
-  lib_config docs).  Such a function should import and configure
-  Django.
-
-  You can also just configure the Django version to be used by setting
-  webapp_django_version in that file.
-
-  Finally, calling use_library('django', <version>) in that file
-  should also work:
-
-    # Example taken from from
-    # http://code.google.com/appengine/docs/python/tools/libraries.html#Django
-
-    import os
-    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
-
-    from google.appengine.dist import use_library
-    use_library('django', '1.2')
-
-  If your application also imports Django directly it should ensure
-  that the same code is executed before your app imports Django
-  (directly or indirectly).  Perhaps the simplest way to ensure that
-  is to include the following in your main.py (and in each alternate
-  main script):
-
-    from google.appengine.ext.webapp import template
-    import django
-
-  This will ensure that whatever Django setup code you have included
-  in appengine_config.py is executed, as a side effect of importing
-  the webapp.template module.
-  """
+def __django_version_setup():
+  """Selects a particular Django version to load."""
   django_version = _config_handle.django_version
 
   if django_version is not None:
@@ -858,6 +104,49 @@
       pass
 
 
+def _django_setup():
+  """Imports and configures Django.
+
+  This can be overridden by defining a function named
+  webapp_django_setup() in the app's appengine_config.py file (see
+  lib_config docs).  Such a function should import and configure
+  Django.
+
+  In the Python 2.5 runtime, you can also just configure the Django version to
+  be used by setting webapp_django_version in that file.
+
+  Finally, calling use_library('django', <version>) in that file
+  should also work:
+
+    # Example taken from
+    # http://code.google.com/appengine/docs/python/tools/libraries.html#Django
+
+    import os
+    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
+
+    from google.appengine.dist import use_library
+    use_library('django', '1.2')
+
+  In the Python 2.7 runtime, the Django version is specified in your app.yaml
+  file and use_library is not supported.
+
+  If your application also imports Django directly it should ensure
+  that the same code is executed before your app imports Django
+  (directly or indirectly).  Perhaps the simplest way to ensure that
+  is to include the following in your main.py (and in each alternate
+  main script):
+
+    from google.appengine.ext.webapp import template
+    import django
+
+  This will ensure that whatever Django setup code you have included
+  in appengine_config.py is executed, as a side effect of importing
+  the webapp.template module.
+  """
+  if os.environ.get('APPENGINE_RUNTIME') != 'python27':
+    __django_version_setup()
+
+
   import django
 
 
@@ -891,9 +180,21 @@
 
 
 
-_config_handle = lib_config.register(
-    'webapp',
-    {'django_setup': _django_setup,
-     'django_version': None,
-     'add_wsgi_middleware': lambda app: app,
-     })
+if os.environ.get('APPENGINE_RUNTIME') == 'python27':
+
+
+  _config_handle = lib_config.register(
+      'webapp',
+      {'django_setup': _django_setup,
+       'add_wsgi_middleware': lambda app: app,
+       })
+  from webapp2 import *
+else:
+  _config_handle = lib_config.register(
+      'webapp',
+      {'django_setup': _django_setup,
+       'django_version': None,
+       'add_wsgi_middleware': lambda app: app,
+       })
+  from _webapp25 import *
+  from _webapp25 import __doc__
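A minimal usage sketch of the aliasing chosen above, assuming the python27 runtime (where the import resolves to webapp2) and hypothetical handler and route names; on the 2.5 runtime the same code runs against _webapp25:

    from google.appengine.ext import webapp   # webapp2 on python27

    class MainPage(webapp.RequestHandler):
      def get(self):
        self.response.out.write('Hello from webapp')

    application = webapp.WSGIApplication([('/', MainPage)], debug=True)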
diff --git a/google/appengine/ext/webapp/_webapp25.py b/google/appengine/ext/webapp/_webapp25.py
new file mode 100644
index 0000000..597072d
--- /dev/null
+++ b/google/appengine/ext/webapp/_webapp25.py
@@ -0,0 +1,792 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+
+"""An extremely simple WSGI web application framework.
+
+This module exports three primary classes: Request, Response, and
+RequestHandler. You implement a web application by subclassing RequestHandler.
+As WSGI requests come in, they are passed to instances of your RequestHandlers.
+The RequestHandler class provides access to the easy-to-use Request and
+Response objects so you can interpret the request and write the response with
+no knowledge of the esoteric WSGI semantics.  Here is a simple example:
+
+  from google.appengine.ext import webapp
+  from google.appengine.ext.webapp.util import run_wsgi_app
+
+  class MainPage(webapp.RequestHandler):
+    def get(self):
+      self.response.out.write(
+        '<html><body><form action="/hello" method="post">'
+        'Name: <input name="name" type="text" size="20"> '
+        '<input type="submit" value="Say Hello"></form></body></html>')
+
+  class HelloPage(webapp.RequestHandler):
+    def post(self):
+      self.response.headers['Content-Type'] = 'text/plain'
+      self.response.out.write('Hello, %s' % self.request.get('name'))
+
+  application = webapp.WSGIApplication([
+    ('/', MainPage),
+    ('/hello', HelloPage)
+  ], debug=True)
+
+  def main():
+    run_wsgi_app(application)
+
+  if __name__ == "__main__":
+    main()
+
+The WSGIApplication class maps URI regular expressions to your RequestHandler
+classes.  It is a WSGI-compatible application object, so you can use it in
+conjunction with wsgiref to make your web application into, e.g., a CGI
+script or a simple HTTP server, as in the example above.
+
+The framework does not support streaming output. All output from a response
+is stored in memory before it is written.
+"""
+
+
+
+
+import cgi
+import logging
+import re
+import StringIO
+import sys
+import traceback
+import urlparse
+import webob
+import wsgiref.handlers
+import wsgiref.headers
+import wsgiref.util
+
+
+
+wsgiref.handlers.BaseHandler.os_environ = {}
+
+
+RE_FIND_GROUPS = re.compile('\(.*?\)')
+_CHARSET_RE = re.compile(r';\s*charset=([^;\s]*)', re.I)
+
+class Error(Exception):
+  """Base of all exceptions in the webapp module."""
+  pass
+
+
+class CannotReversePattern(Error):
+  """Thrown when a url_pattern cannot be reversed."""
+  pass
+
+
+class NoUrlFoundError(Error):
+  """Thrown when RequestHandler.get_url() fails."""
+  pass
+
+
+class Request(webob.Request):
+  """Abstraction for an HTTP request.
+
+  Properties:
+    uri: the complete URI requested by the user
+    scheme: 'http' or 'https'
+    host: the host, including the port
+    path: the path up to the ';' or '?' in the URL
+    parameters: the part of the URL between the ';' and the '?', if any
+    query: the part of the URL after the '?'
+
+  You can access parsed query and POST values with the get() method; do not
+  parse the query string yourself.
+  """
+
+
+
+
+
+  request_body_tempfile_limit = 0
+
+  uri = property(lambda self: self.url)
+  query = property(lambda self: self.query_string)
+
+  def __init__(self, environ):
+    """Constructs a Request object from a WSGI environment.
+
+    If the charset isn't specified in the Content-Type header, defaults
+    to UTF-8.
+
+    Args:
+      environ: A WSGI-compliant environment dictionary.
+    """
+    match = _CHARSET_RE.search(environ.get('CONTENT_TYPE', ''))
+    if match:
+      charset = match.group(1).lower()
+    else:
+      charset = 'utf-8'
+
+    webob.Request.__init__(self, environ, charset=charset,
+                           unicode_errors= 'ignore', decode_param_names=True)
+
+  def get(self, argument_name, default_value='', allow_multiple=False):
+    """Returns the query or POST argument with the given name.
+
+    We parse the query string and POST payload lazily, so this will be a
+    slower operation on the first call.
+
+    Args:
+      argument_name: the name of the query or POST argument
+      default_value: the value to return if the given argument is not present
+      allow_multiple: return a list of values with the given name (deprecated)
+
+    Returns:
+      If allow_multiple is False (which it is by default), we return the first
+      value with the given name given in the request. If it is True, we always
+      return a list.
+    """
+    param_value = self.get_all(argument_name)
+    if allow_multiple:
+      logging.warning('allow_multiple is a deprecated param, please use the '
+                      'Request.get_all() method instead.')
+    if len(param_value) > 0:
+      if allow_multiple:
+        return param_value
+      return param_value[0]
+    else:
+      if allow_multiple and not default_value:
+        return []
+      return default_value
+
+  def get_all(self, argument_name, default_value=None):
+    """Returns a list of query or POST arguments with the given name.
+
+    We parse the query string and POST payload lazily, so this will be a
+    slower operation on the first call.
+
+    Args:
+      argument_name: the name of the query or POST argument
+      default_value: the value to return if the given argument is not present,
+        None may not be used as a default, if it is then an empty list will be
+        returned instead.
+
+    Returns:
+      A (possibly empty) list of values.
+    """
+    if self.charset:
+      argument_name = argument_name.encode(self.charset)
+
+    if default_value is None:
+      default_value = []
+
+    param_value = self.params.getall(argument_name)
+
+    if param_value is None or len(param_value) == 0:
+      return default_value
+
+    for i in xrange(len(param_value)):
+      if isinstance(param_value[i], cgi.FieldStorage):
+        param_value[i] = param_value[i].value
+
+    return param_value
+
+  def arguments(self):
+    """Returns a list of the arguments provided in the query and/or POST.
+
+    The return value is a list of strings.
+    """
+    return list(set(self.params.keys()))
+
+  def get_range(self, name, min_value=None, max_value=None, default=0):
+    """Parses the given int argument, limiting it to the given range.
+
+    Args:
+      name: the name of the argument
+      min_value: the minimum int value of the argument (if any)
+      max_value: the maximum int value of the argument (if any)
+      default: the default value of the argument if it is not given
+
+    Returns:
+      An int within the given range for the argument
+    """
+    value = self.get(name, default)
+    if value is None:
+      return value
+    try:
+      value = int(value)
+    except ValueError:
+      value = default
+    if value is not None:
+      if max_value is not None:
+        value = min(value, max_value)
+      if min_value is not None:
+        value = max(value, min_value)
+    return value
+
+
+class Response(object):
+  """Abstraction for an HTTP response.
+
+  Properties:
+    out: file pointer for the output stream
+    headers: wsgiref.headers.Headers instance representing the output headers
+  """
+  def __init__(self):
+    """Constructs a response with the default settings."""
+
+
+    self.out = StringIO.StringIO()
+    self.__wsgi_headers = []
+    self.headers = wsgiref.headers.Headers(self.__wsgi_headers)
+    self.headers['Content-Type'] = 'text/html; charset=utf-8'
+    self.headers['Cache-Control'] = 'no-cache'
+
+    self.set_status(200)
+
+  @property
+  def status(self):
+    """Returns current request status code."""
+    return self.__status[0]
+
+  @property
+  def status_message(self):
+    """Returns current request status message."""
+    return self.__status[1]
+
+  def set_status(self, code, message=None):
+    """Sets the HTTP status code of this response.
+
+    Args:
+      message: the HTTP status string to use
+
+    If no status string is given, we use the default from the HTTP/1.1
+    specification.
+    """
+    if not message:
+      message = Response.http_status_message(code)
+    self.__status = (code, message)
+
+  def has_error(self):
+    """Indicates whether the response was an error response."""
+    return self.__status[0] >= 400
+
+  def clear(self):
+    """Clears all data written to the output stream so that it is empty."""
+    self.out.seek(0)
+    self.out.truncate(0)
+
+  def wsgi_write(self, start_response):
+    """Writes this response using WSGI semantics with the given WSGI function.
+
+    Args:
+      start_response: the WSGI-compatible start_response function
+    """
+    body = self.out.getvalue()
+    if isinstance(body, unicode):
+
+
+      body = body.encode('utf-8')
+    elif self.headers.get('Content-Type', '').endswith('; charset=utf-8'):
+
+
+      try:
+
+
+        body.decode('utf-8')
+      except UnicodeError, e:
+        logging.warning('Response written is not UTF-8: %s', e)
+
+    if (self.headers.get('Cache-Control') == 'no-cache' and
+        not self.headers.get('Expires')):
+      self.headers['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
+    self.headers['Content-Length'] = str(len(body))
+
+
+    new_headers = []
+    for header, value in self.__wsgi_headers:
+      if not isinstance(value, basestring):
+        value = unicode(value)
+      if ('\n' in header or '\r' in header or
+          '\n' in value or '\r' in value):
+        logging.warning('Replacing newline in header: %s', repr((header,value)))
+        value = value.replace('\n','').replace('\r','')
+        header = header.replace('\n','').replace('\r','')
+      new_headers.append((header, value))
+    self.__wsgi_headers = new_headers
+
+    write = start_response('%d %s' % self.__status, self.__wsgi_headers)
+    write(body)
+    self.out.close()
+
+  def http_status_message(code):
+    """Returns the default HTTP status message for the given code.
+
+    Args:
+      code: the HTTP code for which we want a message
+    """
+    if not Response.__HTTP_STATUS_MESSAGES.has_key(code):
+      raise Error('Invalid HTTP status code: %d' % code)
+    return Response.__HTTP_STATUS_MESSAGES[code]
+  http_status_message = staticmethod(http_status_message)
+
+  __HTTP_STATUS_MESSAGES = {
+    100: 'Continue',
+    101: 'Switching Protocols',
+    200: 'OK',
+    201: 'Created',
+    202: 'Accepted',
+    203: 'Non-Authoritative Information',
+    204: 'No Content',
+    205: 'Reset Content',
+    206: 'Partial Content',
+    300: 'Multiple Choices',
+    301: 'Moved Permanently',
+    302: 'Moved Temporarily',
+    303: 'See Other',
+    304: 'Not Modified',
+    305: 'Use Proxy',
+    306: 'Unused',
+    307: 'Temporary Redirect',
+    400: 'Bad Request',
+    401: 'Unauthorized',
+    402: 'Payment Required',
+    403: 'Forbidden',
+    404: 'Not Found',
+    405: 'Method Not Allowed',
+    406: 'Not Acceptable',
+    407: 'Proxy Authentication Required',
+    408: 'Request Time-out',
+    409: 'Conflict',
+    410: 'Gone',
+    411: 'Length Required',
+    412: 'Precondition Failed',
+    413: 'Request Entity Too Large',
+    414: 'Request-URI Too Large',
+    415: 'Unsupported Media Type',
+    416: 'Requested Range Not Satisfiable',
+    417: 'Expectation Failed',
+    500: 'Internal Server Error',
+    501: 'Not Implemented',
+    502: 'Bad Gateway',
+    503: 'Service Unavailable',
+    504: 'Gateway Time-out',
+    505: 'HTTP Version not supported'
+  }
+
+
+class RequestHandler(object):
+  """Our base HTTP request handler. Clients should subclass this class.
+
+  Subclasses should override get(), post(), head(), options(), etc to handle
+  different HTTP methods.
+  """
+  def initialize(self, request, response):
+    """Initializes this request handler with the given Request and Response."""
+    self.request = request
+    self.response = response
+
+  def get(self, *args):
+    """Handler method for GET requests."""
+    self.error(405)
+
+  def post(self, *args):
+    """Handler method for POST requests."""
+    self.error(405)
+
+  def head(self, *args):
+    """Handler method for HEAD requests."""
+    self.error(405)
+
+  def options(self, *args):
+    """Handler method for OPTIONS requests."""
+    self.error(405)
+
+  def put(self, *args):
+    """Handler method for PUT requests."""
+    self.error(405)
+
+  def delete(self, *args):
+    """Handler method for DELETE requests."""
+    self.error(405)
+
+  def trace(self, *args):
+    """Handler method for TRACE requests."""
+    self.error(405)
+
+  def error(self, code):
+    """Clears the response output stream and sets the given HTTP error code.
+
+    Args:
+      code: the HTTP status error code (e.g., 501)
+    """
+    self.response.set_status(code)
+    self.response.clear()
+
+  def redirect(self, uri, permanent=False):
+    """Issues an HTTP redirect to the given relative URL.
+
+    Args:
+      uri: a relative or absolute URI (e.g., '../flowers.html')
+      permanent: if true, we use a 301 redirect instead of a 302 redirect
+    """
+    if permanent:
+      self.response.set_status(301)
+    else:
+      self.response.set_status(302)
+    absolute_url = urlparse.urljoin(self.request.uri, uri)
+    self.response.headers['Location'] = str(absolute_url)
+    self.response.clear()
+
+  def handle_exception(self, exception, debug_mode):
+    """Called if this handler throws an exception during execution.
+
+    The default behavior is to call self.error(500) and print a stack trace
+    if debug_mode is True.
+
+    Args:
+      exception: the exception that was thrown
+      debug_mode: True if the web application is running in debug mode
+    """
+    self.error(500)
+    logging.exception(exception)
+    if debug_mode:
+      lines = ''.join(traceback.format_exception(*sys.exc_info()))
+      self.response.clear()
+      self.response.out.write('<pre>%s</pre>' % (cgi.escape(lines, quote=True)))
+
+  @classmethod
+  def new_factory(cls, *args, **kwargs):
+    """Create new request handler factory.
+
+    Use factory method to create reusable request handlers that just
+    require a few configuration parameters to construct.  Also useful
+    for injecting shared state between multiple request handler
+    instances without relying on global variables.  For example, to
+    create a set of post handlers that will do simple text transformations
+    you can write:
+
+      class ChangeTextHandler(webapp.RequestHandler):
+
+        def __init__(self, transform):
+          self.transform = transform
+
+        def post(self):
+          response_text = self.transform(
+              self.request.request.body_file.getvalue())
+          self.response.out.write(response_text)
+
+      application = webapp.WSGIApplication(
+          [('/to_lower', ChangeTextHandler.new_factory(str.lower)),
+           ('/to_upper', ChangeTextHandler.new_factory(str.upper)),
+          ],
+          debug=True)
+
+    Text POSTed to /to_lower will be lower cased.
+    Text POSTed to /to_upper will be upper cased.
+    """
+    def new_instance():
+      return cls(*args, **kwargs)
+    new_instance.__name__ = cls.__name__ + 'Factory'
+    return new_instance
+
+  @classmethod
+  def get_url(cls, *args, **kargs):
+    """Returns the url for the given handler.
+
+    The default implementation uses the patterns passed to the active
+    WSGIApplication to create a url. However, it is different from Django's
+    urlresolvers.reverse() in the following ways:
+      - It does not try to resolve handlers via module loading
+      - It does not support named arguments
+      - It performs some post-processing on the url to remove some regex
+        operators.
+      - It will try to fill in the left-most missing arguments with the args
+        used in the active request.
+
+    Args:
+      args: Parameters for the url pattern's groups.
+      kwargs: Optionally contains 'implicit_args' that can either be a boolean
+              or a tuple. When it is True, it will use the arguments to the
+              active request as implicit arguments. When it is False (default),
+              it will not use any implicit arguments. When it is a tuple, it
+              will use the tuple as the implicit arguments, filling in the
+              left-most args if some are missing from args.
+
+    Returns:
+      The url for this handler/args combination.
+
+    Raises:
+      NoUrlFoundError: No url pattern for this handler has the same
+        number of args that were passed in.
+    """
+
+
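+    # Look up the URL patterns registered for this handler class in the
+    # active application.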
+    app = WSGIApplication.active_instance
+    pattern_map = app._pattern_map
+
+    implicit_args = kwargs.get('implicit_args', ())
+    if implicit_args == True:
+      implicit_args = app.current_request_args
+
+
+
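+    # Only patterns with at least as many groups as the supplied args qualify.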
+    min_params = len(args)
+
+    for pattern_tuple in pattern_map.get(cls, ()):
+      num_params_in_pattern = pattern_tuple[1]
+      if num_params_in_pattern < min_params:
+        continue
+
+      try:
+
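+        # Fill in missing leading arguments from the implicit args, if any.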
+        num_implicit_args = max(0, num_params_in_pattern - len(args))
+        merged_args = implicit_args[:num_implicit_args] + args
+
+        url = _reverse_url_pattern(pattern_tuple[0], *merged_args)
+
+
+
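+        # Strip regex escape characters and optional markers from the result.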
+        url = url.replace('\\', '')
+        url = url.replace('?', '')
+        return url
+      except CannotReversePattern:
+        continue
+
+    logging.warning('get_url failed for Handler name: %r, Args: %r',
+                    cls.__name__, args)
+    raise NoUrlFoundError
+
+
+def _reverse_url_pattern(url_pattern, *args):
+  """Turns a regex that matches a url back into a url by replacing
+  the url pattern's groups with the given args. Removes '^' and '$'
+  from the result.
+
+  Args:
+    url_pattern: A pattern used to match a URL.
+    args: list of values corresponding to groups in url_pattern.
+
+  Returns:
+    A string with url_pattern's groups filled in with values from args.
+
+  Raises:
+     CannotReversePattern if either there aren't enough args to fill
+     url_pattern's groups, or if any arg isn't matched by the regular
+     expression fragment in the corresponding group.
+  """
+
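+  # A one-element list lets expand_group mutate the index, since Python 2
+  # lacks the 'nonlocal' statement.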
+  group_index = [0]
+
+  def expand_group(match):
+    group = match.group(1)
+    try:
+
+      value = str(args[group_index[0]])
+      group_index[0] += 1
+    except IndexError:
+      raise CannotReversePattern('Not enough arguments in url tag')
+
+    if not re.match(group + '$', value):
+      raise CannotReversePattern("Value %r doesn't match (%r)" % (value, group))
+    return value
+
+  result = re.sub(r'\(([^)]+)\)', expand_group, url_pattern.pattern)
+  result = result.replace('^', '')
+  result = result.replace('$', '')
+  return result
+
+
+class RedirectHandler(RequestHandler):
+  """Simple redirection handler.
+
+  Easily configure URLs to redirect to alternate targets.  For example,
+  to configure a web application so that the root URL is always redirected
+  to the /home path, do:
+
+    application = webapp.WSGIApplication(
+        [('/', webapp.RedirectHandler.new_factory('/home', permanent=True)),
+         ('/home', HomeHandler),
+        ],
+        debug=True)
+
+  This handler is also useful for redirecting obsolete URLs to new paths.
+  """
+
+  def __init__(self, path, permanent=False):
+    """Constructor.
+
+    Do not use directly.  Configure using the new_factory method.
+
+    Args:
+      path: Path to redirect to.
+      permanent: if true, we use a 301 redirect instead of a 302 redirect.
+    """
+    self.path = path
+    self.permanent = permanent
+
+  def get(self):
+    self.redirect(self.path, permanent=self.permanent)
+
+
+class WSGIApplication(object):
+  """Wraps a set of webapp RequestHandlers in a WSGI-compatible application.
+
+  To use this class, pass a list of (URI regular expression, RequestHandler)
+  pairs to the constructor, and pass the class instance to a WSGI handler.
+  See the example in the module comments for details.
+
+  The URL mapping is first-match based on the list ordering.
+  """
+
+  REQUEST_CLASS = Request
+  RESPONSE_CLASS = Response
+
+  def __init__(self, url_mapping, debug=False):
+    """Initializes this application with the given URL mapping.
+
+    Args:
+      url_mapping: list of (URI regular expression, RequestHandler) pairs
+                   (e.g., [('/', ReqHan)])
+      debug: if true, we send Python stack traces to the browser on errors
+    """
+    self._init_url_mappings(url_mapping)
+    self.__debug = debug
+
+
+    WSGIApplication.active_instance = self
+    self.current_request_args = ()
+
+  def __call__(self, environ, start_response):
+    """Called by WSGI when a request comes in."""
+    request = self.REQUEST_CLASS(environ)
+    response = self.RESPONSE_CLASS()
+
+
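+    # Remember the active application so get_url() can reverse URLs.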
+    WSGIApplication.active_instance = self
+
+
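+    # Find the first URL pattern that matches the request path.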
+    handler = None
+    groups = ()
+    for regexp, handler_class in self._url_mapping:
+      match = regexp.match(request.path)
+      if match:
+        handler = handler_class()
+
+
+        handler.initialize(request, response)
+        groups = match.groups()
+        break
+
+
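+    # Save the matched groups so get_url() can use them as implicit args.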
+    self.current_request_args = groups
+
+
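+    # Dispatch to the method matching the HTTP verb, or 404 if nothing matched.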
+    if handler:
+      try:
+        method = environ['REQUEST_METHOD']
+        if method == 'GET':
+          handler.get(*groups)
+        elif method == 'POST':
+          handler.post(*groups)
+        elif method == 'HEAD':
+          handler.head(*groups)
+        elif method == 'OPTIONS':
+          handler.options(*groups)
+        elif method == 'PUT':
+          handler.put(*groups)
+        elif method == 'DELETE':
+          handler.delete(*groups)
+        elif method == 'TRACE':
+          handler.trace(*groups)
+        else:
+          handler.error(501)
+      except Exception, e:
+        handler.handle_exception(e, self.__debug)
+    else:
+      response.set_status(404)
+
+
+    response.wsgi_write(start_response)
+    return ['']
+
+  def _init_url_mappings(self, handler_tuples):
+    """Initializes the maps needed for mapping urls to handlers and handlers
+    to urls.
+
+    Args:
+      handler_tuples: list of (URI, RequestHandler) pairs.
+    """
+
+
+
+
+
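+    # handler_map maps handler class names to handler classes, pattern_map
+    # maps handler classes to their compiled patterns (for get_url), and
+    # url_mapping is the ordered list of (regexp, handler) used for dispatch.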
+    handler_map = {}
+
+    pattern_map = {}
+
+    url_mapping = []
+
+
+    for regexp, handler in handler_tuples:
+
+      try:
+        handler_name = handler.__name__
+      except AttributeError:
+        pass
+      else:
+        handler_map[handler_name] = handler
+
+
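+      # Anchor the pattern so it must match the entire request path.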
+      if not regexp.startswith('^'):
+        regexp = '^' + regexp
+      if not regexp.endswith('$'):
+        regexp += '$'
+
+      if regexp == '^/form$':
+        logging.warning('The URL "/form" is reserved and will not be matched.')
+
+      compiled = re.compile(regexp)
+      url_mapping.append((compiled, handler))
+
+
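+      # Record the pattern's group count so get_url() can reverse it later.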
+      num_groups = len(RE_FIND_GROUPS.findall(regexp))
+      handler_patterns = pattern_map.setdefault(handler, [])
+      handler_patterns.append((compiled, num_groups))
+
+    self._handler_map = handler_map
+    self._pattern_map = pattern_map
+    self._url_mapping = url_mapping
+
+  def get_registered_handler_by_name(self, handler_name):
+    """Returns the handler given the handler's name.
+
+    This uses the application's url mapping.
+
+    Args:
+      handler_name: The __name__ of a handler to return.
+
+    Returns:
+      The handler with the given name.
+
+    Raises:
+      KeyError: If the handler name is not found in the parent application.
+    """
+    try:
+      return self._handler_map[handler_name]
+    except KeyError:
+      logging.error('Handler does not map to any urls: %s', handler_name)
+      raise
diff --git a/google/appengine/ext/webapp/blobstore_handlers.py b/google/appengine/ext/webapp/blobstore_handlers.py
index fa8e692..4664213 100755
--- a/google/appengine/ext/webapp/blobstore_handlers.py
+++ b/google/appengine/ext/webapp/blobstore_handlers.py
@@ -43,6 +43,7 @@
 
 import cgi
 import cStringIO
+import os
 import re
 import sys
 
@@ -77,15 +78,69 @@
 class Error(Exception):
   """Base class for all errors in blobstore handlers module."""
 
-
-class RangeFormatError(webapp.Error):
-  """Raised when Range header incorrectly formatted."""
+if os.environ.get('APPENGINE_RUNTIME') == 'python27':
+  class RangeFormatError(Error):
+    """Raised when Range header incorrectly formatted."""
+else:
+  class RangeFormatError(webapp.Error):
+    """Raised when Range header incorrectly formatted."""
 
 
 class UnsupportedRangeFormatError(RangeFormatError):
   """Raised when Range format is correct, but not supported."""
 
 
+def _serialize_range(start, end):
+  """Return a string suitable for use as a value in a RANGE header.
+
+  Args:
+    start: The start of the bytes range e.g. 50.
+    end: The end of the bytes range e.g. 100. This value is inclusive and may
+      be None if the end of the range is not specified.
+
+  Returns:
+    Returns a string (e.g. "bytes=50-100") that represents a serialized RANGE
+    header value.
+  """
+  if hasattr(byterange.Range, 'serialize_bytes'):
+    return byterange.Range.serialize_bytes(_BYTES_UNIT, [(start, end)])
+  else:
+    if end is not None:
+      end += 1
+    return str(byterange.Range([(start, end)]))
+
+
+def _parse_bytes(range_header):
+  """Returns the a list of byte ranges given the value of a RANGE header."""
+
+
+
+
+
+
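+  # The python27 runtime's byterange.Range.parse_bytes() returns end-exclusive
+  # ranges; convert them to the inclusive (start, end) form used here.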
+  if os.environ.get('APPENGINE_RUNTIME') == 'python27':
+    parse_result = byterange.Range.parse_bytes(range_header)
+    if parse_result is None:
+      return None
+
+    ranges = []
+    for start, end in parse_result[1]:
+      if end is not None:
+        end -= 1
+      ranges.append((start, end))
+    return parse_result[0], ranges
+  else:
+
+
+
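+    # Redirect stdout while parsing: some byterange implementations print
+    # diagnostics from parse_bytes(), which must not leak into the response.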
+    original_stdout = sys.stdout
+    sys.stdout = cStringIO.StringIO()
+    try:
+      return byterange.Range.parse_bytes(range_header)
+    finally:
+      sys.stdout = original_stdout
+
+
 def _check_ranges(start, end, use_range_set, use_range, range_header):
   """Set the range header.
 
@@ -119,7 +174,7 @@
       if start > end:
         raise ValueError('start must be < end.')
 
-    range_indexes = byterange.Range.serialize_bytes(_BYTES_UNIT, [(start, end)])
+    range_indexes = _serialize_range(start, end)
 
 
   if use_range_set and use_range and use_indexes:
@@ -235,10 +290,12 @@
   def get_range(self):
     """Get range from header if it exists.
 
+    A range header of "bytes: 0-100" would return (0, 100).
+
     Returns:
       Tuple (start, end):
         start: Start index.  None if there is None.
-        end: End index.  None if there is None.
+        end: End index (inclusive).  None if there is None.
       None if there is no request header.
 
     Raises:
@@ -251,15 +308,7 @@
       return None
 
     try:
-
-
-
-      original_stdout = sys.stdout
-      sys.stdout = cStringIO.StringIO()
-      try:
-        parsed_range = byterange.Range.parse_bytes(range_header)
-      finally:
-        sys.stdout = original_stdout
+      parsed_range = _parse_bytes(range_header)
     except TypeError, err:
       raise RangeFormatError('Invalid range header: %s' % err)
     if parsed_range is None:
diff --git a/google/appengine/runtime/apiproxy.py b/google/appengine/runtime/apiproxy.py
index c48d609..77b5c47 100755
--- a/google/appengine/runtime/apiproxy.py
+++ b/google/appengine/runtime/apiproxy.py
@@ -230,3 +230,23 @@
 def CancelApiCalls():
   """Cancels all outstanding API calls."""
   _apphosting_runtime___python__apiproxy.CancelApiCalls()
+
+
+def GetRequestCpuUsage():
+  """Returns the number of megacycles used so far by this request.
+
+  Returns:
+    The number of megacycles used so far by this request. Does not include CPU
+    used by API calls.
+  """
+  return _apphosting_runtime___python__apiproxy.get_request_cpu_usage()
+
+
+def GetRequestApiCpuUsage():
+  """Returns the number of megacycles used by API calls.
+
+  Returns:
+    The number of megacycles used by API calls so far during this request. Does
+    not include CPU used by the request code itself.
+  """
+  return _apphosting_runtime___python__apiproxy.get_request_api_cpu_usage()
diff --git a/google/appengine/tools/appcfg.py b/google/appengine/tools/appcfg.py
index 938ecae..ad81e19 100755
--- a/google/appengine/tools/appcfg.py
+++ b/google/appengine/tools/appcfg.py
@@ -108,13 +108,6 @@
 SDK_PRODUCT = 'appcfg_py'
 
 
-
-_api_versions = os.environ.get('GOOGLE_TEST_API_VERSIONS', '1')
-_options = validation.Options(*_api_versions.split(','))
-appinfo.AppInfoExternal.ATTRIBUTES[appinfo.API_VERSION] = _options
-del _api_versions, _options
-
-
 DAY = 24*3600
 SUNDAY = 6
 
diff --git a/google/appengine/tools/dev-channel-js.js b/google/appengine/tools/dev-channel-js.js
index 7330ea5..ea7f7da 100755
--- a/google/appengine/tools/dev-channel-js.js
+++ b/google/appengine/tools/dev-channel-js.js
@@ -164,17 +164,19 @@
   return fn.call.apply(fn.bind, arguments)
 };
 goog.bindJs_ = function(fn, selfObj, var_args) {
-  var context = selfObj || goog.global;
+  if(!fn) {
+    throw Error();
+  }
   if(arguments.length > 2) {
     var boundArgs = Array.prototype.slice.call(arguments, 2);
     return function() {
       var newArgs = Array.prototype.slice.call(arguments);
       Array.prototype.unshift.apply(newArgs, boundArgs);
-      return fn.apply(context, newArgs)
+      return fn.apply(selfObj, newArgs)
     }
   }else {
     return function() {
-      return fn.apply(context, arguments)
+      return fn.apply(selfObj, arguments)
     }
   }
 };
@@ -521,7 +523,7 @@
   opt_protectEscapedCharacters && (str = goog.string.htmlEscape(str));
   return str
 };
-goog.string.specialEscapeChars_ = {"\000":"\\0", "\u0008":"\\b", "\u000c":"\\f", "\n":"\\n", "\r":"\\r", "\t":"\\t", "\u000b":"\\x0B", '"':'\\"', "\\":"\\\\"};
+goog.string.specialEscapeChars_ = {"\x00":"\\0", "\u0008":"\\b", "\u000c":"\\f", "\n":"\\n", "\r":"\\r", "\t":"\\t", "\u000b":"\\x0B", '"':'\\"', "\\":"\\\\"};
 goog.string.jsEscapeCache_ = {"'":"\\'"};
 goog.string.quote = function(s) {
   s = String(s);
@@ -998,6 +1000,15 @@
 goog.array.compare = function(arr1, arr2, opt_equalsFn) {
   return goog.array.equals(arr1, arr2, opt_equalsFn)
 };
+goog.array.compare3 = function(arr1, arr2, opt_compareFn) {
+  for(var compare = opt_compareFn || goog.array.defaultCompare, l = Math.min(arr1.length, arr2.length), i = 0;i < l;i++) {
+    var result = compare(arr1[i], arr2[i]);
+    if(result != 0) {
+      return result
+    }
+  }
+  return goog.array.defaultCompare(arr1.length, arr2.length)
+};
 goog.array.defaultCompare = function(a, b) {
   return a > b ? 1 : a < b ? -1 : 0
 };
@@ -1292,7 +1303,7 @@
   }
   return transposed
 };
-goog.object.PROTOTYPE_FIELDS_ = ["constructor", "hasOwnProperty", "isPrototypeOf", "propertyIsEnumerable", "toLocaleString", "toString", "valueOf"];
+goog.object.PROTOTYPE_FIELDS_ = "constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",");
 goog.object.extend = function(target, var_args) {
   for(var key, source, i = 1;i < arguments.length;i++) {
     source = arguments[i];
@@ -1547,7 +1558,7 @@
 goog.dom.$$ = goog.dom.getElementsByTagNameAndClass;
 goog.dom.setProperties = function(element, properties) {
   goog.object.forEach(properties, function(val, key) {
-    key == "style" ? element.style.cssText = val : key == "class" ? element.className = val : key == "for" ? element.htmlFor = val : key in goog.dom.DIRECT_ATTRIBUTE_MAP_ ? element.setAttribute(goog.dom.DIRECT_ATTRIBUTE_MAP_[key], val) : element[key] = val
+    key == "style" ? element.style.cssText = val : key == "class" ? element.className = val : key == "for" ? element.htmlFor = val : key in goog.dom.DIRECT_ATTRIBUTE_MAP_ ? element.setAttribute(goog.dom.DIRECT_ATTRIBUTE_MAP_[key], val) : goog.string.startsWith(key, "aria-") ? element.setAttribute(key, val) : element[key] = val
   })
 };
 goog.dom.DIRECT_ATTRIBUTE_MAP_ = {cellpadding:"cellPadding", cellspacing:"cellSpacing", colspan:"colSpan", rowspan:"rowSpan", valign:"vAlign", height:"height", width:"width", usemap:"useMap", frameborder:"frameBorder", maxlength:"maxLength", type:"type"};
@@ -1956,13 +1967,14 @@
 };
 goog.dom.findNodes_ = function(root, p, rv, findOne) {
   if(root != null) {
-    for(var i = 0, child;child = root.childNodes[i];i++) {
+    for(var child = root.firstChild;child;) {
       if(p(child) && (rv.push(child), findOne)) {
         return!0
       }
       if(goog.dom.findNodes_(child, p, rv, findOne)) {
         return!0
       }
+      child = child.nextSibling
     }
   }
   return!1
@@ -2464,6 +2476,26 @@
   };
   return iter
 };
+goog.iter.cycle = function(iterable) {
+  var baseIterator = goog.iter.toIterator(iterable), cache = [], cacheIndex = 0, iter = new goog.iter.Iterator, useCache = !1;
+  iter.next = function() {
+    var returnElement = null;
+    if(!useCache) {
+      try {
+        return returnElement = baseIterator.next(), cache.push(returnElement), returnElement
+      }catch(e) {
+        if(e != goog.iter.StopIteration || goog.array.isEmpty(cache)) {
+          throw e;
+        }
+        useCache = !0
+      }
+    }
+    returnElement = cache[cacheIndex];
+    cacheIndex = (cacheIndex + 1) % cache.length;
+    return returnElement
+  };
+  return iter
+};
 goog.structs.getCount = function(col) {
   return typeof col.getCount == "function" ? col.getCount() : goog.isArrayLike(col) || goog.isString(col) ? col.length : goog.object.getCount(col)
 };
@@ -3110,6 +3142,10 @@
 goog.debug.Logger.getLogger = function(name) {
   return goog.debug.LogManager.getLogger(name)
 };
+goog.debug.Logger.logToProfilers = function(msg) {
+  goog.global.console && (goog.global.console.timeStamp ? goog.global.console.timeStamp(msg) : goog.global.console.markTimeline && goog.global.console.markTimeline(msg));
+  goog.global.msWriteProfilerMark && goog.global.msWriteProfilerMark(msg)
+};
 goog.debug.Logger.prototype.getParent = function() {
   return this.parent_
 };
@@ -3158,11 +3194,8 @@
 goog.debug.Logger.prototype.finest = function(msg, opt_exception) {
   this.log(goog.debug.Logger.Level.FINEST, msg, opt_exception)
 };
-goog.debug.Logger.prototype.logToSpeedTracer_ = function(msg) {
-  goog.global.console && goog.global.console.markTimeline && goog.global.console.markTimeline(msg)
-};
 goog.debug.Logger.prototype.doLogRecord_ = function(logRecord) {
-  this.logToSpeedTracer_("log:" + logRecord.getMessage());
+  goog.debug.Logger.logToProfilers("log:" + logRecord.getMessage());
   if(goog.debug.Logger.ENABLE_HIERARCHY) {
     for(var target = this;target;) {
       target.callPublish_(logRecord), target = target.getParent()
@@ -3497,7 +3530,10 @@
   }
   function getProxy() {
     var f = function(eventObject) {
-      return proxyCallbackFunction.call(f.src, f.key, eventObject)
+      var v = proxyCallbackFunction.call(f.src, f.key, eventObject);
+      if(!v) {
+        return v
+      }
     };
     return f
   }
@@ -4943,7 +4979,7 @@
   this.ignoreCase_ = !!opt_ignoreCase
 };
 goog.Uri.QueryData.prototype.ensureKeyMapInitialized_ = function() {
-  if(!this.keyMap_ && (this.keyMap_ = new goog.structs.Map, this.encodedQuery_)) {
+  if(!this.keyMap_ && (this.keyMap_ = new goog.structs.Map, this.count_ = 0, this.encodedQuery_)) {
     for(var pairs = this.encodedQuery_.split("&"), i = 0;i < pairs.length;i++) {
       var indexOfEquals = pairs[i].indexOf("="), name = null, value = null;
       indexOfEquals >= 0 ? (name = pairs[i].substring(0, indexOfEquals), value = pairs[i].substring(indexOfEquals + 1)) : name = pairs[i];
diff --git a/google/appengine/tools/dev_appserver.py b/google/appengine/tools/dev_appserver.py
index 3ac72d8..181952c 100755
--- a/google/appengine/tools/dev_appserver.py
+++ b/google/appengine/tools/dev_appserver.py
@@ -4129,9 +4129,8 @@
         dispatcher = MatcherDispatcher(login_url,
                                        [implicit_matcher, explicit_matcher])
 
-        if require_indexes:
 
-          dev_appserver_index.SetupIndexes(config.application, root_path)
+        dev_appserver_index.SetupIndexes(config.application, root_path)
 
 
 
diff --git a/google/appengine/tools/dev_appserver_channel.py b/google/appengine/tools/dev_appserver_channel.py
index a1b5ee3..a126338 100755
--- a/google/appengine/tools/dev_appserver_channel.py
+++ b/google/appengine/tools/dev_appserver_channel.py
@@ -91,7 +91,7 @@
           Defaults to None.
       """
 
-      outfile.write('Status: 200\n\n')
+      outfile.write('Status: 200\r\n')
 
       (unused_scheme, unused_netloc,
        path, query,
@@ -102,8 +102,10 @@
 
       if page == 'jsapi':
         path = os.path.join(os.path.dirname(__file__), 'dev-channel-js.js')
+        outfile.write('Content-type: text/javascript\r\n\r\n')
         outfile.write(open(path).read())
       elif page == 'dev':
+        outfile.write('\r\n')
         id = param_dict['channel'][0]
         command = param_dict['command'][0]
 
diff --git a/google/appengine/tools/dev_appserver_index.py b/google/appengine/tools/dev_appserver_index.py
index f92d31c..089edaf 100755
--- a/google/appengine/tools/dev_appserver_index.py
+++ b/google/appengine/tools/dev_appserver_index.py
@@ -33,6 +33,7 @@
 from google.appengine.api import datastore_admin
 from google.appengine.api import yaml_errors
 from google.appengine.datastore import datastore_index
+from google.appengine.datastore import entity_pb
 
 import yaml
 
@@ -311,7 +312,10 @@
   created = 0
   for key, index in requested.iteritems():
     if key not in existing:
-      datastore_admin.CreateIndex(index)
+      id = datastore_admin.CreateIndex(index)
+      index.set_id(id)
+      index.set_state(entity_pb.CompositeIndex.READ_WRITE)
+      datastore_admin.UpdateIndex(index)
       created += 1
 
 
diff --git a/google/storage/speckle/proto/client_pb2.py b/google/storage/speckle/proto/client_pb2.py
index d398911..edd30f6 100755
--- a/google/storage/speckle/proto/client_pb2.py
+++ b/google/storage/speckle/proto/client_pb2.py
@@ -28,7 +28,7 @@
 DESCRIPTOR = descriptor.FileDescriptor(
   name='storage/speckle/proto/client.proto',
   package='speckle',
-  serialized_pb='\n\"storage/speckle/proto/client.proto\x12\x07speckle\"\xb6\x01\n\x11\x42indVariableProto\x12\r\n\x05value\x18\x01 \x01(\x0c\x12\x0c\n\x04type\x18\x02 \x01(\x05\x12\x10\n\x08position\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12;\n\tdirection\x18\x05 \x01(\x0e\x32$.speckle.BindVariableProto.Direction:\x02IN\"\'\n\tDirection\x12\x06\n\x02IN\x10\x01\x12\x07\n\x03OUT\x10\x02\x12\t\n\x05INOUT\x10\x03\"\x8c\x03\n\x0bResultProto\x12\"\n\x04rows\x18\x01 \x01(\x0b\x32\x14.speckle.RowSetProto\x12\x14\n\x0crows_updated\x18\x02 \x01(\x03\x12\x16\n\x0egenerated_keys\x18\x03 \x03(\x0c\x12\'\n\x08warnings\x18\x04 \x03(\x0b\x32\x15.speckle.SqlException\x12,\n\rsql_exception\x18\x05 \x01(\x0b\x32\x15.speckle.SqlException\x12\x14\n\x0cstatement_id\x18\x06 \x01(\x04\x12\x18\n\tmore_rows\x18\x07 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0cmore_results\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x33\n\x0foutput_variable\x18\t \x03(\x0b\x32\x1a.speckle.BindVariableProto\x12\x1a\n\x12\x62\x61tch_rows_updated\x18\n \x03(\x03\x12\x36\n\x12parameter_metadata\x18\x0b \x03(\x0b\x32\x1a.speckle.ParameterMetadata\"\xf1\x05\n\x07OpProto\x12%\n\x04type\x18\x01 \x02(\x0e\x32\x17.speckle.OpProto.OpType\x12\x0f\n\x07\x63\x61talog\x18\x02 \x01(\t\x12\x0b\n\x03sql\x18\x03 \x01(\t\x12%\n\tsavepoint\x18\x04 \x01(\x0b\x32\x12.speckle.SavePoint\x12\x13\n\x0b\x61uto_commit\x18\x05 \x01(\x08\x12\x11\n\tread_only\x18\x06 \x01(\x08\x12G\n\x1btransaction_isolation_level\x18\x07 \x01(\x0e\x32\".speckle.TransactionIsolationLevel\x12\x14\n\x0cstatement_id\x18\x08 \x01(\x04\x12\x12\n\nrequest_id\x18\t \x01(\x04\"\xde\x03\n\x06OpType\x12\x0e\n\nNATIVE_SQL\x10\x01\x12\x0c\n\x08ROLLBACK\x10\x02\x12\x11\n\rSET_SAVEPOINT\x10\x03\x12\x13\n\x0fSET_AUTO_COMMIT\x10\x04\x12\x11\n\rSET_READ_ONLY\x10\x05\x12#\n\x1fSET_TRANSACTION_ISOLATION_LEVEL\x10\x06\x12\n\n\x06\x43OMMIT\x10\x07\x12\x0f\n\x0bSET_CATALOG\x10\x08\x12\x13\n\x0f\x43LOSE_STATEMENT\x10\t\x12\x08\n\x04PING\x10\n\x12\x0f\n\x0bNEXT_RESULT\x10\x0b\x12\t\n\x05RETRY\x10\x0c\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE13\x10\r\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE14\x10\x0e\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE15\x10\x0f\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE16\x10\x10\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE17\x10\x11\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE18\x10\x12\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE19\x10\x13\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE20\x10\x14\"%\n\tSavePoint\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x02(\t\"c\n\x0cSqlException\x12\x0f\n\x07message\x18\x01 \x02(\t\x12\x0f\n\x04\x63ode\x18\x02 \x02(\x05:\x01\x30\x12\x11\n\tsql_state\x18\x03 \x01(\t\x12\x1e\n\x16\x61pplication_error_code\x18\x04 \x01(\x05\"+\n\nTupleProto\x12\x0e\n\x06values\x18\x01 \x03(\x0c\x12\r\n\x05nulls\x18\x02 \x03(\x05\"\xc0\x03\n\x0b\x43olumnProto\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\r\n\x05label\x18\x02 \x01(\t\x12\x10\n\x04type\x18\x03 \x01(\x05:\x02\x31\x32\x12\x12\n\ntable_name\x18\x04 \x01(\t\x12\x13\n\x0bschema_name\x18\x05 \x01(\t\x12\x14\n\x0c\x63\x61talog_name\x18\x06 \x01(\t\x12\x14\n\tprecision\x18\x07 \x01(\x05:\x01\x30\x12\x10\n\x05scale\x18\x08 \x01(\x05:\x01\x30\x12\x10\n\x08nullable\x18\t \x01(\x08\x12\x12\n\nsearchable\x18\n \x01(\x08\x12\x14\n\x0c\x64isplay_size\x18\x0b \x01(\x05\x12\x1d\n\x0e\x61uto_increment\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x1d\n\x0e\x63\x61se_sensitive\x18\r \x01(\x08:\x05\x66\x61lse\x12\x17\n\x08\x63urrency\x18\x0e \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x64\x65\x66initely_writable\x18\x0f 
\x01(\x08:\x05\x66\x61lse\x12\x18\n\tread_only\x18\x10 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x06signed\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x08writable\x18\x12 \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x10\x63olumn_type_name\x18\x13 \x01(\t:\x00\"Y\n\x0bRowSetProto\x12%\n\x07\x63olumns\x18\x01 \x03(\x0b\x32\x14.speckle.ColumnProto\x12#\n\x06tuples\x18\x02 \x03(\x0b\x32\x13.speckle.TupleProto\"\x9c\x36\n\x19JdbcDatabaseMetaDataProto\x12*\n\x1b\x61ll_procedures_are_callable\x18\x01 \x01(\x08:\x05\x66\x61lse\x12(\n\x19\x61ll_tables_are_selectable\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x39\n*auto_commit_failure_closes_all_result_sets\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x38\n)data_definition_causes_transaction_commit\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x36\n\'data_definition_ignored_in_transactions\x18\x05 \x01(\x08:\x05\x66\x61lse\x12.\n\x1f\x64oes_max_row_size_include_blobs\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x11\x63\x61talog_separator\x18\x07 \x01(\t\x12\x14\n\x0c\x63\x61talog_term\x18\x08 \x01(\t\x12!\n\x16\x64\x61tabase_major_version\x18\t \x01(\x05:\x01\x30\x12!\n\x16\x64\x61tabase_minor_version\x18\n \x01(\x05:\x01\x30\x12&\n\x15\x64\x61tabase_product_name\x18\x0b \x01(\t:\x07Speckle\x12\"\n\x18\x64\x61tabase_product_version\x18\x0c \x01(\t:\x00\x12u\n\x1d\x64\x65\x66\x61ult_transaction_isolation\x18\r \x01(\x0e\x32\".speckle.TransactionIsolationLevel:*TRANSACTIONISOLATIONLEVEL_TRANSACTION_NONE\x12\x1f\n\x15\x65xtra_name_characters\x18\x0e \x01(\t:\x00\x12!\n\x17identifier_quote_string\x18\x0f \x01(\t:\x00\x12\x1d\n\x12jdbc_major_version\x18\x10 \x01(\x05:\x01\x31\x12\x1d\n\x12jdbc_minor_version\x18\x11 \x01(\x05:\x01\x30\x12$\n\x19max_binary_literal_length\x18\x12 \x01(\x05:\x01\x30\x12\"\n\x17max_catalog_name_length\x18\x13 \x01(\x05:\x01\x30\x12\"\n\x17max_char_literal_length\x18\x14 \x01(\x05:\x01\x30\x12!\n\x16max_column_name_length\x18\x15 \x01(\x05:\x01\x30\x12\"\n\x17max_columns_in_group_by\x18\x16 \x01(\x05:\x01\x30\x12\x1f\n\x14max_columns_in_index\x18\x17 \x01(\x05:\x01\x30\x12\"\n\x17max_columns_in_order_by\x18\x18 \x01(\x05:\x01\x30\x12 \n\x15max_columns_in_select\x18\x19 \x01(\x05:\x01\x30\x12\x1f\n\x14max_columns_in_table\x18\x1a \x01(\x05:\x01\x30\x12\x1a\n\x0fmax_connections\x18\x1b \x01(\x05:\x01\x30\x12!\n\x16max_cursor_name_length\x18\x1c \x01(\x05:\x01\x30\x12\x1b\n\x10max_index_length\x18\x1d \x01(\x05:\x01\x30\x12$\n\x19max_procedure_name_length\x18\x1e \x01(\x05:\x01\x30\x12\x17\n\x0cmax_row_size\x18\x1f \x01(\x05:\x01\x30\x12!\n\x16max_schema_name_length\x18  \x01(\x05:\x01\x30\x12\x1f\n\x14max_statement_length\x18! \x01(\x05:\x01\x30\x12\x19\n\x0emax_statements\x18\" \x01(\x05:\x01\x30\x12 \n\x15max_table_name_length\x18# \x01(\x05:\x01\x30\x12\x1f\n\x14max_tables_in_select\x18$ \x01(\x05:\x01\x30\x12\x1f\n\x14max_user_name_length\x18% \x01(\x05:\x01\x30\x12\x1b\n\x11numeric_functions\x18& \x01(\t:\x00\x12\x18\n\x0eprocedure_term\x18\' \x01(\t:\x00\x12j\n\x15resultset_holdability\x18( \x01(\x0e\x32\x1d.speckle.ResultSetHoldability:,RESULTSETHOLDABILITY_CLOSE_CURSORS_AT_COMMIT\x12i\n\x0erowid_lifetime\x18) \x01(\x0e\x32\x30.speckle.JdbcDatabaseMetaDataProto.RowIdLifetime:\x1fROWIDLIFETIME_ROWID_UNSUPPORTED\x12\x14\n\x0csql_keywords\x18* \x01(\t\x12\x63\n\x0esql_state_type\x18+ \x01(\x0e\x32/.speckle.JdbcDatabaseMetaDataProto.SqlStateType:\x1aSQLSTATETYPE_SQL_STATE_SQL\x12\x15\n\x0bschema_term\x18, \x01(\t:\x00\x12\x1c\n\x14search_string_escape\x18- \x01(\t\x12\x1a\n\x10string_functions\x18. 
\x01(\t:\x00\x12\x1a\n\x10system_functions\x18/ \x01(\t:\x00\x12\x1d\n\x13time_date_functions\x18\x30 \x01(\t:\x00\x12\x13\n\tuser_name\x18\x31 \x01(\t:\x00\x12\x1f\n\x10\x63\x61talog_at_start\x18\x32 \x01(\x08:\x05\x66\x61lse\x12#\n\x14locators_update_copy\x18\x33 \x01(\x08:\x05\x66\x61lse\x12)\n\x1anull_plus_non_null_is_null\x18\x34 \x01(\x08:\x05\x66\x61lse\x12&\n\x17nulls_are_sorted_at_end\x18\x35 \x01(\x08:\x05\x66\x61lse\x12(\n\x19nulls_are_sorted_at_start\x18\x36 \x01(\x08:\x05\x66\x61lse\x12$\n\x15nulls_are_sorted_high\x18\x37 \x01(\x08:\x05\x66\x61lse\x12#\n\x14nulls_are_sorted_low\x18\x38 \x01(\x08:\x05\x66\x61lse\x12,\n\x1dstores_lower_case_identifiers\x18\x39 \x01(\x08:\x05\x66\x61lse\x12\x33\n$stores_lower_case_quoted_identifiers\x18: \x01(\x08:\x05\x66\x61lse\x12,\n\x1dstores_mixed_case_identifiers\x18; \x01(\x08:\x05\x66\x61lse\x12\x33\n$stores_mixed_case_quoted_identifiers\x18< \x01(\x08:\x05\x66\x61lse\x12,\n\x1dstores_upper_case_identifiers\x18= \x01(\x08:\x05\x66\x61lse\x12\x33\n$stores_upper_case_quoted_identifiers\x18> \x01(\x08:\x05\x66\x61lse\x12.\n\x1fsupports_ansi92_entry_level_sql\x18? \x01(\x08:\x05\x66\x61lse\x12\'\n\x18supports_ansi92_full_sql\x18@ \x01(\x08:\x05\x66\x61lse\x12/\n supports_ansi92_intermediate_sql\x18\x41 \x01(\x08:\x05\x66\x61lse\x12\x33\n$supports_alter_table_with_add_column\x18\x42 \x01(\x08:\x05\x66\x61lse\x12\x34\n%supports_alter_table_with_drop_column\x18\x43 \x01(\x08:\x05\x66\x61lse\x12%\n\x16supports_batch_updates\x18\x44 \x01(\x08:\x05\x66\x61lse\x12\x35\n&supports_catalogs_in_data_manipulation\x18\x45 \x01(\x08:\x05\x66\x61lse\x12\x35\n&supports_catalogs_in_index_definitions\x18\x46 \x01(\x08:\x05\x66\x61lse\x12\x39\n*supports_catalogs_in_privilege_definitions\x18G \x01(\x08:\x05\x66\x61lse\x12\x33\n$supports_catalogs_in_procedure_calls\x18H \x01(\x08:\x05\x66\x61lse\x12\x35\n&supports_catalogs_in_table_definitions\x18I \x01(\x08:\x05\x66\x61lse\x12\'\n\x18supports_column_aliasing\x18J \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10supports_convert\x18K \x01(\x08:\x05\x66\x61lse\x12(\n\x19supports_core_sql_grammar\x18L \x01(\x08:\x05\x66\x61lse\x12-\n\x1esupports_correlated_subqueries\x18M \x01(\x08:\x05\x66\x61lse\x12J\n;supports_data_definition_and_data_manipulation_transactions\x18N \x01(\x08:\x05\x66\x61lse\x12;\n,supports_data_manipulation_transactions_only\x18O \x01(\x08:\x05\x66\x61lse\x12\x39\n*supports_different_table_correlation_names\x18P \x01(\x08:\x05\x66\x61lse\x12/\n supports_expressions_in_order_by\x18Q \x01(\x08:\x05\x66\x61lse\x12,\n\x1dsupports_extended_sql_grammar\x18R \x01(\x08:\x05\x66\x61lse\x12(\n\x19supports_full_outer_joins\x18S \x01(\x08:\x05\x66\x61lse\x12*\n\x1bsupports_get_generated_keys\x18T \x01(\x08:\x05\x66\x61lse\x12 \n\x11supports_group_by\x18U \x01(\x08:\x05\x66\x61lse\x12.\n\x1fsupports_group_by_beyond_select\x18V \x01(\x08:\x05\x66\x61lse\x12*\n\x1bsupports_group_by_unrelated\x18W \x01(\x08:\x05\x66\x61lse\x12\x36\n\'supports_integrity_enhancement_facility\x18X \x01(\x08:\x05\x66\x61lse\x12*\n\x1bsupports_like_escape_clause\x18Y \x01(\x08:\x05\x66\x61lse\x12+\n\x1csupports_limited_outer_joins\x18Z \x01(\x08:\x05\x66\x61lse\x12+\n\x1csupports_minimum_sql_grammar\x18[ \x01(\x08:\x05\x66\x61lse\x12.\n\x1fsupports_mixed_case_identifiers\x18\\ \x01(\x08:\x05\x66\x61lse\x12\x35\n&supports_mixed_case_quoted_identifiers\x18] \x01(\x08:\x05\x66\x61lse\x12-\n\x1esupports_multiple_open_results\x18^ \x01(\x08:\x05\x66\x61lse\x12,\n\x1dsupports_multiple_result_sets\x18_ 
\x01(\x08:\x05\x66\x61lse\x12-\n\x1esupports_multiple_transactions\x18` \x01(\x08:\x05\x66\x61lse\x12(\n\x19supports_named_parameters\x18\x61 \x01(\x08:\x05\x66\x61lse\x12,\n\x1dsupports_non_nullable_columns\x18\x62 \x01(\x08:\x05\x66\x61lse\x12\x32\n#supports_open_cursors_across_commit\x18\x63 \x01(\x08:\x05\x66\x61lse\x12\x34\n%supports_open_cursors_across_rollback\x18\x64 \x01(\x08:\x05\x66\x61lse\x12\x35\n&supports_open_statements_across_commit\x18\x65 \x01(\x08:\x05\x66\x61lse\x12\x37\n(supports_open_statements_across_rollback\x18\x66 \x01(\x08:\x05\x66\x61lse\x12*\n\x1bsupports_order_by_unrelated\x18g \x01(\x08:\x05\x66\x61lse\x12#\n\x14supports_outer_joins\x18h \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_positioned_delete\x18i \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_positioned_update\x18j \x01(\x08:\x05\x66\x61lse\x12\"\n\x13supports_savepoints\x18k \x01(\x08:\x05\x66\x61lse\x12\x34\n%supports_schemas_in_data_manipulation\x18l \x01(\x08:\x05\x66\x61lse\x12\x34\n%supports_schemas_in_index_definitions\x18m \x01(\x08:\x05\x66\x61lse\x12\x38\n)supports_schemas_in_privilege_definitions\x18n \x01(\x08:\x05\x66\x61lse\x12\x32\n#supports_schemas_in_procedure_calls\x18o \x01(\x08:\x05\x66\x61lse\x12\x34\n%supports_schemas_in_table_definitions\x18p \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_select_for_update\x18q \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_statement_pooling\x18r \x01(\x08:\x05\x66\x61lse\x12:\n+supports_stored_functions_using_call_syntax\x18s \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_stored_procedures\x18t \x01(\x08:\x05\x66\x61lse\x12\x31\n\"supports_subqueries_in_comparisons\x18u \x01(\x08:\x05\x66\x61lse\x12,\n\x1dsupports_subqueries_in_exists\x18v \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_subqueries_in_ins\x18w \x01(\x08:\x05\x66\x61lse\x12\x31\n\"supports_subqueries_in_quantifieds\x18x \x01(\x08:\x05\x66\x61lse\x12/\n supports_table_correlation_names\x18y \x01(\x08:\x05\x66\x61lse\x12$\n\x15supports_transactions\x18z \x01(\x08:\x05\x66\x61lse\x12\x1d\n\x0esupports_union\x18{ \x01(\x08:\x05\x66\x61lse\x12!\n\x12supports_union_all\x18| \x01(\x08:\x05\x66\x61lse\x12(\n\x19uses_local_file_per_table\x18} \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10uses_local_files\x18~ \x01(\x08:\x05\x66\x61lse\x12\x18\n\tread_only\x18\x7f \x01(\x08:\x05\x66\x61lse\x12\x14\n\x0btable_types\x18\x80\x01 \x03(\t\x12\x11\n\x08\x63\x61talogs\x18\x81\x01 \x03(\t\x12;\n\x07schemas\x18\x82\x01 \x03(\x0b\x32).speckle.JdbcDatabaseMetaDataProto.Schema\x12\x35\n\x14\x64\x65letes_are_detected\x18\x83\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x35\n\x14inserts_are_detected\x18\x84\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x35\n\x14updates_are_detected\x18\x85\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12;\n\x1aothers_deletes_are_visible\x18\x86\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12;\n\x1aothers_inserts_are_visible\x18\x87\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12;\n\x1aothers_updates_are_visible\x18\x88\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x38\n\x17own_deletes_are_visible\x18\x89\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x38\n\x17own_inserts_are_visible\x18\x8a\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x38\n\x17own_updates_are_visible\x18\x8b\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12J\n)supports_result_set_concurrency_updatable\x18\x8c\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x39\n\x18supports_result_set_type\x18\x8d\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12G\n\x1fsupports_result_set_holdability\x18\x8e\x01 
\x03(\x0e\x32\x1d.speckle.ResultSetHoldability\x12Q\n$supports_transaction_isolation_level\x18\x8f\x01 \x03(\x0e\x32\".speckle.TransactionIsolationLevel\x1a\x35\n\x06Schema\x12\x14\n\x0ctable_schema\x18\x01 \x01(\t\x12\x15\n\rtable_catalog\x18\x02 \x01(\t\"\xd2\x01\n\rRowIdLifetime\x12#\n\x1fROWIDLIFETIME_ROWID_UNSUPPORTED\x10\x00\x12%\n!ROWIDLIFETIME_ROWID_VALID_FOREVER\x10\x01\x12#\n\x1fROWIDLIFETIME_ROWID_VALID_OTHER\x10\x02\x12%\n!ROWIDLIFETIME_ROWID_VALID_SESSION\x10\x03\x12)\n%ROWIDLIFETIME_ROWID_VALID_TRANSACTION\x10\x04\"r\n\x0cSqlStateType\x12\x1e\n\x1aSQLSTATETYPE_SQL_STATE_SQL\x10\x00\x12 \n\x1cSQLSTATETYPE_SQL_STATE_SQL99\x10\x01\x12 \n\x1cSQLSTATETYPE_SQL_STATE_XOPEN\x10\x02\"&\n\x08Property\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xd6\x03\n\x0b\x45xecOptions\x12%\n\x16include_generated_keys\x18\x01 \x01(\x08:\x05\x66\x61lse\x12 \n\x18generated_column_indices\x18\x02 \x03(\x05\x12\x1e\n\x16generated_column_names\x18\x03 \x03(\t\x12$\n\x04type\x18\x04 \x01(\x0e\x32\x16.speckle.ResultSetType\x12\x32\n\x0b\x63oncurrency\x18\x05 \x01(\x0e\x32\x1d.speckle.ResultSetConcurrency\x12\x32\n\x0bholdability\x18\x06 \x01(\x0e\x32\x1d.speckle.ResultSetHoldability\x12\x12\n\nfetch_size\x18\x07 \x01(\x05\x12\x10\n\x08max_rows\x18\x08 \x01(\x05\x12\x17\n\x08poolable\x18\t \x01(\x08:\x05\x66\x61lse\x12?\n\x0f\x66\x65tch_direction\x18\n \x01(\x0e\x32\x17.speckle.FetchDirection:\rFETCH_FORWARD\x12\x13\n\x0b\x63ursor_name\x18\x0b \x01(\t\x12\x19\n\x0emax_field_size\x18\x0c \x01(\x05:\x01\x30\x12 \n\x11\x65scape_processing\x18\r \x01(\x08:\x05\x66\x61lse\"K\n\x16\x42\x61tchBindVariableProto\x12\x31\n\rbind_variable\x18\x01 \x03(\x0b\x32\x1a.speckle.BindVariableProto\"]\n\nBatchProto\x12\x11\n\tstatement\x18\x01 \x03(\t\x12<\n\x13\x62\x61tch_bind_variable\x18\x02 \x03(\x0b\x32\x1f.speckle.BatchBindVariableProto\"!\n\x11ParameterMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\":\n\rRpcErrorProto\x12\x12\n\nerror_code\x18\x01 \x01(\x05\x12\x15\n\rerror_message\x18\x02 
\x01(\t*\xb4\x02\n\x19TransactionIsolationLevel\x12.\n*TRANSACTIONISOLATIONLEVEL_TRANSACTION_NONE\x10\x00\x12\x38\n4TRANSACTIONISOLATIONLEVEL_TRANSACTION_READ_COMMITTED\x10\x02\x12:\n6TRANSACTIONISOLATIONLEVEL_TRANSACTION_READ_UNCOMMITTED\x10\x01\x12\x39\n5TRANSACTIONISOLATIONLEVEL_TRANSACTION_REPEATABLE_READ\x10\x04\x12\x36\n2TRANSACTIONISOLATIONLEVEL_TRANSACTION_SERIALIZABLE\x10\x08*\x8b\x01\n\rResultSetType\x12$\n\x1fRESULTSETTYPE_TYPE_FORWARD_ONLY\x10\xeb\x07\x12*\n%RESULTSETTYPE_TYPE_SCROLL_INSENSITIVE\x10\xec\x07\x12(\n#RESULTSETTYPE_TYPE_SCROLL_SENSITIVE\x10\xed\x07*n\n\x14ResultSetConcurrency\x12*\n%RESULTSETCONCURRENCY_CONCUR_READ_ONLY\x10\xef\x07\x12*\n%RESULTSETCONCURRENCY_CONCUR_UPDATABLE\x10\xf0\x07*{\n\x14ResultSetHoldability\x12\x31\n-RESULTSETHOLDABILITY_HOLD_CURSORS_OVER_COMMIT\x10\x01\x12\x30\n,RESULTSETHOLDABILITY_CLOSE_CURSORS_AT_COMMIT\x10\x02*L\n\x0e\x46\x65tchDirection\x12\x12\n\rFETCH_FORWARD\x10\xe8\x07\x12\x12\n\rFETCH_REVERSE\x10\xe9\x07\x12\x12\n\rFETCH_UNKNOWN\x10\xea\x07*\x8d\t\n\x0cMetadataType\x12(\n$METADATATYPE_DATABASE_METADATA_BASIC\x10\x01\x12-\n)METADATATYPE_DATABASE_METADATA_GET_TABLES\x10\x02\x12\x31\n-METADATATYPE_DATABASE_METADATA_GET_PROCEDURES\x10\x03\x12\x38\n4METADATATYPE_DATABASE_METADATA_GET_PROCEDURE_COLUMNS\x10\x04\x12.\n*METADATATYPE_DATABASE_METADATA_GET_COLUMNS\x10\x05\x12\x38\n4METADATATYPE_DATABASE_METADATA_GET_COLUMN_PRIVILEGES\x10\x06\x12\x37\n3METADATATYPE_DATABASE_METADATA_GET_TABLE_PRIVILEGES\x10\x07\x12:\n6METADATATYPE_DATABASE_METADATA_GET_BEST_ROW_IDENTIFIER\x10\x08\x12\x36\n2METADATATYPE_DATABASE_METADATA_GET_VERSION_COLUMNS\x10\t\x12\x33\n/METADATATYPE_DATABASE_METADATA_GET_PRIMARY_KEYS\x10\n\x12\x34\n0METADATATYPE_DATABASE_METADATA_GET_IMPORTED_KEYS\x10\x0b\x12\x34\n0METADATATYPE_DATABASE_METADATA_GET_EXPORTED_KEYS\x10\x0c\x12\x36\n2METADATATYPE_DATABASE_METADATA_GET_CROSS_REFERENCE\x10\r\x12\x31\n-METADATATYPE_DATABASE_METADATA_GET_INDEX_INFO\x10\x0e\x12+\n\'METADATATYPE_DATABASE_METADATA_GET_UDTS\x10\x0f\x12\x32\n.METADATATYPE_DATABASE_METADATA_GET_SUPER_TYPES\x10\x10\x12\x33\n/METADATATYPE_DATABASE_METADATA_GET_SUPER_TABLES\x10\x11\x12\x31\n-METADATATYPE_DATABASE_METADATA_GET_ATTRIBUTES\x10\x12\x12\x30\n,METADATATYPE_DATABASE_METADATA_GET_FUNCTIONS\x10\x13\x12\x37\n3METADATATYPE_DATABASE_METADATA_GET_FUNCTION_COLUMNS\x10\x14\x12\x30\n,METADATATYPE_DATABASE_METADATA_GET_TYPE_INFO\x10\x15\x12.\n*METADATATYPE_DATABASE_METADATA_GET_SCHEMAS\x10\x16\x42%\n\x1b\x63om.google.protos.cloud.sql\x10\x02 \x02(\x02xd')
+  serialized_pb='\n\"storage/speckle/proto/client.proto\x12\x07speckle\"\xb6\x01\n\x11\x42indVariableProto\x12\r\n\x05value\x18\x01 \x01(\x0c\x12\x0c\n\x04type\x18\x02 \x01(\x05\x12\x10\n\x08position\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12;\n\tdirection\x18\x05 \x01(\x0e\x32$.speckle.BindVariableProto.Direction:\x02IN\"\'\n\tDirection\x12\x06\n\x02IN\x10\x01\x12\x07\n\x03OUT\x10\x02\x12\t\n\x05INOUT\x10\x03\"\x8c\x03\n\x0bResultProto\x12\"\n\x04rows\x18\x01 \x01(\x0b\x32\x14.speckle.RowSetProto\x12\x14\n\x0crows_updated\x18\x02 \x01(\x03\x12\x16\n\x0egenerated_keys\x18\x03 \x03(\x0c\x12\'\n\x08warnings\x18\x04 \x03(\x0b\x32\x15.speckle.SqlException\x12,\n\rsql_exception\x18\x05 \x01(\x0b\x32\x15.speckle.SqlException\x12\x14\n\x0cstatement_id\x18\x06 \x01(\x04\x12\x18\n\tmore_rows\x18\x07 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0cmore_results\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x33\n\x0foutput_variable\x18\t \x03(\x0b\x32\x1a.speckle.BindVariableProto\x12\x1a\n\x12\x62\x61tch_rows_updated\x18\n \x03(\x03\x12\x36\n\x12parameter_metadata\x18\x0b \x03(\x0b\x32\x1a.speckle.ParameterMetadata\"\xf1\x05\n\x07OpProto\x12%\n\x04type\x18\x01 \x02(\x0e\x32\x17.speckle.OpProto.OpType\x12\x0f\n\x07\x63\x61talog\x18\x02 \x01(\t\x12\x0b\n\x03sql\x18\x03 \x01(\t\x12%\n\tsavepoint\x18\x04 \x01(\x0b\x32\x12.speckle.SavePoint\x12\x13\n\x0b\x61uto_commit\x18\x05 \x01(\x08\x12\x11\n\tread_only\x18\x06 \x01(\x08\x12G\n\x1btransaction_isolation_level\x18\x07 \x01(\x0e\x32\".speckle.TransactionIsolationLevel\x12\x14\n\x0cstatement_id\x18\x08 \x01(\x04\x12\x12\n\nrequest_id\x18\t \x01(\x04\"\xde\x03\n\x06OpType\x12\x0e\n\nNATIVE_SQL\x10\x01\x12\x0c\n\x08ROLLBACK\x10\x02\x12\x11\n\rSET_SAVEPOINT\x10\x03\x12\x13\n\x0fSET_AUTO_COMMIT\x10\x04\x12\x11\n\rSET_READ_ONLY\x10\x05\x12#\n\x1fSET_TRANSACTION_ISOLATION_LEVEL\x10\x06\x12\n\n\x06\x43OMMIT\x10\x07\x12\x0f\n\x0bSET_CATALOG\x10\x08\x12\x13\n\x0f\x43LOSE_STATEMENT\x10\t\x12\x08\n\x04PING\x10\n\x12\x0f\n\x0bNEXT_RESULT\x10\x0b\x12\t\n\x05RETRY\x10\x0c\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE13\x10\r\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE14\x10\x0e\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE15\x10\x0f\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE16\x10\x10\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE17\x10\x11\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE18\x10\x12\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE19\x10\x13\x12\x1e\n\x1aVALUE_ENUM_UNKNOWN_VALUE20\x10\x14\"%\n\tSavePoint\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x02(\t\"c\n\x0cSqlException\x12\x0f\n\x07message\x18\x01 \x02(\t\x12\x0f\n\x04\x63ode\x18\x02 \x02(\x05:\x01\x30\x12\x11\n\tsql_state\x18\x03 \x01(\t\x12\x1e\n\x16\x61pplication_error_code\x18\x04 \x01(\x05\"+\n\nTupleProto\x12\x0e\n\x06values\x18\x01 \x03(\x0c\x12\r\n\x05nulls\x18\x02 \x03(\x05\"\xc0\x03\n\x0b\x43olumnProto\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\r\n\x05label\x18\x02 \x01(\t\x12\x10\n\x04type\x18\x03 \x01(\x05:\x02\x31\x32\x12\x12\n\ntable_name\x18\x04 \x01(\t\x12\x13\n\x0bschema_name\x18\x05 \x01(\t\x12\x14\n\x0c\x63\x61talog_name\x18\x06 \x01(\t\x12\x14\n\tprecision\x18\x07 \x01(\x05:\x01\x30\x12\x10\n\x05scale\x18\x08 \x01(\x05:\x01\x30\x12\x10\n\x08nullable\x18\t \x01(\x08\x12\x12\n\nsearchable\x18\n \x01(\x08\x12\x14\n\x0c\x64isplay_size\x18\x0b \x01(\x05\x12\x1d\n\x0e\x61uto_increment\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x1d\n\x0e\x63\x61se_sensitive\x18\r \x01(\x08:\x05\x66\x61lse\x12\x17\n\x08\x63urrency\x18\x0e \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x64\x65\x66initely_writable\x18\x0f 
\x01(\x08:\x05\x66\x61lse\x12\x18\n\tread_only\x18\x10 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x06signed\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x08writable\x18\x12 \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x10\x63olumn_type_name\x18\x13 \x01(\t:\x00\"Y\n\x0bRowSetProto\x12%\n\x07\x63olumns\x18\x01 \x03(\x0b\x32\x14.speckle.ColumnProto\x12#\n\x06tuples\x18\x02 \x03(\x0b\x32\x13.speckle.TupleProto\"\xcb\x36\n\x19JdbcDatabaseMetaDataProto\x12*\n\x1b\x61ll_procedures_are_callable\x18\x01 \x01(\x08:\x05\x66\x61lse\x12(\n\x19\x61ll_tables_are_selectable\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x39\n*auto_commit_failure_closes_all_result_sets\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x38\n)data_definition_causes_transaction_commit\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x36\n\'data_definition_ignored_in_transactions\x18\x05 \x01(\x08:\x05\x66\x61lse\x12.\n\x1f\x64oes_max_row_size_include_blobs\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x11\x63\x61talog_separator\x18\x07 \x01(\t\x12\x14\n\x0c\x63\x61talog_term\x18\x08 \x01(\t\x12!\n\x16\x64\x61tabase_major_version\x18\t \x01(\x05:\x01\x30\x12!\n\x16\x64\x61tabase_minor_version\x18\n \x01(\x05:\x01\x30\x12&\n\x15\x64\x61tabase_product_name\x18\x0b \x01(\t:\x07Speckle\x12\"\n\x18\x64\x61tabase_product_version\x18\x0c \x01(\t:\x00\x12u\n\x1d\x64\x65\x66\x61ult_transaction_isolation\x18\r \x01(\x0e\x32\".speckle.TransactionIsolationLevel:*TRANSACTIONISOLATIONLEVEL_TRANSACTION_NONE\x12\x1f\n\x15\x65xtra_name_characters\x18\x0e \x01(\t:\x00\x12!\n\x17identifier_quote_string\x18\x0f \x01(\t:\x00\x12\x1d\n\x12jdbc_major_version\x18\x10 \x01(\x05:\x01\x31\x12\x1d\n\x12jdbc_minor_version\x18\x11 \x01(\x05:\x01\x30\x12$\n\x19max_binary_literal_length\x18\x12 \x01(\x05:\x01\x30\x12\"\n\x17max_catalog_name_length\x18\x13 \x01(\x05:\x01\x30\x12\"\n\x17max_char_literal_length\x18\x14 \x01(\x05:\x01\x30\x12!\n\x16max_column_name_length\x18\x15 \x01(\x05:\x01\x30\x12\"\n\x17max_columns_in_group_by\x18\x16 \x01(\x05:\x01\x30\x12\x1f\n\x14max_columns_in_index\x18\x17 \x01(\x05:\x01\x30\x12\"\n\x17max_columns_in_order_by\x18\x18 \x01(\x05:\x01\x30\x12 \n\x15max_columns_in_select\x18\x19 \x01(\x05:\x01\x30\x12\x1f\n\x14max_columns_in_table\x18\x1a \x01(\x05:\x01\x30\x12\x1a\n\x0fmax_connections\x18\x1b \x01(\x05:\x01\x30\x12!\n\x16max_cursor_name_length\x18\x1c \x01(\x05:\x01\x30\x12\x1b\n\x10max_index_length\x18\x1d \x01(\x05:\x01\x30\x12$\n\x19max_procedure_name_length\x18\x1e \x01(\x05:\x01\x30\x12\x17\n\x0cmax_row_size\x18\x1f \x01(\x05:\x01\x30\x12!\n\x16max_schema_name_length\x18  \x01(\x05:\x01\x30\x12\x1f\n\x14max_statement_length\x18! \x01(\x05:\x01\x30\x12\x19\n\x0emax_statements\x18\" \x01(\x05:\x01\x30\x12 \n\x15max_table_name_length\x18# \x01(\x05:\x01\x30\x12\x1f\n\x14max_tables_in_select\x18$ \x01(\x05:\x01\x30\x12\x1f\n\x14max_user_name_length\x18% \x01(\x05:\x01\x30\x12\x1b\n\x11numeric_functions\x18& \x01(\t:\x00\x12\x18\n\x0eprocedure_term\x18\' \x01(\t:\x00\x12j\n\x15resultset_holdability\x18( \x01(\x0e\x32\x1d.speckle.ResultSetHoldability:,RESULTSETHOLDABILITY_CLOSE_CURSORS_AT_COMMIT\x12i\n\x0erowid_lifetime\x18) \x01(\x0e\x32\x30.speckle.JdbcDatabaseMetaDataProto.RowIdLifetime:\x1fROWIDLIFETIME_ROWID_UNSUPPORTED\x12\x14\n\x0csql_keywords\x18* \x01(\t\x12\x63\n\x0esql_state_type\x18+ \x01(\x0e\x32/.speckle.JdbcDatabaseMetaDataProto.SqlStateType:\x1aSQLSTATETYPE_SQL_STATE_SQL\x12\x15\n\x0bschema_term\x18, \x01(\t:\x00\x12\x1c\n\x14search_string_escape\x18- \x01(\t\x12\x1a\n\x10string_functions\x18. 
\x01(\t:\x00\x12\x1a\n\x10system_functions\x18/ \x01(\t:\x00\x12\x1d\n\x13time_date_functions\x18\x30 \x01(\t:\x00\x12\x13\n\tuser_name\x18\x31 \x01(\t:\x00\x12\x1f\n\x10\x63\x61talog_at_start\x18\x32 \x01(\x08:\x05\x66\x61lse\x12#\n\x14locators_update_copy\x18\x33 \x01(\x08:\x05\x66\x61lse\x12)\n\x1anull_plus_non_null_is_null\x18\x34 \x01(\x08:\x05\x66\x61lse\x12&\n\x17nulls_are_sorted_at_end\x18\x35 \x01(\x08:\x05\x66\x61lse\x12(\n\x19nulls_are_sorted_at_start\x18\x36 \x01(\x08:\x05\x66\x61lse\x12$\n\x15nulls_are_sorted_high\x18\x37 \x01(\x08:\x05\x66\x61lse\x12#\n\x14nulls_are_sorted_low\x18\x38 \x01(\x08:\x05\x66\x61lse\x12,\n\x1dstores_lower_case_identifiers\x18\x39 \x01(\x08:\x05\x66\x61lse\x12\x33\n$stores_lower_case_quoted_identifiers\x18: \x01(\x08:\x05\x66\x61lse\x12,\n\x1dstores_mixed_case_identifiers\x18; \x01(\x08:\x05\x66\x61lse\x12\x33\n$stores_mixed_case_quoted_identifiers\x18< \x01(\x08:\x05\x66\x61lse\x12,\n\x1dstores_upper_case_identifiers\x18= \x01(\x08:\x05\x66\x61lse\x12\x33\n$stores_upper_case_quoted_identifiers\x18> \x01(\x08:\x05\x66\x61lse\x12.\n\x1fsupports_ansi92_entry_level_sql\x18? \x01(\x08:\x05\x66\x61lse\x12\'\n\x18supports_ansi92_full_sql\x18@ \x01(\x08:\x05\x66\x61lse\x12/\n supports_ansi92_intermediate_sql\x18\x41 \x01(\x08:\x05\x66\x61lse\x12\x33\n$supports_alter_table_with_add_column\x18\x42 \x01(\x08:\x05\x66\x61lse\x12\x34\n%supports_alter_table_with_drop_column\x18\x43 \x01(\x08:\x05\x66\x61lse\x12%\n\x16supports_batch_updates\x18\x44 \x01(\x08:\x05\x66\x61lse\x12\x35\n&supports_catalogs_in_data_manipulation\x18\x45 \x01(\x08:\x05\x66\x61lse\x12\x35\n&supports_catalogs_in_index_definitions\x18\x46 \x01(\x08:\x05\x66\x61lse\x12\x39\n*supports_catalogs_in_privilege_definitions\x18G \x01(\x08:\x05\x66\x61lse\x12\x33\n$supports_catalogs_in_procedure_calls\x18H \x01(\x08:\x05\x66\x61lse\x12\x35\n&supports_catalogs_in_table_definitions\x18I \x01(\x08:\x05\x66\x61lse\x12\'\n\x18supports_column_aliasing\x18J \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10supports_convert\x18K \x01(\x08:\x05\x66\x61lse\x12(\n\x19supports_core_sql_grammar\x18L \x01(\x08:\x05\x66\x61lse\x12-\n\x1esupports_correlated_subqueries\x18M \x01(\x08:\x05\x66\x61lse\x12J\n;supports_data_definition_and_data_manipulation_transactions\x18N \x01(\x08:\x05\x66\x61lse\x12;\n,supports_data_manipulation_transactions_only\x18O \x01(\x08:\x05\x66\x61lse\x12\x39\n*supports_different_table_correlation_names\x18P \x01(\x08:\x05\x66\x61lse\x12/\n supports_expressions_in_order_by\x18Q \x01(\x08:\x05\x66\x61lse\x12,\n\x1dsupports_extended_sql_grammar\x18R \x01(\x08:\x05\x66\x61lse\x12(\n\x19supports_full_outer_joins\x18S \x01(\x08:\x05\x66\x61lse\x12*\n\x1bsupports_get_generated_keys\x18T \x01(\x08:\x05\x66\x61lse\x12 \n\x11supports_group_by\x18U \x01(\x08:\x05\x66\x61lse\x12.\n\x1fsupports_group_by_beyond_select\x18V \x01(\x08:\x05\x66\x61lse\x12*\n\x1bsupports_group_by_unrelated\x18W \x01(\x08:\x05\x66\x61lse\x12\x36\n\'supports_integrity_enhancement_facility\x18X \x01(\x08:\x05\x66\x61lse\x12*\n\x1bsupports_like_escape_clause\x18Y \x01(\x08:\x05\x66\x61lse\x12+\n\x1csupports_limited_outer_joins\x18Z \x01(\x08:\x05\x66\x61lse\x12+\n\x1csupports_minimum_sql_grammar\x18[ \x01(\x08:\x05\x66\x61lse\x12.\n\x1fsupports_mixed_case_identifiers\x18\\ \x01(\x08:\x05\x66\x61lse\x12\x35\n&supports_mixed_case_quoted_identifiers\x18] \x01(\x08:\x05\x66\x61lse\x12-\n\x1esupports_multiple_open_results\x18^ \x01(\x08:\x05\x66\x61lse\x12,\n\x1dsupports_multiple_result_sets\x18_ 
\x01(\x08:\x05\x66\x61lse\x12-\n\x1esupports_multiple_transactions\x18` \x01(\x08:\x05\x66\x61lse\x12(\n\x19supports_named_parameters\x18\x61 \x01(\x08:\x05\x66\x61lse\x12,\n\x1dsupports_non_nullable_columns\x18\x62 \x01(\x08:\x05\x66\x61lse\x12\x32\n#supports_open_cursors_across_commit\x18\x63 \x01(\x08:\x05\x66\x61lse\x12\x34\n%supports_open_cursors_across_rollback\x18\x64 \x01(\x08:\x05\x66\x61lse\x12\x35\n&supports_open_statements_across_commit\x18\x65 \x01(\x08:\x05\x66\x61lse\x12\x37\n(supports_open_statements_across_rollback\x18\x66 \x01(\x08:\x05\x66\x61lse\x12*\n\x1bsupports_order_by_unrelated\x18g \x01(\x08:\x05\x66\x61lse\x12#\n\x14supports_outer_joins\x18h \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_positioned_delete\x18i \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_positioned_update\x18j \x01(\x08:\x05\x66\x61lse\x12\"\n\x13supports_savepoints\x18k \x01(\x08:\x05\x66\x61lse\x12\x34\n%supports_schemas_in_data_manipulation\x18l \x01(\x08:\x05\x66\x61lse\x12\x34\n%supports_schemas_in_index_definitions\x18m \x01(\x08:\x05\x66\x61lse\x12\x38\n)supports_schemas_in_privilege_definitions\x18n \x01(\x08:\x05\x66\x61lse\x12\x32\n#supports_schemas_in_procedure_calls\x18o \x01(\x08:\x05\x66\x61lse\x12\x34\n%supports_schemas_in_table_definitions\x18p \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_select_for_update\x18q \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_statement_pooling\x18r \x01(\x08:\x05\x66\x61lse\x12:\n+supports_stored_functions_using_call_syntax\x18s \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_stored_procedures\x18t \x01(\x08:\x05\x66\x61lse\x12\x31\n\"supports_subqueries_in_comparisons\x18u \x01(\x08:\x05\x66\x61lse\x12,\n\x1dsupports_subqueries_in_exists\x18v \x01(\x08:\x05\x66\x61lse\x12)\n\x1asupports_subqueries_in_ins\x18w \x01(\x08:\x05\x66\x61lse\x12\x31\n\"supports_subqueries_in_quantifieds\x18x \x01(\x08:\x05\x66\x61lse\x12/\n supports_table_correlation_names\x18y \x01(\x08:\x05\x66\x61lse\x12$\n\x15supports_transactions\x18z \x01(\x08:\x05\x66\x61lse\x12\x1d\n\x0esupports_union\x18{ \x01(\x08:\x05\x66\x61lse\x12!\n\x12supports_union_all\x18| \x01(\x08:\x05\x66\x61lse\x12(\n\x19uses_local_file_per_table\x18} \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10uses_local_files\x18~ \x01(\x08:\x05\x66\x61lse\x12\x18\n\tread_only\x18\x7f \x01(\x08:\x05\x66\x61lse\x12\x14\n\x0btable_types\x18\x80\x01 \x03(\t\x12\x11\n\x08\x63\x61talogs\x18\x81\x01 \x03(\t\x12;\n\x07schemas\x18\x82\x01 \x03(\x0b\x32).speckle.JdbcDatabaseMetaDataProto.Schema\x12\x35\n\x14\x64\x65letes_are_detected\x18\x83\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x35\n\x14inserts_are_detected\x18\x84\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x35\n\x14updates_are_detected\x18\x85\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12;\n\x1aothers_deletes_are_visible\x18\x86\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12;\n\x1aothers_inserts_are_visible\x18\x87\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12;\n\x1aothers_updates_are_visible\x18\x88\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x38\n\x17own_deletes_are_visible\x18\x89\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x38\n\x17own_inserts_are_visible\x18\x8a\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x38\n\x17own_updates_are_visible\x18\x8b\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12J\n)supports_result_set_concurrency_updatable\x18\x8c\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12\x39\n\x18supports_result_set_type\x18\x8d\x01 \x03(\x0e\x32\x16.speckle.ResultSetType\x12G\n\x1fsupports_result_set_holdability\x18\x8e\x01 
\x03(\x0e\x32\x1d.speckle.ResultSetHoldability\x12Q\n$supports_transaction_isolation_level\x18\x8f\x01 \x03(\x0e\x32\".speckle.TransactionIsolationLevel\x12-\n\x1dgenerated_key_always_returned\x18\x90\x01 \x01(\x08:\x05\x66\x61lse\x1a\x35\n\x06Schema\x12\x14\n\x0ctable_schema\x18\x01 \x01(\t\x12\x15\n\rtable_catalog\x18\x02 \x01(\t\"\xd2\x01\n\rRowIdLifetime\x12#\n\x1fROWIDLIFETIME_ROWID_UNSUPPORTED\x10\x00\x12%\n!ROWIDLIFETIME_ROWID_VALID_FOREVER\x10\x01\x12#\n\x1fROWIDLIFETIME_ROWID_VALID_OTHER\x10\x02\x12%\n!ROWIDLIFETIME_ROWID_VALID_SESSION\x10\x03\x12)\n%ROWIDLIFETIME_ROWID_VALID_TRANSACTION\x10\x04\"r\n\x0cSqlStateType\x12\x1e\n\x1aSQLSTATETYPE_SQL_STATE_SQL\x10\x00\x12 \n\x1cSQLSTATETYPE_SQL_STATE_SQL99\x10\x01\x12 \n\x1cSQLSTATETYPE_SQL_STATE_XOPEN\x10\x02\"&\n\x08Property\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xd6\x03\n\x0b\x45xecOptions\x12%\n\x16include_generated_keys\x18\x01 \x01(\x08:\x05\x66\x61lse\x12 \n\x18generated_column_indices\x18\x02 \x03(\x05\x12\x1e\n\x16generated_column_names\x18\x03 \x03(\t\x12$\n\x04type\x18\x04 \x01(\x0e\x32\x16.speckle.ResultSetType\x12\x32\n\x0b\x63oncurrency\x18\x05 \x01(\x0e\x32\x1d.speckle.ResultSetConcurrency\x12\x32\n\x0bholdability\x18\x06 \x01(\x0e\x32\x1d.speckle.ResultSetHoldability\x12\x12\n\nfetch_size\x18\x07 \x01(\x05\x12\x10\n\x08max_rows\x18\x08 \x01(\x05\x12\x17\n\x08poolable\x18\t \x01(\x08:\x05\x66\x61lse\x12?\n\x0f\x66\x65tch_direction\x18\n \x01(\x0e\x32\x17.speckle.FetchDirection:\rFETCH_FORWARD\x12\x13\n\x0b\x63ursor_name\x18\x0b \x01(\t\x12\x19\n\x0emax_field_size\x18\x0c \x01(\x05:\x01\x30\x12 \n\x11\x65scape_processing\x18\r \x01(\x08:\x05\x66\x61lse\"K\n\x16\x42\x61tchBindVariableProto\x12\x31\n\rbind_variable\x18\x01 \x03(\x0b\x32\x1a.speckle.BindVariableProto\"]\n\nBatchProto\x12\x11\n\tstatement\x18\x01 \x03(\t\x12<\n\x13\x62\x61tch_bind_variable\x18\x02 \x03(\x0b\x32\x1f.speckle.BatchBindVariableProto\"!\n\x11ParameterMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\":\n\rRpcErrorProto\x12\x12\n\nerror_code\x18\x01 \x01(\x05\x12\x15\n\rerror_message\x18\x02 
\x01(\t*\xb4\x02\n\x19TransactionIsolationLevel\x12.\n*TRANSACTIONISOLATIONLEVEL_TRANSACTION_NONE\x10\x00\x12\x38\n4TRANSACTIONISOLATIONLEVEL_TRANSACTION_READ_COMMITTED\x10\x02\x12:\n6TRANSACTIONISOLATIONLEVEL_TRANSACTION_READ_UNCOMMITTED\x10\x01\x12\x39\n5TRANSACTIONISOLATIONLEVEL_TRANSACTION_REPEATABLE_READ\x10\x04\x12\x36\n2TRANSACTIONISOLATIONLEVEL_TRANSACTION_SERIALIZABLE\x10\x08*\x8b\x01\n\rResultSetType\x12$\n\x1fRESULTSETTYPE_TYPE_FORWARD_ONLY\x10\xeb\x07\x12*\n%RESULTSETTYPE_TYPE_SCROLL_INSENSITIVE\x10\xec\x07\x12(\n#RESULTSETTYPE_TYPE_SCROLL_SENSITIVE\x10\xed\x07*n\n\x14ResultSetConcurrency\x12*\n%RESULTSETCONCURRENCY_CONCUR_READ_ONLY\x10\xef\x07\x12*\n%RESULTSETCONCURRENCY_CONCUR_UPDATABLE\x10\xf0\x07*{\n\x14ResultSetHoldability\x12\x31\n-RESULTSETHOLDABILITY_HOLD_CURSORS_OVER_COMMIT\x10\x01\x12\x30\n,RESULTSETHOLDABILITY_CLOSE_CURSORS_AT_COMMIT\x10\x02*L\n\x0e\x46\x65tchDirection\x12\x12\n\rFETCH_FORWARD\x10\xe8\x07\x12\x12\n\rFETCH_REVERSE\x10\xe9\x07\x12\x12\n\rFETCH_UNKNOWN\x10\xea\x07*\xc4\t\n\x0cMetadataType\x12(\n$METADATATYPE_DATABASE_METADATA_BASIC\x10\x01\x12-\n)METADATATYPE_DATABASE_METADATA_GET_TABLES\x10\x02\x12\x31\n-METADATATYPE_DATABASE_METADATA_GET_PROCEDURES\x10\x03\x12\x38\n4METADATATYPE_DATABASE_METADATA_GET_PROCEDURE_COLUMNS\x10\x04\x12.\n*METADATATYPE_DATABASE_METADATA_GET_COLUMNS\x10\x05\x12\x38\n4METADATATYPE_DATABASE_METADATA_GET_COLUMN_PRIVILEGES\x10\x06\x12\x37\n3METADATATYPE_DATABASE_METADATA_GET_TABLE_PRIVILEGES\x10\x07\x12:\n6METADATATYPE_DATABASE_METADATA_GET_BEST_ROW_IDENTIFIER\x10\x08\x12\x36\n2METADATATYPE_DATABASE_METADATA_GET_VERSION_COLUMNS\x10\t\x12\x33\n/METADATATYPE_DATABASE_METADATA_GET_PRIMARY_KEYS\x10\n\x12\x34\n0METADATATYPE_DATABASE_METADATA_GET_IMPORTED_KEYS\x10\x0b\x12\x34\n0METADATATYPE_DATABASE_METADATA_GET_EXPORTED_KEYS\x10\x0c\x12\x36\n2METADATATYPE_DATABASE_METADATA_GET_CROSS_REFERENCE\x10\r\x12\x31\n-METADATATYPE_DATABASE_METADATA_GET_INDEX_INFO\x10\x0e\x12+\n\'METADATATYPE_DATABASE_METADATA_GET_UDTS\x10\x0f\x12\x32\n.METADATATYPE_DATABASE_METADATA_GET_SUPER_TYPES\x10\x10\x12\x33\n/METADATATYPE_DATABASE_METADATA_GET_SUPER_TABLES\x10\x11\x12\x31\n-METADATATYPE_DATABASE_METADATA_GET_ATTRIBUTES\x10\x12\x12\x30\n,METADATATYPE_DATABASE_METADATA_GET_FUNCTIONS\x10\x13\x12\x37\n3METADATATYPE_DATABASE_METADATA_GET_FUNCTION_COLUMNS\x10\x14\x12\x30\n,METADATATYPE_DATABASE_METADATA_GET_TYPE_INFO\x10\x15\x12.\n*METADATATYPE_DATABASE_METADATA_GET_SCHEMAS\x10\x16\x12\x35\n1METADATATYPE_DATABASE_METADATA_GET_PSEUDO_COLUMNS\x10\x17\x42%\n\x1b\x63om.google.protos.cloud.sql\x10\x02 \x02(\x02xd')
 
 _TRANSACTIONISOLATIONLEVEL = descriptor.EnumDescriptor(
   name='TransactionIsolationLevel',
@@ -59,8 +59,8 @@
   ],
   containing_type=None,
   options=None,
-  serialized_start=9838,
-  serialized_end=10146,
+  serialized_start=9885,
+  serialized_end=10193,
 )
 
 
@@ -85,8 +85,8 @@
   ],
   containing_type=None,
   options=None,
-  serialized_start=10149,
-  serialized_end=10288,
+  serialized_start=10196,
+  serialized_end=10335,
 )
 
 
@@ -107,8 +107,8 @@
   ],
   containing_type=None,
   options=None,
-  serialized_start=10290,
-  serialized_end=10400,
+  serialized_start=10337,
+  serialized_end=10447,
 )
 
 
@@ -129,8 +129,8 @@
   ],
   containing_type=None,
   options=None,
-  serialized_start=10402,
-  serialized_end=10525,
+  serialized_start=10449,
+  serialized_end=10572,
 )
 
 
@@ -155,8 +155,8 @@
   ],
   containing_type=None,
   options=None,
-  serialized_start=10527,
-  serialized_end=10603,
+  serialized_start=10574,
+  serialized_end=10650,
 )
 
 
@@ -254,11 +254,15 @@
       name='METADATATYPE_DATABASE_METADATA_GET_SCHEMAS', index=21, number=22,
       options=None,
       type=None),
+    descriptor.EnumValueDescriptor(
+      name='METADATATYPE_DATABASE_METADATA_GET_PSEUDO_COLUMNS', index=22, number=23,
+      options=None,
+      type=None),
   ],
   containing_type=None,
   options=None,
-  serialized_start=10606,
-  serialized_end=11771,
+  serialized_start=10653,
+  serialized_end=11873,
 )
 
 
@@ -299,6 +303,7 @@
 METADATATYPE_DATABASE_METADATA_GET_FUNCTION_COLUMNS = 20
 METADATATYPE_DATABASE_METADATA_GET_TYPE_INFO = 21
 METADATATYPE_DATABASE_METADATA_GET_SCHEMAS = 22
+METADATATYPE_DATABASE_METADATA_GET_PSEUDO_COLUMNS = 23
 
 
 _BINDVARIABLEPROTO_DIRECTION = descriptor.EnumDescriptor(
@@ -448,8 +453,8 @@
   ],
   containing_type=None,
   options=None,
-  serialized_start=8729,
-  serialized_end=8939,
+  serialized_start=8776,
+  serialized_end=8986,
 )
 
 _JDBCDATABASEMETADATAPROTO_SQLSTATETYPE = descriptor.EnumDescriptor(
@@ -473,8 +478,8 @@
   ],
   containing_type=None,
   options=None,
-  serialized_start=8941,
-  serialized_end=9055,
+  serialized_start=8988,
+  serialized_end=9102,
 )
 
 
@@ -1056,8 +1061,8 @@
   options=None,
   is_extendable=False,
   extension_ranges=[],
-  serialized_start=8673,
-  serialized_end=8726,
+  serialized_start=8720,
+  serialized_end=8773,
 )
 
 _JDBCDATABASEMETADATAPROTO = descriptor.Descriptor(
@@ -2068,6 +2073,13 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       options=None),
+    descriptor.FieldDescriptor(
+      name='generated_key_always_returned', full_name='speckle.JdbcDatabaseMetaDataProto.generated_key_always_returned', index=143,
+      number=144, type=8, cpp_type=7, label=1,
+      has_default_value=True, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
   ],
   extensions=[
   ],
@@ -2080,7 +2092,7 @@
   is_extendable=False,
   extension_ranges=[],
   serialized_start=2115,
-  serialized_end=9055,
+  serialized_end=9102,
 )
 
 
@@ -2114,8 +2126,8 @@
   options=None,
   is_extendable=False,
   extension_ranges=[],
-  serialized_start=9057,
-  serialized_end=9095,
+  serialized_start=9104,
+  serialized_end=9142,
 )
 
 
@@ -2226,8 +2238,8 @@
   options=None,
   is_extendable=False,
   extension_ranges=[],
-  serialized_start=9098,
-  serialized_end=9568,
+  serialized_start=9145,
+  serialized_end=9615,
 )
 
 
@@ -2254,8 +2266,8 @@
   options=None,
   is_extendable=False,
   extension_ranges=[],
-  serialized_start=9570,
-  serialized_end=9645,
+  serialized_start=9617,
+  serialized_end=9692,
 )
 
 
@@ -2289,8 +2301,8 @@
   options=None,
   is_extendable=False,
   extension_ranges=[],
-  serialized_start=9647,
-  serialized_end=9740,
+  serialized_start=9694,
+  serialized_end=9787,
 )
 
 
@@ -2317,8 +2329,8 @@
   options=None,
   is_extendable=False,
   extension_ranges=[],
-  serialized_start=9742,
-  serialized_end=9775,
+  serialized_start=9789,
+  serialized_end=9822,
 )
 
 
@@ -2352,8 +2364,8 @@
   options=None,
   is_extendable=False,
   extension_ranges=[],
-  serialized_start=9777,
-  serialized_end=9835,
+  serialized_start=9824,
+  serialized_end=9882,
 )
 
 _BINDVARIABLEPROTO.fields_by_name['direction'].enum_type = _BINDVARIABLEPROTO_DIRECTION
diff --git a/lib/cacerts/urlfetch_cacerts.txt b/lib/cacerts/urlfetch_cacerts.txt
index 657ef52..385e21e 100755
--- a/lib/cacerts/urlfetch_cacerts.txt
+++ b/lib/cacerts/urlfetch_cacerts.txt
@@ -33,7 +33,7 @@
 # the terms of any one of the MPL, the GPL or the LGPL.
 #
 # ***** END LICENSE BLOCK *****
-CVS_ID "@(#) $RCSfile: certdata.txt,v $ $Revision: 1.71 $ $Date: 2011/03/23 20:07:31 $"
+CVS_ID "@(#) $RCSfile: certdata.txt,v $ $Revision: 1.74 $ $Date: 2011/04/13 00:10:24 $"
 
 subject= /C=US/O=GTE Corporation/OU=GTE CyberTrust Solutions, Inc./CN=GTE CyberTrust Global Root
 serial=01A5
@@ -53,29 +53,6 @@
 lZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/
 -----END CERTIFICATE-----
 
-subject= /C=ZA/ST=Western Cape/L=Cape Town/O=Thawte Consulting/OU=Certification Services Division/CN=Thawte Personal Freemail CA/emailAddress=personal-freemail@thawte.com
-serial=00
------BEGIN CERTIFICATE-----
-MIIDLTCCApagAwIBAgIBADANBgkqhkiG9w0BAQQFADCB0TELMAkGA1UEBhMCWkEx
-FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMRowGAYD
-VQQKExFUaGF3dGUgQ29uc3VsdGluZzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBT
-ZXJ2aWNlcyBEaXZpc2lvbjEkMCIGA1UEAxMbVGhhd3RlIFBlcnNvbmFsIEZyZWVt
-YWlsIENBMSswKQYJKoZIhvcNAQkBFhxwZXJzb25hbC1mcmVlbWFpbEB0aGF3dGUu
-Y29tMB4XDTk2MDEwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgdExCzAJBgNVBAYT
-AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEa
-MBgGA1UEChMRVGhhd3RlIENvbnN1bHRpbmcxKDAmBgNVBAsTH0NlcnRpZmljYXRp
-b24gU2VydmljZXMgRGl2aXNpb24xJDAiBgNVBAMTG1RoYXd0ZSBQZXJzb25hbCBG
-cmVlbWFpbCBDQTErMCkGCSqGSIb3DQEJARYccGVyc29uYWwtZnJlZW1haWxAdGhh
-d3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA1GnX1LCUZFtx6UfY
-DFG26nKRsIRefS0Nj3sS34UldSh0OkIsYyeflXtL734Zhx2G6qPduc6WZBrCFG5E
-rHzmj+hND3EfQDimAKOHePb5lIZererAXnbr2RSjXW56fAylS1V/Bhkpf56aJtVq
-uzgkCGqYx7Hao5iR/Xnb5VrEHLkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zAN
-BgkqhkiG9w0BAQQFAAOBgQDH7JJ+Tvj1lqVnYiqk8E0RYNBvjWBYYawmu1I1XAjP
-MPuoSpaKH2JCI4wXD/S6ZJwXrEcp352YXtJsYHFcoqzceePnbgBHH7UNKOgCneSa
-/RP0ptl8sfjcXyMmCZGAc9AUG95DqYMl8uacLxXK/qarigd1iwzdUYRr5PjRznei
-gQ==
------END CERTIFICATE-----
-
 subject= /C=ZA/ST=Western Cape/L=Cape Town/O=Thawte Consulting cc/OU=Certification Services Division/CN=Thawte Server CA/emailAddress=server-certs@thawte.com
 serial=01
 -----BEGIN CERTIFICATE-----
@@ -577,38 +554,6 @@
 2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI=
 -----END CERTIFICATE-----
 
-subject= /C=US/O=Entrust.net/OU=www.entrust.net/Client_CA_Info/CPS incorp. by ref. limits liab./OU=(c) 1999 Entrust.net Limited/CN=Entrust.net Client Certification Authority
-serial=380391EE
------BEGIN CERTIFICATE-----
-MIIE7TCCBFagAwIBAgIEOAOR7jANBgkqhkiG9w0BAQQFADCByTELMAkGA1UEBhMC
-VVMxFDASBgNVBAoTC0VudHJ1c3QubmV0MUgwRgYDVQQLFD93d3cuZW50cnVzdC5u
-ZXQvQ2xpZW50X0NBX0luZm8vQ1BTIGluY29ycC4gYnkgcmVmLiBsaW1pdHMgbGlh
-Yi4xJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV
-BAMTKkVudHJ1c3QubmV0IENsaWVudCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
-Fw05OTEwMTIxOTI0MzBaFw0xOTEwMTIxOTU0MzBaMIHJMQswCQYDVQQGEwJVUzEU
-MBIGA1UEChMLRW50cnVzdC5uZXQxSDBGBgNVBAsUP3d3dy5lbnRydXN0Lm5ldC9D
-bGllbnRfQ0FfSW5mby9DUFMgaW5jb3JwLiBieSByZWYuIGxpbWl0cyBsaWFiLjEl
-MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMq
-RW50cnVzdC5uZXQgQ2xpZW50IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGdMA0G
-CSqGSIb3DQEBAQUAA4GLADCBhwKBgQDIOpleMRffrCdvkHvkGf9FozTC28GoT/Bo
-6oT9n3V5z8GKUZSvx1cDR2SerYIbWtp/N3hHuzeYEpbOxhN979IMMFGpOZ5V+Pux
-5zDeg7K6PvHViTs7hbqqdCz+PzFur5GVbgbUB01LLFZHGARS2g4Qk79jkJvh34zm
-AqTmT173iwIBA6OCAeAwggHcMBEGCWCGSAGG+EIBAQQEAwIABzCCASIGA1UdHwSC
-ARkwggEVMIHkoIHhoIHepIHbMIHYMQswCQYDVQQGEwJVUzEUMBIGA1UEChMLRW50
-cnVzdC5uZXQxSDBGBgNVBAsUP3d3dy5lbnRydXN0Lm5ldC9DbGllbnRfQ0FfSW5m
-by9DUFMgaW5jb3JwLiBieSByZWYuIGxpbWl0cyBsaWFiLjElMCMGA1UECxMcKGMp
-IDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5uZXQg
-Q2xpZW50IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCyg
-KqAohiZodHRwOi8vd3d3LmVudHJ1c3QubmV0L0NSTC9DbGllbnQxLmNybDArBgNV
-HRAEJDAigA8xOTk5MTAxMjE5MjQzMFqBDzIwMTkxMDEyMTkyNDMwWjALBgNVHQ8E
-BAMCAQYwHwYDVR0jBBgwFoAUxPucKXuXzUyW/O5bs8qZdIuV6kwwHQYDVR0OBBYE
-FMT7nCl7l81MlvzuW7PKmXSLlepMMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EA
-BAwwChsEVjQuMAMCBJAwDQYJKoZIhvcNAQEEBQADgYEAP66K8ddmAwWePvrqHEa7
-pFuPeJoSSJn59DXeDDYHAmsQOokUgZwxpnyyQbJq5wcBoUv5nyU7lsqZwz6hURzz
-wy5E97BnRqqS5TvaHBkUODDV4qIxJS7x7EU47fgGWANzYrAQMY9Av2TgXD7FTx/a
-EkP/TOYGJqibGapEPHayXOw=
------END CERTIFICATE-----
-
 subject= /O=Entrust.net/OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/OU=(c) 1999 Entrust.net Limited/CN=Entrust.net Certification Authority (2048)
 serial=3863B966
 -----BEGIN CERTIFICATE-----
@@ -832,86 +777,6 @@
 xqE=
 -----END CERTIFICATE-----
 
-subject= /C=ZA/ST=Western Cape/L=Durbanville/O=Thawte/OU=Thawte Certification/CN=Thawte Timestamping CA
-serial=00
------BEGIN CERTIFICATE-----
-MIICoTCCAgqgAwIBAgIBADANBgkqhkiG9w0BAQQFADCBizELMAkGA1UEBhMCWkEx
-FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTEUMBIGA1UEBxMLRHVyYmFudmlsbGUxDzAN
-BgNVBAoTBlRoYXd0ZTEdMBsGA1UECxMUVGhhd3RlIENlcnRpZmljYXRpb24xHzAd
-BgNVBAMTFlRoYXd0ZSBUaW1lc3RhbXBpbmcgQ0EwHhcNOTcwMTAxMDAwMDAwWhcN
-MjAxMjMxMjM1OTU5WjCBizELMAkGA1UEBhMCWkExFTATBgNVBAgTDFdlc3Rlcm4g
-Q2FwZTEUMBIGA1UEBxMLRHVyYmFudmlsbGUxDzANBgNVBAoTBlRoYXd0ZTEdMBsG
-A1UECxMUVGhhd3RlIENlcnRpZmljYXRpb24xHzAdBgNVBAMTFlRoYXd0ZSBUaW1l
-c3RhbXBpbmcgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANYrWHhhRYZT
-6jR7UZztsOYuGA7+4F+oJ9O0yeB8WU4WDnNUYMF/9p8u6TqFJBU820cEY8OexJQa
-Wt9MevPZQx08EHp5JduQ/vBR5zDWQQD9nyjfeb6Uu522FOMjhdepQeBMpHmwKxqL
-8vg7ij5FrHGSALSQQZj7X+36ty6K+Ig3AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMB
-Af8wDQYJKoZIhvcNAQEEBQADgYEAZ9viwuaHPUCDhjc1fR/OmsMMZiCouqoEiYbC
-9RAIDb/LogWK0E02PvTX72nGXuSwlG9KuefeW4i2e9vjJ+V2w/A1wcu1J5szedyQ
-pgCed/r8zSeUQhac0xxo7L9c3eWpexAKMnRUEzGLhQOEkbdYATAUOK8oyvyxUBkZ
-CayJSdM=
------END CERTIFICATE-----
-
-subject= /O=Entrust.net/OU=www.entrust.net/SSL_CPS incorp. by ref. (limits liab.)/OU=(c) 2000 Entrust.net Limited/CN=Entrust.net Secure Server Certification Authority
-serial=389B113C
------BEGIN CERTIFICATE-----
-MIIElTCCA/6gAwIBAgIEOJsRPDANBgkqhkiG9w0BAQQFADCBujEUMBIGA1UEChML
-RW50cnVzdC5uZXQxPzA9BgNVBAsUNnd3dy5lbnRydXN0Lm5ldC9TU0xfQ1BTIGlu
-Y29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMcKGMpIDIwMDAg
-RW50cnVzdC5uZXQgTGltaXRlZDE6MDgGA1UEAxMxRW50cnVzdC5uZXQgU2VjdXJl
-IFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMDAyMDQxNzIwMDBa
-Fw0yMDAyMDQxNzUwMDBaMIG6MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDE/MD0GA1UE
-CxQ2d3d3LmVudHJ1c3QubmV0L1NTTF9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1p
-dHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMjAwMCBFbnRydXN0Lm5ldCBMaW1pdGVk
-MTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRp
-b24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDHwV9OcfHO
-8GCGD9JYf9Mzly0XonUwtZZkJi9ow0SrqHXmAGc0V55lxyKbc+bT3QgON1WqJUaB
-bL3+qPZ1V1eMkGxKwz6LS0MKyRFWmponIpnPVZ5h2QLifLZ8OAfc439PmrkDQYC2
-dWcTC5/oVzbIXQA23mYU2m52H083jIITiQIDAQABo4IBpDCCAaAwEQYJYIZIAYb4
-QgEBBAQDAgAHMIHjBgNVHR8EgdswgdgwgdWggdKggc+kgcwwgckxFDASBgNVBAoT
-C0VudHJ1c3QubmV0MT8wPQYDVQQLFDZ3d3cuZW50cnVzdC5uZXQvU1NMX0NQUyBp
-bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAyMDAw
-IEVudHJ1c3QubmV0IExpbWl0ZWQxOjA4BgNVBAMTMUVudHJ1c3QubmV0IFNlY3Vy
-ZSBTZXJ2ZXIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxDTALBgNVBAMTBENSTDEw
-KwYDVR0QBCQwIoAPMjAwMDAyMDQxNzIwMDBagQ8yMDIwMDIwNDE3NTAwMFowCwYD
-VR0PBAQDAgEGMB8GA1UdIwQYMBaAFMtswGvjuz7L/CKc/vuLkpyw8m4iMB0GA1Ud
-DgQWBBTLbMBr47s+y/winP77i5KcsPJuIjAMBgNVHRMEBTADAQH/MB0GCSqGSIb2
-fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQQFAAOBgQBi24GRzsia
-d0Iv7L0no1MPUBvqTpLwqa+poLpIYcvvyQbvH9X07t9WLebKahlzqlO+krNQAraF
-JnJj2HVQYnUUt7NQGj/KEQALhUVpbbalrlHhStyCP2yMNLJ3a9kC9n8O6mUE8c1U
-yrrJzOCE98g+EZfTYAkYvAX/bIkz8OwVDw==
------END CERTIFICATE-----
-
-subject= /O=Entrust.net/OU=www.entrust.net/GCCA_CPS incorp. by ref. (limits liab.)/OU=(c) 2000 Entrust.net Limited/CN=Entrust.net Client Certification Authority
-serial=389EF6E4
------BEGIN CERTIFICATE-----
-MIIEgzCCA+ygAwIBAgIEOJ725DANBgkqhkiG9w0BAQQFADCBtDEUMBIGA1UEChML
-RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9HQ0NBX0NQUyBp
-bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAyMDAw
-IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENsaWVu
-dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMDAyMDcxNjE2NDBaFw0yMDAy
-MDcxNjQ2NDBaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
-LmVudHJ1c3QubmV0L0dDQ0FfQ1BTIGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
-YWIuKTElMCMGA1UECxMcKGMpIDIwMDAgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
-A1UEAxMqRW50cnVzdC5uZXQgQ2xpZW50IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
-MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCTdLS25MVL1qFof2LV7PdRV7Ny
-Spj10InJrWPNTTVRaoTUrcloeW+46xHbh65cJFET8VQlhK8pK5/jgOLZy93GRUk0
-iJBeAZfv6lOm3fzB3ksqJeTpNfpVBQbliXrqpBFXO/x8PTbNZzVtpKklWb1m9fkn
-5JVn1j+SgF7yNH0rhQIDAQABo4IBnjCCAZowEQYJYIZIAYb4QgEBBAQDAgAHMIHd
-BgNVHR8EgdUwgdIwgc+ggcyggcmkgcYwgcMxFDASBgNVBAoTC0VudHJ1c3QubmV0
-MUAwPgYDVQQLFDd3d3cuZW50cnVzdC5uZXQvR0NDQV9DUFMgaW5jb3JwLiBieSBy
-ZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMjAwMCBFbnRydXN0Lm5l
-dCBMaW1pdGVkMTMwMQYDVQQDEypFbnRydXN0Lm5ldCBDbGllbnQgQ2VydGlmaWNh
-dGlvbiBBdXRob3JpdHkxDTALBgNVBAMTBENSTDEwKwYDVR0QBCQwIoAPMjAwMDAy
-MDcxNjE2NDBagQ8yMDIwMDIwNzE2NDY0MFowCwYDVR0PBAQDAgEGMB8GA1UdIwQY
-MBaAFISLdP3FjcD/J20gN0V8/i3OutN9MB0GA1UdDgQWBBSEi3T9xY3A/ydtIDdF
-fP4tzrrTfTAMBgNVHRMEBTADAQH/MB0GCSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4w
-AwIEkDANBgkqhkiG9w0BAQQFAAOBgQBObzWAO9GK9Q6nIMstZVXQkvTnhLUGJoMS
-hAusO7JE7r3PQNsgDrpuFOow4DtifH+La3xKp9U1PL6oXOpLu5OOgGarDyn9TS2/
-GpsKkMWr2tGzhtQvJFJcem3G8v7lTRowjJDyutdKPkN+1MhQGof4T4HHdguEOnKd
-zmVml64mXg==
------END CERTIFICATE-----
-
 subject= /C=US/O=Entrust, Inc./OU=www.entrust.net/CPS is incorporated by reference/OU=(c) 2006 Entrust, Inc./CN=Entrust Root Certification Authority
 serial=456B5054
 -----BEGIN CERTIFICATE-----
@@ -942,69 +807,6 @@
 0vdXcDazv/wor3ElhVsT/h5/WrQ8
 -----END CERTIFICATE-----
 
-subject= /C=US/O=AOL Time Warner Inc./OU=America Online Inc./CN=AOL Time Warner Root Certification Authority 1
-serial=01
------BEGIN CERTIFICATE-----
-MIID5jCCAs6gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzELMAkGA1UEBhMCVVMx
-HTAbBgNVBAoTFEFPTCBUaW1lIFdhcm5lciBJbmMuMRwwGgYDVQQLExNBbWVyaWNh
-IE9ubGluZSBJbmMuMTcwNQYDVQQDEy5BT0wgVGltZSBXYXJuZXIgUm9vdCBDZXJ0
-aWZpY2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyOTA2MDAwMFoXDTM3MTEyMDE1
-MDMwMFowgYMxCzAJBgNVBAYTAlVTMR0wGwYDVQQKExRBT0wgVGltZSBXYXJuZXIg
-SW5jLjEcMBoGA1UECxMTQW1lcmljYSBPbmxpbmUgSW5jLjE3MDUGA1UEAxMuQU9M
-IFRpbWUgV2FybmVyIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIw
-DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnej8Mlo2k06AX3dLm/WpcZuS+U
-0pPlLYnKhHw/EEMbjIt8hFj4JHxIzyr9wBXZGH6EGhfT257XyuTZ16pYUYfw8ItI
-TuLCxFlpMGK2MKKMCxGZYTVtfu/FsRkGIBKOQuHfD5YQUqjPnF+VFNivO3ULMSAf
-RC+iYkGzuxgh28pxPIzstrkNn+9R7017EvILDOGsQI93f7DKeHEMXRZxcKLXwjqF
-zQ6axOAAsNUl6twr5JQtOJyJQVdkKGUZHLZEtMgxa44Be3ZZJX8VHIQIfHNlIAqh
-BC4aMqiaILGcLCFZ5/vP7nAtCMpjPiybkxlqpMKX/7eGV4iFbJ4VFitNLLMCAwEA
-AaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUoTYwFsuGkABFgFOxj8jY
-PXy+XxIwHwYDVR0jBBgwFoAUoTYwFsuGkABFgFOxj8jYPXy+XxIwDgYDVR0PAQH/
-BAQDAgGGMA0GCSqGSIb3DQEBBQUAA4IBAQCKIBilvrMvtKaEAEAwKfq0FHNMeUWn
-9nDg6H5kHgqVfGphwu9OH77/yZkfB2FK4V1Mza3u0FIy2VkyvNp5ctZ7CegCgTXT
-Ct8RHcl5oIBN/lrXVtbtDyqvpxh1MwzqwWEFT2qaifKNuZ8u77BfWgDrvq2g+EQF
-Z7zLBO+eZMXpyD8Fv8YvBxzDNnGGyjhmSs3WuEvGbKeXO/oTLW4jYYehY0KswsuX
-n2Fozy1MBJ3XJU8KDk2QixhWqJNIV9xvrr2eZ1d3iVCzvhGbRWeDhhmH05i9CBoW
-H1iCC+GWaQVLjuyDUTEH1dSf/1l7qG6Fz9NLqUmwX7A5KGgOc90lmt4S
------END CERTIFICATE-----
-
-subject= /C=US/O=AOL Time Warner Inc./OU=America Online Inc./CN=AOL Time Warner Root Certification Authority 2
-serial=01
------BEGIN CERTIFICATE-----
-MIIF5jCCA86gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBgzELMAkGA1UEBhMCVVMx
-HTAbBgNVBAoTFEFPTCBUaW1lIFdhcm5lciBJbmMuMRwwGgYDVQQLExNBbWVyaWNh
-IE9ubGluZSBJbmMuMTcwNQYDVQQDEy5BT0wgVGltZSBXYXJuZXIgUm9vdCBDZXJ0
-aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyOTA2MDAwMFoXDTM3MDkyODIz
-NDMwMFowgYMxCzAJBgNVBAYTAlVTMR0wGwYDVQQKExRBT0wgVGltZSBXYXJuZXIg
-SW5jLjEcMBoGA1UECxMTQW1lcmljYSBPbmxpbmUgSW5jLjE3MDUGA1UEAxMuQU9M
-IFRpbWUgV2FybmVyIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIw
-DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALQ3WggWmRToVbEbJGv8x4vmh6mJ
-7ouZzU9AhqS2TcnZsdw8TQ2FTBVsRotSeJ/4I/1n9SQ6aF3Q92RhQVSji6UI0ilb
-m2BPJoPRYxJWSXakFsKlnUWsi4SVqBax7J/qJBrvuVdcmiQhLE0OcR+mrF1FdAOY
-xFSMFkpBd4aVdQxHAWZg/BXxD+r1FHjHDtdugRxev17nOirYlxcwfACtCJ0zr7iZ
-YYCLqJV+FNwSbKTQ2O9ASQI2+W6p1h2WVgSysy0WVoaP2SBXgM1nEG2wTPDaRrbq
-JS5Gr42whTg0ixQmgiusrpkLjhTXUr2eacOGAgvqdnUxCc4zGSGFQ+aJLZ8lN2fx
-I2rSAG2X+Z/nKcrdH9cG6rjJuQkhn8g/BsXS6RJGAE57COtCPStIbp1n3UsC5ETz
-kxmlJ85per5n0/xQpCyrw2u544BMzwVhSyvcG7mm0tCq9Stz+86QNZ8MUhy/XCFh
-EVsVS6kkUfykXPcXnbDS+gfpj1bkGoxoigTTfFrjnqKhynFbotSg5ymFXQNoKk/S
-Btc9+cMDLz9l+WceR0DTYw/j1Y75hauXTLPXJuuWCpTehTacyH+BCQJJKg71ZDIM
-gtG6aoIbs0t0EfOMd9afv9w3pKdVBC/UMejTRrkDfNoSTllkt1ExMVCgyhwn2RAu
-rda9EGYrw7AiShJbAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE
-FE9pbQN+nZ8HGEO8txBO1b+pxCAoMB8GA1UdIwQYMBaAFE9pbQN+nZ8HGEO8txBO
-1b+pxCAoMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAO/Ouyugu
-h4X7ZVnnrREUpVe8WJ8kEle7+z802u6teio0cnAxa8cZmIDJgt43d15Ui47y6mdP
-yXSEkVYJ1eV6moG2gcKtNuTxVBFT8zRFASbI5Rq8NEQh3q0l/HYWdyGQgJhXnU7q
-7C+qPBR7V8F+GBRn7iTGvboVsNIYvbdVgaxTwOjdaRITQrcCtQVBynlQboIOcXKT
-RuidDV29rs4prWPVVRaAMCf/drr3uNZK49m1+VLQTkCpx+XCMseqdiThawVQ68W/
-ClTluUI8JPu3B5wwn3la5uBAUhX0/Kr0VvlEl4ftDmVyXr4m+02kLQgH3thcoNyB
-M5kYJRF3p+v9WAksmWsbivNSPxpNSGDxoPYzAlOL7SUJuA0t7Zdz7NeWH45gDtoQ
-my8YJPamTQr5O8t1wswvziRpyQoijlmn94IM19drNZxDAGrElWe6nEXLuA4399xO
-AU++CrYD062KRffaJ00psUjf5BHklka9bAI+1lHIlRcBFanyqqryvy9lG2/QuRqT
-9Y41xICHPpQvZuTpqP9BnHAqTyo5GJUefvthATxRCC4oGKQWDzH9OmwjkyB24f0H
-hdFbP9IcczLd+rn4jM8Ch3qaluTtT4mNU0OrDhPAARW0eTjb/G49nlG2uBOLZ8/5
-fNkiHfZdxRwBL5joeiQYvITX+txyW/fBOmg=
------END CERTIFICATE-----
-
 subject= /O=RSA Security Inc/OU=RSA Security 2048 V3
 serial=0A0101010000027C0000000A00000002
 -----BEGIN CERTIFICATE-----
@@ -1365,295 +1167,6 @@
 dBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O9y5Xt5hwXsjEeLBi
 -----END CERTIFICATE-----
 
-subject= /C=ES/ST=Barcelona/L=Barcelona/O=IPS Internet publishing Services s.l./O=ips@mail.ips.es C.I.F.  B-60929452/OU=IPS CA Chained CAs Certification Authority/CN=IPS CA Chained CAs Certification Authority/emailAddress=ips@mail.ips.es
-serial=00
------BEGIN CERTIFICATE-----
-MIIH9zCCB2CgAwIBAgIBADANBgkqhkiG9w0BAQUFADCCARwxCzAJBgNVBAYTAkVT
-MRIwEAYDVQQIEwlCYXJjZWxvbmExEjAQBgNVBAcTCUJhcmNlbG9uYTEuMCwGA1UE
-ChMlSVBTIEludGVybmV0IHB1Ymxpc2hpbmcgU2VydmljZXMgcy5sLjErMCkGA1UE
-ChQiaXBzQG1haWwuaXBzLmVzIEMuSS5GLiAgQi02MDkyOTQ1MjEzMDEGA1UECxMq
-SVBTIENBIENoYWluZWQgQ0FzIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MTMwMQYD
-VQQDEypJUFMgQ0EgQ2hhaW5lZCBDQXMgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkx
-HjAcBgkqhkiG9w0BCQEWD2lwc0BtYWlsLmlwcy5lczAeFw0wMTEyMjkwMDUzNTha
-Fw0yNTEyMjcwMDUzNThaMIIBHDELMAkGA1UEBhMCRVMxEjAQBgNVBAgTCUJhcmNl
-bG9uYTESMBAGA1UEBxMJQmFyY2Vsb25hMS4wLAYDVQQKEyVJUFMgSW50ZXJuZXQg
-cHVibGlzaGluZyBTZXJ2aWNlcyBzLmwuMSswKQYDVQQKFCJpcHNAbWFpbC5pcHMu
-ZXMgQy5JLkYuICBCLTYwOTI5NDUyMTMwMQYDVQQLEypJUFMgQ0EgQ2hhaW5lZCBD
-QXMgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxMzAxBgNVBAMTKklQUyBDQSBDaGFp
-bmVkIENBcyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEeMBwGCSqGSIb3DQEJARYP
-aXBzQG1haWwuaXBzLmVzMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDcVpJJ
-spQgvJhPUOtopKdJC7/SMejHT8KGC/po/UNaivNgkjWZOLtNA1IhW/A3mTXhQSCB
-hYEFcYGdtJUZqV92NC5jNzVXjrQfQj8VXOF6wV8TGDIxya2+o8eDZh65nAQTy2nB
-Bt4wBrszo7Uf8I9vzv+W6FS+ZoCua9tBhDaiPQIDAQABo4IEQzCCBD8wHQYDVR0O
-BBYEFKGtMbH5PuEXpsirNPxShwkeYlJBMIIBTgYDVR0jBIIBRTCCAUGAFKGtMbH5
-PuEXpsirNPxShwkeYlJBoYIBJKSCASAwggEcMQswCQYDVQQGEwJFUzESMBAGA1UE
-CBMJQmFyY2Vsb25hMRIwEAYDVQQHEwlCYXJjZWxvbmExLjAsBgNVBAoTJUlQUyBJ
-bnRlcm5ldCBwdWJsaXNoaW5nIFNlcnZpY2VzIHMubC4xKzApBgNVBAoUImlwc0Bt
-YWlsLmlwcy5lcyBDLkkuRi4gIEItNjA5Mjk0NTIxMzAxBgNVBAsTKklQUyBDQSBD
-aGFpbmVkIENBcyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEzMDEGA1UEAxMqSVBT
-IENBIENoYWluZWQgQ0FzIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MR4wHAYJKoZI
-hvcNAQkBFg9pcHNAbWFpbC5pcHMuZXOCAQAwDAYDVR0TBAUwAwEB/zAMBgNVHQ8E
-BQMDB/+AMGsGA1UdJQRkMGIGCCsGAQUFBwMBBggrBgEFBQcDAgYIKwYBBQUHAwMG
-CCsGAQUFBwMEBggrBgEFBQcDCAYKKwYBBAGCNwIBFQYKKwYBBAGCNwIBFgYKKwYB
-BAGCNwoDAQYKKwYBBAGCNwoDBDARBglghkgBhvhCAQEEBAMCAAcwGgYDVR0RBBMw
-EYEPaXBzQG1haWwuaXBzLmVzMBoGA1UdEgQTMBGBD2lwc0BtYWlsLmlwcy5lczBC
-BglghkgBhvhCAQ0ENRYzQ2hhaW5lZCBDQSBDZXJ0aWZpY2F0ZSBpc3N1ZWQgYnkg
-aHR0cDovL3d3dy5pcHMuZXMvMCkGCWCGSAGG+EIBAgQcFhpodHRwOi8vd3d3Lmlw
-cy5lcy9pcHMyMDAyLzA3BglghkgBhvhCAQQEKhYoaHR0cDovL3d3dy5pcHMuZXMv
-aXBzMjAwMi9pcHMyMDAyQ0FDLmNybDA8BglghkgBhvhCAQMELxYtaHR0cDovL3d3
-dy5pcHMuZXMvaXBzMjAwMi9yZXZvY2F0aW9uQ0FDLmh0bWw/MDkGCWCGSAGG+EIB
-BwQsFipodHRwOi8vd3d3Lmlwcy5lcy9pcHMyMDAyL3JlbmV3YWxDQUMuaHRtbD8w
-NwYJYIZIAYb4QgEIBCoWKGh0dHA6Ly93d3cuaXBzLmVzL2lwczIwMDIvcG9saWN5
-Q0FDLmh0bWwwbQYDVR0fBGYwZDAuoCygKoYoaHR0cDovL3d3dy5pcHMuZXMvaXBz
-MjAwMi9pcHMyMDAyQ0FDLmNybDAyoDCgLoYsaHR0cDovL3d3d2JhY2suaXBzLmVz
-L2lwczIwMDIvaXBzMjAwMkNBQy5jcmwwLwYIKwYBBQUHAQEEIzAhMB8GCCsGAQUF
-BzABhhNodHRwOi8vb2NzcC5pcHMuZXMvMA0GCSqGSIb3DQEBBQUAA4GBAERyMJ1W
-WKJBGyi3leGmGpVfp3hAK+/blkr8THFj2XOVvQLiogbHvpcqk4A0hgP63Ng9HgfN
-HnNDJGD1HWHc3JagvPsd4+cSACczAsDAK1M92GsDgaPb1pOVIO/Tln4mkImcJpvN
-b2ar7QMiRDjMWb2f2/YHogF/JsRj9SVCXmK9
------END CERTIFICATE-----
-
-subject= /C=ES/ST=Barcelona/L=Barcelona/O=IPS Internet publishing Services s.l./O=ips@mail.ips.es C.I.F.  B-60929452/OU=IPS CA CLASE1 Certification Authority/CN=IPS CA CLASE1 Certification Authority/emailAddress=ips@mail.ips.es
-serial=00
------BEGIN CERTIFICATE-----
-MIIH6jCCB1OgAwIBAgIBADANBgkqhkiG9w0BAQUFADCCARIxCzAJBgNVBAYTAkVT
-MRIwEAYDVQQIEwlCYXJjZWxvbmExEjAQBgNVBAcTCUJhcmNlbG9uYTEuMCwGA1UE
-ChMlSVBTIEludGVybmV0IHB1Ymxpc2hpbmcgU2VydmljZXMgcy5sLjErMCkGA1UE
-ChQiaXBzQG1haWwuaXBzLmVzIEMuSS5GLiAgQi02MDkyOTQ1MjEuMCwGA1UECxMl
-SVBTIENBIENMQVNFMSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMl
-SVBTIENBIENMQVNFMSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEeMBwGCSqGSIb3
-DQEJARYPaXBzQG1haWwuaXBzLmVzMB4XDTAxMTIyOTAwNTkzOFoXDTI1MTIyNzAw
-NTkzOFowggESMQswCQYDVQQGEwJFUzESMBAGA1UECBMJQmFyY2Vsb25hMRIwEAYD
-VQQHEwlCYXJjZWxvbmExLjAsBgNVBAoTJUlQUyBJbnRlcm5ldCBwdWJsaXNoaW5n
-IFNlcnZpY2VzIHMubC4xKzApBgNVBAoUImlwc0BtYWlsLmlwcy5lcyBDLkkuRi4g
-IEItNjA5Mjk0NTIxLjAsBgNVBAsTJUlQUyBDQSBDTEFTRTEgQ2VydGlmaWNhdGlv
-biBBdXRob3JpdHkxLjAsBgNVBAMTJUlQUyBDQSBDTEFTRTEgQ2VydGlmaWNhdGlv
-biBBdXRob3JpdHkxHjAcBgkqhkiG9w0BCQEWD2lwc0BtYWlsLmlwcy5lczCBnzAN
-BgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4FEnpwvdr9G5Q1uCN0VWcu+atsIS7ywS
-zHb5BlmvXSHU0lq4oNTzav3KaY1mSPd05u42veiWkXWmcSjK5yISMmmwPh5r9FBS
-YmL9Yzt9fuzuOOpi9GyocY3h6YvJP8a1zZRCb92CRTzo3wno7wpVqVZHYUxJZHMQ
-KD/Kvwn/xi8CAwEAAaOCBEowggRGMB0GA1UdDgQWBBTrsxl588GlHKzcuh9morKb
-adB4CDCCAUQGA1UdIwSCATswggE3gBTrsxl588GlHKzcuh9morKbadB4CKGCARqk
-ggEWMIIBEjELMAkGA1UEBhMCRVMxEjAQBgNVBAgTCUJhcmNlbG9uYTESMBAGA1UE
-BxMJQmFyY2Vsb25hMS4wLAYDVQQKEyVJUFMgSW50ZXJuZXQgcHVibGlzaGluZyBT
-ZXJ2aWNlcyBzLmwuMSswKQYDVQQKFCJpcHNAbWFpbC5pcHMuZXMgQy5JLkYuICBC
-LTYwOTI5NDUyMS4wLAYDVQQLEyVJUFMgQ0EgQ0xBU0UxIENlcnRpZmljYXRpb24g
-QXV0aG9yaXR5MS4wLAYDVQQDEyVJUFMgQ0EgQ0xBU0UxIENlcnRpZmljYXRpb24g
-QXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9pcHNAbWFpbC5pcHMuZXOCAQAwDAYD
-VR0TBAUwAwEB/zAMBgNVHQ8EBQMDB/+AMGsGA1UdJQRkMGIGCCsGAQUFBwMBBggr
-BgEFBQcDAgYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYKKwYBBAGCNwIB
-FQYKKwYBBAGCNwIBFgYKKwYBBAGCNwoDAQYKKwYBBAGCNwoDBDARBglghkgBhvhC
-AQEEBAMCAAcwGgYDVR0RBBMwEYEPaXBzQG1haWwuaXBzLmVzMBoGA1UdEgQTMBGB
-D2lwc0BtYWlsLmlwcy5lczBBBglghkgBhvhCAQ0ENBYyQ0xBU0UxIENBIENlcnRp
-ZmljYXRlIGlzc3VlZCBieSBodHRwOi8vd3d3Lmlwcy5lcy8wKQYJYIZIAYb4QgEC
-BBwWGmh0dHA6Ly93d3cuaXBzLmVzL2lwczIwMDIvMDoGCWCGSAGG+EIBBAQtFito
-dHRwOi8vd3d3Lmlwcy5lcy9pcHMyMDAyL2lwczIwMDJDTEFTRTEuY3JsMD8GCWCG
-SAGG+EIBAwQyFjBodHRwOi8vd3d3Lmlwcy5lcy9pcHMyMDAyL3Jldm9jYXRpb25D
-TEFTRTEuaHRtbD8wPAYJYIZIAYb4QgEHBC8WLWh0dHA6Ly93d3cuaXBzLmVzL2lw
-czIwMDIvcmVuZXdhbENMQVNFMS5odG1sPzA6BglghkgBhvhCAQgELRYraHR0cDov
-L3d3dy5pcHMuZXMvaXBzMjAwMi9wb2xpY3lDTEFTRTEuaHRtbDBzBgNVHR8EbDBq
-MDGgL6AthitodHRwOi8vd3d3Lmlwcy5lcy9pcHMyMDAyL2lwczIwMDJDTEFTRTEu
-Y3JsMDWgM6Axhi9odHRwOi8vd3d3YmFjay5pcHMuZXMvaXBzMjAwMi9pcHMyMDAy
-Q0xBU0UxLmNybDAvBggrBgEFBQcBAQQjMCEwHwYIKwYBBQUHMAGGE2h0dHA6Ly9v
-Y3NwLmlwcy5lcy8wDQYJKoZIhvcNAQEFBQADgYEAK9Dr/drIyllq2tPMMi7JVBuK
-Yn4VLenZMdMu9Ccj/1urxUq2ckCuU3T0vAW0xtnIyXf7t/k0f3gA+Nak5FI/LEpj
-V4F1Wo7ojPsCwJTGKbqz3Bzosq/SLmJbGqmODszFV0VRFOlOHIilkfSj945RyKm+
-hjM+5i9Ibq9UkE6tsSU=
------END CERTIFICATE-----
-
-subject= /C=ES/ST=Barcelona/L=Barcelona/O=IPS Internet publishing Services s.l./O=ips@mail.ips.es C.I.F.  B-60929452/OU=IPS CA CLASE3 Certification Authority/CN=IPS CA CLASE3 Certification Authority/emailAddress=ips@mail.ips.es
-serial=00
------BEGIN CERTIFICATE-----
-MIIH6jCCB1OgAwIBAgIBADANBgkqhkiG9w0BAQUFADCCARIxCzAJBgNVBAYTAkVT
-MRIwEAYDVQQIEwlCYXJjZWxvbmExEjAQBgNVBAcTCUJhcmNlbG9uYTEuMCwGA1UE
-ChMlSVBTIEludGVybmV0IHB1Ymxpc2hpbmcgU2VydmljZXMgcy5sLjErMCkGA1UE
-ChQiaXBzQG1haWwuaXBzLmVzIEMuSS5GLiAgQi02MDkyOTQ1MjEuMCwGA1UECxMl
-SVBTIENBIENMQVNFMyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMl
-SVBTIENBIENMQVNFMyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEeMBwGCSqGSIb3
-DQEJARYPaXBzQG1haWwuaXBzLmVzMB4XDTAxMTIyOTAxMDE0NFoXDTI1MTIyNzAx
-MDE0NFowggESMQswCQYDVQQGEwJFUzESMBAGA1UECBMJQmFyY2Vsb25hMRIwEAYD
-VQQHEwlCYXJjZWxvbmExLjAsBgNVBAoTJUlQUyBJbnRlcm5ldCBwdWJsaXNoaW5n
-IFNlcnZpY2VzIHMubC4xKzApBgNVBAoUImlwc0BtYWlsLmlwcy5lcyBDLkkuRi4g
-IEItNjA5Mjk0NTIxLjAsBgNVBAsTJUlQUyBDQSBDTEFTRTMgQ2VydGlmaWNhdGlv
-biBBdXRob3JpdHkxLjAsBgNVBAMTJUlQUyBDQSBDTEFTRTMgQ2VydGlmaWNhdGlv
-biBBdXRob3JpdHkxHjAcBgkqhkiG9w0BCQEWD2lwc0BtYWlsLmlwcy5lczCBnzAN
-BgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAqxf+DrDGaBtT8FK+n/ra+osTBLsBjzLZ
-H49NzjaY2uQARIwo2BNEKqRrThckQpzTiKRBgtYj+4vJhuW5qYIF3PHeH+AMmVWY
-8jjsbJ0gA8DvqqPGZARRLXgNo9KoOtYkTOmWehisEyMiG3zoMRGzXwmqMHBxRiVr
-SXGAK5UBsh8CAwEAAaOCBEowggRGMB0GA1UdDgQWBBS4k/8uy9wsjqLnev42USGj
-mFsMNDCCAUQGA1UdIwSCATswggE3gBS4k/8uy9wsjqLnev42USGjmFsMNKGCARqk
-ggEWMIIBEjELMAkGA1UEBhMCRVMxEjAQBgNVBAgTCUJhcmNlbG9uYTESMBAGA1UE
-BxMJQmFyY2Vsb25hMS4wLAYDVQQKEyVJUFMgSW50ZXJuZXQgcHVibGlzaGluZyBT
-ZXJ2aWNlcyBzLmwuMSswKQYDVQQKFCJpcHNAbWFpbC5pcHMuZXMgQy5JLkYuICBC
-LTYwOTI5NDUyMS4wLAYDVQQLEyVJUFMgQ0EgQ0xBU0UzIENlcnRpZmljYXRpb24g
-QXV0aG9yaXR5MS4wLAYDVQQDEyVJUFMgQ0EgQ0xBU0UzIENlcnRpZmljYXRpb24g
-QXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9pcHNAbWFpbC5pcHMuZXOCAQAwDAYD
-VR0TBAUwAwEB/zAMBgNVHQ8EBQMDB/+AMGsGA1UdJQRkMGIGCCsGAQUFBwMBBggr
-BgEFBQcDAgYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYKKwYBBAGCNwIB
-FQYKKwYBBAGCNwIBFgYKKwYBBAGCNwoDAQYKKwYBBAGCNwoDBDARBglghkgBhvhC
-AQEEBAMCAAcwGgYDVR0RBBMwEYEPaXBzQG1haWwuaXBzLmVzMBoGA1UdEgQTMBGB
-D2lwc0BtYWlsLmlwcy5lczBBBglghkgBhvhCAQ0ENBYyQ0xBU0UzIENBIENlcnRp
-ZmljYXRlIGlzc3VlZCBieSBodHRwOi8vd3d3Lmlwcy5lcy8wKQYJYIZIAYb4QgEC
-BBwWGmh0dHA6Ly93d3cuaXBzLmVzL2lwczIwMDIvMDoGCWCGSAGG+EIBBAQtFito
-dHRwOi8vd3d3Lmlwcy5lcy9pcHMyMDAyL2lwczIwMDJDTEFTRTMuY3JsMD8GCWCG
-SAGG+EIBAwQyFjBodHRwOi8vd3d3Lmlwcy5lcy9pcHMyMDAyL3Jldm9jYXRpb25D
-TEFTRTMuaHRtbD8wPAYJYIZIAYb4QgEHBC8WLWh0dHA6Ly93d3cuaXBzLmVzL2lw
-czIwMDIvcmVuZXdhbENMQVNFMy5odG1sPzA6BglghkgBhvhCAQgELRYraHR0cDov
-L3d3dy5pcHMuZXMvaXBzMjAwMi9wb2xpY3lDTEFTRTMuaHRtbDBzBgNVHR8EbDBq
-MDGgL6AthitodHRwOi8vd3d3Lmlwcy5lcy9pcHMyMDAyL2lwczIwMDJDTEFTRTMu
-Y3JsMDWgM6Axhi9odHRwOi8vd3d3YmFjay5pcHMuZXMvaXBzMjAwMi9pcHMyMDAy
-Q0xBU0UzLmNybDAvBggrBgEFBQcBAQQjMCEwHwYIKwYBBQUHMAGGE2h0dHA6Ly9v
-Y3NwLmlwcy5lcy8wDQYJKoZIhvcNAQEFBQADgYEAF2VcmZVDAyevJuXr0LMXI/dD
-qsfwfewPxqmurpYPdikc4gYtfibFPPqhwYHOU7BC0ZdXGhd+pFFhxu7pXu8Fuuu9
-D6eSb9ijBmgpjnn1/7/5p6/ksc7C0YBCJwUENPjDfxZ4IwwHJPJGR607VNCv1TGy
-r33I6unUVtkOE7LFRVA=
------END CERTIFICATE-----
-
-subject= /C=ES/ST=Barcelona/L=Barcelona/O=IPS Internet publishing Services s.l./O=ips@mail.ips.es C.I.F.  B-60929452/OU=IPS CA CLASEA1 Certification Authority/CN=IPS CA CLASEA1 Certification Authority/emailAddress=ips@mail.ips.es
-serial=00
------BEGIN CERTIFICATE-----
-MIIH9zCCB2CgAwIBAgIBADANBgkqhkiG9w0BAQUFADCCARQxCzAJBgNVBAYTAkVT
-MRIwEAYDVQQIEwlCYXJjZWxvbmExEjAQBgNVBAcTCUJhcmNlbG9uYTEuMCwGA1UE
-ChMlSVBTIEludGVybmV0IHB1Ymxpc2hpbmcgU2VydmljZXMgcy5sLjErMCkGA1UE
-ChQiaXBzQG1haWwuaXBzLmVzIEMuSS5GLiAgQi02MDkyOTQ1MjEvMC0GA1UECxMm
-SVBTIENBIENMQVNFQTEgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxLzAtBgNVBAMT
-JklQUyBDQSBDTEFTRUExIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MR4wHAYJKoZI
-hvcNAQkBFg9pcHNAbWFpbC5pcHMuZXMwHhcNMDExMjI5MDEwNTMyWhcNMjUxMjI3
-MDEwNTMyWjCCARQxCzAJBgNVBAYTAkVTMRIwEAYDVQQIEwlCYXJjZWxvbmExEjAQ
-BgNVBAcTCUJhcmNlbG9uYTEuMCwGA1UEChMlSVBTIEludGVybmV0IHB1Ymxpc2hp
-bmcgU2VydmljZXMgcy5sLjErMCkGA1UEChQiaXBzQG1haWwuaXBzLmVzIEMuSS5G
-LiAgQi02MDkyOTQ1MjEvMC0GA1UECxMmSVBTIENBIENMQVNFQTEgQ2VydGlmaWNh
-dGlvbiBBdXRob3JpdHkxLzAtBgNVBAMTJklQUyBDQSBDTEFTRUExIENlcnRpZmlj
-YXRpb24gQXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9pcHNAbWFpbC5pcHMuZXMw
-gZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALsw19zQVL01Tp/FTILq0VA8R5j8
-m2mdd81u4D/u6zJfX5/S0HnllXNEITLgCtud186Nq1KLK3jgm1t99P1tCeWu4Wwd
-ByOgF9H5fahGRpEiqLJpxq339fWUoTCUvQDMRH/uxJ7JweaPCjbB/SQ9AaD1e+J8
-eGZDi09Z8pvZ+kmzAgMBAAGjggRTMIIETzAdBgNVHQ4EFgQUZyaW56G/2LUDnf47
-3P7yiuYV3TAwggFGBgNVHSMEggE9MIIBOYAUZyaW56G/2LUDnf473P7yiuYV3TCh
-ggEcpIIBGDCCARQxCzAJBgNVBAYTAkVTMRIwEAYDVQQIEwlCYXJjZWxvbmExEjAQ
-BgNVBAcTCUJhcmNlbG9uYTEuMCwGA1UEChMlSVBTIEludGVybmV0IHB1Ymxpc2hp
-bmcgU2VydmljZXMgcy5sLjErMCkGA1UEChQiaXBzQG1haWwuaXBzLmVzIEMuSS5G
-LiAgQi02MDkyOTQ1MjEvMC0GA1UECxMmSVBTIENBIENMQVNFQTEgQ2VydGlmaWNh
-dGlvbiBBdXRob3JpdHkxLzAtBgNVBAMTJklQUyBDQSBDTEFTRUExIENlcnRpZmlj
-YXRpb24gQXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9pcHNAbWFpbC5pcHMuZXOC
-AQAwDAYDVR0TBAUwAwEB/zAMBgNVHQ8EBQMDB/+AMGsGA1UdJQRkMGIGCCsGAQUF
-BwMBBggrBgEFBQcDAgYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYKKwYB
-BAGCNwIBFQYKKwYBBAGCNwIBFgYKKwYBBAGCNwoDAQYKKwYBBAGCNwoDBDARBglg
-hkgBhvhCAQEEBAMCAAcwGgYDVR0RBBMwEYEPaXBzQG1haWwuaXBzLmVzMBoGA1Ud
-EgQTMBGBD2lwc0BtYWlsLmlwcy5lczBCBglghkgBhvhCAQ0ENRYzQ0xBU0VBMSBD
-QSBDZXJ0aWZpY2F0ZSBpc3N1ZWQgYnkgaHR0cDovL3d3dy5pcHMuZXMvMCkGCWCG
-SAGG+EIBAgQcFhpodHRwOi8vd3d3Lmlwcy5lcy9pcHMyMDAyLzA7BglghkgBhvhC
-AQQELhYsaHR0cDovL3d3dy5pcHMuZXMvaXBzMjAwMi9pcHMyMDAyQ0xBU0VBMS5j
-cmwwQAYJYIZIAYb4QgEDBDMWMWh0dHA6Ly93d3cuaXBzLmVzL2lwczIwMDIvcmV2
-b2NhdGlvbkNMQVNFQTEuaHRtbD8wPQYJYIZIAYb4QgEHBDAWLmh0dHA6Ly93d3cu
-aXBzLmVzL2lwczIwMDIvcmVuZXdhbENMQVNFQTEuaHRtbD8wOwYJYIZIAYb4QgEI
-BC4WLGh0dHA6Ly93d3cuaXBzLmVzL2lwczIwMDIvcG9saWN5Q0xBU0VBMS5odG1s
-MHUGA1UdHwRuMGwwMqAwoC6GLGh0dHA6Ly93d3cuaXBzLmVzL2lwczIwMDIvaXBz
-MjAwMkNMQVNFQTEuY3JsMDagNKAyhjBodHRwOi8vd3d3YmFjay5pcHMuZXMvaXBz
-MjAwMi9pcHMyMDAyQ0xBU0VBMS5jcmwwLwYIKwYBBQUHAQEEIzAhMB8GCCsGAQUF
-BzABhhNodHRwOi8vb2NzcC5pcHMuZXMvMA0GCSqGSIb3DQEBBQUAA4GBAH66iqyA
-AIQVCtWYUQxkxZwCWINmyq0eB81+atqAB98DNEock8RLWCA1NnHtogo1EqWmZaeF
-aQoO42Hu6r4okzPV7Oi+xNtff6j5YzHIa5biKcJboOeXNp13XjFr/tOn2yrb25aL
-H2betgPAK7N41lUH5Y85UN4HI3LmvSAUS7SG
------END CERTIFICATE-----
-
-subject= /C=ES/ST=Barcelona/L=Barcelona/O=IPS Internet publishing Services s.l./O=ips@mail.ips.es C.I.F.  B-60929452/OU=IPS CA CLASEA3 Certification Authority/CN=IPS CA CLASEA3 Certification Authority/emailAddress=ips@mail.ips.es
-serial=00
------BEGIN CERTIFICATE-----
-MIIH9zCCB2CgAwIBAgIBADANBgkqhkiG9w0BAQUFADCCARQxCzAJBgNVBAYTAkVT
-MRIwEAYDVQQIEwlCYXJjZWxvbmExEjAQBgNVBAcTCUJhcmNlbG9uYTEuMCwGA1UE
-ChMlSVBTIEludGVybmV0IHB1Ymxpc2hpbmcgU2VydmljZXMgcy5sLjErMCkGA1UE
-ChQiaXBzQG1haWwuaXBzLmVzIEMuSS5GLiAgQi02MDkyOTQ1MjEvMC0GA1UECxMm
-SVBTIENBIENMQVNFQTMgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxLzAtBgNVBAMT
-JklQUyBDQSBDTEFTRUEzIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MR4wHAYJKoZI
-hvcNAQkBFg9pcHNAbWFpbC5pcHMuZXMwHhcNMDExMjI5MDEwNzUwWhcNMjUxMjI3
-MDEwNzUwWjCCARQxCzAJBgNVBAYTAkVTMRIwEAYDVQQIEwlCYXJjZWxvbmExEjAQ
-BgNVBAcTCUJhcmNlbG9uYTEuMCwGA1UEChMlSVBTIEludGVybmV0IHB1Ymxpc2hp
-bmcgU2VydmljZXMgcy5sLjErMCkGA1UEChQiaXBzQG1haWwuaXBzLmVzIEMuSS5G
-LiAgQi02MDkyOTQ1MjEvMC0GA1UECxMmSVBTIENBIENMQVNFQTMgQ2VydGlmaWNh
-dGlvbiBBdXRob3JpdHkxLzAtBgNVBAMTJklQUyBDQSBDTEFTRUEzIENlcnRpZmlj
-YXRpb24gQXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9pcHNAbWFpbC5pcHMuZXMw
-gZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAO6AAPYaZC6tasiDsYun7o/ZttvN
-G7uGBiJ2MwwSbUhWYdLcgiViL5/SaTBlA0IjWLxH3GvWdV0XPOH/8lhneaDBgbHU
-VqLyjRGZ/fZ98cfEXgIqmuJKtROKAP2Md4bm15T1IHUuDky/dMQ/gT6DtKM4Ninn
-6Cr1jIhBqoCm42zvAgMBAAGjggRTMIIETzAdBgNVHQ4EFgQUHp9XUEe2YZM50yz8
-2l09BXW3mQIwggFGBgNVHSMEggE9MIIBOYAUHp9XUEe2YZM50yz82l09BXW3mQKh
-ggEcpIIBGDCCARQxCzAJBgNVBAYTAkVTMRIwEAYDVQQIEwlCYXJjZWxvbmExEjAQ
-BgNVBAcTCUJhcmNlbG9uYTEuMCwGA1UEChMlSVBTIEludGVybmV0IHB1Ymxpc2hp
-bmcgU2VydmljZXMgcy5sLjErMCkGA1UEChQiaXBzQG1haWwuaXBzLmVzIEMuSS5G
-LiAgQi02MDkyOTQ1MjEvMC0GA1UECxMmSVBTIENBIENMQVNFQTMgQ2VydGlmaWNh
-dGlvbiBBdXRob3JpdHkxLzAtBgNVBAMTJklQUyBDQSBDTEFTRUEzIENlcnRpZmlj
-YXRpb24gQXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9pcHNAbWFpbC5pcHMuZXOC
-AQAwDAYDVR0TBAUwAwEB/zAMBgNVHQ8EBQMDB/+AMGsGA1UdJQRkMGIGCCsGAQUF
-BwMBBggrBgEFBQcDAgYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYKKwYB
-BAGCNwIBFQYKKwYBBAGCNwIBFgYKKwYBBAGCNwoDAQYKKwYBBAGCNwoDBDARBglg
-hkgBhvhCAQEEBAMCAAcwGgYDVR0RBBMwEYEPaXBzQG1haWwuaXBzLmVzMBoGA1Ud
-EgQTMBGBD2lwc0BtYWlsLmlwcy5lczBCBglghkgBhvhCAQ0ENRYzQ0xBU0VBMyBD
-QSBDZXJ0aWZpY2F0ZSBpc3N1ZWQgYnkgaHR0cDovL3d3dy5pcHMuZXMvMCkGCWCG
-SAGG+EIBAgQcFhpodHRwOi8vd3d3Lmlwcy5lcy9pcHMyMDAyLzA7BglghkgBhvhC
-AQQELhYsaHR0cDovL3d3dy5pcHMuZXMvaXBzMjAwMi9pcHMyMDAyQ0xBU0VBMy5j
-cmwwQAYJYIZIAYb4QgEDBDMWMWh0dHA6Ly93d3cuaXBzLmVzL2lwczIwMDIvcmV2
-b2NhdGlvbkNMQVNFQTMuaHRtbD8wPQYJYIZIAYb4QgEHBDAWLmh0dHA6Ly93d3cu
-aXBzLmVzL2lwczIwMDIvcmVuZXdhbENMQVNFQTMuaHRtbD8wOwYJYIZIAYb4QgEI
-BC4WLGh0dHA6Ly93d3cuaXBzLmVzL2lwczIwMDIvcG9saWN5Q0xBU0VBMy5odG1s
-MHUGA1UdHwRuMGwwMqAwoC6GLGh0dHA6Ly93d3cuaXBzLmVzL2lwczIwMDIvaXBz
-MjAwMkNMQVNFQTMuY3JsMDagNKAyhjBodHRwOi8vd3d3YmFjay5pcHMuZXMvaXBz
-MjAwMi9pcHMyMDAyQ0xBU0VBMy5jcmwwLwYIKwYBBQUHAQEEIzAhMB8GCCsGAQUF
-BzABhhNodHRwOi8vb2NzcC5pcHMuZXMvMA0GCSqGSIb3DQEBBQUAA4GBAEo9IEca
-2on0eisxeewBwMwB9dbB/MjD81ACUZBYKp/nNQlbMAqBACVHr9QPDp5gJqiVp4MI
-3y2s6Q73nMify5NF8bpqxmdRSmlPa/59Cy9SKcJQrSRE7SOzSMtEQMEDlQwKeAYS
-AfWRMS1Jjbs/RU4s4OjNtckUFQzjB4ObJnXv
------END CERTIFICATE-----
-
-subject= /C=ES/ST=Barcelona/L=Barcelona/O=IPS Internet publishing Services s.l./O=ips@mail.ips.es C.I.F.  B-60929452/OU=IPS CA Timestamping Certification Authority/CN=IPS CA Timestamping Certification Authority/emailAddress=ips@mail.ips.es
-serial=00
------BEGIN CERTIFICATE-----
-MIIIODCCB6GgAwIBAgIBADANBgkqhkiG9w0BAQUFADCCAR4xCzAJBgNVBAYTAkVT
-MRIwEAYDVQQIEwlCYXJjZWxvbmExEjAQBgNVBAcTCUJhcmNlbG9uYTEuMCwGA1UE
-ChMlSVBTIEludGVybmV0IHB1Ymxpc2hpbmcgU2VydmljZXMgcy5sLjErMCkGA1UE
-ChQiaXBzQG1haWwuaXBzLmVzIEMuSS5GLiAgQi02MDkyOTQ1MjE0MDIGA1UECxMr
-SVBTIENBIFRpbWVzdGFtcGluZyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTE0MDIG
-A1UEAxMrSVBTIENBIFRpbWVzdGFtcGluZyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
-eTEeMBwGCSqGSIb3DQEJARYPaXBzQG1haWwuaXBzLmVzMB4XDTAxMTIyOTAxMTAx
-OFoXDTI1MTIyNzAxMTAxOFowggEeMQswCQYDVQQGEwJFUzESMBAGA1UECBMJQmFy
-Y2Vsb25hMRIwEAYDVQQHEwlCYXJjZWxvbmExLjAsBgNVBAoTJUlQUyBJbnRlcm5l
-dCBwdWJsaXNoaW5nIFNlcnZpY2VzIHMubC4xKzApBgNVBAoUImlwc0BtYWlsLmlw
-cy5lcyBDLkkuRi4gIEItNjA5Mjk0NTIxNDAyBgNVBAsTK0lQUyBDQSBUaW1lc3Rh
-bXBpbmcgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxNDAyBgNVBAMTK0lQUyBDQSBU
-aW1lc3RhbXBpbmcgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHjAcBgkqhkiG9w0B
-CQEWD2lwc0BtYWlsLmlwcy5lczCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
-vLjuVqWajOY2ycJioGaBjRrVetJznw6EZLqVtJCneK/K/lRhW86yIFcBrkSSQxA4
-Efdo/BdApWgnMjvEp+ZCccWZ73b/K5Uk9UmSGGjKALWkWi9uy9YbLA1UZ2t6KaFY
-q6JaANZbuxjC3/YeE1Z2m6Vo4pjOxgOKNNtMg0GmqaMCAwEAAaOCBIAwggR8MB0G
-A1UdDgQWBBSL0BBQCYHynQnVDmB4AyKiP8jKZjCCAVAGA1UdIwSCAUcwggFDgBSL
-0BBQCYHynQnVDmB4AyKiP8jKZqGCASakggEiMIIBHjELMAkGA1UEBhMCRVMxEjAQ
-BgNVBAgTCUJhcmNlbG9uYTESMBAGA1UEBxMJQmFyY2Vsb25hMS4wLAYDVQQKEyVJ
-UFMgSW50ZXJuZXQgcHVibGlzaGluZyBTZXJ2aWNlcyBzLmwuMSswKQYDVQQKFCJp
-cHNAbWFpbC5pcHMuZXMgQy5JLkYuICBCLTYwOTI5NDUyMTQwMgYDVQQLEytJUFMg
-Q0EgVGltZXN0YW1waW5nIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MTQwMgYDVQQD
-EytJUFMgQ0EgVGltZXN0YW1waW5nIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MR4w
-HAYJKoZIhvcNAQkBFg9pcHNAbWFpbC5pcHMuZXOCAQAwDAYDVR0TBAUwAwEB/zAM
-BgNVHQ8EBQMDB/+AMGsGA1UdJQRkMGIGCCsGAQUFBwMBBggrBgEFBQcDAgYIKwYB
-BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYKKwYBBAGCNwIBFQYKKwYBBAGCNwIB
-FgYKKwYBBAGCNwoDAQYKKwYBBAGCNwoDBDARBglghkgBhvhCAQEEBAMCAAcwGgYD
-VR0RBBMwEYEPaXBzQG1haWwuaXBzLmVzMBoGA1UdEgQTMBGBD2lwc0BtYWlsLmlw
-cy5lczBHBglghkgBhvhCAQ0EOhY4VGltZXN0YW1waW5nIENBIENlcnRpZmljYXRl
-IGlzc3VlZCBieSBodHRwOi8vd3d3Lmlwcy5lcy8wKQYJYIZIAYb4QgECBBwWGmh0
-dHA6Ly93d3cuaXBzLmVzL2lwczIwMDIvMEAGCWCGSAGG+EIBBAQzFjFodHRwOi8v
-d3d3Lmlwcy5lcy9pcHMyMDAyL2lwczIwMDJUaW1lc3RhbXBpbmcuY3JsMEUGCWCG
-SAGG+EIBAwQ4FjZodHRwOi8vd3d3Lmlwcy5lcy9pcHMyMDAyL3Jldm9jYXRpb25U
-aW1lc3RhbXBpbmcuaHRtbD8wQgYJYIZIAYb4QgEHBDUWM2h0dHA6Ly93d3cuaXBz
-LmVzL2lwczIwMDIvcmVuZXdhbFRpbWVzdGFtcGluZy5odG1sPzBABglghkgBhvhC
-AQgEMxYxaHR0cDovL3d3dy5pcHMuZXMvaXBzMjAwMi9wb2xpY3lUaW1lc3RhbXBp
-bmcuaHRtbDB/BgNVHR8EeDB2MDegNaAzhjFodHRwOi8vd3d3Lmlwcy5lcy9pcHMy
-MDAyL2lwczIwMDJUaW1lc3RhbXBpbmcuY3JsMDugOaA3hjVodHRwOi8vd3d3YmFj
-ay5pcHMuZXMvaXBzMjAwMi9pcHMyMDAyVGltZXN0YW1waW5nLmNybDAvBggrBgEF
-BQcBAQQjMCEwHwYIKwYBBQUHMAGGE2h0dHA6Ly9vY3NwLmlwcy5lcy8wDQYJKoZI
-hvcNAQEFBQADgYEAZbrBzAAalZHK6Ww6vzoeFAh8+4Pua2JR0zORtWB5fgTYXXk3
-6MNbsMRnLWhasl8OCvrNPzpFoeo2zyYepxEoxZSPhExTCMWTs/zif/WN87GphV+I
-3pGW7hdbrqXqcGV4LCFkAZXOzkw+UPS2Wctjjba9GNSHSl/c7+lW8AoM6HU=
------END CERTIFICATE-----
-
 subject= /C=BM/O=QuoVadis Limited/OU=Root Certification Authority/CN=QuoVadis Root Certification Authority
 serial=3AB6508B
 -----BEGIN CERTIFICATE-----
@@ -4710,6 +4223,206 @@
 bX4b5brdNL4iEexomDOBAmoLE1V5MXVOOsi2E72XbzcKCy2IDt5nkMKzyiDKmlH0
 ZD7b9C5F8sdHF6j0+pBaf4CmgqzkbIFGu1KFICT4gOo=
 -----END CERTIFICATE-----
+
+subject= /C=US/ST=Arizona/L=Scottsdale/O=GoDaddy.com, Inc./CN=Go Daddy Root Certificate Authority - G2
+serial=00
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+subject= /C=US/ST=Arizona/L=Scottsdale/O=Starfield Technologies, Inc./CN=Starfield Root Certificate Authority - G2
+serial=00
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+subject= /C=US/ST=Arizona/L=Scottsdale/O=Starfield Technologies, Inc./CN=Starfield Services Root Certificate Authority - G2
+serial=00
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+subject= /C=US/O=AffirmTrust/CN=AffirmTrust Commercial
+serial=7777062726A9B17C
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+subject= /C=US/O=AffirmTrust/CN=AffirmTrust Networking
+serial=7C4F04391CD4992D
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+subject= /C=US/O=AffirmTrust/CN=AffirmTrust Premium
+serial=6D8C1446B1A60AEE
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+subject= /C=US/O=AffirmTrust/CN=AffirmTrust Premium ECC
+serial=7497258AC73F7A54
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+subject= /C=PL/O=Unizeto Technologies S.A./OU=Certum Certification Authority/CN=Certum Trusted Network CA
+serial=0444C0
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
 # ***** BEGIN LICENSE BLOCK *****
 # This file is a collection of intermediate certificates accumulated
 # from mapreducing valid certificate chains.
@@ -19462,6 +19175,26 @@
 eH4YLi1x/drvDYFGifBS8gxQaffO1QF8sn8o
 -----END CERTIFICATE-----
 
+subject= /C=ZA/ST=Western Cape/L=Durbanville/O=Thawte/OU=Thawte Certification/CN=Thawte Timestamping CA
+serial=00
+-----BEGIN CERTIFICATE-----
+MIICoTCCAgqgAwIBAgIBADANBgkqhkiG9w0BAQQFADCBizELMAkGA1UEBhMCWkEx
+FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTEUMBIGA1UEBxMLRHVyYmFudmlsbGUxDzAN
+BgNVBAoTBlRoYXd0ZTEdMBsGA1UECxMUVGhhd3RlIENlcnRpZmljYXRpb24xHzAd
+BgNVBAMTFlRoYXd0ZSBUaW1lc3RhbXBpbmcgQ0EwHhcNOTcwMTAxMDAwMDAwWhcN
+MjAxMjMxMjM1OTU5WjCBizELMAkGA1UEBhMCWkExFTATBgNVBAgTDFdlc3Rlcm4g
+Q2FwZTEUMBIGA1UEBxMLRHVyYmFudmlsbGUxDzANBgNVBAoTBlRoYXd0ZTEdMBsG
+A1UECxMUVGhhd3RlIENlcnRpZmljYXRpb24xHzAdBgNVBAMTFlRoYXd0ZSBUaW1l
+c3RhbXBpbmcgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANYrWHhhRYZT
+6jR7UZztsOYuGA7+4F+oJ9O0yeB8WU4WDnNUYMF/9p8u6TqFJBU820cEY8OexJQa
+Wt9MevPZQx08EHp5JduQ/vBR5zDWQQD9nyjfeb6Uu522FOMjhdepQeBMpHmwKxqL
+8vg7ij5FrHGSALSQQZj7X+36ty6K+Ig3AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMB
+Af8wDQYJKoZIhvcNAQEEBQADgYEAZ9viwuaHPUCDhjc1fR/OmsMMZiCouqoEiYbC
+9RAIDb/LogWK0E02PvTX72nGXuSwlG9KuefeW4i2e9vjJ+V2w/A1wcu1J5szedyQ
+pgCed/r8zSeUQhac0xxo7L9c3eWpexAKMnRUEzGLhQOEkbdYATAUOK8oyvyxUBkZ
+CayJSdM=
+-----END CERTIFICATE-----
+
 subject= /C=ZA/ST=Western Cape/L=Cape Town/O=Thawte Consulting/OU=Certification Services Division/CN=Thawte Personal Premium CA/emailAddress=personal-premium@thawte.com
 serial=00
 -----BEGIN CERTIFICATE-----
@@ -19484,6 +19217,29 @@
 KXLA4CxM+1bkOqhv5TJZUtt1KFBZDPgLGeSs2a+WjS9Q2wfD6h+rM+D1KzGJ
 -----END CERTIFICATE-----
 
+subject= /C=ZA/ST=Western Cape/L=Cape Town/O=Thawte Consulting/OU=Certification Services Division/CN=Thawte Personal Freemail CA/emailAddress=personal-freemail@thawte.com
+serial=00
+-----BEGIN CERTIFICATE-----
+MIIDLTCCApagAwIBAgIBADANBgkqhkiG9w0BAQQFADCB0TELMAkGA1UEBhMCWkEx
+FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMRowGAYD
+VQQKExFUaGF3dGUgQ29uc3VsdGluZzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBT
+ZXJ2aWNlcyBEaXZpc2lvbjEkMCIGA1UEAxMbVGhhd3RlIFBlcnNvbmFsIEZyZWVt
+YWlsIENBMSswKQYJKoZIhvcNAQkBFhxwZXJzb25hbC1mcmVlbWFpbEB0aGF3dGUu
+Y29tMB4XDTk2MDEwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgdExCzAJBgNVBAYT
+AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEa
+MBgGA1UEChMRVGhhd3RlIENvbnN1bHRpbmcxKDAmBgNVBAsTH0NlcnRpZmljYXRp
+b24gU2VydmljZXMgRGl2aXNpb24xJDAiBgNVBAMTG1RoYXd0ZSBQZXJzb25hbCBG
+cmVlbWFpbCBDQTErMCkGCSqGSIb3DQEJARYccGVyc29uYWwtZnJlZW1haWxAdGhh
+d3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA1GnX1LCUZFtx6UfY
+DFG26nKRsIRefS0Nj3sS34UldSh0OkIsYyeflXtL734Zhx2G6qPduc6WZBrCFG5E
+rHzmj+hND3EfQDimAKOHePb5lIZererAXnbr2RSjXW56fAylS1V/Bhkpf56aJtVq
+uzgkCGqYx7Hao5iR/Xnb5VrEHLkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zAN
+BgkqhkiG9w0BAQQFAAOBgQDH7JJ+Tvj1lqVnYiqk8E0RYNBvjWBYYawmu1I1XAjP
+MPuoSpaKH2JCI4wXD/S6ZJwXrEcp352YXtJsYHFcoqzceePnbgBHH7UNKOgCneSa
+/RP0ptl8sfjcXyMmCZGAc9AUG95DqYMl8uacLxXK/qarigd1iwzdUYRr5PjRznei
+gQ==
+-----END CERTIFICATE-----
+
 subject= /C=ZA/ST=Western Cape/L=Cape Town/O=Thawte Consulting/OU=Certification Services Division/CN=Thawte Personal Basic CA/emailAddress=personal-basic@thawte.com
 serial=00
 -----BEGIN CERTIFICATE-----
@@ -21077,6 +20833,98 @@
 RBqMbXpEn6wKuw4Sove0r/MrH4nwLU1NJVpwUtySkOXXMVMvOZs=
 -----END CERTIFICATE-----
 
+subject= /O=Entrust.net/OU=www.entrust.net/SSL_CPS incorp. by ref. (limits liab.)/OU=(c) 2000 Entrust.net Limited/CN=Entrust.net Secure Server Certification Authority
+serial=389B113C
+-----BEGIN CERTIFICATE-----
+MIIElTCCA/6gAwIBAgIEOJsRPDANBgkqhkiG9w0BAQQFADCBujEUMBIGA1UEChML
+RW50cnVzdC5uZXQxPzA9BgNVBAsUNnd3dy5lbnRydXN0Lm5ldC9TU0xfQ1BTIGlu
+Y29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMcKGMpIDIwMDAg
+RW50cnVzdC5uZXQgTGltaXRlZDE6MDgGA1UEAxMxRW50cnVzdC5uZXQgU2VjdXJl
+IFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMDAyMDQxNzIwMDBa
+Fw0yMDAyMDQxNzUwMDBaMIG6MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDE/MD0GA1UE
+CxQ2d3d3LmVudHJ1c3QubmV0L1NTTF9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1p
+dHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMjAwMCBFbnRydXN0Lm5ldCBMaW1pdGVk
+MTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRp
+b24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDHwV9OcfHO
+8GCGD9JYf9Mzly0XonUwtZZkJi9ow0SrqHXmAGc0V55lxyKbc+bT3QgON1WqJUaB
+bL3+qPZ1V1eMkGxKwz6LS0MKyRFWmponIpnPVZ5h2QLifLZ8OAfc439PmrkDQYC2
+dWcTC5/oVzbIXQA23mYU2m52H083jIITiQIDAQABo4IBpDCCAaAwEQYJYIZIAYb4
+QgEBBAQDAgAHMIHjBgNVHR8EgdswgdgwgdWggdKggc+kgcwwgckxFDASBgNVBAoT
+C0VudHJ1c3QubmV0MT8wPQYDVQQLFDZ3d3cuZW50cnVzdC5uZXQvU1NMX0NQUyBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAyMDAw
+IEVudHJ1c3QubmV0IExpbWl0ZWQxOjA4BgNVBAMTMUVudHJ1c3QubmV0IFNlY3Vy
+ZSBTZXJ2ZXIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxDTALBgNVBAMTBENSTDEw
+KwYDVR0QBCQwIoAPMjAwMDAyMDQxNzIwMDBagQ8yMDIwMDIwNDE3NTAwMFowCwYD
+VR0PBAQDAgEGMB8GA1UdIwQYMBaAFMtswGvjuz7L/CKc/vuLkpyw8m4iMB0GA1Ud
+DgQWBBTLbMBr47s+y/winP77i5KcsPJuIjAMBgNVHRMEBTADAQH/MB0GCSqGSIb2
+fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQQFAAOBgQBi24GRzsia
+d0Iv7L0no1MPUBvqTpLwqa+poLpIYcvvyQbvH9X07t9WLebKahlzqlO+krNQAraF
+JnJj2HVQYnUUt7NQGj/KEQALhUVpbbalrlHhStyCP2yMNLJ3a9kC9n8O6mUE8c1U
+yrrJzOCE98g+EZfTYAkYvAX/bIkz8OwVDw==
+-----END CERTIFICATE-----
+
+subject= /O=Entrust.net/OU=www.entrust.net/GCCA_CPS incorp. by ref. (limits liab.)/OU=(c) 2000 Entrust.net Limited/CN=Entrust.net Client Certification Authority
+serial=389EF6E4
+-----BEGIN CERTIFICATE-----
+MIIEgzCCA+ygAwIBAgIEOJ725DANBgkqhkiG9w0BAQQFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9HQ0NBX0NQUyBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAyMDAw
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENsaWVu
+dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMDAyMDcxNjE2NDBaFw0yMDAy
+MDcxNjQ2NDBaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0dDQ0FfQ1BTIGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDIwMDAgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2xpZW50IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCTdLS25MVL1qFof2LV7PdRV7Ny
+Spj10InJrWPNTTVRaoTUrcloeW+46xHbh65cJFET8VQlhK8pK5/jgOLZy93GRUk0
+iJBeAZfv6lOm3fzB3ksqJeTpNfpVBQbliXrqpBFXO/x8PTbNZzVtpKklWb1m9fkn
+5JVn1j+SgF7yNH0rhQIDAQABo4IBnjCCAZowEQYJYIZIAYb4QgEBBAQDAgAHMIHd
+BgNVHR8EgdUwgdIwgc+ggcyggcmkgcYwgcMxFDASBgNVBAoTC0VudHJ1c3QubmV0
+MUAwPgYDVQQLFDd3d3cuZW50cnVzdC5uZXQvR0NDQV9DUFMgaW5jb3JwLiBieSBy
+ZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMjAwMCBFbnRydXN0Lm5l
+dCBMaW1pdGVkMTMwMQYDVQQDEypFbnRydXN0Lm5ldCBDbGllbnQgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkxDTALBgNVBAMTBENSTDEwKwYDVR0QBCQwIoAPMjAwMDAy
+MDcxNjE2NDBagQ8yMDIwMDIwNzE2NDY0MFowCwYDVR0PBAQDAgEGMB8GA1UdIwQY
+MBaAFISLdP3FjcD/J20gN0V8/i3OutN9MB0GA1UdDgQWBBSEi3T9xY3A/ydtIDdF
+fP4tzrrTfTAMBgNVHRMEBTADAQH/MB0GCSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4w
+AwIEkDANBgkqhkiG9w0BAQQFAAOBgQBObzWAO9GK9Q6nIMstZVXQkvTnhLUGJoMS
+hAusO7JE7r3PQNsgDrpuFOow4DtifH+La3xKp9U1PL6oXOpLu5OOgGarDyn9TS2/
+GpsKkMWr2tGzhtQvJFJcem3G8v7lTRowjJDyutdKPkN+1MhQGof4T4HHdguEOnKd
+zmVml64mXg==
+-----END CERTIFICATE-----
+
+subject= /C=US/O=Entrust.net/OU=www.entrust.net/Client_CA_Info/CPS incorp. by ref. limits liab./OU=(c) 1999 Entrust.net Limited/CN=Entrust.net Client Certification Authority
+serial=380391EE
+-----BEGIN CERTIFICATE-----
+MIIE7TCCBFagAwIBAgIEOAOR7jANBgkqhkiG9w0BAQQFADCByTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoTC0VudHJ1c3QubmV0MUgwRgYDVQQLFD93d3cuZW50cnVzdC5u
+ZXQvQ2xpZW50X0NBX0luZm8vQ1BTIGluY29ycC4gYnkgcmVmLiBsaW1pdHMgbGlh
+Yi4xJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV
+BAMTKkVudHJ1c3QubmV0IENsaWVudCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw05OTEwMTIxOTI0MzBaFw0xOTEwMTIxOTU0MzBaMIHJMQswCQYDVQQGEwJVUzEU
+MBIGA1UEChMLRW50cnVzdC5uZXQxSDBGBgNVBAsUP3d3dy5lbnRydXN0Lm5ldC9D
+bGllbnRfQ0FfSW5mby9DUFMgaW5jb3JwLiBieSByZWYuIGxpbWl0cyBsaWFiLjEl
+MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMq
+RW50cnVzdC5uZXQgQ2xpZW50IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGdMA0G
+CSqGSIb3DQEBAQUAA4GLADCBhwKBgQDIOpleMRffrCdvkHvkGf9FozTC28GoT/Bo
+6oT9n3V5z8GKUZSvx1cDR2SerYIbWtp/N3hHuzeYEpbOxhN979IMMFGpOZ5V+Pux
+5zDeg7K6PvHViTs7hbqqdCz+PzFur5GVbgbUB01LLFZHGARS2g4Qk79jkJvh34zm
+AqTmT173iwIBA6OCAeAwggHcMBEGCWCGSAGG+EIBAQQEAwIABzCCASIGA1UdHwSC
+ARkwggEVMIHkoIHhoIHepIHbMIHYMQswCQYDVQQGEwJVUzEUMBIGA1UEChMLRW50
+cnVzdC5uZXQxSDBGBgNVBAsUP3d3dy5lbnRydXN0Lm5ldC9DbGllbnRfQ0FfSW5m
+by9DUFMgaW5jb3JwLiBieSByZWYuIGxpbWl0cyBsaWFiLjElMCMGA1UECxMcKGMp
+IDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5uZXQg
+Q2xpZW50IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCyg
+KqAohiZodHRwOi8vd3d3LmVudHJ1c3QubmV0L0NSTC9DbGllbnQxLmNybDArBgNV
+HRAEJDAigA8xOTk5MTAxMjE5MjQzMFqBDzIwMTkxMDEyMTkyNDMwWjALBgNVHQ8E
+BAMCAQYwHwYDVR0jBBgwFoAUxPucKXuXzUyW/O5bs8qZdIuV6kwwHQYDVR0OBBYE
+FMT7nCl7l81MlvzuW7PKmXSLlepMMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EA
+BAwwChsEVjQuMAMCBJAwDQYJKoZIhvcNAQEEBQADgYEAP66K8ddmAwWePvrqHEa7
+pFuPeJoSSJn59DXeDDYHAmsQOokUgZwxpnyyQbJq5wcBoUv5nyU7lsqZwz6hURzz
+wy5E97BnRqqS5TvaHBkUODDV4qIxJS7x7EU47fgGWANzYrAQMY9Av2TgXD7FTx/a
+EkP/TOYGJqibGapEPHayXOw=
+-----END CERTIFICATE-----
+
 subject= /C=CA/ST=Ontario/L=Toronto/O=Echoworx Corporation/OU=Certification Services/CN=Echoworx Root CA2
 serial=00
 -----BEGIN CERTIFICATE-----
@@ -21588,31 +21436,6 @@
 kxpUnwVwwEpxYB5DC2Ae/qPOgRnhCzU=
 -----END CERTIFICATE-----
 
-subject= /C=PL/O=Unizeto Technologies S.A./OU=Certum Certification Authority/CN=Certum Trusted Network CA
-serial=0444C0
------BEGIN CERTIFICATE-----
-MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
-MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
-ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
-cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
-WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
-Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
-IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
-AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
-UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
-TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
-BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
-kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
-AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
-HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
-HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
-sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
-I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
-J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
-VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
-03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
------END CERTIFICATE-----
-
 subject= /C=BR/ST=Rio de Janeiro/L=Rio de Janeiro/O=Certisign Certificadora Digital Ltda./OU=Certisign Autoridade Certificadora AC3S
 serial=00
 -----BEGIN CERTIFICATE-----
diff --git a/lib/whoosh/whoosh/LICENSE b/lib/whoosh/whoosh/LICENSE
new file mode 100644
index 0000000..b026632
--- /dev/null
+++ b/lib/whoosh/whoosh/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2011 Matt Chaput. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are
+those of the authors and should not be interpreted as representing official
+policies, either expressed or implied, of Matt Chaput.
diff --git a/lib/whoosh/whoosh/OWNERS b/lib/whoosh/whoosh/OWNERS
new file mode 100644
index 0000000..0b3aaaf
--- /dev/null
+++ b/lib/whoosh/whoosh/OWNERS
@@ -0,0 +1,3 @@
+ged
+majewski
+mukai
diff --git a/lib/whoosh/whoosh/__init__.py b/lib/whoosh/whoosh/__init__.py
new file mode 100644
index 0000000..edef704
--- /dev/null
+++ b/lib/whoosh/whoosh/__init__.py
@@ -0,0 +1,50 @@
+# Copyright 2008 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+__version__ = (1, 8, 1)
+
+
+def versionstring(build=True, extra=True):
+    """Returns the version number of Whoosh as a string.
+
+    :param build: Whether to include the build number in the string.
+    :param extra: Whether to include alpha/beta/rc etc. tags. Only
+        checked if build is True.
+    :rtype: str
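+
+    For example, with ``__version__ == (1, 8, 1)`` as defined above, this
+    should give:
+
+    >>> versionstring()
+    '1.8.1'
+    >>> versionstring(build=False)
+    '1.8'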
+    """
+
+    if build:
+        first = 3
+    else:
+        first = 2
+
+    s = ".".join(str(n) for n in __version__[:first])
+    if build and extra:
+        s += "".join(str(n) for n in __version__[3:])
+
+    return s
+
diff --git a/lib/whoosh/whoosh/analysis.py b/lib/whoosh/whoosh/analysis.py
new file mode 100644
index 0000000..fbec210
--- /dev/null
+++ b/lib/whoosh/whoosh/analysis.py
@@ -0,0 +1,1815 @@
+# coding: utf8
+
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""Classes and functions for turning a piece of text into an indexable stream
+of "tokens" (usually equivalent to words). There are three general types of
+classes/functions involved in analysis:
+
+* Tokenizers are always at the start of the text processing pipeline. They take
+  a string and yield Token objects (actually, the same token object over and
+  over, for performance reasons) corresponding to the tokens (words) in the
+  text.
+
+  Every tokenizer is a callable that takes a string and returns a generator of
+  tokens.
+
+* Filters take the tokens from the tokenizer and perform various
+  transformations on them. For example, the LowercaseFilter converts all tokens
+  to lowercase, which is usually necessary when indexing regular English text.
+
+  Every filter is a callable that takes a token generator and returns a token
+  generator.
+
+* Analyzers are convenience functions/classes that "package up" a tokenizer and
+  zero or more filters into a single unit, so you don't have to construct the
+  tokenizer-filter-filter-etc. pipeline yourself. For example, the
+  StandardAnalyzer combines a RegexTokenizer, LowercaseFilter, and StopFilter.
+
+  Every analyzer is a callable that takes a string and returns a token
+  generator. (So Tokenizers can be used as Analyzers if you don't need any
+  filtering).
+
+You can implement an analyzer as a custom class or function, or compose
+tokenizers and filters together using the ``|`` character::
+
+    my_analyzer = RegexTokenizer() | LowercaseFilter() | StopFilter()
+
+The first item must be a tokenizer and the rest must be filters (you can't put
+a filter first or a tokenizer after the first item).
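+
+For example, calling the analyzer built above on a string should yield the
+lowercased, stop-filtered tokens:
+
+    >>> my_analyzer = RegexTokenizer() | LowercaseFilter() | StopFilter()
+    >>> [t.text for t in my_analyzer(u"The RENDERING of four tokens")]
+    [u"rendering", u"four", u"tokens"]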
+"""
+
+import re
+from array import array
+from collections import deque
+from itertools import chain
+
+from whoosh.lang.dmetaphone import double_metaphone
+from whoosh.lang.porter import stem
+from whoosh.util import lru_cache, unbound_cache
+
+
+# Default list of stop words (words so common it's usually wasteful to index
+# them). This list is used by the StopFilter class, which allows you to supply
+# an optional list to override this one.
+
+STOP_WORDS = frozenset(('a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'can',
+                        'for', 'from', 'have', 'if', 'in', 'is', 'it', 'may',
+                        'not', 'of', 'on', 'or', 'tbd', 'that', 'the', 'this',
+                        'to', 'us', 'we', 'when', 'will', 'with', 'yet',
+                        'you', 'your'))
+
+
+# Pre-configured regular expressions
+
+default_pattern = re.compile(r"\w+(\.?\w+)*", re.UNICODE)
+url_pattern = re.compile("""
+(
+    [A-Za-z+]+://          # URL protocol
+    \\S+?                  # URL body
+    (?=\\s|[.]\\s|$|[.]$)  # Stop at space/end, or a dot followed by space/end
+) | (                      # or...
+    \w+([:.]?\w+)*         # word characters, with optional internal colons/dots
+)
+""", re.VERBOSE | re.UNICODE)
+
+
+# Utility functions
+
+def unstopped(tokenstream):
+    """Removes tokens from a token stream where token.stopped = True.
+    """
+    return (t for t in tokenstream if not t.stopped)
+
+
+# Token object
+
+class Token(object):
+    """
+    Represents a "token" (usually a word) extracted from the source text being
+    indexed.
+
+    See "Advanced analysis" in the user guide for more information.
+
+    Because object instantiation in Python is slow, tokenizers should create
+    ONE SINGLE Token object and YIELD IT OVER AND OVER, changing the attributes
+    each time.
+
+    This trick means that consumers of tokens (i.e. filters) must never try to
+    hold onto the token object between loop iterations, or convert the token
+    generator into a list. Instead, save the attributes between iterations,
+    not the object::
+
+        def RemoveDuplicatesFilter(self, stream):
+            # Removes duplicate words.
+            lasttext = None
+            for token in stream:
+                # Only yield the token if its text doesn't
+                # match the previous token.
+                if lasttext != token.text:
+                    yield token
+                lasttext = token.text
+
+    ...or, call token.copy() to get a copy of the token object.
+    """
+
+    def __init__(self, positions=False, chars=False, removestops=True, mode='',
+                 **kwargs):
+        """
+        :param positions: Whether tokens should have the token position in the
+            'pos' attribute.
+        :param chars: Whether tokens should have character offsets in the
+            'startchar' and 'endchar' attributes.
+        :param removestops: whether to remove stop words from the stream (if
+            the tokens pass through a stop filter).
+        :param mode: contains a string describing the purpose for which the
+            analyzer is being called, i.e. 'index' or 'query'.
+        """
+
+        self.positions = positions
+        self.chars = chars
+        self.stopped = False
+        self.boost = 1.0
+        self.removestops = removestops
+        self.mode = mode
+        self.__dict__.update(kwargs)
+
+    def __repr__(self):
+        parms = ", ".join("%s=%r" % (name, value)
+                          for name, value in self.__dict__.iteritems())
+        return "%s(%s)" % (self.__class__.__name__, parms)
+
+    def copy(self):
+        # This is faster than using the copy module
+        return Token(**self.__dict__)
+
+
+# Composition support
+
+class Composable(object):
+    def __or__(self, other):
+        assert callable(other), "%r is not callable" % other
+        return CompositeAnalyzer(self, other)
+
+    def __repr__(self):
+        attrs = ""
+        if self.__dict__:
+            attrs = ", ".join("%s=%r" % (key, value)
+                              for key, value
+                              in self.__dict__.iteritems())
+        return self.__class__.__name__ + "(%s)" % attrs
+
+
+# Tokenizers
+
+class Tokenizer(Composable):
+    """Base class for Tokenizers.
+    """
+
+    def __eq__(self, other):
+        return other and self.__class__ is other.__class__
+
+
+class IDTokenizer(Tokenizer):
+    """Yields the entire input string as a single token. For use in indexed but
+    untokenized fields, such as a document's path.
+
+    >>> idt = IDTokenizer()
+    >>> [token.text for token in idt(u"/a/b 123 alpha")]
+    [u"/a/b 123 alpha"]
+    """
+
+    def __call__(self, value, positions=False, chars=False,
+                 keeporiginal=False, removestops=True,
+                 start_pos=0, start_char=0, mode='',
+                 **kwargs):
+        assert isinstance(value, unicode), "%r is not unicode" % value
+        t = Token(positions, chars, removestops=removestops, mode=mode)
+        t.text = value
+        t.boost = 1.0
+        if keeporiginal:
+            t.original = value
+        if positions:
+            t.pos = start_pos + 1
+        if chars:
+            t.startchar = start_char
+            t.endchar = start_char + len(value)
+        yield t
+
+
+class RegexTokenizer(Tokenizer):
+    """
+    Uses a regular expression to extract tokens from text.
+
+    >>> rex = RegexTokenizer()
+    >>> [token.text for token in rex(u"hi there 3.141 big-time under_score")]
+    [u"hi", u"there", u"3.141", u"big", u"time", u"under_score"]
+    """
+
+    __inittypes__ = dict(expression=unicode, gaps=bool)
+
+    def __init__(self, expression=default_pattern, gaps=False):
+        """
+        :param expression: A regular expression object or string. Each match
+            of the expression equals a token. Group 0 (the entire matched text)
+            is used as the text of the token. If you require more complicated
+            handling of the expression match, simply write your own tokenizer.
+        :param gaps: If True, the tokenizer *splits* on the expression, rather
+            than matching on the expression.
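+
+        For example, splitting on whitespace instead of matching words:
+
+        >>> gapper = RegexTokenizer(r"\\s+", gaps=True)
+        >>> [token.text for token in gapper(u"hi there")]
+        [u"hi", u"there"]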
+        """
+
+        if isinstance(expression, basestring):
+            self.expression = re.compile(expression, re.UNICODE)
+        else:
+            self.expression = expression
+        self.gaps = gaps
+
+    def __eq__(self, other):
+        if self.__class__ is other.__class__:
+            if self.expression.pattern == other.expression.pattern:
+                return True
+        return False
+
+    def __call__(self, value, positions=False, chars=False,
+                 keeporiginal=False, removestops=True,
+                 start_pos=0, start_char=0,
+                 tokenize=True, mode='', **kwargs):
+        """
+        :param value: The unicode string to tokenize.
+        :param positions: Whether to record token positions in the token.
+        :param chars: Whether to record character offsets in the token.
+        :param start_pos: The position number of the first token. For example,
+            if you set start_pos=2, the tokens will be numbered 2,3,4,...
+            instead of 0,1,2,...
+        :param start_char: The offset of the first character of the first
+            token. For example, if you set start_char=2, the text "aaa bbb"
+            will have chars (2,5),(6,9) instead of (0,3),(4,7).
+        :param tokenize: if True, the text should be tokenized.
+        """
+
+        assert isinstance(value, unicode), "%r is not unicode" % value
+
+        t = Token(positions, chars, removestops=removestops, mode=mode)
+        if not tokenize:
+            t.original = t.text = value
+            t.boost = 1.0
+            if positions:
+                t.pos = start_pos
+            if chars:
+                t.startchar = start_char
+                t.endchar = start_char + len(value)
+            yield t
+        elif not self.gaps:
+            # The default: expression matches are used as tokens
+            for pos, match in enumerate(self.expression.finditer(value)):
+                t.text = match.group(0)
+                t.boost = 1.0
+                if keeporiginal:
+                    t.original = t.text
+                t.stopped = False
+                if positions:
+                    t.pos = start_pos + pos
+                if chars:
+                    t.startchar = start_char + match.start()
+                    t.endchar = start_char + match.end()
+                yield t
+        else:
+            # When gaps=True, iterate through the matches and
+            # yield the text between them.
+            prevend = 0
+            pos = start_pos
+            for match in self.expression.finditer(value):
+                start = prevend
+                end = match.start()
+                text = value[start:end]
+                if text:
+                    t.text = text
+                    t.boost = 1.0
+                    if keeporiginal:
+                        t.original = t.text
+                    t.stopped = False
+                    if positions:
+                        t.pos = pos
+                        pos += 1
+                    if chars:
+                        t.startchar = start_char + start
+                        t.endchar = start_char + end
+
+                    yield t
+
+                prevend = match.end()
+
+            # If the last "gap" was before the end of the text,
+            # yield the last bit of text as a final token.
+            if prevend < len(value):
+                t.text = value[prevend:]
+                t.boost = 1.0
+                if keeporiginal:
+                    t.original = t.text
+                t.stopped = False
+                if positions:
+                    t.pos = pos
+                if chars:
+                    t.startchar = prevend
+                    t.endchar = len(value)
+                yield t
+
+
+class CharsetTokenizer(Tokenizer):
+    """Tokenizes and translates text according to a character mapping object.
+    Characters that map to None are considered token break characters. For all
+    other characters the map is used to translate the character. This is useful
+    for case and accent folding.
+
+    This tokenizer loops character-by-character and so will likely be much
+    slower than :class:`RegexTokenizer`.
+
+    One way to get a character mapping object is to convert a Sphinx charset
+    table file using :func:`whoosh.support.charset.charset_table_to_dict`.
+
+    >>> from whoosh.support.charset import charset_table_to_dict, default_charset
+    >>> charmap = charset_table_to_dict(default_charset)
+    >>> chtokenizer = CharsetTokenizer(charmap)
+    >>> [t.text for t in chtokenizer(u'Stra\\xdfe ABC')]
+    [u'strase', u'abc']
+
+    The Sphinx charset table format is described at
+    http://www.sphinxsearch.com/docs/current.html#conf-charset-table.
+    """
+
+    __inittype__ = dict(charmap=str)
+
+    def __init__(self, charmap):
+        """
+        :param charmap: a mapping from integer character numbers to unicode
+            characters, as used by the unicode.translate() method.
+        """
+        self.charmap = charmap
+
+    def __eq__(self, other):
+        return (other
+                and self.__class__ is other.__class__
+                and self.charmap == other.charmap)
+
+    def __call__(self, value, positions=False, chars=False,
+                 keeporiginal=False, removestops=True,
+                 start_pos=0, start_char=0,
+                 tokenize=True, mode='', **kwargs):
+        """
+        :param value: The unicode string to tokenize.
+        :param positions: Whether to record token positions in the token.
+        :param chars: Whether to record character offsets in the token.
+        :param start_pos: The position number of the first token. For example,
+            if you set start_pos=2, the tokens will be numbered 2,3,4,...
+            instead of 0,1,2,...
+        :param start_char: The offset of the first character of the first
+            token. For example, if you set start_char=2, the text "aaa bbb"
+            will have chars (2,5),(6,9) instead of (0,3),(4,7).
+        :param tokenize: if True, the text should be tokenized.
+        """
+
+        assert isinstance(value, unicode), "%r is not unicode" % value
+
+        t = Token(positions, chars, removestops=removestops, mode=mode)
+        if not tokenize:
+            t.original = t.text = value
+            t.boost = 1.0
+            if positions:
+                t.pos = start_pos
+            if chars:
+                t.startchar = start_char
+                t.endchar = start_char + len(value)
+            yield t
+        else:
+            text = u""
+            charmap = self.charmap
+            pos = start_pos
+            startchar = currentchar = start_char
+            for char in value:
+                tchar = charmap[ord(char)]
+                if tchar:
+                    text += tchar
+                else:
+                    if currentchar > startchar:
+                        t.text = text
+                        t.boost = 1.0
+                        if keeporiginal:
+                            t.original = t.text
+                        if positions:
+                            t.pos = pos
+                            pos += 1
+                        if chars:
+                            t.startchar = startchar
+                            t.endchar = currentchar
+                        yield t
+                    startchar = currentchar + 1
+                    text = u""
+
+                currentchar += 1
+
+            if currentchar > startchar:
+                t.text = value[startchar:currentchar]
+                t.boost = 1.0
+                if keeporiginal:
+                    t.original = t.text
+                if positions:
+                    t.pos = pos
+                if chars:
+                    t.startchar = startchar
+                    t.endchar = currentchar
+                yield t
+
+
+def SpaceSeparatedTokenizer():
+    """Returns a RegexTokenizer that splits tokens by whitespace.
+
+    >>> sst = SpaceSeparatedTokenizer()
+    >>> [token.text for token in sst(u"hi there big-time, what's up")]
+    [u"hi", u"there", u"big-time,", u"what's", u"up"]
+    """
+
+    return RegexTokenizer(r"[^ \t\r\n]+")
+
+
+def CommaSeparatedTokenizer():
+    """Splits tokens by commas.
+
+    Note that the tokenizer calls unicode.strip() on each match of the regular
+    expression.
+
+    >>> cst = CommaSeparatedTokenizer()
+    >>> [token.text for token in cst(u"hi there, what's , up")]
+    [u"hi there", u"what's", u"up"]
+    """
+
+    return RegexTokenizer(r"[^,]+") | StripFilter()
+
+
+class NgramTokenizer(Tokenizer):
+    """Splits input text into N-grams instead of words.
+
+    >>> ngt = NgramTokenizer(4)
+    >>> [token.text for token in ngt(u"hi there")]
+    [u"hi t", u"i th", u" the", u"ther", u"here"]
+
+    Note that this tokenizer does NOT use a regular expression to extract
+    words, so the grams emitted by it will contain whitespace, punctuation,
+    etc. You may want to massage the input or add a custom filter to this
+    tokenizer's output.
+
+    Alternatively, if you only want sub-word grams without whitespace, you
+    could combine a RegexTokenizer with NgramFilter instead.
+    """
+
+    __inittypes__ = dict(minsize=int, maxsize=int)
+
+    def __init__(self, minsize, maxsize=None):
+        """
+        :param minsize: The minimum size of the N-grams.
+        :param maxsize: The maximum size of the N-grams. If you omit
+            this parameter, maxsize == minsize.
+        """
+
+        self.min = minsize
+        self.max = maxsize or minsize
+
+    def __eq__(self, other):
+        if self.__class__ is other.__class__:
+            if self.min == other.min and self.max == other.max:
+                return True
+        return False
+
+    def __call__(self, value, positions=False, chars=False, keeporiginal=False,
+                 removestops=True, start_pos=0, start_char=0, mode='',
+                 **kwargs):
+        assert isinstance(value, unicode), "%r is not unicode" % value
+
+        inlen = len(value)
+        t = Token(positions, chars, removestops=removestops, mode=mode)
+        pos = start_pos
+
+        if mode == "query":
+            size = min(self.max, inlen)
+            for start in xrange(0, inlen - size + 1):
+                end = start + size
+                if end > inlen:
+                    continue
+                t.text = value[start:end]
+                if keeporiginal:
+                    t.original = t.text
+                t.stopped = False
+                if positions:
+                    t.pos = pos
+                if chars:
+                    t.startchar = start_char + start
+                    t.endchar = start_char + end
+                yield t
+                pos += 1
+        else:
+            for start in xrange(0, inlen - self.min + 1):
+                for size in xrange(self.min, self.max + 1):
+                    end = start + size
+                    if end > inlen:
+                        continue
+                    t.text = value[start:end]
+                    if keeporiginal:
+                        t.original = t.text
+                    t.stopped = False
+                    if positions:
+                        t.pos = pos
+                    if chars:
+                        t.startchar = start_char + start
+                        t.endchar = start_char + end
+
+                    yield t
+                pos += 1
+
+
+# Filters
+
+class Filter(Composable):
+    """Base class for Filter objects. A Filter subclass must implement a
+    __call__ method that takes a single argument, which is an iterator of Token
+    objects, and yields a series of Token objects in return.
+    """
+
+    def __eq__(self, other):
+        return other and self.__class__ is other.__class__
+
+
+class PassFilter(Filter):
+    """An identity filter: passes the tokens through untouched.
+    """
+
+    def __call__(self, tokens):
+        for t in tokens:
+            yield t
+
+
+class LoggingFilter(Filter):
+    """Prints the contents of every filter that passes through as a debug
+    log entry.
+    """
+
+    def __init__(self, logger=None):
+        """
+        :param logger: the logger to use. If omitted, the "whoosh.analysis"
+            logger is used.
+        """
+
+        if logger is None:
+            import logging
+            logger = logging.getLogger("whoosh.analysis")
+        self.logger = logger
+
+    def __call__(self, tokens):
+        logger = self.logger
+        for t in tokens:
+            logger.debug(repr(t))
+            yield t
+
+
+class MultiFilter(Filter):
+    """Chooses one of two or more sub-filters based on the 'mode' attribute
+    of the token stream.
+    """
+
+    def __init__(self, **kwargs):
+        """Use keyword arguments to associate mode attribute values with
+        instantiated filters.
+
+        >>> iwf_for_index = IntraWordFilter(mergewords=True, mergenums=False)
+        >>> iwf_for_query = IntraWordFilter(mergewords=False, mergenums=False)
+        >>> mf = MultiFilter(index=iwf_for_index, query=iwf_for_query)
+
+        This class expects that the value of the mode attribute is consistent
+        among all tokens in a token stream.
+        """
+        self.filters = kwargs
+
+    def __eq__(self, other):
+        return (other
+                and self.__class__ is other.__class__
+                and self.filters == other.filters)
+
+    def __call__(self, tokens):
+        # Only selects on the first token
+        t = tokens.next()
+        filter = self.filters[t.mode]
+        return filter(chain([t], tokens))
+
+
+class ReverseTextFilter(Filter):
+    """Reverses the text of each token.
+
+    >>> ana = RegexTokenizer() | ReverseTextFilter()
+    >>> [token.text for token in ana(u"hello there")]
+    [u"olleh", u"ereht"]
+    """
+
+    def __call__(self, tokens):
+        for t in tokens:
+            t.text = t.text[::-1]
+            yield t
+
+
+class LowercaseFilter(Filter):
+    """Uses unicode.lower() to lowercase token text.
+
+    >>> rext = RegexTokenizer()
+    >>> stream = rext(u"This is a TEST")
+    >>> [token.text for token in LowercaseFilter()(stream)]
+    [u"this", u"is", u"a", u"test"]
+    """
+
+    def __call__(self, tokens):
+        for t in tokens:
+            t.text = t.text.lower()
+            yield t
+
+
+class StripFilter(Filter):
+    """Calls unicode.strip() on the token text.
+    """
+
+    def __call__(self, tokens):
+        for t in tokens:
+            t.text = t.text.strip()
+            yield t
+
+
+class StopFilter(Filter):
+    """Marks "stop" words (words too common to index) in the stream (and by
+    default removes them).
+
+    >>> rext = RegexTokenizer()
+    >>> stream = rext(u"this is a test")
+    >>> stopper = StopFilter()
+    >>> [token.text for token in stopper(stream)]
+    [u"test"]
+
+    """
+
+    __inittypes__ = dict(stoplist=list, minsize=int, maxsize=int, renumber=bool)
+
+    def __init__(self, stoplist=STOP_WORDS, minsize=2, maxsize=None,
+                 renumber=True):
+        """
+        :param stoplist: A collection of words to remove from the stream.
+            This is converted to a frozenset. The default is a list of
+            common stop words.
+        :param minsize: The minimum length of token texts. Tokens with
+            text smaller than this will be stopped.
+        :param maxsize: The maximum length of token texts. Tokens with text
+            larger than this will be stopped. Use None to allow any length.
+        :param renumber: Change the 'pos' attribute of unstopped tokens
+            to reflect their position with the stopped words removed.
+
+        Note that unless tokens are created with ``removestops=False``, stopped
+        words are removed from the stream entirely; otherwise they are yielded
+        with ``stopped=True``, and the indexing code ignores such tokens anyway.
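+
+        For example, stopping only very short tokens:
+
+        >>> rext = RegexTokenizer()
+        >>> shortstop = StopFilter(stoplist=None, minsize=3)
+        >>> [t.text for t in shortstop(rext(u"a bb ccc dddd"))]
+        [u"ccc", u"dddd"]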
+        """
+
+        if stoplist is None:
+            self.stops = frozenset()
+        else:
+            self.stops = frozenset(stoplist)
+        self.min = minsize
+        self.max = maxsize
+        self.renumber = renumber
+
+    def __eq__(self, other):
+        return (other
+                and self.__class__ is other.__class__
+                and self.stops == other.stops
+                and self.min == other.min
+                and self.renumber == other.renumber)
+
+    def __call__(self, tokens):
+        stoplist = self.stops
+        minsize = self.min
+        maxsize = self.max
+        renumber = self.renumber
+
+        pos = None
+        for t in tokens:
+            text = t.text
+            if (len(text) >= minsize
+                and (maxsize is None or len(text) <= maxsize)
+                and text not in stoplist):
+                # This is not a stop word
+                if renumber and t.positions:
+                    if pos is None:
+                        pos = t.pos
+                    else:
+                        pos += 1
+                        t.pos = pos
+                t.stopped = False
+                yield t
+            else:
+                # This is a stop word
+                if not t.removestops:
+                    # This IS a stop word, but we're not removing them
+                    t.stopped = True
+                    yield t
+
+
+class StemFilter(Filter):
+    """Stems (removes suffixes from) the text of tokens using the Porter
+    stemming algorithm. Stemming attempts to reduce multiple forms of the same
+    root word (for example, "rendering", "renders", "rendered", etc.) to a
+    single word in the index.
+
+    >>> stemmer = RegexTokenizer() | StemFilter()
+    >>> [token.text for token in stemmer(u"fundamentally willows")]
+    [u"fundament", u"willow"]
+
+    You can pass your own stemming function to the StemFilter. The default
+    is the Porter stemming algorithm for English.
+
+    >>> stemfilter = StemFilter(stem_function)
+
+    By default, this class wraps an LRU cache around the stemming function. The
+    ``cachesize`` keyword argument sets the size of the cache. To make the
+    cache unbounded (the class caches every input), use ``cachesize=-1``. To
+    disable caching, use ``cachesize=None``.
+
+    If you compile and install the py-stemmer library, the
+    :class:`PyStemmerFilter` provides slightly easier access to the language
+    stemmers in that library.
+    """
+
+    __inittypes__ = dict(stemfn=object, ignore=list)
+
+    def __init__(self, stemfn=stem, ignore=None, cachesize=50000):
+        """
+        :param stemfn: the function to use for stemming.
+        :param ignore: a set/list of words that should not be stemmed. This is
+            converted into a frozenset. If you omit this argument, all tokens
+            are stemmed.
+        :param cachesize: the maximum number of words to cache. Use ``-1`` for
+            an unbounded cache, or ``None`` for no caching.
+        """
+
+        self.stemfn = stemfn
+        self.ignore = frozenset() if ignore is None else frozenset(ignore)
+        self.cachesize = cachesize
+        # clear() sets the _stem attr to a cached wrapper around self.stemfn
+        self.clear()
+
+    def __getstate__(self):
+        # Can't pickle a dynamic function, so we have to remove the _stem
+        # attribute from the state
+        return dict([(k, self.__dict__[k]) for k in self.__dict__
+                      if k != "_stem"])
+
+    def __setstate__(self, state):
+        # Check for old instances of StemFilter class, which didn't have a
+        # cachesize attribute and pickled the cache attribute
+        if "cachesize" not in state:
+            self.cachesize = 50000
+        if "ignores" in state:
+            self.ignore = state["ignores"]
+        elif "ignore" not in state:
+            self.ignore = frozenset()
+        if "cache" in state:
+            del state["cache"]
+
+        self.__dict__.update(state)
+        # Set the _stem attribute
+        self.clear()
+
+    def clear(self):
+        if self.cachesize < 0:
+            self._stem = unbound_cache(self.stemfn)
+        elif self.cachesize > 1:
+            self._stem = lru_cache(self.cachesize)(self.stemfn)
+        else:
+            self._stem = self.stemfn
+
+    def cache_info(self):
+        if self.cachesize <= 1:
+            return None
+        return self._stem.cache_info()
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.stemfn == other.stemfn)
+
+    def __call__(self, tokens):
+        stemfn = self._stem
+        ignore = self.ignore
+
+        for t in tokens:
+            if not t.stopped:
+                text = t.text
+                if text not in ignore:
+                    t.text = stemfn(text)
+            yield t
+
+
+class PyStemmerFilter(StemFilter):
+    """This is a simple sublcass of StemFilter that works with the py-stemmer
+    third-party library. You must have the py-stemmer library installed to use
+    this filter.
+
+    >>> PyStemmerFilter("spanish")
+    """
+
+    def __init__(self, lang="english", ignore=None, cachesize=10000):
+        """
+        :param lang: a string identifying the stemming algorithm to use. You
+            can get a list of available algorithms with the
+            :meth:`PyStemmerFilter.algorithms` method. The identification
+            strings are directly from the py-stemmer library.
+        :param ignore: a set/list of words that should not be stemmed. This is
+            converted into a frozenset. If you omit this argument, all tokens
+            are stemmed.
+        :param cachesize: the maximum number of words to cache.
+        """
+
+        import Stemmer
+
+        stemmer = Stemmer.Stemmer(lang)
+        stemmer.maxCacheSize = cachesize
+        self._stem = stemmer.stemWord
+        self.ignore = frozenset() if ignore is None else frozenset(ignore)
+
+    def algorithms(self):
+        """Returns a list of stemming algorithms provided by the py-stemmer
+        library.
+        """
+
+        import Stemmer
+
+        return Stemmer.algorithms()
+
+    def cache_info(self):
+        return None
+
+
+class CharsetFilter(Filter):
+    """Translates the text of tokens by calling unicode.translate() using the
+    supplied character mapping object. This is useful for case and accent
+    folding.
+
+    The ``whoosh.support.charset`` module has a useful map for accent folding.
+
+    >>> from whoosh.support.charset import accent_map
+    >>> retokenizer = RegexTokenizer()
+    >>> chfilter = CharsetFilter(accent_map)
+    >>> [t.text for t in chfilter(retokenizer(u'café'))]
+    [u'cafe']
+
+    Another way to get a character mapping object is to convert a Sphinx
+    charset table file using
+    :func:`whoosh.support.charset.charset_table_to_dict`.
+
+    >>> from whoosh.support.charset import charset_table_to_dict, default_charset
+    >>> retokenizer = RegexTokenizer()
+    >>> charmap = charset_table_to_dict(default_charset)
+    >>> chfilter = CharsetFilter(charmap)
+    >>> [t.text for t in chfilter(retokenizer(u'Stra\\xdfe'))]
+    [u'strase']
+
+    The Sphinx charset table format is described at
+    http://www.sphinxsearch.com/docs/current.html#conf-charset-table.
+    """
+
+    __inittypes__ = dict(charmap=dict)
+
+    def __init__(self, charmap):
+        """
+        :param charmap: a dictionary mapping from integer character numbers to
+            unicode characters, as required by the unicode.translate() method.
+        """
+        self.charmap = charmap
+
+    def __eq__(self, other):
+        return (other
+                and self.__class__ is other.__class__
+                and self.charmap == other.charmap)
+
+    def __call__(self, tokens):
+        assert hasattr(tokens, "__iter__")
+        charmap = self.charmap
+        for t in tokens:
+            t.text = t.text.translate(charmap)
+            yield t
+
+
+class NgramFilter(Filter):
+    """Splits token text into N-grams.
+
+    >>> rext = RegexTokenizer()
+    >>> stream = rext(u"hello there")
+    >>> ngf = NgramFilter(4)
+    >>> [token.text for token in ngf(stream)]
+    [u"hell", u"ello", u"ther", u"here"]
+
+    """
+
+    __inittypes__ = dict(minsize=int, maxsize=int)
+
+    def __init__(self, minsize, maxsize=None, at=None):
+        """
+        :param minsize: The minimum size of the N-grams.
+        :param maxsize: The maximum size of the N-grams. If you omit this
+            parameter, maxsize == minsize.
+        :param at: If 'start', only take N-grams from the start of each word.
+            if 'end', only take N-grams from the end of each word. Otherwise,
+            take all N-grams from the word (the default).
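+
+        For example, keeping only the leading trigram of each word:
+
+        >>> rext = RegexTokenizer()
+        >>> ngf = NgramFilter(3, at="start")
+        >>> [t.text for t in ngf(rext(u"hello there"))]
+        [u"hel", u"the"]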
+        """
+
+        self.min = minsize
+        self.max = maxsize or minsize
+        self.at = 0
+        if at == "start":
+            self.at = -1
+        elif at == "end":
+            self.at = 1
+
+    def __eq__(self, other):
+        return other and self.__class__ is other.__class__\
+        and self.min == other.min and self.max == other.max
+
+    def __call__(self, tokens):
+        assert hasattr(tokens, "__iter__")
+        at = self.at
+        for t in tokens:
+            text = t.text
+            if len(text) < self.min:
+                continue
+
+            chars = t.chars
+            if chars:
+                startchar = t.startchar
+            # Token positions don't mean much for N-grams,
+            # so we'll leave the token's original position
+            # untouched.
+
+            if t.mode == "query":
+                size = min(self.max, len(t.text))
+                if at == -1:
+                    t.text = text[:size]
+                    if chars:
+                        t.endchar = startchar + size
+                    yield t
+                elif at == 1:
+                    t.text = text[0 - size:]
+                    if chars:
+                        t.startchar = t.endchar - size
+                    yield t
+                else:
+                    for start in xrange(0, len(text) - size + 1):
+                        t.text = text[start:start + size]
+                        if chars:
+                            t.startchar = startchar + start
+                            t.endchar = startchar + start + size
+                        yield t
+            else:
+                if at == -1:
+                    limit = min(self.max, len(text))
+                    for size in xrange(self.min, limit + 1):
+                        t.text = text[:size]
+                        if chars:
+                            t.endchar = startchar + size
+                        yield t
+
+                elif at == 1:
+                    start = max(0, len(text) - self.max)
+                    for i in xrange(start, len(text) - self.min + 1):
+                        t.text = text[i:]
+                        if chars:
+                            t.startchar = t.endchar - len(t.text)
+                        yield t
+                else:
+                    for start in xrange(0, len(text) - self.min + 1):
+                        for size in xrange(self.min, self.max + 1):
+                            end = start + size
+                            if end > len(text):
+                                continue
+
+                            t.text = text[start:end]
+
+                            if chars:
+                                t.startchar = startchar + start
+                                t.endchar = startchar + end
+
+                            yield t
+
+
+class IntraWordFilter(Filter):
+    """Splits words into subwords and performs optional transformations on
+    subword groups. This filter is functionally based on yonik's
+    WordDelimiterFilter in Solr, but shares no code with it.
+
+    * Split on intra-word delimiters, e.g. `Wi-Fi` -> `Wi`, `Fi`.
+    * When splitwords=True, split on case transitions,
+      e.g. `PowerShot` -> `Power`, `Shot`.
+    * When splitnums=True, split on letter-number transitions,
+      e.g. `SD500` -> `SD`, `500`.
+    * Leading and trailing delimiter characters are ignored.
+    * Trailing possessive "'s" removed from subwords,
+      e.g. `O'Neil's` -> `O`, `Neil`.
+
+    The mergewords and mergenums arguments turn on merging of subwords.
+
+    When the merge arguments are false, subwords are not merged.
+
+    * `PowerShot` -> `0`:`Power`, `1`:`Shot` (where `0` and `1` are token
+      positions).
+
+    When one or both of the merge arguments are true, consecutive runs of
+    alphabetic and/or numeric subwords are merged into an additional token with
+    the same position as the last sub-word.
+
+    * `PowerShot` -> `0`:`Power`, `1`:`Shot`, `1`:`PowerShot`
+    * `A's+B's&C's` -> `0`:`A`, `1`:`B`, `2`:`C`, `2`:`ABC`
+    * `Super-Duper-XL500-42-AutoCoder!` -> `0`:`Super`, `1`:`Duper`, `2`:`XL`,
+      `2`:`SuperDuperXL`,
+      `3`:`500`, `4`:`42`, `4`:`50042`, `5`:`Auto`, `6`:`Coder`,
+      `6`:`AutoCoder`
+
+    When using this filter you should use a tokenizer that only splits on
+    whitespace, so the tokenizer does not remove intra-word delimiters before
+    this filter can see them, and put this filter before any use of
+    LowercaseFilter.
+
+    >>> analyzer = RegexTokenizer(r"\\S+") | IntraWordFilter() | LowercaseFilter()
+
+    One use for this filter is to help match different written representations
+    of a concept. For example, if the source text contained `wi-fi`, you
+    probably want `wifi`, `WiFi`, `wi-fi`, etc. to match. One way of doing this
+    is to specify mergewords=True and/or mergenums=True in the analyzer used
+    for indexing, and mergewords=False / mergenums=False in the analyzer used
+    for querying.
+
+    >>> iwf = MultiFilter(index=IntraWordFilter(mergewords=True, mergenums=True),
+                          query=IntraWordFilter(mergewords=False, mergenums=False))
+    >>> analyzer = RegexTokenizer(r"\S+") | iwf | LowercaseFilter()
+
+    (See :class:`MultiFilter`.)
+    """
+
+    # Create sets of unicode digit, uppercase, and lowercase characters.
+    digits = array("u")
+    uppers = array("u")
+    lowers = array("u")
+    for n in xrange(2 ** 16 - 1):
+        ch = unichr(n)
+        if ch.islower():
+            lowers.append(ch)
+        elif ch.isupper():
+            uppers.append(ch)
+        elif ch.isdigit():
+            digits.append(ch)
+
+    # Create escaped strings of characters for use in regular expressions
+    digits = re.escape("".join(digits))
+    uppers = re.escape("".join(uppers))
+    lowers = re.escape("".join(lowers))
+    letters = uppers + lowers
+
+    __inittypes__ = dict(delims=unicode, splitwords=bool, splitnums=bool,
+                         mergewords=bool, mergenums=bool)
+
+    def __init__(self, delims=u"-_'\"()!@#$%^&*[]{}<>\|;:,./?`~=+",
+                 splitwords=True, splitnums=True,
+                 mergewords=False, mergenums=False):
+        """
+        :param delims: a string of delimiter characters.
+        :param splitwords: if True, split at case transitions,
+            e.g. `PowerShot` -> `Power`, `Shot`
+        :param splitnums: if True, split at letter-number transitions,
+            e.g. `SD500` -> `SD`, `500`
+        :param mergewords: merge consecutive runs of alphabetic subwords into
+            an additional token with the same position as the last subword.
+        :param mergenums: merge consecutive runs of numeric subwords into an
+            additional token with the same position as the last subword.
+        """
+
+        self.delims = re.escape(delims)
+
+        # Expression for splitting at delimiter characters
+        self.splitter = re.compile(u"[%s]+" % (self.delims,), re.UNICODE)
+        # Expression for removing "'s" from the end of sub-words
+        dispat = u"(?<=[%s])'[Ss](?=$|[%s])" % (self.letters, self.delims)
+        self.disposses = re.compile(dispat, re.UNICODE)
+
+        # Expression for finding case and letter-number transitions
+        lower2upper = u"[%s][%s]" % (self.lowers, self.uppers)
+        letter2digit = u"[%s][%s]" % (self.letters, self.digits)
+        digit2letter = u"[%s][%s]" % (self.digits, self.letters)
+        if splitwords and splitnums:
+            splitpat = u"(%s|%s|%s)" % (lower2upper, letter2digit, digit2letter)
+            self.boundary = re.compile(splitpat, re.UNICODE)
+        elif splitwords:
+            self.boundary = re.compile(unicode(lower2upper), re.UNICODE)
+        elif splitnums:
+            numpat = u"(%s|%s)" % (letter2digit, digit2letter)
+            self.boundary = re.compile(numpat, re.UNICODE)
+
+        self.splitting = splitwords or splitnums
+        self.mergewords = mergewords
+        self.mergenums = mergenums
+
+    def __eq__(self, other):
+        return other and self.__class__ is other.__class__\
+        and self.__dict__ == other.__dict__
+
+    def split(self, string):
+        boundaries = self.boundary.finditer
+
+        # Are we splitting on word/num boundaries?
+        if self.splitting:
+            parts = []
+            # First, split on delimiters
+            splitted = self.splitter.split(string)
+
+            for run in splitted:
+                # For each delimited run of characters, find the boundaries
+                # (e.g. lower->upper, letter->num, num->letter) and split
+                # between them.
+                start = 0
+                for match in boundaries(run):
+                    middle = match.start() + 1
+                    parts.append(run[start:middle])
+                    start = middle
+
+                # Add the bit after the last split
+                if start < len(run):
+                    parts.append(run[start:])
+        else:
+            # Just split on delimiters
+            parts = self.splitter.split(string)
+        return parts
+
+    def merge(self, parts):
+        mergewords = self.mergewords
+        mergenums = self.mergenums
+
+        # Current type (1=alpha, 2=digit)
+        last = 0
+        # Where to insert a merged term in the original list
+        insertat = 0
+        # Buffer for parts to merge
+        buf = []
+        for pos, part in parts[:]:
+            # Set the type of this part
+            if part.isalpha():
+                this = 1
+            elif part.isdigit():
+                this = 2
+
+            # Is this the same type as the previous part?
+            if buf and (this == last == 1 and mergewords)\
+            or (this == last == 2 and mergenums):
+                # This part is the same type as the previous. Add it to the
+                # buffer of parts to merge.
+                buf.append(part)
+            else:
+                # This part is different than the previous.
+                if len(buf) > 1:
+                    # If the buffer has at least two parts in it, merge them
+                    # and add them to the original list of parts.
+                    parts.insert(insertat, (pos - 1, u"".join(buf)))
+                    insertat += 1
+                # Reset the buffer
+                buf = [part]
+                last = this
+            insertat += 1
+
+        # If there are parts left in the buffer at the end, merge them and add
+        # them to the original list.
+        if len(buf) > 1:
+            parts.append((pos, u"".join(buf)))
+
+    def __call__(self, tokens):
+        disposses = self.disposses.sub
+        merge = self.merge
+        mergewords = self.mergewords
+        mergenums = self.mergenums
+
+        # This filter renumbers tokens as it expands them. New position
+        # counter.
+
+        newpos = None
+        for t in tokens:
+            text = t.text
+
+            # If this is the first token we've seen, use it to set the new
+            # position counter
+            if newpos is None:
+                if t.positions:
+                    newpos = t.pos
+                else:
+                    # Token doesn't have positions, just use 0
+                    newpos = 0
+
+            if (text.isalpha()
+                and (text.islower() or text.isupper())) or text.isdigit():
+                # Short-circuit the common cases of no delimiters, no case
+                # transitions, only digits, etc.
+                t.pos = newpos
+                yield t
+                newpos += 1
+            else:
+                # TODO: would checking for an apostrophe first (if "'" in text:)
+                # be faster than always running the disposses regex?
+                text = disposses("", text)
+
+                # Split the token text on delimiters, word and/or number
+                # boundaries, and give the split parts positions
+                parts = [(newpos + i, part)
+                         for i, part in enumerate(self.split(text))]
+
+                # Did the split yield more than one part?
+                if len(parts) > 1:
+                    # If the options are set, merge consecutive runs of all-
+                    # letters and/or all-numbers.
+                    if mergewords or mergenums:
+                        merge(parts)
+
+                    # Yield tokens for the parts
+                    for pos, text in parts:
+                        t.text = text
+                        t.pos = pos
+                        yield t
+
+                    # Set the new position counter based on the last part
+                    newpos = parts[-1][0] + 1
+                else:
+                    # The split only gave one part, so just yield the
+                    # "dispossesed" text.
+                    t.text = text
+                    t.pos = newpos
+                    yield t
+                    newpos += 1
+
+
+class BiWordFilter(Filter):
+    """Merges adjacent tokens into "bi-word" tokens, so that for example::
+
+        "the", "sign", "of", "four"
+
+    becomes::
+
+        "the-sign", "sign-of", "of-four"
+
+    This can be used to create fields for pseudo-phrase searching, where if
+    all the terms match the document probably contains the phrase, but the
+    searching is faster than actually doing a phrase search on individual word
+    terms.
+
+    The ``BiWordFilter`` is much faster than using the otherwise equivalent
+    ``ShingleFilter(2)``.
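+
+    For example:
+
+    >>> ana = RegexTokenizer() | BiWordFilter()
+    >>> [t.text for t in ana(u"the sign of four")]
+    [u"the-sign", u"sign-of", u"of-four"]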
+    """
+
+    def __init__(self, sep="-"):
+        self.sep = sep
+
+    def __call__(self, tokens):
+        sep = self.sep
+        prev_text = None
+        prev_startchar = None
+        prev_pos = None
+        atleastone = False
+
+        for token in tokens:
+            # Save the original text of this token
+            text = token.text
+
+            # Save the original position
+            positions = token.positions
+            if positions:
+                ps = token.pos
+
+            # Save the original start char
+            chars = token.chars
+            if chars:
+                sc = token.startchar
+
+            if prev_text is not None:
+                # Use the pos and startchar from the previous token
+                if positions:
+                    token.pos = prev_pos
+                if chars:
+                    token.startchar = prev_startchar
+
+                # Join the previous token text and the current token text to
+                # form the biword token
+                token.text = "".join((prev_text, sep, text))
+                yield token
+                atleastone = True
+
+            # Save the originals and the new "previous" values
+            prev_text = text
+            if chars:
+                prev_startchar = sc
+            if positions:
+                prev_pos = ps
+
+        # If no bi-words were emitted, that is, the token stream only had
+        # a single token, then emit that single token.
+        if not atleastone:
+            yield token
+
+
+class ShingleFilter(Filter):
+    """Merges a certain number of adjacent tokens into multi-word tokens, so
+    that for example::
+
+        "better", "a", "witty", "fool", "than", "a", "foolish", "wit"
+
+    with ``ShingleFilter(3, ' ')`` becomes::
+
+        'better a witty', 'a witty fool', 'witty fool than', 'fool than a',
+        'than a foolish', 'a foolish wit'
+
+    This can be used to create fields for pseudo-phrase searching, where if
+    all the terms match the document probably contains the phrase, but the
+    searching is faster than actually doing a phrase search on individual word
+    terms.
+
+    If you're using two-word shingles, you should use the functionally
+    equivalent ``BiWordFilter`` instead because it's faster than
+    ``ShingleFilter``.
+    """
+
+    def __init__(self, size=2, sep="-"):
+        self.size = size
+        self.sep = sep
+
+    def __call__(self, tokens):
+        size = self.size
+        sep = self.sep
+        buf = deque()
+        atleastone = False
+
+        def make_token():
+            tk = buf[0]
+            tk.text = sep.join([t.text for t in buf])
+            if tk.chars:
+                tk.endchar = buf[-1].endchar
+            return tk
+
+        for token in tokens:
+            buf.append(token.copy())
+            if len(buf) == size:
+                atleastone = True
+                yield make_token()
+                buf.popleft()
+
+        # If no shingles were emitted, that is, the token stream had fewer than
+        # 'size' tokens, then emit a single token with whatever tokens there
+        # were (an empty token stream emits nothing)
+        if not atleastone and buf:
+            yield make_token()
+
+
+class BoostTextFilter(Filter):
+    "This filter is deprecated, use :class:`DelimitedAttributeFilter` instead."
+
+    def __init__(self, expression, group=1, default=1.0):
+        """
+        :param expression: a compiled regular expression object representing
+            the pattern to look for within each token.
+        :param group: the group name or number to use as the boost number
+            (what to pass to match.group()). The string value of this group is
+            passed to float().
+        :param default: the default boost to use for tokens that don't have
+            the marker.
+        """
+
+        self.expression = expression
+        self.group = group
+        self.default = default
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.expression == other.expression
+                and self.default == other.default
+                and self.group == other.group)
+
+    def __call__(self, tokens):
+        expression = self.expression
+        groupnum = self.group
+        default = self.default
+
+        for t in tokens:
+            text = t.text
+            m = expression.match(text)
+            if m:
+                text = text[:m.start()] + text[m.end():]
+                t.boost = float(m.group(groupnum))
+            else:
+                t.boost = default
+
+            yield t
+
+
+class DelimitedAttributeFilter(Filter):
+    """Looks for delimiter characters in the text of each token and stores the
+    data after the delimiter in a named attribute on the token.
+
+    The defaults are set up to use the ``^`` character as a delimiter and store
+    the value after the ``^`` as the boost for the token.
+
+    >>> daf = DelimitedAttributeFilter(delimiter="^", attribute="boost")
+    >>> ana = RegexTokenizer("\\\\S+") | DelimitedAttributeFilter()
+    >>> for t in ana(u"image render^2 file^0.5"):
+    ...    print "%r %f" % (t.text, t.boost)
+    'image' 1.0
+    'render' 2.0
+    'file' 0.5
+
+    Note that you need to make sure your tokenizer includes the delimiter and
+    data as part of the token!
+    """
+
+    def __init__(self, delimiter="^", attribute="boost", default=1.0,
+                 type=float):
+        """
+        :param delimiter: a string that, when present in a token's text,
+            separates the actual text from the "data" payload.
+        :param attribute: the name of the attribute in which to store the
+            data on the token.
+        :param default: the value to use for the attribute for tokens that
+            don't have delimited data.
+        :param type: the type of the data, for example ``str`` or ``float``.
+            This is used to convert the string value of the data before
+            storing it in the attribute.
+        """
+
+        self.delim = delimiter
+        self.attr = attribute
+        self.default = default
+        self.type = type
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.delim == other.delim
+                and self.attr == other.attr
+                and self.default == other.default)
+
+    def __call__(self, tokens):
+        delim = self.delim
+        attr = self.attr
+        default = self.default
+        typ = self.type
+
+        for t in tokens:
+            text = t.text
+            pos = text.find(delim)
+            if pos > -1:
+                setattr(t, attr, typ(text[pos + 1:]))
+                t.text = text[:pos]
+            else:
+                setattr(t, attr, default)
+
+            yield t
+
+
+class DoubleMetaphoneFilter(Filter):
+    """Transforms the text of the tokens using Lawrence Philips's Double
+    Metaphone algorithm. This algorithm attempts to encode words in such a way
+    that similar-sounding words reduce to the same code. This may be useful for
+    fields containing the names of people and places, and other uses where
+    tolerance of spelling differences is desirable.
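+
+    A typical chain (an illustrative sketch; the tokenizer and lowercase filter
+    are the ones defined in this module) keeps the original tokens alongside
+    the phonetic codes by passing ``combine=True``:
+
+    >>> dmf = DoubleMetaphoneFilter(combine=True)
+    >>> ana = RegexTokenizer() | LowercaseFilter() | dmf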
+    """
+
+    def __init__(self, primary_boost=1.0, secondary_boost=0.5, combine=False):
+        """
+        :param primary_boost: the boost to apply to the token containing the
+            primary code.
+        :param secondary_boost: the boost to apply to the token containing the
+            secondary code, if any.
+        :param combine: if True, the original unencoded tokens are kept in the
+            stream, preceding the encoded tokens.
+        """
+
+        self.primary_boost = primary_boost
+        self.secondary_boost = secondary_boost
+        self.combine = combine
+
+    def __eq__(self, other):
+        return (other
+                and self.__class__ is other.__class__
+                and self.primary_boost == other.primary_boost)
+
+    def __call__(self, tokens):
+        primary_boost = self.primary_boost
+        secondary_boost = self.secondary_boost
+        combine = self.combine
+
+        for t in tokens:
+            if combine:
+                yield t
+
+            primary, secondary = double_metaphone(t.text)
+            b = t.boost
+            # Overwrite the token's text and boost and yield it
+            if primary:
+                t.text = primary
+                t.boost = b * primary_boost
+                yield t
+            if secondary:
+                t.text = secondary
+                t.boost = b * secondary_boost
+                yield t
+
+
+class SubstitutionFilter(Filter):
+    """Performs a regular expression substitution on the token text.
+
+    This is especially useful for removing text from tokens, for example
+    hyphens::
+
+        ana = RegexTokenizer(r"\\S+") | SubstitutionFilter("-", "")
+
+    Because it has the full power of the re.sub() method behind it, this filter
+    can perform some fairly complex transformations. For example, to take tokens
+    like ``'a=b', 'c=d', 'e=f'`` and change them to ``'b=a', 'd=c', 'f=e'``::
+
+        # Analyzer that swaps the text on either side of an equal sign
+        ana = RegexTokenizer(r"\\S+") | SubstitutionFilter("([^=]*)=(.*)", r"\\2=\\1")
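+
+    For example (illustrative), the hyphen-removing filter above behaves like
+    this:
+
+    >>> ana = RegexTokenizer(r"\\S+") | SubstitutionFilter("-", "")
+    >>> [t.text for t in ana(u"full-text search")]
+    [u'fulltext', u'search']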
+    """
+
+    def __init__(self, pattern, replacement):
+        """
+        :param pattern: a pattern string or compiled regular expression object
+            describing the text to replace.
+        :param replacement: the substitution text.
+        """
+
+        if isinstance(pattern, basestring):
+            pattern = re.compile(pattern, re.UNICODE)
+        self.pattern = pattern
+        self.replacement = replacement
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.pattern == other.pattern
+                and self.replacement == other.replacement)
+
+    def __call__(self, tokens):
+        pattern = self.pattern
+        replacement = self.replacement
+
+        for t in tokens:
+            t.text = pattern.sub(replacement, t.text)
+            yield t
+
+
+# Analyzers
+
+class Analyzer(Composable):
+    """ Abstract base class for analyzers. Since the analyzer protocol is just
+    __call__, this is pretty simple -- it mostly exists to provide common
+    implementations of __repr__ and __eq__.
+    """
+
+    def __repr__(self):
+        return "%s()" % self.__class__.__name__
+
+    def __eq__(self, other):
+        return (other
+                and self.__class__ is other.__class__
+                and self.__dict__ == other.__dict__)
+
+    def __call__(self, value, **kwargs):
+        raise NotImplementedError
+
+    def clean(self):
+        pass
+
+
+class CompositeAnalyzer(Analyzer):
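+    """Analyzer produced by composing tokenizers and filters, typically with
+    the ``|`` operator, for example (illustrative)::
+
+        ana = RegexTokenizer() | LowercaseFilter() | StopFilter()
+
+    Calling the composite runs the text through each item in turn, left to
+    right.
+    """
+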
+    def __init__(self, *composables):
+        self.items = []
+        for comp in composables:
+            if isinstance(comp, CompositeAnalyzer):
+                self.items.extend(comp.items)
+            else:
+                self.items.append(comp)
+
+    def __repr__(self):
+        return "%s(%s)" % (self.__class__.__name__,
+                           ", ".join(repr(item) for item in self.items))
+
+    def __call__(self, value, **kwargs):
+        items = self.items
+        gen = items[0](value, **kwargs)
+        for item in items[1:]:
+            gen = item(gen)
+        return gen
+
+    def __getitem__(self, item):
+        return self.items.__getitem__(item)
+
+    def __len__(self):
+        return len(self.items)
+
+    def __eq__(self, other):
+        return (other
+                and self.__class__ is other.__class__
+                and self.items == other.items)
+
+    def clean(self):
+        for item in self.items:
+            if hasattr(item, "clean"):
+                item.clean()
+
+
+def IDAnalyzer(lowercase=False):
+    """Deprecated, just use an IDTokenizer directly, with a LowercaseFilter if
+    desired.
+    """
+
+    tokenizer = IDTokenizer()
+    if lowercase:
+        tokenizer = tokenizer | LowercaseFilter()
+    return tokenizer
+IDAnalyzer.__inittypes__ = dict(lowercase=bool)
+
+
+def KeywordAnalyzer(lowercase=False, commas=False):
+    """Parses space-separated tokens.
+
+    >>> ana = KeywordAnalyzer()
+    >>> [token.text for token in ana(u"Hello there, this is a TEST")]
+    [u"Hello", u"there,", u"this", u"is", u"a", u"TEST"]
+
+    :param lowercase: whether to lowercase the tokens.
+    :param commas: if True, items are separated by commas rather than spaces.
+    """
+
+    if commas:
+        tokenizer = CommaSeparatedTokenizer()
+    else:
+        tokenizer = SpaceSeparatedTokenizer()
+    if lowercase:
+        tokenizer = tokenizer | LowercaseFilter()
+    return tokenizer
+KeywordAnalyzer.__inittypes__ = dict(lowercase=bool, commas=bool)
+
+
+def RegexAnalyzer(expression=r"\w+(\.?\w+)*", gaps=False):
+    """Deprecated, just use a RegexTokenizer directly.
+    """
+
+    return RegexTokenizer(expression=expression, gaps=gaps)
+RegexAnalyzer.__inittypes__ = dict(expression=unicode, gaps=bool)
+
+
+def SimpleAnalyzer(expression=default_pattern, gaps=False):
+    """Composes a RegexTokenizer with a LowercaseFilter.
+
+    >>> ana = SimpleAnalyzer()
+    >>> [token.text for token in ana(u"Hello there, this is a TEST")]
+    [u"hello", u"there", u"this", u"is", u"a", u"test"]
+
+    :param expression: The regular expression pattern to use to extract tokens.
+    :param gaps: If True, the tokenizer *splits* on the expression, rather
+        than matching on the expression.
+    """
+
+    return RegexTokenizer(expression=expression, gaps=gaps) | LowercaseFilter()
+SimpleAnalyzer.__inittypes__ = dict(expression=unicode, gaps=bool)
+
+
+def StandardAnalyzer(expression=default_pattern, stoplist=STOP_WORDS,
+                     minsize=2, maxsize=None, gaps=False):
+    """Composes a RegexTokenizer with a LowercaseFilter and optional
+    StopFilter.
+
+    >>> ana = StandardAnalyzer()
+    >>> [token.text for token in ana(u"Testing is testing and testing")]
+    [u"testing", u"testing", u"testing"]
+
+    :param expression: The regular expression pattern to use to extract tokens.
+    :param stoplist: A list of stop words. Set this to None to disable
+        the stop word filter.
+    :param minsize: Words smaller than this are removed from the stream.
+    :param maxsize: Words longer than this are removed from the stream.
+    :param gaps: If True, the tokenizer *splits* on the expression, rather
+        than matching on the expression.
+    """
+
+    ret = RegexTokenizer(expression=expression, gaps=gaps)
+    chain = ret | LowercaseFilter()
+    if stoplist is not None:
+        chain = chain | StopFilter(stoplist=stoplist, minsize=minsize,
+                                   maxsize=maxsize)
+    return chain
+StandardAnalyzer.__inittypes__ = dict(expression=unicode, gaps=bool,
+                                      stoplist=list, minsize=int, maxsize=int)
+
+
+def StemmingAnalyzer(expression=default_pattern, stoplist=STOP_WORDS,
+                     minsize=2, maxsize=None, gaps=False, stemfn=stem,
+                     ignore=None, cachesize=50000):
+    """Composes a RegexTokenizer with a lower case filter, an optional stop
+    filter, and a stemming filter.
+
+    >>> ana = StemmingAnalyzer()
+    >>> [token.text for token in ana(u"Testing is testing and testing")]
+    [u"test", u"test", u"test"]
+
+    :param expression: The regular expression pattern to use to extract tokens.
+    :param stoplist: A list of stop words. Set this to None to disable
+        the stop word filter.
+    :param minsize: Words smaller than this are removed from the stream.
+    :param maxsize: Words longer than this are removed from the stream.
+    :param gaps: If True, the tokenizer *splits* on the expression, rather
+        than matching on the expression.
+    :param ignore: a set of words to not stem.
+    :param cachesize: the maximum number of stemmed words to cache. The larger
+        this number, the faster stemming will be but the more memory it will
+        use.
+    """
+
+    ret = RegexTokenizer(expression=expression, gaps=gaps)
+    chain = ret | LowercaseFilter()
+    if stoplist is not None:
+        chain = chain | StopFilter(stoplist=stoplist, minsize=minsize,
+                                   maxsize=maxsize)
+    return chain | StemFilter(stemfn=stemfn, ignore=ignore, cachesize=cachesize)
+StemmingAnalyzer.__inittypes__ = dict(expression=unicode, gaps=bool,
+                                      stoplist=list, minsize=int, maxsize=int)
+
+
+def FancyAnalyzer(expression=r"\s+", stoplist=STOP_WORDS, minsize=2,
+                  maxsize=None, gaps=True, splitwords=True, splitnums=True,
+                  mergewords=False, mergenums=False):
+    """Composes a RegexTokenizer with an IntraWordFilter, LowercaseFilter, and
+    StopFilter.
+
+    >>> ana = FancyAnalyzer()
+    >>> [token.text for token in ana(u"Should I call getInt or get_real?")]
+    [u"should", u"call", u"getInt", u"get", u"int", u"get_real", u"get", u"real"]
+
+    :param expression: The regular expression pattern to use to extract tokens.
+    :param stoplist: A list of stop words. Set this to None to disable
+        the stop word filter.
+    :param minsize: Words smaller than this are removed from the stream.
+    :param maxsize: Words longer than this are removed from the stream.
+    :param gaps: If True, the tokenizer *splits* on the expression, rather
+        than matching on the expression.
+    """
+
+    ret = RegexTokenizer(expression=expression, gaps=gaps)
+    iwf = IntraWordFilter(splitwords=splitwords, splitnums=splitnums,
+                          mergewords=mergewords, mergenums=mergenums)
+    lcf = LowercaseFilter()
+    swf = StopFilter(stoplist=stoplist, minsize=minsize)
+
+    return ret | iwf | lcf | swf
+FancyAnalyzer.__inittypes__ = dict(expression=unicode, gaps=bool,
+                                   stoplist=list, minsize=int, maxsize=int)
+
+
+def NgramAnalyzer(minsize, maxsize=None):
+    """Composes an NgramTokenizer and a LowercaseFilter.
+
+    >>> ana = NgramAnalyzer(4)
+    >>> [token.text for token in ana(u"hi there")]
+    [u"hi t", u"i th", u" the", u"ther", u"here"]
+    """
+
+    return NgramTokenizer(minsize, maxsize=maxsize) | LowercaseFilter()
+NgramAnalyzer.__inittypes__ = dict(minsize=int, maxsize=int)
+
+
+def NgramWordAnalyzer(minsize, maxsize=None, tokenizer=None, at=None):
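+    """Composes a word tokenizer (``RegexTokenizer`` by default), a
+    ``LowercaseFilter``, and an ``NgramFilter``, so that N-grams are taken from
+    individual words rather than from the text as a whole.
+    """
+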
+    if not tokenizer:
+        tokenizer = RegexTokenizer()
+    return tokenizer | LowercaseFilter() | NgramFilter(minsize, maxsize, at=at)
+
+
+
diff --git a/lib/whoosh/whoosh/classify.py b/lib/whoosh/whoosh/classify.py
new file mode 100644
index 0000000..5455452
--- /dev/null
+++ b/lib/whoosh/whoosh/classify.py
@@ -0,0 +1,443 @@
+# Copyright 2008 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""Classes and functions for classifying and extracting information from
+documents.
+"""
+
+from __future__ import division
+from collections import defaultdict
+from math import log, sqrt
+
+
+# Expansion models
+
+class ExpansionModel(object):
+    def __init__(self, doc_count, field_length):
+        self.N = doc_count
+        self.collection_total = field_length
+        self.mean_length = self.collection_total / self.N
+
+    def normalizer(self, maxweight, top_total):
+        raise NotImplementedError
+
+    def score(self, weight_in_top, weight_in_collection, top_total):
+        raise NotImplementedError
+
+
+class Bo1Model(ExpansionModel):
+    def normalizer(self, maxweight, top_total):
+        f = maxweight / self.N
+        return (maxweight * log((1.0 + f) / f) + log(1.0 + f)) / log(2.0)
+
+    def score(self, weight_in_top, weight_in_collection, top_total):
+        f = weight_in_collection / self.N
+        return weight_in_top * log((1.0 + f) / f, 2) + log(1.0 + f, 2)
+
+
+class Bo2Model(ExpansionModel):
+    def normalizer(self, maxweight, top_total):
+        f = maxweight * self.N / self.collection_total
+        return (maxweight * log((1.0 + f) / f, 2) + log(1.0 + f, 2))
+
+    def score(self, weight_in_top, weight_in_collection, top_total):
+        f = weight_in_top * top_total / self.collection_total
+        return weight_in_top * log((1.0 + f) / f, 2) + log(1.0 + f, 2)
+
+
+class KLModel(ExpansionModel):
+    def normalizer(self, maxweight, top_total):
+        return maxweight * log(self.collection_total / top_total) / log(2.0) * top_total
+
+    def score(self, weight_in_top, weight_in_collection, top_total):
+        wit_over_tt = weight_in_top / top_total
+        wic_over_ct = weight_in_collection / self.collection_total
+
+        if wit_over_tt < wic_over_ct:
+            return 0
+        else:
+            return wit_over_tt * log(wit_over_tt / wic_over_ct, 2)
+
+
+class Expander(object):
+    """Uses an ExpansionModel to expand the set of query terms based on the top
+    N result documents.
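+
+    An illustrative usage sketch (``reader`` is an open
+    :class:`whoosh.reading.IndexReader`; the field name and the document
+    numbers of the top results are assumptions)::
+
+        expander = Expander(reader, "content")
+        for docnum in top_docnums:
+            expander.add_document(docnum)
+        keywords = expander.expanded_terms(10)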
+    """
+
+    def __init__(self, ixreader, fieldname, model=Bo1Model):
+        """
+        :param ixreader: A :class:`whoosh.reading.IndexReader` object.
+        :param fieldname: The name of the field in which to search.
+        :param model: (classify.ExpansionModel) The model to use for expanding
+            the query terms. If you omit this parameter, the expander uses
+            scoring.Bo1Model by default.
+        """
+
+        self.ixreader = ixreader
+        self.fieldname = fieldname
+
+        if type(model) is type:
+            model = model(self.ixreader.doc_count_all(),
+                          self.ixreader.field_length(fieldname))
+        self.model = model
+
+        # Cache the collection frequency of every term in this field. This
+        # turns out to be much faster than reading each individual weight
+        # from the term index as we add words.
+        self.collection_freq = dict((word, freq) for word, _, freq
+                                      in self.ixreader.iter_field(self.fieldname))
+
+        # Maps words to their weight in the top N documents.
+        self.topN_weight = defaultdict(float)
+
+        # Total weight of all terms in the top N documents.
+        self.top_total = 0
+
+    def add(self, vector):
+        """Adds forward-index information about one of the "top N" documents.
+
+        :param vector: A series of (text, weight) tuples, such as is
+            returned by Reader.vector_as("weight", docnum, fieldname).
+        """
+
+        total_weight = 0
+        topN_weight = self.topN_weight
+
+        for word, weight in vector:
+            total_weight += weight
+            topN_weight[word] += weight
+
+        self.top_total += total_weight
+
+    def add_document(self, docnum):
+        if self.ixreader.has_vector(docnum, self.fieldname):
+            self.add(self.ixreader.vector_as("weight", docnum, self.fieldname))
+        elif self.ixreader.schema[self.fieldname].stored:
+            self.add_text(self.ixreader.stored_fields(docnum).get(self.fieldname))
+        else:
+            raise Exception("Field %r in document %s is not vectored or stored" % (self.fieldname, docnum))
+
+    def add_text(self, string):
+        field = self.ixreader.schema[self.fieldname]
+        self.add((text, weight) for text, freq, weight, value
+                 in field.index(string))
+
+    def expanded_terms(self, number, normalize=True):
+        """Returns the N most important terms in the vectors added so far.
+
+        :param number: The number of terms to return.
+        :param normalize: Whether to normalize the weights.
+        :returns: A list of ("term", weight) tuples.
+        """
+
+        model = self.model
+        tlist = []
+        maxweight = 0
+        collection_freq = self.collection_freq
+
+        for word, weight in self.topN_weight.iteritems():
+            if word in collection_freq:
+                score = model.score(weight, collection_freq[word], self.top_total)
+                if score > maxweight:
+                    maxweight = score
+                tlist.append((score, word))
+
+        if normalize:
+            norm = model.normalizer(maxweight, self.top_total)
+        else:
+            norm = maxweight
+        tlist = [(weight / norm, t) for weight, t in tlist]
+        tlist.sort(key=lambda x: (0 - x[0], x[1]))
+
+        return [(t, weight) for weight, t in tlist[:number]]
+
+
+# Clustering
+
+def median(nums):
+    nums = sorted(nums)
+    l = len(nums)
+    if l % 2:  # Odd
+        return nums[l // 2]
+    else:
+        return (nums[l // 2 - 1] + nums[l // 2]) / 2.0
+
+
+def mean(nums):
+    return sum(nums) / len(nums)
+
+
+def minkowski_distance(x, y, p=2):
+    assert(len(y) == len(x))
+    s = sum(abs(x[i] - y[i]) ** p for i in xrange(len(x)))
+    return s ** (1.0 / p)
+
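+# For example (illustrative): with the default p=2 this is the Euclidean
+# distance, so minkowski_distance((0, 0), (3, 4)) == 5.0.
+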
+
+def list_to_matrix(ls, f, symmetric=False, diagonal=None):
+    matrix = []
+    for rownum, i1 in enumerate(ls):
+        row = []
+        for colnum, i2 in enumerate(ls):
+            if diagonal is not None and rownum == colnum:
+                # Cell on the diagonal
+                row.append(diagonal)
+            elif symmetric and colnum < rownum:
+                # Matrix is symmetrical and we've already calculated this cell
+                # on the other side of the diagonal.
+                row.append(matrix[colnum][rownum])
+            else:
+                row.append(f(i1, i2))
+        matrix.append(row)
+    return matrix
+
+
+def magnitude(v):
+    return sqrt(sum(v[i] ** 2 for i in xrange(len(v))))
+
+
+def dot_product(v1, v2):
+    assert len(v1) == len(v2)
+    return sum(v1[i] * v2[i] for i in xrange(len(v1)))
+
+
+def centroid(points, method=median):
+    return tuple(method([point[i] for point in points])
+                 for i in xrange(len(points[0])))
+
+
+class Cluster(object):
+    def __init__(self, *items):
+        self.items = list(items)
+
+    def __repr__(self):
+        return "<C %r>" % (self.items, )
+
+    def __len__(self):
+        return len(self.items)
+
+    def __add__(self, cluster):
+        return Cluster(self.items + cluster.items)
+
+    def __iter__(self):
+        return iter(self.items)
+
+    def __getitem__(self, n):
+        return self.items.__getitem__(n)
+
+    def append(self, item):
+        self.items.append(item)
+
+    def remove(self, item):
+        self.items.remove(item)
+
+    def pop(self, i=None):
+        if i is None:
+            return self.items.pop()
+        return self.items.pop(i)
+
+    def flatten(self):
+        for item in self.items:
+            if isinstance(item, Cluster):
+                for i2 in item.flatten():
+                    yield i2
+            else:
+                yield item
+
+    def dump(self, tab=0):
+        print "%s-" % (" " * tab, )
+        for item in self.items:
+            if isinstance(item, Cluster):
+                item.dump(tab + 2)
+            else:
+                print "%s%r" % (" " * tab, item)
+
+
+class HierarchicalClustering(object):
+    def __init__(self, distance_fn, linkage="uclus"):
+        self.distance = distance_fn
+        if linkage == "uclus":
+            self.linkage = self.uclus_dist
+        if linkage == "average":
+            self.linkage = self.average_linkage_dist
+        if linkage == "complete":
+            self.linkage = self.complete_linkage_dist
+        if linkage == "single":
+            self.linkage = self.single_linkage_dist
+
+    def uclus_dist(self, x, y):
+        distances = []
+        for xi in x.flatten():
+            for yi in y.flatten():
+                distances.append(self.distance(xi, yi))
+        return median(distances)
+
+    def average_linkage_dist(self, x, y):
+        distances = []
+        for xi in x.flatten():
+            for yi in y.flatten():
+                distances.append(self.distance(xi, yi))
+        return mean(distances)
+
+    def complete_linkage_dist(self, x, y):
+        maxdist = self.distance(x[0], y[0])
+        for xi in x.flatten():
+            for yi in y.flatten():
+                maxdist = max(maxdist, self.distance(xi, yi))
+        return maxdist
+
+    def single_linkage_dist(self, x, y):
+        mindist = self.distance(x[0], y[0])
+        for xi in x.flatten():
+            for yi in y.flatten():
+                mindist = min(mindist, self.distance(xi, yi))
+        return mindist
+
+    def clusters(self, data):
+        data = [Cluster(x) for x in data]
+        linkage = self.linkage
+        matrix = None
+        sequence = 0
+        while matrix is None or len(matrix) > 2:
+            matrix = list_to_matrix(data, linkage, True, 0)
+            lowrow, lowcol = None, None
+            mindist = None
+            for rownum, row in enumerate(matrix):
+                for colnum, cell in enumerate(row):
+                    if rownum != colnum and (cell < mindist or lowrow is None):
+                        lowrow, lowcol = rownum, colnum
+                        mindist = cell
+
+            sequence += 1
+            cluster = Cluster(data[lowrow], data[lowcol])
+
+            data.remove(data[max(lowrow, lowcol)])
+            data.remove(data[min(lowrow, lowcol)])
+            data.append(cluster)
+
+        if isinstance(data, list):
+            data = Cluster(*data)
+        return data
+
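+# For example (an illustrative sketch): agglomerative clustering of plain
+# numbers with an absolute-difference distance function; clusters() returns a
+# nested Cluster tree whose leaves are the original items:
+#
+#     hc = HierarchicalClustering(lambda a, b: abs(a - b))
+#     tree = hc.clusters([1, 2, 10, 11])
+#     list(tree.flatten())   # -> the four original numbers, in tree order
+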
+
+class KMeansClustering(object):
+    def __init__(self, distance_fn=None):
+        self.distance = distance_fn or minkowski_distance
+
+    def clusters(self, data, count):
+        if len(data) > 1 and isinstance(data[0], (list, tuple)):
+            l = len(data[0])
+            if not all(len(item) == l for item in data[1:]):
+                raise ValueError("Not all items in %r have the same dimension" % (data, ))
+        if count <= 1:
+            raise ValueError("You must ask for at least 2 clusters")
+        if not data or len(data) == 1 or count >= len(data):
+            return data
+
+        clusters = [Cluster() for _ in xrange(count)]
+        for i, item in enumerate(data):
+            clusters[i % count].append(item)
+
+        def move_item(item, pos, origin):
+            closest = origin
+            for cluster in clusters:
+                if (self.distance(item, centroid(cluster))
+                    < self.distance(item, centroid(closest))):
+                    closest = cluster
+            if closest is not origin:
+                closest.append(origin.pop(pos))
+                return True
+            return False
+
+        moved = True
+        while moved:
+            moved = False
+            for cluster in clusters:
+                for pos, item in enumerate(cluster):
+                    moved = move_item(item, pos, cluster) or moved
+
+        return clusters
+
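+# For example (an illustrative sketch): k-means clustering of one-dimensional
+# points (as 1-tuples) into two groups with the default Minkowski (Euclidean)
+# distance:
+#
+#     km = KMeansClustering()
+#     km.clusters([(1,), (2,), (10,), (11,)], 2)
+#     # -> two Cluster objects, roughly [(1,), (2,)] and [(10,), (11,)]
+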
+
+# Similarity functions
+
+def shingles(input, size=2):
+    d = defaultdict(int)
+    for shingle in (input[i:i + size]
+                    for i in xrange(len(input) - (size - 1))):
+        d[shingle] += 1
+    return d.iteritems()
+
+
+def simhash(features, hashbits=32):
+    if hashbits == 32:
+        hashfn = hash
+    else:
+        hashfn = lambda s: _hash(s, hashbits)
+
+    vs = [0] * hashbits
+    for feature, weight in features:
+        h = hashfn(feature)
+        for i in xrange(hashbits):
+            if h & (1 << i):
+                vs[i] += weight
+            else:
+                vs[i] -= weight
+
+    out = 0
+    for i, v in enumerate(vs):
+        if v > 0:
+            out |= 1 << i
+    return out
+
+
+def _hash(s, hashbits):
+    # A variable-length version of Python's builtin hash
+    if s == "":
+        return 0
+    else:
+        x = ord(s[0]) << 7
+        m = 1000003
+        mask = 2 ** hashbits - 1
+        for c in s:
+            x = ((x * m) ^ ord(c)) & mask
+        x ^= len(s)
+        if x == -1:
+            x = -2
+        return x
+
+
+def hamming_distance(first_hash, other_hash, hashbits=32):
+    x = (first_hash ^ other_hash) & ((1 << hashbits) - 1)
+    tot = 0
+    while x:
+        tot += 1
+        x &= x - 1
+    return tot
+
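+# For example (an illustrative sketch): near-duplicate strings produce similar
+# simhash bit patterns, so their Hamming distance is small:
+#
+#     h1 = simhash(shingles("the quick brown fox"))
+#     h2 = simhash(shingles("the quick brown fix"))
+#     hamming_distance(h1, h2)   # small relative to the 32 hash bits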
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/fields.py b/lib/whoosh/whoosh/fields.py
new file mode 100644
index 0000000..0e14588
--- /dev/null
+++ b/lib/whoosh/whoosh/fields.py
@@ -0,0 +1,996 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+""" Contains functions and classes related to fields.
+"""
+
+import datetime
+import fnmatch
+import re
+from decimal import Decimal
+
+from whoosh.analysis import (IDAnalyzer, RegexAnalyzer, KeywordAnalyzer,
+                             StandardAnalyzer, NgramAnalyzer, Tokenizer,
+                             NgramWordAnalyzer, Analyzer)
+from whoosh.formats import Format, Existence, Frequency, Positions
+from whoosh.support.numeric import (int_to_text, text_to_int, long_to_text,
+                                    text_to_long, float_to_text, text_to_float,
+                                    )
+from whoosh.support.times import datetime_to_long
+
+
+# Exceptions
+
+class FieldConfigurationError(Exception):
+    pass
+
+
+class UnknownFieldError(Exception):
+    pass
+
+
+# Field Types
+
+class FieldType(object):
+    """Represents a field configuration.
+
+    The FieldType object supports the following attributes:
+
+    * format (fields.Format): the storage format for the field's contents.
+
+    * vector (fields.Format): the storage format for the field's vectors
+      (forward index), or None if the field should not store vectors.
+
+    * scorable (boolean): whether searches against this field may be scored.
+      This controls whether the index stores per-document field lengths for
+      this field.
+
+    * stored (boolean): whether the content of this field is stored for each
+      document. For example, in addition to indexing the title of a document,
+      you usually want to store the title so it can be presented as part of
+      the search results.
+
+    * unique (boolean): whether this field's value is unique to each document.
+      For example, 'path' or 'ID'. IndexWriter.update_document() will use
+      fields marked as 'unique' to find the previous version of a document
+      being updated.
+
+    * multitoken_query (string): what kind of query to use when
+      a "word" in a user query parses into multiple tokens. The string is
+      interpreted by the query parser. The strings understood by the default
+      query parser are "first" (use first token only), "and" (join the tokens
+      with an AND query), "or" (join the tokens with OR), and "phrase" (join
+      the tokens with a phrase query).
+
+    The constructor for the base field type simply lets you supply your own
+    configured field format, vector format, and scorable and stored values.
+    Subclasses may configure some or all of this for you.
+    """
+
+    format = vector = scorable = stored = unique = None
+    indexed = True
+    multitoken_query = "first"
+    sortable_type = unicode
+    sortable_typecode = None
+
+    __inittypes__ = dict(format=Format, vector=Format,
+                         scorable=bool, stored=bool, unique=bool)
+
+    def __init__(self, format, vector=None, scorable=False, stored=False,
+                 unique=False, multitoken_query="first"):
+        self.format = format
+        self.vector = vector
+        self.scorable = scorable
+        self.stored = stored
+        self.unique = unique
+        self.multitoken_query = multitoken_query
+
+    def __repr__(self):
+        temp = "%s(format=%r, vector=%r, scorable=%s, stored=%s, unique=%s)"
+        return temp % (self.__class__.__name__, self.format, self.vector,
+                       self.scorable, self.stored, self.unique)
+
+    def __eq__(self, other):
+        return (isinstance(other, FieldType)
+                and self.format == other.format
+                and self.vector == other.vector
+                and self.scorable == other.scorable
+                and self.stored == other.stored
+                and self.unique == other.unique)
+
+    def on_add(self, schema, fieldname):
+        pass
+
+    def on_remove(self, schema, fieldname):
+        pass
+
+    def clean(self):
+        """Clears any cached information in the field and any child objects.
+        """
+
+        if self.format and hasattr(self.format, "clean"):
+            self.format.clean()
+        if self.vector and hasattr(self.vector, "clean"):
+            self.vector.clean()
+
+    def to_text(self, value):
+        """Returns a textual representation of the value. Non-textual fields
+        (such as NUMERIC and DATETIME) will override this to encode objects
+        as text.
+        """
+
+        return value
+
+    def index(self, value, **kwargs):
+        """Returns an iterator of (termtext, frequency, weight, encoded_value)
+        tuples.
+        """
+
+        if not self.format:
+            raise Exception("%s field cannot index without a format" % self.__class__)
+        if not isinstance(value, unicode):
+            raise ValueError("%r is not unicode" % value)
+        return self.format.word_values(value, mode="index", **kwargs)
+
+    def process_text(self, qstring, mode='', **kwargs):
+        """Returns an iterator of token strings corresponding to the given
+        string.
+        """
+
+        if not self.format:
+            raise Exception("%s field has no format" % self)
+        return (t.text for t
+                in self.format.analyze(qstring, mode=mode, **kwargs))
+
+    def self_parsing(self):
+        """Subclasses should override this method to return True if they want
+        the query parser to call the field's ``parse_query()`` method instead
+        of running the analyzer on text in this field. This is useful where
+        the field needs full control over how queries are interpreted, such
+        as in the numeric field type.
+        """
+
+        return False
+
+    def parse_query(self, fieldname, qstring, boost=1.0):
+        """When ``self_parsing()`` returns True, the query parser will call
+        this method to parse basic query text.
+        """
+
+        raise NotImplementedError(self.__class__.__name__)
+
+    def parse_range(self, fieldname, start, end, startexcl, endexcl, boost=1.0):
+        """When ``self_parsing()`` returns True, the query parser will call
+        this method to parse range query text. If this method returns None
+        instead of a query object, the parser will fall back to parsing the
+        start and end terms using process_text().
+        """
+
+        return None
+
+    def sortable_values(self, ixreader, fieldname):
+        """Returns an iterator of (term_text, sortable_value) pairs for the
+        terms in the given reader and field. The sortable values can be used
+        for sorting. The default implementation simply returns the texts of all
+        terms in the field.
+
+        The value of the field's ``sortable_type`` attribute should contain the
+        type of the second item (the sortable value) in the pairs, e.g.
+        ``unicode`` or ``int``.
+
+        This can be overridden by field types such as NUMERIC where some values
+        in a field are not useful for sorting, and where the sortable values
+        can be expressed more compactly as numbers.
+        """
+
+        return ((text, text) for text in ixreader.lexicon(fieldname))
+
+
+class ID(FieldType):
+    """Configured field type that indexes the entire value of the field as one
+    token. This is useful for data you don't want to tokenize, such as the path
+    of a file.
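+
+    For example, an illustrative schema:
+
+    >>> schema = Schema(path=ID(stored=True, unique=True), content=TEXT)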
+    """
+
+    __inittypes__ = dict(stored=bool, unique=bool, field_boost=float)
+
+    def __init__(self, stored=False, unique=False, field_boost=1.0):
+        """
+        :param stored: Whether the value of this field is stored with the document.
+        """
+        self.format = Existence(analyzer=IDAnalyzer(), field_boost=field_boost)
+        self.stored = stored
+        self.unique = unique
+
+
+class IDLIST(FieldType):
+    """Configured field type for fields containing IDs separated by whitespace
+    and/or punctuation.
+    """
+
+    __inittypes__ = dict(stored=bool, unique=bool, expression=bool, field_boost=float)
+
+    def __init__(self, stored=False, unique=False, expression=None, field_boost=1.0):
+        """
+        :param stored: Whether the value of this field is stored with the
+            document.
+        :param unique: Whether the value of this field is unique per-document.
+        :param expression: The regular expression object to use to extract
+            tokens. The default expression breaks tokens on CRs, LFs, tabs,
+            spaces, commas, and semicolons.
+        """
+
+        expression = expression or re.compile(r"[^\r\n\t ,;]+")
+        analyzer = RegexAnalyzer(expression=expression)
+        self.format = Existence(analyzer=analyzer, field_boost=field_boost)
+        self.stored = stored
+        self.unique = unique
+
+
+class NUMERIC(FieldType):
+    """Special field type that lets you index int, long, or floating point
+    numbers in relatively short fixed-width terms. The field converts numbers
+    to sortable text for you before indexing.
+
+    You specify the numeric type of the field when you create the NUMERIC
+    object. The default is ``int``.
+
+    >>> schema = Schema(path=STORED, position=NUMERIC(long))
+    >>> ix = storage.create_index(schema)
+    >>> w = ix.writer()
+    >>> w.add_document(path="/a", position=5820402204)
+    >>> w.commit()
+
+    You can also use the NUMERIC field to store Decimal instances by specifying
+    a type of ``int`` or ``long`` and the ``decimal_places`` keyword argument.
+    This simply multiplies each number by ``(10 ** decimal_places)`` before
+    storing it as an integer. Of course this may throw away decimal precision
+    (by truncating, not rounding) and imposes the same maximum value limits as
+    ``int``/``long``, but these may be acceptable for certain applications.
+
+    >>> from decimal import Decimal
+    >>> schema = Schema(path=STORED, position=NUMERIC(int, decimal_places=4))
+    >>> ix = storage.create_index(schema)
+    >>> w = ix.writer()
+    >>> w.add_document(path="/a", position=Decimal("123.45"))
+    >>> w.commit()
+    """
+
+    def __init__(self, type=int, stored=False, unique=False, field_boost=1.0,
+                 decimal_places=0, shift_step=4, signed=True):
+        """
+        :param type: the type of numbers that can be stored in this field: one
+            of ``int``, ``long``, ``float``, or ``Decimal``.
+        :param stored: Whether the value of this field is stored with the
+            document.
+        :param unique: Whether the value of this field is unique per-document.
+        :param decimal_places: specifies the number of decimal places to save
+            when storing Decimal instances as ``int`` or ``float``.
+        :param shift_step: The number of bits of precision to shift away at
+            each tiered indexing level. Values should generally be 1-8. Lower
+            values yield faster searches but take up more space. A value
+            of `0` means no tiered indexing.
+        :param signed: Whether the numbers stored in this field may be
+            negative.
+        """
+
+        self.type = type
+        if self.type is int:
+            self._to_text = int_to_text
+            self._from_text = text_to_int
+            self.sortable_type = int
+            self.sortable_typecode = "i" if signed else "I"
+        elif self.type is long:
+            self._to_text = long_to_text
+            self._from_text = text_to_long
+            self.sortable_type = long
+            self.sortable_typecode = "q" if signed else "Q"
+        elif self.type is float:
+            self._to_text = float_to_text
+            self._from_text = text_to_float
+            self.sortable_typecode = "f"
+        elif self.type is Decimal:
+            raise TypeError("To store Decimal instances, set type to int or "
+                            "float and use the decimal_places argument")
+        else:
+            raise TypeError("%s field type can't store %r" % (self.__class__,
+                                                              self.type))
+
+        self.stored = stored
+        self.unique = unique
+        self.decimal_places = decimal_places
+        self.shift_step = shift_step
+        self.signed = signed
+        self.format = Existence(analyzer=IDAnalyzer(), field_boost=field_boost)
+
+    def _tiers(self, num):
+        t = self.type
+        if t is int:
+            bitlen = 32
+        else:
+            bitlen = 64
+
+        for shift in xrange(0, bitlen, self.shift_step):
+            yield self.to_text(num, shift=shift)
+
+    def index(self, num):
+        # word, freq, weight, valuestring
+        if self.shift_step:
+            return [(txt, 1, 1.0, '') for txt in self._tiers(num)]
+        else:
+            return [(self.to_text(num), 1, 1.0, '')]
+
+    def prepare_number(self, x):
+        if x is None:
+            return x
+        if self.decimal_places:
+            x = Decimal(x)
+            x *= 10 ** self.decimal_places
+        x = self.type(x)
+        return x
+
+    def unprepare_number(self, x):
+        if self.decimal_places:
+            s = str(x)
+            x = Decimal(s[:-self.decimal_places] + "." + s[-self.decimal_places:])
+        return x
+
+    def to_text(self, x, shift=0):
+        return self._to_text(self.prepare_number(x), shift=shift)
+
+    def from_text(self, t):
+        x = self._from_text(t)
+        return self.unprepare_number(x)
+
+    def process_text(self, text, **kwargs):
+        return (self.to_text(text),)
+
+    def self_parsing(self):
+        return True
+
+    def parse_query(self, fieldname, qstring, boost=1.0):
+        from whoosh import query
+        from whoosh.qparser import QueryParserError
+
+        if qstring == "*":
+            return query.Every(fieldname, boost=boost)
+
+        try:
+            text = self.to_text(qstring)
+        except Exception, e:
+            raise QueryParserError(e)
+
+        return query.Term(fieldname, text, boost=boost)
+
+    def parse_range(self, fieldname, start, end, startexcl, endexcl, boost=1.0):
+        from whoosh import query
+        from whoosh.qparser import QueryParserError
+
+        try:
+            if start is not None:
+                start = self.from_text(self.to_text(start))
+            if end is not None:
+                end = self.from_text(self.to_text(end))
+        except Exception, e:
+            raise QueryParserError(e)
+
+        return query.NumericRange(fieldname, start, end, startexcl, endexcl,
+                                  boost=boost)
+
+    def sortable_values(self, ixreader, fieldname):
+        from_text = self._from_text
+
+        for text in ixreader.lexicon(fieldname):
+            if text[0] != "\x00":
+                # Only yield the full-precision values
+                break
+
+            yield (text, from_text(text))
+
+
+class DATETIME(NUMERIC):
+    """Special field type that lets you index datetime objects. The field
+    converts the datetime objects to sortable text for you before indexing.
+
+    Since this field is based on Python's datetime module it shares all the
+    limitations of that module, such as the inability to represent dates before
+    year 1 in the proleptic Gregorian calendar. However, since this field
+    stores datetimes as an integer number of microseconds, it could easily
+    represent a much wider range of dates if the Python datetime implementation
+    ever supports them.
+
+    >>> schema = Schema(path=STORED, date=DATETIME)
+    >>> ix = storage.create_index(schema)
+    >>> w = ix.writer()
+    >>> w.add_document(path="/a", date=datetime.now())
+    >>> w.commit()
+    """
+
+    __inittypes__ = dict(stored=bool, unique=bool)
+
+    def __init__(self, stored=False, unique=False):
+        """
+        :param stored: Whether the value of this field is stored with the
+            document.
+        :param unique: Whether the value of this field is unique per-document.
+        """
+
+        super(DATETIME, self).__init__(type=long, stored=stored, unique=unique,
+                                       shift_step=8)
+
+    def to_text(self, x, shift=0):
+        if isinstance(x, datetime.datetime):
+            x = datetime_to_long(x)
+        elif not isinstance(x, (int, long)):
+            raise ValueError("DATETIME.to_text field doesn't know what to do "
+                             "with %r" % x)
+
+        return super(DATETIME, self).to_text(x, shift=shift)
+
+    def _parse_datestring(self, qstring):
+        # This method does parses a very simple datetime representation of
+        # the form YYYY[MM[DD[hh[mm[ss[uuuuuu]]]]]]
+        from whoosh.support.times import adatetime, fix, is_void
+
+        qstring = qstring.replace(" ", "").replace("-", "").replace(".", "")
+        year = month = day = hour = minute = second = microsecond = None
+        if len(qstring) >= 4:
+            year = int(qstring[:4])
+        if len(qstring) >= 6:
+            month = int(qstring[4:6])
+        if len(qstring) >= 8:
+            day = int(qstring[6:8])
+        if len(qstring) >= 10:
+            hour = int(qstring[8:10])
+        if len(qstring) >= 12:
+            minute = int(qstring[10:12])
+        if len(qstring) >= 14:
+            second = int(qstring[12:14])
+        if len(qstring) == 20:
+            microsecond = int(qstring[14:])
+
+        at = fix(adatetime(year, month, day, hour, minute, second, microsecond))
+        if is_void(at):
+            raise Exception("%r is not a parseable date" % qstring)
+        return at
+
+    def parse_query(self, fieldname, qstring, boost=1.0):
+        from whoosh import query
+        from whoosh.support.times import is_ambiguous
+
+        at = self._parse_datestring(qstring)
+        if is_ambiguous(at):
+            startnum = datetime_to_long(at.floor())
+            endnum = datetime_to_long(at.ceil())
+            return query.NumericRange(fieldname, startnum, endnum)
+        else:
+            return query.Term(fieldname, self.to_text(at), boost=boost)
+
+    def parse_range(self, fieldname, start, end, startexcl, endexcl, boost=1.0):
+        from whoosh import query
+
+        if start is None and end is None:
+            return query.Every(fieldname, boost=boost)
+
+        if start is not None:
+            startdt = self._parse_datestring(start).floor()
+            start = datetime_to_long(startdt)
+
+        if end is not None:
+            enddt = self._parse_datestring(end).ceil()
+            end = datetime_to_long(enddt)
+
+        return query.NumericRange(fieldname, start, end, boost=boost)
+
+
+class BOOLEAN(FieldType):
+    """Special field type that lets you index boolean values (True and False).
+    The field converts the boolean values to text for you before indexing.
+
+    >>> schema = Schema(path=STORED, done=BOOLEAN)
+    >>> ix = storage.create_index(schema)
+    >>> w = ix.writer()
+    >>> w.add_document(path="/a", done=False)
+    >>> w.commit()
+    """
+
+    strings = (u"f", u"t")
+    trues = frozenset((u"t", u"true", u"yes", u"1"))
+    falses = frozenset((u"f", u"false", u"no", u"0"))
+
+    __inittypes__ = dict(stored=bool)
+
+    def __init__(self, stored=False):
+        """
+        :param stored: Whether the value of this field is stored with the
+            document.
+        """
+
+        self.stored = stored
+        self.format = Existence(None)
+
+    def to_text(self, bit):
+        if isinstance(bit, basestring):
+            bit = bit in self.trues
+        elif not isinstance(bit, bool):
+            raise ValueError("%r is not a boolean" % bit)
+        return self.strings[int(bit)]
+
+    def index(self, bit):
+        bit = bool(bit)
+        # word, freq, weight, valuestring
+        return [(self.strings[int(bit)], 1, 1.0, '')]
+
+    def self_parsing(self):
+        return True
+
+    def parse_query(self, fieldname, qstring, boost=1.0):
+        from whoosh import query
+        text = None
+
+        if qstring == "*":
+            return query.Every(fieldname, boost=boost)
+
+        try:
+            text = self.to_text(qstring)
+        except ValueError:
+            return query.NullQuery
+
+        return query.Term(fieldname, text, boost=boost)
+
+
+class STORED(FieldType):
+    """Configured field type for fields you want to store but not index.
+    """
+
+    indexed = False
+    stored = True
+
+    def __init__(self):
+        pass
+
+
+class KEYWORD(FieldType):
+    """Configured field type for fields containing space-separated or
+    comma-separated keyword-like data (such as tags). The default is to not
+    store positional information (so phrase searching is not allowed in this
+    field) and to not make the field scorable.
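+
+    For example, an illustrative schema for a comma-separated tags field:
+
+    >>> schema = Schema(tags=KEYWORD(stored=True, commas=True))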
+    """
+
+    __inittypes__ = dict(stored=bool, lowercase=bool, commas=bool, scorable=bool,
+                         unique=bool, field_boost=float)
+
+    def __init__(self, stored=False, lowercase=False, commas=False,
+                 scorable=False, unique=False, field_boost=1.0):
+        """
+        :param stored: Whether to store the value of the field with the
+            document.
+        :param commas: Whether this is a comma-separated field. If this is False
+            (the default), it is treated as a space-separated field.
+        :param scorable: Whether this field is scorable.
+        """
+
+        ana = KeywordAnalyzer(lowercase=lowercase, commas=commas)
+        self.format = Frequency(analyzer=ana, field_boost=field_boost)
+        self.scorable = scorable
+        self.stored = stored
+        self.unique = unique
+
+
+class TEXT(FieldType):
+    """Configured field type for text fields (for example, the body text of an
+    article). The default is to store positional information to allow phrase
+    searching. This field type is always scorable.
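+
+    For example, an illustrative schema:
+
+    >>> schema = Schema(title=TEXT(stored=True), content=TEXT)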
+    """
+
+    __inittypes__ = dict(analyzer=Analyzer, phrase=bool, vector=object,
+                         stored=bool, field_boost=float)
+
+    def __init__(self, analyzer=None, phrase=True, vector=None, stored=False,
+                 field_boost=1.0, multitoken_query="first"):
+        """
+        :param analyzer: The analysis.Analyzer to use to index the field
+            contents. See the analysis module for more information. If you omit
+            this argument, the field uses analysis.StandardAnalyzer.
+        :param phrase: Whether to store positional information to allow phrase
+            searching.
+        :param vector: A :class:`whoosh.formats.Format` object to use to store
+            term vectors, or ``True`` to store vectors using the same format as
+            the inverted index, or ``None`` or ``False`` to not store vectors.
+            By default, fields do not store term vectors.
+        :param stored: Whether to store the value of this field with the
+            document. Since this field type generally contains a lot of text,
+            you should avoid storing it with the document unless you need to,
+            for example to allow fast excerpts in the search results.
+        """
+
+        ana = analyzer or StandardAnalyzer()
+
+        if phrase:
+            formatclass = Positions
+        else:
+            formatclass = Frequency
+
+        self.format = formatclass(analyzer=ana, field_boost=field_boost)
+
+        if vector:
+            if type(vector) is type:
+                vector = vector(ana)
+            elif isinstance(vector, Format):
+                pass
+            else:
+                vector = formatclass(ana)
+        else:
+            vector = None
+        self.vector = vector
+
+        self.multitoken_query = multitoken_query
+        self.scorable = True
+        self.stored = stored
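+    # Illustrative sketch (names are assumptions, not part of the shipped
+    # schema): a typical body field that supports phrase queries and snippet
+    # generation might be configured roughly as
+    #
+    #     body = TEXT(analyzer=StemmingAnalyzer(), stored=True)
+    #
+    # where StemmingAnalyzer is assumed to come from whoosh.analysis; any
+    # object implementing the Analyzer protocol can be passed instead.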
+
+
+class NGRAM(FieldType):
+    """Configured field that indexes text as N-grams. For example, with a field
+    type NGRAM(3,4), the value "hello" will be indexed as tokens
+    "hel", "hell", "ell", "ello", "llo". This field chops the entire
+    """
+
+    __inittypes__ = dict(minsize=int, maxsize=int, stored=bool,
+                         field_boost=float, queryor=bool, phrase=bool)
+    scorable = True
+
+    def __init__(self, minsize=2, maxsize=4, stored=False, field_boost=1.0,
+                 queryor=False, phrase=False):
+        """
+        :param minsize: The minimum length of the N-grams.
+        :param maxsize: The maximum length of the N-grams.
+        :param stored: Whether to store the value of this field with the
+            document. Since this field type generally contains a lot of text,
+            you should avoid storing it with the document unless you need to,
+            for example to allow fast excerpts in the search results.
+        :param queryor: if True, combine the N-grams with an Or query. The
+            default is to combine N-grams with an And query.
+        :param phrase: store positions on the N-grams to allow exact phrase
+            searching. The default is off.
+        """
+
+        formatclass = Frequency
+        if phrase:
+            formatclass = Positions
+
+        self.format = formatclass(analyzer=NgramAnalyzer(minsize, maxsize),
+                                  field_boost=field_boost)
+        self.stored = stored
+        self.queryor = queryor
+
+    def self_parsing(self):
+        return True
+
+    def parse_query(self, fieldname, qstring, boost=1.0):
+        from whoosh import query
+
+        terms = [query.Term(fieldname, g)
+                 for g in self.process_text(qstring, mode='query')]
+        cls = query.Or if self.queryor else query.And
+
+        return cls(terms, boost=boost)
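+    # Illustrative note: with the defaults this builds one Term per N-gram
+    # produced by the field's analyzer and combines them with And (or Or
+    # when queryor=True). For example, assuming f = NGRAM(minsize=3,
+    # maxsize=3), parsing u"hello" yields roughly
+    #
+    #     And([Term(fieldname, u"hel"), Term(fieldname, u"ell"),
+    #          Term(fieldname, u"llo")])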
+
+
+class NGRAMWORDS(NGRAM):
+    """Configured field that breaks text into words, lowercases, and then chops
+    the words into N-grams.
+    """
+
+    __inittypes__ = dict(minsize=int, maxsize=int, stored=bool,
+                         field_boost=float, tokenizer=Tokenizer, at=str,
+                         queryor=bool)
+    scorable = True
+
+    def __init__(self, minsize=2, maxsize=4, stored=False, field_boost=1.0,
+                 tokenizer=None, at=None, queryor=False):
+        """
+        :param minsize: The minimum length of the N-grams.
+        :param maxsize: The maximum length of the N-grams.
+        :param stored: Whether to store the value of this field with the
+            document. Since this field type generally contains a lot of text,
+            you should avoid storing it with the document unless you need to,
+            for example to allow fast excerpts in the search results.
+        :param tokenizer: an instance of :class:`whoosh.analysis.Tokenizer`
+            used to break the text into words.
+        :param at: if 'start', only takes N-grams from the start of the word.
+            If 'end', only takes N-grams from the end. Otherwise the default
+            is to take all N-grams from each word.
+        :param queryor: if True, combine the N-grams with an Or query. The
+            default is to combine N-grams with an And query.
+        """
+
+        analyzer = NgramWordAnalyzer(minsize, maxsize, tokenizer, at=at)
+        self.format = Frequency(analyzer=analyzer, field_boost=field_boost)
+        self.stored = stored
+        self.queryor = queryor
+
+
+# Schema class
+
+class MetaSchema(type):
+    def __new__(cls, name, bases, attrs):
+        super_new = super(MetaSchema, cls).__new__
+        if not any(b for b in bases if isinstance(b, MetaSchema)):
+            # If no base uses the MetaSchema metaclass (i.e. this is
+            # SchemaClass itself), don't do anything special
+            return super_new(cls, name, bases, attrs)
+
+        # Create the class
+        special_attrs = {}
+        for key in attrs.keys():
+            if key.startswith("__"):
+                special_attrs[key] = attrs.pop(key)
+        new_class = super_new(cls, name, bases, special_attrs)
+
+        fields = {}
+        for b in bases:
+            if hasattr(b, "_clsfields"):
+                fields.update(b._clsfields)
+        fields.update(attrs)
+        new_class._clsfields = fields
+        return new_class
+
+    def schema(self):
+        return Schema(**self._clsfields)
+
+
+class Schema(object):
+    """Represents the collection of fields in an index. Maps field names to
+    FieldType objects which define the behavior of each field.
+
+    Low-level parts of the index use field numbers instead of field names for
+    compactness. This class has several methods for converting between the
+    field name, field number, and field object itself.
+    """
+
+    def __init__(self, **fields):
+        """ All keyword arguments to the constructor are treated as fieldname =
+        fieldtype pairs. The fieldtype can be an instantiated FieldType object,
+        or a FieldType sub-class (in which case the Schema will instantiate it
+        with the default constructor before adding it).
+
+        For example::
+
+            s = Schema(content = TEXT,
+                       title = TEXT(stored = True),
+                       tags = KEYWORD(stored = True))
+        """
+
+        self._fields = {}
+        self._dyn_fields = {}
+
+        for name in sorted(fields.keys()):
+            self.add(name, fields[name])
+
+    def copy(self):
+        """Returns a shallow copy of the schema. The field instances are not
+        deep copied, so they are shared between schema copies.
+        """
+
+        return self.__class__(**self._fields)
+
+    def __eq__(self, other):
+        return (other.__class__ is self.__class__
+                and self.items() == other.items())
+
+    def __repr__(self):
+        return "<%s: %r>" % (self.__class__.__name__, self.names())
+
+    def __iter__(self):
+        """Returns the field objects in this schema.
+        """
+
+        return self._fields.itervalues()
+
+    def __getitem__(self, name):
+        """Returns the field associated with the given field name.
+        """
+
+        if name in self._fields:
+            return self._fields[name]
+
+        for expr, fieldtype in self._dyn_fields.itervalues():
+            if expr.match(name):
+                return fieldtype
+
+        raise KeyError("No field named %r" % (name, ))
+
+    def __len__(self):
+        """Returns the number of fields in this schema.
+        """
+
+        return len(self._fields)
+
+    def __contains__(self, fieldname):
+        """Returns True if a field by the given name is in this schema.
+        """
+
+        # Defined in terms of __getitem__ so that there's only one method to
+        # override to provide dynamic fields
+        try:
+            field = self[fieldname]
+            return field is not None
+        except KeyError:
+            return False
+
+    def items(self):
+        """Returns a list of ("fieldname", field_object) pairs for the fields
+        in this schema.
+        """
+
+        return sorted(self._fields.items())
+
+    def names(self):
+        """Returns a list of the names of the fields in this schema.
+        """
+        return sorted(self._fields.keys())
+
+    def clean(self):
+        for field in self:
+            field.clean()
+
+    def add(self, name, fieldtype, glob=False):
+        """Adds a field to this schema.
+
+        :param name: The name of the field.
+        :param fieldtype: An instantiated fields.FieldType object, or a
+            FieldType subclass. If you pass an instantiated object, the schema
+            will use that as the field configuration for this field. If you
+            pass a FieldType subclass, the schema will automatically
+            instantiate it with the default constructor.
+        """
+
+        # Check field name
+        if name.startswith("_"):
+            raise FieldConfigurationError("Field names cannot start with an underscore")
+        if " " in name:
+            raise FieldConfigurationError("Field names cannot contain spaces")
+        if name in self._fields or (glob and name in self._dyn_fields):
+            raise FieldConfigurationError("Schema already has a field %r" % name)
+
+        # If the user passed a type rather than an instantiated field object,
+        # instantiate it automatically
+        if type(fieldtype) is type:
+            try:
+                fieldtype = fieldtype()
+            except Exception, e:
+                raise FieldConfigurationError("Error: %s instantiating field %r: %r" % (e, name, fieldtype))
+
+        if not isinstance(fieldtype, FieldType):
+            raise FieldConfigurationError("%r is not a FieldType object" % fieldtype)
+
+        if glob:
+            expr = re.compile(fnmatch.translate(name))
+            self._dyn_fields[name] = (expr, fieldtype)
+        else:
+            fieldtype.on_add(self, name)
+            self._fields[name] = fieldtype
+
+    def remove(self, fieldname):
+        if fieldname in self._fields:
+            self._fields[fieldname].on_remove(self, fieldname)
+            del self._fields[fieldname]
+        elif fieldname in self._dyn_fields:
+            del self._dyn_fields[fieldname]
+        else:
+            raise KeyError("No field named %r" % fieldname)
+
+    def has_vectored_fields(self):
+        """Returns True if any of the fields in this schema store term vectors.
+        """
+
+        return any(ftype.vector for ftype in self)
+
+    def has_scorable_fields(self):
+        return any(ftype.scorable for ftype in self)
+
+    def stored_names(self):
+        """Returns a list of the names of fields that are stored.
+        """
+
+        return [name for name, field in self.items() if field.stored]
+
+    def scorable_names(self):
+        """Returns a list of the names of fields that store field
+        lengths.
+        """
+
+        return [name for name, field in self.items() if field.scorable]
+
+    def vector_names(self):
+        """Returns a list of the names of fields that store vectors.
+        """
+
+        return [name for name, field in self.items() if field.vector]
+
+    def analyzer(self, fieldname):
+        """Returns the content analyzer for the given fieldname, or None if
+        the field has no analyzer
+        """
+
+        field = self[fieldname]
+        if field.format and field.format.analyzer:
+            return field.format.analyzer
+
+
+class SchemaClass(Schema):
+    __metaclass__ = MetaSchema
+
+    """Allows you to define a schema using declarative syntax, similar to
+    Django models::
+
+        class MySchema(SchemaClass):
+            path = ID
+            date = DATETIME
+            content = TEXT
+
+    You can use inheritance to share common fields between schemas::
+
+        class Parent(SchemaClass):
+            path = ID(stored=True)
+            date = DATETIME
+
+        class Child1(Parent):
+            content = TEXT(phrase=False)
+
+        class Child2(Parent):
+            tags = KEYWORD
+
+    This class overrides ``__new__`` so instantiating your sub-class always
+    results in an instance of ``Schema``.
+
+    >>> class MySchema(SchemaClass):
+    ...     title = TEXT(stored=True)
+    ...     content = TEXT
+    ...
+    >>> s = MySchema()
+    >>> type(s)
+    <class 'whoosh.fields.Schema'>
+    """
+
+    def __new__(cls, *args, **kwargs):
+        obj = super(Schema, cls).__new__(Schema)
+        # Copy the class fields so keyword arguments don't mutate _clsfields
+        kw = dict(getattr(cls, "_clsfields", {}))
+        kw.update(kwargs)
+        obj.__init__(*args, **kw)
+        return obj
+
+
+def ensure_schema(schema):
+    if isinstance(schema, type) and issubclass(schema, Schema):
+        schema = schema.schema()
+    if not isinstance(schema, Schema):
+        raise FieldConfigurationError("%r is not a Schema" % schema)
+    return schema
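+
+
+# Illustrative sketch (names are placeholders): both an instantiated Schema
+# and a SchemaClass subclass are accepted wherever a schema is required,
+# because ensure_schema() normalizes them to a Schema instance:
+#
+#     class ArticleSchema(SchemaClass):
+#         title = TEXT(stored=True)
+#         body = TEXT
+#         tags = KEYWORD(stored=True)
+#
+#     schema = ensure_schema(ArticleSchema)        # -> Schema instance
+#     schema = ensure_schema(Schema(body=TEXT))    # returned unchanged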
+
+
+
+
diff --git a/lib/whoosh/whoosh/filedb/__init__.py b/lib/whoosh/whoosh/filedb/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/__init__.py
diff --git a/lib/whoosh/whoosh/filedb/fieldcache.py b/lib/whoosh/whoosh/filedb/fieldcache.py
new file mode 100644
index 0000000..21fe60f
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/fieldcache.py
@@ -0,0 +1,629 @@
+# Copyright 2011 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from __future__ import with_statement
+import threading
+import weakref
+from array import array
+from collections import defaultdict
+from heapq import heappush, heapreplace
+from struct import Struct
+
+from whoosh.system import _INT_SIZE, _FLOAT_SIZE, _LONG_SIZE
+from whoosh.util import utf8encode
+
+
+pack_int_le = Struct("<i").pack
+
+
+def pickled_unicode(u):
+    # Returns the unicode string as a pickle protocol 2 operator
+    return "X%s%s" % (pack_int_le(len(u)), utf8encode(u)[0])
+
+
+class BadFieldCache(Exception):
+    pass
+
+
+# Python does not support arrays of long long (see Python issue 1172711).
+# These functions help write/read a simulated array of q/Q values using lists.
+
+def write_qsafe_array(typecode, arry, dbfile):
+    if typecode == "q":
+        for num in arry:
+            dbfile.write_long(num)
+    elif typecode == "Q":
+        for num in arry:
+            dbfile.write_ulong(num)
+    else:
+        dbfile.write_array(arry)
+
+
+def read_qsafe_array(typecode, size, dbfile):
+    if typecode == "q":
+        arry = [dbfile.read_long() for _ in xrange(size)]
+    elif typecode == "Q":
+        arry = [dbfile.read_ulong() for _ in xrange(size)]
+    else:
+        arry = dbfile.read_array(typecode, size)
+
+    return arry
+
+
+class FieldCache(object):
+    """Keeps a list of the sorted text values of a field and an array of ints
+    where each place in the array corresponds to a document, and the value
+    at a place in the array is a pointer to a text in the list of texts.
+
+    This structure allows fast sorting and grouping of documents by associating
+    each document with a value through the array.
+    """
+
+    def __init__(self, order=None, texts=None, hastexts=True, default=u"",
+                 typecode="I"):
+        """
+        :param order: an array of ints.
+        :param texts: a list of text values.
+        :param default: the value to use for documents without the field.
+        """
+
+        self.order = order or array(typecode)
+        self.hastexts = hastexts
+        self.texts = None
+        if hastexts:
+            self.texts = texts or [default]
+        self.typecode = typecode
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.hastexts == other.hastexts
+                and self.order == other.order
+                and self.texts == other.texts)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def size(self):
+        """Returns the size in bytes (or as accurate an estimate as is
+        practical, anyway) of this cache.
+        """
+
+        orderlen = len(self.order)
+        if self.typecode == "B":
+            total = orderlen
+        elif self.typecode in "Ii":
+            total = orderlen * _INT_SIZE
+        elif self.typecode == "f":
+            total = orderlen * _FLOAT_SIZE
+        elif self.typecode in "Qq":
+            total = orderlen * _LONG_SIZE
+
+        if self.hastexts:
+            total += sum(len(t) for t in self.texts)
+
+        return total
+
+    # Class constructor for building a field cache from a reader
+
+    @classmethod
+    def from_field(cls, ixreader, fieldname, default=u""):
+        """Creates an in-memory field cache from a reader.
+
+        >>> r = ix.reader()
+        >>> fc = FieldCache.from_field(r, "chapter")
+
+        :param ixreader: a :class:`whoosh.reading.IndexReader` object.
+        :param fieldname: the name of the field to cache.
+        :param default: the value to use for documents without the field.
+        """
+
+        field = ixreader.schema[fieldname]
+        hastexts = field.sortable_typecode in (None, "unicode")
+
+        texts = None
+        if hastexts:
+            typecode = "I"
+            texts = [default]
+        else:
+            typecode = field.sortable_typecode
+
+        doccount = ixreader.doc_count_all()
+        # Python does not support arrays of long long (see Python issue 1172711)
+        if typecode.lower() == "q":
+            order = [0] * doccount
+        else:
+            order = array(typecode, [0] * doccount)
+
+        enum = enumerate(field.sortable_values(ixreader, fieldname))
+        for i, (text, sortable) in enum:
+            if hastexts:
+                texts.append(sortable)
+
+            ps = ixreader.postings(fieldname, text)
+            for id in ps.all_ids():
+                if hastexts:
+                    order[id] = i + 1
+                else:
+                    order[id] = sortable
+
+        # Compact the order array if possible
+        if hastexts:
+            if len(texts) < 255:
+                newcode = "B"
+            elif len(texts) < 65535:
+                newcode = "H"
+            else:
+                newcode = typecode
+
+            if newcode != order.typecode:
+                order = array(newcode, order)
+                typecode = newcode
+
+        return cls(order, texts, hastexts=hastexts, typecode=typecode)
+
+    # Class constructor for defining a field cache using arbitrary queries
+
+    @classmethod
+    def from_lists(cls, doclists, doccount, default=u""):
+        texts = sorted(doclists.keys())
+        order = array("I", [0] * doccount)
+
+        # Run the queries to populate the order array
+        for i, text in enumerate(texts):
+            doclist = doclists[text]
+            for id in doclist:
+                order[id] = i + 1
+
+        texts.insert(0, default)
+        return cls(order, texts)
+
+    # Class constructor for loading a field cache from a file
+
+    @classmethod
+    def from_file(cls, dbfile):
+        """Loads an in-memory field cache from a saved file created with
+        :meth:`FieldCache.to_file`.
+
+        >>> fc = FieldCache.from_file(f)
+        """
+
+        # Read the finished tag
+        tag = dbfile.read(1)
+        if tag != "+":
+            raise BadFieldCache
+
+        # Read the number of documents
+        doccount = dbfile.read_uint()
+        textcount = dbfile.read_uint()
+
+        texts = None
+        if textcount:
+            # Read the texts
+            texts = dbfile.read_pickle()
+
+        typecode = dbfile.read(1)
+        order = read_qsafe_array(typecode, doccount, dbfile)
+        return cls(order, texts, typecode=typecode, hastexts=bool(texts))
+
+    def to_file(self, dbfile):
+        """Saves an in-memory field cache to a file.
+
+        >>> fc = FieldCache.from_field(r, "tag")
+        >>> fc.to_file(f)
+        """
+
+        # Write a tag at the start of the file indicating the file write is in
+        # progress, to warn other processes that might open the file. We'll
+        # seek back and change this when the file is done.
+        dbfile.write("-")
+
+        dbfile.write_uint(len(self.order))  # Number of documents
+
+        if self.hastexts:
+            dbfile.write_uint(len(self.texts))  # Number of texts
+            dbfile.write_pickle(self.texts)
+
+            # Compact the order array if possible
+            if len(self.texts) < 255:
+                newcode = "B"
+            elif len(self.texts) < 65535:
+                newcode = "H"
+            else:
+                newcode = self.order.typecode
+
+            if newcode != self.order.typecode:
+                self.order = array(newcode, self.order)
+                self.typecode = newcode
+        else:
+            dbfile.write_uint(0)  # No texts
+
+        dbfile.write(self.typecode)
+        write_qsafe_array(self.typecode, self.order, dbfile)
+        dbfile.flush()
+
+        # Seek back and change the tag byte at the start of the file
+        dbfile.seek(0)
+        dbfile.write("+")
+
+    # Field cache operations
+
+    def key_for(self, docnum):
+        """Returns the key corresponding to a document number.
+        """
+
+        o = self.order[docnum]
+        if self.hastexts:
+            return self.texts[o]
+        else:
+            return o
+
+    def keys(self):
+        """Returns a list of all key values in the cache.
+        """
+
+        if self.hastexts:
+            return self.texts
+        else:
+            return sorted(set(self.order))
+
+    def ords(self):
+        """Yields a series of (docnum, order) pairs.
+        """
+
+        return enumerate(self.order)
+
+    def groups(self, docnums, counts=False):
+        """Returns a dictionary mapping key values to document numbers. If
+        ``counts_only`` is True, the returned dictionary maps key values to the
+        number of documents in that
+        """
+
+        defaulttype = int if counts else list
+        groups = defaultdict(defaulttype)
+        key_for = self.key_for
+
+        for docnum in docnums:
+            key = key_for(docnum)
+            if counts:
+                groups[key] += 1
+            else:
+                groups[key].append(docnum)
+
+        return groups
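+    # Illustrative note: assuming a cache built over a "tag" field where
+    # documents 0 and 2 carry the key u"python" and document 1 carries
+    # u"ruby", this would return roughly
+    #
+    #     fc.groups([0, 1, 2])               # {u"python": [0, 2], u"ruby": [1]}
+    #     fc.groups([0, 1, 2], counts=True)  # {u"python": 2, u"ruby": 1}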
+
+    def scored_groups(self, scores_and_docnums, limit=None):
+        """Takes a sequence of (score, docnum) pairs and returns a dictionary
+        mapping key values to sorted lists of (score, docnum) pairs.
+
+        If you specify the ``limit`` keyword, the sorted lists will contain
+        only the ``limit`` highest-scoring items.
+        """
+
+        groups = defaultdict(list)
+        key_for = self.key_for
+
+        for score, docnum in scores_and_docnums:
+            key = key_for(docnum)
+            ritem = (0 - score, docnum)
+            ls = groups[key]
+            if limit:
+                if len(ls) < limit:
+                    heappush(ls, ritem)
+                elif ritem[0] > ls[0][0]:
+                    heapreplace(ls, ritem)
+            else:
+                ls.append(ritem)
+
+        for v in groups.values():
+            v.sort()
+
+        return groups
+
+    def collapse(self, scores_and_docnums):
+        """Takes a sequence of (score, docnum) pairs and returns a list of
+        docnums. If any docnums in the original list had the same key value,
+        all but the highest scoring duplicates are removed from the result
+        list.
+        """
+
+        maxes = {}
+        key_for = self.key_for
+
+        for score, docnum in scores_and_docnums:
+            key = key_for(docnum)
+            if key not in maxes or score > maxes[key][1]:
+                maxes[key] = (docnum, score)
+
+        return sorted(docnum for docnum, score in maxes.values())
+
+
+# Streaming cache file writer
+
+class FieldCacheWriter(object):
+    def __init__(self, dbfile, size=0, hastexts=True, code="I", default=u""):
+        self.dbfile = dbfile
+        self.code = code
+        self.order = array(code, [0] * size)
+        self.hastexts = hastexts
+
+        self.key = 0
+        self.keycount = 1
+
+        self.tagpos = dbfile.tell()
+        dbfile.write("-")
+        self.start = dbfile.tell()
+        dbfile.write_uint(0)  # Number of docs
+        dbfile.write_uint(0)  # Number of texts
+
+        if self.hastexts:
+            # Start the pickled list of texts
+            dbfile.write("(" + pickled_unicode(default))
+
+    def add_key(self, value):
+        if self.hastexts:
+            self.key += 1
+            self.dbfile.write(pickled_unicode(value))
+        else:
+            self.key = value
+        self.keycount += 1
+
+    def add_doc(self, docnum):
+        order = self.order
+        if len(order) < docnum + 1:
+            order.extend([0] * (docnum + 1 - len(order)))
+        order[docnum] = self.key
+
+    def close(self):
+        dbfile = self.dbfile
+        order = self.order
+        keycount = self.keycount
+
+        # Finish the pickled list of texts
+        dbfile.write("l.")
+
+        # Compact the order array if possible
+        code = self.code
+        if self.hastexts:
+            if keycount < 255:
+                code = "B"
+                order = array(code, order)
+            elif keycount < 65535:
+                code = "H"
+                order = array(code, order)
+
+        # Write the (possibly compacted) order array
+        dbfile.write(code)
+        dbfile.write_array(order)
+
+        # Seek back to the start and write numbers of docs
+        dbfile.flush()
+        dbfile.seek(self.start)
+        dbfile.write_uint(len(order))
+        if self.hastexts:
+            dbfile.write_uint(keycount)
+        dbfile.flush()
+
+        # Seek back and write the finished file tag
+        dbfile.seek(self.tagpos)
+        dbfile.write("+")
+
+        dbfile.close()
+
+
+# Caching policies
+
+class FieldCachingPolicy(object):
+    """Base class for field caching policies.
+    """
+
+    def put(self, key, obj, save=True):
+        """Adds the given object to the cache under the given key.
+        """
+
+        raise NotImplementedError
+
+    def __contains__(self, key):
+        """Returns True if an object exists in the cache (either in memory
+        or on disk) under the given key.
+        """
+
+        raise NotImplementedError
+
+    def is_loaded(self, key):
+        """Returns True if an object exists in memory for the given key. This
+        might be useful for scenarios where code can use a field cache if it's
+        already loaded, but is not important enough to load it for its own sake.
+        """
+
+        raise NotImplementedError
+
+    def get(self, key):
+        """Returns the object for the given key, or ``None`` if the key does
+        not exist in the cache.
+        """
+
+        raise NotImplementedError
+
+    def delete(self, key):
+        """Removes the object for the given key from the cache.
+        """
+
+        pass
+
+    def get_class(self):
+        """Returns the class to use when creating field caches. This class
+        should implement the same protocol as FieldCache.
+        """
+
+        return FieldCache
+
+
+class NoCaching(FieldCachingPolicy):
+    """A field caching policy that does not save field caches at all.
+    """
+
+    def put(self, key, obj, save=True):
+        pass
+
+    def __contains__(self, key):
+        return False
+
+    def is_loaded(self, key):
+        return False
+
+    def get(self, key):
+        return None
+
+
+class DefaultFieldCachingPolicy(FieldCachingPolicy):
+    """A field caching policy that saves generated caches in memory and also
+    writes them to disk by default.
+    """
+
+    shared_cache = weakref.WeakValueDictionary()
+    sharedlock = threading.Lock()
+
+    def __init__(self, basename, storage=None, gzip_caches=False,
+                 fcclass=FieldCache):
+        """
+        :param basename: a prefix for filenames. This is usually the name of
+            the reader's segment.
+        :param storage: a custom :class:`whoosh.store.Storage` object to use
+            for saving field caches. If this is ``None``, this object will not
+            save caches to disk.
+        :param gzip_caches: if True, field caches saved to disk by this object
+            will be compressed. Loading compressed caches is very slow, so you
+            should not turn this option on.
+        :param fcclass:
+        """
+
+        self.basename = basename
+        self.storage = storage
+        self.caches = {}
+        self.gzip_caches = gzip_caches
+        self.fcclass = fcclass
+
+    def __contains__(self, key):
+        return self.is_loaded(key) or self._file_exists(key)
+
+    def _filename(self, key):
+        if "/" in key:
+            savename = key[key.rfind("/") + 1:]
+        else:
+            savename = key
+        return "%s.%s.fc" % (self.basename, savename)
+
+    def _file_exists(self, key):
+        if not self.storage:
+            return False
+
+        filename = self._filename(key)
+        gzfilename = filename + ".gz"
+        return (self.storage.file_exists(filename)
+                or self.storage.file_exists(gzfilename))
+
+    def _save(self, key, cache):
+        filename = self._filename(key)
+        if self.gzip_caches:
+            filename += ".gz"
+
+        try:
+            f = self.storage.create_file(filename, gzip=self.gzip_caches,
+                                         excl=True)
+        except OSError:
+            pass
+        else:
+            cache.to_file(f)
+            f.close()
+
+    def _load(self, key):
+        storage = self.storage
+        filename = self._filename(key)
+        gzfilename = filename + ".gz"
+        gzipped = False
+        if storage.file_exists(gzfilename) and not storage.file_exists(filename):
+            filename = gzfilename
+            gzipped = True
+
+        f = storage.open_file(filename, mapped=False, gzip=gzipped)
+        cache = self.fcclass.from_file(f)
+        f.close()
+        return cache
+
+    def is_loaded(self, key):
+        if key in self.caches:
+            return True
+
+        with self.sharedlock:
+            return key in self.shared_cache
+
+    def put(self, key, cache, save=True):
+        self.caches[key] = cache
+        if save:
+            if self.storage:
+                self._save(key, cache)
+            with self.sharedlock:
+                if key not in self.shared_cache:
+                    self.shared_cache[key] = cache
+
+    def get(self, key):
+        if key in self.caches:
+            return self.caches.get(key)
+
+        with self.sharedlock:
+            if key in self.shared_cache:
+                return self.shared_cache[key]
+
+        if self._file_exists(key):
+            try:
+                return self._load(key)
+            except (OSError, BadFieldCache):
+                return None
+
+    def delete(self, key):
+        try:
+            del self.caches[key]
+        except KeyError:
+            pass
+
+    def get_class(self):
+        return self.fcclass
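+    # Illustrative sketch (names are placeholders): a reader might build one
+    # policy per segment and reuse cached orderings across searches:
+    #
+    #     policy = DefaultFieldCachingPolicy("_MAIN_1", storage=some_storage)
+    #     if "chapter" not in policy:
+    #         policy.put("chapter", FieldCache.from_field(reader, "chapter"))
+    #     fc = policy.get("chapter")
+    #
+    # With storage=None the caches live only in memory (and in the shared
+    # weak-reference dictionary) and are never written to disk.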
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/filedb/fileindex.py b/lib/whoosh/whoosh/filedb/fileindex.py
new file mode 100644
index 0000000..4842a11
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/fileindex.py
@@ -0,0 +1,513 @@
+# Copyright 2009 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+import cPickle
+import re
+import uuid
+from time import time
+from threading import Lock
+
+from whoosh import __version__
+from whoosh.fields import ensure_schema
+from whoosh.index import Index, EmptyIndexError, IndexVersionError, _DEF_INDEX_NAME
+from whoosh.reading import EmptyReader, MultiReader
+from whoosh.store import Storage, LockError
+from whoosh.system import _INT_SIZE, _FLOAT_SIZE, _LONG_SIZE
+
+
+_INDEX_VERSION = -110
+
+
+# TOC read/write functions
+
+def _toc_filename(indexname, gen):
+    return "_%s_%s.toc" % (indexname, gen)
+
+
+def _toc_pattern(indexname):
+    """Returns a regular expression object that matches TOC filenames.
+    name is the name of the index.
+    """
+
+    return re.compile("^_%s_([0-9]+).toc$" % indexname)
+
+
+def _segment_pattern(indexname):
+    """Returns a regular expression object that matches segment filenames.
+    name is the name of the index.
+    """
+
+    return re.compile("(_%s_[0-9]+)\\..*" % indexname)
+
+
+def _latest_generation(storage, indexname):
+    pattern = _toc_pattern(indexname)
+
+    max = -1
+    for filename in storage:
+        m = pattern.match(filename)
+        if m:
+            num = int(m.group(1))
+            if num > max:
+                max = num
+    return max
+
+
+def _create_index(storage, schema, indexname=_DEF_INDEX_NAME):
+    # Clear existing files
+    prefix = "_%s_" % indexname
+    for filename in storage:
+        if filename.startswith(prefix):
+            storage.delete_file(filename)
+
+    schema = ensure_schema(schema)
+    # Write a TOC file with an empty list of segments
+    _write_toc(storage, schema, indexname, 0, 0, [])
+
+
+def _write_toc(storage, schema, indexname, gen, segment_counter, segments):
+    schema = ensure_schema(schema)
+    schema.clean()
+
+    # Use a temporary file for atomic write.
+    tocfilename = _toc_filename(indexname, gen)
+    tempfilename = '%s.%s' % (tocfilename, time())
+    stream = storage.create_file(tempfilename)
+
+    stream.write_varint(_INT_SIZE)
+    stream.write_varint(_LONG_SIZE)
+    stream.write_varint(_FLOAT_SIZE)
+    stream.write_int(-12345)
+
+    stream.write_int(_INDEX_VERSION)
+    for num in __version__[:3]:
+        stream.write_varint(num)
+
+    stream.write_string(cPickle.dumps(schema, -1))
+    stream.write_int(gen)
+    stream.write_int(segment_counter)
+    stream.write_pickle(segments)
+    stream.close()
+
+    # Rename temporary file to the proper filename
+    storage.rename_file(tempfilename, tocfilename, safe=True)
+
+
+class Toc(object):
+    def __init__(self, **kwargs):
+        for name, value in kwargs.iteritems():
+            setattr(self, name, value)
+
+
+def _read_toc(storage, schema, indexname):
+    gen = _latest_generation(storage, indexname)
+    if gen < 0:
+        raise EmptyIndexError("Index %r does not exist in %r" % (indexname, storage))
+
+    # Read the content of this index from the .toc file.
+    tocfilename = _toc_filename(indexname, gen)
+    stream = storage.open_file(tocfilename)
+
+    def check_size(name, target):
+        sz = stream.read_varint()
+        if sz != target:
+            raise IndexError("Index was created on different architecture:"
+                             " saved %s = %s, this computer = %s" % (name, sz, target))
+
+    check_size("int", _INT_SIZE)
+    check_size("long", _LONG_SIZE)
+    check_size("float", _FLOAT_SIZE)
+
+    if not stream.read_int() == -12345:
+        raise IndexError("Number misread: byte order problem")
+
+    version = stream.read_int()
+    if version != _INDEX_VERSION:
+        raise IndexVersionError("Can't read format %s" % version, version)
+    release = (stream.read_varint(), stream.read_varint(), stream.read_varint())
+
+    # If the user supplied a schema object with the constructor, don't load
+    # the pickled schema from the saved index.
+    if schema:
+        stream.skip_string()
+    else:
+        schema = cPickle.loads(stream.read_string())
+    schema = ensure_schema(schema)
+
+    # Generation
+    index_gen = stream.read_int()
+    assert gen == index_gen
+
+    segment_counter = stream.read_int()
+    segments = stream.read_pickle()
+
+    stream.close()
+    return Toc(version=version, release=release, schema=schema,
+               segment_counter=segment_counter, segments=segments,
+               generation=gen)
+
+
+def _next_segment_name(self):
+    # Returns the name of the next segment in sequence.
+    if self.segment_num_lock is None:
+        self.segment_num_lock = Lock()
+
+    if self.segment_num_lock.acquire():
+        try:
+            self.segment_counter += 1
+            return
+        finally:
+            self.segment_num_lock.release()
+    else:
+        raise LockError
+
+
+def _clean_files(storage, indexname, gen, segments):
+    # Attempts to remove unused index files (called when a new generation
+    # is created). If existing Index and/or reader objects have the files
+    # open, they may not be deleted immediately (i.e. on Windows) but will
+    # probably be deleted eventually by a later call to clean_files.
+
+    current_segment_names = set(s.name for s in segments)
+
+    tocpattern = _toc_pattern(indexname)
+    segpattern = _segment_pattern(indexname)
+
+    todelete = set()
+    for filename in storage:
+        tocm = tocpattern.match(filename)
+        segm = segpattern.match(filename)
+        if tocm:
+            if int(tocm.group(1)) != gen:
+                todelete.add(filename)
+        elif segm:
+            name = segm.group(1)
+            if name not in current_segment_names:
+                todelete.add(filename)
+
+    for filename in todelete:
+        try:
+            storage.delete_file(filename)
+        except OSError:
+            # Another process still has this file open
+            pass
+
+
+# Index placeholder object
+
+class FileIndex(Index):
+    def __init__(self, storage, schema=None, indexname=_DEF_INDEX_NAME):
+        if not isinstance(storage, Storage):
+            raise ValueError("%r is not a Storage object" % storage)
+        if not isinstance(indexname, (str, unicode)):
+            raise ValueError("indexname %r is not a string" % indexname)
+
+        if schema:
+            schema = ensure_schema(schema)
+
+        self.storage = storage
+        self._schema = schema
+        self.indexname = indexname
+
+        # Try reading the TOC to see if it's possible
+        _read_toc(self.storage, self._schema, self.indexname)
+
+    def __repr__(self):
+        return "%s(%r, %r)" % (self.__class__.__name__,
+                               self.storage, self.indexname)
+
+    def close(self):
+        pass
+
+    # add_field
+    # remove_field
+
+    def latest_generation(self):
+        return _latest_generation(self.storage, self.indexname)
+
+    # refresh
+    # up_to_date
+
+    def last_modified(self):
+        gen = self.latest_generation()
+        filename = _toc_filename(self.indexname, gen)
+        return self.storage.file_modified(filename)
+
+    def is_empty(self):
+        return len(self._read_toc().segments) == 0
+
+    def optimize(self):
+        w = self.writer()
+        w.commit(optimize=True)
+
+    # searcher
+
+    def writer(self, **kwargs):
+        from whoosh.filedb.filewriting import SegmentWriter
+        return SegmentWriter(self, **kwargs)
+
+    def lock(self, name):
+        """Returns a lock object that you can try to call acquire() on to
+        lock the index.
+        """
+
+        return self.storage.lock(self.indexname + "_" + name)
+
+    def _read_toc(self):
+        return _read_toc(self.storage, self._schema, self.indexname)
+
+    def _segments(self):
+        return self._read_toc().segments
+
+    def _current_schema(self):
+        return self._read_toc().schema
+
+    @property
+    def schema(self):
+        return self._current_schema()
+
+    @classmethod
+    def _reader(cls, storage, schema, segments, generation, reuse=None):
+        from whoosh.filedb.filereading import SegmentReader
+
+        reusable = {}
+        try:
+            if len(segments) == 0:
+                # This index has no segments! Return an EmptyReader object,
+                # which simply returns empty or zero to every method
+                return EmptyReader(schema)
+
+            if reuse:
+                # Put all atomic readers in a dictionary keyed by their
+                # generation, so we can re-use them if possible
+                if reuse.is_atomic():
+                    readers = [reuse]
+                else:
+                    readers = [r for r, offset in reuse.leaf_readers()]
+                reusable = dict((r.generation(), r) for r in readers)
+
+            # Make a function to open readers, which reuses reusable readers.
+            # It removes any readers it reuses from the "reusable" dictionary,
+            # so later we can close any remaining readers.
+            def segreader(segment):
+                gen = segment.generation
+                if gen in reusable:
+                    r = reusable[gen]
+                    del reusable[gen]
+                    return r
+                else:
+                    return SegmentReader(storage, schema, segment)
+
+            if len(segments) == 1:
+                # This index has one segment, so return a SegmentReader object
+                # for the segment
+                return segreader(segments[0])
+            else:
+                # This index has multiple segments, so create a list of
+                # SegmentReaders for the segments, then composite them with a
+                # MultiReader
+
+                readers = [segreader(segment) for segment in segments]
+                return MultiReader(readers, generation=generation)
+        finally:
+            for r in reusable.values():
+                r.close()
+
+    def reader(self, reuse=None):
+        # Lock the index so nobody can delete a segment while we're in the
+        # middle of creating the reader
+        lock = self.lock("READLOCK")
+        lock.acquire(True)
+        try:
+            # Read the information from the TOC file
+            info = self._read_toc()
+            return self._reader(self.storage, info.schema, info.segments,
+                                info.generation, reuse=reuse)
+        finally:
+            lock.release()
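+    # Illustrative sketch (names are placeholders): given a Storage object
+    # that already contains an index, a FileIndex can be opened and readers
+    # obtained from it; passing an existing reader via ``reuse`` lets
+    # unchanged segment readers be recycled:
+    #
+    #     ix = FileIndex(some_storage)
+    #     r = ix.reader()
+    #     ...
+    #     r2 = ix.reader(reuse=r)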
+
+
+class Segment(object):
+    """Do not instantiate this object directly. It is used by the Index object
+    to hold information about a segment. A list of objects of this class are
+    pickled as part of the TOC file.
+
+    The TOC file stores a minimal amount of information -- mostly a list of
+    Segment objects. Segments are the real reverse indexes. Having multiple
+    segments allows quick incremental indexing: just create a new segment for
+    the new documents, and have the index overlay the new segment over previous
+    ones for purposes of reading/search. "Optimizing" the index combines the
+    contents of existing segments into one (removing any deleted documents
+    along the way).
+    """
+
+    EXTENSIONS = {"fieldlengths": "fln", "storedfields": "sto",
+                  "termsindex": "trm", "termposts": "pst",
+                  "vectorindex": "vec", "vectorposts": "vps"}
+
+    generation = 0
+
+    def __init__(self, name, generation, doccount, fieldlength_totals,
+                 fieldlength_maxes, deleted=None):
+        """
+        :param name: The name of the segment (the Index object computes this
+            from its name and the generation).
+        :param generation: The generation number of this segment.
+        :param doccount: The maximum document number in the segment.
+        :param fieldlength_totals: A dictionary mapping field names to the
+            total number of terms in that field across all documents in the
+            segment.
+        :param fieldlength_maxes: A dictionary mapping field names to the
+            maximum length of that field in any document in the segment.
+        :param deleted: A set of deleted document numbers, or None if no
+            deleted documents exist in this segment.
+        """
+
+        assert isinstance(name, basestring)
+        assert isinstance(doccount, (int, long))
+        assert fieldlength_totals is None or isinstance(fieldlength_totals, dict), "fl_totals=%r" % fieldlength_totals
+        assert fieldlength_maxes is None or isinstance(fieldlength_maxes, dict), "fl_maxes=%r" % fieldlength_maxes
+
+        self.name = name
+        self.generation = generation
+        self.doccount = doccount
+        self.fieldlength_totals = fieldlength_totals
+        self.fieldlength_maxes = fieldlength_maxes
+        self.deleted = deleted
+        self.uuid = uuid.uuid4()
+
+    def __repr__(self):
+        return "<%s %r %s>" % (self.__class__.__name__, self.name,
+                               getattr(self, "uuid", ""))
+
+    def __getattr__(self, name):
+        # Capture accesses to e.g. Segment.fieldlengths_filename and return
+        # the appropriate filename
+        ext = "_filename"
+        if name.endswith(ext):
+            basename = name[:0 - len(ext)]
+            if basename in self.EXTENSIONS:
+                return self.make_filename(self.EXTENSIONS[basename])
+
+        raise AttributeError(name)
+
+    def copy(self):
+        return Segment(self.name, self.generation, self.doccount,
+                       self.fieldlength_totals, self.fieldlength_maxes,
+                       self.deleted)
+
+    def make_filename(self, ext):
+        return "%s.%s" % (self.name, ext)
+
+    @classmethod
+    def basename(cls, indexname, segment_number):
+        return "_%s_%s" % (indexname, segment_number)
+
+    def doc_count_all(self):
+        """
+        :returns: the total number of documents, DELETED OR UNDELETED, in this
+            segment.
+        """
+        return self.doccount
+
+    def doc_count(self):
+        """
+        :returns: the number of (undeleted) documents in this segment.
+        """
+        return self.doccount - self.deleted_count()
+
+    def has_deletions(self):
+        """
+        :returns: True if any documents in this segment are deleted.
+        """
+        return self.deleted_count() > 0
+
+    def deleted_count(self):
+        """
+        :returns: the total number of deleted documents in this segment.
+        """
+        if self.deleted is None:
+            return 0
+        return len(self.deleted)
+
+    def field_length(self, fieldname, default=0):
+        """Returns the total number of terms in the given field across all
+        documents in this segment.
+        """
+        return self.fieldlength_totals.get(fieldname, default)
+
+    def max_field_length(self, fieldname, default=0):
+        """Returns the maximum length of the given field in any of the
+        documents in the segment.
+        """
+        return self.fieldlength_maxes.get(fieldname, default)
+
+    def delete_document(self, docnum, delete=True):
+        """Deletes the given document number. The document is not actually
+        removed from the index until it is optimized.
+
+        :param docnum: The document number to delete.
+        :param delete: If False, this undeletes a deleted document.
+        """
+
+        if delete:
+            if self.deleted is None:
+                self.deleted = set()
+            self.deleted.add(docnum)
+        elif self.deleted is not None and docnum in self.deleted:
+            self.deleted.discard(docnum)
+
+    def is_deleted(self, docnum):
+        """:returns: True if the given document number is deleted."""
+
+        if self.deleted is None:
+            return False
+        return docnum in self.deleted
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/filedb/filepostings.py b/lib/whoosh/whoosh/filedb/filepostings.py
new file mode 100644
index 0000000..0d3b5f2
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/filepostings.py
@@ -0,0 +1,325 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from whoosh.formats import Format
+from whoosh.writing import PostingWriter
+from whoosh.matching import Matcher, ReadTooFar
+from whoosh.spans import Span
+from whoosh.system import _INT_SIZE
+from whoosh.filedb import postblocks
+
+
+class FilePostingWriter(PostingWriter):
+    blockclass = postblocks.current
+
+    def __init__(self, postfile, stringids=False, blocklimit=128,
+                 compression=3):
+        self.postfile = postfile
+        self.stringids = stringids
+
+        if blocklimit > 255:
+            raise ValueError("blocklimit argument must be <= 255")
+        elif blocklimit < 1:
+            raise ValueError("blocklimit argument must be > 0")
+        self.blocklimit = blocklimit
+        self.compression = compression
+        self.block = None
+
+    def _reset_block(self):
+        self.block = self.blockclass(self.postfile, self.stringids)
+
+    def start(self, format):
+        if self.block is not None:
+            raise Exception("Called start() in a block")
+
+        self.format = format
+        self.blockcount = 0
+        self.posttotal = 0
+        self.startoffset = self.postfile.tell()
+
+        # Magic number
+        self.postfile.write_int(self.blockclass.magic)
+        # Placeholder for block count
+        self.postfile.write_uint(0)
+
+        self._reset_block()
+        return self.startoffset
+
+    def write(self, id, weight, valuestring, dfl):
+        self.block.append(id, weight, valuestring, dfl)
+        if len(self.block) >= self.blocklimit:
+            self._write_block()
+        self.posttotal += 1
+
+    def finish(self):
+        if self.block is None:
+            raise Exception("Called finish() when not in a block")
+
+        if self.block:
+            self._write_block()
+
+        # Seek back to the start of this list of posting blocks and write the
+        # number of blocks
+        pf = self.postfile
+        pf.flush()
+        offset = pf.tell()
+        pf.seek(self.startoffset + _INT_SIZE)
+        pf.write_uint(self.blockcount)
+        pf.seek(offset)
+
+        self.block = None
+        return self.posttotal
+
+    def cancel(self):
+        self.block = None
+
+    def close(self):
+        if self.block:
+            self.finish()
+        self.postfile.close()
+
+    def block_stats(self):
+        return self.block.stats()
+
+    def _write_block(self):
+        self.block.to_file(self.postfile, self.format.posting_size,
+                           compression=self.compression)
+        self._reset_block()
+        self.blockcount += 1
+
+    def as_inline(self):
+        block = self.block
+        _, maxwol, minlength = block.stats()
+        return (tuple(block.ids), tuple(block.weights), tuple(block.values),
+                maxwol, minlength)
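+    # Illustrative sketch (objects are placeholders): writing one term's
+    # posting list and reading it back. ``postfile`` stands for a structfile
+    # created by the storage and ``fmt`` for a whoosh.formats format object
+    # such as Frequency:
+    #
+    #     fpw = FilePostingWriter(postfile, blocklimit=128)
+    #     offset = fpw.start(fmt)
+    #     for docnum, weight, valuestring, dfl in postings:
+    #         fpw.write(docnum, weight, valuestring, dfl)
+    #     fpw.finish()
+    #
+    #     matcher = FilePostingReader(postfile, offset, fmt)
+    #     docnums = list(matcher.all_ids())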
+
+
+class FilePostingReader(Matcher):
+    def __init__(self, postfile, offset, format, scorer=None,
+                 fieldname=None, text=None, stringids=False):
+
+        assert isinstance(offset, (int, long)), "offset is %r/%s" % (offset, type(offset))
+        assert isinstance(format, Format), "format is %r/%s" % (format, type(format))
+
+        self.postfile = postfile
+        self.startoffset = offset
+        self.format = format
+        self.supports_chars = self.format.supports("characters")
+        self.supports_poses = self.format.supports("positions")
+        self.scorer = scorer
+        self.fieldname = fieldname
+        self.text = text
+        self.stringids = stringids
+
+        magic = postfile.get_int(offset)
+        self.blockclass = postblocks.magic_map[magic]
+
+        self.blockcount = postfile.get_uint(offset + _INT_SIZE)
+        self.baseoffset = offset + _INT_SIZE * 2
+        self._active = True
+        self.currentblock = -1
+        self._next_block()
+
+    def __repr__(self):
+        return "%s(%r, %s, %r, %r)" % (self.__class__.__name__, str(self.postfile),
+                                       self.startoffset, self.fieldname, self.text)
+
+    def close(self):
+        pass
+
+    def copy(self):
+        return self.__class__(self.postfile, self.startoffset, self.format,
+                              scorer=self.scorer, fieldname=self.fieldname,
+                              text=self.text, stringids=self.stringids)
+
+    def is_active(self):
+        return self._active
+
+    def id(self):
+        return self.block.ids[self.i]
+
+    def items_as(self, astype):
+        decoder = self.format.decoder(astype)
+        for id, value in self.all_items():
+            yield (id, decoder(value))
+
+    def supports(self, astype):
+        return self.format.supports(astype)
+
+    def value(self):
+        if self.block.values is None:
+            self.block.read_values(self.format.posting_size)
+        return self.block.values[self.i]
+
+    def value_as(self, astype):
+        decoder = self.format.decoder(astype)
+        return decoder(self.value())
+
+    def spans(self):
+        if self.supports_chars:
+            return [Span(pos, startchar=startchar, endchar=endchar)
+                    for pos, startchar, endchar in self.value_as("characters")]
+        elif self.supports_poses:
+            return [Span(pos) for pos in self.value_as("positions")]
+        else:
+            raise Exception("Field does not support positions (%r)" % self.fieldname)
+
+    def weight(self):
+        weights = self.block.weights
+        if weights is None:
+            return 1.0
+        else:
+            return weights[self.i]
+
+    def all_ids(self):
+        nextoffset = self.baseoffset
+        for _ in xrange(self.blockcount):
+            block = self._read_block(nextoffset)
+            nextoffset = block.nextoffset
+            ids = block.read_ids()
+            for id in ids:
+                yield id
+
+    def next(self):
+        if self.i == self.block.postcount - 1:
+            self._next_block()
+            return True
+        else:
+            self.i += 1
+            return False
+
+    def skip_to(self, id):
+        if not self.is_active():
+            raise ReadTooFar
+
+        i = self.i
+        # If the current posting's ID is already at or past the target, do nothing
+        if id <= self.block.ids[i]:
+            return
+
+        # Skip to the block that would contain the target ID
+        if id > self.block.maxid:
+            self._skip_to_block(lambda: id > self.block.maxid)
+        if not self._active:
+            return
+
+        # Iterate through the IDs in the block until we find or pass the
+        # target
+        ids = self.block.ids
+        i = self.i
+        while ids[i] < id:
+            i += 1
+            if i == len(ids):
+                self._active = False
+                return
+        self.i = i
+
+    def _read_block(self, offset):
+        pf = self.postfile
+        pf.seek(offset)
+        return self.blockclass.from_file(pf, self.stringids)
+
+    def _consume_block(self):
+        self.block.read_ids()
+        self.block.read_weights()
+        self.i = 0
+
+    def _next_block(self, consume=True):
+        if not (self.currentblock < self.blockcount):
+            raise Exception("No next block")
+
+        self.currentblock += 1
+        if self.currentblock == self.blockcount:
+            self._active = False
+            return
+
+        if self.currentblock == 0:
+            pos = self.baseoffset
+        else:
+            pos = self.block.nextoffset
+
+        self.block = self._read_block(pos)
+        if consume:
+            self._consume_block()
+
+    def _skip_to_block(self, targetfn):
+        skipped = 0
+        while self._active and targetfn():
+            self._next_block(consume=False)
+            skipped += 1
+
+        if self._active:
+            self._consume_block()
+
+        return skipped
+
+    def supports_quality(self):
+        return self.scorer and self.scorer.supports_quality()
+
+    def skip_to_quality(self, minquality):
+        bq = self.block_quality
+        if bq() > minquality:
+            return 0
+        return self._skip_to_block(lambda: bq() <= minquality)
+
+    def block_maxweight(self):
+        return self.block.maxweight
+
+    def block_maxwol(self):
+        return self.block.maxwol
+
+    def block_maxid(self):
+        return self.block.maxid
+
+    def block_minlength(self):
+        return self.block.minlength
+
+    def score(self):
+        return self.scorer.score(self)
+
+    def quality(self):
+        return self.scorer.quality(self)
+
+    def block_quality(self):
+        return self.scorer.block_quality(self)
+
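+# A FilePostingReader is normally obtained via SegmentReader.postings(); a
+# minimal consumption loop might look like this (an illustrative sketch; the
+# field name and term are assumed, not taken from this file):
+#
+#     m = reader.postings("content", u"hello")
+#     while m.is_active():
+#         print m.id(), m.weight()
+#         m.next()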
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/filedb/filereading.py b/lib/whoosh/whoosh/filedb/filereading.py
new file mode 100644
index 0000000..33d3793
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/filereading.py
@@ -0,0 +1,427 @@
+# Copyright 2009 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from bisect import bisect_left
+from heapq import nlargest, nsmallest
+from threading import Lock
+
+from whoosh.filedb.fieldcache import FieldCache, DefaultFieldCachingPolicy
+from whoosh.filedb.filepostings import FilePostingReader
+from whoosh.filedb.filetables import (TermIndexReader, StoredFieldReader,
+                                      LengthReader, TermVectorReader)
+from whoosh.matching import FilterMatcher, ListMatcher
+from whoosh.reading import IndexReader, TermNotFound
+from whoosh.util import protected
+
+SAVE_BY_DEFAULT = True
+
+
+# Reader class
+
+class SegmentReader(IndexReader):
+    GZIP_CACHES = False
+
+    def __init__(self, storage, schema, segment):
+        self.storage = storage
+        self.schema = schema
+        self.segment = segment
+
+        if hasattr(self.segment, "uuid"):
+            self.uuid_string = str(self.segment.uuid)
+        else:
+            import uuid
+            self.uuid_string = str(uuid.uuid4())
+
+        # Term index
+        tf = storage.open_file(segment.termsindex_filename)
+        self.termsindex = TermIndexReader(tf)
+
+        # Term postings file, vector index, and vector postings: lazy load
+        self.postfile = None
+        self.vectorindex = None
+        self.vpostfile = None
+
+        # Stored fields file
+        sf = storage.open_file(segment.storedfields_filename, mapped=False)
+        self.storedfields = StoredFieldReader(sf)
+
+        # Field length file
+        self.fieldlengths = None
+        if self.schema.has_scorable_fields():
+            flf = storage.open_file(segment.fieldlengths_filename)
+            self.fieldlengths = LengthReader(flf, segment.doc_count_all())
+
+        # Copy methods from underlying segment
+        self.has_deletions = segment.has_deletions
+        self.is_deleted = segment.is_deleted
+        self.doc_count = segment.doc_count
+
+        # Postings file
+        self.postfile = self.storage.open_file(segment.termposts_filename,
+                                               mapped=False)
+
+        self.dc = segment.doc_count_all()
+        assert self.dc == self.storedfields.length
+
+        self.set_caching_policy()
+
+        self.is_closed = False
+        self._sync_lock = Lock()
+
+    def generation(self):
+        return self.segment.generation
+
+    def _open_vectors(self):
+        if self.vectorindex:
+            return
+
+        storage, segment = self.storage, self.segment
+
+        # Vector index
+        vf = storage.open_file(segment.vectorindex_filename)
+        self.vectorindex = TermVectorReader(vf)
+
+        # Vector postings file
+        self.vpostfile = storage.open_file(segment.vectorposts_filename,
+                                           mapped=False)
+
+    def __repr__(self):
+        return "%s(%s)" % (self.__class__.__name__, self.segment)
+
+    @protected
+    def __contains__(self, term):
+        return term in self.termsindex
+
+    def close(self):
+        self.storedfields.close()
+        self.termsindex.close()
+        if self.postfile:
+            self.postfile.close()
+        if self.vectorindex:
+            self.vectorindex.close()
+        if self.vpostfile:
+            self.vpostfile.close()
+        #if self.fieldlengths:
+        #    self.fieldlengths.close()
+        self.caching_policy = None
+        self.is_closed = True
+
+    def doc_count_all(self):
+        return self.dc
+
+    @protected
+    def stored_fields(self, docnum):
+        schema = self.schema
+        return dict(item for item
+                    in self.storedfields[docnum].iteritems()
+                    if item[0] in schema)
+
+    @protected
+    def all_stored_fields(self):
+        is_deleted = self.segment.is_deleted
+        sf = self.stored_fields
+        for docnum in xrange(self.segment.doc_count_all()):
+            if not is_deleted(docnum):
+                yield sf(docnum)
+
+    def field_length(self, fieldname):
+        return self.segment.field_length(fieldname)
+
+    @protected
+    def doc_field_length(self, docnum, fieldname, default=0):
+        if self.fieldlengths is None:
+            return default
+        return self.fieldlengths.get(docnum, fieldname, default=default)
+
+    def max_field_length(self, fieldname):
+        return self.segment.max_field_length(fieldname)
+
+    @protected
+    def has_vector(self, docnum, fieldname):
+        if self.schema[fieldname].vector:
+            self._open_vectors()
+            return (docnum, fieldname) in self.vectorindex
+        else:
+            return False
+
+    @protected
+    def __iter__(self):
+        schema = self.schema
+        for (fieldname, t), (totalfreq, _, postcount) in self.termsindex:
+            if fieldname not in schema:
+                continue
+            yield (fieldname, t, postcount, totalfreq)
+
+    def _test_field(self, fieldname):
+        if fieldname not in self.schema:
+            raise TermNotFound("No field %r" % fieldname)
+        if self.schema[fieldname].format is None:
+            raise TermNotFound("Field %r is not indexed" % fieldname)
+
+    @protected
+    def iter_from(self, fieldname, text):
+        schema = self.schema
+        self._test_field(fieldname)
+        for (fn, t), (totalfreq, _, postcount) in self.termsindex.items_from((fieldname, text)):
+            if fn not in schema:
+                continue
+            yield (fn, t, postcount, totalfreq)
+
+    @protected
+    def _term_info(self, fieldname, text):
+        self._test_field(fieldname)
+        try:
+            return self.termsindex[fieldname, text]
+        except KeyError:
+            raise TermNotFound("%s:%r" % (fieldname, text))
+
+    def doc_frequency(self, fieldname, text):
+        self._test_field(fieldname)
+        try:
+            return self._term_info(fieldname, text)[2]
+        except TermNotFound:
+            return 0
+
+    def frequency(self, fieldname, text):
+        self._test_field(fieldname)
+        try:
+            return self._term_info(fieldname, text)[0]
+        except TermNotFound:
+            return 0
+
+    def lexicon(self, fieldname):
+        # The base class has a lexicon() implementation that uses iter_from()
+        # and throws away the value, but overriding to use
+        # FileTableReader.keys_from() is much, much faster.
+
+        self._test_field(fieldname)
+
+        # If a field cache happens to already be loaded for this field, use it
+        # instead of loading the field values from disk
+        if self.fieldcache_loaded(fieldname):
+            fieldcache = self.fieldcache(fieldname)
+            it = iter(fieldcache.texts)
+            # The first value in fieldcache.texts is the default; throw it away
+            it.next()
+            return it
+
+        return self.expand_prefix(fieldname, '')
+
+    @protected
+    def expand_prefix(self, fieldname, prefix):
+        # The base class has an expand_prefix() implementation that uses
+        # iter_from() and throws away the value, but overriding to use
+        # FileTableReader.keys_from() is much, much faster.
+
+        self._test_field(fieldname)
+
+        if self.fieldcache_loaded(fieldname):
+            texts = self.fieldcache(fieldname).texts
+            i = bisect_left(texts, prefix)
+            while i < len(texts) and texts[i].startswith(prefix):
+                yield texts[i]
+                i += 1
+        else:
+            for fn, t in self.termsindex.keys_from((fieldname, prefix)):
+                if fn != fieldname or not t.startswith(prefix):
+                    break
+                yield t
+
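+    # For example (a sketch, with the field name and prefix assumed): list
+    # all indexed terms in the "title" field that start with "wh":
+    #
+    #     terms = list(reader.expand_prefix("title", u"wh"))
+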
+    def postings(self, fieldname, text, scorer=None):
+        self._test_field(fieldname)
+        format = self.schema[fieldname].format
+        try:
+            offset = self.termsindex[fieldname, text][1]
+        except KeyError:
+            raise TermNotFound("%s:%r" % (fieldname, text))
+
+        if isinstance(offset, (int, long)):
+            postreader = FilePostingReader(self.postfile, offset, format,
+                                           scorer=scorer, fieldname=fieldname,
+                                           text=text)
+        else:
+            docids, weights, values, maxwol, minlength = offset
+            postreader = ListMatcher(docids, weights, values, format, scorer,
+                                     maxwol=maxwol, minlength=minlength)
+
+        deleted = self.segment.deleted
+        if deleted:
+            postreader = FilterMatcher(postreader, deleted, exclude=True)
+
+        return postreader
+
+    def vector(self, docnum, fieldname):
+        if fieldname not in self.schema:
+            raise TermNotFound("No field %r" % fieldname)
+        vformat = self.schema[fieldname].vector
+        if not vformat:
+            raise Exception("No vectors are stored for field %r" % fieldname)
+
+        self._open_vectors()
+        offset = self.vectorindex.get((docnum, fieldname))
+        if offset is None:
+            raise Exception("No vector found for document"
+                            " %s field %r" % (docnum, fieldname))
+
+        return FilePostingReader(self.vpostfile, offset, vformat, stringids=True)
+
+    # Field cache methods
+
+    def supports_caches(self):
+        return True
+
+    def set_caching_policy(self, cp=None, save=True, storage=None):
+        """This method lets you control the caching policy of the reader. You
+        can either pass a :class:`whoosh.filedb.fieldcache.FieldCachingPolicy`
+        as the first argument, *or* use the `save` and `storage` keywords to
+        alter the default caching policy::
+
+            # Use a custom field caching policy object
+            reader.set_caching_policy(MyPolicy())
+
+            # Use the default caching policy but turn off saving caches to disk
+            reader.set_caching_policy(save=False)
+
+            # Use the default caching policy but save caches to a custom storage
+            from whoosh.filedb.filestore import FileStorage
+            mystorage = FileStorage("path/to/cachedir")
+            reader.set_caching_policy(storage=mystorage)
+
+        :param cp: a :class:`whoosh.filedb.fieldcache.FieldCachingPolicy`
+            object. If this argument is not given, the default caching policy
+            is used.
+        :param save: save field caches to disk for re-use. If a caching policy
+            object is specified using `cp`, this argument is ignored.
+        :param storage: a custom :class:`whoosh.store.Storage` object to use
+            for saving field caches. If a caching policy object is specified
+            using `cp` or `save` is `False`, this argument is ignored.
+        """
+
+        if not cp:
+            if save and storage is None:
+                storage = self.storage
+            else:
+                storage = None
+            cp = DefaultFieldCachingPolicy(self.segment.name, storage=storage)
+
+        if type(cp) is type:
+            cp = cp()
+
+        self.caching_policy = cp
+
+    def _fieldkey(self, fieldname):
+        return "%s/%s" % (self.uuid_string, fieldname)
+
+    def define_facets(self, name, qs, save=SAVE_BY_DEFAULT):
+        if name in self.schema:
+            raise Exception("Can't define facets using the name of a field (%r)" % name)
+
+        if self.fieldcache_available(name):
+            # Don't recreate the cache if it already exists
+            return
+
+        cache = self.caching_policy.get_class().from_lists(qs, self.doc_count_all())
+        self.caching_policy.put(self._fieldkey(name), cache, save=save)
+
+    def fieldcache(self, fieldname, save=SAVE_BY_DEFAULT):
+        """Returns a :class:`whoosh.filedb.fieldcache.FieldCache` object for
+        the given field.
+
+        :param fieldname: the name of the field to get a cache for.
+        :param save: if True (the default), the cache is saved to disk if it
+            doesn't already exist.
+        """
+
+        key = self._fieldkey(fieldname)
+        fc = self.caching_policy.get(key)
+        if not fc:
+            fc = FieldCache.from_field(self, fieldname)
+            self.caching_policy.put(key, fc, save=save)
+        return fc
+
+    def fieldcache_available(self, fieldname):
+        """Returns True if a field cache exists for the given field (either in
+        memory already or on disk).
+        """
+
+        return self._fieldkey(fieldname) in self.caching_policy
+
+    def fieldcache_loaded(self, fieldname):
+        """Returns True if a field cache for the given field is in memory.
+        """
+
+        return self.caching_policy.is_loaded(self._fieldkey(fieldname))
+
+    def unload_fieldcache(self, name):
+        self.caching_policy.delete(self._fieldkey(name))
+
+    # Sorting and faceting methods
+
+    def key_fn(self, fields):
+        if isinstance(fields, basestring):
+            fields = (fields, )
+
+        if len(fields) > 1:
+            fcs = [self.fieldcache(fn) for fn in fields]
+            return lambda docnum: tuple(fc.key_for(docnum) for fc in fcs)
+        else:
+            return self.fieldcache(fields[0]).key_for
+
+    def sort_docs_by(self, fields, docnums, reverse=False):
+        keyfn = self.key_fn(fields)
+        return sorted(docnums, key=keyfn, reverse=reverse)
+
+    def key_docs_by(self, fields, docnums, limit, reverse=False, offset=0):
+        keyfn = self.key_fn(fields)
+
+        if limit is None:
+            # Don't bother sorting, the caller will do that
+            return [(keyfn(docnum), docnum + offset) for docnum in docnums]
+        else:
+            # A non-reversed sort (the usual case) is inefficient because we
+            # have to use nsmallest, but I can't think of a cleverer thing to
+            # do right now. I thought I had an idea, but I was wrong.
+            op = nlargest if reverse else nsmallest
+
+            return op(limit, ((keyfn(docnum), docnum + offset)
+                              for docnum in docnums))
+
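+    # A sketch of how the sorting helpers above are typically used (docnums
+    # would come from a search; the field name is assumed for illustration):
+    #
+    #     ordered = reader.sort_docs_by("title", docnums)
+    #     keyed = reader.key_docs_by("title", docnums, limit=10)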
+
+
+
+#    def collapse_docs_by(self, fieldname, scores_and_docnums):
+#        fieldcache = self.caches.get_cache(self, fieldname)
+#        return fieldcache.collapse(scores_and_docnums)
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/filedb/filestore.py b/lib/whoosh/whoosh/filedb/filestore.py
new file mode 100644
index 0000000..a872d49
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/filestore.py
@@ -0,0 +1,220 @@
+# Copyright 2009 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+import os
+from cStringIO import StringIO
+from threading import Lock
+
+from whoosh.index import _DEF_INDEX_NAME
+from whoosh.store import Storage
+from whoosh.support.filelock import FileLock
+from whoosh.filedb.structfile import StructFile
+
+
+class ReadOnlyError(Exception):
+    pass
+
+
+class FileStorage(Storage):
+    """Storage object that stores the index as files in a directory on disk.
+    """
+
+    def __init__(self, path, mapped=True, readonly=False):
+        self.folder = path
+        self.mapped = mapped
+        self.readonly = readonly
+        self.locks = {}
+
+        if not os.path.exists(path):
+            raise IOError("Directory %s does not exist" % path)
+
+    def create_index(self, schema, indexname=_DEF_INDEX_NAME):
+        if self.readonly:
+            raise ReadOnlyError
+
+        from whoosh.filedb.fileindex import _create_index, FileIndex
+        _create_index(self, schema, indexname)
+        return FileIndex(self, schema, indexname)
+
+    def open_index(self, indexname=_DEF_INDEX_NAME, schema=None):
+        from whoosh.filedb.fileindex import FileIndex
+        return FileIndex(self, schema=schema, indexname=indexname)
+
+    def create_file(self, name, excl=False, mode="wb", **kwargs):
+        if self.readonly:
+            raise ReadOnlyError
+
+        path = self._fpath(name)
+        if excl:
+            flags = os.O_CREAT | os.O_EXCL | os.O_RDWR
+            if hasattr(os, "O_BINARY"):
+                flags |= os.O_BINARY
+            fd = os.open(path, flags)
+            fileobj = os.fdopen(fd, mode)
+        else:
+            fileobj = open(path, mode)
+
+        f = StructFile(fileobj, name=name, mapped=self.mapped, **kwargs)
+        return f
+
+    def open_file(self, name, *args, **kwargs):
+        try:
+            f = StructFile(open(self._fpath(name), "rb"), name=name, *args, **kwargs)
+        except IOError:
+            print "Tried to open %r, files=%r" % (name, self.list())
+            raise
+        return f
+
+    def _fpath(self, fname):
+        return os.path.join(self.folder, fname)
+
+    def clean(self):
+        path = self.folder
+        if not os.path.exists(path):
+            os.mkdir(path)
+
+        files = self.list()
+        for file in files:
+            os.remove(os.path.join(path, file))
+
+    def list(self):
+        try:
+            files = os.listdir(self.folder)
+        except (IOError, OSError):
+            files = []
+
+        return files
+
+    def file_exists(self, name):
+        return os.path.exists(self._fpath(name))
+
+    def file_modified(self, name):
+        return os.path.getmtime(self._fpath(name))
+
+    def file_length(self, name):
+        return os.path.getsize(self._fpath(name))
+
+    def delete_file(self, name):
+        os.remove(self._fpath(name))
+
+    def rename_file(self, frm, to, safe=False):
+        if os.path.exists(self._fpath(to)):
+            if safe:
+                raise NameError("File %r exists" % to)
+            else:
+                os.remove(self._fpath(to))
+        os.rename(self._fpath(frm), self._fpath(to))
+
+    def lock(self, name):
+        return FileLock(self._fpath(name))
+
+    def __repr__(self):
+        return "%s(%s)" % (self.__class__.__name__, repr(self.folder))
+
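+# A minimal usage sketch (assumes the directory already exists and a schema
+# object is available; the names are illustrative):
+#
+#     st = FileStorage("indexdir")
+#     ix = st.create_index(myschema)
+#     f = st.create_file("extra.bin")
+#     f.write("data")
+#     f.close()
+#     assert st.file_exists("extra.bin")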
+
+class RamStorage(FileStorage):
+    """Storage object that keeps the index in memory.
+    """
+
+    def __init__(self):
+        self.files = {}
+        self.locks = {}
+        self.folder = ''
+
+    def list(self):
+        return self.files.keys()
+
+    def clean(self):
+        self.files = {}
+
+    def total_size(self):
+        return sum(self.file_length(f) for f in self.list())
+
+    def file_exists(self, name):
+        return name in self.files
+
+    def file_length(self, name):
+        if name not in self.files:
+            raise NameError("No such file %r" % name)
+        return len(self.files[name])
+
+    def delete_file(self, name):
+        if name not in self.files:
+            raise NameError("No such file %r" % name)
+        del self.files[name]
+
+    def rename_file(self, name, newname, safe=False):
+        if name not in self.files:
+            raise NameError("File %r does not exist" % name)
+        if safe and newname in self.files:
+            raise NameError("File %r exists" % newname)
+
+        content = self.files[name]
+        del self.files[name]
+        self.files[newname] = content
+
+    def create_file(self, name, **kwargs):
+        def onclose_fn(sfile):
+            self.files[name] = sfile.file.getvalue()
+        f = StructFile(StringIO(), name=name, onclose=onclose_fn)
+        return f
+
+    def open_file(self, name, *args, **kwargs):
+        if name not in self.files:
+            raise NameError("No such file %r" % name)
+        return StructFile(StringIO(self.files[name]), name=name, *args, **kwargs)
+
+    def lock(self, name):
+        if name not in self.locks:
+            self.locks[name] = Lock()
+        return self.locks[name]
+
+
+def copy_to_ram(storage):
+    """Copies the given FileStorage object into a new RamStorage object.
+
+    :rtype: :class:`RamStorage`
+    """
+
+    import shutil
+    ram = RamStorage()
+    for name in storage.list():
+        f = storage.open_file(name)
+        r = ram.create_file(name)
+        shutil.copyfileobj(f.file, r.file)
+        f.close()
+        r.close()
+    return ram
+
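+# For example (a sketch, with the directory name assumed), an existing
+# on-disk index can be copied into memory for faster searching:
+#
+#     ram = copy_to_ram(FileStorage("indexdir"))
+#     ix = ram.open_index()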
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/filedb/filetables.py b/lib/whoosh/whoosh/filedb/filetables.py
new file mode 100644
index 0000000..bc3b1e1
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/filetables.py
@@ -0,0 +1,897 @@
+# Copyright 2009 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""This module defines writer and reader classes for a fast, immutable
+on-disk key-value database format. The current format is based heavily on
+D. J. Bernstein's CDB format (http://cr.yp.to/cdb.html).
+"""
+
+from array import array
+from collections import defaultdict
+from cPickle import loads, dumps
+from struct import Struct
+
+from whoosh.system import (_INT_SIZE, _LONG_SIZE, pack_ushort, pack_uint,
+                           pack_long, unpack_ushort, unpack_uint, unpack_long)
+from whoosh.util import byte_to_length, utf8encode, utf8decode
+
+
+_4GB = 4 * 1024 * 1024 * 1024
+
+#def cdb_hash(key):
+#    h = 5381L
+#    for c in key:
+#        h = (h + (h << 5)) & 0xffffffffL ^ ord(c)
+#    return h
+
+_header_entry_struct = Struct("!qI")  # Position, number of slots
+header_entry_size = _header_entry_struct.size
+pack_header_entry = _header_entry_struct.pack
+unpack_header_entry = _header_entry_struct.unpack
+
+_lengths_struct = Struct("!II")  # Length of key, length of data
+lengths_size = _lengths_struct.size
+pack_lengths = _lengths_struct.pack
+unpack_lengths = _lengths_struct.unpack
+
+_pointer_struct = Struct("!qq")  # Hash value, position
+pointer_size = _pointer_struct.size
+pack_pointer = _pointer_struct.pack
+unpack_pointer = _pointer_struct.unpack
+
+HEADER_SIZE = 256 * header_entry_size
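+# Each header entry is 12 bytes ("!qI" = 8 + 4), so the header occupies
+# 256 * 12 = 3072 bytes at the start of the file.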
+
+#def _hash(value):
+#    return abs(hash(value))
+_hash = hash
+
+
+# Table classes
+
+class HashWriter(object):
+    def __init__(self, dbfile):
+        self.dbfile = dbfile
+        # Seek past the header area (HEADER_SIZE bytes)... we'll come back
+        # here to write the header later
+        dbfile.seek(HEADER_SIZE)
+        # Store the directory of hashed values
+        self.hashes = defaultdict(list)
+
+    def add_all(self, items):
+        dbfile = self.dbfile
+        hashes = self.hashes
+        pos = dbfile.tell()
+        write = dbfile.write
+
+        for key, value in items:
+            write(pack_lengths(len(key), len(value)))
+            write(key)
+            write(value)
+
+            h = _hash(key)
+            hashes[h & 255].append((h, pos))
+            pos += lengths_size + len(key) + len(value)
+
+    def add(self, key, value):
+        self.add_all(((key, value),))
+
+    def _write_hashes(self):
+        dbfile = self.dbfile
+        hashes = self.hashes
+        directory = self.directory = []
+
+        pos = dbfile.tell()
+        for i in xrange(0, 256):
+            entries = hashes[i]
+            numslots = 2 * len(entries)
+            directory.append((pos, numslots))
+
+            null = (0, 0)
+            hashtable = [null] * numslots
+            for hashval, position in entries:
+                n = (hashval >> 8) % numslots
+                while hashtable[n] != null:
+                    n = (n + 1) % numslots
+                hashtable[n] = (hashval, position)
+
+            write = dbfile.write
+            for hashval, position in hashtable:
+                write(pack_pointer(hashval, position))
+                pos += pointer_size
+
+        dbfile.flush()
+
+    def _write_directory(self):
+        dbfile = self.dbfile
+        directory = self.directory
+
+        dbfile.seek(0)
+        for position, numslots in directory:
+            dbfile.write(pack_header_entry(position, numslots))
+        assert dbfile.tell() == HEADER_SIZE
+        dbfile.flush()
+
+    def close(self):
+        self._write_hashes()
+        self._write_directory()
+        self.dbfile.close()
+
+
+class HashReader(object):
+    def __init__(self, dbfile):
+        self.dbfile = dbfile
+        self.map = dbfile.map
+        self.end_of_data = dbfile.get_long(0)
+        self.is_closed = False
+
+    def close(self):
+        if self.is_closed:
+            raise Exception("Tried to close %r twice" % self)
+        del self.map
+        self.dbfile.close()
+        self.is_closed = True
+
+    def read(self, position, length):
+        return self.map[position:position + length]
+
+    def _ranges(self, pos=HEADER_SIZE):
+        eod = self.end_of_data
+        read = self.read
+        while pos < eod:
+            keylen, datalen = unpack_lengths(read(pos, lengths_size))
+            keypos = pos + lengths_size
+            datapos = pos + lengths_size + keylen
+            pos = datapos + datalen
+            yield (keypos, keylen, datapos, datalen)
+
+    def __iter__(self):
+        return self.items()
+
+    def items(self):
+        read = self.read
+        for keypos, keylen, datapos, datalen in self._ranges():
+            yield (read(keypos, keylen), read(datapos, datalen))
+
+    def keys(self):
+        read = self.read
+        for keypos, keylen, _, _ in self._ranges():
+            yield read(keypos, keylen)
+
+    def values(self):
+        read = self.read
+        for _, _, datapos, datalen in self._ranges():
+            yield read(datapos, datalen)
+
+    def __getitem__(self, key):
+        for data in self.all(key):
+            return data
+        raise KeyError(key)
+
+    def get(self, key, default=None):
+        for data in self.all(key):
+            return data
+        return default
+
+    def all(self, key):
+        read = self.read
+        for datapos, datalen in self._get_ranges(key):
+            yield read(datapos, datalen)
+
+    def __contains__(self, key):
+        for _ in self._get_ranges(key):
+            return True
+        return False
+
+    def _hashtable_info(self, keyhash):
+        # Return (directory_position, number_of_hash_entries)
+        return unpack_header_entry(self.read((keyhash & 255) * header_entry_size,
+                                             header_entry_size))
+
+    def _key_position(self, key):
+        keyhash = _hash(key)
+        hpos, hslots = self._hashtable_info(keyhash)
+        if not hslots:
+            raise KeyError(key)
+        slotpos = hpos + (((keyhash >> 8) % hslots) * pointer_size)
+
+        return self.dbfile.get_long(slotpos + _LONG_SIZE)
+
+    def _key_at(self, pos):
+        keylen = self.dbfile.get_uint(pos)
+        return self.read(pos + lengths_size, keylen)
+
+    def _get_ranges(self, key):
+        read = self.read
+        keyhash = _hash(key)
+        hpos, hslots = self._hashtable_info(keyhash)
+        if not hslots:
+            return
+
+        slotpos = hpos + (((keyhash >> 8) % hslots) * pointer_size)
+        for _ in xrange(hslots):
+            slothash, pos = unpack_pointer(read(slotpos, pointer_size))
+            if not pos:
+                return
+
+            slotpos += pointer_size
+            # If we reach the end of the hashtable, wrap around
+            if slotpos == hpos + (hslots * pointer_size):
+                slotpos = hpos
+
+            if slothash == keyhash:
+                keylen, datalen = unpack_lengths(read(pos, lengths_size))
+                if keylen == len(key):
+                    if key == read(pos + lengths_size, keylen):
+                        yield (pos + lengths_size + keylen, datalen)
+
+    def end_of_hashes(self):
+        lastpos, lastnum = unpack_header_entry(self.read(255 * header_entry_size,
+                                                         header_entry_size))
+        return lastpos + lastnum * pointer_size
+
+
+class OrderedHashWriter(HashWriter):
+    def __init__(self, dbfile):
+        HashWriter.__init__(self, dbfile)
+        self.index = []
+        self.lastkey = None
+
+    def add_all(self, items):
+        dbfile = self.dbfile
+        hashes = self.hashes
+        pos = dbfile.tell()
+        write = dbfile.write
+
+        index = self.index
+        lk = self.lastkey
+
+        for key, value in items:
+            if key <= lk:
+                raise ValueError("Keys must increase: %r .. %r" % (lk, key))
+            lk = key
+
+            index.append(pos)
+            write(pack_lengths(len(key), len(value)))
+            write(key)
+            write(value)
+
+            h = _hash(key)
+            hashes[h & 255].append((h, pos))
+
+            pos += lengths_size + len(key) + len(value)
+
+        self.lastkey = lk
+
+    def close(self):
+        self._write_hashes()
+        dbfile = self.dbfile
+
+        dbfile.write_uint(len(self.index))
+        for n in self.index:
+            dbfile.write_long(n)
+
+        self._write_directory()
+        self.dbfile.close()
+
+
+class OrderedHashReader(HashReader):
+    def __init__(self, dbfile):
+        HashReader.__init__(self, dbfile)
+        dbfile.seek(self.end_of_hashes())
+        self.length = dbfile.read_uint()
+        self.indexbase = dbfile.tell()
+
+    def _closest_key(self, key):
+        dbfile = self.dbfile
+        key_at = self._key_at
+        indexbase = self.indexbase
+        lo = 0
+        hi = self.length
+        while lo < hi:
+            mid = (lo + hi) // 2
+            midkey = key_at(dbfile.get_long(indexbase + mid * _LONG_SIZE))
+            if midkey < key:
+                lo = mid + 1
+            else:
+                hi = mid
+        #i = max(0, mid - 1)
+        if lo == self.length:
+            return None
+        return dbfile.get_long(indexbase + lo * _LONG_SIZE)
+
+    def closest_key(self, key):
+        pos = self._closest_key(key)
+        if pos is None:
+            return None
+        return self._key_at(pos)
+
+    def _ranges_from(self, key):
+        #read = self.read
+        pos = self._closest_key(key)
+        if pos is None:
+            return
+
+        for x in self._ranges(pos=pos):
+            yield x
+
+    def items_from(self, key):
+        read = self.read
+        for keypos, keylen, datapos, datalen in self._ranges_from(key):
+            yield (read(keypos, keylen), read(datapos, datalen))
+
+    def keys_from(self, key):
+        read = self.read
+        for keypos, keylen, _, _ in self._ranges_from(key):
+            yield read(keypos, keylen)
+
+    def values_from(self, key):
+        read = self.read
+        for _, _, datapos, datalen in self._ranges_from(key):
+            yield read(datapos, datalen)
+
+
+class CodedHashWriter(HashWriter):
+    # Abstract base class, subclass must implement keycoder and valuecoder
+
+    def __init__(self, dbfile):
+        sup = super(CodedHashWriter, self)
+        sup.__init__(dbfile)
+
+        self._add = sup.add
+
+    def add(self, key, data):
+        self._add(self.keycoder(key), self.valuecoder(data))
+
+
+class CodedHashReader(HashReader):
+    # Abstract base class, subclass must implement keycoder, keydecoder and
+    # valuecoder
+
+    def __init__(self, dbfile):
+        sup = super(CodedHashReader, self)
+        sup.__init__(dbfile)
+
+        self._items = sup.items
+        self._keys = sup.keys
+        self._get = sup.get
+        self._getitem = sup.__getitem__
+        self._contains = sup.__contains__
+
+    def __getitem__(self, key):
+        k = self.keycoder(key)
+        return self.valuedecoder(self._getitem(k))
+
+    def __contains__(self, key):
+        return self._contains(self.keycoder(key))
+
+    def get(self, key, default=None):
+        k = self.keycoder(key)
+        return self.valuedecoder(self._get(k, default))
+
+    def items(self):
+        kd = self.keydecoder
+        vd = self.valuedecoder
+        for key, value in self._items():
+            yield (kd(key), vd(value))
+
+    def keys(self):
+        kd = self.keydecoder
+        for k in self._keys():
+            yield kd(k)
+
+
+class CodedOrderedWriter(OrderedHashWriter):
+    # Abstract base class, subclasses must implement keycoder and valuecoder
+
+    def __init__(self, dbfile):
+        sup = super(CodedOrderedWriter, self)
+        sup.__init__(dbfile)
+        self._add = sup.add
+
+    def add(self, key, data):
+        self._add(self.keycoder(key), self.valuecoder(data))
+
+
+class CodedOrderedReader(OrderedHashReader):
+    # Abstract base class, subclasses must implement keycoder, keydecoder,
+    # and valuedecoder
+
+    def __init__(self, dbfile):
+        sup = super(CodedOrderedReader, self)
+        sup.__init__(dbfile)
+
+        self._items = sup.items
+        self._items_from = sup.items_from
+        self._keys = sup.keys
+        self._keys_from = sup.keys_from
+        self._get = sup.get
+        self._getitem = sup.__getitem__
+        self._contains = sup.__contains__
+
+    def __getitem__(self, key):
+        k = self.keycoder(key)
+        return self.valuedecoder(self._getitem(k))
+
+    def __contains__(self, key):
+        try:
+            codedkey = self.keycoder(key)
+        except KeyError:
+            return False
+        return self._contains(codedkey)
+
+    def get(self, key, default=None):
+        k = self.keycoder(key)
+        return self.valuedecoder(self._get(k, default))
+
+    def items(self):
+        kd = self.keydecoder
+        vd = self.valuedecoder
+        for key, value in self._items():
+            yield (kd(key), vd(value))
+
+    def items_from(self, key):
+        fromkey = self.keycoder(key)
+        kd = self.keydecoder
+        vd = self.valuedecoder
+        for key, value in self._items_from(fromkey):
+            yield (kd(key), vd(value))
+
+    def keys(self):
+        kd = self.keydecoder
+        for k in self._keys():
+            yield kd(k)
+
+    def keys_from(self, key):
+        kd = self.keydecoder
+        for k in self._keys_from(self.keycoder(key)):
+            yield kd(k)
+
+
+class TermIndexWriter(CodedOrderedWriter):
+    def __init__(self, dbfile):
+        super(TermIndexWriter, self).__init__(dbfile)
+        self.fieldcounter = 0
+        self.fieldmap = {}
+
+    def keycoder(self, key):
+        # Encode term
+        fieldmap = self.fieldmap
+        fieldname, text = key
+
+        if fieldname in fieldmap:
+            fieldnum = fieldmap[fieldname]
+        else:
+            fieldnum = self.fieldcounter
+            fieldmap[fieldname] = fieldnum
+            self.fieldcounter += 1
+
+        key = pack_ushort(fieldnum) + utf8encode(text)[0]
+        return key
+
+    def valuecoder(self, data):
+        w, offset, df = data
+
+        if w == 1 and df == 1:
+            v = dumps((offset, ), -1)
+        elif w == df:
+            v = dumps((offset, df), -1)
+        else:
+            v = dumps((w, offset, df), -1)
+
+        # Strip off the 2-byte pickle protocol header at the start and the
+        # STOP opcode ('.') at the end; valuedecoder() adds the STOP back
+        return v[2:-1]
+
+    def close(self):
+        self._write_hashes()
+        dbfile = self.dbfile
+
+        dbfile.write_uint(len(self.index))
+        for n in self.index:
+            dbfile.write_long(n)
+        dbfile.write_pickle(self.fieldmap)
+
+        self._write_directory()
+        self.dbfile.close()
+
+
+class TermIndexReader(CodedOrderedReader):
+    def __init__(self, dbfile):
+        super(TermIndexReader, self).__init__(dbfile)
+
+        dbfile.seek(self.indexbase + self.length * _LONG_SIZE)
+        self.fieldmap = dbfile.read_pickle()
+        self.names = [None] * len(self.fieldmap)
+        for name, num in self.fieldmap.iteritems():
+            self.names[num] = name
+
+    def keycoder(self, key):
+        fieldname, text = key
+        fnum = self.fieldmap.get(fieldname, 65535)
+        return pack_ushort(fnum) + utf8encode(text)[0]
+
+    def keydecoder(self, v):
+        return (self.names[unpack_ushort(v[:2])[0]], utf8decode(v[2:])[0])
+
+    def valuedecoder(self, v):
+        v = loads(v + ".")
+        if len(v) == 1:
+            return (1, v[0], 1)
+        elif len(v) == 2:
+            return (v[1], v[0], v[1])
+        else:
+            return v
+
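+# The stripped pickle above round-trips because valuedecoder() re-appends the
+# STOP opcode, e.g. (a sketch with made-up numbers):
+#
+#     from cPickle import dumps, loads
+#     v = dumps((1.0, 4096, 2), -1)[2:-1]
+#     assert loads(v + ".") == (1.0, 4096, 2)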
+
+# docnum, fieldnum
+_vectorkey_struct = Struct("!IH")
+
+
+class TermVectorWriter(TermIndexWriter):
+    def keycoder(self, key):
+        fieldmap = self.fieldmap
+        docnum, fieldname = key
+
+        if fieldname in fieldmap:
+            fieldnum = fieldmap[fieldname]
+        else:
+            fieldnum = self.fieldcounter
+            fieldmap[fieldname] = fieldnum
+            self.fieldcounter += 1
+
+        return _vectorkey_struct.pack(docnum, fieldnum)
+
+    def valuecoder(self, offset):
+        return pack_long(offset)
+
+
+class TermVectorReader(TermIndexReader):
+    def keycoder(self, key):
+        return _vectorkey_struct.pack(key[0], self.fieldmap[key[1]])
+
+    def keydecoder(self, v):
+        docnum, fieldnum = _vectorkey_struct.unpack(v)
+        return (docnum, self.names[fieldnum])
+
+    def valuedecoder(self, v):
+        return unpack_long(v)[0]
+
+
+class LengthWriter(object):
+    def __init__(self, dbfile, doccount, lengths=None):
+        self.dbfile = dbfile
+        self.doccount = doccount
+        if lengths is not None:
+            self.lengths = lengths
+        else:
+            self.lengths = {}
+
+    def add_all(self, items):
+        lengths = self.lengths
+        for docnum, fieldname, byte in items:
+            if byte:
+                if fieldname not in lengths:
+                    lengths[fieldname] = array("B", (0 for _ in xrange(self.doccount)))
+                lengths[fieldname][docnum] = byte
+
+    def add(self, docnum, fieldname, byte):
+        lengths = self.lengths
+        if byte:
+            if fieldname not in lengths:
+                lengths[fieldname] = array("B", (0 for _ in xrange(self.doccount)))
+            lengths[fieldname][docnum] = byte
+
+    def reader(self):
+        return LengthReader(None, self.doccount, lengths=self.lengths)
+
+    def close(self):
+        self.dbfile.write_ushort(len(self.lengths))
+        for fieldname, arry in self.lengths.iteritems():
+            self.dbfile.write_string(fieldname)
+            self.dbfile.write_array(arry)
+        self.dbfile.close()
+
+
+class LengthReader(object):
+    def __init__(self, dbfile, doccount, lengths=None):
+        self.doccount = doccount
+
+        if lengths is not None:
+            self.lengths = lengths
+        else:
+            self.lengths = {}
+            count = dbfile.read_ushort()
+            for _ in xrange(count):
+                fieldname = dbfile.read_string()
+                self.lengths[fieldname] = dbfile.read_array("B", self.doccount)
+            dbfile.close()
+
+    def __iter__(self):
+        for fieldname in self.lengths.keys():
+            for docnum, byte in enumerate(self.lengths[fieldname]):
+                yield docnum, fieldname, byte
+
+    def get(self, docnum, fieldname, default=0):
+        lengths = self.lengths
+        if fieldname not in lengths:
+            return default
+        byte = lengths[fieldname][docnum] or default
+        return byte_to_length(byte)
+
+
+_stored_pointer_struct = Struct("!qI")  # offset, length
+stored_pointer_size = _stored_pointer_struct.size
+pack_stored_pointer = _stored_pointer_struct.pack
+unpack_stored_pointer = _stored_pointer_struct.unpack
+
+
+class StoredFieldWriter(object):
+    def __init__(self, dbfile, fieldnames):
+        self.dbfile = dbfile
+        self.length = 0
+        self.directory = []
+
+        self.dbfile.write_long(0)
+        self.dbfile.write_uint(0)
+
+        self.name_map = {}
+        for i, name in enumerate(fieldnames):
+            self.name_map[name] = i
+
+    def append(self, values):
+        f = self.dbfile
+
+        name_map = self.name_map
+
+        vlist = [None] * len(name_map)
+        for k, v in values.iteritems():
+            if k in name_map:
+                vlist[name_map[k]] = v
+            else:
+                # For dynamic stored fields, put them at the end of the list
+                # as a tuple of (fieldname, value)
+                vlist.append((k, v))
+
+        v = dumps(vlist, -1)[2:-1]
+        self.length += 1
+        self.directory.append(pack_stored_pointer(f.tell(), len(v)))
+        f.write(v)
+
+    def close(self):
+        f = self.dbfile
+        directory_pos = f.tell()
+        f.write_pickle(self.name_map)
+        for pair in self.directory:
+            f.write(pair)
+        f.flush()
+        f.seek(0)
+        f.write_long(directory_pos)
+        f.write_uint(self.length)
+        f.close()
+
+
+class StoredFieldReader(object):
+    def __init__(self, dbfile):
+        self.dbfile = dbfile
+
+        dbfile.seek(0)
+        pos = dbfile.read_long()
+        self.length = dbfile.read_uint()
+
+        dbfile.seek(pos)
+        name_map = dbfile.read_pickle()
+        self.names = [None] * len(name_map)
+        for name, pos in name_map.iteritems():
+            self.names[pos] = name
+        self.directory_offset = dbfile.tell()
+
+    def close(self):
+        self.dbfile.close()
+
+    def __getitem__(self, num):
+        if num > self.length - 1:
+            raise IndexError("Tried to get document %s, file has %s" % (num, self.length))
+
+        dbfile = self.dbfile
+        start = self.directory_offset + num * stored_pointer_size
+        dbfile.seek(start)
+        ptr = dbfile.read(stored_pointer_size)
+        if len(ptr) != stored_pointer_size:
+            raise Exception("Error reading %r @%s %s < %s" % (dbfile, start, len(ptr), stored_pointer_size))
+        position, length = unpack_stored_pointer(ptr)
+        vlist = loads(dbfile.map[position:position + length] + ".")
+
+        names = self.names
+        # Recreate a dictionary by putting the field names and values back
+        # together by position. We can't just use dict(zip(...)) because we
+        # want to filter out the None values.
+        values = dict((names[i], vlist[i]) for i in xrange(len(names))
+                      if vlist[i] is not None)
+
+        # Pull any extra dynamic stored field values off the end of the list
+        if len(vlist) > len(names):
+            values.update(dict(vlist[len(names):]))
+
+        return values
+
+
+# Utility functions
+
+def dump_hash(hashreader):
+    dbfile = hashreader.dbfile
+    read = hashreader.read
+    eod = hashreader.end_of_data
+
+    print "HEADER_SIZE=", HEADER_SIZE, "eod=", eod
+
+    # Dump hashtables
+    for bucketnum in xrange(0, 256):
+        pos, numslots = unpack_header_entry(read(bucketnum * header_entry_size, header_entry_size))
+        if numslots:
+            print "Bucket %d: %d slots" % (bucketnum, numslots)
+
+            dbfile.seek(pos)
+            for _ in xrange(0, numslots):
+                print "  %X : %d" % unpack_pointer(read(pos, pointer_size))
+                pos += pointer_size
+        else:
+            print "Bucket %d empty: %s, %s" % (bucketnum, pos, numslots)
+
+    # Dump keys and values
+    print "-----"
+    pos = HEADER_SIZE
+    dbfile.seek(pos)
+    while pos < eod:
+        keylen, datalen = unpack_lengths(read(pos, lengths_size))
+        keypos = pos + lengths_size
+        datapos = pos + lengths_size + keylen
+        key = read(keypos, keylen)
+        data = read(datapos, datalen)
+        print "%d +%d,%d:%r->%r" % (pos, keylen, datalen, key, data)
+        pos = datapos + datalen
+
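+# For example (a sketch, with the storage and file name assumed):
+#
+#     dump_hash(HashReader(st.open_file("table")))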
+
+##
+#
+#class FixedHashWriter(HashWriter):
+#    def __init__(self, dbfile, keysize, datasize):
+#        self.dbfile = dbfile
+#        dbfile.seek(HEADER_SIZE)
+#        self.hashes = defaultdict(list)
+#        self.keysize = keysize
+#        self.datasize = datasize
+#        self.recordsize = keysize + datasize
+#
+#    def add_all(self, items):
+#        dbfile = self.dbfile
+#        hashes = self.hashes
+#        recordsize = self.recordsize
+#        pos = dbfile.tell()
+#        write = dbfile.write
+#
+#        for key, value in items:
+#            write(key + value)
+#
+#            h = _hash(key)
+#            hashes[h & 255].append((h, pos))
+#            pos += recordsize
+#
+#
+#class FixedHashReader(HashReader):
+#    def __init__(self, dbfile, keysize, datasize):
+#        self.dbfile = dbfile
+#        self.keysize = keysize
+#        self.datasize = datasize
+#        self.recordsize = keysize + datasize
+#
+#        self.map = dbfile.map
+#        self.end_of_data = dbfile.get_uint(0)
+#        self.is_closed = False
+#
+#    def read(self, position, length):
+#        return self.map[position:position + length]
+#
+#    def _ranges(self, pos=HEADER_SIZE):
+#        keysize = self.keysize
+#        recordsize = self.recordsize
+#        eod = self.end_of_data
+#        while pos < eod:
+#            yield (pos, pos + keysize)
+#            pos += recordsize
+#
+#    def __iter__(self):
+#        return self.items()
+#
+#    def __contains__(self, key):
+#        for _ in self._get_data_poses(key):
+#            return True
+#        return False
+#
+#    def items(self):
+#        keysize = self.keysize
+#        datasize = self.datasize
+#        read = self.read
+#        for keypos, datapos in self._ranges():
+#            yield (read(keypos, keysize), read(datapos, datasize))
+#
+#    def keys(self):
+#        keysize = self.keysize
+#        read = self.read
+#        for keypos, _ in self._ranges():
+#            yield read(keypos, keysize)
+#
+#    def values(self):
+#        datasize = self.datasize
+#        read = self.read
+#        for _, datapos in self._ranges():
+#            yield read(datapos, datasize)
+#
+#    def __getitem__(self, key):
+#        for data in self.all(key):
+#            return data
+#        raise KeyError(key)
+#
+#    def get(self, key, default=None):
+#        for data in self.all(key):
+#            return data
+#        return default
+#
+#    def all(self, key):
+#        datasize = self.datasize
+#        read = self.read
+#        for datapos in self._get_data_poses(key):
+#            yield read(datapos, datasize)
+#
+#    def _key_at(self, pos):
+#        return self.read(pos, self.keysize)
+#
+#    def _get_ranges(self, key):
+#        raise NotImplementedError
+#
+#    def _get_data_poses(self, key):
+#        keysize = self.keysize
+#        read = self.read
+#        keyhash = _hash(key)
+#        hpos, hslots = self._hashtable_info(keyhash)
+#        if not hslots:
+#            return
+#
+#        slotpos = hpos + (((keyhash >> 8) % hslots) * pointer_size)
+#        for _ in xrange(hslots):
+#            slothash, pos = unpack_pointer(read(slotpos, pointer_size))
+#            if not pos:
+#                return
+#
+#            slotpos += pointer_size
+#            # If we reach the end of the hashtable, wrap around
+#            if slotpos == hpos + (hslots * pointer_size):
+#                slotpos = hpos
+#
+#            if slothash == keyhash:
+#                if key == read(pos, keysize):
+#                    yield pos + keysize
+
+
diff --git a/lib/whoosh/whoosh/filedb/filewriting.py b/lib/whoosh/whoosh/filedb/filewriting.py
new file mode 100644
index 0000000..9c53425
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/filewriting.py
@@ -0,0 +1,580 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from __future__ import with_statement
+from bisect import bisect_right
+from collections import defaultdict
+
+from whoosh.fields import UnknownFieldError
+from whoosh.filedb.fileindex import Segment
+from whoosh.filedb.filepostings import FilePostingWriter
+from whoosh.filedb.filetables import (TermIndexWriter, StoredFieldWriter,
+                                      TermVectorWriter)
+from whoosh.filedb.pools import TempfilePool
+from whoosh.store import LockError
+from whoosh.support.filelock import try_for
+from whoosh.util import fib
+from whoosh.writing import IndexWriter, IndexingError
+
+
+# Merge policies
+
+# A merge policy is a callable that takes the SegmentWriter object and the
+# current segment list (not including the segment being written), and returns
+# an updated segment list (not including the segment being written). A sketch
+# of a custom policy appears after the built-in policies below.
+
+def NO_MERGE(writer, segments):
+    """This policy does not merge any existing segments.
+    """
+    return segments
+
+
+def MERGE_SMALL(writer, segments):
+    """This policy merges small segments, where "small" is defined using a
+    heuristic based on the Fibonacci sequence.
+    """
+
+    from whoosh.filedb.filereading import SegmentReader
+    newsegments = []
+    sorted_segment_list = sorted((s.doc_count_all(), s) for s in segments)
+    total_docs = 0
+    for i, (count, seg) in enumerate(sorted_segment_list):
+        if count > 0:
+            total_docs += count
+            if total_docs < fib(i + 5):
+                reader = SegmentReader(writer.storage, writer.schema, seg)
+                writer.add_reader(reader)
+                reader.close()
+            else:
+                newsegments.append(seg)
+    return newsegments
+
+
+def OPTIMIZE(writer, segments):
+    """This policy merges all existing segments.
+    """
+
+    from whoosh.filedb.filereading import SegmentReader
+
+    for seg in segments:
+        reader = SegmentReader(writer.storage, writer.schema, seg)
+        writer.add_reader(reader)
+        reader.close()
+    return []
+
+
+def MERGE_SQUARES(writer, segments):
+    """This is an alternative merge policy similar to Lucene's. It is less
+    optimal than the default MERGE_SMALL.
+    """
+
+    from whoosh.filedb.filereading import SegmentReader
+
+    sizedsegs = [(s.doc_count_all(), s) for s in segments]
+    tomerge = []
+    for size in (10, 100, 1000, 10000, 100000):
+        smaller = [seg for segsize, seg in sizedsegs
+                   if segsize < size - 1 and segsize >= size//10]
+        if len(smaller) >= 10:
+            tomerge.extend(smaller)
+            for seg in smaller:
+                segments.remove(seg)
+
+    for seg in tomerge:
+        reader = SegmentReader(writer.storage, writer.schema, seg)
+        writer.add_reader(reader)
+        reader.close()
+
+    return segments
+
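+
+# Editorial sketch (not part of Whoosh): a custom merge policy uses the same
+# (writer, segments) signature as the built-in policies above. This
+# hypothetical MERGE_UNDER policy folds every segment smaller than a fixed
+# document count into the writer's pool and keeps the rest, and could be
+# passed to commit() via the ``mergetype`` argument.
+def MERGE_UNDER(writer, segments, maxdocs=1000):
+    """Merges segments containing fewer than ``maxdocs`` documents and leaves
+    larger segments alone. Illustrative only.
+    """
+
+    from whoosh.filedb.filereading import SegmentReader
+
+    newsegments = []
+    for seg in segments:
+        if seg.doc_count_all() < maxdocs:
+            # Copy the small segment's contents into this writer's pool
+            reader = SegmentReader(writer.storage, writer.schema, seg)
+            writer.add_reader(reader)
+            reader.close()
+        else:
+            # Keep larger segments in the index unchanged
+            newsegments.append(seg)
+    return newsegments
+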
+
+# Writer object
+
+class SegmentWriter(IndexWriter):
+    def __init__(self, ix, poolclass=None, procs=0, blocklimit=128,
+                 timeout=0.0, delay=0.1, name=None, _l=True, **poolargs):
+
+        self.writelock = None
+        if _l:
+            self.writelock = ix.lock("WRITELOCK")
+            if not try_for(self.writelock.acquire, timeout=timeout, delay=delay):
+                raise LockError
+        self.readlock = ix.lock("READLOCK")
+
+        info = ix._read_toc()
+        self.schema = info.schema
+        self.segments = info.segments
+        self.storage = ix.storage
+        self.indexname = ix.indexname
+        self.is_closed = False
+
+        self.blocklimit = blocklimit
+        self.segment_number = info.segment_counter + 1
+        self.generation = info.generation + 1
+
+        self._doc_offsets = []
+        base = 0
+        for s in self.segments:
+            self._doc_offsets.append(base)
+            base += s.doc_count_all()
+
+        self.name = name or Segment.basename(self.indexname, self.segment_number)
+        self.docnum = 0
+        self.fieldlength_totals = defaultdict(int)
+        self._added = False
+        self._unique_cache = {}
+
+        # Create a temporary segment to use its .*_filename attributes
+        segment = Segment(self.name, self.generation, 0, None, None)
+
+        # Terms index
+        tf = self.storage.create_file(segment.termsindex_filename)
+        ti = TermIndexWriter(tf)
+        # Term postings file
+        pf = self.storage.create_file(segment.termposts_filename)
+        pw = FilePostingWriter(pf, blocklimit=blocklimit)
+        # Terms writer
+        self.termswriter = TermsWriter(self.schema, ti, pw)
+
+        if self.schema.has_vectored_fields():
+            # Vector index
+            vf = self.storage.create_file(segment.vectorindex_filename)
+            self.vectorindex = TermVectorWriter(vf)
+
+            # Vector posting file
+            vpf = self.storage.create_file(segment.vectorposts_filename)
+            self.vpostwriter = FilePostingWriter(vpf, stringids=True)
+        else:
+            self.vectorindex = None
+            self.vpostwriter = None
+
+        # Stored fields file
+        sf = self.storage.create_file(segment.storedfields_filename)
+        self.storedfields = StoredFieldWriter(sf, self.schema.stored_names())
+
+        # Field lengths file
+        self.lengthfile = self.storage.create_file(segment.fieldlengths_filename)
+
+        # Create the pool
+        if poolclass is None:
+            if procs > 1:
+                from whoosh.filedb.multiproc import MultiPool
+                poolclass = MultiPool
+            else:
+                poolclass = TempfilePool
+        self.pool = poolclass(self.schema, procs=procs, **poolargs)
+
+    def _check_state(self):
+        if self.is_closed:
+            raise IndexingError("This writer is closed")
+
+    def add_field(self, fieldname, fieldspec):
+        self._check_state()
+        if self._added:
+            raise Exception("Can't modify schema after adding data to writer")
+        super(SegmentWriter, self).add_field(fieldname, fieldspec)
+
+    def remove_field(self, fieldname):
+        self._check_state()
+        if self._added:
+            raise Exception("Can't modify schema after adding data to writer")
+        super(SegmentWriter, self).remove_field(fieldname)
+
+    def _document_segment(self, docnum):
+        # Returns the position in self.segments of the segment containing the
+        # given document number.
+
+        offsets = self._doc_offsets
+        if len(offsets) == 1:
+            return 0
+        return bisect_right(offsets, docnum) - 1
+
+    def _segment_and_docnum(self, docnum):
+        # Returns an (index.Segment, segment_docnum) pair for the segment
+        # containing the given document number.
+
+        segmentnum = self._document_segment(docnum)
+        offset = self._doc_offsets[segmentnum]
+        segment = self.segments[segmentnum]
+        return segment, docnum - offset
+
+    def has_deletions(self):
+        """
+        :returns: True if this index has documents that are marked deleted but
+            haven't been optimized out of the index yet.
+        """
+
+        return any(s.has_deletions() for s in self.segments)
+
+    def delete_document(self, docnum, delete=True):
+        self._check_state()
+        if docnum >= sum(seg.doccount for seg in self.segments):
+            raise IndexingError("No document ID %r in this index" % docnum)
+        segment, segdocnum = self._segment_and_docnum(docnum)
+        segment.delete_document(segdocnum, delete=delete)
+
+    def deleted_count(self):
+        """
+        :returns: the total number of deleted documents in the index.
+        """
+
+        return sum(s.deleted_count() for s in self.segments)
+
+    def is_deleted(self, docnum):
+        segment, segdocnum = self._segment_and_docnum(docnum)
+        return segment.is_deleted(segdocnum)
+
+    def reader(self, reuse=None):
+        self._check_state()
+        from whoosh.filedb.fileindex import FileIndex
+
+        return FileIndex._reader(self.storage, self.schema, self.segments,
+                                 self.generation, reuse=reuse)
+
+    def add_reader(self, reader):
+        self._check_state()
+        startdoc = self.docnum
+
+        has_deletions = reader.has_deletions()
+        if has_deletions:
+            docmap = {}
+
+        fieldnames = set(self.schema.names())
+
+        # Add stored documents, vectors, and field lengths
+        for docnum in reader.all_doc_ids():
+            if (not has_deletions) or (not reader.is_deleted(docnum)):
+                d = dict(item for item
+                         in reader.stored_fields(docnum).iteritems()
+                         if item[0] in fieldnames)
+                # We have to append a dictionary for every document, even if
+                # it's empty.
+                self.storedfields.append(d)
+
+                if has_deletions:
+                    docmap[docnum] = self.docnum
+
+                for fieldname, length in reader.doc_field_lengths(docnum):
+                    if fieldname in fieldnames:
+                        self.pool.add_field_length(self.docnum, fieldname, length)
+
+                for fieldname in reader.schema.vector_names():
+                    if (fieldname in fieldnames
+                        and reader.has_vector(docnum, fieldname)):
+                        vpostreader = reader.vector(docnum, fieldname)
+                        self._add_vector_reader(self.docnum, fieldname, vpostreader)
+
+                self.docnum += 1
+
+        for fieldname, text, _, _ in reader:
+            if fieldname in fieldnames:
+                postreader = reader.postings(fieldname, text)
+                while postreader.is_active():
+                    docnum = postreader.id()
+                    valuestring = postreader.value()
+                    if has_deletions:
+                        newdoc = docmap[docnum]
+                    else:
+                        newdoc = startdoc + docnum
+
+                    self.pool.add_posting(fieldname, text, newdoc,
+                                          postreader.weight(), valuestring)
+                    postreader.next()
+
+        self._added = True
+
+    def add_document(self, **fields):
+        #from whoosh.util import now
+        #t = now()
+        self._check_state()
+        schema = self.schema
+
+        # Sort the keys
+        fieldnames = sorted([name for name in fields.keys()
+                             if not name.startswith("_")])
+
+        # Check if the caller gave us a bogus field
+        for name in fieldnames:
+            if name not in schema:
+                raise UnknownFieldError("No field named %r in %s" % (name, schema))
+
+        storedvalues = {}
+
+        docnum = self.docnum
+        for fieldname in fieldnames:
+            value = fields.get(fieldname)
+            if value is not None:
+                field = schema[fieldname]
+
+                if field.indexed:
+                    self.pool.add_content(docnum, fieldname, field, value)
+
+                vformat = field.vector
+                if vformat:
+                    vlist = sorted((w, weight, valuestring)
+                                   for w, freq, weight, valuestring
+                                   in vformat.word_values(value, mode="index"))
+                    self._add_vector(docnum, fieldname, vlist)
+
+                if field.stored:
+                    # Caller can override the stored value by including a key
+                    # _stored_<fieldname>
+                    storedvalue = value
+                    storedname = "_stored_" + fieldname
+                    if storedname in fields:
+                        storedvalue = fields[storedname]
+                    storedvalues[fieldname] = storedvalue
+
+        self._added = True
+        self.storedfields.append(storedvalues)
+        self.docnum += 1
+        #print "%f" % (now() - t)
+
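+    # Editorial example of the ``_stored_<fieldname>`` override handled above:
+    # assuming a schema with a stored TEXT field named "title" (hypothetical),
+    #
+    #     writer.add_document(title=u"<b>Hello</b>", _stored_title=u"Hello")
+    #
+    # indexes the marked-up value but stores the plain one.
+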
+    #def update_document(self, **fields):
+
+    def _add_vector(self, docnum, fieldname, vlist):
+        vpostwriter = self.vpostwriter
+        offset = vpostwriter.start(self.schema[fieldname].vector)
+        for text, weight, valuestring in vlist:
+            assert isinstance(text, unicode), "%r is not unicode" % text
+            vpostwriter.write(text, weight, valuestring, 0)
+        vpostwriter.finish()
+
+        self.vectorindex.add((docnum, fieldname), offset)
+
+    def _add_vector_reader(self, docnum, fieldname, vreader):
+        vpostwriter = self.vpostwriter
+        offset = vpostwriter.start(self.schema[fieldname].vector)
+        while vreader.is_active():
+            # text, weight, valuestring, fieldlen
+            vpostwriter.write(vreader.id(), vreader.weight(), vreader.value(), 0)
+            vreader.next()
+        vpostwriter.finish()
+
+        self.vectorindex.add((docnum, fieldname), offset)
+
+    def _close_all(self):
+        self.is_closed = True
+
+        self.termswriter.close()
+        self.storedfields.close()
+        if not self.lengthfile.is_closed:
+            self.lengthfile.close()
+        if self.vectorindex:
+            self.vectorindex.close()
+        if self.vpostwriter:
+            self.vpostwriter.close()
+
+    def _getsegment(self):
+        return Segment(self.name, self.generation, self.docnum,
+                       self.pool.fieldlength_totals(),
+                       self.pool.fieldlength_maxes())
+
+    def commit(self, mergetype=None, optimize=False, merge=True):
+        """Finishes writing and saves all additions and changes to disk.
+
+        There are four possible ways to use this method::
+
+            # Merge small segments but leave large segments, trying to
+            # balance fast commits with fast searching:
+            writer.commit()
+
+            # Merge all segments into a single segment:
+            writer.commit(optimize=True)
+
+            # Don't merge any existing segments:
+            writer.commit(merge=False)
+
+            # Use a custom merge function
+            writer.commit(mergetype=my_merge_function)
+
+        :param mergetype: a custom merge function taking a Writer object and
+            segment list as arguments, and returning a new segment list. If you
+            supply a ``mergetype`` function, the values of the ``optimize`` and
+            ``merge`` arguments are ignored.
+        :param optimize: if True, all existing segments are merged with the
+            documents you've added to this writer (and the value of the
+            ``merge`` argument is ignored).
+        :param merge: if False, do not merge small segments.
+        """
+
+        self._check_state()
+        try:
+            if mergetype:
+                pass
+            elif optimize:
+                mergetype = OPTIMIZE
+            elif not merge:
+                mergetype = NO_MERGE
+            else:
+                mergetype = MERGE_SMALL
+
+            # Call the merge policy function. The policy may choose to merge other
+            # segments into this writer's pool
+            new_segments = mergetype(self, self.segments)
+
+            # Tell the pool we're finished adding information, it should add its
+            # accumulated data to the lengths, terms index, and posting files.
+            if self._added:
+                self.pool.finish(self.termswriter, self.docnum, self.lengthfile)
+
+                # Create a Segment object for the segment created by this writer and
+                # add it to the list of remaining segments returned by the merge policy
+                # function
+                new_segments.append(self._getsegment())
+            else:
+                self.pool.cleanup()
+
+            # Close all files, write a new TOC with the new segment list, and
+            # release the lock.
+            self._close_all()
+
+            from whoosh.filedb.fileindex import _write_toc, _clean_files
+            _write_toc(self.storage, self.schema, self.indexname, self.generation,
+                       self.segment_number, new_segments)
+
+            self.readlock.acquire(True)
+            try:
+                _clean_files(self.storage, self.indexname, self.generation, new_segments)
+            finally:
+                self.readlock.release()
+
+        finally:
+            if self.writelock:
+                self.writelock.release()
+
+    def cancel(self):
+        self._check_state()
+        try:
+            self.pool.cancel()
+            self._close_all()
+        finally:
+            if self.writelock:
+                self.writelock.release()
+
+
+class TermsWriter(object):
+    def __init__(self, schema, termsindex, postwriter, inlinelimit=1):
+        self.schema = schema
+        self.termsindex = termsindex
+        self.postwriter = postwriter
+        self.inlinelimit = inlinelimit
+
+        self.lastfn = None
+        self.lasttext = None
+        self.format = None
+        self.offset = None
+
+    def _new_term(self, fieldname, text):
+        lastfn = self.lastfn
+        lasttext = self.lasttext
+        if fieldname < lastfn or (fieldname == lastfn and text < lasttext):
+            raise Exception("Postings are out of order: %r:%s .. %r:%s" %
+                            (lastfn, lasttext, fieldname, text))
+
+        if fieldname != lastfn:
+            self.format = self.schema[fieldname].format
+
+        if fieldname != lastfn or text != lasttext:
+            self._finish_term()
+            # Reset the term attributes
+            self.weight = 0
+            self.offset = self.postwriter.start(self.format)
+            self.lasttext = text
+            self.lastfn = fieldname
+
+    def _finish_term(self):
+        postwriter = self.postwriter
+        if self.lasttext is not None:
+            postcount = postwriter.posttotal
+            if postcount <= self.inlinelimit and postwriter.blockcount < 1:
+                offset = postwriter.as_inline()
+                postwriter.cancel()
+            else:
+                offset = self.offset
+                postwriter.finish()
+
+            self.termsindex.add((self.lastfn, self.lasttext),
+                                (self.weight, offset, postcount))
+
+    def add_postings(self, fieldname, text, matcher, getlen, offset=0, docmap=None):
+        self._new_term(fieldname, text)
+        postwrite = self.postwriter.write
+        totalweight = 0
+        while matcher.is_active():
+            docnum = matcher.id()
+            weight = matcher.weight()
+            valuestring = matcher.value()
+            if docmap:
+                newdoc = docmap[docnum]
+            else:
+                newdoc = offset + docnum
+            totalweight += weight
+            postwrite(newdoc, weight, valuestring, getlen(docnum, fieldname))
+            matcher.next()
+        self.weight += totalweight
+
+    def add_iter(self, postiter, getlen, offset=0, docmap=None):
+        _new_term = self._new_term
+        postwrite = self.postwriter.write
+        for fieldname, text, docnum, weight, valuestring in postiter:
+            _new_term(fieldname, text)
+            if docmap:
+                newdoc = docmap[docnum]
+            else:
+                newdoc = offset + docnum
+            self.weight += weight
+            postwrite(newdoc, weight, valuestring, getlen(docnum, fieldname))
+
+    def add(self, fieldname, text, docnum, weight, valuestring, fieldlen):
+        self._new_term(fieldname, text)
+        self.weight += weight
+        self.postwriter.write(docnum, weight, valuestring, fieldlen)
+
+    def close(self):
+        self._finish_term()
+        self.termsindex.close()
+        self.postwriter.close()
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/filedb/gae.py b/lib/whoosh/whoosh/filedb/gae.py
new file mode 100644
index 0000000..25fd8f4
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/gae.py
@@ -0,0 +1,156 @@
+"""
+This module contains EXPERIMENTAL support for storing a Whoosh index's files in
+the Google App Engine datastore as blob properties. This can use a lot of memory
+since whole files are loaded into RAM, but it is potentially useful as a
+workaround for the lack of file storage in Google App Engine.
+
+Use at your own risk, but please report any problems to me so I can fix them.
+
+To create a new index::
+
+    from whoosh.filedb.gae import DatastoreStorage
+
+    ix = DatastoreStorage().create_index(schema)
+
+To open an existing index::
+
+    ix = DatastoreStorage().open_index()
+
+An illustrative indexing and searching sketch appears at the end of this module.
+"""
+
+from cStringIO import StringIO
+
+from google.appengine.api import memcache
+from google.appengine.ext import db
+
+from whoosh.store import Storage
+from whoosh.filedb.fileindex import _create_index, FileIndex, _DEF_INDEX_NAME
+from whoosh.filedb.filestore import ReadOnlyError
+from whoosh.filedb.structfile import StructFile
+
+
+class DatastoreFile(db.Model):
+    """A file-like object that is backed by a StringIO() object whose contents
+    is loaded from a BlobProperty in the app engine datastore.
+    """
+
+    value = db.BlobProperty()
+
+    def __init__(self, *args, **kwargs):
+        super(DatastoreFile, self).__init__(*args, **kwargs)
+        self.data = StringIO()
+
+    @classmethod
+    def loadfile(cls, name):
+        value = memcache.get(name, namespace="DatastoreFile")
+        if value is None:
+            file = cls.get_by_key_name(name)
+            memcache.set(name, file.value, namespace="DatastoreFile")
+        else:
+            file = cls(value=value)
+        file.data = StringIO(file.value)
+        return file
+
+    def close(self):
+        oldvalue = self.value
+        self.value = self.getvalue()
+        if oldvalue != self.value:
+            self.put()
+            memcache.set(self.key().id_or_name(), self.value, namespace="DatastoreFile")
+
+    def tell(self):
+        return self.data.tell()
+
+    def write(self, data):
+        return self.data.write(data)
+
+    def read(self, length):
+        return self.data.read(length)
+
+    def seek(self, *args):
+        return self.data.seek(*args)
+
+    def readline(self):
+        return self.data.readline()
+
+    def getvalue(self):
+        return self.data.getvalue()
+
+
+class MemcacheLock(object):
+    def __init__(self, name):
+        self.name = name
+
+    def acquire(self, blocking=False):
+        val = memcache.add(self.name, "L", 360, namespace="whooshlocks")
+
+        if blocking and not val:
+            # Simulate blocking by retrying the acquire over and over
+            import time
+            while not val:
+                time.sleep(0.1)
+                val = memcache.add(self.name, "", 360, namespace="whooshlocks")
+
+        return val
+
+    def release(self):
+        memcache.delete(self.name, namespace="whooshlocks")
+
+
+class DatastoreStorage(Storage):
+    """An implementation of :class:`whoosh.store.Storage` that stores files in
+    the app engine datastore as blob properties.
+    """
+
+    def create_index(self, schema, indexname=_DEF_INDEX_NAME):
+        if self.readonly:
+            raise ReadOnlyError
+
+        _create_index(self, schema, indexname)
+        return FileIndex(self, schema, indexname)
+
+    def open_index(self, indexname=_DEF_INDEX_NAME, schema=None):
+        return FileIndex(self, schema=schema, indexname=indexname)
+
+    def list(self):
+        query = DatastoreFile.all()
+        keys = []
+        for file in query:
+            keys.append(file.key().id_or_name())
+        return keys
+
+    def clean(self):
+        pass
+
+    def total_size(self):
+        return sum(self.file_length(f) for f in self.list())
+
+    def file_exists(self, name):
+        return DatastoreFile.get_by_key_name(name) is not None
+
+    def file_length(self, name):
+        return len(DatastoreFile.get_by_key_name(name).value)
+
+    def delete_file(self, name):
+        memcache.delete(name, namespace="DatastoreFile")
+        return DatastoreFile.get_by_key_name(name).delete()
+
+    def rename_file(self, name, newname, safe=False):
+        file = DatastoreFile.get_by_key_name(name)
+        newfile = DatastoreFile(key_name=newname)
+        newfile.value = file.value
+        newfile.put()
+        file.delete()
+
+    def create_file(self, name, **kwargs):
+        f = StructFile(DatastoreFile(key_name=name), name=name,
+                       onclose=lambda sfile: sfile.file.close())
+        return f
+
+    def open_file(self, name, *args, **kwargs):
+        return StructFile(DatastoreFile.loadfile(name))
+
+    def lock(self, name):
+        return MemcacheLock(name)
+
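+
+# Editorial sketch (not part of Whoosh): a minimal end-to-end example of
+# indexing and searching with DatastoreStorage, assuming the standard
+# whoosh.fields and whoosh.qparser APIs. The schema and field names below are
+# hypothetical.
+def _example_index_and_search():
+    from whoosh.fields import Schema, ID, TEXT
+    from whoosh.qparser import QueryParser
+
+    # Create an index backed by the App Engine datastore
+    schema = Schema(docid=ID(stored=True), body=TEXT)
+    storage = DatastoreStorage()
+    ix = storage.create_index(schema)
+
+    # Add a document; TEXT field values must be unicode
+    writer = ix.writer()
+    writer.add_document(docid=u"1", body=u"hello world")
+    writer.commit()
+
+    # Query the index and return the stored ids of matching documents
+    searcher = ix.searcher()
+    try:
+        query = QueryParser("body", schema=ix.schema).parse(u"hello")
+        return [hit["docid"] for hit in searcher.search(query)]
+    finally:
+        searcher.close()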
+
+
diff --git a/lib/whoosh/whoosh/filedb/multiproc.py b/lib/whoosh/whoosh/filedb/multiproc.py
new file mode 100644
index 0000000..f071b43
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/multiproc.py
@@ -0,0 +1,358 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+import os
+import tempfile
+from multiprocessing import Process, Queue, cpu_count
+from cPickle import dump, load
+
+from whoosh.filedb.filetables import LengthWriter, LengthReader
+from whoosh.filedb.fileindex import Segment
+from whoosh.filedb.filewriting import SegmentWriter
+from whoosh.filedb.pools import (imerge, read_run, PoolBase, TempfilePool)
+from whoosh.filedb.structfile import StructFile
+from whoosh.writing import IndexWriter
+
+
+# Multiprocessing writer
+
+class SegmentWritingTask(Process):
+    def __init__(self, storage, indexname, segname, kwargs, jobqueue,
+                 resultqueue, firstjob=None):
+        Process.__init__(self)
+        self.storage = storage
+        self.indexname = indexname
+        self.segname = segname
+        self.kwargs = kwargs
+        self.jobqueue = jobqueue
+        self.resultqueue = resultqueue
+        self.firstjob = firstjob
+
+        self.segment = None
+        self.running = True
+
+    def _add_file(self, args):
+        writer = self.writer
+        filename, length = args
+        f = open(filename, "rb")
+        for _ in xrange(length):
+            writer.add_document(**load(f))
+        f.close()
+        os.remove(filename)
+
+    def run(self):
+        jobqueue = self.jobqueue
+        ix = self.storage.open_index(self.indexname)
+        writer = self.writer = SegmentWriter(ix, _l=False, name=self.segname,
+                                             **self.kwargs)
+
+        if self.firstjob:
+            self._add_file(self.firstjob)
+
+        while self.running:
+            args = jobqueue.get()
+            if args is None:
+                break
+            self._add_file(args)
+
+        if not self.running:
+            writer.cancel()
+        else:
+            writer.pool.finish(writer.termswriter, writer.docnum,
+                               writer.lengthfile)
+            writer._close_all()
+            self.resultqueue.put(writer._getsegment())
+
+    def cancel(self):
+        self.running = False
+
+
+class MultiSegmentWriter(IndexWriter):
+    def __init__(self, ix, procs=None, batchsize=100, dir=None, **kwargs):
+        self.index = ix
+        self.procs = procs or cpu_count()
+        self.bufferlimit = batchsize
+        self.dir = dir
+        self.kwargs = kwargs
+        self.kwargs["dir"] = dir
+
+        self.segnames = []
+        self.tasks = []
+        self.jobqueue = Queue(self.procs * 4)
+        self.resultqueue = Queue()
+        self.docbuffer = []
+
+        self.writelock = ix.lock("WRITELOCK")
+        self.writelock.acquire()
+
+        info = ix._read_toc()
+        self.schema = info.schema
+        self.segment_number = info.segment_counter
+        self.generation = info.generation + 1
+        self.segments = info.segments
+        self.storage = ix.storage
+
+    def _new_task(self, firstjob):
+        ix = self.index
+        self.segment_number += 1
+        segmentname = Segment.basename(ix.indexname, self.segment_number)
+        task = SegmentWritingTask(ix.storage, ix.indexname, segmentname,
+                                  self.kwargs, self.jobqueue,
+                                  self.resultqueue, firstjob)
+        self.tasks.append(task)
+        task.start()
+        return task
+
+    def _enqueue(self):
+        doclist = self.docbuffer
+        fd, filename = tempfile.mkstemp(".doclist", dir=self.dir)
+        f = os.fdopen(fd, "wb")
+        for doc in doclist:
+            dump(doc, f, -1)
+        f.close()
+        args = (filename, len(doclist))
+
+        if len(self.tasks) < self.procs:
+            self._new_task(args)
+        else:
+            self.jobqueue.put(args)
+
+        self.docbuffer = []
+
+    def cancel(self):
+        try:
+            for task in self.tasks:
+                task.cancel()
+        finally:
+            self.writelock.release()
+
+    def add_document(self, **fields):
+        self.docbuffer.append(fields)
+        if len(self.docbuffer) >= self.bufferlimit:
+            self._enqueue()
+
+    def commit(self, **kwargs):
+        try:
+            for task in self.tasks:
+                self.jobqueue.put(None)
+
+            for task in self.tasks:
+                task.join()
+
+            for task in self.tasks:
+                taskseg = self.resultqueue.get()
+                assert isinstance(taskseg, Segment), type(taskseg)
+                self.segments.append(taskseg)
+
+            self.jobqueue.close()
+            self.resultqueue.close()
+
+            from whoosh.filedb.fileindex import _write_toc, _clean_files
+            _write_toc(self.storage, self.schema, self.index.indexname,
+                       self.generation, self.segment_number, self.segments)
+
+            readlock = self.index.lock("READLOCK")
+            readlock.acquire(True)
+            try:
+                _clean_files(self.storage, self.index.indexname,
+                             self.generation, self.segments)
+            finally:
+                readlock.release()
+        finally:
+            self.writelock.release()
+
+
+# Multiprocessing pool
+
+class PoolWritingTask(Process):
+    def __init__(self, schema, dir, jobqueue, resultqueue, limitmb,
+                 firstjob=None):
+        Process.__init__(self)
+        self.schema = schema
+        self.dir = dir
+        self.jobqueue = jobqueue
+        self.resultqueue = resultqueue
+        self.limitmb = limitmb
+        self.firstjob = firstjob
+
+    def _add_file(self, filename, length):
+        subpool = self.subpool
+        f = open(filename, "rb")
+        for _ in xrange(length):
+            code, args = load(f)
+            if code == 0:
+                subpool.add_content(*args)
+            elif code == 1:
+                subpool.add_posting(*args)
+            elif code == 2:
+                subpool.add_field_length(*args)
+        f.close()
+        os.remove(filename)
+
+    def run(self):
+        jobqueue = self.jobqueue
+        rqueue = self.resultqueue
+        subpool = self.subpool = TempfilePool(self.schema, limitmb=self.limitmb,
+                                              dir=self.dir)
+
+        if self.firstjob:
+            self._add_file(*self.firstjob)
+
+        while True:
+            arg1, arg2 = jobqueue.get()
+            if arg1 is None:
+                doccount = arg2
+                break
+            else:
+                self._add_file(arg1, arg2)
+
+        lenfd, lenfilename = tempfile.mkstemp(".lengths", dir=subpool.dir)
+        lenf = os.fdopen(lenfd, "wb")
+        subpool._write_lengths(StructFile(lenf), doccount)
+        subpool.dump_run()
+        rqueue.put((subpool.runs, subpool.fieldlength_totals(),
+                    subpool.fieldlength_maxes(), lenfilename))
+
+
+class MultiPool(PoolBase):
+    def __init__(self, schema, dir=None, procs=2, limitmb=32, batchsize=100,
+                 **kw):
+        PoolBase.__init__(self, schema, dir=dir)
+        self._make_dir()
+
+        self.procs = procs
+        self.limitmb = limitmb
+        self.jobqueue = Queue(self.procs * 4)
+        self.resultqueue = Queue()
+        self.tasks = []
+        self.buffer = []
+        self.bufferlimit = batchsize
+
+    def _new_task(self, firstjob):
+        task = PoolWritingTask(self.schema, self.dir, self.jobqueue,
+                               self.resultqueue, self.limitmb, firstjob=firstjob)
+        self.tasks.append(task)
+        task.start()
+        return task
+
+    def _enqueue(self):
+        commandlist = self.buffer
+        fd, filename = tempfile.mkstemp(".commands", dir=self.dir)
+        f = os.fdopen(fd, "wb")
+        for command in commandlist:
+            dump(command, f, -1)
+        f.close()
+        args = (filename, len(commandlist))
+
+        if len(self.tasks) < self.procs:
+            self._new_task(args)
+        else:
+            self.jobqueue.put(args)
+
+        self.buffer = []
+
+    def _append(self, item):
+        self.buffer.append(item)
+        if len(self.buffer) > self.bufferlimit:
+            self._enqueue()
+
+    def add_content(self, *args):
+        self._append((0, args))
+
+    def add_posting(self, *args):
+        self._append((1, args))
+
+    def add_field_length(self, *args):
+        self._append((2, args))
+
+    def cancel(self):
+        for task in self.tasks:
+            task.terminate()
+        self.cleanup()
+
+    def cleanup(self):
+        self._clean_temp_dir()
+
+    def finish(self, termswriter, doccount, lengthfile):
+        if self.buffer:
+            self._enqueue()
+
+        _fieldlength_totals = self._fieldlength_totals
+        if not self.tasks:
+            return
+
+        jobqueue = self.jobqueue
+        rqueue = self.resultqueue
+
+        for task in self.tasks:
+            jobqueue.put((None, doccount))
+
+        for task in self.tasks:
+            task.join()
+
+        runs = []
+        lenfilenames = []
+        for task in self.tasks:
+            taskruns, flentotals, flenmaxes, lenfilename = rqueue.get()
+            runs.extend(taskruns)
+            lenfilenames.append(lenfilename)
+            for fieldnum, total in flentotals.iteritems():
+                _fieldlength_totals[fieldnum] += total
+            for fieldnum, length in flenmaxes.iteritems():
+                if length > self._fieldlength_maxes.get(fieldnum, 0):
+                    self._fieldlength_maxes[fieldnum] = length
+
+        jobqueue.close()
+        rqueue.close()
+
+        lw = LengthWriter(lengthfile, doccount)
+        for lenfilename in lenfilenames:
+            sublengths = LengthReader(StructFile(open(lenfilename, "rb")), doccount)
+            lw.add_all(sublengths)
+            os.remove(lenfilename)
+        lw.close()
+        lengths = lw.reader()
+
+#        if len(runs) >= self.procs * 2:
+#            pool = Pool(self.procs)
+#            tempname = lambda: tempfile.mktemp(suffix=".run", dir=self.dir)
+#            while len(runs) >= self.procs * 2:
+#                runs2 = [(runs[i:i+4], tempname())
+#                         for i in xrange(0, len(runs), 4)]
+#                if len(runs) % 4:
+#                    last = runs2.pop()[0]
+#                    runs2[-1][0].extend(last)
+#                runs = pool.map(merge_runs, runs2)
+#            pool.close()
+
+        iterator = imerge([read_run(runname, count) for runname, count in runs])
+        total = sum(count for runname, count in runs)
+        termswriter.add_iter(iterator, lengths.get)
+        for runname, count in runs:
+            os.remove(runname)
+
+        self.cleanup()
diff --git a/lib/whoosh/whoosh/filedb/pools.py b/lib/whoosh/whoosh/filedb/pools.py
new file mode 100644
index 0000000..e5c68d6
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/pools.py
@@ -0,0 +1,445 @@
+
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from __future__ import with_statement
+import os
+import tempfile
+from array import array
+from collections import defaultdict
+from heapq import heapify, heappop, heapreplace
+from marshal import load, dump
+#import sqlite3 as sqlite
+
+from whoosh.filedb.filetables import LengthWriter, LengthReader
+from whoosh.util import length_to_byte
+
+
+try:
+    from sys import getsizeof
+except ImportError:
+    # If this is Python 2.5, rig up a guesstimated version of getsizeof
+    def getsizeof(obj):
+        if obj is None:
+            return 8
+        t = type(obj)
+        if t is int:
+            return 12
+        elif t is float:
+            return 16
+        elif t is long:
+            return 16
+        elif t is str:
+            return 21 + len(obj)
+        elif t is unicode:
+            return 26 + 2 * len(obj)
+
+
+try:
+    from heapq import merge
+    def imerge(iterables):
+        return merge(*iterables)
+except ImportError:
+    def imerge(iterables):
+        """Merge-sorts items from a list of iterators.
+        """
+
+        _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
+
+        h = []
+        h_append = h.append
+        for itnum, it in enumerate(map(iter, iterables)):
+            try:
+                next = it.next
+                h_append([next(), itnum, next])
+            except _StopIteration:
+                pass
+        heapify(h)
+
+        while 1:
+            try:
+                while 1:
+                    v, itnum, next = s = h[0]   # raises IndexError when h is empty
+                    yield v
+                    s[0] = next()               # raises StopIteration when exhausted
+                    _heapreplace(h, s)          # restore heap condition
+            except _StopIteration:
+                _heappop(h)                     # remove empty iterator
+            except IndexError:
+                return
+
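+# Editorial example: given already-sorted inputs, imerge yields a single sorted
+# stream, e.g. list(imerge([[1, 4, 7], [2, 5], [3, 6]])) == [1, 2, 3, 4, 5, 6, 7].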
+
+def read_run(filename, count, atatime=100):
+    with open(filename, "rb") as f:
+        while count:
+            buff = []
+            take = min(atatime, count)
+            for _ in xrange(take):
+                buff.append(load(f))
+            count -= take
+            for item in buff:
+                yield item
+
+
+DEBUG_DIR = False
+
+
+class PoolBase(object):
+    def __init__(self, schema, dir=None, basename=''):
+        self.schema = schema
+        self._using_tempdir = False
+        self.dir = dir
+        self._using_tempdir = dir is None
+        self.basename = basename
+
+        self.length_arrays = {}
+        self._fieldlength_totals = defaultdict(int)
+        self._fieldlength_maxes = {}
+
+    def _make_dir(self):
+        if self.dir is None:
+            self.dir = tempfile.mkdtemp(".whoosh")
+
+            if DEBUG_DIR:
+                dfile = open(self._filename("DEBUG.txt"), "wb")
+                import traceback
+                traceback.print_stack(file=dfile)
+                dfile.close()
+
+    def _filename(self, name):
+        return os.path.abspath(os.path.join(self.dir, self.basename + name))
+
+    def _clean_temp_dir(self):
+        if self._using_tempdir and self.dir and os.path.exists(self.dir):
+            if DEBUG_DIR:
+                os.remove(self._filename("DEBUG.txt"))
+
+            try:
+                os.rmdir(self.dir)
+            except OSError:
+                # directory didn't exist or was not empty -- don't
+                # accidentally delete data
+                pass
+
+    def cleanup(self):
+        self._clean_temp_dir()
+
+    def cancel(self):
+        pass
+
+    def fieldlength_totals(self):
+        return dict(self._fieldlength_totals)
+
+    def fieldlength_maxes(self):
+        return self._fieldlength_maxes
+
+    def add_posting(self, fieldname, text, docnum, weight, valuestring):
+        raise NotImplementedError
+
+    def add_field_length(self, docnum, fieldname, length):
+        self._fieldlength_totals[fieldname] += length
+        if length > self._fieldlength_maxes.get(fieldname, 0):
+            self._fieldlength_maxes[fieldname] = length
+
+        if fieldname not in self.length_arrays:
+            self.length_arrays[fieldname] = array("B")
+        arry = self.length_arrays[fieldname]
+
+        if len(arry) <= docnum:
+            for _ in xrange(docnum - len(arry) + 1):
+                arry.append(0)
+        arry[docnum] = length_to_byte(length)
+
+    def _fill_lengths(self, doccount):
+        for fieldname in self.length_arrays.keys():
+            arry = self.length_arrays[fieldname]
+            if len(arry) < doccount:
+                for _ in xrange(doccount - len(arry)):
+                    arry.append(0)
+
+    def add_content(self, docnum, fieldname, field, value):
+        add_posting = self.add_posting
+        termcount = 0
+        # TODO: Method for adding progressive field values, ie
+        # setting start_pos/start_char?
+        for w, freq, weight, valuestring in field.index(value):
+            #assert w != ""
+            add_posting(fieldname, w, docnum, weight, valuestring)
+            termcount += freq
+
+        if field.scorable and termcount:
+            self.add_field_length(docnum, fieldname, termcount)
+
+        return termcount
+
+    def _write_lengths(self, lengthfile, doccount):
+        self._fill_lengths(doccount)
+        lw = LengthWriter(lengthfile, doccount, lengths=self.length_arrays)
+        lw.close()
+
+
+class TempfilePool(PoolBase):
+    def __init__(self, schema, limitmb=32, dir=None, basename='', **kw):
+        super(TempfilePool, self).__init__(schema, dir=dir, basename=basename)
+
+        self.limit = limitmb * 1024 * 1024
+
+        self.size = 0
+        self.count = 0
+        self.postings = []
+        self.runs = []
+
+    def add_posting(self, fieldname, text, docnum, weight, valuestring):
+        if self.size >= self.limit:
+            self.dump_run()
+
+        tup = (fieldname, text, docnum, weight, valuestring)
+        # 48 bytes for tuple overhead (28 bytes + 4 bytes * 5 items) plus the
+        # sizes of the objects inside the tuple, plus 4 bytes overhead for
+        # putting the tuple in the postings list
+        #self.size += 48 + sum(getsizeof(o) for o in tup) + 4
+        valsize = len(valuestring) if valuestring else 0
+        self.size += 48 + len(fieldname) + 22 + len(text) + 26 + 16 + 16 + valsize + 22 + 4
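+        # Editorial worked example: for fieldname "content" (7 chars), text
+        # u"hello" (5 chars), and no valuestring, this estimates
+        # 48 + 7 + 22 + 5 + 26 + 16 + 16 + 0 + 22 + 4 = 166 bytes.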
+        self.postings.append(tup)
+        self.count += 1
+
+    def dump_run(self):
+        if self.size > 0:
+            self._make_dir()
+            fd, filename = tempfile.mkstemp(".run", dir=self.dir)
+            runfile = os.fdopen(fd, "w+b")
+            self.postings.sort()
+            for p in self.postings:
+                dump(p, runfile)
+            runfile.close()
+
+            self.runs.append((filename, self.count))
+            self.postings = []
+            self.size = 0
+            self.count = 0
+
+    def run_filenames(self):
+        return [filename for filename, _ in self.runs]
+
+    def cancel(self):
+        self.cleanup()
+
+    def cleanup(self):
+        for filename in self.run_filenames():
+            if os.path.exists(filename):
+                try:
+                    os.remove(filename)
+                except IOError:
+                    pass
+
+        self._clean_temp_dir()
+
+    def finish(self, termswriter, doccount, lengthfile):
+        self._write_lengths(lengthfile, doccount)
+        lengths = LengthReader(None, doccount, self.length_arrays)
+
+        if self.postings or self.runs:
+            if self.postings and len(self.runs) == 0:
+                self.postings.sort()
+                postiter = iter(self.postings)
+            elif not self.postings and not self.runs:
+                postiter = iter([])
+            else:
+                self.dump_run()
+                postiter = imerge([read_run(runname, count)
+                                   for runname, count in self.runs])
+
+            termswriter.add_iter(postiter, lengths.get)
+        self.cleanup()
+
+
+# Alternative experimental and testing pools
+
+class SqlitePool(PoolBase):
+    def __init__(self, schema, dir=None, basename='', limitmb=32, **kwargs):
+        super(SqlitePool, self).__init__(schema, dir=dir, basename=basename)
+        self._make_dir()
+        self.postbuf = defaultdict(list)
+        self.bufsize = 0
+        self.limit = limitmb * 1024 * 1024
+        self.fieldnames = set()
+        self._flushed = False
+
+    def _field_filename(self, name):
+        return self._filename("%s.sqlite" % name)
+
+    def _con(self, name):
+        import sqlite3 as sqlite
+
+        filename = self._field_filename(name)
+        con = sqlite.connect(filename)
+        if name not in self.fieldnames:
+            self.fieldnames.add(name)
+            con.execute("create table postings (token text, docnum int, weight float, value blob)")
+            #con.execute("create index postix on postings (token, docnum)")
+        return con
+
+    def flush(self):
+        for fieldname, lst in self.postbuf.iteritems():
+            con = self._con(fieldname)
+            con.executemany("insert into postings values (?, ?, ?, ?)", lst)
+            con.commit()
+            con.close()
+        self.postbuf = defaultdict(list)
+        self.bufsize = 0
+        self._flushed = True
+        print "flushed"
+
+    def add_posting(self, fieldname, text, docnum, weight, valuestring):
+        self.postbuf[fieldname].append((text, docnum, weight, valuestring))
+        self.bufsize += len(text) + 8 + len(valuestring)
+        if self.bufsize > self.limit:
+            self.flush()
+
+    def readback(self):
+        for name in sorted(self.fieldnames):
+            con = self._con(name)
+            con.execute("create index postix on postings (token, docnum)")
+            for text, docnum, weight, valuestring in con.execute("select * from postings order by token, docnum"):
+                yield (name, text, docnum, weight, valuestring)
+            con.close()
+            os.remove(self._field_filename(name))
+
+        if self._using_tempdir and self.dir:
+            try:
+                os.rmdir(self.dir)
+            except OSError:
+                # directory didn't exist or was not empty -- don't
+                # accidentally delete data
+                pass
+
+    def readback_buffer(self):
+        for fieldname in sorted(self.postbuf.keys()):
+            lst = self.postbuf[fieldname]
+            lst.sort()
+            for text, docnum, weight, valuestring in lst:
+                yield (fieldname, text, docnum, weight, valuestring)
+            del self.postbuf[fieldname]
+
+    def finish(self, termswriter, doccount, lengthfile):
+        self._write_lengths(lengthfile, doccount)
+        lengths = LengthReader(None, doccount, self.length_arrays)
+
+        if not self._flushed:
+            gen = self.readback_buffer()
+        else:
+            if self.postbuf:
+                self.flush()
+            gen = self.readback()
+
+        termswriter.add_iter(gen, lengths.get)
+
+
+class NullPool(PoolBase):
+    def __init__(self, *args, **kwargs):
+        self._fieldlength_totals = {}
+        self._fieldlength_maxes = {}
+
+    def add_content(self, *args):
+        pass
+
+    def add_posting(self, *args):
+        pass
+
+    def add_field_length(self, *args, **kwargs):
+        pass
+
+    def finish(self, *args):
+        pass
+
+
+class MemPool(PoolBase):
+    def __init__(self, schema, **kwargs):
+        super(MemPool, self).__init__(schema)
+        self.schema = schema
+        self.postbuf = []
+
+    def add_posting(self, *item):
+        self.postbuf.append(item)
+
+    def finish(self, termswriter, doccount, lengthfile):
+        self._write_lengths(lengthfile, doccount)
+        lengths = LengthReader(None, doccount, self.length_arrays)
+        self.postbuf.sort()
+        termswriter.add_iter(self.postbuf, lengths.get)
+
+
+#class UnixSortPool(PoolBase):
+#    def __init__(self, schema, dir=None, basename='', limitmb=32, **kwargs):
+#        super(UnixSortPool, self).__init__(schema, dir=dir, basename=basename)
+#        self._make_dir()
+#        fd, self.filename = tempfile.mkstemp(".run", dir=self.dir)
+#        self.sortfile = os.fdopen(fd, "wb")
+#        self.linebuffer = []
+#        self.bufferlimit = 100
+#
+#    def add_posting(self, *args):
+#        self.sortfile.write(b64encode(dumps(args)) + "\n")
+#
+#    def finish(self, termswriter, doccount, lengthfile):
+#        self.sortfile.close()
+#        from whoosh.util import now
+#        print "Sorting file...", self.filename
+#        t = now()
+#        outpath = os.path.join(os.path.dirname(self.filename), "sorted.txt")
+#        os.system("sort %s >%s" % (self.filename, outpath))
+#        print "...took", now() - t
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/filedb/postblocks.py b/lib/whoosh/whoosh/filedb/postblocks.py
new file mode 100644
index 0000000..42265fc
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/postblocks.py
@@ -0,0 +1,402 @@
+# Copyright 2011 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from array import array
+from cPickle import dumps, load, loads
+from struct import Struct
+
+try:
+    from zlib import compress, decompress
+    can_compress = True
+except ImportError:
+    can_compress = False
+
+from whoosh.system import _INT_SIZE, _FLOAT_SIZE, pack_uint, IS_LITTLE
+from whoosh.util import utf8decode, length_to_byte, byte_to_length
+
+
+class BlockBase(object):
+    def __init__(self, postfile, stringids=False):
+        self.postfile = postfile
+        self.stringids = stringids
+
+        if stringids:
+            self.ids = []
+        else:
+            self.ids = array("I")
+        self.weights = array("f")
+        self.lengths = array("i")
+        self.values = None
+
+    def __del__(self):
+        try:
+            del self.postfile
+        except:
+            pass
+
+    def __len__(self):
+        return len(self.ids)
+
+    def __nonzero__(self):
+        return bool(self.ids)
+
+    def stats(self):
+        # Calculate block statistics
+        maxweight = max(self.weights)
+        maxwol = 0.0
+        minlength = 0
+        if self.lengths:
+            minlength = min(self.lengths)
+            maxwol = max(w / l for w, l in zip(self.weights, self.lengths))
+
+        return (maxweight, maxwol, minlength)
+
+    def append(self, id, weight, valuestring, dfl):
+        if self.values is None:
+            self.values = []
+
+        self.ids.append(id)
+        self.weights.append(weight)
+        self.values.append(valuestring)
+        if dfl:
+            self.lengths.append(dfl)
+
+
+# Current block format
+
+class Block2(BlockBase):
+    magic = 1114401586  # "Blk2"
+
+    # Offset  Type  Desc
+    # ------  ----  -------
+    # 0       i     Delta to next block
+    # 4       B     Flags (compression)
+    # 5       B     Post count
+    # 6       c     ID array typecode
+    # 7       B     -Unused
+    # 8       i     IDs length
+    # 12      i     Weights length
+    # 16      f     Maximum weight
+    # 20      f     Max weight-over-length
+    # 24      f     -Unused
+    # 28      B     Minimum length, encoded as byte
+    #
+    # Followed by either an unsigned int or string indicating the last ID in
+    # this block
+    _struct = Struct("<iBBcBiifffB")
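+    # Editorial note: the little-endian layout above packs to a fixed 29-byte
+    # header (4 + 1 + 1 + 1 + 1 + 4 + 4 + 4 + 4 + 4 + 1), so the last-ID value
+    # mentioned in the comment starts 29 bytes into the block.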
+
+    @classmethod
+    def from_file(cls, postfile, stringids=False):
+        start = postfile.tell()
+        block = cls(postfile, stringids=stringids)
+        header = cls._struct.unpack(postfile.read(cls._struct.size))
+
+        block.nextoffset = start + header[0]
+        block.compression = header[1]
+        block.postcount = header[2]
+        block.typecode = header[3]
+        block.idslen = header[5]
+        block.weightslen = header[6]
+        block.maxweight = header[7]
+        block.maxwol = header[8]
+        block.minlen = byte_to_length(header[10])
+
+        if stringids:
+            block.maxid = load(postfile)
+        else:
+            block.maxid = postfile.read_uint()
+
+        block.dataoffset = postfile.tell()
+        return block
+
+    def read_ids(self):
+        dataoffset = self.dataoffset
+        ids_string = self.postfile.map[dataoffset:dataoffset + self.idslen]
+        if self.compression:
+            ids_string = decompress(ids_string)
+
+        if self.stringids:
+            ids = loads(ids_string)
+        else:
+            ids = array(self.typecode)
+            ids.fromstring(ids_string)
+            if not IS_LITTLE:
+                ids.byteswap()
+
+        self.ids = ids
+        return ids
+
+    def read_weights(self):
+        if self.weightslen == 0:
+            weights = [1.0] * self.postcount
+        else:
+            offset = self.dataoffset + self.idslen
+            weights_string = self.postfile.map[offset:offset + self.weightslen]
+            if self.compression:
+                weights_string = decompress(weights_string)
+            weights = array("f")
+            weights.fromstring(weights_string)
+            if not IS_LITTLE:
+                weights.byteswap()
+
+        self.weights = weights
+        return weights
+
+    def read_values(self, posting_size):
+        if posting_size == 0:
+            values = [None] * self.postcount
+        else:
+            offset = self.dataoffset + self.idslen + self.weightslen
+            values_string = self.postfile.map[offset:self.nextoffset]
+            if self.compression:
+                values_string = decompress(values_string)
+            if posting_size < 0:
+                values = loads(values_string)
+            else:
+                values = [values_string[i:i + posting_size]
+                          for i in xrange(0, len(values_string), posting_size)]
+
+        self.values = values
+        return values
+
+    def to_file(self, postfile, posting_size, compression=3):
+        stringids = self.stringids
+        ids = self.ids
+        weights = self.weights
+        values = self.values
+        postcount = len(ids)
+        maxweight, maxwol, minlength = self.stats()
+
+        if postcount <= 4 or not can_compress:
+            compression = 0
+
+        # Max ID
+        maxid = ids[-1]
+        if stringids:
+            maxid_string = dumps(maxid, -1)[2:]
+        else:
+            maxid_string = pack_uint(maxid)
+
+        # IDs
+        typecode = "I"
+        if stringids:
+            ids_string = dumps(ids, -1)[2:]
+            typecode = "s"
+        else:
+            if maxid <= 255:
+                typecode = "B"
+            elif maxid <= 65535:
+                typecode = "H"
+            if typecode != ids.typecode:
+                ids = array(typecode, ids)
+            if not IS_LITTLE:
+                ids.byteswap()
+            ids_string = ids.tostring()
+        if compression:
+            ids_string = compress(ids_string, compression)
+
+        # Weights
+        if all(w == 1.0 for w in weights):
+            weights_string = ''
+        else:
+            if not IS_LITTLE:
+                weights.byteswap()
+            weights_string = weights.tostring()
+        if weights_string and compression:
+            weights_string = compress(weights_string, compression)
+
+        # Values
+        if posting_size < 0:
+            values_string = dumps(values, -1)[2:]
+        elif posting_size == 0:
+            values_string = ''
+        else:
+            values_string = "".join(values)
+        if values_string and compression:
+            values_string = compress(values_string, compression)
+
+        # Header
+        flags = 1 if compression else 0
+        minlen_byte = length_to_byte(minlength)
+        blocksize = sum((self._struct.size, len(maxid_string), len(ids_string),
+                         len(weights_string), len(values_string)))
+        header = self._struct.pack(blocksize, flags, postcount, typecode,
+                                   0, len(ids_string), len(weights_string),
+                                   maxweight, maxwol, 0, minlen_byte)
+
+        postfile.write(header)
+        postfile.write(maxid_string)
+        postfile.write(ids_string)
+        postfile.write(weights_string)
+        postfile.write(values_string)
+
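+
+# A minimal round-trip sketch, assuming a writable temporary path and the
+# StructFile wrapper from whoosh.filedb.structfile: a Block2 is serialized
+# with to_file() and recovered with from_file()/read_ids().
+def _block2_roundtrip_sketch(path="/tmp/_block2_example"):
+    from whoosh.filedb.structfile import StructFile
+
+    out = StructFile(open(path, "w+b"))
+    block = Block2(out)
+    for docnum in (1, 5, 9):
+        block.append(docnum, 1.0, "", 4)
+    block.to_file(out, posting_size=0)
+    out.close()
+
+    postfile = StructFile(open(path, "rb"))
+    readback = Block2.from_file(postfile)
+    assert list(readback.read_ids()) == [1, 5, 9]
+    postfile.close()
+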
+
+# Old block formats
+
+class Block1(BlockBase):
+    # On-disk header format
+    #
+    # Offset  Type  Desc
+    # ------  ----  -------
+    # 0       B     Flags
+    # 1       B     (Unused)
+    # 2       H     (Unused)
+    # 4       i     Delta to start of next block
+    # ------------- If byte 0 == 0, the first 8 bytes are an absolute pointer
+    #               to the next block (backwards compatibility)
+    #
+    # 8       H     Length of the compressed IDs, or 0 if IDs are not
+    #               compressed
+    # 10      H     Length of the compressed weights, or 0 if the weights are
+    #               not compressed, or 1 if the weights are all 1.0.
+    # 12      B     Number of posts in this block
+    # 13      f     Maximum weight in this block (used for quality)
+    # 17      f     Maximum (weight/fieldlength) in this block (for quality)
+    # 21      f     (Unused)
+    # 25      B     Minimum length in this block, encoded as byte (for quality)
+    #
+    # Followed by either an unsigned int or string indicating the last ID in
+    # this block
+
+    _struct = Struct("!BBHiHHBfffB")
+    magic = -48626
+
+    @classmethod
+    def from_file(cls, postfile, stringids=False):
+        pos = postfile.tell()
+        block = cls(postfile, stringids=stringids)
+
+        encoded_header = postfile.read(cls._struct.size)
+        header = cls._struct.unpack(encoded_header)
+        (flags, _, _, nextoffset, block.idslen, block.weightslen,
+         block.postcount, block.maxweight, block.maxwol, _, minlength) = header
+
+        block.nextoffset = pos + nextoffset
+        block.minlength = byte_to_length(minlength)
+
+        assert block.postcount > 0, "postcount=%r" % block.postcount
+
+        if stringids:
+            block.maxid = utf8decode(postfile.read_string())[0]
+        else:
+            block.maxid = postfile.read_uint()
+
+        block.dataoffset = postfile.tell()
+
+        return block
+
+    def read_ids(self):
+        postfile = self.postfile
+        offset = self.dataoffset
+        postcount = self.postcount
+        postfile.seek(offset)
+
+        if self.stringids:
+            rs = postfile.read_string
+            ids = [utf8decode(rs())[0] for _ in xrange(postcount)]
+            newoffset = postfile.tell()
+        elif self.idslen:
+            ids = array("I")
+            ids.fromstring(decompress(postfile.read(self.idslen)))
+            if IS_LITTLE:
+                ids.byteswap()
+            newoffset = offset + self.idslen
+        else:
+            ids = postfile.read_array("I", postcount)
+            newoffset = offset + _INT_SIZE * postcount
+
+        self.ids = ids
+        self.weights_offset = newoffset
+        return ids
+
+    def read_weights(self):
+        postfile = self.postfile
+        offset = self.weights_offset
+        postfile.seek(offset)
+        weightslen = self.weightslen
+        postcount = self.postcount
+
+        if weightslen == 1:
+            weights = None
+            newoffset = offset
+        elif weightslen:
+            weights = array("f")
+            weights.fromstring(decompress(postfile.read(weightslen)))
+            if IS_LITTLE:
+                weights.byteswap()
+            newoffset = offset + weightslen
+        else:
+            weights = postfile.get_array(offset, "f", postcount)
+            newoffset = offset + _FLOAT_SIZE * postcount
+
+        self.weights = weights
+        self.values_offset = newoffset
+        return weights
+
+    def read_values(self, posting_size):
+        postfile = self.postfile
+        startoffset = self.values_offset
+        endoffset = self.nextoffset
+        postcount = self.postcount
+
+        if posting_size != 0:
+            values_string = postfile.map[startoffset:endoffset]
+
+            if self.weightslen:
+                # Values string is compressed
+                values_string = decompress(values_string)
+
+            if posting_size < 0:
+                # Pull the array of value lengths off the front of the string
+                lengths = array("i")
+                lengths.fromstring(values_string[:_INT_SIZE * postcount])
+                values_string = values_string[_INT_SIZE * postcount:]
+
+            # Chop up the block string into individual valuestrings
+            if posting_size > 0:
+                # Format has a fixed posting size, just chop up the values
+                # equally
+                values = [values_string[i * posting_size: i * posting_size + posting_size]
+                          for i in xrange(postcount)]
+            else:
+                # Format has a variable posting size, use the array of lengths
+                # to chop up the values.
+                pos = 0
+                values = []
+                for length in lengths:
+                    values.append(values_string[pos:pos + length])
+                    pos += length
+        else:
+            # Format does not store values (i.e. Existence), just create fake
+            # values
+            values = (None,) * postcount
+
+        self.values = values
+
+
+current = Block2
+magic_map = {Block1.magic: Block1, Block2.magic: Block2}
diff --git a/lib/whoosh/whoosh/filedb/structfile.py b/lib/whoosh/whoosh/filedb/structfile.py
new file mode 100644
index 0000000..704216f
--- /dev/null
+++ b/lib/whoosh/whoosh/filedb/structfile.py
@@ -0,0 +1,312 @@
+# Copyright 2009 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+import os
+from array import array
+from copy import copy
+from cPickle import dump as dump_pickle
+from cPickle import load as load_pickle
+from struct import calcsize
+from gzip import GzipFile
+
+from whoosh.system import (_INT_SIZE, _SHORT_SIZE, _FLOAT_SIZE, _LONG_SIZE,
+                           pack_sbyte, pack_ushort, pack_int, pack_uint,
+                           pack_long, pack_float,
+                           unpack_sbyte, unpack_ushort, unpack_int,
+                           unpack_uint, unpack_long, unpack_float, IS_LITTLE)
+from whoosh.util import (varint, read_varint, signed_varint,
+                         decode_signed_varint, float_to_byte, byte_to_float)
+
+
+_SIZEMAP = dict((typecode, calcsize(typecode)) for typecode in "bBiIhHqQf")
+_ORDERMAP = {"little": "<", "big": ">"}
+
+_types = (("sbyte", "b"), ("ushort", "H"), ("int", "i"),
+          ("long", "q"), ("float", "f"))
+
+
+# Main function
+
+class StructFile(object):
+    """Returns a "structured file" object that wraps the given file object and
+    provides numerous additional methods for writing structured data, such as
+    "write_varint" and "write_long".
+    """
+
+    def __init__(self, fileobj, name=None, onclose=None, mapped=True,
+                 gzip=False):
+
+        if gzip:
+            fileobj = GzipFile(fileobj=fileobj)
+
+        self.file = fileobj
+        self._name = name
+        self.onclose = onclose
+        self.is_closed = False
+
+        for attr in ("read", "readline", "write", "tell", "seek", "truncate"):
+            if hasattr(fileobj, attr):
+                setattr(self, attr, getattr(fileobj, attr))
+
+        # If mapped is True, set the 'map' attribute to a memory-mapped
+        # representation of the file. Otherwise, the fake 'map' set up by
+        # _setup_fake_map(), which emulates slicing using seek() and read(),
+        # is used.
+        if not gzip and mapped and hasattr(fileobj, "mode") and "r" in fileobj.mode:
+            fd = fileobj.fileno()
+            self.size = os.fstat(fd).st_size
+            if self.size > 0:
+                import mmap
+
+                try:
+                    self.map = mmap.mmap(fd, self.size, access=mmap.ACCESS_READ)
+                except OSError:
+                    self._setup_fake_map()
+        else:
+            self._setup_fake_map()
+
+        self.is_real = not gzip and hasattr(fileobj, "fileno")
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, self._name)
+
+    def __str__(self):
+        return self._name
+
+    def flush(self):
+        """Flushes the buffer of the wrapped file. This is a no-op if the
+        wrapped file does not have a flush method.
+        """
+        if hasattr(self.file, "flush"):
+            self.file.flush()
+
+    def close(self):
+        """Closes the wrapped file. This is a no-op if the wrapped file does
+        not have a close method.
+        """
+
+        if self.is_closed:
+            raise Exception("This file is already closed")
+        del self.map
+        if self.onclose:
+            self.onclose(self)
+        if hasattr(self.file, "close"):
+            self.file.close()
+        self.is_closed = True
+
+    def _setup_fake_map(self):
+        _self = self
+
+        class fakemap(object):
+            def __getitem__(self, slice):
+                if isinstance(slice, (int, long)):
+                    _self.seek(slice)
+                    return _self.read(1)
+                else:
+                    _self.seek(slice.start)
+                    return _self.read(slice.stop - slice.start)
+
+        self.map = fakemap()
+
+    def write_string(self, s):
+        """Writes a string to the wrapped file. This method writes the length
+        of the string first, so you can read the string back without having to
+        know how long it was.
+        """
+        self.write_varint(len(s))
+        self.file.write(s)
+
+    def write_string2(self, s):
+        self.write(pack_ushort(len(s)) + s)
+
+    def read_string(self):
+        """Reads a string from the wrapped file.
+        """
+        return self.file.read(self.read_varint())
+
+    def read_string2(self):
+        l = self.read_ushort()
+        return self.read(l)
+
+    def skip_string(self):
+        l = self.read_varint()
+        self.seek(l, 1)
+
+    def write_varint(self, i):
+        """Writes a variable-length unsigned integer to the wrapped file.
+        """
+        self.file.write(varint(i))
+
+    def write_svarint(self, i):
+        """Writes a variable-length signed integer to the wrapped file.
+        """
+        self.file.write(signed_varint(i))
+
+    def read_varint(self):
+        """Reads a variable-length encoded unsigned integer from the wrapped file.
+        """
+        return read_varint(self.file.read)
+
+    def read_svarint(self):
+        """Reads a variable-length encoded signed integer from the wrapped file.
+        """
+        return decode_signed_varint(read_varint(self.file.read))
+
+    def write_byte(self, n):
+        """Writes a single byte to the wrapped file, shortcut for
+        ``file.write(chr(n))``.
+        """
+        self.file.write(chr(n))
+
+    def read_byte(self):
+        return ord(self.file.read(1))
+
+    def get_byte(self, position):
+        return ord(self.map[position])
+
+    def write_8bitfloat(self, f, mantissabits=5, zeroexp=2):
+        """Writes a byte-sized representation of floating point value f to the
+        wrapped file.
+
+        :param mantissabits: the number of bits to use for the mantissa
+            (with the rest used for the exponent).
+        :param zeroexp: the zero point for the exponent.
+        """
+
+        self.write_byte(float_to_byte(f, mantissabits, zeroexp))
+
+    def read_8bitfloat(self, mantissabits=5, zeroexp=2):
+        """Reads a byte-sized representation of a floating point value.
+
+        :param mantissabits: the number of bits to use for the mantissa
+            (with the rest used for the exponent).
+        :param zeroexp: the zero point for the exponent.
+        """
+        return byte_to_float(self.read_byte(), mantissabits, zeroexp)
+
+    def write_pickle(self, obj, protocol=-1):
+        """Writes a pickled representation of obj to the wrapped file.
+        """
+        dump_pickle(obj, self.file, protocol)
+
+    def read_pickle(self):
+        """Reads a pickled object from the wrapped file.
+        """
+        return load_pickle(self.file)
+
+    def write_sbyte(self, n):
+        self.file.write(pack_sbyte(n))
+
+    def write_int(self, n):
+        self.file.write(pack_int(n))
+
+    def write_uint(self, n):
+        self.file.write(pack_uint(n))
+
+    def write_ushort(self, n):
+        self.file.write(pack_ushort(n))
+
+    def write_long(self, n):
+        self.file.write(pack_long(n))
+
+    def write_float(self, n):
+        self.file.write(pack_float(n))
+
+    def write_array(self, arry):
+        if IS_LITTLE:
+            arry = copy(arry)
+            arry.byteswap()
+        if self.is_real:
+            arry.tofile(self.file)
+        else:
+            self.file.write(arry.tostring())
+
+    def read_sbyte(self):
+        return unpack_sbyte(self.file.read(1))[0]
+
+    def read_int(self):
+        return unpack_int(self.file.read(_INT_SIZE))[0]
+
+    def read_uint(self):
+        return unpack_uint(self.file.read(_INT_SIZE))[0]
+
+    def read_ushort(self):
+        return unpack_ushort(self.file.read(_SHORT_SIZE))[0]
+
+    def read_long(self):
+        return unpack_long(self.file.read(_LONG_SIZE))[0]
+
+    def read_float(self):
+        return unpack_float(self.file.read(_FLOAT_SIZE))[0]
+
+    def read_array(self, typecode, length):
+        a = array(typecode)
+        if self.is_real:
+            a.fromfile(self.file, length)
+        else:
+            a.fromstring(self.file.read(length * _SIZEMAP[typecode]))
+        if IS_LITTLE:
+            a.byteswap()
+        return a
+
+    def get_sbyte(self, position):
+        return unpack_sbyte(self.map[position:position + 1])[0]
+
+    def get_int(self, position):
+        return unpack_int(self.map[position:position + _INT_SIZE])[0]
+
+    def get_uint(self, position):
+        return unpack_uint(self.map[position:position + _INT_SIZE])[0]
+
+    def get_ushort(self, position):
+        return unpack_ushort(self.map[position:position + _SHORT_SIZE])[0]
+
+    def get_long(self, position):
+        return unpack_long(self.map[position:position + _LONG_SIZE])[0]
+
+    def get_float(self, position):
+        return unpack_float(self.map[position:position + _FLOAT_SIZE])[0]
+
+    def get_array(self, position, typecode, length):
+        source = self.map[position:position + length * _SIZEMAP[typecode]]
+        a = array(typecode)
+        a.fromstring(source)
+        if IS_LITTLE:
+            a.byteswap()
+        return a
+
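+
+# A minimal usage sketch, assuming a writable temporary path: it shows the
+# write_*/read_* round trip that the posting and table files rely on.
+def _structfile_sketch(path="/tmp/_structfile_example"):
+    f = StructFile(open(path, "w+b"))
+    f.write_varint(300)
+    f.write_string("hello")
+    f.write_uint(42)
+    f.write_float(2.5)
+    f.flush()
+
+    f.seek(0)
+    assert f.read_varint() == 300
+    assert f.read_string() == "hello"
+    assert f.read_uint() == 42
+    assert abs(f.read_float() - 2.5) < 1e-6
+    f.close()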
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/formats.py b/lib/whoosh/whoosh/formats.py
new file mode 100644
index 0000000..3b03c7c
--- /dev/null
+++ b/lib/whoosh/whoosh/formats.py
@@ -0,0 +1,490 @@
+# Copyright 2009 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""
+The classes in this module encode and decode posting information for a field.
+The field format essentially determines what information is stored about each
+occurrence of a term.
+"""
+
+from collections import defaultdict
+from cPickle import dumps, loads
+
+from whoosh.analysis import unstopped
+from whoosh.system import (_INT_SIZE, _FLOAT_SIZE, pack_uint, unpack_uint,
+                           pack_float, unpack_float)
+from whoosh.util import float_to_byte, byte_to_float
+
+
+# Format base class
+
+class Format(object):
+    """Abstract base class representing a storage format for a field or vector.
+    Format objects are responsible for writing and reading the low-level
+    representation of a field. It controls what kind/level of information to
+    store about the indexed fields.
+    """
+
+    posting_size = -1
+    textual = True
+    __inittypes__ = dict(analyzer=object, field_boost=float)
+
+    def __init__(self, analyzer, field_boost=1.0, **options):
+        """
+        :param analyzer: The analysis.Analyzer object to use to index this
+            field. See the analysis module for more information. If this value
+            is None, the field is not indexed/searchable.
+        :param field_boost: A constant boost factor to scale the score of
+            all queries matching terms in this field.
+        """
+
+        self.analyzer = analyzer
+        self.field_boost = field_boost
+        self.options = options
+
+    def __eq__(self, other):
+        return (other
+                and self.__class__ is other.__class__
+                and self.__dict__ == other.__dict__)
+
+    def __repr__(self):
+        return "%s(%r, boost = %s)" % (self.__class__.__name__,
+                                       self.analyzer, self.field_boost)
+
+    def clean(self):
+        if self.analyzer and hasattr(self.analyzer, "clean"):
+            self.analyzer.clean()
+
+    def word_values(self, value, **kwargs):
+        """Takes the text value to be indexed and yields a series of
+        ("tokentext", frequency, weight, valuestring) tuples, where frequency
+        is the number of times "tokentext" appeared in the value, weight is the
+        weight (a float usually equal to frequency in the absence of per-term
+        boosts) and valuestring is the encoded field-specific posting value
+        for the token. For example, in a Frequency format, the value string
+        would be the same as frequency; in a Positions format, the value
+        string would encode a list of token positions at which "tokentext"
+        occurred.
+
+        :param value: The unicode text to index.
+        """
+        raise NotImplementedError
+
+    def analyze(self, unicodestring, mode='', **kwargs):
+        """Returns a :class:`whoosh.analysis.Token` iterator from the given
+        unicode string.
+
+        :param unicodestring: the string to analyze.
+        :param mode: a string indicating the purpose for which the unicode
+            string is being analyzed, e.g. 'index' or 'query'.
+        """
+
+        if not self.analyzer:
+            raise Exception("%s format has no analyzer" % self.__class__)
+        return self.analyzer(unicodestring, mode=mode, **kwargs)
+
+    def encode(self, value):
+        """Returns the given value encoded as a string.
+        """
+        raise NotImplementedError
+
+    def supports(self, name):
+        """Returns True if this format supports interpreting its posting
+        value as 'name' (e.g. "frequency" or "positions").
+        """
+        return hasattr(self, "decode_" + name)
+
+    def decoder(self, name):
+        """Returns the bound method for interpreting value as 'name',
+        where 'name' is for example "frequency" or "positions". This
+        object must have a corresponding Format.decode_<name>() method.
+        """
+        return getattr(self, "decode_" + name)
+
+    def decode_as(self, astype, valuestring):
+        """Interprets the encoded value string as 'astype', where 'astype' is
+        for example "frequency" or "positions". This object must have a
+        corresponding decode_<astype>() method.
+        """
+        return self.decoder(astype)(valuestring)
+
+
+# Concrete field classes
+
+# TODO: as a legacy thing most of these formats store the frequency but not the
+# weight in the value string, so if you use field or term boosts
+# postreader.value_as("weight") will not match postreader.weight()
+
+
+class Existence(Format):
+    """Only indexes whether a given term occurred in a given document; it does
+    not store frequencies or positions. This is useful for fields that should
+    be searchable but not scorable, such as a file path.
+
+    Supports: frequency, weight (always reports frequency = 1).
+    """
+
+    posting_size = 0
+    __inittypes__ = dict(analyzer=object, field_boost=float)
+
+    def __init__(self, analyzer, field_boost=1.0, **options):
+        self.analyzer = analyzer
+        self.field_boost = field_boost
+        self.options = options
+
+    def word_values(self, value, **kwargs):
+        fb = self.field_boost
+        wordset = set(t.text for t
+                      in unstopped(self.analyzer(value, **kwargs)))
+        return ((w, 1, fb, '') for w in wordset)
+
+    def encode(self, value):
+        return ''
+
+    def decode_frequency(self, valuestring):
+        return 1
+
+    def decode_weight(self, valuestring):
+        return self.field_boost
+
+
+class Frequency(Format):
+    """Stores frequency information for each posting.
+
+    Supports: frequency, weight.
+    """
+
+    posting_size = _INT_SIZE
+    __inittypes__ = dict(analyzer=object, field_boost=float,
+                         boost_as_freq=bool)
+
+    def __init__(self, analyzer, field_boost=1.0, boost_as_freq=False,
+                 **options):
+        """
+        :param analyzer: The analysis.Analyzer object to use to index this
+            field. See the analysis module for more information. If this value
+            is None, the field is not indexed/searchable.
+        :param field_boost: A constant boost factor to scale the score of
+            all queries matching terms in this field.
+        """
+
+        self.analyzer = analyzer
+        self.field_boost = field_boost
+        self.options = options
+
+    def word_values(self, value, **kwargs):
+        fb = self.field_boost
+        freqs = defaultdict(int)
+        weights = defaultdict(float)
+
+        for t in unstopped(self.analyzer(value, boosts=True, **kwargs)):
+            freqs[t.text] += 1
+            weights[t.text] += t.boost
+
+        encode = self.encode
+        return ((w, freq, weights[w] * fb, encode(freq))
+                for w, freq in freqs.iteritems())
+
+    def encode(self, freq):
+        return pack_uint(freq)
+
+    def decode_frequency(self, valuestring):
+        return unpack_uint(valuestring)[0]
+
+    def decode_weight(self, valuestring):
+        freq = unpack_uint(valuestring)[0]
+        return freq * self.field_boost
+
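+
+# A minimal sketch, assuming whoosh.analysis.StandardAnalyzer: word_values()
+# yields one (text, frequency, weight, valuestring) posting per unique token,
+# and decode_frequency() reads the value string back.
+def _frequency_sketch():
+    from whoosh.analysis import StandardAnalyzer
+
+    fmt = Frequency(StandardAnalyzer())
+    postings = dict((w, valuestring) for w, freq, weight, valuestring
+                    in fmt.word_values(u"alfa bravo alfa"))
+    assert fmt.decode_frequency(postings["alfa"]) == 2
+    assert fmt.decode_frequency(postings["bravo"]) == 1
+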
+
+class DocBoosts(Frequency):
+    """A Field that stores frequency and per-document boost information for
+    each posting.
+
+    Supports: frequency, weight.
+    """
+
+    posting_size = _INT_SIZE + 1
+
+    def word_values(self, value, doc_boost=1.0, **kwargs):
+        fb = self.field_boost
+        freqs = defaultdict(int)
+        weights = defaultdict(float)
+        for t in unstopped(self.analyzer(value, boosts=True, **kwargs)):
+            weights[t.text] += t.boost
+            freqs[t.text] += 1
+
+        encode = self.encode
+        return ((w, freq, weights[w] * doc_boost * fb, encode((freq, doc_boost)))
+                for w, freq in freqs.iteritems())
+
+    def encode(self, freq_docboost):
+        freq, docboost = freq_docboost
+        return pack_uint(freq) + float_to_byte(docboost)
+
+    def decode_docboosts(self, valuestring):
+        freq = unpack_uint(valuestring[:_INT_SIZE])[0]
+        docboost = byte_to_float(valuestring[-1])
+        return (freq, docboost)
+
+    def decode_frequency(self, valuestring):
+        return unpack_uint(valuestring[0:_INT_SIZE])[0]
+
+    def decode_weight(self, valuestring):
+        freq = unpack_uint(valuestring[:_INT_SIZE])[0]
+        docboost = byte_to_float(valuestring[-1])
+        return freq * docboost * self.field_boost
+
+
+# Vector formats
+
+class Positions(Format):
+    """A vector that stores position information in each posting, to allow
+    phrase searching and "near" queries.
+
+    Supports: frequency, weight, positions, position_boosts (always reports
+    position boost = 1.0).
+    """
+
+    def word_values(self, value, start_pos=0, **kwargs):
+        fb = self.field_boost
+        poses = defaultdict(list)
+        weights = defaultdict(float)
+        for t in unstopped(self.analyzer(value, positions=True, boosts=True,
+                                         start_pos=start_pos, **kwargs)):
+            poses[t.text].append(start_pos + t.pos)
+            weights[t.text] += t.boost
+
+        encode = self.encode
+        return ((w, len(poslist), weights[w] * fb, encode(poslist))
+                for w, poslist in poses.iteritems())
+
+    def encode(self, positions):
+        codes = []
+        base = 0
+        for pos in positions:
+            codes.append(pos - base)
+            base = pos
+        return pack_uint(len(codes)) + dumps(codes, -1)[2:-1]
+
+    def decode_positions(self, valuestring):
+        codes = loads(valuestring[_INT_SIZE:] + ".")
+        position = 0
+        positions = []
+        for code in codes:
+            position += code
+            positions.append(position)
+        return positions
+
+    def decode_frequency(self, valuestring):
+        return unpack_uint(valuestring[:_INT_SIZE])[0]
+
+    def decode_weight(self, valuestring):
+        return self.decode_frequency(valuestring) * self.field_boost
+
+    def decode_position_boosts(self, valuestring):
+        return [(pos, 1) for pos in self.decode_positions(valuestring)]
+
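+
+# A minimal round-trip sketch of the delta coding used above: encode() stores
+# each position as an offset from the previous one, and decode_positions()
+# re-accumulates the deltas.
+def _positions_roundtrip_sketch():
+    fmt = Positions(analyzer=None)
+    valuestring = fmt.encode([3, 7, 20])
+    assert fmt.decode_positions(valuestring) == [3, 7, 20]
+    assert fmt.decode_frequency(valuestring) == 3
+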
+
+class Characters(Positions):
+    """Stores token position and character start and end information for each
+    posting.
+
+    Supports: frequency, weight, positions, position_boosts (always reports
+    position boost = 1.0), characters.
+    """
+
+    def word_values(self, value, start_pos=0, start_char=0, **kwargs):
+        fb = self.field_boost
+        seen = defaultdict(list)
+        weights = defaultdict(float)
+
+        for t in unstopped(self.analyzer(value, positions=True, chars=True,
+                                         boosts=True, start_pos=start_pos,
+                                         start_char=start_char, **kwargs)):
+            seen[t.text].append((t.pos, start_char + t.startchar,
+                                 start_char + t.endchar))
+            weights[t.text] += t.boost
+
+        encode = self.encode
+        return ((w, len(ls), weights[w] * fb, encode(ls))
+                for w, ls in seen.iteritems())
+
+    def encode(self, posns_chars):
+        # posns_chars = [(pos, startchar, endchar), ...]
+        codes = []
+        posbase = 0
+        charbase = 0
+        for pos, startchar, endchar in posns_chars:
+            codes.append((pos - posbase, startchar - charbase, endchar - startchar))
+            posbase = pos
+            charbase = endchar
+        return pack_uint(len(posns_chars)) + dumps(codes, -1)[2:-1]
+
+    def decode_characters(self, valuestring):
+        codes = loads(valuestring[_INT_SIZE:] + ".")
+        position = 0
+        endchar = 0
+        posns_chars = []
+        for code in codes:
+            position = code[0] + position
+            startchar = code[1] + endchar
+            endchar = code[2] + startchar
+            posns_chars.append((position, startchar, endchar))
+        return posns_chars
+
+    def decode_positions(self, valuestring):
+        codes = loads(valuestring[_INT_SIZE:] + ".")
+        position = 0
+        posns = []
+        for code in codes:
+            position = code[0] + position
+            posns.append(position)
+        return posns
+
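+
+# A minimal round-trip sketch of the combined position/character delta coding
+# above: positions and character offsets both survive the encode/decode trip.
+def _characters_roundtrip_sketch():
+    fmt = Characters(analyzer=None)
+    valuestring = fmt.encode([(0, 0, 4), (2, 10, 15)])
+    assert fmt.decode_characters(valuestring) == [(0, 0, 4), (2, 10, 15)]
+    assert fmt.decode_positions(valuestring) == [0, 2]
+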
+
+class PositionBoosts(Positions):
+    """A format that stores positions and per-position boost information
+    in each posting.
+
+    Supports: frequency, weight, positions, position_boosts.
+    """
+
+    def word_values(self, value, start_pos=0, **kwargs):
+        fb = self.field_boost
+        seen = defaultdict(list)
+
+        for t in unstopped(self.analyzer(value, positions=True, boosts=True,
+                                         start_pos=start_pos, **kwargs)):
+            pos = t.pos
+            boost = t.boost
+            seen[t.text].append((pos, boost))
+
+        encode = self.encode
+        return ((w, len(poslist), sum(p[1] for p in poslist) * fb, encode(poslist))
+                for w, poslist in seen.iteritems())
+
+    def encode(self, posns_boosts):
+        # posns_boosts = [(pos, boost), ...]
+        codes = []
+        base = 0
+        summedboost = 0
+        for pos, boost in posns_boosts:
+            summedboost += boost
+            codes.append((pos - base, boost))
+            base = pos
+
+        return (pack_uint(len(posns_boosts)) + pack_float(summedboost)
+                + dumps(codes, -1)[2:-1])
+
+    def decode_position_boosts(self, valuestring):
+        codes = loads(valuestring[_INT_SIZE + _FLOAT_SIZE:] + ".")
+        position = 0
+        posns_boosts = []
+        for code in codes:
+            position = code[0] + position
+            posns_boosts.append((position, code[1]))
+        return posns_boosts
+
+    def decode_positions(self, valuestring):
+        codes = loads(valuestring[_INT_SIZE + _FLOAT_SIZE:] + ".")
+        position = 0
+        posns = []
+        for code in codes:
+            position = code[0] + position
+            posns.append(position)
+        return posns
+
+    def decode_weight(self, valuestring):
+        summedboost = unpack_float(valuestring[_INT_SIZE:_INT_SIZE + _FLOAT_SIZE])[0]
+        return summedboost * self.field_boost
+
+
+class CharacterBoosts(Characters):
+    """A format that stores positions, character start and end, and
+    per-position boost information in each posting.
+
+    Supports: frequency, weight, positions, position_boosts, characters,
+    character_boosts.
+    """
+
+    def word_values(self, value, start_pos=0, start_char=0, **kwargs):
+        fb = self.field_boost
+        seen = defaultdict(list)
+
+        for t in unstopped(self.analyzer(value, positions=True,
+                                         characters=True, boosts=True,
+                                         start_pos=start_pos,
+                                         start_char=start_char, **kwargs)):
+            seen[t.text].append((t.pos,
+                                 start_char + t.startchar,
+                                 start_char + t.endchar,
+                                 t.boost))
+
+        encode = self.encode
+        return ((w, len(poslist), sum(p[3] for p in poslist) * fb, encode(poslist))
+                for w, poslist in seen.iteritems())
+
+    def encode(self, posns_chars_boosts):
+        # posns_chars_boosts = [(pos, startchar, endchar, boost), ...]
+        codes = []
+        posbase = 0
+        charbase = 0
+        summedboost = 0
+        for pos, startchar, endchar, boost in posns_chars_boosts:
+            codes.append((pos - posbase, startchar - charbase,
+                          endchar - startchar, boost))
+            posbase = pos
+            charbase = endchar
+            summedboost += boost
+
+        return (pack_uint(len(posns_chars_boosts)) + pack_float(summedboost)
+                + dumps(codes, -1)[2:-1])
+
+    def decode_character_boosts(self, valuestring):
+        codes = loads(valuestring[_INT_SIZE + _FLOAT_SIZE:] + ".")
+        position = 0
+        endchar = 0
+        posn_char_boosts = []
+        for code in codes:
+            position = position + code[0]
+            startchar = endchar + code[1]
+            endchar = startchar + code[2]
+            posn_char_boosts.append((position, startchar, endchar, code[3]))
+        return posn_char_boosts
+
+    def decode_positions(self, valuestring):
+        return [item[0] for item in self.decode_character_boosts(valuestring)]
+
+    def decode_characters(self, valuestring):
+        return [(pos, startchar, endchar) for pos, startchar, endchar, _
+                in self.decode_character_boosts(valuestring)]
+
+    def decode_position_boosts(self, valuestring):
+        return [(pos, boost) for pos, _, _, boost
+                in self.decode_character_boosts(valuestring)]
+
+
+
diff --git a/lib/whoosh/whoosh/highlight.py b/lib/whoosh/whoosh/highlight.py
new file mode 100644
index 0000000..f1cb3c3
--- /dev/null
+++ b/lib/whoosh/whoosh/highlight.py
@@ -0,0 +1,571 @@
+# Copyright 2008 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""The highlight module contains classes and functions for displaying short
+excerpts from hit documents in the search results you present to the user, with
+query terms highlighted.
+"""
+
+from __future__ import division
+from collections import deque
+from heapq import nlargest
+from cgi import escape as htmlescape
+
+
+# Fragment object
+
+class Fragment(object):
+    """Represents a fragment (extract) from a hit document. This object is
+    mainly used to keep track of the start and end points of the fragment; it
+    does not contain the text of the fragment or do much else.
+    """
+
+    def __init__(self, tokens, charsbefore=0, charsafter=0, textlen=999999):
+        """
+        :param tokens: list of the Token objects in the fragment.
+        :param charsbefore: approx. how many characters before the start of the
+            first matched term to include in the fragment.
+        :param charsafter: approx. how many characters after the end of the
+            last matched term to include in the fragment.
+        :param textlen: length in characters of the document text.
+        """
+
+        #: index of the first character of the fragment in the original
+        # document
+        self.startchar = max(0, tokens[0].startchar - charsbefore)
+        #: index after the last character of the fragment in the original
+        # document
+        self.endchar = min(textlen, tokens[-1].endchar + charsafter)
+        self.matches = [t for t in tokens if t.matched]
+        self.matched_terms = frozenset(t.text for t in self.matches)
+
+    def __len__(self):
+        return self.endchar - self.startchar
+
+    def overlaps(self, fragment):
+        sc = self.startchar
+        ec = self.endchar
+        fsc = fragment.startchar
+        fec = fragment.endchar
+        return (fsc > sc and fsc < ec) or (fec > sc and fec < ec)
+
+    def overlapped_length(self, fragment):
+        sc = self.startchar
+        ec = self.endchar
+        fsc = fragment.startchar
+        fec = fragment.endchar
+        return max(ec, fec) - min(sc, fsc)
+
+    def has_matches(self):
+        # self.matches already holds only the matched tokens
+        return bool(self.matches)
+
+
+# Filters
+
+def copyandmatchfilter(termset, tokens):
+    for t in tokens:
+        t = t.copy()
+        t.matched = t.text in termset
+        yield t
+
+
+# Fragmenters
+
+class Fragmenter(object):
+    def __call__(self, text, tokens):
+        raise NotImplementedError
+
+
+class WholeFragmenter(Fragmenter):
+    def __call__(self, text, tokens):
+        """Doesn't fragment the token stream. This object just returns the
+        entire stream as one "fragment". This is useful if you want to
+        highlight the entire text.
+        """
+
+        tokens = list(tokens)
+        before = after = 0
+        if tokens:
+            before = tokens[0].startchar
+            after = len(text) - tokens[-1].endchar
+        return [Fragment(tokens, charsbefore=before, charsafter=after)]
+
+
+# Backwards compatibility
+NullFragmeter = WholeFragmenter
+
+
+class SimpleFragmenter(Fragmenter):
+    """Simply splits the text into roughly equal sized chunks.
+    """
+
+    def __init__(self, size=70):
+        """
+        :param size: size (in characters) to chunk to. The chunking is based on
+            tokens, so the fragments will usually be smaller.
+        """
+        self.size = size
+
+    def __call__(self, text, tokens):
+        size = self.size
+        first = None
+        frag = []
+
+        for t in tokens:
+            if first is None:
+                first = t.startchar
+
+            if t.endchar - first > size:
+                first = None
+                if frag:
+                    yield Fragment(frag)
+                frag = []
+
+            frag.append(t)
+
+        if frag:
+            yield Fragment(frag)
+
+
+class SentenceFragmenter(Fragmenter):
+    """Breaks the text up on sentence end punctuation characters
+    (".", "!", or "?"). This object works by looking in the original text for a
+    sentence end as the next character after each token's 'endchar'.
+
+    When highlighting with this fragmenter, you should use an analyzer that
+    does NOT remove stop words, for example::
+
+        sa = StandardAnalyzer(stoplist=None)
+    """
+
+    def __init__(self, maxchars=200, sentencechars=".!?"):
+        """
+        :param maxchars: The maximum number of characters allowed in a fragment.
+        :param sentencechars: the characters to treat as sentence-ending
+            punctuation.
+        """
+
+        self.maxchars = maxchars
+        self.sentencechars = frozenset(sentencechars)
+
+    def __call__(self, text, tokens):
+        maxchars = self.maxchars
+        sentencechars = self.sentencechars
+        textlen = len(text)
+        first = None
+        frag = []
+
+        for t in tokens:
+            if first is None:
+                first = t.startchar
+            endchar = t.endchar
+
+            if endchar - first > maxchars:
+                first = None
+                if frag:
+                    yield Fragment(frag)
+                frag = []
+
+            frag.append(t)
+            if frag and endchar < textlen and text[endchar] in sentencechars:
+                # Don't break for two periods in a row (e.g. ignore "...")
+                if endchar + 1 < textlen and text[endchar + 1] in sentencechars:
+                    continue
+
+                yield Fragment(frag, charsafter=0)
+                frag = []
+                first = None
+
+        if frag:
+            yield Fragment(frag)
+
+
+class ContextFragmenter(Fragmenter):
+    """Looks for matched terms and aggregates them with their surrounding
+    context.
+
+    This fragmenter only yields fragments that contain matched terms.
+    """
+
+    def __init__(self, maxchars=200, surround=20):
+        """
+        :param maxchars: The maximum number of characters allowed in a
+            fragment.
+        :param surround: The number of extra characters of context to add both
+            before the first matched term and after the last matched term.
+        """
+
+        self.maxchars = maxchars
+        self.charsbefore = self.charsafter = surround
+
+    def __call__(self, text, tokens):
+        maxchars = self.maxchars
+        charsbefore = self.charsbefore
+        charsafter = self.charsafter
+
+        current = deque()
+        currentlen = 0
+        countdown = -1
+        for t in tokens:
+            if t.matched:
+                countdown = charsafter
+                # Add on "unused" context length from the front
+                countdown += (charsbefore - currentlen)
+
+            current.append(t)
+
+            length = t.endchar - t.startchar
+            currentlen += length
+
+            if countdown >= 0:
+                countdown -= length
+
+                if countdown < 0 or currentlen >= maxchars:
+                    yield Fragment(current)
+                    current = deque()
+                    currentlen = 0
+
+            else:
+                while current and currentlen > charsbefore:
+                    t = current.popleft()
+                    currentlen -= t.endchar - t.startchar
+
+        if countdown >= 0:
+            yield Fragment(current)
+
+
+#class VectorFragmenter(Fragmenter):
+#    def __init__(self, termmap, maxchars=200, charsbefore=20, charsafter=20):
+#        """
+#        :param termmap: A dictionary mapping the terms you're looking for to
+#            lists of either (posn, startchar, endchar) or
+#            (posn, startchar, endchar, boost) tuples.
+#        :param maxchars: The maximum number of characters allowed in a fragment.
+#        :param charsbefore: The number of extra characters of context to add before
+#            the first matched term.
+#        :param charsafter: The number of extra characters of context to add after
+#            the last matched term.
+#        """
+#
+#        self.termmap = termmap
+#        self.maxchars = maxchars
+#        self.charsbefore = charsbefore
+#        self.charsafter = charsafter
+#
+#    def __call__(self, text, tokens):
+#        maxchars = self.maxchars
+#        charsbefore = self.charsbefore
+#        charsafter = self.charsafter
+#        textlen = len(text)
+#
+#        vfrags = []
+#        for term, data in self.termmap.iteritems():
+#            if len(data) == 3:
+#                t = Token(startchar = data[1], endchar = data[2])
+#            elif len(data) == 4:
+#                t = Token(startchar = data[1], endchar = data[2], boost = data[3])
+#            else:
+#                raise ValueError(repr(data))
+#
+#            newfrag = VFragment([t], charsbefore, charsafter, textlen)
+#            added = False
+#
+#            for vf in vfrags:
+#                if vf.overlaps(newfrag) and vf.overlapped_length(newfrag) < maxchars:
+#                    vf.merge(newfrag)
+#                    added = True
+#                    break
+
+
+# Fragment scorers
+
+class FragmentScorer(object):
+    pass
+
+
+class BasicFragmentScorer(FragmentScorer):
+    def __call__(self, f):
+        # Add up the boosts for the matched terms in this passage
+        score = sum(t.boost for t in f.matches)
+
+        # Favor diversity: multiply score by the number of separate
+        # terms matched
+        score *= len(f.matched_terms) * 100
+
+        return score
+
+
+# Fragment sorters
+
+def SCORE(fragment):
+    "Sorts higher scored passages first."
+    # top_fragments() already returns fragments in descending score order, so
+    # a constant sort key leaves that order intact.
+    return None
+
+
+def FIRST(fragment):
+    "Sorts passages from earlier in the document first."
+    return fragment.startchar
+
+
+def LONGER(fragment):
+    "Sorts longer passages first."
+    return 0 - len(fragment)
+
+
+def SHORTER(fragment):
+    "Sort shorter passages first."
+    return len(fragment)
+
+
+# Formatters
+
+class Formatter(object):
+    pass
+
+
+class UppercaseFormatter(Formatter):
+    """Returns a string in which the matched terms are in UPPERCASE.
+    """
+
+    def __init__(self, between="..."):
+        """
+        :param between: the text to add between fragments.
+        """
+
+        self.between = between
+
+    def _format_fragment(self, text, fragment):
+        output = []
+        index = fragment.startchar
+
+        for t in fragment.matches:
+            if t.startchar > index:
+                output.append(text[index:t.startchar])
+
+            ttxt = text[t.startchar:t.endchar]
+            if t.matched:
+                ttxt = ttxt.upper()
+            output.append(ttxt)
+            index = t.endchar
+
+        output.append(text[index:fragment.endchar])
+        return "".join(output)
+
+    def __call__(self, text, fragments):
+        return self.between.join((self._format_fragment(text, fragment)
+                                  for fragment in fragments))
+
+
+class HtmlFormatter(Formatter):
+    """Returns a string containing HTML formatting around the matched terms.
+
+    This formatter wraps matched terms in an HTML element with two class names.
+    The first class name (set with the constructor argument ``classname``) is
+    the same for each match. The second class name (set with the constructor
+    argument ``termclass``) is different depending on which term matched. This
+    allows you to give different formatting (for example, different background
+    colors) to the different terms in the excerpt.
+
+    >>> hf = HtmlFormatter(tagname="span", classname="match", termclass="term")
+    >>> hf(mytext, myfragments)
+    "The <span class="match term0">template</span> <span class="match term1">geometry</span> is..."
+
+    This object maintains a dictionary mapping terms to HTML class names (e.g.
+    ``term0`` and ``term1`` above), so that multiple excerpts will use the same
+    class for the same term. If you want to re-use the same HtmlFormatter
+    object with different searches, you should call HtmlFormatter.clean()
+    between searches to clear the mapping.
+    """
+
+    template = '<%(tag)s class=%(q)s%(cls)s%(tn)s%(q)s>%(t)s</%(tag)s>'
+
+    def __init__(self, tagname="strong", between="...",
+                 classname="match", termclass="term", maxclasses=5,
+                 attrquote='"'):
+        """
+        :param tagname: the tag to wrap around matching terms.
+        :param between: the text to add between fragments.
+        :param classname: the class name to add to the elements wrapped around
+            matching terms.
+        :param termclass: the class name prefix for the second class which is
+            different for each matched term.
+        :param maxclasses: the maximum number of term classes to produce. This
+            limits the number of classes you have to define in CSS by recycling
+            term class names. For example, if you set maxclasses to 3 and have
+            5 terms, the 5 terms will use the CSS classes ``term0``, ``term1``,
+            ``term2``, ``term0``, ``term1``.
+        """
+
+        self.between = between
+        self.tagname = tagname
+        self.classname = classname
+        self.termclass = termclass
+        self.attrquote = attrquote
+        self.maxclasses = maxclasses
+        self.seen = {}
+
+    def _format_fragment(self, text, fragment, seen):
+        htmlclass = " ".join((self.classname, self.termclass))
+
+        output = []
+        index = fragment.startchar
+
+        for t in fragment.matches:
+            if t.startchar > index:
+                output.append(text[index:t.startchar])
+
+            ttxt = htmlescape(text[t.startchar:t.endchar])
+            if t.matched:
+                if t.text in seen:
+                    termnum = seen[t.text]
+                else:
+                    termnum = len(seen) % self.maxclasses
+                    seen[t.text] = termnum
+                ttxt = self.template % {"tag": self.tagname,
+                                        "q": self.attrquote,
+                                        "cls": htmlclass,
+                                        "t": ttxt, "tn": termnum}
+            output.append(ttxt)
+            index = t.endchar
+
+        if index < fragment.endchar:
+            output.append(text[index:fragment.endchar])
+
+        return "".join(output)
+
+    def __call__(self, text, fragments):
+        seen = self.seen
+        return self.between.join(self._format_fragment(text, fragment, seen)
+                                 for fragment in fragments)
+
+    def clean(self):
+        """Clears the dictionary mapping terms to HTML classnames.
+        """
+        self.seen = {}
+
+
+class GenshiFormatter(Formatter):
+    """Returns a Genshi event stream containing HTML formatting around the
+    matched terms.
+    """
+
+    def __init__(self, qname="strong", between="..."):
+        """
+        :param qname: the QName for the tag to wrap around matched terms.
+        :param between: the text to add between fragments.
+        """
+
+        self.qname = qname
+        self.between = between
+
+        from genshi.core import START, END, TEXT, Attrs, Stream
+        self.START, self.END, self.TEXT = START, END, TEXT
+        self.Attrs, self.Stream = Attrs, Stream
+
+    def _add_text(self, text, output):
+        if output and output[-1][0] == self.TEXT:
+            output[-1] = (self.TEXT, output[-1][1] + text, output[-1][2])
+        else:
+            output.append((self.TEXT, text, (None, -1, -1)))
+
+    def _format_fragment(self, text, fragment):
+        START, TEXT, END, Attrs = self.START, self.TEXT, self.END, self.Attrs
+        qname = self.qname
+        output = []
+
+        index = fragment.startchar
+        lastmatched = False
+        for t in fragment.matches:
+            if t.startchar > index:
+                if lastmatched:
+                    output.append((END, qname, (None, -1, -1)))
+                    lastmatched = False
+                self._add_text(text[index:t.startchar], output)
+
+            ttxt = text[t.startchar:t.endchar]
+            if not lastmatched:
+                output.append((START, (qname, Attrs()), (None, -1, -1)))
+                lastmatched = True
+            output.append((TEXT, ttxt, (None, -1, -1)))
+
+            index = t.endchar
+
+        if lastmatched:
+            output.append((END, qname, (None, -1, -1)))
+
+        return output
+
+    def __call__(self, text, fragments):
+        output = []
+        first = True
+        for fragment in fragments:
+            if not first:
+                self._add_text(self.between, output)
+            first = False
+            output += self._format_fragment(text, fragment)
+
+        return self.Stream(output)
+
+
+# Highlighting
+
+def top_fragments(text, terms, analyzer, fragmenter, top=3,
+                  scorer=None, minscore=1):
+    if scorer is None:
+        scorer = BasicFragmentScorer()
+
+    termset = frozenset(terms)
+    tokens = copyandmatchfilter(termset, analyzer(text, chars=True,
+                                                  keeporiginal=True))
+    scored_frags = nlargest(top, ((scorer(f), f)
+                                  for f in fragmenter(text, tokens)))
+    return [sf for score, sf in scored_frags if score > minscore]
+
+
+def highlight(text, terms, analyzer, fragmenter, formatter, top=3,
+              scorer=None, minscore=1, order=FIRST):
+
+    if scorer is None:
+        scorer = BasicFragmentScorer()
+
+    if type(fragmenter) is type:
+        fragmenter = fragmenter()
+    if type(formatter) is type:
+        formatter = formatter()
+    if type(scorer) is type:
+        scorer = scorer()
+
+    fragments = top_fragments(text, terms, analyzer, fragmenter,
+                              top=top, scorer=scorer, minscore=minscore)
+    fragments.sort(key=order)
+    return formatter(text, fragments)
+
+
+if __name__ == '__main__':
+    pass
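+
+    # Editor's sketch (not part of the original module): a minimal run of
+    # highlight(). SimpleAnalyzer comes from whoosh.analysis; the
+    # SimpleFragmenter and UppercaseFormatter names are assumed to be
+    # defined earlier in this module.
+    from whoosh.analysis import SimpleAnalyzer
+    sample = u"The quick brown fox jumped over the lazy dog"
+    print highlight(sample, frozenset([u"fox", u"dog"]),
+                    SimpleAnalyzer(), SimpleFragmenter(),
+                    UppercaseFormatter())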
+
+
+
+
diff --git a/lib/whoosh/whoosh/index.py b/lib/whoosh/whoosh/index.py
new file mode 100644
index 0000000..a2d3878
--- /dev/null
+++ b/lib/whoosh/whoosh/index.py
@@ -0,0 +1,373 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""Contains the main functions/classes for creating, maintaining, and using
+an index.
+"""
+
+from __future__ import division
+import os.path
+
+from whoosh import fields, store
+
+
+_DEF_INDEX_NAME = "MAIN"
+
+
+# Exceptions
+
+class IndexError(Exception):
+    """Generic index error."""
+
+
+class IndexVersionError(IndexError):
+    """Raised when you try to open an index using a format that the current
+    version of Whoosh cannot read. That is, when the index you're trying to
+    open is either not backward or forward compatible with this version of
+    Whoosh.
+    """
+
+    def __init__(self, msg, version, release=None):
+        Exception.__init__(self, msg)
+        self.version = version
+        self.release = release
+
+
+class OutOfDateError(IndexError):
+    """Raised when you try to commit changes to an index which is not the
+    latest generation.
+    """
+
+
+class EmptyIndexError(IndexError):
+    """Raised when you try to work with an index that has no indexed terms.
+    """
+
+
+# Convenience functions
+
+def create_in(dirname, schema, indexname=None):
+    """Convenience function to create an index in a directory. Takes care of
+    creating a FileStorage object for you.
+
+    :param dirname: the path string of the directory in which to create the index.
+    :param schema: a :class:`whoosh.fields.Schema` object describing the index's fields.
+    :param indexname: the name of the index to create; you only need to specify this if
+        you are creating multiple indexes within the same storage object.
+    :returns: :class:`Index`
+    """
+
+    if not indexname:
+        indexname = _DEF_INDEX_NAME
+
+    from whoosh.filedb.filestore import FileStorage
+    storage = FileStorage(dirname)
+    return storage.create_index(schema, indexname)
+
+
+def open_dir(dirname, indexname=None, mapped=True, readonly=False):
+    """Convenience function for opening an index in a directory. Takes care of
+    creating a FileStorage object for you. dirname is the filename of the
+    directory in containing the index. indexname is the name of the index to
+    create; you only need to specify this if you have multiple indexes within
+    the same storage object.
+
+    :param dirname: the path string of the directory in which to create the
+        index.
+    :param indexname: the name of the index to create; you only need to specify
+        this if you have multiple indexes within the same storage object.
+    :param mapped: whether to use memory mapping to speed up disk reading.
+    :returns: :class:`Index`
+    """
+
+    if indexname is None:
+        indexname = _DEF_INDEX_NAME
+
+    from whoosh.filedb.filestore import FileStorage
+    storage = FileStorage(dirname, mapped=mapped, readonly=readonly)
+    return storage.open_index(indexname)
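+
+
+# Editor's note: a minimal usage sketch for the convenience functions above.
+# It assumes the ID and TEXT field types from whoosh.fields, which live
+# outside this file:
+#
+#     from whoosh import fields, index
+#     schema = fields.Schema(path=fields.ID(stored=True),
+#                            content=fields.TEXT)
+#     ix = index.create_in("indexdir", schema)   # build a fresh index
+#     ix = index.open_dir("indexdir")            # ...or reopen it later
+#     print index.exists_in("indexdir")          # -> True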
+
+
+def exists_in(dirname, indexname=None):
+    """Returns True if dirname contains a Whoosh index.
+
+    :param dirname: the file path of a directory.
+    :param indexname: the name of the index. If None, the default index name is
+        used.
+    :rtype: bool
+    """
+
+    if os.path.exists(dirname):
+        try:
+            ix = open_dir(dirname, indexname=indexname)
+            return ix.latest_generation() > -1
+        except EmptyIndexError:
+            pass
+
+    return False
+
+
+def exists(storage, indexname=None):
+    """Returns True if the given Storage object contains a Whoosh index.
+
+    :param storage: a store.Storage object.
+    :param indexname: the name of the index. If None, the default index name is
+        used.
+    :rtype: bool
+    """
+
+    if indexname is None:
+        indexname = _DEF_INDEX_NAME
+
+    try:
+        ix = storage.open_index(indexname)
+        gen = ix.latest_generation()
+        ix.close()
+        return gen > -1
+    except EmptyIndexError:
+        pass
+
+    return False
+
+
+def version_in(dirname, indexname=None):
+    """Returns a tuple of (release_version, format_version), where
+    release_version is the release version number of the Whoosh code that
+    created the index -- e.g. (0, 1, 24) -- and format_version is the version
+    number of the on-disk format used for the index -- e.g. -102.
+
+    The second number (format version) may be useful for figuring out if you
+    need to recreate an index because the format has changed. However, you can
+    just try to open the index and see if you get an IndexVersionError
+    exception.
+
+    Note that the release and format version are available as attributes on the
+    Index object in Index.release and Index.version.
+
+    :param dirname: the file path of a directory containing an index.
+    :param indexname: the name of the index. If None, the default index name is
+        used.
+    :returns: ((major_ver, minor_ver, build_ver), format_ver)
+    """
+
+    from whoosh.filedb.filestore import FileStorage
+    storage = FileStorage(dirname)
+    return version(storage, indexname=indexname)
+
+
+def version(storage, indexname=None):
+    """Returns a tuple of (release_version, format_version), where
+    release_version is the release version number of the Whoosh code that
+    created the index -- e.g. (0, 1, 24) -- and format_version is the version
+    number of the on-disk format used for the index -- e.g. -102.
+
+    The second number (format version) may be useful for figuring out if you
+    need to recreate an index because the format has changed. However, you can
+    just try to open the index and see if you get an IndexVersionError
+    exception.
+
+    Note that the release and format version are available as attributes on the
+    Index object in Index.release and Index.version.
+
+    :param storage: a store.Storage object.
+    :param indexname: the name of the index. If None, the default index name is
+        used.
+    :returns: ((major_ver, minor_ver, build_ver), format_ver)
+    """
+
+    try:
+        if indexname is None:
+            indexname = _DEF_INDEX_NAME
+
+        ix = storage.open_index(indexname)
+        return (ix.release, ix.version)
+    except IndexVersionError, e:
+        return (None, e.version)
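+
+# Editor's sketch: reading the version info of an on-disk index (the values
+# shown are the illustrative ones from the docstrings above, not real output):
+#
+#     release, fmt = version_in("indexdir")
+#     # release -> e.g. (0, 1, 24), fmt -> e.g. -102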
+
+
+# Index class
+
+class Index(object):
+    """Represents an indexed collection of documents.
+    """
+
+    def close(self):
+        """Closes any open resources held by the Index object itself. This may
+        not close all resources being used everywhere, for example by a
+        Searcher object.
+        """
+        pass
+
+    def add_field(self, fieldname, fieldspec):
+        """Adds a field to the index's schema.
+
+        :param fieldname: the name of the field to add.
+        :param fieldspec: an instantiated :class:`whoosh.fields.FieldType`
+            object.
+        """
+
+        w = self.writer()
+        w.add_field(fieldname, fieldspec)
+        w.commit()
+
+    def remove_field(self, fieldname):
+        """Removes the named field from the index's schema. Depending on the
+        backend implementation, this may or may not actually remove existing
+        data for the field from the index. Optimizing the index should always
+        clear out existing data for a removed field.
+        """
+
+        w = self.writer()
+        w.remove_field(fieldname)
+        w.commit()
+
+    def latest_generation(self):
+        """Returns the generation number of the latest generation of this
+        index, or -1 if the backend doesn't support versioning.
+        """
+        return -1
+
+    def refresh(self):
+        """Returns a new Index object representing the latest generation
+        of this index (if this object is the latest generation, or the backend
+        doesn't support versioning, returns self).
+
+        :returns: :class:`Index`
+        """
+        return self
+
+    def up_to_date(self):
+        """Returns True if this object represents the latest generation of
+        this index. Returns False if this object is not the latest generation
+        (that is, someone else has updated the index since you opened this
+        object).
+
+        :rtype: bool
+        """
+        return True
+
+    def last_modified(self):
+        """Returns the last modified time of the index, or -1 if the backend
+        doesn't support last-modified times.
+        """
+        return -1
+
+    def is_empty(self):
+        """Returns True if this index is empty (that is, it has never had any
+        documents successfully written to it.
+
+        :param rtype: bool
+        """
+        raise NotImplementedError
+
+    def optimize(self):
+        """Optimizes this index, if necessary.
+        """
+        pass
+
+    def doc_count_all(self):
+        """Returns the total number of documents, DELETED OR UNDELETED,
+        in this index.
+        """
+
+        r = self.reader()
+        try:
+            return r.doc_count_all()
+        finally:
+            r.close()
+
+    def doc_count(self):
+        """Returns the total number of UNDELETED documents in this index.
+        """
+
+        r = self.reader()
+        try:
+            return r.doc_count()
+        finally:
+            r.close()
+
+    def searcher(self, **kwargs):
+        """Returns a Searcher object for this index. Keyword arguments are
+        passed to the Searcher object's constructor.
+
+        :rtype: :class:`whoosh.searching.Searcher`
+        """
+
+        from whoosh.searching import Searcher
+        return Searcher(self.reader(), fromindex=self, **kwargs)
+
+    def field_length(self, fieldname):
+        """Returns the total length of the field across all documents.
+        """
+
+        r = self.reader()
+        try:
+            return r.field_length(fieldname)
+        finally:
+            r.close()
+
+    def max_field_length(self, fieldname):
+        """Returns the maximum length of the field across all documents.
+        """
+
+        r = self.reader()
+        try:
+            return r.max_field_length(fieldname)
+        finally:
+            r.close()
+
+    def reader(self, reuse=None):
+        """Returns an IndexReader object for this index.
+
+        :param reuse: an existing reader. Some implementations may recycle
+            resources from this existing reader to create the new reader. Note
+            that any resources in the "recycled" reader that are not used by
+            the new reader will be CLOSED, so you CANNOT use it afterward.
+        :rtype: :class:`whoosh.reading.IndexReader`
+        """
+
+        raise NotImplementedError
+
+    def writer(self, **kwargs):
+        """Returns an IndexWriter object for this index.
+
+        :rtype: :class:`whoosh.writing.IndexWriter`
+        """
+        raise NotImplementedError
+
+    def delete_by_term(self, fieldname, text, searcher=None):
+        w = self.writer()
+        w.delete_by_term(fieldname, text, searcher=searcher)
+        w.commit()
+
+    def delete_by_query(self, q, searcher=None):
+        w = self.writer()
+        w.delete_by_query(q, searcher=searcher)
+        w.commit()
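+
+
+# Editor's note: a sketch of how a concrete Index (such as the one returned by
+# create_in()/open_dir()) is typically driven. add_document() lives on the
+# IndexWriter returned by writer() and is assumed here rather than defined in
+# this file:
+#
+#     w = ix.writer()
+#     w.add_document(path=u"/a", content=u"hello world")
+#     w.commit()
+#     print ix.doc_count()              # -> 1
+#     ix.delete_by_term("path", u"/a")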
+
+
+
diff --git a/lib/whoosh/whoosh/lang/__init__.py b/lib/whoosh/whoosh/lang/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/whoosh/whoosh/lang/__init__.py
diff --git a/lib/whoosh/whoosh/lang/dmetaphone.py b/lib/whoosh/whoosh/lang/dmetaphone.py
new file mode 100644
index 0000000..7c53ae0
--- /dev/null
+++ b/lib/whoosh/whoosh/lang/dmetaphone.py
@@ -0,0 +1,427 @@
+# -*- coding: utf-8 -*-
+
+# This script implements the Double Metaphone algorithm, (c) 1998, 1999 by
+# Lawrence Philips. It was translated to Python from the C source written by
+# Kevin Atkinson (http://aspell.net/metaphone/) by Andrew Collins on January
+# 12, 2007; he claims no rights to this work.
+# http://atomboy.isa-geek.com:8080/plone/Members/acoil/programing/double-metaphone
+
+import re
+
+
+vowels = frozenset("AEIOUY")
+slavo_germ_exp = re.compile("W|K|CZ|WITZ")
+silent_starts = re.compile("GN|KN|PN|WR|PS")
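+
+# Editor's note: double_metaphone() below returns a (primary, secondary) tuple
+# of phonetic codes; secondary is None when both codings agree. For example,
+# per the test table at the bottom of this file,
+# double_metaphone("katherine") == ("K0RN", "KTRN").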
+
+
+def double_metaphone(text):
+    text = text.upper()
+    slavo_germanic = bool(slavo_germ_exp.search(text))
+
+    length = len(text)
+    text = "--" + text + "     "
+    first = pos = 2
+    last = first + length - 1
+    primary = secondary = ""
+
+    if silent_starts.match(text, pos):
+        pos += 1
+
+    while pos < length + 2:
+        ch = text[pos]
+
+        if ch in vowels:
+            # all init vowels now map to 'A'
+            if pos != first:
+                next = (None, 1)
+            else:
+                next = ("A", 1)
+        elif ch == "B":
+            #"-mb", e.g", "dumb", already skipped over... see 'M' below
+            if text[pos + 1] == "B":
+                next = ("P", 2)
+            else:
+                next = ("P", 1)
+        elif ch == "C":
+            # various germanic
+            if (pos > (first + 1) and text[pos - 2] not in vowels and text[pos - 1:pos + 2] == 'ACH' and \
+               (text[pos + 2] not in ['I', 'E'] or text[pos - 2:pos + 4] in ['BACHER', 'MACHER'])):
+                next = ('K', 2)
+            # special case 'CAESAR'
+            elif pos == first and text[first:first + 6] == 'CAESAR':
+                next = ('S', 2)
+            elif text[pos:pos + 4] == 'CHIA':  # italian 'chianti'
+                next = ('K', 2)
+            elif text[pos:pos + 2] == 'CH':
+                # find 'michael'
+                if pos > first and text[pos:pos + 4] == 'CHAE':
+                    next = ('K', 'X', 2)
+                elif pos == first and (text[pos + 1:pos + 6] in ['HARAC', 'HARIS'] or \
+                   text[pos + 1:pos + 4] in ["HOR", "HYM", "HIA", "HEM"]) and text[first:first + 5] != 'CHORE':
+                    next = ('K', 2)
+                # germanic, greek, or otherwise 'ch' for 'kh' sound
+                elif text[first:first + 4] in ['VAN ', 'VON '] or text[first:first + 3] == 'SCH' \
+                   or text[pos - 2:pos + 4] in ["ORCHES", "ARCHIT", "ORCHID"] \
+                   or text[pos + 2] in ['T', 'S'] \
+                   or ((text[pos - 1] in ["A", "O", "U", "E"] or pos == first) \
+                   and text[pos + 2] in ["L", "R", "N", "M", "B", "H", "F", "V", "W", " "]):
+                    next = ('K', 1)
+                else:
+                    if pos > first:
+                        if text[first:first + 2] == 'MC':
+                            next = ('K', 2)
+                        else:
+                            next = ('X', 'K', 2)
+                    else:
+                        next = ('X', 2)
+            # e.g, 'czerny'
+            elif text[pos:pos + 2] == 'CZ' and text[pos - 2:pos + 2] != 'WICZ':
+                next = ('S', 'X', 2)
+            # e.g., 'focaccia'
+            elif text[pos + 1:pos + 4] == 'CIA':
+                next = ('X', 3)
+            # double 'C', but not if e.g. 'McClellan'
+            elif text[pos:pos + 2] == 'CC' and not (pos == (first + 1) and text[first] == 'M'):
+                # 'bellocchio' but not 'bacchus'
+                if text[pos + 2] in ["I", "E", "H"] and text[pos + 2:pos + 4] != 'HU':
+                    # 'accident', 'accede' 'succeed'
+                    if (pos == (first + 1) and text[first] == 'A') or \
+                       text[pos - 1:pos + 4] in ['UCCEE', 'UCCES']:
+                        next = ('KS', 3)
+                    # 'bacci', 'bertucci', other italian
+                    else:
+                        next = ('X', 3)
+                else:
+                    next = ('K', 2)
+            elif text[pos:pos + 2] in ["CK", "CG", "CQ"]:
+                next = ('K', 'K', 2)
+            elif text[pos:pos + 2] in ["CI", "CE", "CY"]:
+                # italian vs. english
+                if text[pos:pos + 3] in ["CIO", "CIE", "CIA"]:
+                    next = ('S', 'X', 2)
+                else:
+                    next = ('S', 2)
+            else:
+                # name sent in 'mac caffrey', 'mac gregor'
+                if text[pos + 1:pos + 3] in [" C", " Q", " G"]:
+                    next = ('K', 3)
+                else:
+                    if text[pos + 1] in ["C", "K", "Q"] and text[pos + 1:pos + 3] not in ["CE", "CI"]:
+                        next = ('K', 2)
+                    else:  # default for 'C'
+                        next = ('K', 1)
+        elif ch == u'Ç':
+            next = ('S', 1)
+        elif ch == 'D':
+            if text[pos:pos + 2] == 'DG':
+                if text[pos + 2] in ['I', 'E', 'Y']:  # e.g. 'edge'
+                    next = ('J', 3)
+                else:
+                    next = ('TK', 2)
+            elif text[pos:pos + 2] in ['DT', 'DD']:
+                next = ('T', 2)
+            else:
+                next = ('T', 1)
+        elif ch == 'F':
+            if text[pos + 1] == 'F':
+                next = ('F', 2)
+            else:
+                next = ('F', 1)
+        elif ch == 'G':
+            if text[pos + 1] == 'H':
+                if pos > first and text[pos - 1] not in vowels:
+                    next = ('K', 2)
+                elif pos < (first + 3):
+                    if pos == first:  # 'ghislane', ghiradelli
+                        if text[pos + 2] == 'I':
+                            next = ('J', 2)
+                        else:
+                            next = ('K', 2)
+                # Parker's rule (with some further refinements) - e.g., 'hugh'
+                elif (pos > (first + 1) and text[pos - 2] in ['B', 'H', 'D']) \
+                   or (pos > (first + 2) and text[pos - 3] in ['B', 'H', 'D']) \
+                   or (pos > (first + 3) and text[pos - 4] in ['B', 'H']):
+                    next = (None, 2)
+                else:
+                    # e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
+                    if pos > (first + 2) and text[pos - 1] == 'U' \
+                       and text[pos - 3] in ["C", "G", "L", "R", "T"]:
+                        next = ('F', 2)
+                    else:
+                        if pos > first and text[pos - 1] != 'I':
+                            next = ('K', 2)
+            elif text[pos + 1] == 'N':
+                if pos == (first + 1) and text[first] in vowels and not slavo_germanic:
+                    next = ('KN', 'N', 2)
+                else:
+                    # not e.g. 'cagney'
+                    if text[pos + 2:pos + 4] != 'EY' and text[pos + 1] != 'Y' and not slavo_germanic:
+                        next = ('N', 'KN', 2)
+                    else:
+                        next = ('KN', 2)
+            # 'tagliaro'
+            elif text[pos + 1:pos + 3] == 'LI' and not slavo_germanic:
+                next = ('KL', 'L', 2)
+            # -ges-,-gep-,-gel-, -gie- at beginning
+            elif pos == first and (text[pos + 1] == 'Y' \
+               or text[pos + 1:pos + 3] in ["ES", "EP", "EB", "EL", "EY", "IB", "IL", "IN", "IE", "EI", "ER"]):
+                next = ('K', 'J', 2)
+            # -ger-,  -gy-
+            elif (text[pos + 1:pos + 3] == 'ER' or text[pos + 1] == 'Y') \
+               and text[first:first + 6] not in ["DANGER", "RANGER", "MANGER"] \
+               and text[pos - 1] not in ['E', 'I'] and text[pos - 1:pos + 2] not in ['RGY', 'OGY']:
+                next = ('K', 'J', 2)
+            # italian e.g, 'biaggi'
+            elif text[pos + 1] in ['E', 'I', 'Y'] or text[pos - 1:pos + 3] in ["AGGI", "OGGI"]:
+                # obvious germanic
+                if text[first:first + 4] in ['VON ', 'VAN '] or text[first:first + 3] == 'SCH' \
+                   or text[pos + 1:pos + 3] == 'ET':
+                    next = ('K', 2)
+                else:
+                    # always soft if french ending
+                    if text[pos + 1:pos + 5] == 'IER ':
+                        next = ('J', 2)
+                    else:
+                        next = ('J', 'K', 2)
+            elif text[pos + 1] == 'G':
+                next = ('K', 2)
+            else:
+                next = ('K', 1)
+        elif ch == 'H':
+            # only keep if first & before vowel or btw. 2 vowels
+            if (pos == first or text[pos - 1] in vowels) and text[pos + 1] in vowels:
+                next = ('H', 2)
+            else:  # (also takes care of 'HH')
+                next = (None, 1)
+        elif ch == 'J':
+            # obvious spanish, 'jose', 'san jacinto'
+            if text[pos:pos + 4] == 'JOSE' or text[first:first + 4] == 'SAN ':
+                if (pos == first and text[pos + 4] == ' ') or text[first:first + 4] == 'SAN ':
+                    next = ('H',)
+                else:
+                    next = ('J', 'H')
+            elif pos == first and text[pos:pos + 4] != 'JOSE':
+                next = ('J', 'A')  # Yankelovich/Jankelowicz
+            else:
+                # spanish pron. of e.g. 'bajador'
+                if text[pos - 1] in vowels and not slavo_germanic \
+                   and text[pos + 1] in ['A', 'O']:
+                    next = ('J', 'H')
+                else:
+                    if pos == last:
+                        next = ('J', ' ')
+                    else:
+                        if text[pos + 1] not in ["L", "T", "K", "S", "N", "M", "B", "Z"] \
+                           and text[pos - 1] not in ["S", "K", "L"]:
+                            next = ('J',)
+                        else:
+                            next = (None,)
+            if text[pos + 1] == 'J':
+                next = next + (2,)
+            else:
+                next = next + (1,)
+        elif ch == 'K':
+            if text[pos + 1] == 'K':
+                next = ('K', 2)
+            else:
+                next = ('K', 1)
+        elif ch == 'L':
+            if text[pos + 1] == 'L':
+                # spanish e.g. 'cabrillo', 'gallegos'
+                if (pos == (last - 2) and text[pos - 1:pos + 3] in ["ILLO", "ILLA", "ALLE"]) \
+                   or ((text[last - 1:last + 1] in ["AS", "OS"] or text[last] in ["A", "O"]) \
+                   and text[pos - 1:pos + 3] == 'ALLE'):
+                    next = ('L', '', 2)
+                else:
+                    next = ('L', 2)
+            else:
+                next = ('L', 1)
+        elif ch == 'M':
+            if text[pos + 1:pos + 4] == 'UMB' \
+               and (pos + 1 == last or text[pos + 2:pos + 4] == 'ER') \
+               or text[pos + 1] == 'M':
+                next = ('M', 2)
+            else:
+                next = ('M', 1)
+        elif ch == 'N':
+            if text[pos + 1] == 'N':
+                next = ('N', 2)
+            else:
+                next = ('N', 1)
+        elif ch == u'Ñ':
+            next = ('N', 1)
+        elif ch == 'P':
+            if text[pos + 1] == 'H':
+                next = ('F', 2)
+            elif text[pos + 1] in ['P', 'B']:  # also account for "campbell", "raspberry"
+                next = ('P', 2)
+            else:
+                next = ('P', 1)
+        elif ch == 'Q':
+            if text[pos + 1] == 'Q':
+                next = ('K', 2)
+            else:
+                next = ('K', 1)
+        elif ch == 'R':
+            # french e.g. 'rogier', but exclude 'hochmeier'
+            if pos == last and not slavo_germanic \
+               and text[pos - 2:pos] == 'IE' and text[pos - 4:pos - 2] not in ['ME', 'MA']:
+                next = ('', 'R')
+            else:
+                next = ('R',)
+            if text[pos + 1] == 'R':
+                next = next + (2,)
+            else:
+                next = next + (1,)
+        elif ch == 'S':
+            # special cases 'island', 'isle', 'carlisle', 'carlysle'
+            if text[pos - 1:pos + 2] in ['ISL', 'YSL']:
+                next = (None, 1)
+            # special case 'sugar-'
+            elif pos == first and text[first:first + 5] == 'SUGAR':
+                next = ('X', 'S', 1)
+            elif text[pos:pos + 2] == 'SH':
+                # germanic
+                if text[pos + 1:pos + 5] in ["HEIM", "HOEK", "HOLM", "HOLZ"]:
+                    next = ('S', 2)
+                else:
+                    next = ('X', 2)
+            # italian & armenian
+            elif text[pos:pos + 3] in ["SIO", "SIA"] or text[pos:pos + 4] == 'SIAN':
+                if not slavo_germanic:
+                    next = ('S', 'X', 3)
+                else:
+                    next = ('S', 3)
+            # german & anglicisations, e.g. 'smith' match 'schmidt', 'snider' match 'schneider'
+            # also, -sz- in slavic language altho in hungarian it is pronounced 's'
+            elif (pos == first and text[pos + 1] in ["M", "N", "L", "W"]) or text[pos + 1] == 'Z':
+                next = ('S', 'X')
+                if text[pos + 1] == 'Z':
+                    next = next + (2,)
+                else:
+                    next = next + (1,)
+            elif text[pos:pos + 2] == 'SC':
+                # Schlesinger's rule
+                if text[pos + 2] == 'H':
+                    # dutch origin, e.g. 'school', 'schooner'
+                    if text[pos + 3:pos + 5] in ["OO", "ER", "EN", "UY", "ED", "EM"]:
+                        # 'schermerhorn', 'schenker'
+                        if text[pos + 3:pos + 5] in ['ER', 'EN']:
+                            next = ('X', 'SK', 3)
+                        else:
+                            next = ('SK', 3)
+                    else:
+                        if pos == first and text[first + 3] not in vowels and text[first + 3] != 'W':
+                            next = ('X', 'S', 3)
+                        else:
+                            next = ('X', 3)
+                elif text[pos + 2] in ['I', 'E', 'Y']:
+                    next = ('S', 3)
+                else:
+                    next = ('SK', 3)
+            # french e.g. 'resnais', 'artois'
+            elif pos == last and text[pos - 2:pos] in ['AI', 'OI']:
+                next = ('', 'S', 1)
+            else:
+                next = ('S',)
+                if text[pos + 1] in ['S', 'Z']:
+                    next = next + (2,)
+                else:
+                    next = next + (1,)
+        elif ch == 'T':
+            if text[pos:pos + 4] == 'TION':
+                next = ('X', 3)
+            elif text[pos:pos + 3] in ['TIA', 'TCH']:
+                next = ('X', 3)
+            elif text[pos:pos + 2] == 'TH' or text[pos:pos + 3] == 'TTH':
+                # special case 'thomas', 'thames' or germanic
+                if text[pos + 2:pos + 4] in ['OM', 'AM'] or text[first:first + 4] in ['VON ', 'VAN '] \
+                   or text[first:first + 3] == 'SCH':
+                    next = ('T', 2)
+                else:
+                    next = ('0', 'T', 2)
+            elif text[pos + 1] in ['T', 'D']:
+                next = ('T', 2)
+            else:
+                next = ('T', 1)
+        elif ch == 'V':
+            if text[pos + 1] == 'V':
+                next = ('F', 2)
+            else:
+                next = ('F', 1)
+        elif ch == 'W':
+            # can also be in middle of word
+            if text[pos:pos + 2] == 'WR':
+                next = ('R', 2)
+            elif pos == first and (text[pos + 1] in vowels or text[pos:pos + 2] == 'WH'):
+                # Wasserman should match Vasserman
+                if text[pos + 1] in vowels:
+                    next = ('A', 'F', 1)
+                else:
+                    next = ('A', 1)
+            # Arnow should match Arnoff
+            elif (pos == last and text[pos - 1] in vowels) \
+               or text[pos - 1:pos + 5] in ["EWSKI", "EWSKY", "OWSKI", "OWSKY"] \
+               or text[first:first + 3] == 'SCH':
+                next = ('', 'F', 1)
+            # polish e.g. 'filipowicz'
+            elif text[pos:pos + 4] in ["WICZ", "WITZ"]:
+                next = ('TS', 'FX', 4)
+            else:  # default is to skip it
+                next = (None, 1)
+        elif ch == 'X':
+            # french e.g. breaux
+            next = (None,)
+            if not(pos == last and (text[pos - 3:pos] in ["IAU", "EAU"] \
+               or text[pos - 2:pos] in ['AU', 'OU'])):
+                next = ('KS',)
+            if text[pos + 1] in ['C', 'X']:
+                next = next + (2,)
+            else:
+                next = next + (1,)
+        elif ch == 'Z':
+            # chinese pinyin e.g. 'zhao'
+            if text[pos + 1] == 'H':
+                next = ('J',)
+            elif text[pos + 1:pos + 3] in ["ZO", "ZI", "ZA"] \
+               or (slavo_germanic and pos > first and text[pos - 1] != 'T'):
+                next = ('S', 'TS')
+            else:
+                next = ('S',)
+            if text[pos + 1] == 'Z':
+                next = next + (2,)
+            else:
+                next = next + (1,)
+        else:
+            next = (None, 1)
+
+        if len(next) == 2:
+            if next[0]:
+                primary += next[0]
+                secondary += next[0]
+            pos += next[1]
+        elif len(next) == 3:
+            if next[0]:
+                primary += next[0]
+            if next[1]:
+                secondary += next[1]
+            pos += next[2]
+
+    if primary == secondary:
+        return (primary, None)
+    else:
+        return (primary, secondary)
+
+if __name__ == '__main__':
+    names = {'maurice': ('MRS', None), 'aubrey': ('APR', None), 'cambrillo': ('KMPRL', 'KMPR'),
+        'heidi': ('HT', None), 'katherine': ('K0RN', 'KTRN'), 'Thumbail': ('0MPL', 'TMPL'),
+        'catherine': ('K0RN', 'KTRN'), 'richard': ('RXRT', 'RKRT'), 'bob': ('PP', None),
+        'eric': ('ARK', None), 'geoff': ('JF', 'KF'), 'Through': ('0R', 'TR'), 'Schwein': ('XN', 'XFN'),
+        'dave': ('TF', None), 'ray': ('R', None), 'steven': ('STFN', None), 'bryce': ('PRS', None),
+        'randy': ('RNT', None), 'bryan': ('PRN', None), 'Rapelje': ('RPL', None),
+        'brian': ('PRN', None), 'otto': ('AT', None), 'auto': ('AT', None), 'Dallas': ('TLS', None),
+        'maisey': ('MS', None), 'zhang': ('JNK', None), 'Chile': ('XL', None),
+        'Jose': ('HS', None), 'Arnow': ('ARN', 'ARNF'), 'solilijs': ('SLLS', None),
+        'Parachute': ('PRKT', None), 'Nowhere': ('NR', None), 'Tux': ('TKS', None)}
+    for name in names.keys():
+        assert (double_metaphone(name) == names[name]), 'For "%s" function returned %s. Should be %s.' % (name, double_metaphone(name), names[name])
diff --git a/lib/whoosh/whoosh/lang/lovins.py b/lib/whoosh/whoosh/lang/lovins.py
new file mode 100644
index 0000000..ef49125
--- /dev/null
+++ b/lib/whoosh/whoosh/lang/lovins.py
@@ -0,0 +1,573 @@
+"""This module implements the Lovins stemming algorithm. Use the ``stem()``
+function::
+
+    stemmed_word = stem(word)
+"""
+
+from collections import defaultdict
+
+
+# Conditions
+
+def A(base):
+    # A   No restrictions on stem
+    return True
+
+
+def B(base):
+    # B  Minimum stem length = 3
+    return len(base) > 2
+
+
+def C(base):
+    # C  Minimum stem length = 4
+    return len(base) > 3
+
+
+def D(base):
+    # D  Minimum stem length = 5
+    return len(base) > 4
+
+
+def E(base):
+    # E  Do not remove ending after e
+    return base[-1] != "e"
+
+
+def F(base):
+    # F  Minimum stem length = 3 and do not remove ending after e
+    return len(base) > 2 and base[-1] != "e"
+
+
+def G(base):
+    # G  Minimum stem length = 3 and remove ending only after f
+    return len(base) > 2 and base[-1] == "f"
+
+
+def H(base):
+    # H  Remove ending only after t or ll
+    c1, c2 = base[-2:]
+    return c2 == "t" or (c2 == "l" and c1 == "l")
+
+
+def I(base):
+    # I  Do not remove ending after o or e
+    c = base[-1]
+    return c != "o" and c != "e"
+
+
+def J(base):
+    # J  Do not remove ending after a or e
+    c = base[-1]
+    return c != "a" and c != "e"
+
+
+def K(base):
+    # K  Minimum stem length = 3 and remove ending only after l, i or u*e
+    # Check the length first: base[-3] would raise IndexError on short stems.
+    if len(base) < 3:
+        return False
+    c = base[-1]
+    return c == "l" or c == "i" or (c == "e" and base[-3] == "u")
+
+
+def L(base):
+    # L  Do not remove ending after u, x or s, unless s follows o
+    c1, c2 = base[-2:]
+    return c2 != "u" and c2 != "x" and (c2 != "s" or c1 == "o")
+
+
+def M(base):
+    # M  Do not remove ending after a, c, e or m
+    c = base[-1]
+    return c != "a" and c != "c" and c != "e" and c != "m"
+
+
+def N(base):
+    # N  Minimum stem length = 4 after s**, elsewhere = 3
+    return len(base) > 3 or (len(base) == 3 and base[-1] != "s")
+
+
+def O(base):
+    # O  Remove ending only after l or i
+    c = base[-1]
+    return c == "l" or c == "i"
+
+
+def P(base):
+    # P  Do not remove ending after c
+    return base[-1] != "c"
+
+
+def Q(base):
+    # Q  Minimum stem length = 3 and do not remove ending after l or n
+    c = base[-1]
+    return len(base) > 2 and (c != "l" and c != "n")
+
+
+def R(base):
+    # R  Remove ending only after n or r
+    c = base[-1]
+    return c == "n" or c == "r"
+
+
+def S(base):
+    # S  Remove ending only after dr or t, unless t follows t
+    l2 = base[-2:]
+    return l2 == "dr" or (base[-1] == "t" and l2 != "tt")
+
+
+def T(base):
+    # T  Remove ending only after s or t, unless t follows o
+    c1, c2 = base[-2:]
+    return c2 == "s" or (c2 == "t" and c1 != "o")
+
+
+def U(base):
+    # U  Remove ending only after l, m, n or r
+    c = base[-1]
+    return c == "l" or c == "m" or c == "n" or c == "r"
+
+
+def V(base):
+    # V  Remove ending only after c
+    return base[-1] == "c"
+
+
+def W(base):
+    # W  Do not remove ending after s or u
+    c = base[-1]
+    return c != "s" and c != "u"
+
+
+def X(base):
+    # X  Remove ending only after l, i or u*e
+    c = base[-1]
+    return (c == "l" or c == "i"
+            or (len(base) > 2 and c == "e" and base[-3] == "u"))
+
+
+def Y(base):
+    # Y  Remove ending only after in
+    return base[-2:] == "in"
+
+
+def Z(base):
+    # Z  Do not remove ending after f
+    return base[-1] != "f"
+
+
+def a(base):
+    # a  Remove ending only after d, f, ph, th, l, er, or, es or t
+    c = base[-1]
+    l2 = base[-2:]
+    return (c == "d" or c == "f" or l2 == "ph" or l2 == "th" or c == "l"
+            or l2 == "er" or l2 == "or" or l2 == "es" or c == "t")
+
+
+def b(base):
+    # b  Minimum stem length = 3 and do not remove ending after met or ryst
+    return len(base) > 2 and not (base.endswith("met")
+                                  or base.endswith("ryst"))
+
+
+def c(base):
+    # c  Remove ending only after l
+    return base[-1] == "l"
+
+
+# Endings
+
+m = [None] * 12
+
+m[11] = dict((
+        ("alistically", B),
+        ("arizability", A),
+        ("izationally", B)))
+m[10] = dict((
+        ("antialness", A),
+        ("arisations", A),
+        ("arizations", A),
+        ("entialness", A)))
+m[9] = dict((
+        ("allically", C),
+        ("antaneous", A),
+        ("antiality", A),
+        ("arisation", A),
+        ("arization", A),
+        ("ationally", B),
+        ("ativeness", A),
+        ("eableness", E),
+        ("entations", A),
+        ("entiality", A),
+        ("entialize", A),
+        ("entiation", A),
+        ("ionalness", A),
+        ("istically", A),
+        ("itousness", A),
+        ("izability", A),
+        ("izational", A)))
+m[8] = dict((
+        ("ableness", A),
+        ("arizable", A),
+        ("entation", A),
+        ("entially", A),
+        ("eousness", A),
+        ("ibleness", A),
+        ("icalness", A),
+        ("ionalism", A),
+        ("ionality", A),
+        ("ionalize", A),
+        ("iousness", A),
+        ("izations", A),
+        ("lessness", A)))
+m[7] = dict((
+        ("ability", A),
+        ("aically", A),
+        ("alistic", B),
+        ("alities", A),
+        ("ariness", E),
+        ("aristic", A),
+        ("arizing", A),
+        ("ateness", A),
+        ("atingly", A),
+        ("ational", B),
+        ("atively", A),
+        ("ativism", A),
+        ("elihood", E),
+        ("encible", A),
+        ("entally", A),
+        ("entials", A),
+        ("entiate", A),
+        ("entness", A),
+        ("fulness", A),
+        ("ibility", A),
+        ("icalism", A),
+        ("icalist", A),
+        ("icality", A),
+        ("icalize", A),
+        ("ication", G),
+        ("icianry", A),
+        ("ination", A),
+        ("ingness", A),
+        ("ionally", A),
+        ("isation", A),
+        ("ishness", A),
+        ("istical", A),
+        ("iteness", A),
+        ("iveness", A),
+        ("ivistic", A),
+        ("ivities", A),
+        ("ization", F),
+        ("izement", A),
+        ("oidally", A),
+        ("ousness", A)))
+m[6] = dict((
+        ("aceous", A),
+        ("acious", B),
+        ("action", G),
+        ("alness", A),
+        ("ancial", A),
+        ("ancies", A),
+        ("ancing", B),
+        ("ariser", A),
+        ("arized", A),
+        ("arizer", A),
+        ("atable", A),
+        ("ations", B),
+        ("atives", A),
+        ("eature", Z),
+        ("efully", A),
+        ("encies", A),
+        ("encing", A),
+        ("ential", A),
+        ("enting", C),
+        ("entist", A),
+        ("eously", A),
+        ("ialist", A),
+        ("iality", A),
+        ("ialize", A),
+        ("ically", A),
+        ("icance", A),
+        ("icians", A),
+        ("icists", A),
+        ("ifully", A),
+        ("ionals", A),
+        ("ionate", D),
+        ("ioning", A),
+        ("ionist", A),
+        ("iously", A),
+        ("istics", A),
+        ("izable", E),
+        ("lessly", A),
+        ("nesses", A),
+        ("oidism", A)))
+m[5] = dict((
+        ("acies", A),
+        ("acity", A),
+        ("aging", B),
+        ("aical", A),
+        ("alist", A),
+        ("alism", B),
+        ("ality", A),
+        ("alize", A),
+        ("allic", b),
+        ("anced", B),
+        ("ances", B),
+        ("antic", C),
+        ("arial", A),
+        ("aries", A),
+        ("arily", A),
+        ("arity", B),
+        ("arize", A),
+        ("aroid", A),
+        ("ately", A),
+        ("ating", I),
+        ("ation", B),
+        ("ative", A),
+        ("ators", A),
+        ("atory", A),
+        ("ature", E),
+        ("early", Y),
+        ("ehood", A),
+        ("eless", A),
+        ("elily", A),
+        ("ement", A),
+        ("enced", A),
+        ("ences", A),
+        ("eness", E),
+        ("ening", E),
+        ("ental", A),
+        ("ented", C),
+        ("ently", A),
+        ("fully", A),
+        ("ially", A),
+        ("icant", A),
+        ("ician", A),
+        ("icide", A),
+        ("icism", A),
+        ("icist", A),
+        ("icity", A),
+        ("idine", I),
+        ("iedly", A),
+        ("ihood", A),
+        ("inate", A),
+        ("iness", A),
+        ("ingly", B),
+        ("inism", J),
+        ("inity", c),
+        ("ional", A),
+        ("ioned", A),
+        ("ished", A),
+        ("istic", A),
+        ("ities", A),
+        ("itous", A),
+        ("ively", A),
+        ("ivity", A),
+        ("izers", F),
+        ("izing", F),
+        ("oidal", A),
+        ("oides", A),
+        ("otide", A),
+        ("ously", A)))
+m[4] = dict((
+        ("able", A),
+        ("ably", A),
+        ("ages", B),
+        ("ally", B),
+        ("ance", B),
+        ("ancy", B),
+        ("ants", B),
+        ("aric", A),
+        ("arly", K),
+        ("ated", I),
+        ("ates", A),
+        ("atic", B),
+        ("ator", A),
+        ("ealy", Y),
+        ("edly", E),
+        ("eful", A),
+        ("eity", A),
+        ("ence", A),
+        ("ency", A),
+        ("ened", E),
+        ("enly", E),
+        ("eous", A),
+        ("hood", A),
+        ("ials", A),
+        ("ians", A),
+        ("ible", A),
+        ("ibly", A),
+        ("ical", A),
+        ("ides", L),
+        ("iers", A),
+        ("iful", A),
+        ("ines", M),
+        ("ings", N),
+        ("ions", B),
+        ("ious", A),
+        ("isms", B),
+        ("ists", A),
+        ("itic", H),
+        ("ized", F),
+        ("izer", F),
+        ("less", A),
+        ("lily", A),
+        ("ness", A),
+        ("ogen", A),
+        ("ward", A),
+        ("wise", A),
+        ("ying", B),
+        ("yish", A)))
+m[3] = dict((
+        ("acy", A),
+        ("age", B),
+        ("aic", A),
+        ("als", b),
+        ("ant", B),
+        ("ars", O),
+        ("ary", F),
+        ("ata", A),
+        ("ate", A),
+        ("eal", Y),
+        ("ear", Y),
+        ("ely", E),
+        ("ene", E),
+        ("ent", C),
+        ("ery", E),
+        ("ese", A),
+        ("ful", A),
+        ("ial", A),
+        ("ian", A),
+        ("ics", A),
+        ("ide", L),
+        ("ied", A),
+        ("ier", A),
+        ("ies", P),
+        ("ily", A),
+        ("ine", M),
+        ("ing", N),
+        ("ion", Q),
+        ("ish", C),
+        ("ism", B),
+        ("ist", A),
+        ("ite", a),
+        ("ity", A),
+        ("ium", A),
+        ("ive", A),
+        ("ize", F),
+        ("oid", A),
+        ("one", R),
+        ("ous", A)))
+m[2] = dict((
+        ("ae", A),
+        ("al", b),
+        ("ar", X),
+        ("as", B),
+        ("ed", E),
+        ("en", F),
+        ("es", E),
+        ("ia", A),
+        ("ic", A),
+        ("is", A),
+        ("ly", B),
+        ("on", S),
+        ("or", T),
+        ("um", U),
+        ("us", V),
+        ("yl", R),
+        ("s'", A),
+        ("'s", A)))
+m[1] = dict((
+        ("a", A),
+        ("e", A),
+        ("i", A),
+        ("o", A),
+        ("s", W),
+        ("y", B)))
+
+
+def remove_ending(word):
+    length = len(word)
+    el = 11
+    while el > 0:
+        if length - el > 1:
+            ending = word[length - el:]
+            cond = m[el].get(ending)
+            if cond:
+                base = word[:length - el]
+                if cond(base):
+                    return base
+        el -= 1
+    return word
+
+
+_endings = (("iev", "ief"),
+            ("uct", "uc"),
+            ("iev", "ief"),
+            ("uct", "uc"),
+            ("umpt", "um"),
+            ("rpt", "rb"),
+            ("urs", "ur"),
+            ("istr", "ister"),
+            ("metr", "meter"),
+            ("olv", "olut"),
+            ("ul", "l", "aoi"),
+            ("bex", "bic"),
+            ("dex", "dic"),
+            ("pex", "pic"),
+            ("tex", "tic"),
+            ("ax", "ac"),
+            ("ex", "ec"),
+            ("ix", "ic"),
+            ("lux", "luc"),
+            ("uad", "uas"),
+            ("vad", "vas"),
+            ("cid", "cis"),
+            ("lid", "lis"),
+            ("erid", "eris"),
+            ("pand", "pans"),
+            ("end", "ens", "s"),
+            ("ond", "ons"),
+            ("lud", "lus"),
+            ("rud", "rus"),
+            ("her", "hes", "pt"),
+            ("mit", "mis"),
+            ("ent", "ens", "m"),
+            ("ert", "ers"),
+            ("et", "es", "n"),
+            ("yt", "ys"),
+            ("yz", "ys"))
+
+
+# Hash the ending rules by the last letter of the target ending
+_endingrules = defaultdict(list)
+for rule in _endings:
+    _endingrules[rule[0][-1]].append(rule)
+
+_doubles = frozenset(("dd", "gg", "ll", "mm", "nn", "pp", "rr", "ss", "tt"))
+
+
+def fix_ending(word):
+    if word[-2:] in _doubles:
+        word = word[:-1]
+
+    for endingrule in _endingrules[word[-1]]:
+        target, newend = endingrule[:2]
+        if word.endswith(target):
+            if len(endingrule) > 2:
+                exceptafter = endingrule[2]
+                c = word[0 - (len(target) + 1)]
+                if c in exceptafter:
+                    return word
+
+            return word[:0 - len(target)] + newend
+
+    return word
+
+
+def stem(word):
+    """Returns the stemmed version of the argument string.
+    """
+    return fix_ending(remove_ending(word))
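+
+
+if __name__ == '__main__':
+    # Editor's sketch: trace the two phases on a word with a doubled
+    # consonant; remove_ending() strips the suffix, fix_ending() then
+    # un-doubles the stem.
+    assert remove_ending("sitting") == "sitt"
+    assert fix_ending("sitt") == "sit"
+    assert stem("sitting") == "sit"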
+
+
+
diff --git a/lib/whoosh/whoosh/lang/morph_en.py b/lib/whoosh/whoosh/lang/morph_en.py
new file mode 100644
index 0000000..48db2be
--- /dev/null
+++ b/lib/whoosh/whoosh/lang/morph_en.py
@@ -0,0 +1,941 @@
+"""
+Contains the variations() function for expanding an English word into multiple
+variations by programmatically adding and removing suffixes.
+
+Translated to Python from the ``com.sun.labs.minion.lexmorph.LiteMorph_en``
+class of Sun's `Minion search engine <https://minion.dev.java.net/>`_.
+"""
+
+import re
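+
+# Editor's note: variations() (defined later in this module) returns a set of
+# morphological variants of a word. For words covered by the exception table
+# below, the variants come straight from that table; e.g. variations("act")
+# should include "acts", "acted", "acting", "actor" and "actors".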
+
+# Rule exceptions
+
+exceptions = [
+        "a",
+        "abandoner abandon abandons abandoned abandoning abandonings abandoners",
+        "abdomen abdomens",
+        "about",
+        "above",
+        "acid acids acidic acidity acidities",
+        "across",
+        "act acts acted acting actor actors",
+        "ad ads",
+        "add adds added adding addings addition additions adder adders",
+        "advertise advertises advertised advertising advertiser advertisers advertisement advertisements advertisings",
+        "after",
+        "again",
+        "against",
+        "ago",
+        "all",
+        "almost",
+        "along",
+        "already",
+        "also",
+        "although",
+        "alumna alumnae alumnus alumni",
+        "always",
+        "amen amens",
+        "amidships",
+        "amid amidst",
+        "among amongst",
+        "an",
+        "analysis analyses",
+        "and",
+        "another other others",
+        "antenna antennas antennae",
+        "antitheses antithesis",
+        "any",
+        "anyone anybody",
+        "anything",
+        "appendix appendixes appendices",
+        "apropos",
+        "aquarium aquariums aquaria",
+        "argument arguments argue argues argued arguing arguings arguer arguers",
+        "arise arises arose arisen ariser arisers arising arisings",
+        "around",
+        "as",
+        "asbestos",
+        "at",
+        "atlas atlases",
+        "auger augers augered augering augerings augerer augerers",
+        "augment augments augmented augmenting augmentings augmentation augmentations augmenter augmenters",
+        "automata automaton automatons",
+        "automation automating automate automates automated automatic",
+        "avoirdupois",
+        "awake awakes awoke awaked awoken awaker awakers awaking awakings awakening awakenings",
+        "away",
+        "awful awfully awfulness",
+        "axis axes axises",
+        "bacillus bacilli",
+        "bacterium bacteria",
+        "bad worse worst badly badness",
+        "bas",
+        "bases basis",
+        "bases base based basing basings basely baseness basenesses basement basements baseless basic basics",
+        "be am are is was were been being",
+        "bear bears bore borne bearing bearings bearer bearers",
+        "beat beats beaten beating beatings beater beaters",
+        "because",
+        "become becomes became becoming",
+        "beef beefs beeves beefed beefing",
+        "beer beers",
+        "before",
+        "begin begins began begun beginning beginnings beginner beginners",
+        "behalf behalves",
+        "being beings",
+        "bend bends bent bending bendings bender benders",
+        "bereave bereaves bereaved bereft bereaving bereavings bereavement bereavements",
+        "beside besides",
+        "best bests bested besting",
+        "bet bets betting bettor bettors",
+        "betimes",
+        "between",
+        "beyond",
+        "bid bids bade bidden bidding biddings bidder bidders",
+        "bier biers",
+        "bind binds bound binding bindings binder binders",
+        "bit bits",
+        "bite bites bit bitten biting bitings biter biters",
+        "blackfoot blackfeet",
+        "bleed bleeds bled bleeding bleedings bleeder bleeders",
+        "blow blows blew blown blowing blowings blower blowers",
+        "bookshelf bookshelves",
+        "both",
+        "bound bounds bounded bounding boundings bounder bounders boundless",
+        "bourgeois bourgeoisie",
+        "bra bras",
+        "brahman brahmans",
+        "break breaks broke broken breaking breakings breaker breakers",
+        "breed breeds bred breeding breedings breeder breeders",
+        "bring brings brought bringing bringings bringer bringers",
+        "build builds built building buildings builder builders",
+        "bus buses bused bussed busing bussing busings bussings buser busers busser bussers",
+        "buss busses bussed bussing bussings busser bussers",
+        "but",
+        "buy buys bought buying buyings buyer buyers",
+        "by",
+        "calf calves calved calving calvings calver calvers",
+        "can cans canned canning cannings canner canners",
+        "can could cannot",
+        "canoes canoe canoed canoeing canoeings canoer canoers",
+        "catch catches caught catching catchings catcher catchers",
+        "cement cements cemented cementing cementings cementer cementers",
+        "cent cents",
+        "center centers centered centering centerings centerless",
+        "child children childless childish childishly",
+        "choose chooses chose chosen choosing choosings chooser choosers",
+        "cling clings clung clinging clingings clinger clingers",
+        "colloquium colloquia colloquiums",
+        "come comes came coming comings comer comers",
+        "comment comments commented commenting commentings commenter commenters",
+        "compendium compendia compendiums",
+        "complement complements complemented complementing complementings complementer complementers complementary",
+        "compliment compliments complimented complimenting complimentings complimenter complimenters complimentary",
+        "concerto concertos concerti",
+        "condiment condiments",
+        "corps",
+        "cortex cortices cortexes cortical",
+        "couscous",
+        "creep creeps crept creeping creepings creeper creepers creepy",
+        "crisis crises",
+        "criterion criteria criterial",
+        "cryptanalysis cryptanalyses",
+        "curriculum curricula curriculums curricular",
+        "datum data",
+        "day days daily",
+        "deal deals dealt dealing dealings dealer dealers",
+        "decrement decrements decremented decrementing decrementings decrementer decrementers decremental",
+        "deer deers",
+        "demented dementia",
+        "desideratum desiderata",
+        "diagnosis diagnoses diagnose diagnosed diagnosing diagnostic",
+        "dialysis dialyses",
+        "dice dices diced dicing dicings dicer dicers",
+        "die dice",
+        "die dies died dying dyings",
+        "dig digs dug digging diggings digger diggers",
+        "dive dives diver divers dove dived diving divings",
+        "divest divests divester divesters divested divesting divestings divestment divestments",
+        "do does did done doing doings doer doers",
+        "document documents documented documenting documentings documenter documenters documentation documentations documentary",
+        "doe does",
+        "dove doves",
+        "downstairs",
+        "dozen",
+        "draw draws drew drawn drawing drawings drawer drawers",
+        "drink drinks drank drunk drinking drinkings drinker drinkers",
+        "drive drives drove driven driving drivings driver drivers driverless",
+        "due dues duly",
+        "during",
+        "e",
+        "each",
+        "eager eagerer eagerest eagerly eagerness eagernesses",
+        "early earlier earliest",
+        "easement easements",
+        "eat eats ate eaten eating eatings eater eaters",
+        "effluvium effluvia",
+        "either",
+        "element elements elementary",
+        "elf elves elfen",
+        "ellipse ellipses elliptic elliptical elliptically",
+        "ellipsis ellipses elliptic elliptical elliptically",
+        "else",
+        "embolus emboli embolic embolism",
+        "emolument emoluments",
+        "emphasis emphases",
+        "employ employs employed employing employer employers employee employees employment employments employable",
+        "enough",
+        "equilibrium equilibria equilibriums",
+        "erratum errata",
+        "ever",
+        "every",
+        "everything",
+        "exotic exotically exoticness exotica",
+        "experiment experiments experimented experimenting experimentings experimenter experimenters experimentation experimental",
+        "extra extras",
+        "fall falls fell fallen falling fallings faller fallers",
+        "far farther farthest",
+        "fee fees feeless",
+        "feed feeds fed feeding feedings feeder feeders",
+        "feel feels felt feeling feelings feeler feelers",
+        "ferment ferments fermented fermenting fermentings fermentation fermentations fermenter fermenters",
+        "few fewer fewest",
+        "fight fights fought fighting fightings fighter fighters",
+        "figment figments",
+        "filament filaments",
+        "find finds found finding findings finder finders",
+        "firmament firmaments",
+        "flee flees fled fleeing fleeings",
+        "fling flings flung flinging flingings flinger flingers",
+        "floe floes",
+        "fly flies flew flown flying flyings flier fliers flyer flyers",
+        "focus foci focuses focused focusing focusses focussed focussing focuser focal",
+        "foment foments fomented fomenting fomentings fomenter fomenters",
+        "foot feet",
+        "foot foots footed footing footer footers",
+        "footing footings footer footers",
+        "for",
+        "forbid forbids forbade forbidden forbidding forbiddings forbidder forbidders",
+        "foresee foresaw foreseen foreseeing foreseeings foreseer foreseers",
+        "forest forests forester foresting forestation forestations",
+        "forget forgets forgot forgotten forgetting forgettings forgetter forgetters forgetful",
+        "forsake forsakes forsook forsaken forsaking forsakings forsaker forsakers",
+        "found founds founded founding foundings founder founders",
+        "fragment fragments fragmented fragmenting fragmentings fragmentation fragmentations fragmenter fragmenters",
+        "free frees freer freest freed freeing freely freeness freenesses",
+        "freeze freezes froze frozen freezing freezings freezer freezers",
+        "from",
+        "full fully fuller fullest",
+        "fuller fullers full fulls fulled fulling fullings",
+        "fungus fungi funguses fungal",
+        "gallows",
+        "ganglion ganglia ganglions ganglionic",
+        "garment garments",
+        "gas gasses gassed gassing gassings gasser gassers",
+        "gas gases gasses gaseous gasless",
+        "gel gels gelled gelling gellings geller gellers",
+        "german germans germanic germany German Germans Germanic Germany",
+        "get gets got gotten getting gettings getter getters",
+        "give gives gave given giving givings giver givers",
+        "gladiolus gladioli gladioluses gladiola gladiolas gladiolae",
+        "glans glandes",
+        "gluiness gluey glue glues glued gluing gluings gluer gluers",
+        "go goes went gone going goings goer goers",
+        "godchild godchildren",
+        "good better best goodly goodness goodnesses",
+        "goods",
+        "goose geese",
+        "goose gooses goosed goosing goosings gooser goosers",
+        "grandchild grandchildren",
+        "grind grinds ground grinding grindings grinder grinders",
+        "ground grounds grounded grounding groundings grounder grounders groundless",
+        "grow grows grew grown growing growings grower growers growth",
+        "gum gums gummed gumming gummings gummer gummers",
+        "half halves",
+        "halve halves halved halving halvings halver halvers",
+        "hang hangs hung hanged hanging hangings hanger hangers",
+        "have has had having havings haver havers",
+        "he him his himself",
+        "hear hears heard hearing hearings hearer hearers",
+        "here",
+        "hide hides hid hidden hiding hidings hider hiders",
+        "hippopotamus hippopotami hippopotamuses",
+        "hold holds held holding holdings holder holders",
+        "honorarium honoraria honorariums",
+        "hoof hoofs hooves hoofed hoofing hoofer hoofers",
+        "how",
+        "hum hums hummed humming hummings hummer hummers",
+        "hymen hymens hymenal",
+        "hypotheses hypothesis hypothesize hypothesizes hypothesized hypothesizer hypothesizing hypothetical hypothetically",
+        "i",
+        "if iffy",
+        "impediment impediments",
+        "implement implements implemented implementing implementings implementation implementations implementer implementers",
+        "imply implies implied implying implyings implier impliers",
+        "in inner",
+        "inclement",
+        "increment increments incremented incrementing incrementings incrementer incrementers incremental incrementally",
+        "index indexes indexed indexing indexings indexer indexers",
+        "index indexes indices indexical indexicals",
+        "indoor indoors",
+        "instrument instruments instrumented instrumenting instrumentings instrumenter instrumenters instrumentation instrumentations instrumental",
+        "integument integumentary",
+        "into",
+        "it its itself",
+            "java",
+        "july julys",
+        "keep keeps kept keeping keepings keeper keepers",
+        "knife knifes knifed knifing knifings knifer knifers",
+        "knife knives",
+        "know knows knew known knowing knowings knower knowers knowledge",
+        "lament laments lamented lamenting lamentings lamentation lamentations lamenter lamenters lamentable lamentably",
+        "larva larvae larvas larval",
+        "late later latest lately lateness",
+        "latter latterly",
+        "lay lays laid laying layer layers",
+        "layer layers layered layering layerings",
+        "lead leads led leading leadings leader leaders leaderless",
+        "leaf leafs leafed leafing leafings leafer leafers",
+        "leaf leaves leafless",
+        "leave leaves left leaving leavings leaver leavers",
+        "lend lends lent lending lendings lender lenders",
+        "less lesser least",
+        "let lets letting lettings",
+        "lie lies lay lain lying lier liers",
+        "lie lies lied lying liar liars",
+        "life lives lifeless",
+        "light lights lit lighted lighting lightings lightly lighter lighters lightness lightnesses lightless",
+        "likely likelier likeliest",
+        "limen limens",
+        "lineament lineaments",
+        "liniment liniments",
+        "live alive living",
+        "live lives lived living livings",
+        "liver livers",
+        "loaf loafs loafed loafing loafings loafer loafers",
+        "loaf loaves",
+        "logic logics logical logically",
+        "lose loses lost losing loser losers loss losses",
+        "louse lice",
+        "lumen lumens",
+        "make makes made making makings maker makers",
+        "man mans manned manning mannings",
+        "man men",
+        "manly manlier manliest manliness manful manfulness manhood",
+        "manic manically",
+        "manner manners mannered mannerly mannerless mannerful",
+        "many",
+        "matrix matrices matrixes",
+        "may might",
+        "maximum maxima maximums maximal maximize maximizes maximized maximizing",
+        "mean means meant meaning meanings meaningless meaningful",
+        "mean meaner meanest meanly meanness meannesses",
+        "median medians medianly medial",
+        "medium media mediums",
+        "meet meets met meeting meetings",
+        "memorandum memoranda memorandums",
+        "mere merely",
+        "metal metals metallic",
+        "might mighty mightily",
+        "millenium millennia milleniums millennial",
+        "mine mines mined mining minings miner miners",
+        "mine my our ours",
+        "minimum minima minimums minimal",
+        "minus minuses",
+        "miscellaneous miscellanea miscellaneously miscellaneousness miscellany",
+        "molest molests molested molesting molestings molester molesters",
+        "moment moments",
+        "monument monuments monumental",
+        "more most",
+        "mouse mice mouseless",
+        "much",
+        "multiply multiplies multiplier multipliers multiple multiples multiplying multiplyings multiplication multiplications",
+        "mum mums mummed mumming mummings mummer mummers",
+        "must musts",
+        "neither",
+        "nemeses nemesis",
+        "neurosis neuroses neurotic neurotics",
+        "nomen",
+        "none",
+        "nos no noes",
+        "not",
+        "nothing nothings nothingness",
+        "now",
+        "nowadays",
+        "nucleus nuclei nucleuses nuclear",
+        "number numbers numbered numbering numberings numberless",
+        "nutriment nutriments nutrient nutrients nutrition nutritions",
+        "oasis oases",
+        "octopus octopi octopuses",
+        "of",
+        "off",
+        "offer offers offered offering offerings offerer offerers offeror offerors",
+        "often",
+        "oftentimes",
+        "ointment ointments",
+        "omen omens",
+        "on",
+        "once",
+        "only",
+        "ornament ornaments ornamented ornamenting ornamentings ornamentation ornamenter ornamenters ornamental",
+        "outdoor outdoors",
+        "outlay outlays",
+        "outlie outlies outlay outlied outlain outlying outlier outliers",
+        "ovum ova",
+        "ox oxen",
+        "parentheses parenthesis",
+        "parliament parliaments parliamentary",
+        "passerby passer-by passersby passers-by",
+        "past pasts",
+        "pay pays paid paying payings payer payers payee payees payment payments",
+        "per",
+        "perhaps",
+        "person persons people",
+        "phenomenon phenomena phenomenal",
+        "pi",
+        "picnic picnics picnicker picnickers picnicked picnicking picnickings",
+        "pigment pigments pigmented pigmenting pigmentings pigmenter pigmenters pigmentation pigmentations",
+        "please pleases pleased pleasing pleasings pleaser pleasers pleasure pleasures pleasuring pleasurings pleasant pleasantly pleasureless pleasureful",
+        "plus pluses plusses",
+        "polyhedra polyhedron polyhedral",
+        "priest priests priestly priestlier priestliest priestliness priestless",
+        "prognosis prognoses",
+        "prostheses prosthesis",
+        "prove proves proved proving provings proofs proof prover provers provable",
+        "psychosis psychoses psychotic psychotics",
+        "qed",
+        "quiz quizzes quizzed quizzing quizzings quizzer quizzers",
+        "raiment",
+        "rather",
+        "re",
+        "real really",
+        "redo redoes redid redone redoing redoings redoer redoers",
+        "regiment regiments regimented regimenting regimenter regimenters regimentation regimental",
+        "rendezvous",
+        "requiz requizzes requizzed requizzing requizzings requizzer requizzers",
+        "ride rides rode ridden riding ridings rider riders rideless",
+        "ring rings rang rung ringing ringings ringer ringers ringless",
+        "rise rises rose risen rising risings riser risers",
+        "rose roses",
+        "rudiment rudiments rudimentary",
+        "rum rums rummed rumming rummings rummer rummers",
+        "run runs ran running runnings runner runners",
+        "sacrament sacraments sacramental",
+        "same sameness",
+        "sans",
+        "saw saws sawed sawn sawing sawings sawyer sawyers",
+        "say says said saying sayings sayer sayers",
+        "scarf scarfs scarves scarfless",
+        "schema schemata schemas",
+        "sediment sediments sedimentary sedimentation sedimentations",
+        "see sees saw seen seeing seeings seer seers",
+        "seek seeks sought seeking seekings seeker seekers",
+        "segment segments segmented segmenting segmentings segmenter segmenters segmentation segmentations",
+        "self selves selfless",
+        "sell sells sold selling sellings seller sellers",
+        "semen",
+        "send sends sent sending sendings sender senders",
+        "sentiment sentiments sentimental",
+        "series",
+        "set sets setting settings",
+        "several severally",
+        "sew sews sewed sewn sewing sewings sewer sewers",
+        "sewer sewers sewerless",
+        "shake shakes shook shaken shaking shakings shaker shakers",
+        "shall should",
+        "shaman shamans",
+        "shave shaves shaved shaven shaving shavings shaver shavers shaveless",
+        "she her hers herself",
+        "sheaf sheaves sheafless",
+        "sheep",
+        "shelf shelves shelved shelfing shelvings shelver shelvers shelfless",
+        "shine shines shined shone shining shinings shiner shiners shineless",
+        "shoe shoes shoed shod shoeing shoeings shoer shoers shoeless",
+        "shoot shoots shot shooting shootings shooter shooters",
+        "shot shots",
+        "show shows showed shown showing showings shower showers",
+        "shower showers showery showerless",
+        "shrink shrinks shrank shrunk shrinking shrinkings shrinker shrinkers shrinkable",
+        "sideways",
+        "simply simple simpler simplest",
+        "since",
+        "sing sings sang sung singing singings singer singers singable",
+        "sink sinks sank sunk sinking sinkings sinker sinkers sinkable",
+        "sit sits sat sitting sittings sitter sitters",
+        "ski skis skied skiing skiings skier skiers skiless skiable",
+        "sky skies",
+        "slay slays slew slain slaying slayings slayer slayers",
+        "sleep sleeps slept sleeping sleepings sleeper sleepers sleepless",
+        "so",
+        "some",
+        "something",
+        "sometime sometimes",
+        "soon",
+        "spa spas",
+        "speak speaks spoke spoken speaking speakings speaker speakers",
+        "species specie",
+        "spectrum spectra spectrums",
+        "speed speeds sped speeded speeding speedings speeder speeders",
+        "spend spends spent spending spendings spender spenders spendable",
+        "spin spins spun spinning spinnings spinner spinners",
+        "spoke spokes",
+        "spring springs sprang sprung springing springings springer springers springy springiness",
+        "staff staffs staves staffed staffing staffings staffer staffers",
+        "stand stands stood standing standings",
+        "stasis stases",
+        "steal steals stole stolen stealing stealings stealer stealers",
+        "stick sticks stuck sticking stickings sticker stickers",
+        "stigma stigmata stigmas stigmatize stigmatizes stigmatized stigmatizing",
+        "stimulus stimuli",
+        "sting stings stung stinging stingings stinger stingers",
+        "stink stinks stank stunk stinking stinkings stinker stinkers",
+        "stomach stomachs",
+        "stratum strata stratums",
+        "stride strides strode stridden striding stridings strider striders",
+        "string strings strung stringing stringings stringer stringers stringless",
+        "strive strives strove striven striving strivings striver strivers",
+        "strum strums strummed strumming strummings strummer strummers strummable",
+        "such",
+        "suffer suffers suffered suffering sufferings sufferer sufferers sufferable",
+        "suggest suggests suggested suggesting suggestings suggester suggesters suggestor suggestors suggestive suggestion suggestions suggestible suggestable",
+        "sum sums summed summing summings summer summers",
+        "summer summers summered summering summerings",
+        "supplement supplements supplemented supplementing supplementings supplementation supplementer supplementers supplementary supplemental",
+        "supply supplies supplied supplying supplyings supplier suppliers",
+        "swear swears swore sworn swearing swearings swearer swearers",
+        "sweep sweeps swept sweeping sweepings sweeper sweepers",
+        "swell swells swelled swollen swelling swellings",
+        "swim swims swam swum swimming swimmings swimmer swimmers swimable",
+        "swine",
+        "swing swings swung swinging swingings swinger swingers",
+        "syllabus syllabi syllabuses",
+        "symposium symposia symposiums",
+        "synapse synapses",
+        "synapsis synapses",
+        "synopsis synopses",
+        "synthesis syntheses",
+        "tableau tableaux tableaus",
+        "take takes took taken taking takings taker takers takable",
+        "teach teaches taught teaching teachings teacher teachers teachable",
+        "tear tears tore torn tearing tearings tearer tearers tearable",
+        "tegument teguments",
+        "tell tells told telling tellings teller tellers tellable",
+        "temperament temperaments temperamental temperamentally",
+        "tenement tenements",
+        "the",
+        "there theres",
+        "theses thesis",
+        "they them their theirs themselves",
+        "thief thieves thieving thievings",
+        "think thinks thought thinking thinker thinkers thinkable",
+        "this that these those",
+        "thought thoughts thougtful thoughtless",
+        "throw throws threw thrown throwing throwings thrower throwers throwable",
+        "tic tics",
+        "tie ties tied tying tyings tier tiers tieable tieless",
+        "tier tiers tiered tiering tierings tierer tierers",
+        "to",
+        "toe toes toed toeing toeings toer toers toeless",
+        "together togetherness",
+        "too",
+        "tooth teeth toothless",
+        "topaz topazes",
+        "torment torments tormented tormenting tormentings tormenter tormenters tormentable",
+        "toward towards",
+        "tread treads trod trodden treading treadings treader treaders",
+        "tread treads treadless retread retreads",
+        "true truly trueness",
+        "two twos",
+        "u",
+        "under",
+        "underlay underlays underlaid underlaying underlayings underlayer underlayers",
+        "underlie underlies underlay underlain underlying underlier underliers",
+        "undo undoes undid undone undoing undoings undoer undoers undoable",
+        "unrest unrestful",
+        "until",
+        "unto",
+        "up",
+        "upon",
+        "upstairs",
+        "use uses user users used using useful useless",
+        "various variously",
+        "vehement vehemently vehemence",
+        "versus",
+        "very",
+        "visit visits visited visiting visitings visitor visitors",
+        "vortex vortexes vortices",
+        "wake wakes woke waked woken waking wakings waker wakers wakeful wakefulness wakefulnesses wakeable",
+        "wear wears wore worn wearing wearings wearer wearers wearable",
+        "weather weathers weathered weathering weatherly",
+        "weave weaves wove woven weaving weavings weaver weavers weaveable",
+        "weep weeps wept weeping weepings weeper weepers",
+        "wharf wharfs wharves",
+        "where wheres",
+        "whereas whereases",
+        "whether whethers",
+        "while whiles whilst whiled whiling",
+        "whiz whizzes whizzed whizzing whizzings whizzer whizzers",
+        "who whom whos whose whoses",
+        "why whys",
+        "wife wives wifeless",
+        "will wills willed willing willings willful",
+        "will would",
+        "win wins won winning winnings winner winners winnable",
+        "wind winds wound winding windings winder winders windable",
+        "wind winds windy windless",
+        "with",
+        "within",
+        "without",
+        "wolf wolves",
+        "woman women womanless womanly",
+        "wound wounds wounded wounding woundings",
+        "write writes wrote written writing writings writer writers writeable",
+        "yeses yes",
+        "yet yets",
+        "you your yours yourself"
+        ]
+
+_exdict = {}
+for exlist in exceptions:
+    for ex in exlist.split(" "):
+        _exdict[ex] = exlist
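+# For example, the entry "ox oxen" above maps both _exdict["ox"] and
+# _exdict["oxen"] to that line, so variations() can return every listed
+# form for either spelling.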
+
+# Programmatic rules
+
+vowels = "aeiouy"
+cons = "bcdfghjklmnpqrstvwxyz"
+
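+# Each rule below pairs a regex whose final group captures a suffix with a
+# comma-separated list of endings to attach to the remaining root.  In an
+# ending, a leading "&" doubles the root's final letter before appending the
+# rest, and a leading "*" adds the variations of the root plus the rest; for
+# example, the (iment) rule's "*y" entry expands "embodiment" into the
+# variations of "embody".
+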
+rules = (
+         # Words ending in S
+
+         # (e.g., happiness, business)
+         (r"[%s].*[%s](iness)" % (vowels, cons), "y,ies,ier,iers,iest,ied,ying,yings,ily,inesses,iment,iments,iless,iful"),
+         # (e.g., baseless, shoeless)
+         (r"[%s].*(eless)" % vowels, "e,es,er,ers,est,ed,ing,ings,eing,eings,ely,eness,enesses,ement,ements,eness,enesses,eful"),
+         # (e.g., gutless, hatless, spotless)
+         (r"[%s][%s][bdgklmnprt]?(less)" % (cons, vowels), ",s,&er,&ers,&est,&ed,&ing,&ings,ly,ness,nesses,ment,ments,ful"),
+         # (e.g., thoughtless, worthless)
+         (r"[%s].*?(less)" % vowels, ",s,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,ful"),
+         # (e.g., baseness, toeness)
+         (r"[%s].*(eness)" % vowels, "e,es,er,ers,est,ed,ing,ings,eing,eings,ely,enesses,ement,ements,eless,eful"),
+         # (e.g., bluntness, grayness)
+         (r"[%s].*(ness)" % vowels, ",s,er,ers,est,ed,ing,ings,ly,nesses,ment,ments,less,ful"),
+         # (e.g., albatross, kiss)
+         (r"[%s]ss" % vowels, "es,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., joyous, fractious, gaseous)
+         (r"[%s].*(ous)" % vowels, "ly,ness"),
+         # (e.g., tries, unties, jollies, beauties)
+         (r"(ies)", "y,ie,yer,yers,ier,iers,iest,ied,ying,yings,yness,iness,ieness,ynesses,inesses,ienesses,iment,iement,iments,iements,yless,iless,ieless,yful,iful,ieful"),
+         # (e.g., crisis, kinesis)
+         (r"[%s].*(sis)" % vowels, "ses,sises,sisness,sisment,sisments,sisless,sisful"),
+         # (e.g., bronchitis, bursitis)
+         (r"[%s].*(is)" % vowels, "es,ness,ment,ments,less,ful"),
+         (r"[%s].*[cs]h(es)" % vowels, ",e,er,ers,est,ed,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ement,ments,ements,less,eless,ful,eful"),
+         # (e.g., tokenizes) // adds British variations
+         (r"[%s].*[%s](izes)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ise,iser,isers,ised,ising,isings,isation,isations"),
+         # (e.g., tokenises) // British variant  // ~expertise
+         (r"[%s].*[%s](ises)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ise,iser,isers,ised,ising,isings,isation,isations"),
+         # (e.g., aches, arches)
+         (r"[%s].*[jsxz](es)" % vowels, ",e,er,ers,est,ed,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ement,ments,ements,less,eless,ful,eful"),
+         # (e.g., judges, abridges)
+         (r"[%s].*dg(es)" % vowels, "e,er,ers,est,ed,ing,ings,ely,eness,enesses,ment,ments,ement,ements,eless,eful"),
+         # (e.g., trees, races, likes, agrees) covers all other -es words
+         (r"e(s)", ",*"),
+         # (e.g., segments, bisegments, cosegments)
+         (r"segment(s)", ",*"),
+         # (e.g., pigments, depigments, repigments)
+         (r"pigment(s)", ",*"),
+         # (e.g., judgments, abridgments)
+         (r"[%s].*dg(ments)" % vowels, "ment,*ments"),
+         # (e.g., merriments, embodiments) -iment in turn will generate y and *y (redo y)
+         (r"[%s].*[%s]iment(s)" % (vowels, cons), ",*"),
+         # (e.g., atonements, entrapments)
+         (r"[%s].*ment(s)" % vowels, ",*"),
+         # (e.g., viewers, meters, traders, transfers)
+         (r"[%s].*er(s)" % vowels, ",*"),
+         # (e.g., unflags) polysyllables
+         (r"[%s].*[%s][%s][bdglmnprt](s)" % (vowels, cons, vowels), ",*"),
+         # (e.g., frogs) monosyllables
+         (r"[%s][%s][bdglmnprt](s)" % (vowels, cons), ",*"),
+         # (e.g., killings, muggings)
+         (r"[%s].*ing(s)" % vowels, ",*"),
+         # (e.g., hulls, tolls)
+         (r"[%s].*ll(s)" % vowels, ",*"),
+         # (e.g., boas, polkas, spas) don't generate latin endings
+         (r"a(s)", ",er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., beads, toads)
+         (r"[%s].*[%s].*(s)" % (vowels, cons), ",*"),
+         # (e.g., boas, zoos)
+         (r"[%s].*[%s](s)" % (cons, vowels), ",er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., ss, sss, ssss) no vowel (vowel case is already handled above)
+         (r"ss()", ""),
+         # (e.g., cds, lcds, m-16s) no vowel (can be a plural noun, but not verb)
+         (r"[%s].*[%s1234567890](s)" % (cons, cons), ""),
+
+         # Words ending in E
+
+         # (e.g., apple, so it doesn't include apply)
+         (r"appl(e)", "es,er,ers,est,ed,ing,ings,ely,eness,enesses,ement,ements,eless,eful"),
+         # (e.g., supple, so it doesn't include supply)
+         (r"suppl(e)", "es,er,ers,est,ed,ing,ings,ely,eness,enesses,ement,ements,eless,eful"),
+         # (e.g., able, abominable, fungible, table, enable, idle, subtle)
+         (r"[%s].*[%s]l(e)" % (vowels, cons), "es,er,ers,est,ed,ing,ings,y,ely,eness,enesses,ement,ements,eless,eful"),
+         # (e.g., bookie, magpie, vie)
+         (r"(ie)", "ies,ier,iers,iest,ied,ying,yings,iely,ieness,ienesses,iement,iements,ieless,ieful"),
+         # (e.g., dye, redye, redeye)
+         (r"ye()", "s,r,rs,st,d,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., judge, abridge)
+         (r"[%s].*dg(e)" % vowels, "es,er,ers,est,ed,ing,ings,ely,eness,enesses,ment,ments,less,ful,ement,ements,eless,eful"),
+         # (e.g., true, due, imbue)
+         (r"u(e)", "es,er,ers,est,ed,ing,ings,eing,eings,ly,ely,eness,enesses,ment,ments,less,ful,ement,ements,eless,eful"),
+         # (e.g., tokenize) // adds British variations
+         (r"[%s].*[%s](ize)" % (vowels, cons), "izes,izer,izers,ized,izing,izings,ization,izations,ise,ises,iser,isers,ised,ising,isings,isation,isations"),
+         # (e.g., tokenise) // British variant  // ~expertise
+         (r"[%s].*[%s](ise)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ises,iser,isers,ised,ising,isings,isation,isations"),
+         # (e.g., tree, agree, rage, horse, hoarse)
+         (r"[%s].*[%s](e)" % (vowels, cons), "es,er,ers,est,ed,ing,ings,eing,eings,ely,eness,enesses,ement,ements,eless,eful"),
+
+         # Words ending in -ED
+
+         # (e.g., agreed, freed, decreed, treed)
+         (r"ree(d)", "ds,der,ders,ded,ding,dings,dly,dness,dnesses,dment,dments,dless,dful,,*"),
+         # (e.g., feed, seed, Xweed)
+         (r"ee(d)", "ds,der,ders,ded,ding,dings,dly,dness,dnesses,dment,dments,dless,dful"),
+         # (e.g., tried)
+         (r"[%s](ied)" % cons, "y,ie,ies,ier,iers,iest,ying,yings,ily,yly,iness,yness,inesses,ynesses,iment,iments,iless,iful,yment,yments,yless,yful"),
+         # (e.g., controlled, fulfilled, rebelled)
+         (r"[%s].*[%s].*l(led)" % (vowels, cons), ",s,er,ers,est,ing,ings,ly,ness,nesses,ment,ments,less,ful,&,&s,&er,&ers,&est,&ing,&ings,&y,&ness,&nesses,&ment,&ments,&ful"),
+         # (e.g., pulled, filled, fulled)
+         (r"[%s].*l(led)" % vowels, "&,&s,&er,&ers,&est,&ing,&ings,&y,&ness,&nesses,&ment,&ments,&ful"),
+         # (e.g., hissed, grossed)
+         (r"[%s].*s(sed)" % vowels, "&,&es,&er,&ers,&est,&ing,&ings,&ly,&ness,&nesses,&ment,&ments,&less,&ful"),
+         # (e.g., hugged, trekked)
+         (r"[%s][%s](?P<ed1>[bdgklmnprt])((?P=ed1)ed)", ",s,&er,&ers,&est,&ing,&ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., tokenize) // adds British variations
+         (r"[%s].*[%s](ized)" % (vowels, cons), "izes,izer,izers,ize,izing,izings,ization,izations,ise,ises,iser,isers,ised,ising,isings,isation,isations"),
+         # (e.g., tokenise) // British variant  // ~expertise
+         (r"[%s].*[%s](ized)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ises,iser,isers,ise,ising,isings,isation,isations"),
+         # (e.g., spoiled, tooled, tracked, roasted, atoned, abridged)
+         (r"[%s].*(ed)" % vowels, ",e,s,es,er,ers,est,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ement,ments,ements,less,eless,ful,eful"),
+         # (e.g., bed, sled) words with a single e as the only vowel
+         (r"ed()", "s,&er,&ers,&est,&ed,&ing,&ings,ly,ness,nesses,ment,ments,less,ful"),
+
+         # Words ending in -ER
+
+         # (e.g., altimeter, ammeter, odometer, perimeter)
+         (r"meter()", "s,er,ers,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., agreer, beer, budgeteer, engineer, freer)
+         (r"eer()", "eers,eered,eering,eerings,eerly,eerness,eernesses,eerment,eerments,eerless,eerful,ee,ees,eest,eed,eeing,eeings,eely,eeness,eenesses,eement,eements,eeless,eeful,eerer,eerers,eerest"),
+         # (e.g., acidifier, saltier)
+         (r"[%s].*[%s](ier)" % (vowels, cons), "y,ie,ies,iest,ied,ying,yings,ily,yly,iness,yness,inesses,ynesses,yment,yments,yless,yful,iment,iments,iless,iful,iers,iered,iering,ierings,ierly,ierness,iernesses,ierment,ierments,ierless,ierful,ierer,ierers,ierest"),
+         # (e.g., puller, filler, fuller)
+         (r"[%s].*l(ler)" % vowels, "&,&s,&est,&ed,&ing,&ings,ly,lely,&ness,&nesses,&ment,&ments,&ful,&ers,&ered,&ering,&erings,&erly,&erness,&ernesses,&erments,&erless,&erful"),
+         # (e.g., hisser, grosser)
+         (r"[%s].*s(ser)" % vowels, "&,&es,&est,&ed,&ing,&ings,&ly,&ness,&nesses,&ment,&ments,&less,&ful,&ers,&ered,&ering,&erings,&erly,&erness,&ernesses,&erment,&erments,&erless,&erful"),
+         # (e.g., bigger, trekker, hitter)
+         (r"[%s][%s](?P<er1>[bdgkmnprt])((?P=er1)er)" % (cons, vowels), "s,&est,&ed,&ing,&ings,ly,ness,nesses,ment,ments,less,ful,&ers,&ered,&ering,&erings,&erly,&erness,&ernesses,&erments,&erless,&erful"),
+         # (e.g., tokenize) // adds British variations
+         (r"[%s].*[%s](izer)" % (vowels, cons), "izes,ize,izers,ized,izing,izings,ization,izations,ise,ises,iser,isers,ised,ising,isings,isation,isations"),
+         # (e.g., tokenise) // British variant  // ~expertise
+         (r"[%s].*[%s](iser)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ises,ise,isers,ised,ising,isings,isation,isations"),
+         #(e.g., actioner, atoner, icer, trader, accruer, churchgoer, prefer)
+         (r"[%s].*(er)" % vowels, ",e,s,es,est,ed,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ments,less,ful,ement,ements,eless,eful,ers,ered,erred,ering,erring,erings,errings,erly,erness,ernesses,erment,erments,erless,erful,erer,erers,erest,errer,errers,errest"),
+
+         # Words ending in -EST
+
+         # (e.g., sliest, happiest, wittiest)
+         (r"[%s](iest)" % cons, "y,ies,ier,iers,ied,ying,yings,ily,yly,iness,yness,inesses,ynesses,iment,iments,iless,iful"),
+         # (e.g., fullest)
+         (r"[%s].*l(lest)" % vowels, "&,&s,&er,&ers,&ed,&ing,&ings,ly,&ness,&nesses,&ment,&ments,&ful"),
+         # (e.g.,  grossest)
+         (r"[%s].*s(sest)" % vowels, "&,&es,&er,&ers,&ed,&ing,&ings,&ly,&ness,&nesses,&ment,&ments,&less,&ful"),
+         # (e.g., biggest)
+         (r"[%s][%s](?P<est1>[bdglmnprst])((?P=est1)est)" % (cons, vowels), ",s,&er,&ers,&ed,&ing,&ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., basest, archest, rashest)
+         (r"[%s].*([cs]h|[jsxz])(est)" % vowels, "e,es,er,ers,ed,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ments,less,ful,ement,ements,eless,eful,ests,ester,esters,ested,esting,estings,estly,estness,estnesses,estment,estments,estless,estful"),
+         # (e.g., severest, Xinterest, merest)
+         (r"er(est)", "e,es,er,ers,ed,eing,eings,ely,eness,enesses,ement,ements,eless,eful,ests,ester,esters,ested,esting,estings,estly,estness,estnesses,estment,estments,estless,estful"),
+         # (e.g., slickest, coolest, ablest, amplest, protest, quest)
+         (r"[%s].*(est)" % vowels, ",e,s,es,er,ers,ed,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ments,less,ful,ement,ements,eless,eful,ests,ester,esters,ested,esting,estings,estly,estness,estnesses,estment,estments,estless,estful"),
+         # (e.g., rest, test)
+         (r"est", "s,er,ers,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+
+         # Words ending in -FUL
+
+         # (e.g., beautiful, plentiful)
+         (r"[%s].*[%s](iful)" % (vowels, cons), "ifully,ifulness,*y"),
+         # (e.g., hopeful, sorrowful)
+         (r"[%s].*(ful)" % vowels, "fully,fulness,,*"),
+
+         # Words ending in -ICAL
+
+         (r"[%s].*(ical)" % vowels, "ic,ics,ically"),
+
+         # Words ending in -IC
+
+         (r"[%s].*(ic)" % vowels, "ics,ical,ically"),
+
+         # Words ending in -ING
+
+         # (e.g., dying, crying, supplying)
+         (r"[%s](ying)" % cons, "yings,ie,y,ies,ier,iers,iest,ied,iely,yly,ieness,yness,ienesses,ynesses,iment,iments,iless,iful"),
+         # (e.g., pulling, filling, fulling)
+         (r"[%s].*l(ling)" % vowels, ",*,&,&s,&er,&ers,&est,&ed,&ings,&ness,&nesses,&ment,&ments,&ful"),
+         # (e.g., hissing, grossing, processing)
+         (r"[%s].*s(sing)" % vowels, "&,&s,&er,&ers,&est,&ed,&ings,&ly,&ness,&nesses,&ment,&ments,&less,&ful"),
+         # (e.g., hugging, trekking)
+         (r"[%s][%s](?P<ing1>[bdgklmnprt])((?P=ing1)ing)" % (cons, vowels), ",s,&er,&ers,&est,&ed,&ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., freeing, agreeing)
+         (r"eeing()", "ee,ees,eer,eers,eest,eed,eeings,eely,eeness,eenesses,eement,eements,eeless,eeful"),
+         # (e.g., ageing, aweing)
+         (r"[%s].*(eing)" % vowels, "e,es,er,ers,est,ed,eings,ely,eness,enesses,ement,ements,eless,eful"),
+         # (e.g., toying, playing)
+         (r"[%s].*y(ing)" % vowels, ",s,er,ers,est,ed,ings,ly,ingly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., editing, crediting, expediting, siting, exciting)
+         (r"[%s].*[%s][eio]t(ing)" % (vowels, cons), ",*,*e,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
+         # (e.g., robing, siding, doling, translating, flaking)
+         (r"[%s][%s][bdgklmt](ing)" % (cons, vowels), "*e,ings,inger,ingers,ingest,inged,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
+         # (e.g., tokenize) // adds British variations
+         (r"[%s].*[%s](izing)" % (vowels, cons), "izes,izer,izers,ized,ize,izings,ization,izations,ise,ises,iser,isers,ised,ising,isings,isation,isations"),
+         # (e.g., tokenise) // British variant  // ~expertise
+         (r"[%s].*[%s](ising)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ises,iser,isers,ised,ise,isings,isation,isations"),
+         # (e.g., icing, aging, achieving, amazing, housing)
+         (r"[%s][cgsvz](ing)" % vowels, "*e,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
+         # (e.g., dancing, troubling, arguing, bluing, carving)
+         (r"[%s][clsuv](ing)" % cons, "*e,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
+         # (e.g., charging, bulging)
+         (r"[%s].*[lr]g(ing)" % vowels, "*e,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
+         # (e.g., farming, harping, interesting, bedspring, redwing)
+         (r"[%s].*[%s][bdfjkmnpqrtwxz](ing)" % (vowels, cons), ",*,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
+         # (e.g., spoiling, reviling, autoing, egging, hanging, hingeing)
+         (r"[%s].*(ing)" % vowels, ",*,*e,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
+         # (e.g., wing, thing) monosyllables
+         (r"(ing)", "ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
+
+         # -LEAF rules omitted
+
+         # Words ending in -MAN
+         # (e.g., policewomen, hatchetmen, dolmen)
+         (r"(man)", "man,mens,mener,meners,menest,mened,mening,menings,menly,menness,mennesses,menless,menful"),
+
+         # Words ending in -MENT
+
+         # (e.g., segment, bisegment, cosegment, pigment, depigment, repigment)
+         (r"segment|pigment", "s,ed,ing,ings,er,ers,ly,ness,nesses,less,ful"),
+         # (e.g., judgment, abridgment)
+         (r"[%s].*dg(ment)" % vowels, "*e"),
+         # (e.g., merriment, embodiment)
+         (r"[%s].*[%s](iment)" % (vowels, cons), "*y"),
+         # (e.g., atonement, entrapment)
+         (r"[%s].*[%s](ment)" % (vowels, cons), ",*"),
+
+         # Words ending in -O
+
+         # (e.g., taboo, rodeo)
+         (r"[%s]o()" % vowels, "s,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., tomato, bonito)
+         (r"[%s].*o()" % vowels, "s,es,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+
+         # Words ending in -UM
+
+         # (e.g., datum, quantum, tedium, strum, [oil]drum, vacuum)
+         (r"[%s].*(um)" % vowels, "a,ums,umer,ummer,umers,ummers,umed,ummed,uming,umming,umings,ummings,umness,umments,umless,umful"),
+
+         # Words ending in -Y
+
+         # (e.g., ably, horribly, wobbly)
+         (r"[%s].*b(ly)" % vowels, "le,les,ler,lers,lest,led,ling,lings,leness,lenesses,lement,lements,leless,leful"),
+         # (e.g., happily, dizzily)
+         (r"[%s].*[%s](ily)" % (vowels, cons), "y,ies,ier,iers,iest,ied,ying,yings,yness,iness,ynesses,inesses,iment,iments,iless,iful"),
+         # (e.g., peaceful+ly)
+         (r"[%s].*ful(ly)" % vowels, ",*"),
+         # (e.g., fully, folly, coolly, fatally, dally)
+         (r"[%s].*l(ly)" % vowels, ",*,lies,lier,liers,liest,lied,lying,lyings,liness,linesses,liment,liments,liless,liful,*l"),
+         # (e.g., monopoly, Xcephaly, holy)
+         (r"[%s](ly)" % vowels, "lies,lier,liers,liest,lied,lying,lyings,liness,linesses,liment,liments,liless,liful"),
+         # (e.g., frequently, comely, deeply, apply, badly)
+         (r"[%s].*(ly)" % vowels, ",*,lies,lier,liers,liest,lied,lying,lyings,liness,linesses,lyless,lyful"),
+         # (e.g., happy, ply, spy, cry)
+         (r"[%s](y)" % cons, "ies,ier,iers,iest,ied,ying,yings,ily,yness,iness,ynesses,inesses,iment,iments,iless,iful,yment,yments,yless,yful"),
+         # (e.g., betray, gay, stay)
+         (r"[%s]y()" % vowels, "s,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+
+         # Root rules
+
+         # (e.g., fix, arch, rash)
+         (r"[%s].*(ch|sh|[jxz])()" % vowels, "es,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., unflag, open, besot)
+         (r"[%s].*[%s][%s][bdglmnprt]()" % (vowels, cons, vowels), "s,er,ers,est,ed,ing,ings,&er,&ers,&est,&ed,&ing,&ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., bed, cop)
+         (r"[%s][%s][bdglmnprt]()" % (cons, vowels), "s,&er,&ers,&est,&ed,&ing,&ings,ly,ness,nesses,ment,ments,less,ful"),
+         # (e.g., schemata, automata)
+         (r"[%s].*[%s][%s]ma(ta)" % (vowels, cons, vowels), ",s,tas,tum,tums,ton,tons,tic,tical"),
+         # (e.g., chordata, data, errata, sonata, toccata)
+         (r"[%s].*t(a)" % vowels, "as,ae,um,ums,on,ons,ic,ical"),
+         # (e.g., polka, spa, schema, ova, polyhedra)
+         (r"[%s].*[%s](a)" % (vowels, cons), "as,aed,aing,ae,ata,um,ums,on,ons,al,atic,atical"),
+         # (e.g., full)
+         (r"[%s].*ll()" % vowels, "s,er,ers,est,ed,ing,ings,y,ness,nesses,ment,ments,-less,ful"),
+         # (e.g., spoon, rhythm)
+         (r"[%s].*()", "s,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
+         )
+
+# There are a limited number of named groups available in a single
+# regular expression, so we'll partition the list of rules into
+# smaller chunks.
+
+_partition_size = 20
+_partitions = []
+for p in xrange(0, len(rules) // _partition_size + 1):
+    start = p * _partition_size
+    end = (p + 1) * _partition_size
+    pattern = "|".join("(?P<_g%s>%s)$" % (i, r[0])
+                       for i, r in enumerate(rules[start:end]))
+    _partitions.append(re.compile(pattern))
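+# Each compiled partition is an alternation of the form
+# "(?P<_g0>rule0)$|(?P<_g1>rule1)$|...", so a successful search also tells
+# us, via the matching group's name, which rule within the partition fired.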
+
+
+def variations(word):
+    """Given an English word, returns a collection of morphological variations
+    on the word by algorithmically adding and removing suffixes. The variation
+    list may contain non-words (e.g. render -> renderment).
+
+    >>> variations("pull")
+    set(['pull', 'pullings', 'pullnesses', 'pullful', 'pullment', 'puller', ... ])
+    """
+
+    if word in _exdict:
+        return _exdict[word].split(" ")
+
+    for i, p in enumerate(_partitions):
+        match = p.search(word)
+        if match:
+            # Get the named group that matched
+            num = int([k for k, v in match.groupdict().iteritems()
+                       if v is not None and k.startswith("_g")][0][2:])
+            # Get the positional groups for the matched group (all other
+            # positional groups are None)
+            groups = [g for g in match.groups() if g is not None]
+            ending = groups[-1]
+            root = word[:0 - len(ending)] if ending else word
+
+            out = set((word, ))
+            results = rules[i * _partition_size + num][1]
+            for result in results.split(","):
+                if result.startswith("&"):
+                    out.add(root + root[-1] + result[1:])
+                elif result.startswith("*"):
+                    # set.union() returns a new set, so use update() to
+                    # actually add the recursive variations to the result
+                    out.update(variations(root + result[1:]))
+                else:
+                    out.add(root + result)
+            return set(out)
+
+    return [word]
+
+
+if __name__ == '__main__':
+    import time
+    t = time.clock()
+    s = variations("rendering")
+    print time.clock() - t
+    print len(s)
+
diff --git a/lib/whoosh/whoosh/lang/paicehusk.py b/lib/whoosh/whoosh/lang/paicehusk.py
new file mode 100644
index 0000000..52bedd0
--- /dev/null
+++ b/lib/whoosh/whoosh/lang/paicehusk.py
@@ -0,0 +1,249 @@
+"""This module contains an object that implements the Paice-Husk stemming
+algorithm.
+
+If you just want to use the standard Paice-Husk stemming rules, use the
+module's ``stem()`` function::
+
+    stemmed_word = stem(word)
+
+If you want to use a custom rule set, read the rules into a string where the
+rules are separated by newlines, and instantiate the object with the string,
+then use the object's stem method to stem words::
+
+    stemmer = PaiceHuskStemmer(my_rules_string)
+    stemmed_word = stemmer.stem(word)
+"""
+
+import re
+from collections import defaultdict
+
+
+class PaiceHuskStemmer(object):
+    """Implements the Paice-Husk stemming algorithm.
+    """
+
+    rule_expr = re.compile(r"""
+    ^(?P<ending>\w+)
+    (?P<intact>[*]?)
+    (?P<num>\d+)
+    (?P<append>\w*)
+    (?P<cont>[.>])
+    """, re.UNICODE | re.VERBOSE)
+
+    stem_expr = re.compile("^\w+", re.UNICODE)
+
+    def __init__(self, ruletable):
+        """
+        :param ruletable: a string containing the rule data, separated
+            by newlines.
+        """
+        self.rules = defaultdict(list)
+        self.read_rules(ruletable)
+
+    def read_rules(self, ruletable):
+        rule_expr = self.rule_expr
+        rules = self.rules
+
+        for line in ruletable.split("\n"):
+            line = line.strip()
+            if not line:
+                continue
+
+            match = rule_expr.match(line)
+            if match:
+                ending = match.group("ending")[::-1]
+                lastchar = ending[-1]
+                intact = match.group("intact") == "*"
+                num = int(match.group("num"))
+                append = match.group("append")
+                cont = match.group("cont") == ">"
+
+                rules[lastchar].append((ending, intact, num, append, cont))
+            else:
+                raise Exception("Bad rule: %r" % line)
+
+    def first_vowel(self, word):
+        positions = [p for p in [word.find(v) for v in "aeiou"]
+                     if p > -1]
+        # Guard against words with no a/e/i/o/u at all (e.g. "dry") so that
+        # min() is never called on an empty list.
+        vp = min(positions) if positions else len(word)
+        yp = word.find("y")
+        if yp > 0 and yp < vp:
+            return yp
+        return vp
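+    # For example, first_vowel("crying") returns 2: "y" at position 2 comes
+    # before the first of a/e/i/o/u ("i" at position 3).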
+
+    def strip_prefix(self, word):
+        for prefix in ("kilo", "micro", "milli", "intra", "ultra", "mega",
+                       "nano", "pico", "pseudo"):
+            if word.startswith(prefix):
+                return word[len(prefix):]
+        return word
+
+    def stem(self, word):
+        """Returns a stemmed version of the argument string.
+        """
+
+        rules = self.rules
+        match = self.stem_expr.match(word)
+        if not match:
+            return word
+        stem = self.strip_prefix(match.group(0))
+
+        is_intact = True
+        continuing = True
+        while continuing:
+            pfv = self.first_vowel(stem)
+            rulelist = rules.get(stem[-1])
+            if not rulelist:
+                break
+
+            continuing = False
+            for ending, intact, num, append, cont in rulelist:
+                if stem.endswith(ending):
+                    if intact and not is_intact:
+                        continue
+                    newlen = len(stem) - num + len(append)
+
+                    if ((pfv == 0 and newlen < 2)
+                        or (pfv > 0 and newlen < 3)):
+                        # If word starts with vowel, minimum stem length is 2.
+                        # If word starts with consonant, minimum stem length
+                        # is 3.
+                        continue
+
+                    is_intact = False
+                    stem = stem[:0 - num] + append
+
+                    continuing = cont
+                    break
+
+        return stem
+
+# The default rules for the Paice-Husk stemming algorithm
+
+defaultrules = """
+ai*2.     { -ia > -   if intact }
+a*1.      { -a > -    if intact }
+bb1.      { -bb > -b   }
+city3s.   { -ytic > -ys }
+ci2>      { -ic > -    }
+cn1t>     { -nc > -nt  }
+dd1.      { -dd > -d   }
+dei3y>    { -ied > -y  }
+deec2ss.  { -ceed > -cess }
+dee1.     { -eed > -ee }
+de2>      { -ed > -    }
+dooh4>    { -hood > -  }
+e1>       { -e > -     }
+feil1v.   { -lief > -liev }
+fi2>      { -if > -    }
+gni3>     { -ing > -   }
+gai3y.    { -iag > -y  }
+ga2>      { -ag > -    }
+gg1.      { -gg > -g   }
+ht*2.     { -th > -   if intact }
+hsiug5ct. { -guish > -ct }
+hsi3>     { -ish > -   }
+i*1.      { -i > -    if intact }
+i1y>      { -i > -y    }
+ji1d.     { -ij > -id   --  see nois4j> & vis3j> }
+juf1s.    { -fuj > -fus }
+ju1d.     { -uj > -ud  }
+jo1d.     { -oj > -od  }
+jeh1r.    { -hej > -her }
+jrev1t.   { -verj > -vert }
+jsim2t.   { -misj > -mit }
+jn1d.     { -nj > -nd  }
+j1s.      { -j > -s    }
+lbaifi6.  { -ifiabl > - }
+lbai4y.   { -iabl > -y }
+lba3>     { -abl > -   }
+lbi3.     { -ibl > -   }
+lib2l>    { -bil > -bl }
+lc1.      { -cl > c    }
+lufi4y.   { -iful > -y }
+luf3>     { -ful > -   }
+lu2.      { -ul > -    }
+lai3>     { -ial > -   }
+lau3>     { -ual > -   }
+la2>      { -al > -    }
+ll1.      { -ll > -l   }
+mui3.     { -ium > -   }
+mu*2.     { -um > -   if intact }
+msi3>     { -ism > -   }
+mm1.      { -mm > -m   }
+nois4j>   { -sion > -j }
+noix4ct.  { -xion > -ct }
+noi3>     { -ion > -   }
+nai3>     { -ian > -   }
+na2>      { -an > -    }
+nee0.     { protect  -een }
+ne2>      { -en > -    }
+nn1.      { -nn > -n   }
+pihs4>    { -ship > -  }
+pp1.      { -pp > -p   }
+re2>      { -er > -    }
+rae0.     { protect  -ear }
+ra2.      { -ar > -    }
+ro2>      { -or > -    }
+ru2>      { -ur > -    }
+rr1.      { -rr > -r   }
+rt1>      { -tr > -t   }
+rei3y>    { -ier > -y  }
+sei3y>    { -ies > -y  }
+sis2.     { -sis > -s  }
+si2>      { -is > -    }
+ssen4>    { -ness > -  }
+ss0.      { protect  -ss }
+suo3>     { -ous > -   }
+su*2.     { -us > -   if intact }
+s*1>      { -s > -    if intact }
+s0.       { -s > -s    }
+tacilp4y. { -plicat > -ply }
+ta2>      { -at > -    }
+tnem4>    { -ment > -  }
+tne3>     { -ent > -   }
+tna3>     { -ant > -   }
+tpir2b.   { -ript > -rib }
+tpro2b.   { -orpt > -orb }
+tcud1.    { -duct > -duc }
+tpmus2.   { -sumpt > -sum }
+tpec2iv.  { -cept > -ceiv }
+tulo2v.   { -olut > -olv }
+tsis0.    { protect  -sist }
+tsi3>     { -ist > -   }
+tt1.      { -tt > -t   }
+uqi3.     { -iqu > -   }
+ugo1.     { -ogu > -og }
+vis3j>    { -siv > -j  }
+vie0.     { protect  -eiv }
+vi2>      { -iv > -    }
+ylb1>     { -bly > -bl }
+yli3y>    { -ily > -y  }
+ylp0.     { protect  -ply }
+yl2>      { -ly > -    }
+ygo1.     { -ogy > -og }
+yhp1.     { -phy > -ph }
+ymo1.     { -omy > -om }
+ypo1.     { -opy > -op }
+yti3>     { -ity > -   }
+yte3>     { -ety > -   }
+ytl2.     { -lty > -l  }
+yrtsi5.   { -istry > - }
+yra3>     { -ary > -   }
+yro3>     { -ory > -   }
+yfi3.     { -ify > -   }
+ycn2t>    { -ncy > -nt }
+yca3>     { -acy > -   }
+zi2>      { -iz > -    }
+zy1s.     { -yz > -ys  }
+"""
+
+# Make the standard rules available as a module-level function
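+# For example, stem("stemming") applies the "gni3>" rule (removing "-ing")
+# and then "mm1." (reducing "-mm" to "-m"), yielding "stem".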
+
+stem = PaiceHuskStemmer(defaultrules).stem
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/lang/phonetic.py b/lib/whoosh/whoosh/lang/phonetic.py
new file mode 100644
index 0000000..f912bfb
--- /dev/null
+++ b/lib/whoosh/whoosh/lang/phonetic.py
@@ -0,0 +1,121 @@
+#encoding: utf8
+
+"""
+This module contains quasi-phonetic encoders for words in different languages.
+"""
+
+import re
+
+# This soundex implementation is adapted from the recipe here:
+# http://code.activestate.com/recipes/52213/
+
+english_codes = '01230120022455012623010202'
+
+
+def soundex_en(word):
+    # english_codes holds the soundex digit for each letter of the alphabet
+    r = ""
+    if word:
+        # Remember first character
+        fc = None
+        prevcode = None
+        for char in word.lower():
+            c = ord(char)
+            if c >= 97 and c <= 122:  # a-z
+                if not fc:
+                    fc = char
+                code = english_codes[c - 97]
+                # Don't append the code if it's the same as the previous
+                if code != prevcode:
+                    r += code
+                prevcode = code
+
+        # Replace first digit with first alpha character
+        r = fc + r[1:]
+
+    return r
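+
+# For example, soundex_en("robert") should return "r01063": the first letter
+# is kept and, unlike classic Soundex, the remaining digits are neither
+# stripped of zeroes nor truncated to a fixed length.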
+
+
+# Quasi-phonetic coder for Spanish, translated to Python from Sebastian
+# Ferreyra's version here:
+# http://www.javalobby.org/java/forums/t16936.html
+
+_esp_codes = (("\\Aw?[uh]?([aeiou])", ""),
+              ("c[eiéí]|z|ll|sh|ch|sch|cc|y[aeiouáéíóú]|ps|bs|x|j|g[eiéí]", "s"),
+              ("[aeiouhwáéíóúü]+", ""),
+              ("y", ""),
+              ("ñ|gn", "n"),
+              ("[dpc]t", "t"),
+              ("c[aouáóú]|ck|q", "k"),
+              ("v", "b"),
+              ("d$", "t"),  # Change a trailing d to a t
+              )
+_esp_codes = tuple((re.compile(pat), repl) for pat, repl in _esp_codes)
+
+
+def soundex_esp(word):
+    word = word.lower()
+    r = ""
+
+    prevcode = None
+    i = 0
+    while i < len(word):
+        code = None
+        for expr, ecode in _esp_codes:
+            match = expr.match(word, i)
+            if match:
+                i = match.end()
+                code = ecode
+                break
+
+        if code is None:
+            code = word[i]
+            i += 1
+
+        if code != prevcode:
+            r += code
+        prevcode = code
+
+    return r
+
+
+# This version of soundex for Arabic is translated to Python from Tammam
+# Koujan's C# version here:
+# http://www.codeproject.com/KB/recipes/ArabicSoundex.aspx
+
+# Create a dictionary mapping arabic characters to digits
+_arabic_codes = {}
+for chars, code in {u'\u0627\u0623\u0625\u0622\u062d\u062e\u0647\u0639\u063a\u0634\u0648\u064a': "0",
+                    u'\u0641\u0628': "1",
+                    u'\u062c\u0632\u0633\u0635\u0638\u0642\u0643': "2",
+                    u'\u062a\u062b\u062f\u0630\u0636\u0637': "3",
+                    u'\u0644': "4",
+                    u'\u0645\u0646': "5",
+                    u'\u0631': "6",
+                    }.iteritems():
+    for char in chars:
+        _arabic_codes[char] = code
+
+
+def soundex_ar(word):
+    if word[0] in "\u0627\u0623\u0625\u0622":
+        word = word[1:]
+
+    r = "0"
+    prevcode = "0"
+    if len(word) > 1:
+        # Discard the first character
+        for char in word[1:]:
+            if char not in _arabic_codes:
+                # Skip characters that have no soundex group so that "code"
+                # is always assigned before it is compared below.
+                continue
+            code = _arabic_codes[char]
+            # Don't append the code if it's the same as the previous
+            if code != prevcode:
+                # If the code is a 0 (vowel), don't process it
+                if code != "0":
+                    r += code
+            prevcode = code
+    return r
+
+if __name__ == "__main__":
+    print soundex_esp("solidad")
+
diff --git a/lib/whoosh/whoosh/lang/porter.py b/lib/whoosh/whoosh/lang/porter.py
new file mode 100644
index 0000000..4046825
--- /dev/null
+++ b/lib/whoosh/whoosh/lang/porter.py
@@ -0,0 +1,190 @@
+"""
+Reimplementation of the
+`Porter stemming algorithm <http://tartarus.org/~martin/PorterStemmer/>`_
+in Python.
+
+In my quick tests, this implementation is about 3.5 times faster than the
+seriously weird Python linked from the official page.
+"""
+
+import re
+
+# Suffix replacement lists
+
+_step2list = {
+              "ational": "ate",
+              "tional": "tion",
+              "enci": "ence",
+              "anci": "ance",
+              "izer": "ize",
+              "bli": "ble",
+              "alli": "al",
+              "entli": "ent",
+              "eli": "e",
+              "ousli": "ous",
+              "ization": "ize",
+              "ation": "ate",
+              "ator": "ate",
+              "alism": "al",
+              "iveness": "ive",
+              "fulness": "ful",
+              "ousness": "ous",
+              "aliti": "al",
+              "iviti": "ive",
+              "biliti": "ble",
+              "logi": "log",
+              }
+
+_step3list = {
+              "icate": "ic",
+              "ative": "",
+              "alize": "al",
+              "iciti": "ic",
+              "ical": "ic",
+              "ful": "",
+              "ness": "",
+              }
+
+
+_cons = "[^aeiou]"
+_vowel = "[aeiouy]"
+_cons_seq = "[^aeiouy]+"
+_vowel_seq = "[aeiou]+"
+
+# m > 0
+_mgr0 = re.compile("^(" + _cons_seq + ")?" + _vowel_seq + _cons_seq)
+# m == 0
+_meq1 = re.compile("^(" + _cons_seq + ")?" + _vowel_seq + _cons_seq + "(" + _vowel_seq + ")?$")
+# m > 1
+_mgr1 = re.compile("^(" + _cons_seq + ")?" + _vowel_seq + _cons_seq + _vowel_seq + _cons_seq)
+# vowel in stem
+_s_v = re.compile("^(" + _cons_seq + ")?" + _vowel)
+# stem of the form consonant(s) + vowel + non-w/x/y consonant (Porter's *o condition)
+_c_v = re.compile("^" + _cons_seq + _vowel + "[^aeiouwxy]$")
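+# For example, _mgr0 (the "measure m > 0" test) matches "trouble" but not
+# "tree", and _c_v matches short stems such as "fil", which step 1b then
+# restores to "file" by adding a final "e".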
+
+# Patterns used in the rules
+
+_ed_ing = re.compile("^(.*)(ed|ing)$")
+_at_bl_iz = re.compile("(at|bl|iz)$")
+_step1b = re.compile("([^aeiouylsz])\\1$")
+_step2 = re.compile("^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$")
+_step3 = re.compile("^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$")
+_step4_1 = re.compile("^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$")
+_step4_2 = re.compile("^(.+?)(s|t)(ion)$")
+_step5 = re.compile("^(.+?)e$")
+
+
+# Stemming function
+
+def stem(w):
+    """Uses the Porter stemming algorithm to remove suffixes from English
+    words.
+
+    >>> stem("fundamentally")
+    "fundament"
+    """
+
+    if len(w) < 3:
+        return w
+
+    first_is_y = w[0] == "y"
+    if first_is_y:
+        w = "Y" + w[1:]
+
+    # Step 1a
+    if w.endswith("s"):
+        if w.endswith("sses"):
+            w = w[:-2]
+        elif w.endswith("ies"):
+            w = w[:-2]
+        elif w[-2] != "s":
+            w = w[:-1]
+
+    # Step 1b
+
+    if w.endswith("eed"):
+        s = w[:-3]
+        if _mgr0.match(s):
+            w = w[:-1]
+    else:
+        m = _ed_ing.match(w)
+        if m:
+            stem = m.group(1)
+            if _s_v.match(stem):
+                w = stem
+                # These patterns are anchored only at the end of the string,
+                # so test the suffix with search() rather than match()
+                if _at_bl_iz.search(w):
+                    w += "e"
+                elif _step1b.search(w):
+                    w = w[:-1]
+                elif _c_v.match(w):
+                    w += "e"
+
+    # Step 1c
+
+    if w.endswith("y"):
+        stem = w[:-1]
+        if _s_v.match(stem):
+            w = stem + "i"
+
+    # Step 2
+
+    m = _step2.match(w)
+    if m:
+        stem = m.group(1)
+        suffix = m.group(2)
+        if _mgr0.match(stem):
+            w = stem + _step2list[suffix]
+
+    # Step 3
+
+    m = _step3.match(w)
+    if m:
+        stem = m.group(1)
+        suffix = m.group(2)
+        if _mgr0.match(stem):
+            w = stem + _step3list[suffix]
+
+    # Step 4
+
+    m = _step4_1.match(w)
+    if m:
+        stem = m.group(1)
+        if _mgr1.match(stem):
+            w = stem
+    else:
+        m = _step4_2.match(w)
+        if m:
+            stem = m.group(1) + m.group(2)
+            if _mgr1.match(stem):
+                w = stem
+
+    # Step 5
+
+    m = _step5.match(w)
+    if m:
+        stem = m.group(1)
+        if _mgr1.match(stem) or (_meq1.match(stem) and not _c_v.match(stem)):
+            w = stem
+
+    if w.endswith("ll") and _mgr1.match(w):
+        w = w[:-1]
+
+    if first_is_y:
+        w = "y" + w[1:]
+
+    return w
+
+if __name__ == '__main__':
+    print stem("fundamentally")
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/lang/porter2.py b/lib/whoosh/whoosh/lang/porter2.py
new file mode 100644
index 0000000..8a40ebe
--- /dev/null
+++ b/lib/whoosh/whoosh/lang/porter2.py
@@ -0,0 +1,316 @@
+"""An implementation of the Porter2 stemming algorithm.
+See http://snowball.tartarus.org/algorithms/english/stemmer.html
+
+Adapted from pyporter2 by Michael Dirolf.
+
+This algorithm is more correct but (at least in this implementation)
+several times slower than the original porter algorithm as implemented
+in stemming.porter.
+"""
+
+import re
+
+r_exp = re.compile(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)")
+ewss_exp1 = re.compile(r"^[aeiouy][^aeiouy]$")
+ewss_exp2 = re.compile(r".*[^aeiouy][aeiouy][^aeiouywxY]$")
+ccy_exp = re.compile(r"([aeiouy])y")
+s1a_exp = re.compile(r"[aeiouy].")
+s1b_exp = re.compile(r"[aeiouy]")
+
+
+def get_r1(word):
+    # exceptional forms
+    if word.startswith('gener') or word.startswith('arsen'):
+        return 5
+    if word.startswith('commun'):
+        return 6
+
+    # normal form
+    match = r_exp.match(word)
+    if match:
+        return match.start(1)
+    return len(word)
+
+
+def get_r2(word):
+    match = r_exp.match(word, get_r1(word))
+    if match:
+        return match.start(1)
+    return len(word)
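+
+# For example, get_r1("beautiful") is 5 and get_r2("beautiful") is 7, i.e.
+# R1 is "iful" and R2 is "ul".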
+
+
+def ends_with_short_syllable(word):
+    if len(word) == 2:
+        if ewss_exp1.match(word):
+            return True
+    if ewss_exp2.match(word):
+        return True
+    return False
+
+
+def is_short_word(word):
+    if ends_with_short_syllable(word):
+        if get_r1(word) == len(word):
+            return True
+    return False
+
+
+def remove_initial_apostrophe(word):
+    if word.startswith("'"):
+        return word[1:]
+    return word
+
+
+def capitalize_consonant_ys(word):
+    if word.startswith('y'):
+        word = 'Y' + word[1:]
+    return ccy_exp.sub('\g<1>Y', word)
+
+
+def step_0(word):
+    if word.endswith("'s'"):
+        return word[:-3]
+    if word.endswith("'s"):
+        return word[:-2]
+    if word.endswith("'"):
+        return word[:-1]
+    return word
+
+
+def step_1a(word):
+    if word.endswith('sses'):
+        return word[:-4] + 'ss'
+    if word.endswith('ied') or word.endswith('ies'):
+        if len(word) > 4:
+            return word[:-3] + 'i'
+        else:
+            return word[:-3] + 'ie'
+    if word.endswith('us') or word.endswith('ss'):
+        return word
+    if word.endswith('s'):
+        preceding = word[:-1]
+        if s1a_exp.search(preceding):
+            return preceding
+        return word
+    return word
+
+
+doubles = ('bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt')
+
+
+def ends_with_double(word):
+    for double in doubles:
+        if word.endswith(double):
+            return True
+    return False
+
+
+def step_1b_helper(word):
+    if word.endswith('at') or word.endswith('bl') or word.endswith('iz'):
+        return word + 'e'
+    if ends_with_double(word):
+        return word[:-1]
+    if is_short_word(word):
+        return word + 'e'
+    return word
+
+
+s1b_suffixes = ('ed', 'edly', 'ing', 'ingly')
+
+
+def step_1b(word, r1):
+    if word.endswith('eedly'):
+        if len(word) - 5 >= r1:
+            return word[:-3]
+        return word
+    if word.endswith('eed'):
+        if len(word) - 3 >= r1:
+            return word[:-1]
+        return word
+
+    for suffix in s1b_suffixes:
+        if word.endswith(suffix):
+            preceding = word[:-len(suffix)]
+            if s1b_exp.search(preceding):
+                return step_1b_helper(preceding)
+            return word
+
+    return word
+
+
+def step_1c(word):
+    if word.endswith('y') or word.endswith('Y') and len(word) > 1:
+        if word[-2] not in 'aeiouy':
+            if len(word) > 2:
+                return word[:-1] + 'i'
+    return word
+
+
+def step_2_helper(word, r1, end, repl, prev):
+    if word.endswith(end):
+        if len(word) - len(end) >= r1:
+            if prev == []:
+                return word[:-len(end)] + repl
+            for p in prev:
+                if word[:-len(end)].endswith(p):
+                    return word[:-len(end)] + repl
+        return word
+    return None
+
+
+s2_triples = (('ization', 'ize', []),
+               ('ational', 'ate', []),
+               ('fulness', 'ful', []),
+               ('ousness', 'ous', []),
+               ('iveness', 'ive', []),
+               ('tional', 'tion', []),
+               ('biliti', 'ble', []),
+               ('lessli', 'less', []),
+               ('entli', 'ent', []),
+               ('ation', 'ate', []),
+               ('alism', 'al', []),
+               ('aliti', 'al', []),
+               ('ousli', 'ous', []),
+               ('iviti', 'ive', []),
+               ('fulli', 'ful', []),
+               ('enci', 'ence', []),
+               ('anci', 'ance', []),
+               ('abli', 'able', []),
+               ('izer', 'ize', []),
+               ('ator', 'ate', []),
+               ('alli', 'al', []),
+               ('bli', 'ble', []),
+               ('ogi', 'og', ['l']),
+               ('li', '', ['c', 'd', 'e', 'g', 'h', 'k', 'm', 'n', 'r', 't']))
+
+
+def step_2(word, r1):
+    for trip in s2_triples:
+        attempt = step_2_helper(word, r1, trip[0], trip[1], trip[2])
+        if attempt:
+            return attempt
+    return word
+
+
+def step_3_helper(word, r1, r2, end, repl, r2_necessary):
+    if word.endswith(end):
+        if len(word) - len(end) >= r1:
+            if not r2_necessary:
+                return word[:-len(end)] + repl
+            else:
+                if len(word) - len(end) >= r2:
+                    return word[:-len(end)] + repl
+        return word
+    return None
+
+
+s3_triples = (('ational', 'ate', False),
+               ('tional', 'tion', False),
+               ('alize', 'al', False),
+               ('icate', 'ic', False),
+               ('iciti', 'ic', False),
+               ('ative', '', True),
+               ('ical', 'ic', False),
+               ('ness', '', False),
+               ('ful', '', False))
+
+
+def step_3(word, r1, r2):
+    for trip in s3_triples:
+        attempt = step_3_helper(word, r1, r2, trip[0], trip[1], trip[2])
+        if attempt:
+            return attempt
+    return word
+
+
+s4_delete_list = ('al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant', 'ement',
+                  'ment', 'ent', 'ism', 'ate', 'iti', 'ous', 'ive', 'ize')
+
+
+def step_4(word, r2):
+    for end in s4_delete_list:
+        if word.endswith(end):
+            if len(word) - len(end) >= r2:
+                return word[:-len(end)]
+            return word
+
+    if word.endswith('sion') or word.endswith('tion'):
+        if len(word) - 3 >= r2:
+            return word[:-3]
+
+    return word
+
+
+def step_5(word, r1, r2):
+    if word.endswith('l'):
+        if len(word) - 1 >= r2 and word[-2] == 'l':
+            return word[:-1]
+        return word
+
+    if word.endswith('e'):
+        if len(word) - 1 >= r2:
+            return word[:-1]
+        if len(word) - 1 >= r1 and not ends_with_short_syllable(word[:-1]):
+            return word[:-1]
+
+    return word
+
+
+def normalize_ys(word):
+    return word.replace('Y', 'y')
+
+
+exceptional_forms = {'skis': 'ski',
+                    'skies': 'sky',
+                    'dying': 'die',
+                    'lying': 'lie',
+                    'tying': 'tie',
+                    'idly': 'idl',
+                    'gently': 'gentl',
+                    'ugly': 'ugli',
+                    'early': 'earli',
+                    'only': 'onli',
+                    'singly': 'singl',
+                    'sky': 'sky',
+                    'news': 'news',
+                    'howe': 'howe',
+                    'atlas': 'atlas',
+                    'cosmos': 'cosmos',
+                    'bias': 'bias',
+                    'andes': 'andes'}
+
+exceptional_early_exit_post_1a = frozenset(['inning', 'outing', 'canning', 'herring',
+                                            'earring', 'proceed', 'exceed', 'succeed'])
+
+
+def stem(word):
+    if len(word) <= 2:
+        return word
+    word = remove_initial_apostrophe(word)
+
+    # handle some exceptional forms
+    if word in exceptional_forms:
+        return exceptional_forms[word]
+
+    word = capitalize_consonant_ys(word)
+    r1 = get_r1(word)
+    r2 = get_r2(word)
+    word = step_0(word)
+    word = step_1a(word)
+
+    # handle some more exceptional forms
+    if word in exceptional_early_exit_post_1a:
+        return word
+
+    word = step_1b(word, r1)
+    word = step_1c(word)
+    word = step_2(word, r1)
+    word = step_3(word, r1, r2)
+    word = step_4(word, r2)
+    word = step_5(word, r1, r2)
+    word = normalize_ys(word)
+
+    return word
+
+
+
diff --git a/lib/whoosh/whoosh/lang/wordnet.py b/lib/whoosh/whoosh/lang/wordnet.py
new file mode 100644
index 0000000..51a201a
--- /dev/null
+++ b/lib/whoosh/whoosh/lang/wordnet.py
@@ -0,0 +1,267 @@
+# Copyright 2009 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""This module contains low-level functions and a high-level class for parsing
+the prolog file "wn_s.pl" from the WordNet prolog download
+into an object suitable for looking up synonyms and performing query expansion.
+
+http://wordnetcode.princeton.edu/3.0/WNprolog-3.0.tar.gz
+"""
+
+from collections import defaultdict
+
+from whoosh.fields import Schema, ID, STORED
+
+
+def parse_file(f):
+    """Parses the WordNet wn_s.pl prolog file and returns two dictionaries:
+    word2nums and num2words.
+    """
+
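+    # Each relevant line of wn_s.pl looks roughly like
+    #   s(100001740,1,'entity',n,1,11).
+    # i.e. a synset number followed by a quoted word. Only single alphabetic
+    # words are kept; the two dictionaries map words to synset numbers and
+    # synset numbers back to words.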
+    word2nums = defaultdict(list)
+    num2words = defaultdict(list)
+
+    for line in f:
+        if not line.startswith("s("):
+            continue
+
+        line = line[2:]
+        num = int(line[:line.find(",")])
+        qt = line.find("'")
+        line = line[qt + 1:]
+        qt = line.find("'")
+        word = line[:qt].lower()
+
+        if not word.isalpha():
+            continue
+
+        word2nums[word].append(num)
+        num2words[num].append(word)
+
+    return word2nums, num2words
+
+
+def make_index(storage, indexname, word2nums, num2words):
+    """Creates a Whoosh index in the given storage object containing
+    synonyms taken from word2nums and num2words. Returns the Index
+    object.
+    """
+
+    schema = Schema(word=ID, syns=STORED)
+    ix = storage.create_index(schema, indexname=indexname)
+    w = ix.writer()
+    for word in word2nums.iterkeys():
+        syns = synonyms(word2nums, num2words, word)
+        w.add_document(word=unicode(word), syns=syns)
+    w.commit()
+    return ix
+
+
+def synonyms(word2nums, num2words, word):
+    """Uses the word2nums and num2words dicts to look up synonyms
+    for the given word. Returns a list of synonym strings.
+    """
+
+    keys = word2nums[word]
+    syns = set()
+    for key in keys:
+        syns = syns.union(num2words[key])
+
+    if word in syns:
+        syns.remove(word)
+    return sorted(syns)
+
+
+class Thesaurus(object):
+    """Represents the WordNet synonym database, either loaded into memory
+    from the wn_s.pl Prolog file, or stored on disk in a Whoosh index.
+
+    This class allows you to parse the prolog file "wn_s.pl" from the WordNet prolog
+    download into an object suitable for looking up synonyms and performing query
+    expansion.
+
+    http://wordnetcode.princeton.edu/3.0/WNprolog-3.0.tar.gz
+
+    To load a Thesaurus object from the wn_s.pl file...
+
+    >>> t = Thesaurus.from_filename("wn_s.pl")
+
+    To save the in-memory Thesaurus to a Whoosh index...
+
+    >>> from whoosh.filedb.filestore import FileStorage
+    >>> fs = FileStorage("index")
+    >>> t.to_storage(fs)
+
+    To load a Thesaurus object from a Whoosh index...
+
+    >>> t = Thesaurus.from_storage(fs)
+
+    The Thesaurus object is thus usable in two ways:
+
+    * Parse the wn_s.pl file into memory (Thesaurus.from_*) and then look up
+      synonyms in memory. This has a startup cost for parsing the file and uses
+      quite a bit of memory to store two large dictionaries; however, synonym
+      look-ups are very fast.
+
+    * Parse the wn_s.pl file into memory (Thesaurus.from_filename), then save it
+      to an index (to_storage). From then on, open the thesaurus from the saved
+      index (Thesaurus.from_storage). This has a large one-time cost for storing
+      the index, but after that the Thesaurus is faster to open (than re-parsing
+      the file), though synonym look-ups are slightly slower.
+
+    Here are timings for various tasks on my (fast) Windows machine, which might
+    give an idea of relative costs for in-memory vs. on-disk.
+
+    ================================================ ================
+    Task                                             Approx. time (s)
+    ================================================ ================
+    Parsing the wn_s.pl file                         1.045
+    Saving to an on-disk index                       13.084
+    Loading from an on-disk index                    0.082
+    Look up synonyms for "light" (in memory)         0.0011
+    Look up synonyms for "light" (loaded from disk)  0.0028
+    ================================================ ================
+
+    Basically, if you can afford spending the memory necessary to parse the
+    Thesaurus and then cache it, it's faster. Otherwise, use an on-disk index.
+    """
+
+    def __init__(self):
+        self.w2n = None
+        self.n2w = None
+        self.searcher = None
+
+    @classmethod
+    def from_file(cls, fileobj):
+        """Creates a Thesaurus object from the given file-like object, which should
+        contain the WordNet wn_s.pl file.
+
+        >>> f = open("wn_s.pl")
+        >>> t = Thesaurus.from_file(f)
+        >>> t.synonyms("hail")
+        ['acclaim', 'come', 'herald']
+        """
+
+        thes = cls()
+        thes.w2n, thes.n2w = parse_file(fileobj)
+        return thes
+
+    @classmethod
+    def from_filename(cls, filename):
+        """Creates a Thesaurus object from the given filename, which should
+        contain the WordNet wn_s.pl file.
+
+        >>> t = Thesaurus.from_filename("wn_s.pl")
+        >>> t.synonyms("hail")
+        ['acclaim', 'come', 'herald']
+        """
+
+        f = open(filename, "rb")
+        try:
+            return cls.from_file(f)
+        finally:
+            f.close()
+
+    @classmethod
+    def from_storage(cls, storage, indexname="THES"):
+        """Creates a Thesaurus object from the given storage object,
+        which should contain an index created by Thesaurus.to_storage().
+
+        >>> from whoosh.filedb.filestore import FileStorage
+        >>> fs = FileStorage("index")
+        >>> t = Thesaurus.from_storage(fs)
+        >>> t.synonyms("hail")
+        ['acclaim', 'come', 'herald']
+
+        :param storage: A :class:`whoosh.store.Storage` object from
+            which to load the index.
+        :param indexname: A name for the index. This allows you to
+            store multiple indexes in the same storage object.
+        """
+
+        thes = cls()
+        index = storage.open_index(indexname=indexname)
+        thes.searcher = index.searcher()
+        return thes
+
+    def to_storage(self, storage, indexname="THES"):
+        """Creates am index in the given storage object from the
+        synonyms loaded from a WordNet file.
+
+        >>> from whoosh.filedb.filestore import FileStorage
+        >>> fs = FileStorage("index")
+        >>> t = Thesaurus.from_filename("wn_s.pl")
+        >>> t.to_storage(fs)
+
+        :param storage: A :class:`whoosh.store.Storage` object in
+            which to save the index.
+        :param indexname: A name for the index. This allows you to
+            store multiple indexes in the same storage object.
+        """
+
+        if not self.w2n or not self.n2w:
+            raise Exception("No synonyms loaded")
+        make_index(storage, indexname, self.w2n, self.n2w)
+
+    def synonyms(self, word):
+        """Returns a list of synonyms for the given word.
+
+        >>> thesaurus.synonyms("hail")
+        ['acclaim', 'come', 'herald']
+        """
+
+        word = word.lower()
+        if self.searcher:
+            return self.searcher.document(word=word)["syns"]
+        else:
+            return synonyms(self.w2n, self.n2w, word)
+
+
+if __name__ == "__main__":
+    from time import clock
+    from whoosh.filedb.filestore import FileStorage
+    st = FileStorage("c:/testindex")
+
+#    t = clock()
+#    th = Thesaurus.from_filename("c:/wordnet/wn_s.pl")
+#    print clock() - t
+#
+#    t = clock()
+#    th.to_storage(st)
+#    print clock() - t
+#
+#    t = clock()
+#    print th.synonyms("light")
+#    print clock() - t
+
+    t = clock()
+    th = Thesaurus.from_storage(st)
+    print clock() - t
+
+    t = clock()
+    print th.synonyms("hail")
+    print clock() - t
diff --git a/lib/whoosh/whoosh/matching.py b/lib/whoosh/whoosh/matching.py
new file mode 100644
index 0000000..83fa3cd
--- /dev/null
+++ b/lib/whoosh/whoosh/matching.py
@@ -0,0 +1,1508 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from itertools import izip, repeat
+
+
+"""
+This module contains "matcher" classes. Matchers deal with posting lists. The
+most basic matcher, which reads the list of postings for a term, will be
+provided by the backend implementation (for example,
+``whoosh.filedb.filepostings.FilePostingReader``). The classes in this module
+provide additional functionality, such as combining the results of two
+matchers, or modifying the results of a matcher.
+
+You do not need to deal with the classes in this module unless you need to
+write your own Matcher implementation to provide some new functionality. These
+classes are not instantiated by the user. They are usually created by a
+:class:`~whoosh.query.Query` object's ``matcher()`` method, which returns the
+appropriate matcher to implement the query (for example, the ``Or`` query's
+``matcher()`` method returns a ``UnionMatcher`` object).
+
+Certain backends support "quality" optimizations. These backends have the
+ability to skip ahead if they know the current block of postings can't
+contribute to the top N documents. If the matcher tree and backend support
+these optimizations, the matcher's ``supports_quality()`` method will return
+``True``.
+"""
+
+
+class ReadTooFar(Exception):
+    """Raised when next() or skip_to() is called on an inactive matchers.
+    """
+
+
+class NoQualityAvailable(Exception):
+    """Raised when quality methods are called on a matcher that does not
+    support quality-based optimizations.
+    """
+
+
+# Matchers
+
+class Matcher(object):
+    """Base class for all matchers.
+    """
+
+    def is_active(self):
+        """Returns True if this matcher is still "active", that is, it has not
+        yet reached the end of the posting list.
+        """
+
+        raise NotImplementedError
+
+    def replace(self):
+        """Returns a possibly-simplified version of this matcher. For example,
+        if one of the children of a UnionMatcher is no longer active, calling
+        this method on the UnionMatcher will return the other child.
+        """
+
+        return self
+
+    def copy(self):
+        """Returns a copy of this matcher.
+        """
+
+        raise NotImplementedError
+
+    def depth(self):
+        """Returns the depth of the tree under this matcher, or 0 if this
+        matcher does not have any children.
+        """
+
+        return 0
+
+    def supports_quality(self):
+        """Returns True if this matcher supports the use of ``quality`` and
+        ``block_quality``.
+        """
+
+        return False
+
+    def quality(self):
+        """Returns a quality measurement of the current posting, according to
+        the current weighting algorithm. Raises ``NoQualityAvailable`` if the
+        matcher or weighting do not support quality measurements.
+        """
+
+        raise NoQualityAvailable
+
+    def block_quality(self):
+        """Returns a quality measurement of the current block of postings,
+        according to the current weighting algorithm. Raises
+        ``NoQualityAvailable`` if the matcher or weighting do not support
+        quality measurements.
+        """
+
+        raise NoQualityAvailable(self.__class__)
+
+    def id(self):
+        """Returns the ID of the current posting.
+        """
+
+        raise NotImplementedError
+
+    def all_ids(self):
+        """Returns a generator of all IDs in the matcher.
+
+        What this method returns for a matcher that has already read some
+        postings (whether it only yields the remaining postings or all postings
+        from the beginning) is undefined, so it's best to only use this method
+        on fresh matchers.
+        """
+
+        i = 0
+        while self.is_active():
+            yield self.id()
+            self.next()
+            i += 1
+            if i == 10:
+                self = self.replace()
+                i = 0
+
+    def all_items(self):
+        """Returns a generator of all (ID, encoded value) pairs in the matcher.
+
+        What this method returns for a matcher that has already read some
+        postings (whether it only yields the remaining postings or all postings
+        from the beginning) is undefined, so it's best to only use this method
+        on fresh matchers.
+        """
+
+        i = 0
+        while self.is_active():
+            yield (self.id(), self.value())
+            self.next()
+            i += 1
+            if i == 10:
+                self = self.replace()
+                i = 0
+
+    def items_as(self, astype):
+        """Returns a generator of all (ID, decoded value) pairs in the matcher.
+
+        What this method returns for a matcher that has already read some
+        postings (whether it only yields the remaining postings or all postings
+        from the beginning) is undefined, so it's best to only use this method
+        on fresh matchers.
+        """
+
+        while self.is_active():
+            yield (self.id(), self.value_as(astype))
+            self.next()
+
+    def value(self):
+        """Returns the encoded value of the current posting.
+        """
+
+        raise NotImplementedError
+
+    def supports(self, astype):
+        """Returns True if the field's format supports the named data type,
+        for example 'frequency' or 'characters'.
+        """
+
+        raise NotImplementedError("supports not implemented in %s" % self.__class__)
+
+    def value_as(self, astype):
+        """Returns the value(s) of the current posting as the given type.
+        """
+
+        raise NotImplementedError("value_as not implemented in %s" % self.__class__)
+
+    def spans(self):
+        """Returns a list of :class:`whoosh.spans.Span` objects for the matches
+        in this document. Raises an exception if the field being searched does
+        not store positions.
+        """
+
+        from whoosh.spans import Span
+        if self.supports("characters"):
+            return [Span(pos, startchar=startchar, endchar=endchar)
+                    for pos, startchar, endchar in self.value_as("characters")]
+        elif self.supports("positions"):
+            return [Span(pos) for pos in self.value_as("positions")]
+        else:
+            raise Exception("Field does not support spans")
+
+    def skip_to(self, id):
+        """Moves this matcher to the first posting with an ID equal to or
+        greater than the given ID.
+        """
+
+        while self.is_active() and self.id() < id:
+            self.next()
+
+    def skip_to_quality(self, minquality):
+        """Moves this matcher to the next block with greater than the given
+        minimum quality value.
+        """
+
+        raise NotImplementedError(self.__class__.__name__)
+
+    def next(self):
+        """Moves this matcher to the next posting.
+        """
+
+        raise NotImplementedError(self.__class__.__name__)
+
+    def weight(self):
+        """Returns the weight of the current posting.
+        """
+
+        return self.value_as("weight")
+
+    def score(self):
+        """Returns the score of the current posting.
+        """
+
+        raise NotImplementedError(self.__class__.__name__)
+
+
+class NullMatcher(Matcher):
+    """Matcher with no postings which is never active.
+    """
+
+    def is_active(self):
+        return False
+
+    def all_ids(self):
+        return []
+
+    def copy(self):
+        return self
+
+
+class ListMatcher(Matcher):
+    """Synthetic matcher backed by a list of IDs.
+    """
+
+    def __init__(self, ids, weights=None, values=None, format=None,
+                 scorer=None, position=0, all_weights=None,
+                 maxwol=0.0, minlength=0):
+        """
+        :param ids: a list of doc IDs.
+        :param weights: a list of weights corresponding to the list of IDs.
+            If this argument is not supplied, a list of 1.0 values is used.
+        :param values: a list of encoded values corresponding to the list of
+            IDs.
+        :param format: a :class:`whoosh.formats.Format` object representing the
+            format of the field.
+        :param scorer: a :class:`whoosh.scoring.BaseScorer` object for scoring
+            the postings.
+        """
+
+        self._ids = ids
+        self._weights = weights
+        self._all_weights = all_weights
+        self._values = values
+        self._i = position
+        self._format = format
+        self._scorer = scorer
+        self._maxwol = maxwol
+        self._minlength = minlength
+
+    def __repr__(self):
+        return "<%s>" % self.__class__.__name__
+
+    def is_active(self):
+        return self._i < len(self._ids)
+
+    def copy(self):
+        return self.__class__(self._ids, self._weights, self._values,
+                              self._format, self._scorer, self._i,
+                              self._all_weights, self._maxwol, self._minlength)
+
+    def supports_quality(self):
+        return self._scorer is not None and self._scorer.supports_quality()
+
+    def quality(self):
+        return self._scorer.quality(self)
+
+    def block_quality(self):
+        return self._scorer.block_quality(self)
+
+    def skip_to_quality(self, minquality):
+        self._i += 1
+        while self._i < len(self._ids) and self.quality() <= minquality:
+            self._i += 1
+        return 0
+
+    def id(self):
+        return self._ids[self._i]
+
+    def all_ids(self):
+        return iter(self._ids)
+
+    def all_items(self):
+        values = self._values
+        if values is None:
+            values = repeat('')
+
+        return izip(self._ids, values)
+
+    def value(self):
+        if self._values:
+            return self._values[self._i]
+        else:
+            return ''
+
+    def value_as(self, astype):
+        decoder = self._format.decoder(astype)
+        return decoder(self.value())
+
+    def supports(self, astype):
+        return self._format.supports(astype)
+
+    def next(self):
+        self._i += 1
+
+    def weight(self):
+        if self._all_weights:
+            return self._all_weights
+        elif self._weights:
+            return self._weights[self._i]
+        else:
+            return 1.0
+
+    def block_maxweight(self):
+        if self._all_weights:
+            return self._all_weights
+        elif self._weights:
+            return max(self._weights)
+        else:
+            return 1.0
+
+    def block_maxwol(self):
+        return self._maxwol
+
+    def block_maxid(self):
+        return max(self._ids)
+
+    def block_minlength(self):
+        return self._minlength
+
+    def score(self):
+        if self._scorer:
+            return self._scorer.score(self)
+        else:
+            return self.weight()
+
+
+class WrappingMatcher(Matcher):
+    """Base class for matchers that wrap sub-matchers.
+    """
+
+    def __init__(self, child, boost=1.0):
+        self.child = child
+        self.boost = boost
+
+    def __repr__(self):
+        return "%s(%r, boost=%s)" % (self.__class__.__name__, self.child, self.boost)
+
+    def copy(self):
+        kwargs = {}
+        if hasattr(self, "boost"):
+            kwargs["boost"] = self.boost
+        return self.__class__(self.child.copy(), **kwargs)
+
+    def depth(self):
+        return 1 + self.child.depth()
+
+    def _replacement(self, newchild):
+        return self.__class__(newchild, boost=self.boost)
+
+    def replace(self):
+        r = self.child.replace()
+        if not r.is_active():
+            return NullMatcher()
+        if r is not self.child:
+            try:
+                return self._replacement(r)
+            except TypeError, e:
+                raise TypeError("Class %s got exception %s trying "
+                                "to replace itself" % (self.__class__, e))
+        else:
+            return self
+
+    def id(self):
+        return self.child.id()
+
+    def all_ids(self):
+        return self.child.all_ids()
+
+    def is_active(self):
+        return self.child.is_active()
+
+    def supports(self, astype):
+        return self.child.supports(astype)
+
+    def value(self):
+        return self.child.value()
+
+    def value_as(self, astype):
+        return self.child.value_as(astype)
+
+    def spans(self):
+        return self.child.spans()
+
+    def skip_to(self, id):
+        return self.child.skip_to(id)
+
+    def next(self):
+        self.child.next()
+
+    def supports_quality(self):
+        return self.child.supports_quality()
+
+    def skip_to_quality(self, minquality):
+        return self.child.skip_to_quality(minquality / self.boost)
+
+    def quality(self):
+        return self.child.quality() * self.boost
+
+    def block_quality(self):
+        return self.child.block_quality() * self.boost
+
+    def weight(self):
+        return self.child.weight() * self.boost
+
+    def score(self):
+        return self.child.score() * self.boost
+
+
+class MultiMatcher(Matcher):
+    """Serializes the results of a list of sub-matchers.
+    """
+
+    def __init__(self, matchers, idoffsets, current=0):
+        """
+        :param matchers: a list of Matcher objects.
+        :param idoffsets: a list of offsets corresponding to items in the
+            ``matchers`` list.
+        """
+
+        self.matchers = matchers
+        self.offsets = idoffsets
+        self.current = current
+        self._next_matcher()
+
+    def __repr__(self):
+        return "%s(%r, %r, current=%s)" % (self.__class__.__name__,
+                                           self.matchers, self.offsets,
+                                           self.current)
+
+    def is_active(self):
+        return self.current < len(self.matchers)
+
+    def _next_matcher(self):
+        matchers = self.matchers
+        while self.current < len(matchers) and not matchers[self.current].is_active():
+            self.current += 1
+
+    def copy(self):
+        return self.__class__([mr.copy() for mr in self.matchers],
+                              self.offsets, current=self.current)
+
+    def depth(self):
+        if self.is_active():
+            return 1 + max(mr.depth() for mr in self.matchers[self.current:])
+        else:
+            return 0
+
+    def replace(self):
+        if not self.is_active():
+            return NullMatcher()
+        # TODO: Possible optimization: if the last matcher is current, replace
+        # this with the last matcher, but wrap it with a matcher that adds the
+        # offset. Have to check whether that's actually faster, though.
+        return self
+
+    def id(self):
+        current = self.current
+        return self.matchers[current].id() + self.offsets[current]
+
+    def all_ids(self):
+        offsets = self.offsets
+        for i, mr in enumerate(self.matchers):
+            for id in mr.all_ids():
+                yield id + offsets[i]
+
+    def spans(self):
+        return self.matchers[self.current].spans()
+
+    def supports(self, astype):
+        return self.matchers[self.current].supports(astype)
+
+    def value(self):
+        return self.matchers[self.current].value()
+
+    def value_as(self, astype):
+        return self.matchers[self.current].value_as(astype)
+
+    def next(self):
+        if not self.is_active():
+            raise ReadTooFar
+
+        self.matchers[self.current].next()
+        if not self.matchers[self.current].is_active():
+            self._next_matcher()
+
+    def skip_to(self, id):
+        if not self.is_active():
+            raise ReadTooFar
+        if id <= self.id():
+            return
+
+        matchers = self.matchers
+        offsets = self.offsets
+        r = False
+
+        while self.current < len(matchers) and id > self.id():
+            mr = matchers[self.current]
+            sr = mr.skip_to(id - offsets[self.current])
+            r = sr or r
+            if mr.is_active():
+                break
+
+            self._next_matcher()
+
+        return r
+
+    def supports_quality(self):
+        return all(mr.supports_quality() for mr in self.matchers[self.current:])
+
+    def quality(self):
+        return self.matchers[self.current].quality()
+
+    def block_quality(self):
+        return self.matchers[self.current].block_quality()
+
+    def weight(self):
+        return self.matchers[self.current].weight()
+
+    def score(self):
+        return self.matchers[self.current].score()
+
+
+def ExcludeMatcher(child, excluded, boost=1.0):
+    return FilterMatcher(child, excluded, exclude=True, boost=boost)
+
+
+class FilterMatcher(WrappingMatcher):
+    """Filters the postings from the wrapped based on whether the IDs are
+    present in or absent from a set.
+    """
+
+    def __init__(self, child, ids, exclude=False, boost=1.0):
+        """
+        :param child: the child matcher.
+        :param ids: a set of IDs to filter by.
+        :param exclude: by default, only IDs from the wrapped matcher that are
+            IN the set are used. If this argument is True, only IDs from the
+            wrapped matcher that are NOT IN the set are used.
+        """
+
+        super(FilterMatcher, self).__init__(child)
+        self._ids = ids
+        self._exclude = exclude
+        self.boost = boost
+        self._find_next()
+
+    def __repr__(self):
+        return "%s(%r, %r, %r, boost=%s)" % (self.__class__.__name__,
+                                             self.child, self._ids,
+                                             self._exclude, self.boost)
+
+    def copy(self):
+        return self.__class__(self.child.copy(), self._ids, self._exclude,
+                              boost=self.boost)
+
+    def _replacement(self, newchild):
+        return self.__class__(newchild, self._ids, exclude=self._exclude, boost=self.boost)
+
+    def _find_next(self):
+        child = self.child
+        ids = self._ids
+        r = False
+
+        if self._exclude:
+            while child.is_active() and child.id() in ids:
+                r = child.next() or r
+        else:
+            while child.is_active() and child.id() not in ids:
+                r = child.next() or r
+        return r
+
+    def next(self):
+        self.child.next()
+        self._find_next()
+
+    def skip_to(self, id):
+        self.child.skip_to(id)
+        self._find_next()
+
+    def all_ids(self):
+        ids = self._ids
+        if self._exclude:
+            return (id for id in self.child.all_ids() if id not in ids)
+        else:
+            return (id for id in self.child.all_ids() if id in ids)
+
+    def all_items(self):
+        ids = self._ids
+        if self._exclude:
+            return (item for item in self.child.all_items() if item[0] not in ids)
+        else:
+            return (item for item in self.child.all_items() if item[0] in ids)
+
+
+class BiMatcher(Matcher):
+    """Base class for matchers that combine the results of two sub-matchers in
+    some way.
+    """
+
+    def __init__(self, a, b):
+        super(BiMatcher, self).__init__()
+        self.a = a
+        self.b = b
+
+    def __repr__(self):
+        return "%s(%r, %r)" % (self.__class__.__name__, self.a, self.b)
+
+    def copy(self):
+        return self.__class__(self.a.copy(), self.b.copy())
+
+    def depth(self):
+        return 1 + max(self.a.depth(), self.b.depth())
+
+    def skip_to(self, id):
+        if not self.is_active():
+            raise ReadTooFar
+        ra = self.a.skip_to(id)
+        rb = self.b.skip_to(id)
+        return ra or rb
+
+    def supports_quality(self):
+        return self.a.supports_quality() and self.b.supports_quality()
+
+    def supports(self, astype):
+        return self.a.supports(astype) and self.b.supports(astype)
+
+
+class AdditiveBiMatcher(BiMatcher):
+    """Base class for binary matchers where the scores of the sub-matchers are
+    added together.
+    """
+
+    def quality(self):
+        q = 0.0
+        if self.a.is_active():
+            q += self.a.quality()
+        if self.b.is_active():
+            q += self.b.quality()
+        return q
+
+    def block_quality(self):
+        bq = 0.0
+        if self.a.is_active():
+            bq += self.a.block_quality()
+        if self.b.is_active():
+            bq += self.b.block_quality()
+        return bq
+
+    def weight(self):
+        return (self.a.weight() + self.b.weight())
+
+    def score(self):
+        return (self.a.score() + self.b.score())
+
+
+class UnionMatcher(AdditiveBiMatcher):
+    """Matches the union (OR) of the postings in the two sub-matchers.
+    """
+
+    def replace(self):
+        a = self.a.replace()
+        b = self.b.replace()
+
+        a_active = a.is_active()
+        b_active = b.is_active()
+        if not (a_active or b_active):
+            return NullMatcher()
+        if not a_active:
+            return b
+        if not b_active:
+            return a
+
+        if a is not self.a or b is not self.b:
+            return self.__class__(a, b)
+        return self
+
+    def is_active(self):
+        return self.a.is_active() or self.b.is_active()
+
+    def skip_to(self, id):
+        ra = rb = False
+
+        if self.a.is_active():
+            ra = self.a.skip_to(id)
+        if self.b.is_active():
+            rb = self.b.skip_to(id)
+
+        return ra or rb
+
+    def id(self):
+        a = self.a
+        b = self.b
+        if not a.is_active():
+            return b.id()
+        if not b.is_active():
+            return a.id()
+        return min(a.id(), b.id())
+
+    # Using sets is faster in most cases, but could potentially use a lot of
+    # memory. Comment out this method override to not use sets.
+    def all_ids(self):
+        return iter(sorted(set(self.a.all_ids()) | set(self.b.all_ids())))
+
+    def next(self):
+        a = self.a
+        b = self.b
+        a_active = a.is_active()
+        b_active = b.is_active()
+
+        # Shortcut when one matcher is inactive
+        if not (a_active or b_active):
+            raise ReadTooFar
+        elif not a_active:
+            return b.next()
+        elif not b_active:
+            return a.next()
+
+        a_id = a.id()
+        b_id = b.id()
+        ar = br = None
+
+        # After all that, here's the actual implementation
+        if a_id <= b_id:
+            ar = a.next()
+        if b_id <= a_id:
+            br = b.next()
+        return ar or br
+
+    def spans(self):
+        if not self.a.is_active():
+            return self.b.spans()
+        if not self.b.is_active():
+            return self.a.spans()
+
+        id_a = self.a.id()
+        id_b = self.b.id()
+        if id_a < id_b:
+            return self.a.spans()
+        elif id_b < id_a:
+            return self.b.spans()
+        else:
+            return sorted(set(self.a.spans()) | set(self.b.spans()))
+
+    def weight(self):
+        a = self.a
+        b = self.b
+
+        if not a.is_active():
+            return b.weight()
+        if not b.is_active():
+            return a.weight()
+
+        id_a = a.id()
+        id_b = b.id()
+        if id_a < id_b:
+            return a.weight()
+        elif id_b < id_a:
+            return b.weight()
+        else:
+            return (a.weight() + b.weight())
+
+    def score(self):
+        a = self.a
+        b = self.b
+
+        if not a.is_active():
+            return b.score()
+        if not b.is_active():
+            return a.score()
+
+        id_a = a.id()
+        id_b = b.id()
+        if id_a < id_b:
+            return a.score()
+        elif id_b < id_a:
+            return b.score()
+        else:
+            return (a.score() + b.score())
+
+    def skip_to_quality(self, minquality):
+        a = self.a
+        b = self.b
+        if not (a.is_active() or b.is_active()):
+            raise ReadTooFar
+
+        # Short circuit if one matcher is inactive
+        if not a.is_active():
+            return b.skip_to_quality(minquality)
+        elif not b.is_active():
+            return a.skip_to_quality(minquality)
+
+        skipped = 0
+        aq = a.block_quality()
+        bq = b.block_quality()
+        while a.is_active() and b.is_active() and aq + bq <= minquality:
+            if aq < bq:
+                skipped += a.skip_to_quality(minquality - bq)
+                aq = a.block_quality()
+            else:
+                skipped += b.skip_to_quality(minquality - aq)
+                bq = b.block_quality()
+
+        return skipped
+
+
+class DisjunctionMaxMatcher(UnionMatcher):
+    """Matches the union (OR) of two sub-matchers. Where both sub-matchers
+    match the same posting, returns the weight/score of the higher-scoring
+    posting.
+    """
+
+    # TODO: this class inherits from AdditiveBiMatcher (through UnionMatcher)
+    # but it does not add the scores of the sub-matchers together (it
+    # overrides all methods that perform addition). Need to clean up the
+    # inheritance.
+
+    def __init__(self, a, b, tiebreak=0.0):
+        super(DisjunctionMaxMatcher, self).__init__(a, b)
+        self.tiebreak = tiebreak
+
+    def copy(self):
+        return self.__class__(self.a.copy(), self.b.copy(),
+                              tiebreak=self.tiebreak)
+
+    def score(self):
+        if not self.a.is_active():
+            return self.b.score()
+        elif not self.b.is_active():
+            return self.a.score()
+        else:
+            return max(self.a.score(), self.b.score())
+
+    def quality(self):
+        return max(self.a.quality(), self.b.quality())
+
+    def block_quality(self):
+        return max(self.a.block_quality(), self.b.block_quality())
+
+    def skip_to_quality(self, minquality):
+        a = self.a
+        b = self.b
+        minquality = minquality
+
+        # Short circuit if one matcher is inactive
+        if not a.is_active():
+            sk = b.skip_to_quality(minquality)
+            return sk
+        elif not b.is_active():
+            return a.skip_to_quality(minquality)
+
+        skipped = 0
+        aq = a.block_quality()
+        bq = b.block_quality()
+        while a.is_active() and b.is_active() and max(aq, bq) <= minquality:
+            if aq <= minquality:
+                skipped += a.skip_to_quality(minquality)
+                aq = a.block_quality()
+            if bq <= minquality:
+                skipped += b.skip_to_quality(minquality)
+                bq = b.block_quality()
+        return skipped
+
+
+class IntersectionMatcher(AdditiveBiMatcher):
+    """Matches the intersection (AND) of the postings in the two sub-matchers.
+    """
+
+    def __init__(self, a, b):
+        super(IntersectionMatcher, self).__init__(a, b)
+        if (self.a.is_active()
+            and self.b.is_active()
+            and self.a.id() != self.b.id()):
+            self._find_next()
+
+    def replace(self):
+        a = self.a.replace()
+        b = self.b.replace()
+
+        a_active = a.is_active()
+        b_active = b.is_active()
+        if not (a_active and b_active):
+            return NullMatcher()
+
+        if a is not self.a or b is not self.b:
+            return self.__class__(a, b)
+        return self
+
+    def is_active(self):
+        return self.a.is_active() and self.b.is_active()
+
+    def _find_next(self):
+        a = self.a
+        b = self.b
+        a_id = a.id()
+        b_id = b.id()
+        assert a_id != b_id
+        r = False
+
+        while a.is_active() and b.is_active() and a_id != b_id:
+            if a_id < b_id:
+                ra = a.skip_to(b_id)
+                if not a.is_active():
+                    return
+                r = r or ra
+                a_id = a.id()
+            else:
+                rb = b.skip_to(a_id)
+                if not b.is_active():
+                    return
+                r = r or rb
+                b_id = b.id()
+        return r
+
+    def id(self):
+        return self.a.id()
+
+    # Using sets is faster in some cases, but could potentially use a lot of
+    # memory
+    def all_ids(self):
+        return iter(sorted(set(self.a.all_ids()) & set(self.b.all_ids())))
+
+    def skip_to(self, id):
+        if not self.is_active():
+            raise ReadTooFar
+        ra = self.a.skip_to(id)
+        rb = self.b.skip_to(id)
+        if self.is_active():
+            rn = False
+            if self.a.id() != self.b.id():
+                rn = self._find_next()
+            return ra or rb or rn
+
+    def skip_to_quality(self, minquality):
+        a = self.a
+        b = self.b
+        minquality = minquality
+
+        skipped = 0
+        aq = a.block_quality()
+        bq = b.block_quality()
+        while a.is_active() and b.is_active() and aq + bq <= minquality:
+            if aq < bq:
+                skipped += a.skip_to_quality(minquality - bq)
+            else:
+                skipped += b.skip_to_quality(minquality - aq)
+            if not a.is_active() or not b.is_active():
+                break
+            if a.id() != b.id():
+                self._find_next()
+            aq = a.block_quality()
+            bq = b.block_quality()
+        return skipped
+
+    def next(self):
+        if not self.is_active():
+            raise ReadTooFar
+
+        # We must assume that the ids are equal whenever next() is called (they
+        # should have been made equal by _find_next), so advance the first
+        # matcher and then realign the second using _find_next()
+        ar = self.a.next()
+        if self.is_active():
+            nr = self._find_next()
+            return ar or nr
+
+    def spans(self):
+        return sorted(set(self.a.spans()) | set(self.b.spans()))
+
+
+class AndNotMatcher(BiMatcher):
+    """Matches the postings in the first sub-matcher that are NOT present in
+    the second sub-matcher.
+    """
+
+    def __init__(self, a, b):
+        super(AndNotMatcher, self).__init__(a, b)
+        if (self.a.is_active()
+            and self.b.is_active()
+            and self.a.id() == self.b.id()):
+            self._find_next()
+
+    def is_active(self):
+        return self.a.is_active()
+
+    def _find_next(self):
+        pos = self.a
+        neg = self.b
+        if not neg.is_active():
+            return
+        pos_id = pos.id()
+        r = False
+
+        if neg.id() < pos_id:
+            neg.skip_to(pos_id)
+
+        while pos.is_active() and neg.is_active() and pos_id == neg.id():
+            nr = pos.next()
+            if not pos.is_active():
+                break
+
+            r = r or nr
+            pos_id = pos.id()
+            neg.skip_to(pos_id)
+
+        return r
+
+    def replace(self):
+        if not self.a.is_active():
+            return NullMatcher()
+        if not self.b.is_active():
+            return self.a.replace()
+        return self
+
+    def quality(self):
+        return self.a.quality()
+
+    def block_quality(self):
+        return self.a.block_quality()
+
+    def skip_to_quality(self, minquality):
+        skipped = self.a.skip_to_quality(minquality)
+        self._find_next()
+        return skipped
+
+    def id(self):
+        return self.a.id()
+
+    def all_ids(self):
+        return iter(sorted(set(self.a.all_ids()) - set(self.b.all_ids())))
+
+    def next(self):
+        if not self.a.is_active():
+            raise ReadTooFar
+        ar = self.a.next()
+        nr = False
+        if self.b.is_active():
+            nr = self._find_next()
+        return ar or nr
+
+    def skip_to(self, id):
+        if not self.a.is_active():
+            raise ReadTooFar
+        if id < self.a.id():
+            return
+
+        self.a.skip_to(id)
+        if self.b.is_active():
+            self.b.skip_to(id)
+            self._find_next()
+
+    def weight(self):
+        return self.a.weight()
+
+    def score(self):
+        return self.a.score()
+
+    def supports(self, astype):
+        return self.a.supports(astype)
+
+    def value(self):
+        return self.a.value()
+
+    def value_as(self, astype):
+        return self.a.value_as(astype)
+
+
+class InverseMatcher(WrappingMatcher):
+    """Synthetic matcher, generates postings that are NOT present in the
+    wrapped matcher.
+    """
+
+    def __init__(self, child, limit, missing=None, weight=1.0):
+        super(InverseMatcher, self).__init__(child)
+        self.limit = limit
+        self._weight = weight
+        self.missing = missing or (lambda id: False)
+        self._id = 0
+        self._find_next()
+
+    def copy(self):
+        return self.__class__(self.child.copy(), self.limit,
+                              weight=self._weight, missing=self.missing)
+
+    def _replacement(self, newchild):
+        return self.__class__(newchild, self.limit, missing=self.missing,
+                              weight=self._weight)
+
+    def is_active(self):
+        return self._id < self.limit
+
+    def supports_quality(self):
+        return False
+
+    def _find_next(self):
+        child = self.child
+        missing = self.missing
+
+        if not child.is_active() and not missing(self._id):
+            return
+
+        if child.is_active() and child.id() < self._id:
+            child.skip_to(self._id)
+
+        # While self._id is missing or is in the child matcher, increase it
+        while child.is_active() and self._id < self.limit:
+            if missing(self._id):
+                self._id += 1
+                continue
+
+            if self._id == child.id():
+                self._id += 1
+                child.next()
+                continue
+
+            break
+
+    def id(self):
+        return self._id
+
+    def all_ids(self):
+        missing = self.missing
+        negs = set(self.child.all_ids())
+        return (id for id in xrange(self.limit)
+                if id not in negs and not missing(id))
+
+    def next(self):
+        if self._id >= self.limit:
+            raise ReadTooFar
+        self._id += 1
+        self._find_next()
+
+    def skip_to(self, id):
+        if self._id >= self.limit:
+            raise ReadTooFar
+        if id < self._id:
+            return
+        self._id = id
+        self._find_next()
+
+    def weight(self):
+        return self._weight
+
+    def score(self):
+        return self._weight
+
+
+class RequireMatcher(WrappingMatcher):
+    """Matches postings that are in both sub-matchers, but only uses scores
+    from the first.
+    """
+
+    def __init__(self, a, b):
+        self.a = a
+        self.b = b
+        self.child = IntersectionMatcher(a, b)
+
+    def copy(self):
+        return self.__class__(self.a.copy(), self.b.copy())
+
+    def replace(self):
+        if not self.child.is_active():
+            return NullMatcher()
+        return self
+
+    def quality(self):
+        return self.a.quality()
+
+    def block_quality(self):
+        return self.a.block_quality()
+
+    def skip_to_quality(self, minquality):
+        skipped = self.a.skip_to_quality(minquality)
+        self.child._find_next()
+        return skipped
+
+    def weight(self):
+        return self.a.weight()
+
+    def score(self):
+        return self.a.score()
+
+    def supports(self, astype):
+        return self.a.supports(astype)
+
+    def value(self):
+        return self.a.value()
+
+    def value_as(self, astype):
+        return self.a.value_as(astype)
+
+
+class AndMaybeMatcher(AdditiveBiMatcher):
+    """Matches postings in the first sub-matcher, and if the same posting is
+    in the second sub-matcher, adds their scores.
+    """
+
+    def __init__(self, a, b):
+        self.a = a
+        self.b = b
+
+        if a.is_active() and b.is_active() and a.id() != b.id():
+            b.skip_to(a.id())
+
+    def is_active(self):
+        return self.a.is_active()
+
+    def id(self):
+        return self.a.id()
+
+    def next(self):
+        if not self.a.is_active():
+            raise ReadTooFar
+
+        ar = self.a.next()
+        br = False
+        if self.a.is_active() and self.b.is_active():
+            br = self.b.skip_to(self.a.id())
+        return ar or br
+
+    def skip_to(self, id):
+        if not self.a.is_active():
+            raise ReadTooFar
+
+        ra = self.a.skip_to(id)
+        rb = False
+        if self.a.is_active() and self.b.is_active():
+            rb = self.b.skip_to(id)
+        return ra or rb
+
+    def replace(self):
+        ar = self.a.replace()
+        br = self.b.replace()
+        if not ar.is_active():
+            return NullMatcher()
+        if not br.is_active():
+            return ar
+        if ar is not self.a or br is not self.b:
+            return self.__class__(ar, br)
+        return self
+
+    def skip_to_quality(self, minquality):
+        a = self.a
+        b = self.b
+        minquality = minquality
+
+        if not a.is_active():
+            raise ReadTooFar
+        if not b.is_active():
+            return a.skip_to_quality(minquality)
+
+        skipped = 0
+        aq = a.block_quality()
+        bq = b.block_quality()
+        while a.is_active() and b.is_active() and aq + bq <= minquality:
+            if aq < bq:
+                skipped += a.skip_to_quality(minquality - bq)
+                aq = a.block_quality()
+            else:
+                skipped += b.skip_to_quality(minquality - aq)
+                bq = b.block_quality()
+
+        return skipped
+
+    def quality(self):
+        q = 0.0
+        if self.a.is_active():
+            q += self.a.quality()
+            if self.b.is_active() and self.a.id() == self.b.id():
+                q += self.b.quality()
+        return q
+
+    def weight(self):
+        if self.b.is_active() and self.a.id() == self.b.id():
+            return self.a.weight() + self.b.weight()
+        else:
+            return self.a.weight()
+
+    def score(self):
+        if self.b.is_active() and self.a.id() == self.b.id():
+            return self.a.score() + self.b.score()
+        else:
+            return self.a.score()
+
+    def supports(self, astype):
+        return self.a.supports(astype)
+
+    def value(self):
+        return self.a.value()
+
+    def value_as(self, astype):
+        return self.a.value_as(astype)
+
+
+class ConstantScoreMatcher(WrappingMatcher):
+    """Wraps a child matcher but returns a constant score (and quality) for
+    every matched posting, instead of the child's own scores.
+    """
+
+    def __init__(self, child, score=1.0):
+        super(ConstantScoreMatcher, self).__init__(child)
+        self._score = score
+
+    def copy(self):
+        return self.__class__(self.child.copy(), score=self._score)
+
+    def _replacement(self, newchild):
+        return self.__class__(newchild, score=self._score)
+
+    def quality(self):
+        return self._score
+
+    def block_quality(self):
+        return self._score
+
+    def score(self):
+        return self._score
+
+
+#class PhraseMatcher(WrappingMatcher):
+#    """Matches postings where a list of sub-matchers occur next to each other
+#    in order.
+#    """
+#
+#    def __init__(self, wordmatchers, slop=1, boost=1.0):
+#        self.wordmatchers = wordmatchers
+#        self.child = make_binary_tree(IntersectionMatcher, wordmatchers)
+#        self.slop = slop
+#        self.boost = boost
+#        self._spans = None
+#        self._find_next()
+#
+#    def copy(self):
+#        return self.__class__(self.wordmatchers[:], slop=self.slop, boost=self.boost)
+#
+#    def replace(self):
+#        if not self.is_active():
+#            return NullMatcher()
+#        return self
+#
+#    def all_ids(self):
+#        # Need to redefine this because the WrappingMatcher parent class
+#        # forwards to the submatcher, which in this case is just the
+#        # IntersectionMatcher.
+#        while self.is_active():
+#            yield self.id()
+#            self.next()
+#
+#    def next(self):
+#        ri = self.child.next()
+#        rn = self._find_next()
+#        return ri or rn
+#
+#    def skip_to(self, id):
+#        rs = self.child.skip_to(id)
+#        rn = self._find_next()
+#        return rs or rn
+#
+#    def skip_to_quality(self, minquality):
+#        skipped = 0
+#        while self.is_active() and self.quality() <= minquality:
+#            # TODO: doesn't count the documents matching the phrase yet
+#            skipped += self.child.skip_to_quality(minquality/self.boost)
+#            self._find_next()
+#        return skipped
+#
+#    def positions(self):
+#        if not self.is_active():
+#            raise ReadTooFar
+#        if not self.wordmatchers:
+#            return []
+#        return self.wordmatchers[0].positions()
+#
+#    def _find_next(self):
+#        isect = self.child
+#        slop = self.slop
+#
+#        # List of "active" positions
+#        current = []
+#
+#        while not current and isect.is_active():
+#            # [[list of positions for word 1],
+#            #  [list of positions for word 2], ...]
+#            poses = [m.positions() for m in self.wordmatchers]
+#
+#            # Set the "active" position list to the list of positions of the
+#            # first word. We will then iteratively update this list with the
+#            # positions of subsequent words if they are within the "slop"
+#            # distance of the positions in the active list.
+#            current = poses[0]
+#
+#            # For each list of positions for the subsequent words...
+#            for poslist in poses[1:]:
+#                # A list to hold the new list of active positions
+#                newposes = []
+#
+#                # For each position in the list of positions in this next word
+#                for newpos in poslist:
+#                    # Use bisect to only check the part of the current list
+#                    # that could contain positions within the "slop" distance
+#                    # of the new position
+#                    start = bisect_left(current, newpos - slop)
+#                    end = bisect_right(current, newpos)
+#
+#                    #
+#                    for curpos in current[start:end]:
+#                        delta = newpos - curpos
+#                        if delta > 0 and delta <= slop:
+#                            newposes.append(newpos)
+#
+#                current = newposes
+#                if not current: break
+#
+#            if not current:
+#                isect.next()
+#
+#        self._count = len(current)
+#
+#
+#class VectorPhraseMatcher(BasePhraseMatcher):
+#    """Phrase matcher for fields with a vector with positions (i.e. Positions
+#    or CharacterPositions format).
+#    """
+#
+#    def __init__(self, searcher, fieldname, words, isect, slop=1, boost=1.0):
+#        """
+#        :param searcher: a Searcher object.
+#        :param fieldname: the field in which to search.
+#        :param words: a sequence of token texts representing the words in the
+#            phrase.
+#        :param isect: an intersection matcher for the words in the phrase.
+#        :param slop:
+#        """
+#
+#        decodefn = searcher.field(fieldname).vector.decoder("positions")
+#        self.reader = searcher.reader()
+#        self.fieldname = fieldname
+#        self.words = words
+#        self.sortedwords = sorted(self.words)
+#        super(VectorPhraseMatcher, self).__init__(isect, decodefn, slop=slop,
+#                                                  boost=boost)
+#
+#    def _poses(self):
+#        vreader = self.reader.vector(self.child.id(), self.fieldname)
+#        poses = {}
+#        decode_positions = self.decode_positions
+#        for word in self.sortedwords:
+#            vreader.skip_to(word)
+#            if vreader.id() != word:
+#                raise Exception("Phrase query: %r in term index but not in"
+#                                " vector (possible analyzer mismatch)" % word)
+#            poses[word] = decode_positions(vreader.value())
+#        # Now put the position lists in phrase order
+#        return [poses[word] for word in self.words]
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/qparser/__init__.py b/lib/whoosh/whoosh/qparser/__init__.py
new file mode 100644
index 0000000..186ec39
--- /dev/null
+++ b/lib/whoosh/whoosh/qparser/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from whoosh.qparser.default import *
+
+
+
diff --git a/lib/whoosh/whoosh/qparser/common.py b/lib/whoosh/whoosh/qparser/common.py
new file mode 100644
index 0000000..0e0316c
--- /dev/null
+++ b/lib/whoosh/whoosh/qparser/common.py
@@ -0,0 +1,54 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""
+This module contains common utility objects/functions for the other query
+parser modules.
+"""
+
+import re
+
+
+class QueryParserError(Exception):
+    def __init__(self, cause, msg=None):
+        super(QueryParserError, self).__init__(str(cause))
+        self.cause = cause
+
+
+def rcompile(pattern, flags=0):
+    if not isinstance(pattern, basestring):
+        # If it's not a string, assume it's already a compiled pattern
+        return pattern
+    return re.compile(pattern, re.UNICODE | flags)
+
+
+def get_single_text(field, text, **kwargs):
+    # Just take the first token
+    for t in field.process_text(text, mode="query", **kwargs):
+        return t
+
+
diff --git a/lib/whoosh/whoosh/qparser/dateparse.py b/lib/whoosh/whoosh/qparser/dateparse.py
new file mode 100644
index 0000000..3f25540
--- /dev/null
+++ b/lib/whoosh/whoosh/qparser/dateparse.py
@@ -0,0 +1,913 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+import re
+from datetime import datetime, timedelta
+
+from whoosh.qparser import BasicSyntax, ErrorToken, Plugin, RangePlugin, Group, Word
+from whoosh.support.relativedelta import relativedelta
+from whoosh.support.times import (adatetime, timespan, fill_in, is_void,
+                                  TimeError, relative_days)
+
+
+class DateParseError(Exception):
+    "Represents an error in parsing date text."
+
+
+# Utility functions
+
+def rcompile(pattern):
+    """Just a shortcut to call re.compile with a standard set of flags.
+    """
+
+    return re.compile(pattern, re.IGNORECASE | re.UNICODE)
+
+
+def print_debug(level, msg, *args):
+    if level > 0:
+        print ("  " * (level - 1)) + (msg % args)
+
+
+# Parser element objects
+
+class Props(object):
+    """A dumb little object that just copies a dictionary into attributes
+    so I can use dot syntax instead of square bracket string item lookup and
+    save a little bit of typing. Used by :class:`Regex`.
+    """
+
+    def __init__(self, **args):
+        self.__dict__ = args
+
+    def __repr__(self):
+        return repr(self.__dict__)
+
+    def get(self, key, default=None):
+        return self.__dict__.get(key, default)
+
+
+class ParserBase(object):
+    """Base class for date parser elements.
+    """
+
+    def to_parser(self, e):
+        if isinstance(e, basestring):
+            return Regex(e)
+        else:
+            return e
+
+    def parse(self, text, dt, pos=0, debug=-9999):
+        raise NotImplementedError
+
+    def date_from(self, text, dt=None, pos=0, debug=-9999):
+        if dt is None:
+            dt = datetime.now()
+
+        d, pos = self.parse(text, dt, pos, debug + 1)
+        return d
+
+
+class MultiBase(ParserBase):
+    """Base class for date parser elements such as Sequence and Bag that
+    have sub-elements.
+    """
+
+    def __init__(self, elements, name=None):
+        """
+        :param elements: the sub-elements to match.
+        :param name: a name for this element (for debugging purposes only).
+        """
+
+        self.elements = [self.to_parser(e) for e in elements]
+        self.name = name
+
+    def __repr__(self):
+        return "%s<%s>%r" % (self.__class__.__name__, self.name or '', self.elements)
+
+
+class Sequence(MultiBase):
+    """Merges the dates parsed by a sequence of sub-elements.
+    """
+
+    def __init__(self, elements, sep="(\\s+|\\s*,\\s*)", name=None,
+                 progressive=False):
+        """
+        :param elements: the sequence of sub-elements to parse.
+        :param sep: a separator regular expression to match between elements,
+            or None to not have separators.
+        :param name: a name for this element (for debugging purposes only).
+        :param progressive: if True, elements after the first do not need to
+            match. That is, for elements (a, b, c) and progressive=True, the
+            sequence matches like ``a[b[c]]``.
+        """
+
+        super(Sequence, self).__init__(elements, name)
+        self.sep_pattern = sep
+        if sep:
+            self.sep_expr = rcompile(sep)
+        else:
+            self.sep_expr = None
+        self.progressive = progressive
+
+    def parse(self, text, dt, pos=0, debug=-9999):
+        d = adatetime()
+        first = True
+        foundall = False
+        failed = False
+
+        print_debug(debug, "Seq %s sep=%r text=%r", self.name, self.sep_pattern, text[pos:])
+        for e in self.elements:
+            print_debug(debug, "Seq %s text=%r", self.name, text[pos:])
+            if self.sep_expr and not first:
+                print_debug(debug, "Seq %s looking for sep", self.name)
+                m = self.sep_expr.match(text, pos)
+                if m:
+                    pos = m.end()
+                else:
+                    print_debug(debug, "Seq %s didn't find sep", self.name)
+                    break
+
+            print_debug(debug, "Seq %s trying=%r at=%s", self.name, e, pos)
+
+            try:
+                at, newpos = e.parse(text, dt, pos=pos, debug=debug + 1)
+            except TimeError:
+                failed = True
+                break
+
+            print_debug(debug, "Seq %s result=%r", self.name, at)
+            if not at:
+                break
+            pos = newpos
+
+            print_debug(debug, "Seq %s adding=%r to=%r", self.name, at, d)
+            try:
+                d = fill_in(d, at)
+            except TimeError:
+                print_debug(debug, "Seq %s Error in fill_in", self.name)
+                failed = True
+                break
+            print_debug(debug, "Seq %s filled date=%r", self.name, d)
+
+            first = False
+        else:
+            foundall = True
+
+        if not failed and (foundall or (not first and self.progressive)):
+            print_debug(debug, "Seq %s final=%r", self.name, d)
+            return (d, pos)
+        else:
+            print_debug(debug, "Seq %s failed", self.name)
+            return (None, None)
+
+
+class Combo(Sequence):
+    """Parses a sequence of elements in order and combines the dates parsed
+    by the sub-elements. The default behavior is to accept exactly two dates
+    from the sub-elements and turn them into a range (timespan).
+    """
+
+    def __init__(self, elements, fn=None, sep="(\\s+|\\s*,\\s*)", min=2, max=2,
+                 name=None):
+        """
+        :param elements: the sequence of sub-elements to parse.
+        :param fn: a function to run on all dates found. It should return a
+            datetime, adatetime, or timespan object. If this argument is None,
+            the default behavior accepts two dates and returns a timespan.
+        :param sep: a separator regular expression to match between elements,
+            or None to not have separators.
+        :param min: the minimum number of dates required from the sub-elements.
+        :param max: the maximum number of dates allowed from the sub-elements.
+        :param name: a name for this element (for debugging purposes only).
+        """
+
+        super(Combo, self).__init__(elements, sep=sep, name=name)
+        self.fn = fn
+        self.min = min
+        self.max = max
+
+    def parse(self, text, dt, pos=0, debug=-9999):
+        dates = []
+        first = True
+
+        print_debug(debug, "Combo %s sep=%r text=%r", self.name, self.sep_pattern, text[pos:])
+        for e in self.elements:
+            if self.sep_expr and not first:
+                print_debug(debug, "Combo %s looking for sep at %r", self.name, text[pos:])
+                m = self.sep_expr.match(text, pos)
+                if m:
+                    pos = m.end()
+                else:
+                    print_debug(debug, "Combo %s didn't find sep", self.name)
+                    return (None, None)
+
+            print_debug(debug, "Combo %s trying=%r", self.name, e)
+            try:
+                at, pos = e.parse(text, dt, pos, debug + 1)
+            except TimeError:
+                at, pos = None, None
+
+            print_debug(debug, "Combo %s result=%r", self.name, at)
+            if at is None:
+                return (None, None)
+
+            first = False
+            if is_void(at):
+                continue
+            if len(dates) == self.max:
+                print_debug(debug, "Combo %s length > %s", self.name, self.max)
+                return (None, None)
+            dates.append(at)
+
+        print_debug(debug, "Combo %s dates=%r", self.name, dates)
+        if len(dates) < self.min:
+            print_debug(debug, "Combo %s length < %s", self.name, self.min)
+            return (None, None)
+
+        return (self.dates_to_timespan(dates), pos)
+
+    def dates_to_timespan(self, dates):
+        if self.fn:
+            return self.fn(dates)
+        elif len(dates) == 2:
+            return timespan(dates[0], dates[1])
+        else:
+            raise DateParseError("Don't know what to do with %r" % (dates, ))
+
+
+class Choice(MultiBase):
+    """Returns the date from the first of its sub-elements that matches.
+    """
+
+    def parse(self, text, dt, pos=0, debug=-9999):
+        print_debug(debug, "Choice %s text=%r", self.name, text[pos:])
+        for e in self.elements:
+            print_debug(debug, "Choice %s trying=%r", self.name, e)
+
+            try:
+                d, newpos = e.parse(text, dt, pos, debug + 1)
+            except TimeError:
+                d, newpos = None, None
+            if d:
+                print_debug(debug, "Choice %s matched", self.name)
+                return (d, newpos)
+        print_debug(debug, "Choice %s no match", self.name)
+        return (None, None)
+
+
+class Bag(MultiBase):
+    """Parses its sub-elements in any order and merges the dates.
+    """
+
+    def __init__(self, elements, sep="(\\s+|\\s*,\\s*)", onceper=True,
+                 requireall=False, allof=None, anyof=None, name=None):
+        """
+        :param elements: the sub-elements to parse.
+        :param sep: a separator regular expression to match between elements,
+            or None to not have separators.
+        :param onceper: only allow each element to match once.
+        :param requireall: if True, the sub-elements can match in any order,
+            but they must all match.
+        :param allof: a list of indexes into the list of elements. When this
+            argument is not None, this element matches only if all the
+            indicated sub-elements match.
+        :param anyof: a list of indexes into the list of elements. When this
+            argument is not None, this element matches only if any of the
+            indicated sub-elements match.
+        :param name: a name for this element (for debugging purposes only).
+        """
+
+        super(Bag, self).__init__(elements, name)
+        self.sep_expr = rcompile(sep)
+        self.onceper = onceper
+        self.requireall = requireall
+        self.allof = allof
+        self.anyof = anyof
+
+    def parse(self, text, dt, pos=0, debug=-9999):
+        first = True
+        d = adatetime()
+        seen = [False] * len(self.elements)
+
+        while True:
+            newpos = pos
+            print_debug(debug, "Bag %s text=%r", self.name, text[pos:])
+            if not first:
+                print_debug(debug, "Bag %s looking for sep", self.name)
+                m = self.sep_expr.match(text, pos)
+                if m:
+                    newpos = m.end()
+                else:
+                    print_debug(debug, "Bag %s didn't find sep", self.name)
+                    break
+
+            for i, e in enumerate(self.elements):
+                print_debug(debug, "Bag %s trying=%r", self.name, e)
+
+                try:
+                    at, xpos = e.parse(text, dt, newpos, debug + 1)
+                except TimeError:
+                    at, xpos = None, None
+
+                print_debug(debug, "Bag %s result=%r", self.name, at)
+                if at:
+                    if self.onceper and seen[i]:
+                        return (None, None)
+
+                    d = fill_in(d, at)
+                    newpos = xpos
+                    seen[i] = True
+                    break
+            else:
+                break
+
+            pos = newpos
+            if self.onceper and all(seen):
+                break
+
+            first = False
+
+        if (not any(seen)
+            or (self.allof and not all(seen[pos] for pos in self.allof))
+            or (self.anyof and not any(seen[pos] for pos in self.anyof))
+            or (self.requireall and not all(seen))):
+            return (None, None)
+
+        print_debug(debug, "Bag %s final=%r", self.name, d)
+        return (d, pos)
+
+
+class Optional(ParserBase):
+    """Wraps a sub-element to indicate that the sub-element is optional.
+    """
+
+    def __init__(self, element):
+        self.element = self.to_parser(element)
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, self.element)
+
+    def parse(self, text, dt, pos=0, debug=-9999):
+        try:
+            d, pos = self.element.parse(text, dt, pos, debug + 1)
+        except TimeError:
+            d, pos = None, None
+
+        if d:
+            return (d, pos)
+        else:
+            return (adatetime(), pos)
+
+
+class ToEnd(ParserBase):
+    """Wraps a sub-element and requires that the end of the sub-element's match
+    be the end of the text.
+    """
+
+    def __init__(self, element):
+        self.element = element
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, self.element)
+
+    def parse(self, text, dt, pos=0, debug=-9999):
+        try:
+            d, pos = self.element.parse(text, dt, pos, debug + 1)
+        except TimeError:
+            d, pos = None, None
+
+        if d and pos == len(text):
+            return (d, pos)
+        else:
+            return (None, None)
+
+
+class Regex(ParserBase):
+    """Matches a regular expression and maps named groups in the pattern to
+    datetime attributes using a function or overridden method.
+
+    There are two points at which you can customize the behavior of this class,
+    either by supplying functions to the initializer or overriding methods.
+
+    * The ``modify`` function or ``modify_props`` method takes a ``Props``
+      object containing the named groups and modifies its values (in place).
+    * The ``fn`` function or ``props_to_date`` method takes a ``Props`` object
+      and the base datetime and returns an adatetime/datetime.
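+
+    A minimal sketch of the ``fn`` customization point (the quarter pattern
+    and lambda below are illustrative assumptions, not part of the shipped
+    parser):
+
+    >>> quarter = Regex("q(?P<q>[1-4])",
+    ...                 fn=lambda p, dt: adatetime(month=(p.q - 1) * 3 + 1))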
+    """
+
+    fn = None
+    modify = None
+
+    def __init__(self, pattern, fn=None, modify=None):
+        self.pattern = pattern
+        self.expr = rcompile(pattern)
+        self.fn = fn
+        self.modify = modify
+
+    def __repr__(self):
+        return "<%r>" % (self.pattern, )
+
+    def parse(self, text, dt, pos=0, debug=-9999):
+        m = self.expr.match(text, pos)
+        if not m:
+            return (None, None)
+
+        props = self.extract(m)
+        self.modify_props(props)
+
+        try:
+            d = self.props_to_date(props, dt)
+        except TimeError:
+            d = None
+
+        if d:
+            return (d, m.end())
+        else:
+            return (None, None)
+
+    def extract(self, match):
+        d = match.groupdict()
+        for key, value in d.iteritems():
+            try:
+                value = int(value)
+                d[key] = value
+            except (ValueError, TypeError):
+                pass
+        return Props(**d)
+
+    def modify_props(self, props):
+        if self.modify:
+            self.modify(props)
+
+    def props_to_date(self, props, dt):
+        if self.fn:
+            return self.fn(props, dt)
+        else:
+            args = {}
+            for key in adatetime.units:
+                args[key] = props.get(key)
+            return adatetime(**args)
+
+
+class Month(Regex):
+    def __init__(self, *patterns):
+        self.patterns = patterns
+        self.exprs = [rcompile(pat) for pat in self.patterns]
+
+        self.pattern = ("(?P<month>"
+                        + "|".join("(%s)" % pat for pat in self.patterns)
+                        + ")")
+        self.expr = rcompile(self.pattern)
+
+    def modify_props(self, p):
+        text = p.month
+        for i, expr in enumerate(self.exprs):
+            m = expr.match(text)
+            if m:
+                p.month = i + 1
+                break
+
+
+class PlusMinus(Regex):
+    def __init__(self, years, months, weeks, days, hours, minutes, seconds):
+        rel_years = "((?P<years>[0-9]+) *(%s))?" % years
+        rel_months = "((?P<months>[0-9]+) *(%s))?" % months
+        rel_weeks = "((?P<weeks>[0-9]+) *(%s))?" % weeks
+        rel_days = "((?P<days>[0-9]+) *(%s))?" % days
+        rel_hours = "((?P<hours>[0-9]+) *(%s))?" % hours
+        rel_mins = "((?P<mins>[0-9]+) *(%s))?" % minutes
+        rel_secs = "((?P<secs>[0-9]+) *(%s))?" % seconds
+
+        self.pattern = ("(?P<dir>[+-]) *%s *%s *%s *%s *%s *%s *%s(?=(\\W|$))"
+                        % (rel_years, rel_months, rel_weeks, rel_days,
+                           rel_hours, rel_mins, rel_secs))
+        self.expr = rcompile(self.pattern)
+
+    def props_to_date(self, p, dt):
+        if p.dir == "-":
+            dir = -1
+        else:
+            dir = 1
+
+        delta = relativedelta(years=(p.get("years") or 0) * dir,
+                              months=(p.get("months") or 0) * dir,
+                              weeks=(p.get("weeks") or 0) * dir,
+                              days=(p.get("days") or 0) * dir,
+                              hours=(p.get("hours") or 0) * dir,
+                              minutes=(p.get("mins") or 0) * dir,
+                              seconds=(p.get("secs") or 0) * dir)
+        return dt + delta
+
+
+class Daynames(Regex):
+    def __init__(self, next, last, daynames):
+        self.next_pattern = next
+        self.last_pattern = last
+        self._dayname_exprs = tuple(rcompile(pat) for pat in daynames)
+        dn_pattern = "|".join(daynames)
+        self.pattern = "(?P<dir>%s|%s) +(?P<day>%s)(?=(\\W|$))" % (next, last, dn_pattern)
+        self.expr = rcompile(self.pattern)
+
+    def props_to_date(self, p, dt):
+        if re.match(self.last_pattern, p.dir):
+            dir = -1
+        else:
+            dir = 1
+
+        for daynum, expr in enumerate(self._dayname_exprs):
+            m = expr.match(p.day)
+            if m:
+                break
+        current_daynum = dt.weekday()
+        days_delta = relative_days(current_daynum, daynum, dir)
+
+        d = dt.date() + timedelta(days=days_delta)
+        return adatetime(year=d.year, month=d.month, day=d.day)
+
+
+class Time12(Regex):
+    def __init__(self):
+        self.pattern = "(?P<hour>[1-9]|10|11|12)(:(?P<mins>[0-5][0-9])(:(?P<secs>[0-5][0-9])(\\.(?P<usecs>[0-9]{1,5}))?)?)?\\s*(?P<ampm>am|pm)(?=(\\W|$))"
+        self.expr = rcompile(self.pattern)
+
+    def props_to_date(self, p, dt):
+        isam = p.ampm.lower().startswith("a")
+
+        if p.hour == 12:
+            if isam:
+                hr = 0
+            else:
+                hr = 12
+        else:
+            hr = p.hour
+            if not isam:
+                hr += 12
+
+        return adatetime(hour=hr, minute=p.mins, second=p.secs, microsecond=p.usecs)
+
+
+# Top-level parser classes
+
+class DateParser(object):
+    """Base class for locale-specific parser classes.
+    """
+
+    day = Regex("(?P<day>([123][0-9])|[1-9])(?=(\\W|$))(?!=:)",
+                lambda p, dt: adatetime(day=p.day))
+    year = Regex("(?P<year>[0-9]{4})(?=(\\W|$))",
+                 lambda p, dt: adatetime(year=p.year))
+    time24 = Regex("(?P<hour>([0-1][0-9])|(2[0-3])):(?P<mins>[0-5][0-9])(:(?P<secs>[0-5][0-9])(\\.(?P<usecs>[0-9]{1,5}))?)?(?=(\\W|$))",
+                   lambda p, dt: adatetime(hour=p.hour, minute=p.mins, second=p.secs, microsecond=p.usecs))
+    time12 = Time12()
+
+    def __init__(self):
+        simple_year = "(?P<year>[0-9]{4})"
+        simple_month = "(?P<month>[0-1][0-9])"
+        simple_day = "(?P<day>[0-3][0-9])"
+        simple_hour = "(?P<hour>([0-1][0-9])|(2[0-3]))"
+        simple_minute = "(?P<minute>[0-5][0-9])"
+        simple_second = "(?P<second>[0-5][0-9])"
+        simple_usec = "(?P<microsecond>[0-9]{6})"
+
+        simple_seq = Sequence((simple_year, simple_month, simple_day, simple_hour,
+                               simple_minute, simple_second, simple_usec),
+                               sep="[- .:/]*", name="simple", progressive=True)
+        self.simple = Sequence((simple_seq, "(?=(\\s|$))"), sep='')
+
+        self.setup()
+
+    def setup(self):
+        raise NotImplementedError
+
+    #
+
+    def get_parser(self):
+        return self.all
+
+    def parse(self, text, dt, pos=0, debug=-9999):
+        parser = self.get_parser()
+
+        d, newpos = parser.parse(text, dt, pos=pos, debug=debug)
+        if isinstance(d, (adatetime, timespan)):
+            d = d.disambiguated(dt)
+
+        return (d, newpos)
+
+    def date_from(self, text, basedate=None, pos=0, debug=-9999, toend=True):
+        if basedate is None:
+            basedate = datetime.utcnow()
+
+        parser = self.get_parser()
+        if toend:
+            parser = ToEnd(parser)
+
+        d = parser.date_from(text, basedate, pos=pos, debug=debug)
+        if isinstance(d, (adatetime, timespan)):
+            d = d.disambiguated(basedate)
+        return d
+
+
+class English(DateParser):
+    day = Regex("(?P<day>([123][0-9])|[1-9])(st|nd|rd|th)?(?=(\\W|$))",
+                lambda p, dt: adatetime(day=p.day))
+
+    def setup(self):
+        self.plusdate = PlusMinus("years|year|yrs|yr|ys|y",
+                                  "months|month|mons|mon|mos|mo",
+                                  "weeks|week|wks|wk|ws|w",
+                                  "days|day|dys|dy|ds|d",
+                                  "hours|hour|hrs|hr|hs|h",
+                                  "minutes|minute|mins|min|ms|m",
+                                  "seconds|second|secs|sec|s")
+
+        self.dayname = Daynames("next", "last",
+                                ("monday|mon|mo", "tuesday|tues|tue|tu",
+                                 "wednesday|wed|we", "thursday|thur|thu|th",
+                                 "friday|fri|fr", "saturday|sat|sa",
+                                 "sunday|sun|su"))
+
+        midnight = Regex("midnight", lambda p, dt: adatetime(hour=0, minute=0, second=0, microsecond=0))
+        noon = Regex("noon", lambda p, dt: adatetime(hour=12, minute=0, second=0, microsecond=0))
+        now = Regex("now", lambda p, dt: dt)
+        self.time = Choice((self.time12, self.time24, midnight, noon, now), name="time")
+
+        def tomorrow_to_date(p, dt):
+            d = dt.date() + timedelta(days=+1)
+            return adatetime(year=d.year, month=d.month, day=d.day)
+        tomorrow = Regex("tomorrow", tomorrow_to_date)
+
+        def yesterday_to_date(p, dt):
+            d = dt.date() + timedelta(days=-1)
+            return adatetime(year=d.year, month=d.month, day=d.day)
+        yesterday = Regex("yesterday", yesterday_to_date)
+
+        thisyear = Regex("this year", lambda p, dt: adatetime(year=dt.year))
+        thismonth = Regex("this month", lambda p, dt: adatetime(year=dt.year, month=dt.month))
+        today = Regex("today", lambda p, dt: adatetime(year=dt.year, month=dt.month, day=dt.day))
+
+        self.month = Month("january|jan", "february|febuary|feb", "march|mar",
+                           "april|apr", "may", "june|jun", "july|jul", "august|aug",
+                           "september|sept|sep", "october|oct", "november|nov",
+                           "december|dec")
+
+        # If you specify a day number you must also specify a month... this
+        # Choice captures that constraint
+
+        self.dmy = Choice((Sequence((self.day, self.month, self.year), name="dmy"),
+                           Sequence((self.month, self.day, self.year), name="mdy"),
+                           Sequence((self.year, self.month, self.day), name="ymd"),
+                           Sequence((self.year, self.day, self.month), name="ydm"),
+                           Sequence((self.day, self.month), name="dm"),
+                           Sequence((self.month, self.day), name="md"),
+                           Sequence((self.month, self.year), name="my"),
+                           self.month, self.year, self.dayname, tomorrow,
+                           yesterday, thisyear, thismonth, today, now,
+                           ), name="date")
+
+        self.datetime = Bag((self.time, self.dmy), name="datetime")
+        self.bundle = Choice((self.plusdate, self.datetime, self.simple), name="bundle")
+        self.torange = Combo((self.bundle, "to", self.bundle), name="torange")
+
+        self.all = Choice((self.torange, self.bundle), name="all")
+
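+# A hedged usage sketch for the English parser (the input strings are
+# illustrative; exact supported forms are defined by the elements above):
+#
+#   ep = English()
+#   ep.date_from(u"last tuesday", basedate=datetime(2011, 7, 19))
+#   ep.date_from(u"5 sept 2010 to 6 oct 2010")  # parses to a timespan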
+
+# QueryParser plugin
+
+class DateParserPlugin(Plugin):
+    """Adds more powerful parsing of DATETIME fields.
+
+    >>> parser.add_plugin(DateParserPlugin())
+    >>> parser.parse(u"date:'last tuesday'")
+    """
+
+    def __init__(self, basedate=None, dateparser=None, callback=None,
+                 free=False):
+        """
+        :param basedate: a datetime object representing the current time
+            against which to measure relative dates. If you do not supply this
+            argument, the plugin uses ``datetime.utcnow()``.
+        :param dateparser: an instance of
+            :class:`whoosh.qparser.dateparse.DateParser`. If you do not supply
+            this argument, the plugin automatically uses
+            :class:`whoosh.qparser.dateparse.English`.
+        :param callback: a callback function for parsing errors. This allows
+            you to provide feedback to the user about problems parsing dates.
+        :param free: if True, this plugin will install a filter early in the
+            parsing process and try to find undelimited dates such as
+            ``date:last tuesday``. Note that allowing this could result in
+            normal query words accidentally being parsed as dates sometimes.
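+
+        A hedged sketch of using the error callback (the ``report`` function
+        and the ``parser`` object are illustrative assumptions):
+
+        >>> def report(text):
+        ...     print "Could not parse date:", text
+        >>> parser.add_plugin(DateParserPlugin(callback=report))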
+        """
+
+        self.basedate = basedate
+        if dateparser is None:
+            dateparser = English()
+        self.dateparser = dateparser
+        self.callback = callback
+        self.free = free
+
+    def tokens(self, parser):
+        if self.free:
+            # If we're tokenizing, we have to go before the FieldsPlugin
+            return ((DateToken, -1), )
+        else:
+            return ()
+
+    def filters(self, parser):
+        # Run the filter after the FieldsPlugin assigns field names
+        return ((self.do_dates, 110), )
+
+    def do_dates(self, parser, stream):
+        schema = parser.schema
+        if not schema:
+            return stream
+
+        from whoosh.fields import DATETIME
+        datefields = frozenset(fieldname for fieldname, field
+                               in parser.schema.items()
+                               if isinstance(field, DATETIME))
+
+        newstream = stream.empty()
+        for t in stream:
+            if isinstance(t, Group):
+                t = self.do_dates(parser, t)
+            elif (t.fieldname in datefields
+                  or (t.fieldname is None and parser.fieldname in datefields)):
+                if isinstance(t, Word):
+                    text = t.text
+                    try:
+                        dt = self.dateparser.date_from(text, self.basedate)
+                        if dt is None:
+                            if self.callback:
+                                self.callback(text)
+                            t = ErrorToken(t)
+                        else:
+                            t = DateToken(t.fieldname, dt, t.boost)
+                    except DateParseError, e:
+                        if self.callback:
+                            self.callback("%s (%r)" % (str(e), text))
+                        t = ErrorToken(t)
+
+                elif isinstance(t, RangePlugin.Range):
+                    start = end = None
+                    error = None
+
+                    dp = self.dateparser.get_parser()
+
+                    if t.start:
+                        start = dp.date_from(t.start, self.basedate)
+                        if start is None:
+                            error = t.start
+                    if t.end:
+                        end = dp.date_from(t.end, self.basedate)
+                        if end is None and error is None:
+                            error = t.end
+
+                    if error is not None:
+                        if self.callback:
+                            self.callback(error)
+                        t = ErrorToken(t)
+                    else:
+                        if start and end:
+                            ts = timespan(start, end).disambiguated(self.basedate)
+                            start, end = ts.start, ts.end
+                        elif start:
+                            start = start.disambiguated(self.basedate)
+                            if isinstance(start, timespan):
+                                start = start.start
+                        elif end:
+                            end = end.disambiguated(self.basedate)
+                            if isinstance(end, timespan):
+                                end = end.end
+                        t = DateRangeToken(t.fieldname, start, end, boost=t.boost)
+
+            newstream.append(t)
+        return newstream
+
+
+class DateToken(BasicSyntax):
+    expr = re.compile("([A-Za-z][A-Za-z_0-9]*):([^^]+)")
+
+    def __init__(self, fieldname, timeobj, boost=1.0, endpos=None):
+        self.fieldname = fieldname
+        self.timeobj = timeobj
+        self.boost = boost
+        self.endpos = endpos
+
+    def __repr__(self):
+        r = "%s:(%r)" % (self.fieldname, self.timeobj)
+        if self.boost != 1.0:
+            r += "^%s" % self.boost
+        return r
+
+    def set_boost(self, b):
+        return self.__class__(self.fieldname, self.timeobj, boost=b,
+                              endpos=self.endpos)
+
+    def set_fieldname(self, name):
+        if name is None:
+            raise Exception
+        return self.__class__(name, self.timeobj, boost=self.boost,
+                              endpos=self.endpos)
+
+    def query(self, parser):
+        from whoosh import query
+
+        fieldname = self.fieldname or parser.fieldname
+        field = parser.schema[fieldname]
+        dt = self.timeobj
+        if isinstance(self.timeobj, datetime):
+            return query.Term(fieldname, field.to_text(dt), boost=self.boost)
+        elif isinstance(self.timeobj, timespan):
+            return query.DateRange(fieldname, dt.start, dt.end,
+                                   boost=self.boost)
+        else:
+            raise Exception("Unknown time object: %r" % dt)
+
+    @classmethod
+    def create(cls, parser, match):
+        fieldname = match.group(1)
+        if parser.schema and fieldname in parser.schema:
+            field = parser.schema[fieldname]
+
+            from whoosh.fields import DATETIME
+            if isinstance(field, DATETIME):
+                text = match.group(2)
+                textstart = match.start(2)
+
+                plugin = parser.get_plugin(DateParserPlugin)
+                dateparser = plugin.dateparser
+                basedate = plugin.basedate
+
+                d, newpos = dateparser.parse(text, basedate)
+                if d:
+                    return cls(fieldname, d, endpos=newpos + textstart)
+
+
+class DateRangeToken(BasicSyntax):
+    def __init__(self, fieldname, starttime, endtime, boost=1.0, endpos=None):
+        self.fieldname = fieldname
+        self.starttime = starttime
+        self.endtime = endtime
+        self.boost = boost
+        self.endpos = endpos
+
+    def __repr__(self):
+        r = "%s:(%r, %r)" % (self.fieldname, self.starttime, self.endtime)
+        if self.boost != 1.0:
+            r += "^%s" % self.boost
+        return r
+
+    def set_boost(self, b):
+        return self.__class__(self.fieldname, self.starttime, self.endtime,
+                              boost=b, endpos=self.endpos)
+
+    def set_fieldname(self, name):
+        if name is None:
+            raise Exception
+        return self.__class__(name, self.starttime, self.endtime,
+                              boost=self.boost, endpos=self.endpos)
+
+    def query(self, parser):
+        from whoosh import query
+        fieldname = self.fieldname or parser.fieldname
+        start = self.starttime
+        end = self.endtime
+
+        if start is None and end is None:
+            return query.Every(fieldname)
+        else:
+            return query.DateRange(fieldname, start, end, boost=self.boost)
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/qparser/default.py b/lib/whoosh/whoosh/qparser/default.py
new file mode 100644
index 0000000..068bd29
--- /dev/null
+++ b/lib/whoosh/whoosh/qparser/default.py
@@ -0,0 +1,358 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""
+This module contains the new plug-in based hand-written query parser. This
+parser is able to adapt its behavior using interchangeable plug-in classes.
+"""
+
+from whoosh import query
+from whoosh.qparser.syntax import *
+from whoosh.qparser.plugins import *
+
+
+ws = "[ \t\r\n]+"
+wsexpr = rcompile(ws)
+
+
+full_profile = (BoostPlugin, OperatorsPlugin, FieldsPlugin, GroupPlugin,
+                PhrasePlugin, RangePlugin, SingleQuotesPlugin, WildcardPlugin)
+
+
+class QueryParser(object):
+    """A hand-written query parser built on modular plug-ins. The default
+    configuration implements a powerful fielded query language similar to
+    Lucene's.
+
+    You can use the ``plugins`` argument when creating the object to override
+    the default list of plug-ins, and/or use ``add_plugin()`` and/or
+    ``remove_plugin_class()`` to change the plug-ins included in the parser.
+
+    >>> from whoosh import qparser
+    >>> parser = qparser.QueryParser("content", schema)
+    >>> parser.remove_plugin_class(qparser.WildcardPlugin)
+    >>> parser.parse(u"hello there")
+    And([Term("content", u"hello"), Term("content", u"there")])
+    """
+
+    _multitoken_query_map = {"and": query.And, "or": query.Or,
+                             "phrase": query.Phrase}
+
+    def __init__(self, fieldname, schema, termclass=query.Term,
+                 phraseclass=query.Phrase, group=AndGroup, plugins=None):
+        """
+        :param fieldname: the default field -- use this as the field for any
+            terms without an explicit field.
+        :param schema: a :class:`whoosh.fields.Schema` object to use when
+            parsing. The appropriate fields in the schema will be used to
+            tokenize terms/phrases before they are turned into query objects.
+            You can specify None for the schema to create a parser that does
+            not analyze the text of the query, usually for testing purposes.
+        :param termclass: the query class to use for individual search terms.
+            The default is :class:`whoosh.query.Term`.
+        :param phraseclass: the query class to use for phrases. The default
+            is :class:`whoosh.query.Phrase`.
+        :param group: the default grouping. ``AndGroup`` makes terms required
+            by default. ``OrGroup`` makes terms optional by default.
+        :param plugins: a list of plugins to use. WhitespacePlugin is
+            automatically included; do not put it in this list. This overrides
+            the default list of plugins. Classes in the list will be
+            automatically instantiated.
+        """
+
+        self.fieldname = fieldname
+        self.schema = schema
+        self.termclass = termclass
+        self.phraseclass = phraseclass
+        self.group = group
+
+        if not plugins:
+            plugins = full_profile
+        plugins = list(plugins) + [WhitespacePlugin]
+        for i, plugin in enumerate(plugins):
+            if isinstance(plugin, type):
+                try:
+                    plugins[i] = plugin()
+                except TypeError:
+                    raise TypeError("Could not instantiate %r" % plugin)
+        self.plugins = plugins
+
+    def add_plugin(self, plugin):
+        """Adds the given plugin to the list of plugins in this parser.
+        """
+
+        if isinstance(plugin, type):
+            plugin = plugin()
+        self.plugins.append(plugin)
+
+    def remove_plugin(self, plugin):
+        """Removes the given plugin from the list of plugins in this parser.
+        """
+
+        self.plugins.remove(plugin)
+
+    def remove_plugin_class(self, cls):
+        """Removes any plugins of the given class from this parser.
+        """
+
+        self.plugins = [p for p in self.plugins if not isinstance(p, cls)]
+
+    def replace_plugin(self, plugin):
+        """Removes any plugins of the class of the given plugin and then adds
+        it. This is a convenience method to keep from having to call
+        ``remove_plugin_class`` followed by ``add_plugin`` each time you want
+        to reconfigure a default plugin.
+
+        >>> qp = qparser.QueryParser("content", schema)
+        >>> qp.replace_plugin(qparser.NotPlugin("(^| )-"))
+        """
+
+        self.remove_plugin_class(plugin.__class__)
+        self.add_plugin(plugin)
+
+    def get_plugin(self, cls, derived=True):
+        for plugin in self.plugins:
+            if (derived and isinstance(plugin, cls)) or plugin.__class__ is cls:
+                return plugin
+        raise KeyError("No plugin with class %r" % cls)
+
+    def _priorized(self, methodname):
+        items_and_priorities = []
+        for plugin in self.plugins:
+            method = getattr(plugin, methodname)
+            for item in method(self):
+                items_and_priorities.append(item)
+        items_and_priorities.sort(key=lambda x: x[1])
+        return [item for item, pri in items_and_priorities]
+
+    def multitoken_query(self, name, texts, fieldname, termclass, boost):
+        qclass = self._multitoken_query_map.get(name.lower())
+        if qclass:
+            return qclass([termclass(fieldname, t, boost=boost)
+                           for t in texts])
+
+    def term_query(self, fieldname, text, termclass, boost=1.0, tokenize=True,
+                   removestops=True):
+        """Returns the appropriate query object for a single term in the query
+        string.
+        """
+
+        if self.schema and fieldname in self.schema:
+            field = self.schema[fieldname]
+
+            # If this field type wants to parse queries itself, let it do so
+            # and return early
+            if field.self_parsing():
+                try:
+                    return field.parse_query(fieldname, text, boost=boost)
+                except QueryParserError:
+                    return query.NullQuery
+
+            # Otherwise, ask the field to process the text into a list of
+            # tokenized strings
+            texts = list(field.process_text(text, mode="query",
+                                            tokenize=tokenize,
+                                            removestops=removestops))
+
+            # If the analyzer returned more than one token, use the field's
+            # multitoken_query attribute to decide what query class, if any, to
+            # use to put the tokens together
+            if len(texts) > 1:
+                mtq = self.multitoken_query(field.multitoken_query, texts,
+                                            fieldname, termclass, boost)
+                if mtq:
+                    return mtq
+
+            # It's possible field.process_text() will return an empty list (for
+            # example, on a stop word)
+            if not texts:
+                return query.NullQuery
+
+            text = texts[0]
+
+        return termclass(fieldname, text, boost=boost)
+
+    def tokens(self):
+        """Returns a priority-ordered list of tokens from the included plugins.
+        """
+
+        return self._priorized("tokens")
+
+    def filters(self):
+        """Returns a priority-ordered list of filter functions from the included
+        plugins.
+        """
+
+        return self._priorized("filters")
+
+    def parse(self, text, normalize=True, debug=False):
+        """Parses the input string and returns a Query object/tree.
+
+        This method may return None if the input string does not result in any
+        valid queries.
+
+        :param text: the unicode string to parse.
+        :param normalize: whether to call normalize() on the query object/tree
+            before returning it. This should be left on unless you're trying to
+            debug the parser output.
+        :rtype: :class:`whoosh.query.Query`
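+
+        For example (assuming the parser from the class docstring above, with
+        the default plugins and a "content" default field; the exact repr may
+        vary with the schema's analyzers):
+
+        >>> parser.parse(u"render OR shade")
+        Or([Term("content", u"render"), Term("content", u"shade")])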
+        """
+
+        if debug:
+            print "Tokenizing %r" % text
+        stream = self._tokenize(text, debug=debug)
+        if debug:
+            print "Stream=", stream
+        stream = self._filterize(stream, debug)
+
+        if debug:
+            print "Final stream=", stream
+        q = stream.query(self)
+        if debug:
+            print "Pre-normalized query=", q
+        if normalize:
+            q = q.normalize()
+        return q
+
+    def _tokenize(self, text, debug=False):
+        stack = []
+        i = 0
+        prev = 0
+
+        tokens = self.tokens()
+        while i < len(text):
+            matched = False
+
+            if debug:
+                print ".matching at %r" % text[i:]
+            for tk in tokens:
+                if debug:
+                    print "..trying token %r" % tk
+                m = tk.match(text, i)
+                if m:
+                    item = tk.create(self, m)
+                    if debug:
+                        print "...matched %r item %r" % (m.group(0), item)
+
+                    if item:
+                        if item.endpos is not None:
+                            newpos = item.endpos
+                        else:
+                            newpos = m.end()
+
+                        if newpos <= i:
+                            raise Exception("Parser element %r did not move the cursor forward (pos=%s match=%r)" % (tk, i, m.group(0)))
+
+                        if prev < i:
+                            if debug:
+                                print "...Adding in-between %r as a term" % text[prev:i]
+                            stack.append(Word(text[prev:i]))
+
+                        stack.append(item)
+                        prev = i = newpos
+                        matched = True
+                        break
+
+            if debug:
+                print ".stack is now %r" % (stack, )
+
+            if not matched:
+                i += 1
+
+        if prev < len(text):
+            stack.append(Word(text[prev:]))
+
+        if debug:
+            print "Final stack %r" % (stack, )
+        return self.group(stack)
+
+    def _filterize(self, stream, debug=False):
+        if debug:
+            print "Tokenized stream=", stream
+
+        for f in self.filters():
+            if debug:
+                print "Applying filter", f
+
+            stream = f(self, stream)
+            if debug:
+                print "Stream=", stream
+
+            if stream is None:
+                raise Exception("Function %s did not return a stream" % f)
+        return stream
+
+
+# Premade parser configurations
+
+def MultifieldParser(fieldnames, schema, fieldboosts=None, **kwargs):
+    """Returns a QueryParser configured to search in multiple fields.
+
+    Instead of assigning unfielded clauses to a default field, this parser
+    transforms them into an OR clause that searches a list of fields. For
+    example, if the list of multi-fields is "f1", "f2" and the query string is
+    "hello there", the parser will produce "(f1:hello OR f2:hello) (f1:there OR
+    f2:there)". This is very useful when you have two textual fields (e.g.
+    "title" and "content") you want to search by default.
+
+    :param fieldnames: a list of field names to search.
+    :param fieldboosts: an optional dictionary mapping field names to boosts.
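+
+    A hedged sketch (the "title" and "content" field names are assumptions
+    about the schema):
+
+    >>> parser = MultifieldParser(["title", "content"], schema)
+    >>> parser.parse(u"hello")
+    Or([Term("title", u"hello"), Term("content", u"hello")])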
+    """
+
+    p = QueryParser(None, schema, **kwargs)
+    p.add_plugin(MultifieldPlugin(fieldnames, fieldboosts=fieldboosts))
+    return p
+
+
+def SimpleParser(fieldname, schema, **kwargs):
+    """Returns a QueryParser configured to support only +, -, and phrase
+    syntax.
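+
+    A hedged usage sketch (the "content" field and query string are
+    illustrative):
+
+    >>> parser = SimpleParser("content", schema)
+    >>> q = parser.parse(u'render -wireframe "ambient occlusion"')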
+    """
+
+    return QueryParser(fieldname, schema,
+                       plugins=(PlusMinusPlugin, PhrasePlugin), **kwargs)
+
+
+def DisMaxParser(fieldboosts, schema, tiebreak=0.0, **kwargs):
+    """Returns a QueryParser configured to support only +, -, and phrase
+    syntax, and which converts individual terms into DisjunctionMax queries
+    across a set of fields.
+
+    :param fieldboosts: a dictionary mapping field names to boosts.
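+
+    A hedged sketch (the field names and boosts are assumptions):
+
+    >>> parser = DisMaxParser({"title": 2.0, "body": 1.0}, schema)
+    >>> q = parser.parse(u"hello")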
+    """
+
+    dmpi = DisMaxPlugin(fieldboosts, tiebreak)
+    return QueryParser(None, schema,
+                       plugins=(PlusMinusPlugin, PhrasePlugin, dmpi), **kwargs)
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/qparser/plugins.py b/lib/whoosh/whoosh/qparser/plugins.py
new file mode 100644
index 0000000..d0b0ce1
--- /dev/null
+++ b/lib/whoosh/whoosh/qparser/plugins.py
@@ -0,0 +1,914 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""
+This module contains plugins for the query parser. Most of the functionality
+of the default query parser is actually provided by plugins.
+"""
+
+import re
+
+from whoosh.qparser.syntax import *
+from whoosh.qparser.common import get_single_text, rcompile, QueryParserError
+
+
+class Plugin(object):
+    """Base class for parser plugins.
+    """
+
+    def tokens(self, parser):
+        """Returns a list of ``(token_class, priority)`` tuples to add to the
+        syntax the parser understands.
+        """
+
+        return ()
+
+    def filters(self, parser):
+        """Returns a list of ``(filter_function, priority)`` tuples to add to
+        parser.
+        """
+
+        return ()
+
+
+class RangePlugin(Plugin):
+    """Adds the ability to specify term ranges.
+
+    This plugin has no configuration.
+
+    This plugin is included in the default parser configuration.
+    """
+
+    def tokens(self, parser):
+        return ((RangePlugin.Range, 1), )
+
+    class Range(Token):
+        expr = re.compile(r"""
+        (?P<open>\{|\[)               # Open paren
+        (?P<start>
+            ('[^']*?'\s+)             # single-quoted
+            |                         # or
+            (.+?(?=[Tt][Oo]))         # everything until "to"
+        )?
+        [Tt][Oo]                      # "to"
+        (?P<end>
+            (\s+'[^']*?')             # single-quoted
+            |                         # or
+            ((.+?)(?=]|}))            # everything until "]" or "}"
+        )?
+        (?P<close>}|])                # Close paren
+        """, re.UNICODE | re.VERBOSE)
+
+        def __init__(self, start, end, startexcl, endexcl, fieldname=None, boost=1.0):
+            self.fieldname = fieldname
+            self.start = start
+            self.end = end
+            self.startexcl = startexcl
+            self.endexcl = endexcl
+            self.boost = boost
+
+        def __repr__(self):
+            r = "%s:(%r, %r, %s, %s)" % (self.fieldname, self.start, self.end,
+                                         self.startexcl, self.endexcl)
+            if self.boost != 1.0:
+                r += "^%s" % self.boost
+            return r
+
+        @classmethod
+        def create(cls, parser, match):
+            start = match.group("start")
+            end = match.group("end")
+            if start:
+                start = start.rstrip()
+                if start.startswith("'") and start.endswith("'"):
+                    start = start[1:-1]
+            if end:
+                end = end.lstrip()
+                if end.startswith("'") and end.endswith("'"):
+                    end = end[1:-1]
+
+            return cls(start, end, startexcl=match.group("open") == "{",
+                       endexcl=match.group("close") == "}")
+
+        def query(self, parser):
+            fieldname = self.fieldname or parser.fieldname
+            start, end = self.start, self.end
+            if parser.schema and fieldname in parser.schema:
+                field = parser.schema[fieldname]
+
+                if field.self_parsing():
+                    try:
+                        rangeq = field.parse_range(fieldname, start, end,
+                                                   self.startexcl, self.endexcl,
+                                                   boost=self.boost)
+                        if rangeq is not None:
+                            return rangeq
+                    except QueryParserError, e:
+                        return query.NullQuery
+
+                if start:
+                    start = get_single_text(field, start, tokenize=False,
+                                            removestops=False)
+                if end:
+                    end = get_single_text(field, end, tokenize=False,
+                                          removestops=False)
+
+            return query.TermRange(fieldname, start, end, self.startexcl,
+                                   self.endexcl, boost=self.boost)
+
+
+class PhrasePlugin(Plugin):
+    """Adds the ability to specify phrase queries inside double quotes.
+
+    This plugin has no configuration.
+
+    This plugin is included in the default parser configuration.
+    """
+
+    def tokens(self, parser):
+        return ((PhrasePlugin.Quotes, 0), )
+
+    class Quotes(BasicSyntax):
+        expr = rcompile('"(.*?)"')
+
+        def __init__(self, text, fieldname=None, boost=1.0, slop=1):
+            super(PhrasePlugin.Quotes, self).__init__(text, fieldname=fieldname,
+                                                      boost=boost)
+            self.slop = slop
+
+        def __repr__(self):
+            r = "%s:q(%r)" % (self.fieldname, self.text)
+            if self.boost != 1.0:
+                r += "^%s" % self.boost
+            return r
+
+        @classmethod
+        def create(cls, parser, match):
+            slop = 1
+            #if match.group(5):
+            #    try:
+            #        slop = int(match.group(5))
+            #    except ValueError:
+            #        pass
+            return cls(match.group(1), slop=slop)
+
+        def query(self, parser):
+            fieldname = self.fieldname or parser.fieldname
+            if parser.schema and fieldname in parser.schema:
+                field = parser.schema[fieldname]
+                #if field.self_parsing():
+                #    return field.parse_query(fieldname, self.text, boost=self.boost)
+                #else:
+                words = list(field.process_text(self.text, mode="query"))
+            else:
+                words = self.text.split(" ")
+
+            return parser.phraseclass(fieldname, words, boost=self.boost,
+                                      slop=self.slop)
+
+
+class SingleQuotesPlugin(Plugin):
+    """Adds the ability to specify single "terms" containing spaces by
+    enclosing them in single quotes.
+
+    This plugin has no configuration.
+
+    This plugin is included in the default parser configuration.
+    """
+
+    def tokens(self, parser):
+        return ((SingleQuotesPlugin.SingleQuotes, 0), )
+
+    class SingleQuotes(Token):
+        expr = rcompile(r"(^|(?<=\W))'(.*?)'(?=\s|\]|[)}]|$)")
+
+        @classmethod
+        def create(cls, parser, match):
+            return Word(match.group(2))
+
+
+class PrefixPlugin(Plugin):
+    """Adds the ability to specify prefix queries by ending a term with an
+    asterisk. This plugin is useful if you want the user to be able to create
+    prefix but not wildcard queries (for performance reasons). If you are
+    including the wildcard plugin, you should not also include this plugin.
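+
+    For example, assuming a default ``QueryParser`` over a "content" field,
+    one way to swap wildcard support for prefix support might be::
+
+        parser = QueryParser("content", schema)
+        parser.remove_plugin_class(WildcardPlugin)
+        parser.add_plugin(PrefixPlugin())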
+    """
+
+    def tokens(self, parser):
+        return ((PrefixPlugin.Prefix, 0), )
+
+    class Prefix(BasicSyntax):
+        expr = rcompile("[^ \t\r\n*]+\\*(?= |$|\\))")
+        qclass = query.Prefix
+
+        def __repr__(self):
+            r = "%s:pre(%r)" % (self.fieldname, self.text)
+            if self.boost != 1.0:
+                r += "^%s" % self.boost
+            return r
+
+        @classmethod
+        def create(cls, parser, match):
+            return cls(match.group(0)[:-1])
+
+
+class WildcardPlugin(Plugin):
+    """Adds the ability to specify wildcard queries by using asterisk and
+    question mark characters in terms. Note that these types can be very
+    performance and memory intensive. You may consider not including this
+    type of query.
+
+    This plugin is included in the default parser configuration.
+    """
+
+    def tokens(self, parser):
+        return ((WildcardPlugin.Wild, 1), )
+
+    class Wild(BasicSyntax):
+        # Any number of word chars, followed by at least one question mark or
+        # star, followed by any number of word chars, question marks, or stars
+        # \u055E = Armenian question mark
+        # \u061F = Arabic question mark
+        # \u1367 = Ethiopic question mark
+        expr = rcompile(u"\\w*[*?\u055E\u061F\u1367](\\w|[*?\u055E\u061F\u1367])*")
+        qclass = query.Wildcard
+
+        def __repr__(self):
+            r = "%s:wild(%r)" % (self.fieldname, self.text)
+            if self.boost != 1.0:
+                r += "^%s" % self.boost
+            return r
+
+        @classmethod
+        def create(cls, parser, match):
+            return cls(match.group(0))
+
+
+class WhitespacePlugin(Plugin):
+    """Parses whitespace between words in the query string. You should always
+    include this plugin.
+
+    This plugin is always automatically included by the QueryParser.
+    """
+
+    def __init__(self, tokenclass=White):
+        self.tokenclass = tokenclass
+
+    def tokens(self, parser):
+        return ((self.tokenclass, 100), )
+
+    def filters(self, parser):
+        return ((self.do_whitespace, 500), )
+
+    def do_whitespace(self, parser, stream):
+        newstream = stream.empty()
+        for t in stream:
+            if isinstance(t, Group):
+                newstream.append(self.do_whitespace(parser, t))
+            elif not isinstance(t, self.tokenclass):
+                newstream.append(t)
+        return newstream
+
+
+class GroupPlugin(Plugin):
+    """Adds the ability to group clauses using parentheses.
+
+    This plugin is included in the default parser configuration.
+    """
+
+    def tokens(self, parser):
+        return ((GroupPlugin.Open, 0), (GroupPlugin.Close, 0))
+
+    def filters(self, parser):
+        # This should basically be the first plugin to run
+        return ((GroupPlugin.do_groups, 0), )
+
+    @staticmethod
+    def do_groups(parser, stream):
+        stack = [parser.group()]
+        for t in stream:
+            if isinstance(t, GroupPlugin.Open):
+                stack.append(parser.group())
+            elif isinstance(t, GroupPlugin.Close):
+                if len(stack) > 1:
+                    last = stack.pop()
+                    stack[-1].append(last)
+            else:
+                stack[-1].append(t)
+
+        top = stack[0]
+        if len(stack) > 1:
+            for ls in stack[1:]:
+                top.extend(ls)
+
+        if len(top) == 1 and isinstance(top[0], Group):
+            top = top[0].set_boost(top.boost)
+
+        return top
+
+    class Open(Singleton):
+        expr = rcompile("\\(")
+
+    class Close(Singleton):
+        expr = rcompile("\\)")
+
+
+class FieldsPlugin(Plugin):
+    """Adds the ability to specify the field of a clause using a colon.
+
+    This plugin is included in the default parser configuration.
+    """
+
+    def __init__(self, remove_unknown=True):
+        self.remove_unknown = remove_unknown
+
+    def tokens(self, parser):
+        return ((FieldsPlugin.Field, 0), )
+
+    def filters(self, parser):
+        return ((self.do_fieldnames, 100), )
+
+    def do_fieldnames(self, parser, stream):
+        fieldtoken = FieldsPlugin.Field
+
+        # Look for field tokens that aren't in the schema and convert them to
+        # text
+        if self.remove_unknown and parser.schema is not None:
+            newstream = stream.empty()
+            text = None
+            for token in stream:
+                if (isinstance(token, fieldtoken)
+                    and token.fieldname not in parser.schema):
+                    text = token.original
+                else:
+                    if text:
+                        try:
+                            token = token.prepend_text(text)
+                        except NotImplementedError:
+                            newstream.append(Word(text))
+                        text = None
+                    newstream.append(token)
+
+            if text:
+                newstream.append(Word(text))
+
+            stream = newstream
+
+        newstream = stream.empty()
+        i = len(stream)
+        # Iterate backwards through the stream, looking for field-able objects
+        # with field tokens in front of them
+        while i > 0:
+            i -= 1
+            t = stream[i]
+
+            if isinstance(t, fieldtoken):
+                # If we see a field token here in the stream, it means it
+                # wasn't in front of a field-able object, so convert it into a
+                # Word token
+                t = Word(t.original)
+            elif isinstance(t, Group):
+                t = self.do_fieldnames(parser, t)
+
+            # If this is a field-able object (not whitespace or a field token)
+            # and it has a field token in front of it, apply the field token
+            if (i > 0 and not isinstance(t, (White, fieldtoken))
+                and isinstance(stream[i - 1], fieldtoken)):
+                # Set the field name for this object from the field token
+                t = t.set_fieldname(stream[i - 1].fieldname)
+                # Skip past the field token
+                i -= 1
+
+            newstream.append(t)
+
+        newstream.reverse()
+        return newstream
+
+    class Field(Token):
+        expr = rcompile(r"(?P<fieldname>\w+):")
+
+        def __init__(self, fieldname, original):
+            self.fieldname = fieldname
+            self.original = original
+
+        def __repr__(self):
+            return "<%s:>" % self.fieldname
+
+        @classmethod
+        def create(cls, parser, match):
+            fieldname = match.group("fieldname")
+            return cls(fieldname, match.group(0))
+
+
+class OperatorsPlugin(Plugin):
+    """By default, adds the AND, OR, ANDNOT, ANDMAYBE, and NOT operators to
+    the parser syntax. This plugin scans the token stream for subclasses of
+    :class:`Operator` and calls their :meth:`Operator.make_group` methods
+    to allow them to manipulate the stream.
+
+    There are two levels of configuration available.
+
+    The first level is to change the regular expressions of the default
+    operators, using the ``And``, ``Or``, ``AndNot``, ``AndMaybe``, and/or
+    ``Not`` keyword arguments. The keyword value can be a pattern string or
+    a compiled expression, or None to remove the operator::
+
+        qp = qparser.QueryParser("content", schema)
+        cp = qparser.OperatorsPlugin(And="&", Or="\\|", AndNot="&!", AndMaybe="&~", Not=None)
+        qp.replace_plugin(cp)
+
+    You can also specify a list of ``(Operator, priority)`` pairs as the first
+    argument to the initializer. For example, assume you have created an
+    :class:`InfixOperator` subclass to implement a "before" operator. To add
+    this to the operators plugin with a priority of -5, you would do this::
+
+        additional = [(MyBefore(), -5)]
+        cp = qparser.OperatorsPlugin(additional)
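+
+    where, for the purposes of this sketch, ``MyBefore`` might simply be an
+    :class:`InfixOperator` subclass that fixes the pattern and the group type
+    (here :class:`OrderedGroup`, which corresponds to an ordered query)::
+
+        class MyBefore(InfixOperator):
+            def __init__(self):
+                super(MyBefore, self).__init__(r"\sBEFORE\s", OrderedGroup)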
+
+    Note that the list of operators you specify with the first argument is IN
+    ADDITION TO the defaults. To turn off one of the default operators, you
+    can pass None to the corresponding keyword argument::
+
+        cp = qparser.OperatorsPlugin([(MyAnd(), 0)], And=None)
+
+    If you want ONLY your list operators and none of the default operators, use
+    the ``clean`` keyword argument::
+
+        cp = qparser.OperatorsPlugin([(MyAnd(), 0)], clean=True)
+
+    This class replaces the ``CompoundsPlugin``. ``qparser.CompoundsPlugin`` is
+    now an alias for this class.
+    """
+
+    def __init__(self, ops=None, And=r"\sAND\s", Or=r"\sOR\s",
+                 AndNot=r"\sANDNOT\s", AndMaybe=r"\sANDMAYBE\s",
+                 Not=r"(^|(?<= ))NOT\s", Require=r"(^|(?<= ))REQUIRE\s",
+                 clean=False):
+        if isinstance(ops, tuple):
+            ops = list(ops)
+        if not ops:
+            ops = []
+
+        if not clean:
+            if Not:
+                ops.append((PrefixOperator(Not, NotGroup), 0))
+            if And:
+                ops.append((InfixOperator(And, AndGroup), 0))
+            if Or:
+                ops.append((InfixOperator(Or, OrGroup), 0))
+            if AndNot:
+                ops.append((InfixOperator(AndNot, AndNotGroup), -5))
+            if AndMaybe:
+                ops.append((InfixOperator(AndMaybe, AndMaybeGroup), -5))
+            if Require:
+                ops.append((InfixOperator(Require, RequireGroup), 0))
+
+        self.ops = ops
+
+    def tokens(self, parser):
+        return self.ops
+
+    def filters(self, parser):
+        return ((self.do_operators, 600), )
+
+    def do_operators(self, parser, stream, level=0):
+        #print "  " * level, "In=", stream
+        for op, _ in self.ops:
+            #print "  " * level, ":", op
+            if op.left_assoc:
+                i = 0
+                while i < len(stream):
+                    t = stream[i]
+                    if t is op:
+                        i = t.make_group(parser, stream, i)
+                    else:
+                        i += 1
+            else:
+                i = len(stream) - 1
+                while i >= 0:
+                    t = stream[i]
+                    if t is op:
+                        i = t.make_group(parser, stream, i)
+                    i -= 1
+            #print "  " * level, "=", stream
+
+        #print "  " * level, ">stream=", stream
+        newstream = stream.empty()
+        for t in stream:
+            if isinstance(t, Group):
+                t = self.do_operators(parser, t, level + 1)
+            newstream.append(t)
+
+        #print "  " * level, "<stream=", newstream
+        return newstream
+
+CompoundsPlugin = OperatorsPlugin
+
+
+class NotPlugin(Plugin):
+    """This plugin is deprecated, its functionality is now provided by the
+    :class:`OperatorsPlugin`.
+    """
+
+    def __init__(self, token="(^|(?<= ))NOT "):
+        class Not(Singleton):
+            expr = rcompile(token)
+
+        self.Not = Not
+
+    def tokens(self, parser):
+        return ((self.Not, 0), )
+
+    def filters(self, parser):
+        return ((self.do_not, 800), )
+
+    def do_not(self, parser, stream):
+        newstream = stream.empty()
+        notnext = False
+        for t in stream:
+            if isinstance(t, self.Not):
+                notnext = True
+                continue
+
+            if isinstance(t, Group):
+                t = self.do_not(parser, t)
+
+            if notnext:
+                t = NotGroup([t])
+
+            newstream.append(t)
+            notnext = False
+
+        return newstream
+
+
+class BoostPlugin(Plugin):
+    """Adds the ability to boost clauses of the query using the circumflex.
+
+    This plugin is included in the default parser configuration.
+    """
+
+    def tokens(self, parser):
+        return ((BoostPlugin.Boost, 0), )
+
+    def filters(self, parser):
+        return ((BoostPlugin.clean_boost, 0), (BoostPlugin.do_boost, 700))
+
+    @staticmethod
+    def clean_boost(parser, stream):
+        newstream = stream.empty()
+        for i, t in enumerate(stream):
+            if isinstance(t, BoostPlugin.Boost):
+                if i == 0 or isinstance(stream[i - 1], (BoostPlugin.Boost, White)):
+                    t = Word(t.original)
+            newstream.append(t)
+        return newstream
+
+    @staticmethod
+    def do_boost(parser, stream):
+        newstream = stream.empty()
+
+        for t in stream:
+            if isinstance(t, Group):
+                newstream.append(BoostPlugin.do_boost(parser, t))
+
+            elif isinstance(t, BoostPlugin.Boost):
+                if newstream:
+                    newstream.append(newstream.pop().set_boost(t.boost))
+
+            else:
+                newstream.append(t)
+
+        return newstream
+
+    class Boost(Token):
+        expr = rcompile("\\^([0-9]+(.[0-9]+)?)($|(?=[ \t\r\n]))")
+
+        def __init__(self, original, boost):
+            self.original = original
+            self.boost = boost
+
+        def __repr__(self):
+            return "<^%s>" % self.boost
+
+        @classmethod
+        def create(cls, parser, match):
+            try:
+                return cls(match.group(0), float(match.group(1)))
+            except ValueError:
+                return Word(match.group(0))
+
+
+class PlusMinusPlugin(Plugin):
+    """Adds the ability to use + and - in a flat OR query to specify required
+    and prohibited terms.
+
+    This is the basis for the parser configuration returned by
+    ``SimpleParser()``.
+    """
+
+    def tokens(self, parser):
+        return ((PlusMinusPlugin.Plus, 0), (PlusMinusPlugin.Minus, 0))
+
+    def filters(self, parser):
+        return ((PlusMinusPlugin.do_plusminus, 510), )
+
+    @staticmethod
+    def do_plusminus(parser, stream):
+        required = AndGroup()
+        optional = OrGroup()
+        prohibited = OrGroup()
+
+        nextlist = optional
+        for t in stream:
+            if isinstance(t, PlusMinusPlugin.Plus):
+                nextlist = required
+            elif isinstance(t, PlusMinusPlugin.Minus):
+                nextlist = prohibited
+            else:
+                nextlist.append(t)
+                nextlist = optional
+
+        r = optional
+        if required:
+            r = AndMaybeGroup([required, optional])
+        if prohibited:
+            r = AndNotGroup([r, prohibited])
+        return r
+
+    class Plus(Singleton):
+        expr = rcompile("\\+")
+
+    class Minus(Singleton):
+        expr = rcompile("-")
+
+
+class MultifieldPlugin(Plugin):
+    """Converts any unfielded terms into OR clauses that search for the
+    term in a specified list of fields.
+    """
+
+    def __init__(self, fieldnames, fieldboosts=None):
+        """
+        :param fieldnames: a list of fields to search.
+        :param fieldboosts: an optional dictionary mapping field names to
+            a boost to use for that field.
+        """
+
+        self.fieldnames = fieldnames
+        self.boosts = fieldboosts or {}
+
+    def filters(self, parser):
+        return ((self.do_multifield, 110), )
+
+    def do_multifield(self, parser, stream):
+        newstream = stream.empty()
+        for t in stream:
+            if isinstance(t, Group):
+                t = self.do_multifield(parser, t)
+            elif isinstance(t, BasicSyntax) and t.fieldname is None:
+                t = OrGroup([t.set_fieldname(fn).set_boost(self.boosts.get(fn, 1.0))
+                             for fn in self.fieldnames])
+            newstream.append(t)
+        return newstream
+
+
+class DisMaxPlugin(Plugin):
+    """Converts any unfielded terms into DisjunctionMax clauses that search
+    for the term in a specified list of fields.
+    """
+
+    def __init__(self, fieldboosts, tiebreak=0.0):
+        """
+        :param fieldboosts: a dictionary mapping field names to a boost to use
+            for that field in the DisjunctionMax query.
+        """
+
+        self.fieldboosts = fieldboosts.items()
+        self.tiebreak = tiebreak
+
+    def filters(self, parser):
+        return ((self.do_dismax, 110), )
+
+    def do_dismax(self, parser, stream):
+        newstream = stream.empty()
+        for t in stream:
+            if isinstance(t, BasicSyntax) and t.fieldname is None:
+                t = DisMaxGroup([t.set_fieldname(fn).set_boost(b)
+                                 for fn, b in self.fieldboosts],
+                                 tiebreak=self.tiebreak)
+            newstream.append(t)
+        return newstream
+
+
+class FieldAliasPlugin(Plugin):
+    """Adds the ability to use "aliases" of fields in the query string.
+
+    >>> # Allow users to use 'body' or 'text' to refer to the 'content' field
+    >>> parser.add_plugin(FieldAliasPlugin({"content": ["body", "text"]}))
+    >>> parser.parse("text:hello")
+    Term("content", "hello")
+    """
+
+    def __init__(self, fieldmap):
+        """
+        :param fieldmap: a dictionary mapping fieldnames to a list of
+            aliases for the field.
+        """
+
+        self.fieldmap = fieldmap
+        self.reverse = {}
+        for key, values in fieldmap.iteritems():
+            for value in values:
+                self.reverse[value] = key
+
+    def filters(self, parser):
+        return ((self.do_aliases, 90), )
+
+    def do_aliases(self, parser, stream):
+        newstream = stream.empty()
+        for t in stream:
+            if isinstance(t, Group):
+                t = self.do_aliases(parser, t)
+            elif (t.fieldname is not None
+                  and t.fieldname in self.reverse):
+                t = t.set_fieldname(self.reverse[t.fieldname], force=True)
+            newstream.append(t)
+        return newstream
+
+
+class CopyFieldPlugin(Plugin):
+    """Looks for basic syntax tokens (terms, prefixes, wildcards, phrases, etc.)
+    occurring in a certain field and replaces it with a group (by default OR)
+    containing the original token and the token copied to a new field.
+
+    For example, the query::
+
+        hello name:matt
+
+    could be automatically converted by ``CopyFieldPlugin({"name": "author"})``
+    to::
+
+        hello (name:matt OR author:matt)
+
+    This is useful where one field was indexed with a differently-analyzed copy
+    of another, and you want the query to search both fields.
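+
+    For example, assuming the parser's schema has both a "name" field and an
+    "author" field::
+
+        parser.add_plugin(CopyFieldPlugin({"name": "author"}))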
+    """
+
+    def __init__(self, map, group=OrGroup, mirror=False):
+        """
+        :param map: a dictionary mapping names of fields to copy to the
+            names of the destination fields.
+        :param group: the type of group to create in place of the original
+            token.
+        :param mirror: if True, the plugin copies both ways, so if the user
+            specifies a query in the 'toname' field, it will be copied to
+            the 'fromname' field.
+        """
+        self.map = map
+        self.group = group
+        self.mirror = mirror
+
+    def filters(self, parser):
+        return ((self.do_copyfield, 109), )
+
+    def do_copyfield(self, parser, stream):
+        mirror = self.mirror
+        map = self.map
+        if mirror:
+            # Add in reversed mappings
+            map.update(dict((v, k) for k, v in map.iteritems()))
+
+        newstream = stream.empty()
+        for t in stream:
+            if isinstance(t, Group):
+                t = self.do_copyfield(parser, t)
+            elif isinstance(t, BasicSyntax):
+                toname = None
+                if t.fieldname in map:
+                    toname = map[t.fieldname]
+                elif t.fieldname is None and parser.fieldname in map:
+                    toname = map[parser.fieldname]
+
+                if toname:
+                    # Replace the syntax object with a group containing the
+                    # original object and one with the field changed
+                    t = self.group([t, t.set_fieldname(toname, force=True)])
+            newstream.append(t)
+        return newstream
+
+
+class GtLtPlugin(Plugin):
+    """Allows the user to use greater than/less than symbols to create range
+    queries::
+
+        a:>100 b:<=z c:>=-1.4 d:<mz
+
+    This is the equivalent of::
+
+        a:{100 to] b:[to z] c:[-1.4 to] d:[to mz}
+
+    The plugin recognizes ``>``, ``<``, ``>=``, ``<=``, ``=>``, and ``=<``
+    after a field specifier. The field specifier is required. You cannot do the
+    following::
+
+        >100
+
+    This plugin requires the FieldsPlugin and RangePlugin to work.
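+
+    For example, assuming a default ``QueryParser`` and a schema with a
+    NUMERIC "count" field::
+
+        parser.add_plugin(GtLtPlugin())
+        q = parser.parse(u"count:>=10")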
+    """
+
+    def __init__(self, expr=r"(?P<rel>(<=|>=|<|>|=<|=>))"):
+        """
+        :param expr: a regular expression that must capture a "rel" group
+            (which contains <, >, >=, <=, =>, or =<)
+        """
+
+        self.expr = rcompile(expr)
+
+    def tokens(self, parser):
+        # Create a dynamic subclass of GtLtToken and give it the configured
+        # regular expression
+        tkclass = type("DynamicGtLtToken", (GtLtPlugin.GtLtToken, ),
+                       {"expr": self.expr})
+
+        return ((tkclass, 0), )
+
+    def filters(self, parser):
+        # Run before the fieldnames filter
+        return ((self.do_gtlt, 99), )
+
+    def make_range(self, text, rel):
+        if rel == "<":
+            return RangePlugin.Range(None, text, False, True)
+        elif rel == ">":
+            return RangePlugin.Range(text, None, True, False)
+        elif rel == "<=" or rel == "=<":
+            return RangePlugin.Range(None, text, False, False)
+        elif rel == ">=" or rel == "=>":
+            return RangePlugin.Range(text, None, False, False)
+
+    def do_gtlt(self, parser, stream):
+        # Look for GtLtTokens in the stream and
+        # - replace it with a Field token
+        # - if the next token is a Word, replace it with a Range based on the
+        #   GtLtToken
+
+        gtlttoken = GtLtPlugin.GtLtToken
+
+        newstream = stream.empty()
+        prev = None
+        for t in stream:
+            if isinstance(t, gtlttoken):
+                if not isinstance(prev, FieldsPlugin.Field):
+                    prev = None
+                    continue
+            elif isinstance(t, Word) and isinstance(prev, gtlttoken):
+                t = self.make_range(t.text, prev.rel)
+
+            if not isinstance(t, gtlttoken):
+                newstream.append(t)
+            prev = t
+
+        return newstream
+
+    class GtLtToken(Token):
+        def __init__(self, rel):
+            self.rel = rel
+
+        def __repr__(self):
+            return "{%s}" % (self.rel)
+
+        @classmethod
+        def create(cls, parser, match):
+            return cls(match.group("rel"))
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/qparser/syntax.py b/lib/whoosh/whoosh/qparser/syntax.py
new file mode 100644
index 0000000..9a8f41e
--- /dev/null
+++ b/lib/whoosh/whoosh/qparser/syntax.py
@@ -0,0 +1,500 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""
+This module contains support classes for the query parser. These objects are
+used to construct the parsed syntax tree of the query. The syntax tree is then
+translated into a query tree by calling ``SyntaxObject.query()`` on the object
+at the top of the tree.
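+
+For example, a hand-built tree (assuming ``parser`` is a configured
+``QueryParser`` with a default field) might be turned into a query like this::
+
+    tree = AndGroup([Word("hello"), Word("world")])
+    q = tree.query(parser)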
+"""
+
+import copy
+
+from whoosh import query
+from whoosh.qparser.common import rcompile
+
+
+class SyntaxObject(object):
+    """An object representing parsed text. These objects generally correspond
+    to a query object type, and are intermediate objects used to represent the
+    syntax tree parsed from a query string, and then generate a query tree from
+    the syntax tree. There will be syntax objects that do not have a
+    corresponding query type, such as the syntax object representing
+    whitespace.
+    """
+
+    def set_fieldname(self, name, force=False):
+        """Returns a version of this syntax object with the field name set to
+        the given name. Normally this only changes the field name if the
+        field name is not already set, but if the ``force`` keyword argument
+        is True, the field name will be changed regardless.
+
+        This method is somewhat mis-named: the parser uses it to assign field
+        names to branches of the syntax tree, but only for syntax objects that
+        didn't have an explicit field name set by the user.
+        """
+
+        if force or self.fieldname is None:
+            t = copy.copy(self)
+            t.fieldname = name
+            return t
+        else:
+            return self
+
+    def set_boost(self, b):
+        if b != self.boost:
+            t = copy.copy(self)
+            t.boost = b
+            return t
+        else:
+            return self
+
+    def set_text(self, text):
+        raise NotImplementedError
+
+    def prepend_text(self, text):
+        raise NotImplementedError
+
+    def append_text(self, text):
+        raise NotImplementedError
+
+    def query(self, parser):
+        """Returns a query object tree representing this parser object.
+        """
+
+        raise NotImplementedError
+
+
+# Grouping objects
+
+class Group(SyntaxObject):
+    """Represents a group of syntax objects. These generally correspond to
+    compound query objects such as ``query.And`` and ``query.Or``.
+    """
+
+    # Whether this group can have any number of children. Other than AND and
+    # OR, most groups will represent binary queries, so the default is False.
+    many = False
+
+    # Sub-classes that want to use the default query() implementation should
+    # set this to the query class corresponding to this group
+    qclass = None
+
+    def __init__(self, tokens=None, boost=1.0):
+        self.tokens = tokens or []
+        self.boost = boost
+
+    def __repr__(self):
+        r = "%s(%r)" % (self.__class__.__name__, self.tokens)
+        if self.boost != 1.0:
+            r += "^%s" % self.boost
+        return r
+
+    def __nonzero__(self):
+        return bool(self.tokens)
+
+    def __iter__(self):
+        return iter(self.tokens)
+
+    def __len__(self):
+        return len(self.tokens)
+
+    def __getitem__(self, n):
+        return self.tokens.__getitem__(n)
+
+    def __setitem__(self, n, v):
+        self.tokens.__setitem__(n, v)
+
+    def __delitem__(self, n):
+        self.tokens.__delitem__(n)
+
+    def insert(self, n, v):
+        self.tokens.insert(n, v)
+
+    def set_boost(self, b):
+        return self.__class__(self.tokens[:], boost=b)
+
+    def set_fieldname(self, name, force=False):
+        return self.__class__([t.set_fieldname(name, force)
+                               for t in self.tokens])
+
+    def append(self, item):
+        self.tokens.append(item)
+
+    def extend(self, items):
+        self.tokens.extend(items)
+
+    def pop(self):
+        return self.tokens.pop()
+
+    def reverse(self):
+        self.tokens.reverse()
+
+    def query(self, parser):
+        return self.qclass([t.query(parser) for t in self.tokens],
+                           boost=self.boost)
+
+    def empty(self):
+        return self.__class__(boost=self.boost)
+
+
+class AndGroup(Group):
+    """Syntax group corresponding to an And query.
+    """
+
+    # This group can have more than 2 children
+    many = True
+    qclass = query.And
+
+
+class OrGroup(Group):
+    """Syntax group corresponding to an Or query.
+    """
+
+    # This group can have more than 2 children
+    many = True
+    qclass = query.Or
+
+
+class AndNotGroup(Group):
+    """Syntax group corresponding to an AndNot query.
+    """
+
+    def query(self, parser):
+        assert len(self.tokens) == 2
+        return query.AndNot(self.tokens[0].query(parser),
+                            self.tokens[1].query(parser), boost=self.boost)
+
+
+class AndMaybeGroup(Group):
+    """Syntax group corresponding to an AndMaybe query.
+    """
+
+    def query(self, parser):
+        assert len(self.tokens) == 2
+        return query.AndMaybe(self.tokens[0].query(parser),
+                              self.tokens[1].query(parser), boost=self.boost)
+
+
+class RequireGroup(Group):
+    """Syntax group corresponding to a Require query.
+    """
+
+    def query(self, parser):
+        assert len(self.tokens) == 2, self.tokens
+        return query.Require(self.tokens[0].query(parser),
+                             self.tokens[1].query(parser), boost=self.boost)
+
+
+class OrderedGroup(Group):
+    """Syntax group corresponding to the Ordered query.
+    """
+
+    many = True
+    qclass = query.Ordered
+
+
+class DisMaxGroup(Group):
+    """Syntax group corresponding to a DisjunctionMax query.
+    """
+
+    def __init__(self, tokens=None, tiebreak=0.0, boost=None):
+        super(DisMaxGroup, self).__init__(tokens)
+        self.tiebreak = tiebreak
+
+    def __repr__(self):
+        r = "dismax(%r" % self.tokens
+        if self.tiebreak != 0:
+            r += " tb=%s" % self.tiebreak
+        r += ")"
+        return r
+
+    def query(self, parser):
+        return query.DisjunctionMax([t.query(parser) for t in self.tokens],
+                                    tiebreak=self.tiebreak)
+
+    def empty(self):
+        return self.__class__(tiebreak=self.tiebreak, boost=self.boost)
+
+
+class NotGroup(Group):
+    """Syntax group corresponding to a Not query.
+    """
+
+    def __repr__(self):
+        return "NOT(%r)" % self.tokens
+
+    def query(self, parser):
+        assert len(self.tokens) == 1
+        return query.Not(self.tokens[0].query(parser))
+
+
+# Parse-able tokens
+
+class Token(SyntaxObject):
+    """A parse-able token object. Each token class has an ``expr`` attribute
+    containing a regular expression that matches the token text. When this
+    expression is found, the class/object's ``create()`` method is called and
+    returns a token object to represent the match in the token stream.
+
+    Many token classes do the parsing using class methods and put instances
+    of themselves in the token stream; however, parseable objects requiring
+    configuration (such as the :class:`Operator` subclasses) may use separate
+    objects for doing the parsing and embodying the token.
+    """
+
+    fieldname = None
+    endpos = None
+
+    @classmethod
+    def match(cls, text, pos):
+        return cls.expr.match(text, pos)
+
+    @classmethod
+    def create(cls, parser, match):
+        return cls()
+
+    def query(self, parser):
+        raise NotImplementedError
+
+
+class Operator(Token):
+    """Represents a search operator which modifies the token stream by putting
+    certain tokens into a :class:`Group` object. For example, an "and" infix
+    operator would put the two tokens on either side of the operator into
+    an :class:`AndGroup`.
+
+    This is the base class for operators. Subclasses must implement the
+    :meth:`Operator.make_group` method.
+    """
+
+    def __init__(self, expr, grouptype, left_assoc=True):
+        """
+        :param expr: a pattern string or compiled expression of the token text.
+        :param grouptype: a :class:`Group` subclass that should be created to
+            contain objects affected by the operator.
+        """
+
+        self.expr = rcompile(expr)
+        self.grouptype = grouptype
+        self.left_assoc = left_assoc
+
+    def __repr__(self):
+        return "%s<%s>" % (self.__class__.__name__, self.expr.pattern)
+
+    def set_boost(self, b):
+        return self
+
+    def set_fieldname(self, name, force=False):
+        return self
+
+    def make_group(self, parser, stream, position):
+        raise NotImplementedError
+
+    def match(self, text, pos):
+        return self.expr.match(text, pos)
+
+    def create(self, parser, match):
+        return self
+
+
+class PrefixOperator(Operator):
+    """Implements a prefix operator. That is, the token immediately following
+    the operator will be put into the group.
+    """
+
+    def make_group(self, parser, stream, position):
+        if position < len(stream) - 1:
+            del stream[position]
+            stream[position] = self.grouptype([stream[position]])
+        else:
+            del stream[position]
+        return position
+
+
+class PostfixOperator(Operator):
+    """Implements a postfix operator. That is, the token immediately preceding
+    the operator will be put into the group.
+    """
+
+    def make_group(self, parser, stream, position):
+        if position > 0:
+            del stream[position]
+            stream[position - 1] = self.grouptype([stream[position - 1]])
+        else:
+            del stream[position]
+        return position
+
+
+class InfixOperator(Operator):
+    """Implements an infix operator. That is, the tokens immediately on either
+    side of the operator will be put into the group.
+    """
+
+    def __init__(self, expr, grouptype, left_assoc=True):
+        """
+        :param expr: a pattern string or compiled expression of the token text.
+        :param grouptype: a :class:`Group` subclass that should be created to
+            contain objects affected by the operator.
+        :param left_assoc: if True, the operator is left associative. Otherwise
+            it is right associative.
+        """
+
+        super(InfixOperator, self).__init__(expr, grouptype)
+        self.left_assoc = left_assoc
+
+    def make_group(self, parser, stream, position):
+        if position > 0 and position < len(stream) - 1:
+            left = stream[position - 1]
+            right = stream[position + 1]
+
+            # The first two clauses check whether the "strong" side is already
+            # a group of the type we are going to create. If it is, we just
+            # append the "weak" side to the "strong" side instead of creating
+            # a new group inside the existing one. This is necessary because
+            # we can quickly run into Python's recursion limit otherwise.
+            if self.grouptype.many and self.left_assoc and isinstance(left, self.grouptype):
+                left.append(right)
+                del stream[position:position + 2]
+            elif self.grouptype.many and not self.left_assoc and isinstance(right, self.grouptype):
+                right.insert(0, left)
+                del stream[position - 1:position + 1]
+                return position - 1
+            else:
+                # Replace the operator and the two surrounding objects
+                stream[position - 1:position + 2] = [self.grouptype([left, right])]
+        else:
+            del stream[position]
+        return position
+
+
+class Singleton(Token):
+    """Base class for tokens that don't carry any information specific to
+    each instance (e.g. "open parenthesis" token), so they can all share the
+    same instance.
+    """
+
+    me = None
+
+    def __repr__(self):
+        return self.__class__.__name__
+
+    def set_boost(self, b):
+        return self
+
+    def set_fieldname(self, name, force=False):
+        return self
+
+    @classmethod
+    def create(cls, parser, match):
+        if not cls.me:
+            cls.me = cls()
+        return cls.me
+
+
+class White(Singleton):
+    expr = rcompile("\\s+")
+
+
+class ErrorToken(Token):
+    """A token representing an unavoidable parsing error. The ``query()``
+    method always returns NullQuery.
+
+    The default parser usually does not produce "errors" (text that doesn't
+    match the syntax is simply treated as part of the query), so this is mostly
+    for use by plugins that may add more restrictive parsing, for example
+    :class:`DateParserPlugin`.
+
+    Since the corresponding NullQuery will be filtered out when the query is
+    normalized, this is really only useful for debugging and possibly for
+    plugin filters.
+
+    The ``token`` attribute may contain the token that produced the error.
+    """
+
+    def __init__(self, token):
+        self.token = token
+
+    def __repr__(self):
+        return "<%s (%r)>" % (self.__class__.__name__, self.token)
+
+    def query(self, parser):
+        return query.NullQuery
+
+
+class BasicSyntax(Token):
+    """Base class for "basic" (atomic) syntax -- term, prefix, wildcard,
+    phrase, range.
+    """
+
+    expr = None
+    qclass = None
+    tokenize = False
+    removestops = False
+
+    def __init__(self, text, fieldname=None, boost=1.0):
+        self.fieldname = fieldname
+        self.text = text
+        self.boost = boost
+
+    def set_text(self, text):
+        t = copy.copy(self)
+        t.text = text
+        return t
+
+    def prepend_text(self, text):
+        return self.set_text(text + self.text)
+
+    def append_text(self, text):
+        return self.set_text(self.text + text)
+
+    def __repr__(self):
+        r = "%s:%r" % (self.fieldname, self.text)
+        if self.boost != 1.0:
+            r += "^%s" % self.boost
+        return r
+
+    @classmethod
+    def create(cls, parser, match):
+        return cls(match.group(0))
+
+    def query(self, parser):
+        fieldname = self.fieldname or parser.fieldname
+        termclass = self.qclass or parser.termclass
+
+        return parser.term_query(fieldname, self.text, termclass,
+                                 boost=self.boost, tokenize=self.tokenize,
+                                 removestops=self.removestops)
+
+
+class Word(BasicSyntax):
+    """Syntax object representing a term.
+    """
+
+    expr = rcompile("[^ \t\r\n)]+")
+    tokenize = True
+    removestops = True
diff --git a/lib/whoosh/whoosh/query.py b/lib/whoosh/whoosh/query.py
new file mode 100644
index 0000000..c759b8c
--- /dev/null
+++ b/lib/whoosh/whoosh/query.py
@@ -0,0 +1,1912 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""This module contains objects that query the search index. These query
+objects are composable to form complex query trees.
+"""
+
+from __future__ import division
+
+import copy
+import fnmatch
+import re
+from array import array
+
+from whoosh.lang.morph_en import variations
+from whoosh.matching import (AndMaybeMatcher, DisjunctionMaxMatcher,
+                             ListMatcher, IntersectionMatcher, InverseMatcher,
+                             NullMatcher, RequireMatcher, UnionMatcher,
+                             WrappingMatcher, ConstantScoreMatcher,
+                             AndNotMatcher)
+from whoosh.reading import TermNotFound
+from whoosh.support.levenshtein import relative
+from whoosh.support.times import datetime_to_long
+from whoosh.util import make_binary_tree, make_weighted_tree, methodcaller
+
+
+# Exceptions
+
+class QueryError(Exception):
+    """Error encountered while running a query.
+    """
+    pass
+
+
+# Utility classes
+
+class Lowest(object):
+    "A value that is always compares lower than any other object except itself."
+
+    def __cmp__(self, other):
+        if other.__class__ is Lowest:
+            return 0
+        return -1
+Lowest = Lowest()
+
+class Highest(object):
+    "A value that is always compares higher than any other object except itself."
+
+    def __cmp__(self, other):
+        if other.__class__ is Highest:
+            return 0
+        return 1
+Highest = Highest()
+
+
+# Base classes
+
+class Query(object):
+    """Abstract base class for all queries.
+
+    Note that this base class implements __or__, __and__, and __sub__ to allow
+    slightly more convenient composition of query objects::
+
+        >>> Term("content", u"a") | Term("content", u"b")
+        Or([Term("content", u"a"), Term("content", u"b")])
+
+        >>> Term("content", u"a") & Term("content", u"b")
+        And([Term("content", u"a"), Term("content", u"b")])
+
+        >>> Term("content", u"a") - Term("content", u"b")
+        And([Term("content", u"a"), Not(Term("content", u"b"))])
+    """
+
+    def __or__(self, query):
+        """Allows you to use | between query objects to wrap them in an Or
+        query.
+        """
+        return Or([self, query]).normalize()
+
+    def __and__(self, query):
+        """Allows you to use & between query objects to wrap them in an And
+        query.
+        """
+        return And([self, query]).normalize()
+
+    def __sub__(self, query):
+        """Allows you to use - between query objects to add the right-hand
+        query as a "NOT" query.
+        """
+
+        return And([self, Not(query)]).normalize()
+
+    def __hash__(self):
+        raise NotImplementedError
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def is_leaf(self):
+        """Returns True if this is a leaf node in the query tree, or False if
+        this query has sub-queries.
+        """
+
+        return True
+
+    def apply(self, fn):
+        """If this query has children, calls the given function on each child
+        and returns a new copy of this node with the new children returned by
+        the function. If this is a leaf node, simply returns this object.
+
+        This is useful for writing functions that transform a query tree. For
+        example, this function changes all Term objects in a query tree into
+        Variations objects::
+
+            def term2var(q):
+                if isinstance(q, Term):
+                    return Variations(q.fieldname, q.text)
+                else:
+                    return q.apply(term2var)
+
+            q = And([Term("f", "alfa"),
+                     Or([Term("f", "bravo"),
+                         Not(Term("f", "charlie"))])])
+            q = term2var(q)
+
+        Note that this method does not automatically create copies of nodes.
+        To avoid modifying the original tree, your function should call the
+        :meth:`Query.copy` method on nodes before changing their attributes.
+        """
+
+        return self
+
+    def accept(self, fn):
+        """Applies the given function to this query's subqueries (if any) and
+        then to this query itself::
+
+            def boost_phrases(q):
+                if isinstance(q, Phrase):
+                    q.boost *= 2.0
+                return q
+
+            myquery = myquery.accept(boost_phrases)
+
+        This method automatically creates copies of the nodes in the original
+        tree before passing them to your function, so your function can change
+        attributes on nodes without altering the original tree.
+
+        This method is less flexible than using :meth:`Query.apply` (in fact
+        it's implemented using that method) but is often more straightforward.
+        """
+
+        def fn_wrapper(q):
+            q = q.apply(fn_wrapper)
+            return fn(q)
+
+        return fn_wrapper(self)
+
+    def copy(self):
+        """Returns a copy of this query tree.
+        """
+
+        if self.is_leaf():
+            return copy.copy(self)
+        else:
+            return self.apply(methodcaller("copy"))
+
+    def replace(self, oldtext, newtext):
+        """Returns a copy of this query with oldtext replaced by newtext (if
+        oldtext was anywhere in this query).
+
+        Note that this returns a *new* query with the given text replaced. It
+        *does not* modify the original query "in place".
+        """
+
+        if self.is_leaf():
+            return self.copy()
+        else:
+            return self.apply(methodcaller("replace", oldtext, newtext))
+
+    def all_terms(self, termset=None, phrases=True):
+        """Returns a set of all terms in this query tree.
+
+        This method simply operates on the query itself, without reference to
+        an index (unlike existing_terms()), so it will *not* add terms that
+        require an index to compute, such as Prefix and Wildcard.
+
+        >>> q = And([Term("content", u"render"), Term("path", u"/a/b")])
+        >>> q.all_terms()
+        set([("content", u"render"), ("path", u"/a/b")])
+
+        :param phrases: Whether to add words found in Phrase queries.
+        :rtype: set
+        """
+
+        if termset is None:
+            termset = set()
+        self._all_terms(termset, phrases=phrases)
+        return termset
+
+    def _all_terms(self, *args, **kwargs):
+        # To be implemented in sub-classes
+        return
+
+    def existing_terms(self, ixreader, termset=None, reverse=False,
+                       phrases=True):
+        """Returns a set of all terms in this query tree that exist in the
+        index represented by the given ixreader.
+
+        This method references the IndexReader to expand Prefix and Wildcard
+        queries, and only adds terms that actually exist in the index (unless
+        reverse=True).
+
+        >>> ixreader = my_index.reader()
+        >>> q = And([Or([Term("content", u"render"),
+        ...             Term("content", u"rendering")]),
+        ...             Prefix("path", u"/a/")])
+        >>> q.existing_terms(ixreader, termset)
+        set([("content", u"render"), ("path", u"/a/b"), ("path", u"/a/c")])
+
+        :param ixreader: A :class:`whoosh.reading.IndexReader` object.
+        :param reverse: If True, this method adds *missing* terms rather than
+            *existing* terms to the set.
+        :param phrases: Whether to add words found in Phrase queries.
+        :rtype: set
+        """
+
+        if termset is None:
+            termset = set()
+        self._existing_terms(ixreader, termset, reverse=reverse,
+                             phrases=phrases)
+        return termset
+
+    def requires(self):
+        """Returns a set of queries that are *known* to be required to match
+        for the entire query to match. Note that other queries might also turn
+        out to be required, but that cannot be determined just by examining
+        the static query.
+
+        >>> a = Term("f", u"a")
+        >>> b = Term("f", u"b")
+        >>> And([a, b]).requires()
+        set([Term("f", u"a"), Term("f", u"b")])
+        >>> Or([a, b]).requires()
+        set([])
+        >>> AndMaybe(a, b).requires()
+        set([Term("f", u"a")])
+        >>> a.requires()
+        set([Term("f", u"a")])
+        """
+
+        # Subclasses should implement the _add_required_to(qset) method
+
+        return set([self])
+
+    def field(self):
+        """Returns the field this query matches in, or None if this query does
+        not match in a single field.
+        """
+
+        return self.fieldname
+
+    def estimate_size(self, ixreader):
+        """Returns an estimate of how many documents this query could
+        potentially match (for example, the estimated size of a simple term
+        query is the document frequency of the term). It is permissible to
+        overestimate, but not to underestimate.
+        """
+        raise NotImplementedError
+
+    def estimate_min_size(self, ixreader):
+        """Returns an estimate of the minimum number of documents this query
+        could potentially match.
+        """
+
+        return self.estimate_size(ixreader)
+
+    def matcher(self, searcher):
+        """Returns a :class:`~whoosh.matching.Matcher` object you can use to
+        retrieve documents and scores matching this query.
+
+        :rtype: :class:`whoosh.matching.Matcher`
+        """
+        raise NotImplementedError
+
+    def docs(self, searcher):
+        """Returns an iterator of docnums matching this query.
+
+        >>> searcher = my_index.searcher()
+        >>> list(my_query.docs(searcher))
+        [10, 34, 78, 103]
+
+        :param searcher: A :class:`whoosh.searching.Searcher` object.
+        """
+
+        try:
+            return self.matcher(searcher).all_ids()
+        except TermNotFound:
+            return iter([])
+
+    def normalize(self):
+        """Returns a recursively "normalized" form of this query. The
+        normalized form removes redundancy and empty queries. This is called
+        automatically on query trees created by the query parser, but you may
+        want to call it yourself if you're writing your own parser or building
+        your own queries.
+
+        >>> q = And([And([Term("f", u"a"),
+        ...               Term("f", u"b")]),
+        ...               Term("f", u"c"), Or([])])
+        >>> q.normalize()
+        And([Term("f", u"a"), Term("f", u"b"), Term("f", u"c")])
+
+        Note that this returns a *new, normalized* query. It *does not* modify
+        the original query "in place".
+        """
+        return self
+
+    def simplify(self, ixreader):
+        """Returns a recursively simplified form of this query, where
+        "second-order" queries (such as Prefix and Variations) are re-written
+        into lower-level queries (such as Term and Or).
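+
+        For example (an illustrative sketch; assumes the only terms in the
+        "content" field starting with "comp" are "compile" and "computer"):
+
+        >>> ixreader = my_index.reader()
+        >>> Prefix("content", u"comp").simplify(ixreader)
+        Or([Term("content", u"compile"), Term("content", u"computer")])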
+        """
+        return self
+
+
+class WrappingQuery(Query):
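+    """Abstract base class for queries that wrap a single subquery and
+    delegate most of their behavior to it.
+    """
+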
+    def __init__(self, child):
+        self.child = child
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, self.child)
+
+    def __hash__(self):
+        return hash(self.__class__.__name__) ^ hash(self.child)
+
+    def is_leaf(self):
+        return False
+
+    def apply(self, fn):
+        return self.__class__(fn(self.child))
+
+    def all_terms(self, termset=None, phrases=True):
+        return self.child.all_terms(termset=termset, phrases=phrases)
+
+    def existing_terms(self, ixreader, termset=None, reverse=False,
+                       phrases=True):
+        return self.child.existing_terms(ixreader, termset=termset,
+                                         reverse=reverse, phrases=phrases)
+
+    def requires(self):
+        return self.child.requires()
+
+    def field(self):
+        return self.child.field()
+
+    def estimate_size(self, ixreader):
+        return self.child.estimate_size(ixreader)
+
+    def estimate_min_size(self, ixreader):
+        return self.child.estimate_min_size(ixreader)
+
+    def matcher(self, searcher):
+        return self.child.matcher(searcher)
+
+
+class CompoundQuery(Query):
+    """Abstract base class for queries that combine or manipulate the results
+    of multiple sub-queries.
+    """
+
+    def __init__(self, subqueries, boost=1.0):
+        self.subqueries = subqueries
+        self.boost = boost
+
+    def __repr__(self):
+        r = "%s(%r" % (self.__class__.__name__, self.subqueries)
+        if self.boost != 1:
+            r += ", boost=%s" % self.boost
+        r += ")"
+        return r
+
+    def __unicode__(self):
+        r = u"("
+        r += (self.JOINT).join([unicode(s) for s in self.subqueries])
+        r += u")"
+        return r
+
+    def __eq__(self, other):
+        return other and self.__class__ is other.__class__ and\
+        self.subqueries == other.subqueries and\
+        self.boost == other.boost
+
+    def __getitem__(self, i):
+        return self.subqueries.__getitem__(i)
+
+    def __len__(self):
+        return len(self.subqueries)
+
+    def __iter__(self):
+        return iter(self.subqueries)
+
+    def __hash__(self):
+        h = hash(self.__class__.__name__) ^ hash(self.boost)
+        for q in self.subqueries:
+            h ^= hash(q)
+        return h
+
+    def is_leaf(self):
+        return False
+
+    def apply(self, fn):
+        return self.__class__([fn(q) for q in self.subqueries],
+                              boost=self.boost)
+
+    def field(self):
+        if self.subqueries:
+            f = self.subqueries[0].field()
+            if all(q.field() == f for q in self.subqueries[1:]):
+                return f
+
+    def estimate_size(self, ixreader):
+        return sum(q.estimate_size(ixreader) for q in self.subqueries)
+
+    def estimate_min_size(self, ixreader):
+        subs, nots = self._split_queries()
+        subs_min = min(q.estimate_min_size(ixreader) for q in subs)
+        if nots:
+            nots_sum = sum(q.estimate_size(ixreader) for q in nots)
+            subs_min = max(0, subs_min - nots_sum)
+        return subs_min
+
+    def _all_terms(self, termset, phrases=True):
+        for q in self.subqueries:
+            q.all_terms(termset, phrases=phrases)
+
+    def _existing_terms(self, ixreader, termset, reverse=False, phrases=True):
+        for q in self.subqueries:
+            q.existing_terms(ixreader, termset, reverse=reverse,
+                             phrases=phrases)
+
+    def normalize(self):
+        # Normalize subqueries and merge nested instances of this class
+        subqueries = []
+        for s in self.subqueries:
+            s = s.normalize()
+            if isinstance(s, self.__class__):
+                subqueries += [ss.normalize() for ss in s.subqueries]
+            else:
+                subqueries.append(s)
+
+        if all(q is NullQuery for q in subqueries):
+            return NullQuery
+
+        if any((isinstance(q, Every) and q.fieldname is None) for q in subqueries):
+            return Every()
+
+        # Merge ranges and Everys
+        everyfields = set()
+        i = 0
+        while i < len(subqueries):
+            q = subqueries[i]
+            f = q.field()
+            if f in everyfields:
+                subqueries.pop(i)
+                continue
+
+            if isinstance(q, (TermRange, NumericRange)):
+                j = i + 1
+                while j < len(subqueries):
+                    if q.overlaps(subqueries[j]):
+                        qq = subqueries.pop(j)
+                        q = q.merge(qq, intersect=self.intersect_merge)
+                    else:
+                        j += 1
+                q = subqueries[i] = q.normalize()
+
+            if isinstance(q, Every):
+                everyfields.add(q.fieldname)
+            i += 1
+
+        # Eliminate duplicate queries
+        subqs = []
+        seenqs = set()
+        for s in subqueries:
+            if (not isinstance(s, Every) and s.field() in everyfields):
+                continue
+            if s in seenqs:
+                continue
+            seenqs.add(s)
+            subqs.append(s)
+
+        # Remove NullQuerys
+        subqs = [q for q in subqs if q is not NullQuery]
+
+        if not subqs:
+            return NullQuery
+
+        if len(subqs) == 1:
+            sub = subqs[0]
+            if not (self.boost == 1.0 and sub.boost == 1.0):
+                sub = sub.copy()
+                sub.boost *= self.boost
+            return sub
+
+        return self.__class__(subqs, boost=self.boost)
+
+    def _split_queries(self):
+        subs = [q for q in self.subqueries if not isinstance(q, Not)]
+        nots = [q.query for q in self.subqueries if isinstance(q, Not)]
+        return (subs, nots)
+
+    def simplify(self, ixreader):
+        subs, nots = self._split_queries()
+
+        if subs:
+            subs = self.__class__([subq.simplify(ixreader) for subq in subs],
+                                  boost=self.boost).normalize()
+            if nots:
+                nots = Or(nots).simplify(ixreader).normalize()
+                return AndNot(subs, nots)
+            else:
+                return subs
+        else:
+            return NullQuery
+
+    def _matcher(self, matchercls, q_weight_fn, searcher, **kwargs):
+        # q_weight_fn is a function which is called on each query and returns a
+        # "weight" value which is used to build a huffman-like matcher tree. If
+        # q_weight_fn is None, an order-preserving binary tree is used instead.
+
+        # Pull any queries inside a Not() out into their own list
+        subs, nots = self._split_queries()
+
+        if not subs:
+            return NullMatcher()
+
+        # Create a matcher from the list of subqueries
+        if len(subs) == 1:
+            m = subs[0].matcher(searcher)
+        elif q_weight_fn is None:
+            subms = [q.matcher(searcher) for q in subs]
+            m = make_binary_tree(matchercls, subms)
+        else:
+            subms = [(q_weight_fn(q), q.matcher(searcher)) for q in subs]
+            m = make_weighted_tree(matchercls, subms)
+
+        # If there were queries inside Not(), make a matcher for them and
+        # wrap the matchers in an AndNotMatcher
+        if nots:
+            if len(nots) == 1:
+                notm = nots[0].matcher(searcher)
+            else:
+                notms = [(q.estimate_size(searcher.reader()),
+                          q.matcher(searcher)) for q in nots]
+                notm = make_weighted_tree(UnionMatcher, notms)
+
+            if notm.is_active():
+                m = AndNotMatcher(m, notm)
+
+        # If this query had a boost, add a wrapping matcher to apply the boost
+        if self.boost != 1.0:
+            m = WrappingMatcher(m, self.boost)
+
+        return m
+
+
+class MultiTerm(Query):
+    """Abstract base class for queries that operate on multiple terms in the
+    same field.
+    """
+
+    TOO_MANY_CLAUSES = 1024
+    constantscore = False
+
+    def _words(self, ixreader):
+        raise NotImplementedError
+
+    def simplify(self, ixreader):
+        existing = [Term(self.fieldname, word, boost=self.boost)
+                    for word in sorted(set(self._words(ixreader)))]
+        if len(existing) == 1:
+            return existing[0]
+        elif existing:
+            return Or(existing)
+        else:
+            return NullQuery
+
+    def _all_terms(self, termset, phrases=True):
+        pass
+
+    def _existing_terms(self, ixreader, termset, reverse=False, phrases=True):
+        fieldname = self.fieldname
+        for word in self._words(ixreader):
+            t = (fieldname, word)
+            contains = t in ixreader
+            if reverse:
+                contains = not contains
+            if contains:
+                termset.add(t)
+
+    def estimate_size(self, ixreader):
+        return sum(ixreader.doc_frequency(self.fieldname, text)
+                   for text in self._words(ixreader))
+
+    def estimate_min_size(self, ixreader):
+        return min(ixreader.doc_frequency(self.fieldname, text)
+                   for text in self._words(ixreader))
+
+    def matcher(self, searcher):
+        fieldname = self.fieldname
+        qs = [Term(fieldname, word) for word in self._words(searcher.reader())]
+        if not qs:
+            return NullMatcher()
+
+        if len(qs) == 1:
+            # If there's only one term, just use it
+            q = qs[0]
+
+        elif self.constantscore or len(qs) > self.TOO_MANY_CLAUSES:
+            # If there's so many clauses that an Or search would take forever,
+            # trade memory for time and just put all the matching docs in a set
+            # and serve it up as a ListMatcher
+            docset = set()
+            for q in qs:
+                docset.update(q.matcher(searcher).all_ids())
+            return ListMatcher(sorted(docset), all_weights=self.boost)
+
+        else:
+            # The default case: Or the terms together
+            q = Or(qs)
+
+        return q.matcher(searcher)
+
+
+# Concrete classes
+
+class Term(Query):
+    """Matches documents containing the given term (fieldname+text pair).
+
+    >>> Term("content", u"render")
+    """
+
+    __inittypes__ = dict(fieldname=str, text=unicode, boost=float)
+
+    def __init__(self, fieldname, text, boost=1.0):
+        self.fieldname = fieldname
+        self.text = text
+        self.boost = boost
+
+    def __eq__(self, other):
+        return (other
+                and self.__class__ is other.__class__
+                and
+                self.fieldname == other.fieldname
+                and self.text == other.text
+                and self.boost == other.boost)
+
+    def __repr__(self):
+        r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
+        if self.boost != 1.0:
+            r += ", boost=%s" % self.boost
+        r += ")"
+        return r
+
+    def __unicode__(self):
+        t = u"%s:%s" % (self.fieldname, self.text)
+        if self.boost != 1:
+            t += u"^" + unicode(self.boost)
+        return t
+
+    def __hash__(self):
+        return hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
+
+    def _all_terms(self, termset, phrases=True):
+        termset.add((self.fieldname, self.text))
+
+    def _existing_terms(self, ixreader, termset, reverse=False, phrases=True):
+        fieldname, text = self.fieldname, self.text
+        contains = (fieldname, text) in ixreader
+        if reverse:
+            contains = not contains
+        if contains:
+            termset.add((fieldname, text))
+
+    def replace(self, oldtext, newtext):
+        q = self.copy()
+        if q.text == oldtext:
+            q.text = newtext
+        return q
+
+    def estimate_size(self, ixreader):
+        return ixreader.doc_frequency(self.fieldname, self.text)
+
+    def matcher(self, searcher):
+        try:
+            m = searcher.postings(self.fieldname, self.text)
+            if self.boost != 1.0:
+                m = WrappingMatcher(m, boost=self.boost)
+            return m
+        except TermNotFound:
+            return NullMatcher()
+
+
+class And(CompoundQuery):
+    """Matches documents that match ALL of the subqueries.
+
+    >>> And([Term("content", u"render"),
+    ...      Term("content", u"shade"),
+    ...      Not(Term("content", u"texture"))])
+    >>> # You can also do this
+    >>> Term("content", u"render") & Term("content", u"shade")
+    """
+
+    # This is used by the superclass's __unicode__ method.
+    JOINT = " AND "
+    intersect_merge = True
+
+    def requires(self):
+        s = set()
+        for q in self.subqueries:
+            s |= q.requires()
+        return s
+
+    def estimate_size(self, ixreader):
+        return min(q.estimate_size(ixreader) for q in self.subqueries)
+
+    def matcher(self, searcher):
+        r = searcher.reader()
+        return self._matcher(IntersectionMatcher,
+                             lambda q: 0 - q.estimate_size(r), searcher)
+
+
+class Or(CompoundQuery):
+    """Matches documents that match ANY of the subqueries.
+
+    >>> Or([Term("content", u"render"),
+    ...     And([Term("content", u"shade"), Term("content", u"texture")]),
+    ...     Not(Term("content", u"network"))])
+    >>> # You can also do this
+    >>> Term("content", u"render") | Term("content", u"shade")
+    """
+
+    # This is used by the superclass's __unicode__ method.
+    JOINT = " OR "
+    intersect_merge = False
+
+    def __init__(self, subqueries, boost=1.0, minmatch=0):
+        CompoundQuery.__init__(self, subqueries, boost=boost)
+        self.minmatch = minmatch
+
+    def __unicode__(self):
+        r = u"("
+        r += (self.JOINT).join([unicode(s) for s in self.subqueries])
+        r += u")"
+        if self.minmatch:
+            r += u">%s" % self.minmatch
+        return r
+
+    def normalize(self):
+        norm = CompoundQuery.normalize(self)
+        if norm.__class__ is self.__class__:
+            norm.minmatch = self.minmatch
+        return norm
+
+    def requires(self):
+        if len(self.subqueries) == 1:
+            return self.subqueries[0].requires()
+        else:
+            return set()
+
+    def matcher(self, searcher):
+        r = searcher.reader()
+        return self._matcher(UnionMatcher, lambda q: q.estimate_size(r),
+                             searcher)
+
+
+class DisjunctionMax(CompoundQuery):
+    """Matches all documents that match any of the subqueries, but scores each
+    document using the maximum score from the subqueries.
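+
+    >>> # Illustrative: score by the best of a title or body match; the
+    >>> # tiebreak value here is just an example
+    >>> DisjunctionMax([Term("title", u"render"), Term("body", u"render")],
+    ...                tiebreak=0.1)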
+    """
+
+    def __init__(self, subqueries, boost=1.0, tiebreak=0.0):
+        CompoundQuery.__init__(self, subqueries, boost=boost)
+        self.tiebreak = tiebreak
+
+    def __unicode__(self):
+        r = u"DisMax("
+        r += " ".join([unicode(s) for s in self.subqueries])
+        r += u")"
+        if self.tiebreak:
+            s += u"~" + unicode(self.tiebreak)
+        return r
+
+    def normalize(self):
+        norm = CompoundQuery.normalize(self)
+        if norm.__class__ is self.__class__:
+            norm.tiebreak = self.tiebreak
+        return norm
+
+    def requires(self):
+        if len(self.subqueries) == 1:
+            return self.subqueries[0].requires()
+        else:
+            return set()
+
+    def matcher(self, searcher):
+        r = searcher.reader()
+        return self._matcher(DisjunctionMaxMatcher,
+                             lambda q: q.estimate_size(r), searcher,
+                             tiebreak=self.tiebreak)
+
+
+class Not(Query):
+    """Excludes any documents that match the subquery.
+
+    >>> # Match documents that contain 'render' but not 'texture'
+    >>> And([Term("content", u"render"),
+    ...      Not(Term("content", u"texture"))])
+    >>> # You can also do this
+    >>> Term("content", u"render") - Term("content", u"texture")
+    """
+
+    __inittypes__ = dict(query=Query)
+
+    def __init__(self, query, boost=1.0):
+        """
+        :param query: A :class:`Query` object. The results of this query
+            are *excluded* from the parent query.
+        :param boost: Boost is meaningless for excluded documents but this
+            keyword argument is accepted for the sake of a consistent interface.
+        """
+
+        self.query = query
+        self.boost = boost
+
+    def __eq__(self, other):
+        return other and self.__class__ is other.__class__ and\
+        self.query == other.query
+
+    def __repr__(self):
+        return "%s(%s)" % (self.__class__.__name__, repr(self.query))
+
+    def __unicode__(self):
+        return u"NOT " + unicode(self.query)
+
+    def __hash__(self):
+        return hash(self.__class__.__name__) ^ hash(self.query) ^ hash(self.boost)
+
+    def is_leaf(self):
+        return False
+
+    def apply(self, fn):
+        return self.__class__(fn(self.query))
+
+    def normalize(self):
+        query = self.query.normalize()
+        if query is NullQuery:
+            return NullQuery
+        else:
+            return self.__class__(query, boost=self.boost)
+
+    def _all_terms(self, termset, phrases=True):
+        self.query.all_terms(termset, phrases=phrases)
+
+    def _existing_terms(self, ixreader, termset, reverse=False, phrases=True):
+        self.query.existing_terms(ixreader, termset, reverse=reverse,
+                                  phrases=phrases)
+
+    def field(self):
+        return None
+
+    def estimate_size(self, ixreader):
+        return ixreader.doc_count()
+
+    def estimate_min_size(self, ixreader):
+        return 1 if ixreader.doc_count() else 0
+
+    def matcher(self, searcher):
+        # Usually only called if Not is the root query. Otherwise, queries such
+        # as And and Or do special handling of Not subqueries.
+        reader = searcher.reader()
+        child = self.query.matcher(searcher)
+        return InverseMatcher(child, searcher.doc_count_all(),
+                              missing=reader.is_deleted)
+
+
+class PatternQuery(MultiTerm):
+    """An intermediate base class for common methods of Prefix and Wildcard.
+    """
+
+    __inittypes__ = dict(fieldname=str, text=unicode, boost=float)
+
+    def __init__(self, fieldname, text, boost=1.0, constantscore=True):
+        self.fieldname = fieldname
+        self.text = text
+        self.boost = boost
+        self.constantscore = constantscore
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.fieldname == other.fieldname
+                and self.text == other.text and self.boost == other.boost
+                and self.constantscore == other.constantscore)
+
+    def __repr__(self):
+        r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
+        if self.boost != 1:
+            r += ", boost=%s" % self.boost
+        r += ")"
+        return r
+
+    def __hash__(self):
+        return (hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
+                ^ hash(self.constantscore))
+
+
+class Prefix(PatternQuery):
+    """Matches documents that contain any terms that start with the given text.
+
+    >>> # Match documents containing words starting with 'comp'
+    >>> Prefix("content", u"comp")
+    """
+
+    def __unicode__(self):
+        return "%s:%s*" % (self.fieldname, self.text)
+
+    def _words(self, ixreader):
+        return ixreader.expand_prefix(self.fieldname, self.text)
+
+
+class Wildcard(PatternQuery):
+    """Matches documents that contain any terms that match a wildcard
+    expression.
+
+    >>> Wildcard("content", u"in*f?x")
+    """
+
+    def __unicode__(self):
+        return "%s:%s" % (self.fieldname, self.text)
+
+    def _words(self, ixreader):
+        exp = re.compile(fnmatch.translate(self.text))
+
+        # Get the "prefix" -- the substring before the first wildcard.
+        qm = self.text.find("?")
+        st = self.text.find("*")
+        if qm < 0 and st < 0:
+            prefix = ""
+        elif qm < 0:
+            prefix = self.text[:st]
+        elif st < 0:
+            prefix = self.text[:qm]
+        else:
+            prefix = self.text[:min(st, qm)]
+
+        if prefix:
+            candidates = ixreader.expand_prefix(self.fieldname, prefix)
+        else:
+            candidates = ixreader.lexicon(self.fieldname)
+
+        for text in candidates:
+            if exp.match(text):
+                yield text
+
+    def normalize(self):
+        # If there are no wildcard characters in this "wildcard", turn it into
+        # a simple Term
+        text = self.text
+        if text == "*":
+            return Every(self.fieldname, boost=self.boost)
+        if "*" not in text and "?" not in text:
+            # If no wildcard chars, convert to a normal term.
+            return Term(self.fieldname, self.text, boost=self.boost)
+        elif ("?" not in text
+              and text.endswith("*")
+              and text.find("*") == len(text) - 1
+              and (len(text) < 2 or text[-2] != "\\")):
+            # If the only wildcard char is an asterisk at the end, convert to a
+            # Prefix query.
+            return Prefix(self.fieldname, self.text[:-1], boost=self.boost)
+        else:
+            return self
+
+
+class FuzzyTerm(MultiTerm):
+    """Matches documents containing words similar to the given term.
+    """
+
+    __inittypes__ = dict(fieldname=str, text=unicode, boost=float,
+                         minsimilarity=float, prefixlength=int)
+
+    def __init__(self, fieldname, text, boost=1.0, minsimilarity=0.5,
+                 prefixlength=1, constantscore=True):
+        """
+        :param fieldname: The name of the field to search.
+        :param text: The text to search for.
+        :param boost: A boost factor to apply to scores of documents matching
+            this query.
+        :param minsimilarity: The minimum similarity ratio to match. 1.0 is the
+            maximum (an exact match to 'text').
+        :param prefixlength: The matched terms must share this many initial
+            characters with 'text'. For example, if text is "light" and
+            prefixlength is 2, then only terms starting with "li" are checked
+            for similarity.
+        """
+
+        self.fieldname = fieldname
+        self.text = text
+        self.boost = boost
+        self.minsimilarity = minsimilarity
+        self.prefixlength = prefixlength
+        self.constantscore = constantscore
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.fieldname == other.fieldname
+                and self.text == other.text
+                and self.minsimilarity == other.minsimilarity
+                and self.prefixlength == other.prefixlength
+                and self.boost == other.boost
+                and self.constantscore == other.constantscore)
+
+    def __repr__(self):
+        r = "%s(%r, %r, boost=%f, minsimilarity=%f, prefixlength=%d)"
+        return r % (self.__class__.__name__, self.fieldname, self.text,
+                    self.boost, self.minsimilarity, self.prefixlength)
+
+    def __unicode__(self):
+        r = u"~" + self.text
+        if self.boost != 1.0:
+            r += "^%f" % self.boost
+        return r
+
+    def __hash__(self):
+        return (hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
+                ^ hash(self.minsimilarity) ^ hash(self.prefixlength)
+                ^ hash(self.constantscore))
+
+    def _all_terms(self, termset, phrases=True):
+        termset.add((self.fieldname, self.text))
+
+    def _words(self, ixreader):
+        text = self.text
+        minsim = self.minsimilarity
+        for term in ixreader.expand_prefix(self.fieldname,
+                                           text[:self.prefixlength]):
+            if text == term:
+                yield term
+            elif relative(text, term) > minsim:
+                yield term
+
+
+class RangeMixin(object):
+    # Contains methods shared by TermRange and NumericRange
+
+    def __repr__(self):
+        return ('%s(%r, %r, %r, %s, %s, boost=%s, constantscore=%s)'
+                % (self.__class__.__name__, self.fieldname, self.start,
+                   self.end, self.startexcl, self.endexcl, self.boost,
+                   self.constantscore))
+
+    def __unicode__(self):
+        startchar = "{" if self.startexcl else "["
+        endchar = "}" if self.endexcl else "]"
+        start = '' if self.start is None else self.start
+        end = '' if self.end is None else self.end
+        return u"%s:%s%s TO %s%s" % (self.fieldname, startchar, start, end,
+                                     endchar)
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.fieldname == other.fieldname
+                and self.start == other.start and self.end == other.end
+                and self.startexcl == other.startexcl
+                and self.endexcl == other.endexcl
+                and self.boost == other.boost
+                and self.constantscore == other.constantscore)
+
+    def __hash__(self):
+        return (hash(self.fieldname) ^ hash(self.start) ^ hash(self.startexcl)
+                ^ hash(self.end) ^ hash(self.endexcl) ^ hash(self.boost))
+
+    def _comparable_start(self):
+        if self.start is None:
+            return (Lowest, 0)
+        else:
+            second = 1 if self.startexcl else 0
+            return (self.start, second)
+
+    def _comparable_end(self):
+        if self.end is None:
+            return (Highest, 0)
+        else:
+            second = -1 if self.endexcl else 0
+            return (self.end, second)
+
+    def overlaps(self, other):
+        if not isinstance(other, TermRange):
+            return False
+        if self.fieldname != other.fieldname:
+            return False
+
+        start1 = self._comparable_start()
+        start2 = other._comparable_start()
+        end1 = self._comparable_end()
+        end2 = other._comparable_end()
+
+        return ((start1 >= start2 and start1 <= end2)
+                or (end1 >= start2 and end1 <= end2)
+                or (start2 >= start1 and start2 <= end1)
+                or (end2 >= start1 and end2 <= end1))
+
+    def merge(self, other, intersect=True):
+        assert self.fieldname == other.fieldname
+
+        start1 = self._comparable_start()
+        start2 = other._comparable_start()
+        end1 = self._comparable_end()
+        end2 = other._comparable_end()
+
+        if start1 >= start2 and end1 <= end2:
+            start = start2
+            end = end2
+        elif start2 >= start1 and end2 <= end1:
+            start = start1
+            end = end1
+        elif intersect:
+            start = max(start1, start2)
+            end = min(end1, end2)
+        else:
+            start = min(start1, start2)
+            end = max(end1, end2)
+
+        startval = None if start[0] is Lowest else start[0]
+        startexcl = start[1] == 1
+        endval = None if end[0] is Highest else end[0]
+        endexcl = end[1] == -1
+
+        boost = max(self.boost, other.boost)
+        constantscore = self.constantscore or other.constantscore
+
+        return self.__class__(self.fieldname, startval, endval, startexcl,
+                              endexcl, boost=boost, constantscore=constantscore)
+
+
+class TermRange(RangeMixin, MultiTerm):
+    """Matches documents containing any terms in a given range.
+
+    >>> # Match documents where the indexed "id" field is greater than or equal
+    >>> # to 'apple' and less than or equal to 'pear'.
+    >>> TermRange("id", u"apple", u"pear")
+    """
+
+    def __init__(self, fieldname, start, end, startexcl=False, endexcl=False,
+                 boost=1.0, constantscore=True):
+        """
+        :param fieldname: The name of the field to search.
+        :param start: Match terms equal to or greater than this.
+        :param end: Match terms equal to or less than this.
+        :param startexcl: If True, the range start is exclusive. If False, the
+            range start is inclusive.
+        :param endexcl: If True, the range end is exclusive. If False, the
+            range end is inclusive.
+        :param boost: Boost factor that should be applied to the raw score of
+            results matched by this query.
+        """
+
+        self.fieldname = fieldname
+        self.start = start
+        self.end = end
+        self.startexcl = startexcl
+        self.endexcl = endexcl
+        self.boost = boost
+        self.constantscore = constantscore
+
+    def normalize(self):
+        if self.start in ('', None) and self.end in (u'\uffff', None):
+            return Every(self.fieldname, boost=self.boost)
+        elif self.start == self.end:
+            if self.startexcl or self.endexcl:
+                return NullQuery
+            return Term(self.fieldname, self.start, boost=self.boost)
+        else:
+            return TermRange(self.fieldname, self.start, self.end,
+                             self.startexcl, self.endexcl,
+                             boost=self.boost)
+
+    def replace(self, oldtext, newtext):
+        q = self.copy()
+        if q.start == oldtext:
+            q.start = newtext
+        if q.end == oldtext:
+            q.end = newtext
+        return q
+
+    def _words(self, ixreader):
+        fieldname = self.fieldname
+        start = '' if self.start is None else self.start
+        end = u'\uFFFF' if self.end is None else self.end
+        startexcl = self.startexcl
+        endexcl = self.endexcl
+
+        for fname, t, _, _ in ixreader.iter_from(fieldname, start):
+            if fname != fieldname:
+                break
+            if t == start and startexcl:
+                continue
+            if t == end and endexcl:
+                break
+            if t > end:
+                break
+            yield t
+
+
+class NumericRange(RangeMixin, Query):
+    """A range query for NUMERIC fields. Takes advantage of tiered indexing
+    to speed up large ranges by matching at a high resolution at the edges of
+    the range and a low resolution in the middle.
+
+    >>> # Match numbers from 10 to 5925 in the "number" field.
+    >>> nr = NumericRange("number", 10, 5925)
+    """
+
+    def __init__(self, fieldname, start, end, startexcl=False, endexcl=False,
+                 boost=1.0, constantscore=True):
+        """
+        :param fieldname: The name of the field to search.
+        :param start: Match terms equal to or greater than this number. This
+            should be a number type, not a string.
+        :param end: Match terms equal to or less than this number. This should
+            be a number type, not a string.
+        :param startexcl: If True, the range start is exclusive. If False, the
+            range start is inclusive.
+        :param endexcl: If True, the range end is exclusive. If False, the
+            range end is inclusive.
+        :param boost: Boost factor that should be applied to the raw score of
+            results matched by this query.
+        :param constantscore: If True, the compiled query returns a constant
+            score (the value of the ``boost`` keyword argument) instead of
+            actually scoring the matched terms. This gives a nice speed boost
+            and won't affect the results in most cases since numeric ranges
+            will almost always be used as a filter.
+        """
+
+        self.fieldname = fieldname
+        self.start = start
+        self.end = end
+        self.startexcl = startexcl
+        self.endexcl = endexcl
+        self.boost = boost
+        self.constantscore = constantscore
+
+    def simplify(self, ixreader):
+        return self._compile_query(ixreader).simplify(ixreader)
+
+    def estimate_size(self, ixreader):
+        return self._compile_query(ixreader).estimate_size(ixreader)
+
+    def estimate_min_size(self, ixreader):
+        return self._compile_query(ixreader).estimate_min_size(ixreader)
+
+    def docs(self, searcher):
+        q = self._compile_query(searcher.reader())
+        return q.docs(searcher)
+
+    def _compile_query(self, ixreader):
+        from whoosh.fields import NUMERIC
+        from whoosh.support.numeric import tiered_ranges
+
+        field = ixreader.schema[self.fieldname]
+        if not isinstance(field, NUMERIC):
+            raise Exception("NumericRange: field %r is not numeric" % self.fieldname)
+
+        start = field.prepare_number(self.start)
+        end = field.prepare_number(self.end)
+
+        subqueries = []
+        # Get the term ranges for the different resolutions
+        for starttext, endtext in tiered_ranges(field.type, field.signed,
+                                                start, end, field.shift_step,
+                                                self.startexcl, self.endexcl):
+            if starttext == endtext:
+                subq = Term(self.fieldname, starttext)
+            else:
+                subq = TermRange(self.fieldname, starttext, endtext)
+            subqueries.append(subq)
+
+        if len(subqueries) == 1:
+            q = subqueries[0]
+        elif subqueries:
+            q = Or(subqueries, boost=self.boost)
+        else:
+            return NullQuery
+
+        if self.constantscore:
+            q = ConstantScoreQuery(q, self.boost)
+        return q
+
+    def matcher(self, searcher):
+        q = self._compile_query(searcher.reader())
+        return q.matcher(searcher)
+
+
+class DateRange(NumericRange):
+    """This is a very thin subclass of :class:`NumericRange` that only
+    overrides the initializer and ``__repr__()`` methods to work with datetime
+    objects instead of numbers. Internally this object converts the datetime
+    objects it's created with to numbers and otherwise acts like a
+    ``NumericRange`` query.
+
+    >>> DateRange("date", datetime(2010, 11, 3, 3, 0), datetime(2010, 11, 3, 17, 59))
+    """
+
+    def __init__(self, fieldname, start, end, startexcl=False, endexcl=False,
+                 boost=1.0, constantscore=True):
+        self.startdate = start
+        self.enddate = end
+        if start:
+            start = datetime_to_long(start)
+        if end:
+            end = datetime_to_long(end)
+        super(DateRange, self).__init__(fieldname, start, end,
+                                        startexcl=startexcl, endexcl=endexcl,
+                                        boost=boost, constantscore=constantscore)
+
+    def __repr__(self):
+        return '%s(%r, %r, %r, %s, %s, boost=%s)' % (self.__class__.__name__,
+                                           self.fieldname,
+                                           self.startdate, self.enddate,
+                                           self.startexcl, self.endexcl,
+                                           self.boost)
+
+
+class Variations(MultiTerm):
+    """Query that automatically searches for morphological variations of the
+    given word in the same field.
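+
+    >>> # Illustrative: may also match variations such as 'rendered' and
+    >>> # 'rendering' if they exist in the index
+    >>> Variations("content", u"render")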
+    """
+
+    def __init__(self, fieldname, text, boost=1.0):
+        self.fieldname = fieldname
+        self.text = text
+        self.boost = boost
+
+    def __repr__(self):
+        r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
+        if self.boost != 1:
+            r += ", boost=%s" % self.boost
+        r += ")"
+        return r
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.fieldname == other.fieldname
+                and self.text == other.text and self.boost == other.boost)
+
+    def __hash__(self):
+        return hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
+
+    def _all_terms(self, termset, phrases=True):
+        termset.add(self.text)
+
+    def _existing_terms(self, ixreader, termset, reverse=False, phrases=True):
+        for word in variations(self.text):
+            t = (self.fieldname, word)
+            contains = t in ixreader
+            if reverse:
+                contains = not contains
+            if contains:
+                termset.add(t)
+
+    def _words(self, ixreader):
+        fieldname = self.fieldname
+        return [word for word in variations(self.text)
+                if (fieldname, word) in ixreader]
+
+    def __unicode__(self):
+        return u"%s:<%s>" % (self.fieldname, self.text)
+
+    def replace(self, oldtext, newtext):
+        q = self.copy()
+        if q.text == oldtext:
+            q.text = newtext
+        return q
+
+
+class Phrase(Query):
+    """Matches documents containing a given phrase."""
+
+    def __init__(self, fieldname, words, slop=1, boost=1.0):
+        """
+        :param fieldname: the field to search.
+        :param words: a list of words (unicode strings) in the phrase.
+        :param slop: the number of words allowed between each "word" in the
+            phrase; the default of 1 means the phrase must match exactly.
+        :param boost: a boost factor to apply to the raw score of
+            documents matched by this query.
+        """
+
+        self.fieldname = fieldname
+        self.words = words
+        self.slop = slop
+        self.boost = boost
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__ and
+                self.fieldname == other.fieldname and self.words == other.words
+                and self.slop == other.slop and self.boost == other.boost)
+
+    def __repr__(self):
+        return "%s(%r, %r, slop=%s, boost=%f)" % (self.__class__.__name__,
+                                                  self.fieldname, self.words,
+                                                  self.slop, self.boost)
+
+    def __unicode__(self):
+        return u'%s:"%s"' % (self.fieldname, u" ".join(self.words))
+
+    def __hash__(self):
+        h = hash(self.fieldname) ^ hash(self.slop) ^ hash(self.boost)
+        for w in self.words:
+            h ^= hash(w)
+        return h
+
+    def copy(self):
+        # Need to override the default shallow copy here to do a copy of the
+        # self.words list
+        return self.__class__(self.fieldname, self.words[:], boost=self.boost)
+
+    def _all_terms(self, termset, phrases=True):
+        if phrases:
+            fieldname = self.fieldname
+            for word in self.words:
+                termset.add((fieldname, word))
+
+    def _existing_terms(self, ixreader, termset, reverse=False, phrases=True):
+        if phrases:
+            fieldname = self.fieldname
+            for word in self.words:
+                contains = (fieldname, word) in ixreader
+                if reverse:
+                    contains = not contains
+                if contains:
+                    termset.add((fieldname, word))
+
+    def normalize(self):
+        if not self.words:
+            return NullQuery
+        if len(self.words) == 1:
+            return Term(self.fieldname, self.words[0])
+
+        words = [w for w in self.words if w is not None]
+        return self.__class__(self.fieldname, words, slop=self.slop,
+                              boost=self.boost)
+
+    def replace(self, oldtext, newtext):
+        q = self.copy()
+        for i in xrange(len(q.words)):
+            if q.words[i] == oldtext:
+                q.words[i] = newtext
+        return q
+
+    def _and_query(self):
+        fn = self.fieldname
+        return And([Term(fn, word) for word in self.words])
+
+    def estimate_size(self, ixreader):
+        return self._and_query().estimate_size(ixreader)
+
+    def estimate_min_size(self, ixreader):
+        return self._and_query().estimate_min_size(ixreader)
+
+    def matcher(self, searcher):
+        fieldname = self.fieldname
+        reader = searcher.reader()
+
+        # Shortcut the query if one of the words doesn't exist.
+        for word in self.words:
+            if (fieldname, word) not in reader:
+                return NullMatcher()
+
+        field = searcher.schema[fieldname]
+        if not field.format or not field.format.supports("positions"):
+            raise QueryError("Phrase search: %r field has no positions"
+                             % self.fieldname)
+
+        # Construct a tree of SpanNear queries representing the words in the
+        # phrase and return its matcher
+        from whoosh.spans import SpanNear
+        q = SpanNear.phrase(fieldname, self.words, slop=self.slop)
+        m = q.matcher(searcher)
+        if self.boost != 1.0:
+            m = WrappingMatcher(m, boost=self.boost)
+        return m
+
+
+class Ordered(And):
+    """Matches documents containing a list of sub-queries in the given order.
+    """
+
+    JOINT = " BEFORE "
+
+    def matcher(self, searcher):
+        from whoosh.spans import SpanBefore
+
+        return self._matcher(SpanBefore._Matcher, None, searcher)
+
+
+class Every(Query):
+    """A query that matches every document containing any term in a given
+    field. If you don't specify a field, the query matches every document.
+
+    >>> # Match any documents with something in the "path" field
+    >>> q = Every("path")
+    >>> # Match every document
+    >>> q = Every()
+
+    The unfielded form (matching every document) is efficient.
+
+    The fielded form is more efficient than a prefix query with an empty
+    prefix or a '*' wildcard, but it can still be very slow on large indexes.
+    It requires the searcher to read the full posting list of every term in
+    the given field.
+
+    Instead of using this query, it is usually much more efficient to add, at
+    indexing time, a single marker term to every document that has the field
+    you want to match.
+
+    For example, instead of this::
+
+        # Match all documents that have something in the "path" field
+        q = Every("path")
+
+    Do this when indexing::
+
+        # Add an extra field that indicates whether a document has a path
+        schema = fields.Schema(path=fields.ID, has_path=fields.ID)
+
+        # When indexing, set the "has_path" field based on whether the document
+        # has anything in the "path" field
+        writer.add_document(text=text_value1)
+        writer.add_document(text=text_value2, path=path_value2, has_path="t")
+
+    Then to find all documents with a path::
+
+        q = Term("has_path", "t")
+    """
+
+    def __init__(self, fieldname=None, boost=1.0):
+        """
+        :param fieldname: the name of the field to match, or ``None`` or ``*``
+            to match all documents.
+        """
+
+        if not fieldname or fieldname == "*":
+            fieldname = None
+        self.fieldname = fieldname
+        self.boost = boost
+
+    def __repr__(self):
+        return "%s(%r, boost=%s)" % (self.__class__.__name__, self.fieldname,
+                                     self.boost)
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.fieldname == other.fieldname
+                and self.boost == other.boost)
+
+    def __unicode__(self):
+        return u"%s:*" % self.fieldname
+
+    def __hash__(self):
+        return hash(self.fieldname)
+
+    def estimate_size(self, ixreader):
+        return ixreader.doc_count()
+
+    def matcher(self, searcher):
+        fieldname = self.fieldname
+        reader = searcher.reader()
+
+        if fieldname in (None, "", "*"):
+            doclist = list(searcher.reader().all_doc_ids())
+        elif reader.supports_caches() and reader.fieldcache_available(self.fieldname):
+            # If the reader has a field cache, use it to quickly get the list
+            # of documents that have a value for this field
+            fc = reader.fieldcache(self.fieldname)
+            doclist = [docnum for docnum, ord in fc.ords() if ord != 0]
+        else:
+            # This is a hacky hack, but just create an in-memory set of all the
+            # document numbers of every term in the field. This is SLOOOW for
+            # large indexes
+            doclist = set()
+            for text in searcher.lexicon(fieldname):
+                pr = searcher.postings(fieldname, text)
+                doclist.update(pr.all_ids())
+            doclist = sorted(doclist)
+
+        return ListMatcher(doclist, all_weights=self.boost)
+
+
+class NullQuery(Query):
+    "Represents a query that won't match anything."
+    def __call__(self):
+        return self
+
+    def __repr__(self):
+        return "<%s>" % (self.__class__.__name__, )
+
+    def __eq__(self, other):
+        return other is self
+
+    def __hash__(self):
+        return id(self)
+
+    def copy(self):
+        return self
+
+    def field(self):
+        return None
+
+    def estimate_size(self, ixreader):
+        return 0
+
+    def normalize(self):
+        return self
+
+    def simplify(self, ixreader):
+        return self
+
+    def docs(self, searcher):
+        return []
+
+    def matcher(self, searcher):
+        return NullMatcher()
+
+
+NullQuery = NullQuery()
+
+
+class ConstantScoreQuery(WrappingQuery):
+    """Wraps a query and uses a matcher that always gives a constant score
+    to all matching documents. This is a useful optimization when you don't
+    care about scores from a certain branch of the query tree because it is
+    simply acting as a filter. See also the :class:`AndMaybe` query.
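+
+    >>> # Illustrative: use the date range purely as a filter, scoring only
+    >>> # by the term query
+    >>> q = And([Term("content", u"render"),
+    ...          ConstantScoreQuery(TermRange("date", u"2010", u"2011"))])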
+    """
+
+    def __init__(self, child, score=1.0):
+        super(ConstantScoreQuery, self).__init__(child)
+        self.score = score
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.child == other.child and self.score == other.score)
+
+    def __hash__(self):
+        return hash(self.child) ^ hash(self.score)
+
+    def apply(self, fn):
+        return self.__class__(fn(self.child), self.score)
+
+    def matcher(self, searcher):
+        m = self.child.matcher(searcher)
+        if isinstance(m, NullMatcher):
+            return m
+        else:
+            ids = array("I", m.all_ids())
+            return ListMatcher(ids, all_weights=self.score)
+
+
+class WeightingQuery(WrappingQuery):
+    """Wraps a query and specifies a custom weighting model to apply to the
+    wrapped branch of the query tree. This is useful when you want to score
+    parts of the query using criteria that don't apply to the rest of the
+    query.
+    """
+
+    def __init__(self, child, model, fieldname=None, text=None):
+        super(WeightingQuery, self).__init__(child)
+        self.model = model
+        self.fieldname = fieldname
+        self.text = text
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.child == other.child
+                and self.model == other.model
+                and self.fieldname == other.fieldname
+                and self.text == other.text)
+
+    def __hash__(self):
+        return hash(self.child) ^ hash(self.fieldname) ^ hash(self.text)
+
+    def apply(self, fn):
+        return self.__class__(fn(self.child), self.model, self.fieldname,
+                              self.text)
+
+    def matcher(self, searcher):
+        m = self.child.matcher(searcher)
+        scorer = self.model.scorer(searcher, self.fieldname, self.text)
+        if isinstance(m, NullMatcher):
+            return m
+        else:
+            return WeightingQuery.CustomScorerMatcher(m, scorer)
+
+    class CustomScorerMatcher(WrappingMatcher):
+        def __init__(self, child, scorer):
+            super(WeightingQuery.CustomScorerMatcher, self).__init__(child)
+            self.scorer = scorer
+
+        def copy(self):
+            return self.__class__(self.child.copy(), self.scorer)
+
+        def _replacement(self, newchild):
+            return self.__class__(newchild, self.scorer)
+
+        def supports_quality(self):
+            return self.scorer.supports_quality()
+
+        def quality(self):
+            return self.scorer.quality(self)
+
+        def block_quality(self):
+            return self.scorer.block_quality(self)
+
+        def score(self):
+            return self.scorer.score(self)
+
+
+class BinaryQuery(CompoundQuery):
+    """Base class for binary queries (queries which are composed of two
+    sub-queries). Subclasses should set the ``matcherclass`` attribute or
+    override ``matcher()``, and may also need to override ``normalize()``,
+    ``estimate_size()``, and/or ``estimate_min_size()``.
+    """
+
+    def __init__(self, a, b, boost=1.0):
+        self.a = a
+        self.b = b
+        self.subqueries = (a, b)
+        self.boost = boost
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.a == other.a and self.b == other.b
+                and self.boost == other.boost)
+
+    def __hash__(self):
+        return (hash(self.__class__.__name__) ^ hash(self.a) ^ hash(self.b)
+                ^ hash(self.boost))
+
+    def apply(self, fn):
+        return self.__class__(fn(self.a), fn(self.b), boost=self.boost)
+
+    def field(self):
+        f = self.a.field()
+        if self.b.field() == f:
+            return f
+
+    def normalize(self):
+        a = self.a.normalize()
+        b = self.b.normalize()
+        if a is NullQuery and b is NullQuery:
+            return NullQuery
+        elif a is NullQuery:
+            return b
+        elif b is NullQuery:
+            return a
+
+        return self.__class__(a, b, boost=self.boost)
+
+    def matcher(self, searcher):
+        return self.matcherclass(self.a.matcher(searcher),
+                                 self.b.matcher(searcher))
+
+
+class Require(BinaryQuery):
+    """Binary query returns results from the first query that also appear in
+    the second query, but only uses the scores from the first query. This lets
+    you filter results without affecting scores.
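+
+    >>> # Illustrative: score by 'render' matches but only keep documents
+    >>> # whose path starts with '/a/'
+    >>> Require(Term("content", u"render"), Prefix("path", u"/a/"))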
+    """
+
+    JOINT = " REQUIRE "
+    matcherclass = RequireMatcher
+
+    def requires(self):
+        return self.a.requires() | self.b.requires()
+
+    def estimate_size(self, ixreader):
+        return self.b.estimate_size(ixreader)
+
+    def estimate_min_size(self, ixreader):
+        return self.b.estimate_min_size(ixreader)
+
+    def normalize(self):
+        a = self.a.normalize()
+        b = self.b.normalize()
+        if a is NullQuery or b is NullQuery:
+            return NullQuery
+        return self.__class__(a, b, boost=self.boost)
+
+    def docs(self, searcher):
+        return And(self.subqueries).docs(searcher)
+
+
+class AndMaybe(BinaryQuery):
+    """Binary query takes results from the first query. If and only if the
+    same document also appears in the results from the second query, the score
+    from the second query will be added to the score from the first query.
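+
+    >>> # Illustrative: documents matching 'render' score higher if they also
+    >>> # match 'shade'
+    >>> AndMaybe(Term("content", u"render"), Term("content", u"shade"))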
+    """
+
+    JOINT = " ANDMAYBE "
+    matcherclass = AndMaybeMatcher
+
+    def normalize(self):
+        a = self.a.normalize()
+        b = self.b.normalize()
+        if a is NullQuery:
+            return NullQuery
+        if b is NullQuery:
+            return a
+        return self.__class__(a, b, boost=self.boost)
+
+    def requires(self):
+        return self.a.requires()
+
+    def estimate_min_size(self, ixreader):
+        return self.subqueries[0].estimate_min_size(ixreader)
+
+    def docs(self, searcher):
+        return self.subqueries[0].docs(searcher)
+
+
+class AndNot(BinaryQuery):
+    """Binary boolean query of the form 'a ANDNOT b', where documents that
+    match b are removed from the matches for a.
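+
+    >>> # Illustrative: match 'render' but remove documents matching 'texture'
+    >>> AndNot(Term("content", u"render"), Term("content", u"texture"))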
+    """
+
+    JOINT = " ANDNOT "
+    matcherclass = AndNotMatcher
+
+    def normalize(self):
+        a = self.a.normalize()
+        b = self.b.normalize()
+
+        if a is NullQuery:
+            return NullQuery
+        elif b is NullQuery:
+            return a
+
+        return self.__class__(a, b, boost=self.boost)
+
+    def _all_terms(self, termset, phrases=True):
+        self.a.all_terms(termset, phrases=phrases)
+
+    def _existing_terms(self, ixreader, termset, reverse=False, phrases=True):
+        self.a.existing_terms(ixreader, termset, reverse=reverse,
+                              phrases=phrases)
+
+    def requires(self):
+        return self.a.requires()
+
+
+class Otherwise(BinaryQuery):
+    """A binary query that only matches the second clause if the first clause
+    doesn't match any documents.
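+
+    >>> # Illustrative: fall back to a prefix query if the exact term matches
+    >>> # no documents
+    >>> Otherwise(Term("content", u"render"), Prefix("content", u"rend"))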
+    """
+
+    JOINT = " OTHERWISE "
+
+    def matcher(self, searcher):
+        m = self.a.matcher(searcher)
+        if not m.is_active():
+            m = self.b.matcher(searcher)
+        return m
+
+
+def BooleanQuery(required, should, prohibited):
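+    # Convenience constructor: documents must match every query in 'required'
+    # and none in 'prohibited'; matching queries in 'should' only increases
+    # the score.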
+    return AndNot(AndMaybe(And(required), Or(should)), Or(prohibited)).normalize()
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/ramindex.py b/lib/whoosh/whoosh/ramindex.py
new file mode 100644
index 0000000..6a7b8b0
--- /dev/null
+++ b/lib/whoosh/whoosh/ramindex.py
@@ -0,0 +1,356 @@
+# Copyright 2011 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from collections import defaultdict
+from bisect import bisect_left
+from threading import RLock
+
+from whoosh.fields import UnknownFieldError
+from whoosh.matching import ListMatcher, NullMatcher
+from whoosh.reading import IndexReader, TermNotFound
+from whoosh.writing import IndexWriter
+from whoosh.util import synchronized
+
+
+class RamIndex(IndexReader, IndexWriter):
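+    """An index held entirely in memory, acting as both an
+    :class:`~whoosh.reading.IndexReader` and an
+    :class:`~whoosh.writing.IndexWriter`. Postings, stored fields, and term
+    vectors are kept in plain Python data structures guarded by an RLock.
+
+    A minimal usage sketch (assuming a schema with an indexed, stored TEXT
+    field named "content")::
+
+        from whoosh.fields import Schema, TEXT
+
+        ix = RamIndex(Schema(content=TEXT(stored=True)))
+        ix.add_document(content=u"hello world")
+        with ix.searcher() as s:
+            print s.document(content=u"hello")
+    """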
+    def __init__(self, schema):
+        self.schema = schema
+        self.docnum = 0
+        self._sync_lock = RLock()
+        self.is_closed = False
+        self.clear()
+
+    @synchronized
+    def clear(self):
+        self.invindex = {}
+        self.indexfreqs = defaultdict(int)
+        self.storedfields = []
+        self.fieldlengths = defaultdict(int)
+        self.vectors = {}
+        self.deleted = set()
+        self.usage = 0
+
+    @synchronized
+    def __contains__(self, term):
+        try:
+            self.invindex[term[0]][term[1]]
+            return True
+        except KeyError:
+            return False
+
+    @synchronized
+    def __iter__(self):
+        invindex = self.invindex
+        indexfreqs = self.indexfreqs
+        for fn in sorted(invindex.keys()):
+            for text in sorted(self.invindex[fn].keys()):
+                docfreq = len(invindex[fn][text])
+                indexfreq = indexfreqs[fn, text]
+                yield (fn, text, docfreq, indexfreq)
+
+    def close(self):
+        pass
+
+    @synchronized
+    def has_deletions(self):
+        return bool(self.deleted)
+
+    @synchronized
+    def is_deleted(self, docnum):
+        return docnum in self.deleted
+
+    @synchronized
+    def delete_document(self, docnum, delete=True):
+        if delete:
+            self.deleted.add(docnum)
+        else:
+            self.deleted.remove(docnum)
+
+    @synchronized
+    def stored_fields(self, docnum):
+        return self.storedfields[docnum]
+
+    @synchronized
+    def all_stored_fields(self):
+        deleted = self.deleted
+        return (sf for i, sf in enumerate(self.storedfields)
+                if i not in deleted)
+
+    def _test_field(self, fieldname):
+        if fieldname not in self.schema:
+            raise TermNotFound("No field %r" % fieldname)
+        if self.schema[fieldname].format is None:
+            raise TermNotFound("Field %r is not indexed" % fieldname)
+
+    @synchronized
+    def field_length(self, fieldname):
+        self._test_field(fieldname)
+        if fieldname not in self.schema or not self.schema[fieldname].scorable:
+            return 0
+        return sum(l for docnum_fieldname, l in self.fieldlengths.iteritems()
+                   if docnum_fieldname[1] == fieldname)
+
+    @synchronized
+    def max_field_length(self, fieldname):
+        self._test_field(fieldname)
+        if fieldname not in self.schema or not self.schema[fieldname].scorable:
+            return 0
+        return max(l for docnum_fieldname, l in self.fieldlengths.iteritems()
+                   if docnum_fieldname[1] == fieldname)
+
+    def doc_field_length(self, docnum, fieldname, default=0):
+        self._test_field(fieldname)
+        return self.fieldlengths.get((docnum, fieldname), default)
+
+    def has_vector(self, docnum, fieldname):
+        return (docnum, fieldname) in self.vectors
+
+    @synchronized
+    def vector(self, docnum, fieldname):
+        if fieldname not in self.schema:
+            raise TermNotFound("No  field %r" % fieldname)
+        vformat = self.schema[fieldname].vector
+        if not vformat:
+            raise Exception("No vectors are stored for field %r" % fieldname)
+
+        vformat = self.schema[fieldname].vector
+        ids, weights, values = zip(*self.vectors[docnum, fieldname])
+        return ListMatcher(ids, weights, values, format=vformat)
+
+    def doc_frequency(self, fieldname, text):
+        self._test_field(fieldname)
+        try:
+            return len(self.invindex[fieldname][text])
+        except KeyError:
+            return 0
+
+    def frequency(self, fieldname, text):
+        self._test_field(fieldname)
+        return self.indexfreqs[fieldname, text]
+
+    @synchronized
+    def iter_from(self, fieldname, text):
+        self._test_field(fieldname)
+        invindex = self.invindex
+        indexfreqs = self.indexfreqs
+
+        for fn in sorted(key for key in self.invindex.keys() if key >= fieldname):
+            texts = sorted(invindex[fn])
+            start = 0
+            if fn == fieldname:
+                start = bisect_left(texts, text)
+            for t in texts[start:]:
+                docfreq = len(invindex[fn][t])
+                indexfreq = indexfreqs[fn, t]
+                yield (fn, t, docfreq, indexfreq)
+
+    def lexicon(self, fieldname):
+        self._test_field(fieldname)
+        return sorted(self.invindex[fieldname].keys())
+
+    @synchronized
+    def expand_prefix(self, fieldname, prefix):
+        texts = self.lexicon(fieldname)
+        start = 0 if not prefix else bisect_left(texts, prefix)
+        for text in texts[start:]:
+            if text.startswith(prefix):
+                yield text
+            else:
+                break
+
+    @synchronized
+    def first_id(self, fieldname, text):
+        # Override to not construct a posting reader, just pull the first
+        # non-deleted docnum out of the list directly
+        self._test_field(fieldname)
+        try:
+            plist = self.invindex[fieldname][text]
+        except KeyError:
+            raise TermNotFound((fieldname, text))
+        else:
+            deleted = self.deleted
+            for x in plist:
+                docnum = x[0]
+                if docnum not in deleted:
+                    return docnum
+
+    @synchronized
+    def postings(self, fieldname, text, scorer=None):
+        self._test_field(fieldname)
+        try:
+            postings = self.invindex[fieldname][text]
+        except KeyError:
+            raise TermNotFound((fieldname, text))
+
+        excludeset = self.deleted
+        format = self.schema[fieldname].format
+        if excludeset:
+            postings = [x for x in postings if x[0] not in excludeset]
+            if not postings:
+                return NullMatcher()
+        ids, weights, values = zip(*postings)
+        return ListMatcher(ids, weights, values, format=format)
+
+    def reader(self):
+        return self
+
+    def searcher(self, **kwargs):
+        from whoosh.searching import Searcher
+        return Searcher(self.reader(), **kwargs)
+
+    def writer(self, **kwargs):
+        return self
+
+    def doc_count_all(self):
+        return len(self.storedfields)
+
+    def doc_count(self):
+        return len(self.storedfields) - len(self.deleted)
+
+    @synchronized
+    def update_document(self, **fields):
+        super(RamIndex, self).update_document(**fields)
+
+    @synchronized
+    def add_document(self, **fields):
+        schema = self.schema
+        invindex = self.invindex
+        indexfreqs = self.indexfreqs
+        fieldlengths = self.fieldlengths
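+        # "usage" accumulates a rough estimate, in bytes, of the memory taken
+        # by this document's postings, vectors, and stored values; the
+        # constants below are approximate per-entry overheads.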
+        usage = 0
+
+        fieldnames = [name for name in sorted(fields.keys())
+                      if not name.startswith("_")]
+
+        for name in fieldnames:
+            if name not in schema:
+                raise UnknownFieldError("There is no field named %r" % name)
+            if name not in invindex:
+                invindex[name] = {}
+
+        storedvalues = {}
+
+        for name in fieldnames:
+            field = schema[name]
+            value = fields.get(name)
+            if value:
+                fielddict = invindex[name]
+
+                # If the field is indexed, add the words in the value to the
+                # index
+                if field.indexed:
+                    # Count of all terms in the value
+                    count = 0
+                    # Count of UNIQUE terms in the value
+                    unique = 0
+
+                    for w, freq, weight, valuestring in field.index(value):
+                        if w not in fielddict:
+                            fielddict[w] = []
+                        fielddict[w].append((self.docnum, weight, valuestring))
+                        indexfreqs[name, w] += freq
+                        count += freq
+                        unique += 1
+
+                        usage += 44 + len(valuestring)
+
+                    if field.scorable:
+                        fieldlengths[self.docnum, name] = count
+                        usage += 36
+
+                vector = field.vector
+                if vector:
+                    vlist = sorted((w, weight, valuestring) for w, freq, weight, valuestring
+                                   in vector.word_values(value))
+                    self.vectors[self.docnum, name] = vlist
+                    usage += 28
+                    for x in vlist:
+                        usage += 44 + len(x[2])
+
+            if field.stored:
+                storedname = "_stored_" + name
+                if storedname in fields:
+                    stored_value = fields[storedname]
+                else:
+                    stored_value = value
+
+                storedvalues[name] = stored_value
+                usage += 28 + len(name)  # + len(stored_value)
+
+        self.storedfields.append(storedvalues)
+        self.usage += usage
+        self.docnum += 1
+
+#    @synchronized
+#    def optimize(self):
+#        deleted = self.deleted
+#
+#        # Remove deleted documents from stored fields
+#        self.storedfields = [sf for i, sf in enumerate(self.storedfields)
+#                             if i not in deleted]
+#
+#        # Remove deleted documents from inverted index
+#        removedterms = defaultdict(set)
+#        for fn in self.invindex:
+#            termdict = self.invindex[fn]
+#            for text, postlist in termdict.items():
+#                newlist = [x for x in postlist if x[0] not in deleted]
+#                if newlist:
+#                    termdict[text] = newlist
+#                else:
+#                    removedterms[fn].add(text)
+#                    del termdict[text]
+#
+#        # If terms were removed as a result of document deletion, update
+#        # indexfreqs
+#        for fn, removed in removedterms.iteritems():
+#            for text in removed:
+#                del self.indexfreqs[fn, text]
+#
+#        # Remove documents from field lengths
+#        fieldlengths = self.fieldlengths
+#        for docnum, fieldname in fieldlengths.keys():
+#            if docnum in deleted:
+#                del fieldlengths[docnum, fieldname]
+#
+#        # Remove documents from vectors
+#        vectors = self.vectors
+#        for docnum, fieldname in vectors.keys():
+#            if docnum in deleted:
+#                del vectors[docnum, fieldname]
+#
+#        # Reset deleted list
+#        self.deleted = set()
+
+    def commit(self):
+        pass
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/reading.py b/lib/whoosh/whoosh/reading.py
new file mode 100644
index 0000000..d2362af
--- /dev/null
+++ b/lib/whoosh/whoosh/reading.py
@@ -0,0 +1,701 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""This module contains classes that allow reading from an index.
+"""
+
+from bisect import bisect_right
+from heapq import heapify, heapreplace, heappop, nlargest
+
+from whoosh.util import ClosableMixin
+from whoosh.matching import MultiMatcher
+
+
+# Exceptions
+
+class TermNotFound(Exception):
+    pass
+
+
+# Base class
+
+class IndexReader(ClosableMixin):
+    """Do not instantiate this object directly. Instead use Index.reader().
+    """
+
+    def is_atomic(self):
+        return True
+
+    def __contains__(self, term):
+        """Returns True if the given term tuple (fieldname, text) is
+        in this reader.
+        """
+        raise NotImplementedError
+
+    def __iter__(self):
+        """Yields (fieldname, text, docfreq, indexfreq) tuples for each term in
+        the reader, in lexical order.
+        """
+        raise NotImplementedError
+
+    def close(self):
+        """Closes the open files associated with this reader.
+        """
+
+        pass
+
+    def generation(self):
+        """Returns the generation of the index being read, or -1 if the backend
+        is not versioned.
+        """
+
+        return -1
+
+    def iter_from(self, fieldname, text):
+        """Yields (field_num, text, doc_freq, index_freq) tuples for all terms
+        in the reader, starting at the given term.
+        """
+        raise NotImplementedError
+
+    def expand_prefix(self, fieldname, prefix):
+        """Yields terms in the given field that start with the given prefix.
+        """
+
+        for fn, t, _, _ in self.iter_from(fieldname, prefix):
+            if fn != fieldname or not t.startswith(prefix):
+                return
+            yield t
+
+    def all_terms(self):
+        """Yields (fieldname, text) tuples for every term in the index.
+        """
+
+        for fn, t, _, _ in self:
+            yield (fn, t)
+
+    def iter_field(self, fieldname, prefix=''):
+        """Yields (text, doc_freq, index_freq) tuples for all terms in the
+        given field.
+        """
+
+        for fn, t, docfreq, freq in self.iter_from(fieldname, prefix):
+            if fn != fieldname:
+                return
+            yield t, docfreq, freq
+
+    def iter_prefix(self, fieldname, prefix):
+        """Yields (field_num, text, doc_freq, index_freq) tuples for all terms
+        in the given field with a certain prefix.
+        """
+
+        for fn, t, docfreq, colfreq in self.iter_from(fieldname, prefix):
+            if fn != fieldname or not t.startswith(prefix):
+                return
+            yield (t, docfreq, colfreq)
+
+    def lexicon(self, fieldname):
+        """Yields all terms in the given field.
+        """
+
+        for t, _, _ in self.iter_field(fieldname):
+            yield t
+
+    def has_deletions(self):
+        """Returns True if the underlying index/segment has deleted
+        documents.
+        """
+        raise NotImplementedError
+
+    def all_doc_ids(self):
+        """Returns an iterator of all (undeleted) document IDs in the reader.
+        """
+
+        # This default implementation works for backends like filedb that use
+        # a continuous 0-N range of numbers to address documents, but will need
+        # to be overridden if a backend, e.g., looks up documents using
+        # persistent ID strings.
+
+        is_deleted = self.is_deleted
+        return (docnum for docnum in xrange(self.doc_count_all())
+                if not is_deleted(docnum))
+
+    def is_deleted(self, docnum):
+        """Returns True if the given document number is marked deleted.
+        """
+        raise NotImplementedError
+
+    def stored_fields(self, docnum):
+        """Returns the stored fields for the given document number.
+
+        :param numerickeys: use field numbers as the dictionary keys instead of
+            field names.
+        """
+        raise NotImplementedError
+
+    def all_stored_fields(self):
+        """Yields the stored fields for all documents.
+        """
+
+        for docnum in xrange(self.doc_count_all()):
+            if not self.is_deleted(docnum):
+                yield self.stored_fields(docnum)
+
+    def doc_count_all(self):
+        """Returns the total number of documents, DELETED OR UNDELETED,
+        in this reader.
+        """
+        raise NotImplementedError
+
+    def doc_count(self):
+        """Returns the total number of UNDELETED documents in this reader.
+        """
+        raise NotImplementedError
+
+    def doc_frequency(self, fieldname, text):
+        """Returns how many documents the given term appears in.
+        """
+        raise NotImplementedError
+
+    def frequency(self, fieldname, text):
+        """Returns the total number of instances of the given term in the
+        collection.
+        """
+        raise NotImplementedError
+
+    def field_length(self, fieldname):
+        """Returns the total number of terms in the given field. This is used
+        by some scoring algorithms.
+        """
+        raise NotImplementedError
+
+    def doc_field_length(self, docnum, fieldname, default=0):
+        """Returns the number of terms in the given field in the given
+        document. This is used by some scoring algorithms.
+        """
+        raise NotImplementedError
+
+    def doc_field_lengths(self, docnum):
+        """Returns an iterator of (fieldname, length) pairs for the given
+        document. This is used internally.
+        """
+
+        for fieldname in self.schema.scorable_names():
+            length = self.doc_field_length(docnum, fieldname)
+            if length:
+                yield (fieldname, length)
+
+    def max_field_length(self, fieldname, default=0):
+        """Returns the maximum length of the field across all documents.
+        """
+        raise NotImplementedError
+
+    def first_id(self, fieldname, text):
+        """Returns the first ID in the posting list for the given term. This
+        may be optimized in certain backends.
+        """
+
+        p = self.postings(fieldname, text)
+        if p.is_active():
+            return p.id()
+        raise TermNotFound((fieldname, text))
+
+    def postings(self, fieldname, text, scorer=None):
+        """Returns a :class:`~whoosh.matching.Matcher` for the postings of the
+        given term.
+
+        >>> pr = reader.postings("content", "render")
+        >>> pr.skip_to(10)
+        >>> pr.id()
+        12
+
+        :param fieldname: the field name or field number of the term.
+        :param text: the text of the term.
+        :rtype: :class:`whoosh.matching.Matcher`
+        """
+
+        raise NotImplementedError
+
+    def has_vector(self, docnum, fieldname):
+        """Returns True if the given document has a term vector for the given
+        field.
+        """
+        raise NotImplementedError
+
+    def vector(self, docnum, fieldname):
+        """Returns a :class:`~whoosh.matching.Matcher` object for the
+        given term vector.
+
+        >>> docnum = searcher.document_number(path=u'/a/b/c')
+        >>> v = searcher.vector(docnum, "content")
+        >>> v.all_as("frequency")
+        [(u"apple", 3), (u"bear", 2), (u"cab", 2)]
+
+        :param docnum: the document number of the document for which you want
+            the term vector.
+        :param fieldname: the field name or field number of the field for which
+            you want the term vector.
+        :rtype: :class:`whoosh.matching.Matcher`
+        """
+        raise NotImplementedError
+
+    def vector_as(self, astype, docnum, fieldname):
+        """Returns an iterator of (termtext, value) pairs for the terms in the
+        given term vector. This is a convenient shortcut to calling vector()
+        and using the Matcher object when all you want are the terms and/or
+        values.
+
+        >>> docnum = searcher.document_number(path=u'/a/b/c')
+        >>> searcher.vector_as("frequency", docnum, "content")
+        [(u"apple", 3), (u"bear", 2), (u"cab", 2)]
+
+        :param docnum: the document number of the document for which you want
+            the term vector.
+        :param fieldname: the field name or field number of the field for which
+            you want the term vector.
+        :param astype: a string containing the name of the format you want the
+            term vector's data in, for example "weights".
+        """
+
+        vec = self.vector(docnum, fieldname)
+        if astype == "weight":
+            while vec.is_active():
+                yield (vec.id(), vec.weight())
+                vec.next()
+        else:
+            format = self.schema[fieldname].format
+            decoder = format.decoder(astype)
+            while vec.is_active():
+                yield (vec.id(), decoder(vec.value()))
+                vec.next()
+
+    def most_frequent_terms(self, fieldname, number=5, prefix=''):
+        """Returns the top 'number' most frequent terms in the given field as a
+        list of (frequency, text) tuples.
+        """
+
+        return nlargest(number, ((tf, token)
+                                 for token, _, tf
+                                 in self.iter_prefix(fieldname, prefix)))
+
+    def most_distinctive_terms(self, fieldname, number=5, prefix=None):
+        """Returns the top 'number' terms with the highest `tf*idf` scores as
+        a list of (score, text) tuples.
+        """
+
+        return nlargest(number, ((tf * (1.0 / df), token)
+                                 for token, df, tf
+                                 in self.iter_prefix(fieldname, prefix)))
+
+    def leaf_readers(self):
+        """Returns a list of (IndexReader, docbase) pairs for the child readers
+        of this reader if it is a composite reader, or a false value if this
+        reader is atomic.
+        """
+
+        return False
+
+    #
+
+    def supports_caches(self):
+        """Returns True if this reader supports the field cache protocol.
+        """
+
+        return False
+
+    def sort_docs_by(self, fieldname, docnums, reverse=False):
+        """Returns a version of `docnums` sorted by the value of a field or
+        a set of fields in each document.
+
+        :param fieldname: either the name of a field, or a tuple of field names
+            to specify a multi-level sort.
+        :param docnums: a sequence of document numbers to sort.
+        :param reverse: if True, reverses the sort direction.
+        """
+
+        raise NotImplementedError
+
+    def key_docs_by(self, fieldname, docnums, limit, reverse=False, offset=0):
+        """Returns a sequence of `(sorting_key, docnum)` pairs for the
+        document numbers in `docnums`.
+
+        If `limit` is `None`, this method associates every document number with
+        a sorting key but does not sort them. If `limit` is not `None`, this
+        method returns a sorted list of at most `limit` pairs.
+
+        This method is useful for sorting and faceting documents in different
+        readers, by associating the sort key with the document number.
+
+        :param fieldname: either the name of a field, or a tuple of field names
+            to specify a multi-level sort.
+        :param docnums: a sequence of document numbers to key.
+        :param limit: if not `None`, only keys the first/last N documents.
+        :param reverse: if True, reverses the sort direction (when limit is not
+            `None`).
+        :param offset: a number to add to the docnums returned.
+        """
+
+        raise NotImplementedError
+
+    def group_docs_by(self, fieldname, docnums, groups, counts=False, offset=0):
+        """Returns a dictionary mapping field values to items with that value
+        in the given field(s).
+
+        :param fieldname: either the name of a field, or a tuple of field names
+            to specify a multi-level sort.
+        :param docnums: a sequence of document numbers to group.
+        :param counts: if True, return a dictionary of doc counts, instead of
+            a dictionary of lists of docnums.
+        :param offset: a number to add to the docnums returned.
+        """
+
+        gen = self.key_docs_by(fieldname, docnums, None, offset=offset)
+
+        if counts:
+            for key, docnum in gen:
+                if key not in groups:
+                    groups[key] = 0
+                groups[key] += 1
+        else:
+            for key, docnum in gen:
+                if key not in groups:
+                    groups[key] = []
+                groups[key].append(docnum)
+
+    def define_facets(self, name, doclists, save=False):
+        """Tells the reader to remember a set of facets under the given name.
+
+        :param name: the name to use for the set of facets.
+        :param doclists: a dictionary mapping facet names to lists of document
+            IDs.
+        :param save: whether to save caches (if any) to some form of permanent
+            storage (i.e. disk) if possible. This keyword may be used or
+            ignored in the backend.
+        """
+
+        raise NotImplementedError
+
+    def set_caching_policy(self, *args, **kwargs):
+        """Sets the field caching policy for this reader.
+        """
+
+        pass
+
+
+# Fake IndexReader class for empty indexes
+
+class EmptyReader(IndexReader):
+    def __init__(self, schema):
+        self.schema = schema
+
+    def __contains__(self, term):
+        return False
+
+    def __iter__(self):
+        return iter([])
+
+    def iter_from(self, fieldname, text):
+        return iter([])
+
+    def iter_field(self, fieldname, prefix=''):
+        return iter([])
+
+    def iter_prefix(self, fieldname, prefix=''):
+        return iter([])
+
+    def lexicon(self, fieldname):
+        return iter([])
+
+    def has_deletions(self):
+        return False
+
+    def is_deleted(self, docnum):
+        return False
+
+    def stored_fields(self, docnum):
+        raise KeyError("No document number %s" % docnum)
+
+    def all_stored_fields(self):
+        return iter([])
+
+    def doc_count_all(self):
+        return 0
+
+    def doc_count(self):
+        return 0
+
+    def doc_frequency(self, fieldname, text):
+        return 0
+
+    def frequency(self, fieldname, text):
+        return 0
+
+    def field_length(self, fieldname):
+        return 0
+
+    def doc_field_length(self, docnum, fieldname, default=0):
+        return default
+
+    def doc_field_lengths(self, docnum):
+        raise ValueError
+
+    def max_field_length(self, fieldname, default=0):
+        return 0
+
+    def postings(self, fieldname, text, scorer=None):
+        raise TermNotFound("%s:%r" % (fieldname, text))
+
+    def has_vector(self, docnum, fieldname):
+        return False
+
+    def vector(self, docnum, fieldname):
+        raise KeyError("No document number %s" % docnum)
+
+    def most_frequent_terms(self, fieldname, number=5, prefix=''):
+        return iter([])
+
+    def most_distinctive_terms(self, fieldname, number=5, prefix=None):
+        return iter([])
+
+
+# Multisegment reader class
+
+class MultiReader(IndexReader):
+    """Do not instantiate this object directly. Instead use Index.reader().
+    """
+
+    def is_atomic(self):
+        return False
+
+    def __init__(self, readers, generation=-1):
+        self.readers = readers
+        self._gen = generation
+        self.schema = None
+        if readers:
+            self.schema = readers[0].schema
+
+        self.doc_offsets = []
+        self.base = 0
+        for r in self.readers:
+            self.doc_offsets.append(self.base)
+            self.base += r.doc_count_all()
+
+        self.is_closed = False
+
+    def __contains__(self, term):
+        return any(r.__contains__(term) for r in self.readers)
+
+    def __iter__(self):
+        return self._merge_iters([iter(r) for r in self.readers])
+
+    def _document_segment(self, docnum):
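+        # Returns the index of the segment reader that contains the given
+        # (absolute) document number.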
+        return max(0, bisect_right(self.doc_offsets, docnum) - 1)
+
+    def _segment_and_docnum(self, docnum):
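+        # Converts an absolute document number into (segment index, docnum
+        # relative to that segment).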
+        segmentnum = self._document_segment(docnum)
+        offset = self.doc_offsets[segmentnum]
+        return segmentnum, docnum - offset
+
+    def _merge_iters(self, iterlist):
+        # Merge-sorts terms coming from a list of
+        # term iterators (IndexReader.__iter__() or
+        # IndexReader.iter_from()).
+
+        # Fill in the list with the head term from each iterator.
+
+        current = []
+        for it in iterlist:
+            fnum, text, docfreq, termcount = it.next()
+            current.append((fnum, text, docfreq, termcount, it))
+        heapify(current)
+
+        # Number of active iterators
+        active = len(current)
+        while active > 0:
+            # Peek at the first term in the sorted list
+            fnum, text = current[0][:2]
+            docfreq = 0
+            termcount = 0
+
+            # Add together all terms matching the first term in the list.
+            while current and current[0][0] == fnum and current[0][1] == text:
+                docfreq += current[0][2]
+                termcount += current[0][3]
+                it = current[0][4]
+                try:
+                    fn, t, df, tc = it.next()
+                    heapreplace(current, (fn, t, df, tc, it))
+                except StopIteration:
+                    heappop(current)
+                    active -= 1
+
+            # Yield the term with the summed doc frequency and term count.
+            yield (fnum, text, docfreq, termcount)
+
+    def add_reader(self, reader):
+        self.readers.append(reader)
+        self.doc_offsets.append(self.base)
+        self.base += reader.doc_count_all()
+
+    def close(self):
+        for d in self.readers:
+            d.close()
+        self.is_closed = True
+
+    def generation(self):
+        return self._gen
+
+    def iter_from(self, fieldname, text):
+        return self._merge_iters([r.iter_from(fieldname, text)
+                                  for r in self.readers])
+
+    # expand_prefix
+    # all_terms
+    # iter_field
+    # iter_prefix
+    # lexicon
+
+    def has_deletions(self):
+        return any(r.has_deletions() for r in self.readers)
+
+    def is_deleted(self, docnum):
+        segmentnum, segmentdoc = self._segment_and_docnum(docnum)
+        return self.readers[segmentnum].is_deleted(segmentdoc)
+
+    def stored_fields(self, docnum):
+        segmentnum, segmentdoc = self._segment_and_docnum(docnum)
+        return self.readers[segmentnum].stored_fields(segmentdoc)
+
+    def all_stored_fields(self):
+        for reader in self.readers:
+            for result in reader.all_stored_fields():
+                yield result
+
+    def doc_count_all(self):
+        return sum(dr.doc_count_all() for dr in self.readers)
+
+    def doc_count(self):
+        return sum(dr.doc_count() for dr in self.readers)
+
+    def field_length(self, fieldname):
+        return sum(dr.field_length(fieldname) for dr in self.readers)
+
+    def doc_field_length(self, docnum, fieldname, default=0):
+        segmentnum, segmentdoc = self._segment_and_docnum(docnum)
+        reader = self.readers[segmentnum]
+        return reader.doc_field_length(segmentdoc, fieldname, default=default)
+
+    # max_field_length
+
+    def first_id(self, fieldname, text):
+        for i, r in enumerate(self.readers):
+            try:
+                id = r.first_id(fieldname, text)
+            except (KeyError, TermNotFound):
+                pass
+            else:
+                if id is None:
+                    raise TermNotFound((fieldname, text))
+                else:
+                    return self.doc_offsets[i] + id
+
+        raise TermNotFound((fieldname, text))
+
+    def postings(self, fieldname, text, scorer=None):
+        postreaders = []
+        docoffsets = []
+        term = (fieldname, text)
+
+        for i, r in enumerate(self.readers):
+            if term in r:
+                offset = self.doc_offsets[i]
+
+                # Get a posting reader for the term and add it to the list
+                pr = r.postings(fieldname, text, scorer=scorer)
+                postreaders.append(pr)
+                docoffsets.append(offset)
+
+        if not postreaders:
+            raise TermNotFound((fieldname, text))
+        else:
+            return MultiMatcher(postreaders, docoffsets)
+
+    def has_vector(self, docnum, fieldname):
+        segmentnum, segmentdoc = self._segment_and_docnum(docnum)
+        return self.readers[segmentnum].has_vector(segmentdoc, fieldname)
+
+    def vector(self, docnum, fieldname):
+        segmentnum, segmentdoc = self._segment_and_docnum(docnum)
+        return self.readers[segmentnum].vector(segmentdoc, fieldname)
+
+    def vector_as(self, astype, docnum, fieldname):
+        segmentnum, segmentdoc = self._segment_and_docnum(docnum)
+        return self.readers[segmentnum].vector_as(astype, segmentdoc, fieldname)
+
+    def format(self, fieldname):
+        for r in self.readers:
+            fmt = r.format(fieldname)
+            if fmt is not None:
+                return fmt
+
+    def vector_format(self, fieldname):
+        for r in self.readers:
+            vfmt = r.vector_format(fieldname)
+            if vfmt is not None:
+                return vfmt
+
+    def doc_frequency(self, fieldname, text):
+        return sum(r.doc_frequency(fieldname, text) for r in self.readers)
+
+    def frequency(self, fieldname, text):
+        return sum(r.frequency(fieldname, text) for r in self.readers)
+
+    # most_frequent_terms
+    # most_distinctive_terms
+
+    def leaf_readers(self):
+        return zip(self.readers, self.doc_offsets)
+
+    def set_caching_policy(self, *args, **kwargs):
+        for r in self.readers:
+            r.set_caching_policy(*args, **kwargs)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/scoring.py b/lib/whoosh/whoosh/scoring.py
new file mode 100644
index 0000000..b04bcfc
--- /dev/null
+++ b/lib/whoosh/whoosh/scoring.py
@@ -0,0 +1,409 @@
+# Copyright 2008 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""
+This module contains classes for scoring (and sorting) search results.
+"""
+
+from __future__ import division
+from math import log, pi
+
+
+# Base classes
+
+class WeightingModel(object):
+    """Abstract base class for scoring models. A WeightingModel object provides
+    a method, ``scorer``, which returns an instance of
+    :class:`whoosh.scoring.Scorer`.
+
+    Basically, WeightingModel objects store the configuration information for
+    the model (for example, the values of B and K1 in the BM25F model), and
+    then create a scorer instance based on additional run-time information
+    (the searcher, the fieldname, and term text) to do the actual scoring.
+    """
+
+    use_final = False
+
+    def idf(self, searcher, fieldname, text):
+        """Returns the inverse document frequency of the given term.
+        """
+
+        n = searcher.doc_frequency(fieldname, text)
+        return log((searcher.doc_count_all()) / (n + 1)) + 1
+
+    def scorer(self, searcher, fieldname, text, qf=1):
+        """Returns an instance of :class:`whoosh.scoring.Scorer` configured
+        for the given searcher, fieldname, and term text.
+        """
+
+        raise NotImplementedError(self.__class__.__name__)
+
+    def final(self, searcher, docnum, score):
+        """Returns a final score for each document. You can use this method
+        in subclasses to apply document-level adjustments to the score, for
+        example using the value of a stored field to influence the score
+        (although that would be slow).
+
+        WeightingModel sub-classes that use ``final()`` should have the
+        attribute ``use_final`` set to ``True``.
+
+        :param searcher: :class:`whoosh.searching.Searcher` for the index.
+        :param docnum: the doc number of the document being scored.
+        :param score: the document's accumulated term score.
+
+        :rtype: float
+        """
+
+        return score
+
+
+class BaseScorer(object):
+    """Base class for "scorer" implementations. A scorer provides a method for
+    scoring a document, and sometimes methods for rating the "quality" of a
+    document and a matcher's current "block", to implement quality-based
+    optimizations.
+
+    Scorer objects are created by WeightingModel objects. Basically,
+    WeightingModel objects store the configuration information for the model
+    (for example, the values of B and K1 in the BM25F model), and then create
+    a scorer instance.
+    """
+
+    def supports_quality(self):
+        """Returns True if this class supports quality optimizations.
+        """
+
+        return False
+
+    def score(self, matcher):
+        """Returns a score for the current document of the matcher.
+        """
+
+        raise NotImplementedError(self.__class__.__name__)
+
+    def quality(self, matcher):
+        """Returns an approximate quality rating for the current document of
+        the matcher.
+        """
+
+        raise NotImplementedError(self.__class__.__name__)
+
+    def block_quality(self, matcher):
+        """Returns an approximate quality rating for the matcher's current
+        block (whatever concept of block the matcher might use).
+        """
+
+        raise NotImplementedError(self.__class__.__name__)
+
+
+class WOLScorer(BaseScorer):
+    """A "middleware" abstract base class for scorers that use
+    weight-over-length (WOL) -- that is, weight divided by field length -- as
+    the approximate quality rating. This class requires a method
+    ``dfl(docnum)`` which returns the length of the field in the given
+    document.
+    """
+
+    def supports_quality(self):
+        return True
+
+    def quality(self, matcher):
+        return matcher.weight() / self.dfl(matcher.id())
+
+    def block_quality(self, matcher):
+        return matcher.block_maxwol()
+
+
+# WeightScorer
+
+class WeightScorer(BaseScorer):
+    """A scorer that simply returns the weight as the score. This is useful
+    for more complex weighting models to return when they are asked for a
+    scorer for fields that aren't scorable (don't store field lengths).
+    """
+
+    def supports_quality(self):
+        return True
+
+    def score(self, matcher):
+        return matcher.weight()
+
+    def quality(self, matcher):
+        return matcher.weight()
+
+    def block_quality(self, matcher):
+        return matcher.block_maxweight()
+
+
+# WeightingModel implementations
+
+class BM25F(WeightingModel):
+    """Implements the BM25F scoring algorithm.
+    """
+
+    def __init__(self, B=0.75, K1=1.2, **kwargs):
+        """
+
+        >>> from whoosh import scoring
+        >>> # Set a custom B value for the "content" field
+        >>> w = scoring.BM25F(B=0.75, content_B=1.0, K1=1.5)
+
+        :param B: free parameter, see the BM25 literature. Keyword arguments of
+            the form ``fieldname_B`` (for example, ``body_B``) set field-
+            specific values for B.
+        :param K1: free parameter, see the BM25 literature.
+        """
+
+        self.B = B
+        self.K1 = K1
+
+        self._field_B = {}
+        for k, v in kwargs.iteritems():
+            if k.endswith("_B"):
+                fieldname = k[:-2]
+                self._field_B[fieldname] = v
+
+    def scorer(self, searcher, fieldname, text, qf=1):
+        if not searcher.schema[fieldname].scorable:
+            return WeightScorer()
+
+        idf = searcher.idf(fieldname, text)
+        avglength = searcher.avg_field_length(fieldname) or 1
+
+        def dfl(docnum):
+            return searcher.doc_field_length(docnum, fieldname, 1)
+
+        if fieldname in self._field_B:
+            B = self._field_B[fieldname]
+        else:
+            B = self.B
+
+        return BM25F.BM25FScorer(idf, avglength, dfl, B, self.K1, qf=qf)
+
+    class BM25FScorer(WOLScorer):
+        def __init__(self, idf, avglength, dfl, B, K1, qf=1):
+            self.idf = idf
+            self.avglength = avglength
+            self.dfl = dfl
+            self.B = B
+            self.K1 = K1
+            self.qf = qf
+
+        def score(self, matcher):
+            weight = matcher.weight()
+            length = self.dfl(matcher.id())
+            B = self.B
+
+            w = weight / ((1 - B) + B * (length / self.avglength))
+            return self.idf * (w / (self.K1 + w))
+
+
+class PL2(WeightingModel):
+    """Implements the PL2 scoring model from Terrier.
+
+    See http://terrier.org/
+    """
+
+    rec_log2_of_e = 1.0 / log(2)
+
+    def __init__(self, c=1.0):
+        self.c = c
+
+    def scorer(self, searcher, fieldname, text, qf=1):
+        if not searcher.schema[fieldname].scorable:
+            return WeightScorer()
+
+        collfreq = searcher.frequency(fieldname, text)
+        doccount = searcher.doc_count_all()
+        avglength = searcher.avg_field_length(fieldname) or 1
+
+        def dfl(docnum):
+            return searcher.doc_field_length(docnum, fieldname, 1)
+
+        return PL2.PL2Scorer(collfreq, doccount, avglength, dfl, self.c, qf=qf)
+
+    class PL2Scorer(WOLScorer):
+        def __init__(self, collfreq, doccount, avglength, dfl, c, qf=1):
+            self.collfreq = collfreq
+            self.doccount = doccount
+            self.avglength = avglength
+            self.dfl = dfl
+            self.c = c
+            self.qf = qf
+
+        def score(self, matcher):
+            weight = matcher.weight()
+            length = self.dfl(matcher.id())
+            rec_log2_of_e = PL2.rec_log2_of_e
+
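+            # tf is the term weight normalized by document length (Terrier's
+            # "Normalisation 2"); f is the term's mean frequency per document
+            # in the collection, used as the expected frequency under the
+            # random model.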
+            tf = weight * log(1.0 + (self.c * self.avglength) / length)
+            norm = 1.0 / (weight + 1.0)
+            f = self.collfreq / self.doccount
+            return (norm * self.qf * (tf * log(1.0 / f, 2)
+                                      + f * rec_log2_of_e
+                                      + 0.5 * log(2 * pi * tf, 2)
+                                      + tf * (log(tf, 2) - rec_log2_of_e)))
+
+
+# Simple models
+
+class Frequency(WeightingModel):
+    def scorer(self, searcher, fieldname, text, qf=1):
+        return WeightScorer()
+
+
+class TF_IDF(WeightingModel):
+    def scorer(self, searcher, fieldname, text, qf=1):
+        idf = searcher.idf(fieldname, text)
+        return TF_IDF.TF_IDFScorer(idf)
+
+    class TF_IDFScorer(BaseScorer):
+        def __init__(self, idf):
+            self.idf = idf
+
+        def supports_quality(self):
+            return True
+
+        def score(self, matcher):
+            return matcher.weight() * self.idf
+
+        def quality(self, matcher):
+            return matcher.weight()
+
+        def block_quality(self, matcher):
+            return matcher.block_maxweight()
+
+
+# Utility models
+
+class Weighting(WeightingModel):
+    """This class provides backwards-compatibility with the old weighting
+    class architecture, so any existing custom scorers don't need to be
+    rewritten.
+
+    It may also be useful for quick experimentation since you only need to
+    override the ``score()`` method to try a scoring algorithm, without having
+    to create an inner Scorer class::
+
+        class MyWeighting(Weighting):
+            def score(self, searcher, fieldname, text, docnum, weight):
+                # Return the docnum as the score, for some reason
+                return docnum
+
+        mysearcher = myindex.searcher(weighting=MyWeighting)
+    """
+
+    def scorer(self, searcher, fieldname, text, qf=1):
+        return self.CompatibilityScorer(searcher, fieldname, text, self.score)
+
+    def score(self, searcher, fieldname, text, docnum, weight):
+        raise NotImplementedError
+
+    class CompatibilityScorer(BaseScorer):
+        def __init__(self, searcher, fieldname, text, scoremethod):
+            self.searcher = searcher
+            self.fieldname = fieldname
+            self.text = text
+            self.scoremethod = scoremethod
+
+        def score(self, matcher):
+            return self.scoremethod(self.searcher, self.fieldname, self.text,
+                                    matcher.id(), matcher.weight())
+
+
+class MultiWeighting(WeightingModel):
+    """Chooses from multiple scoring algorithms based on the field.
+    """
+
+    def __init__(self, default, **weightings):
+        """The only non-keyword argument specifies the default
+        :class:`Weighting` instance to use. Keyword arguments specify
+        Weighting instances for specific fields.
+
+        For example, to use ``BM25F`` for most fields, but ``Frequency`` for
+        the ``id`` field and ``TF_IDF`` for the ``keys`` field::
+
+            mw = MultiWeighting(BM25F(), id=Frequency(), keys=TF_IDF())
+
+        :param default: the Weighting instance to use for fields not
+            specified in the keyword arguments.
+        """
+
+        self.default = default
+        # Store weighting functions by field name
+        self.weightings = weightings
+
+    def scorer(self, searcher, fieldname, text, qf=1):
+        w = self.weightings.get(fieldname, self.default)
+        return w.scorer(searcher, fieldname, text, qf=qf)
+
+
+class ReverseWeighting(WeightingModel):
+    """Wraps a weighting object and subtracts the wrapped model's scores from
+    0, essentially reversing the weighting model.
+    """
+
+    def __init__(self, weighting):
+        self.weighting = weighting
+
+    def scorer(self, searcher, fieldname, text, qf=1):
+        subscorer = self.weighting.scorer(searcher, fieldname, text, qf=qf)
+        return ReverseWeighting.ReverseScorer(subscorer)
+
+    class ReverseScorer(BaseScorer):
+        def __init__(self, subscorer):
+            self.subscorer = subscorer
+
+        def supports_quality(self):
+            return self.subscorer.supports_quality()
+
+        def score(self, matcher):
+            return 0 - self.subscorer.score(matcher)
+
+        def quality(self, matcher):
+            return 0 - self.subscorer.quality(matcher)
+
+        def block_quality(self, matcher):
+            return 0 - self.subscorer.block_quality(matcher)
+
+
+#class PositionWeighting(WeightingModel):
+#    def __init__(self, reversed=False):
+#        self.reversed = reversed
+#
+#    def scorer(self, searcher, fieldname, text, qf=1):
+#        return PositionWeighting.PositionScorer()
+#
+#    class PositionScorer(BaseScorer):
+#        def score(self, matcher):
+#            p = min(span.pos for span in matcher.spans())
+#            if self.reversed:
+#                return p
+#            else:
+#                return 0 - p
+
+
diff --git a/lib/whoosh/whoosh/searching.py b/lib/whoosh/whoosh/searching.py
new file mode 100644
index 0000000..628dc01
--- /dev/null
+++ b/lib/whoosh/whoosh/searching.py
@@ -0,0 +1,1636 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""This module contains classes and functions related to searching the index.
+"""
+
+
+from __future__ import division
+import copy
+import threading
+from collections import defaultdict, deque
+from heapq import heappush, heapreplace
+from math import ceil
+
+from whoosh import classify, highlight, query, scoring
+from whoosh.reading import TermNotFound
+from whoosh.support.bitvector import BitSet, BitVector
+from whoosh.util import now, lru_cache
+
+
+class TimeLimit(Exception):
+    pass
+
+
+# Searcher class
+
+class Searcher(object):
+    """Wraps an :class:`~whoosh.reading.IndexReader` object and provides
+    methods for searching the index.
+    """
+
+    def __init__(self, reader, weighting=scoring.BM25F, closereader=True,
+                 fromindex=None, parent=None):
+        """
+        :param reader: An :class:`~whoosh.reading.IndexReader` object for
+            the index to search.
+        :param weighting: A :class:`whoosh.scoring.WeightingModel` object (or
+            class) to use to score found documents.
+        :param closereader: Whether the underlying reader will be closed when
+            the searcher is closed.
+        :param fromindex: An optional reference to the index of the underlying
+            reader. This is required for :meth:`Searcher.up_to_date` and
+            :meth:`Searcher.refresh` to work.
+        """
+
+        self.ixreader = reader
+        self.is_closed = False
+        self._closereader = closereader
+        self._ix = fromindex
+
+        if parent:
+            self.schema = parent.schema
+            self._doccount = parent._doccount
+            self._idf_cache = parent._idf_cache
+            self._filter_cache = parent._filter_cache
+        else:
+            self.schema = self.ixreader.schema
+            self._doccount = self.ixreader.doc_count_all()
+            self._idf_cache = {}
+            self._filter_cache = {}
+
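+        # The weighting argument may be either a WeightingModel class or an
+        # instance; instantiate it if a class was passed.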
+        if type(weighting) is type:
+            self.weighting = weighting()
+        else:
+            self.weighting = weighting
+
+        self.leafreaders = None
+        self.subsearchers = None
+        if not self.ixreader.is_atomic():
+            self.leafreaders = self.ixreader.leaf_readers()
+            self.subsearchers = [(self._subsearcher(r), offset) for r, offset
+                                 in self.leafreaders]
+
+        # Copy attributes/methods from wrapped reader
+        for name in ("stored_fields", "all_stored_fields", "vector", "vector_as",
+                     "lexicon", "frequency", "doc_frequency",
+                     "field_length", "doc_field_length", "max_field_length",
+                     ):
+            setattr(self, name, getattr(self.ixreader, name))
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *exc_info):
+        self.close()
+
+    def _subsearcher(self, reader):
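+        # Wraps a leaf reader in a child searcher that shares this searcher's
+        # schema and caches (via parent=self).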
+        return self.__class__(reader, fromindex=self._ix,
+                              weighting=self.weighting, parent=self)
+
+    def is_atomic(self):
+        return self.reader().is_atomic()
+
+    def doc_count(self):
+        """Returns the number of UNDELETED documents in the index.
+        """
+
+        return self.ixreader.doc_count()
+
+    def doc_count_all(self):
+        """Returns the total number of documents, DELETED OR UNDELETED, in
+        the index.
+        """
+
+        return self._doccount
+
+    def up_to_date(self):
+        """Returns True if this Searcher represents the latest version of the
+        index, for backends that support versioning.
+        """
+
+        if not self._ix:
+            raise Exception("This searcher was not created with a reference to its index")
+        return self._ix.latest_generation() == self.ixreader.generation()
+
+    def refresh(self):
+        """Returns a fresh searcher for the latest version of the index::
+
+            my_searcher = my_searcher.refresh()
+
+        If the index has not changed since this searcher was created, this
+        searcher is simply returned.
+
+        This method may CLOSE underlying resources that are no longer needed
+        by the refreshed searcher, so you CANNOT continue to use the original
+        searcher after calling ``refresh()`` on it.
+        """
+
+        if not self._ix:
+            raise Exception("This searcher was not created with a reference to its index")
+        if self._ix.latest_generation() == self.reader().generation():
+            return self
+
+        # Get a new reader, re-using resources from the current reader if
+        # possible
+        self.is_closed = True
+        newreader = self._ix.reader(reuse=self.ixreader)
+        return self.__class__(newreader, fromindex=self._ix,
+                              weighting=self.weighting)
+
+    def close(self):
+        if self._closereader:
+            self.ixreader.close()
+        self.is_closed = True
+
+    def avg_field_length(self, fieldname, default=None):
+        if not self.ixreader.schema[fieldname].scorable:
+            return default
+        return self.ixreader.field_length(fieldname) / (self._doccount or 1)
+
+    def reader(self):
+        """Returns the underlying :class:`~whoosh.reading.IndexReader`.
+        """
+        return self.ixreader
+
+    def set_caching_policy(self, *args, **kwargs):
+        self.ixreader.set_caching_policy(*args, **kwargs)
+
+    def scorer(self, fieldname, text, qf=1):
+        if self._doccount:
+            scorer = self.weighting.scorer(self, fieldname, text, qf=qf)
+        else:
+            # Scoring functions tend to cache information that isn't available
+            # on an empty index.
+            scorer = None
+
+        return scorer
+
+    def postings(self, fieldname, text, qf=1):
+        """Returns a :class:`whoosh.matching.Matcher` for the postings of the
+        given term. Unlike the :func:`whoosh.reading.IndexReader.postings`
+        method, this method automatically sets the scoring functions on the
+        matcher from the searcher's weighting object.
+        """
+
+        scorer = self.scorer(fieldname, text, qf=qf)
+        return self.ixreader.postings(fieldname, text, scorer=scorer)
+
+    def idf(self, fieldname, text):
+        """Calculates the Inverse Document Frequency of the current term (calls
+        idf() on the searcher's Weighting object).
+        """
+
+        # This method just calls the Weighting object's idf() method, but
+        # caches the result. So Weighting objects should call *this* method
+        # which will then call *their own* idf() methods.
+
+        cache = self._idf_cache
+        term = (fieldname, text)
+        if term in cache:
+            return cache[term]
+
+        idf = self.weighting.idf(self, fieldname, text)
+        cache[term] = idf
+        return idf
+
+    def document(self, **kw):
+        """Convenience method returns the stored fields of a document
+        matching the given keyword arguments, where the keyword keys are
+        field names and the values are terms that must appear in the field.
+
+        This method is equivalent to::
+
+            searcher.stored_fields(searcher.document_number(<keyword args>))
+
+        Where Searcher.documents() returns a generator, this function returns
+        either a dictionary or None. Use it when you assume the given keyword
+        arguments either match zero or one documents (i.e. at least one of the
+        fields is a unique key).
+
+        >>> stored_fields = searcher.document(path=u"/a/b")
+        >>> if stored_fields:
+        ...   print stored_fields['title']
+        ... else:
+        ...   print "There is no document with the path /a/b"
+        """
+
+        for p in self.documents(**kw):
+            return p
+
+    def documents(self, **kw):
+        """Convenience method returns the stored fields of a document
+        matching the given keyword arguments, where the keyword keys are
+        field names and the values are terms that must appear in the field.
+
+        Returns a generator of dictionaries containing the
+        stored fields of any documents matching the keyword arguments.
+
+        >>> for stored_fields in searcher.documents(emailto=u"matt@whoosh.ca"):
+        ...   print "Email subject:", stored_fields['subject']
+        """
+
+        ixreader = self.ixreader
+        return (ixreader.stored_fields(docnum)
+                for docnum in self.document_numbers(**kw))
+
+    def _kw_to_text(self, kw):
+        for k, v in kw.iteritems():
+            field = self.schema[k]
+            kw[k] = field.to_text(v)
+
+    def _query_for_kw(self, kw):
+        subqueries = []
+        for key, value in kw.iteritems():
+            subqueries.append(query.Term(key, value))
+        return query.And(subqueries).normalize()
+
+    def document_number(self, **kw):
+        """Returns the document number of the document matching the given
+        keyword arguments, where the keyword keys are field names and the
+        values are terms that must appear in the field.
+
+        >>> docnum = searcher.document_number(path=u"/a/b")
+
+        Where Searcher.document_numbers() returns a generator, this function
+        returns either an int or None. Use it when you assume the given keyword
+        arguments either match zero or one documents (i.e. at least one of the
+        fields is a unique key).
+
+        :rtype: int
+        """
+
+        # In the common case where only one keyword was given, just use
+        # first_id() instead of building a query.
+
+        self._kw_to_text(kw)
+        if len(kw) == 1:
+            k, v = kw.items()[0]
+            try:
+                return self.reader().first_id(k, v)
+            except TermNotFound:
+                return None
+        else:
+            m = self._query_for_kw(kw).matcher(self)
+            if m.is_active():
+                return m.id()
+
+    def document_numbers(self, **kw):
+        """Returns a generator of the document numbers for documents matching
+        the given keyword arguments, where the keyword keys are field names and
+        the values are terms that must appear in the field.
+
+        >>> docnums = list(searcher.document_numbers(emailto=u"matt@whoosh.ca"))
+        """
+
+        if len(kw) == 0:
+            return []
+
+        self._kw_to_text(kw)
+        return self.docs_for_query(self._query_for_kw(kw))
+
+    def _find_unique(self, uniques):
+        # uniques is a list of ("unique_field_name", "field_value") tuples
+        delset = set()
+        for name, value in uniques:
+            docnum = self.document_number(**{name: value})
+            if docnum is not None:
+                delset.add(docnum)
+        return delset
+
+    @lru_cache(20)
+    def _query_to_comb(self, fq):
+        return BitSet(self.doc_count_all(), source=self.docs_for_query(fq))
+
+    def _filter_to_comb(self, obj):
+        if obj is None:
+            return None
+        if isinstance(obj, (set, BitVector, BitSet)):
+            c = obj
+        elif isinstance(obj, Results):
+            c = obj.docset
+        elif isinstance(obj, ResultsPage):
+            c = obj.results.docset
+        elif isinstance(obj, query.Query):
+            c = self._query_to_comb(obj)
+        else:
+            raise Exception("Don't know what to do with filter object %r" % obj)
+
+        return c
+
+    def docs_for_query(self, q, leafs=True):
+        if self.subsearchers and leafs:
+            for s, offset in self.subsearchers:
+                for docnum in q.docs(s):
+                    yield docnum + offset
+        else:
+            for docnum in q.docs(self):
+                yield docnum
+
+    def key_terms(self, docnums, fieldname, numterms=5,
+                  model=classify.Bo1Model, normalize=True):
+        """Returns the 'numterms' most important terms from the documents
+        listed (by number) in 'docnums'. You can get document numbers for the
+        documents you're interested in with the document_number() and
+        document_numbers() methods.
+
+        >>> docnum = searcher.document_number(path=u"/a/b")
+        >>> keywords = list(searcher.key_terms([docnum], "content"))
+
+        "Most important" is generally defined as terms that occur frequently in
+        the top hits but relatively infrequently in the collection as a whole.
+
+        :param fieldname: Look at the terms in this field. This field must
+            store vectors.
+        :param docnums: A sequence of document numbers specifying which
+            documents to extract key terms from.
+        :param numterms: Return this number of important terms.
+        :param model: The classify.ExpansionModel to use. See the classify
+            module.
+        """
+
+        expander = classify.Expander(self.ixreader, fieldname, model=model)
+        for docnum in docnums:
+            expander.add_document(docnum)
+        return expander.expanded_terms(numterms, normalize=normalize)
+
+    def key_terms_from_text(self, fieldname, text, numterms=5,
+                            model=classify.Bo1Model, normalize=True):
+        """Return the 'numterms' most important terms from the given text.
+
+        :param numterms: Return this number of important terms.
+        :param model: The classify.ExpansionModel to use. See the classify
+            module.
+        """
+
+        expander = classify.Expander(self.ixreader, fieldname, model=model)
+        expander.add_text(text)
+        return expander.expanded_terms(numterms, normalize=normalize)
+
+    def more_like(self, docnum, fieldname, text=None, top=10, numterms=5,
+                  model=classify.Bo1Model, normalize=False):
+        """Returns a :class:`Results` object containing documents similar to
+        the given document, based on "key terms" in the given field::
+
+            # Get the ID for the document you're interested in
+            docnum = search.document_number(path=u"/a/b/c")
+
+            r = searcher.more_like(docnum)
+
+            print "Documents like", searcher.stored_fields(docnum)["title"]
+            for hit in r:
+                print hit["title"]
+
+        :param fieldname: the name of the field to use to test similarity.
+        :param text: by default, the method will attempt to load the contents
+            of the field from the stored fields for the document, or from a
+            term vector. If the field isn't stored or vectored in the index,
+            but you have access to the text another way (for example, loading
+            from a file or a database), you can supply it using the ``text``
+            parameter.
+        :param top: the number of results to return.
+        :param numterms: the number of "key terms" to extract from the hit and
+            search for. Using more terms is slower but gives potentially more
+            and more accurate results.
+        :param model: (expert) a :class:`whoosh.classify.ExpansionModel` to use
+            to compute "key terms".
+        :param normalize: whether to normalize term weights.
+        """
+
+        if text:
+            kts = self.key_terms_from_text(fieldname, text, numterms=numterms,
+                                           model=model, normalize=normalize)
+        else:
+            kts = self.key_terms([docnum], fieldname, numterms=numterms,
+                                 model=model, normalize=normalize)
+        # Create an Or query from the key terms
+        q = query.Or([query.Term(fieldname, word, boost=weight)
+                      for word, weight in kts])
+
+        # Filter the original document out of the results using a bit vector
+        # with every bit set except the one for this document
+        size = self.doc_count_all()
+        comb = BitVector(size, [n for n in xrange(self.doc_count_all())
+                                if n != docnum])
+        return self.search(q, limit=top, filter=comb)
+
+    def search_page(self, query, pagenum, pagelen=10, **kwargs):
+        if pagenum < 1:
+            raise ValueError("pagenum must be >= 1")
+        results = self.search(query, limit=pagenum * pagelen, **kwargs)
+        return ResultsPage(results, pagenum, pagelen)
+
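+    # Example usage (illustrative sketch, assuming a query object "myquery"):
+    # fetch the second page of ten hits and print a stored field of each hit.
+    #
+    #     page = searcher.search_page(myquery, 2, pagelen=10)
+    #     for hit in page:
+    #         print hit["title"]
+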
+    def find(self, defaultfield, querystring, **kwargs):
+        from whoosh.qparser import QueryParser
+        qp = QueryParser(defaultfield, schema=self.ixreader.schema)
+        q = qp.parse(querystring)
+        return self.search(q, **kwargs)
+
+    def sorter(self, *args, **kwargs):
+        """Returns a :class:`whoosh.sorting.Sorter` object for this searcher.
+        See the documentation for ``Sorter`` for how to use the sorter object
+        to get sorted search results.
+        """
+
+        from whoosh.sorting import Sorter
+
+        return Sorter(self, *args, **kwargs)
+
+    def sort_query_using(self, q, fn, filter=None):
+        """Returns a :class:`Results` object with the documents matching the
+        given query ordered according to the score returned by the given function.
+
+        The function is called for each matching document with two arguments:
+        this searcher and the document number. The function can usually only
+        work with information that can be accessed based on the document
+        number, such as stored fields, term vectors, and field caches.
+
+        For example, assume an index where the "text" field was indexed with
+        term vectors. The following function loads the term vector for each
+        document and ranks documents containing equal occurrences of the terms
+        "love" and "hate" highest::
+
+            def fn(searcher, docnum):
+                # Create a dictionary of term text to frequency
+                v = dict(searcher.vector_as("frequency", docnum, "text"))
+                # Give highest scores to documents that have equal numbers
+                # of the two terms
+                return 1.0 / (abs(v["love"] - v["hate"]) + 1.0)
+
+            with myindex.searcher() as s:
+                q = And([Term("text", u"love"), Term("text", u"hate")])
+                results = s.sort_query_using(q, fn)
+
+        (Note that the "function" can be an object with a ``__call__`` method.
+        This may be useful for sharing information between calls.)
+
+        :param q: the query to run.
+        :param fn: a function to run on each document number to determine the
+            document's "score". Higher values appear earlier in the results.
+        :param filter: a query, Results object, or set of docnums. The results
+            will only contain documents that are also in the filter object.
+        """
+
+        t = now()
+        comb = self._filter_to_comb(filter)
+        ls = [(fn(self, docnum), docnum) for docnum in self.docs_for_query(q)
+              if (not comb) or docnum in comb]
+        docset = set(docnum for _, docnum in ls)
+        ls.sort(key=lambda x: (0 - x[0], x[1]))
+        return Results(self, q, ls, docset, runtime=now() - t)
+
+    def define_facets(self, name, qs, save=False):
+        def doclists_for_searcher(s):
+            return dict((key, q.docs(s)) for key, q in qs.iteritems())
+
+        if self.subsearchers:
+            for s in self.subsearchers:
+                dls = doclists_for_searcher(s)
+                s.reader().define_facets(name, dls, save=save)
+        else:
+            dls = doclists_for_searcher(self)
+            self.ixreader.define_facets(name, dls, save=save)
+
+    def categorize_query(self, q, fieldname, counts=False):
+        groups = {}
+        if isinstance(fieldname, basestring):
+            fieldname = (fieldname, )
+
+        if self.subsearchers:
+            for s, offset in self.subsearchers:
+                r = s.reader()
+                r.group_docs_by(fieldname, q.docs(s), groups, counts=counts,
+                                offset=offset)
+        else:
+            self.ixreader.group_docs_by(fieldname, q.docs(self), groups,
+                                        counts=counts)
+        return groups
+
+    def search(self, q, limit=10, sortedby=None, reverse=False, groupedby=None,
+               optimize=True, scored=True, filter=None, collector=None):
+        """Runs the query represented by the ``query`` object and returns a
+        Results object.
+
+        :param q: a :class:`whoosh.query.Query` object.
+        :param limit: the maximum number of documents to score. If you're only
+            interested in the top N documents, you can set limit=N to limit the
+            scoring for a faster search.
+        :param sortedby: the name of a field to sort by, or a tuple of field
+            names to sort by multiple fields. This is a shortcut for using a
+            :class:`whoosh.sorting.Sorter` object to do a simple sort. To do
+            complex sorts (where different fields are sorted in different
+            directions), use :meth:`Searcher.sorter` to get a sorter and use it
+            to perform the sorted search.
+        :param reverse: Reverses the direction of the sort.
+        :param groupedby: a list of field names or facet names. If this
+            argument is not None, you can use the :meth:`Results.groups` method
+            on the results object to retrieve a dictionary mapping field/facet
+            values to document numbers.
+        :param optimize: use optimizations to get faster results when possible.
+        :param scored: if False, the results are not scored and are returned in
+            "natural" order.
+        :param collector: (expert) an instance of :class:`Collector` to use to
+            collect the found documents.
+        :param filter: a query, Results object, or set of docnums. The results
+            will only contain documents that are also in the filter object.
+        :rtype: :class:`Results`
+        """
+
+        if limit is not None and limit < 1:
+            raise ValueError("limit must be >= 1")
+
+        if sortedby is not None:
+            sorter = self.sorter(sortedby=sortedby)
+            return sorter.sort_query(q, limit=limit, reverse=reverse,
+                                     filter=filter)
+
+        if isinstance(groupedby, basestring):
+            groupedby = (groupedby, )
+
+        if collector is None:
+            collector = Collector(limit=limit, usequality=optimize,
+                                  groupedby=groupedby, scored=scored,
+                                  reverse=reverse)
+        else:
+            collector.limit = limit
+            collector.usequality = optimize
+            collector.groupedby = groupedby
+            collector.scored = scored
+            collector.reverse = reverse
+
+        return collector.search(self, q, filter=filter)
+
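+    # Example usage (illustrative sketch, assuming a query object "myquery"
+    # and a schema containing "tag" and "title" fields): typical calls to
+    # search() using scoring, simple sorting, and grouping.
+    #
+    #     results = searcher.search(myquery, limit=20)
+    #     results = searcher.search(myquery, sortedby="tag", reverse=True)
+    #     results = searcher.search(myquery, groupedby=["tag"])
+    #     tag_groups = results.groups("tag")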
+
+class Collector(object):
+    def __init__(self, limit=10, usequality=True, replace=10, groupedby=None,
+                 scored=True, timelimit=None, greedy=False, reverse=False):
+        """A Collector finds the matching documents, scores them, collects them
+        into a list, and produces a Results object from them.
+
+        Normally you do not need to instantiate an instance of the base
+        Collector class; the :meth:`Searcher.search` method does that for you.
+
+        If you create a custom Collector instance or subclass you can pass it
+        to the :meth:`Searcher.search` method using the ``collector`` keyword
+        argument::
+
+            mycollector = MyCollector()
+            results = searcher.search(myquery, collector=mycollector)
+
+        Note that when you call :meth:`Searcher.search` with a custom collector
+        the method will overwrite several attributes on the collector instance
+        with the values of keyword arguments to :meth:`Searcher.search`. To
+        avoid this, start the search from the collector instead::
+
+            mycollector = MyCollector()
+            results = mycollector.search(searcher, myquery)
+
+        **Do not** re-use or share Collector instances between searches. You
+        should create a new Collector instance for each search.
+
+        To limit the amount of time a search can take, pass the number of
+        seconds to the ``timelimit`` keyword argument::
+
+            # Limit the search to 4.5 seconds
+            col = Collector(timelimit=4.5, greedy=False)
+            # If this call takes more than 4.5 seconds, it will raise a
+            # whoosh.searching.TimeLimit exception
+            try:
+                r = searcher.search(myquery, collector=col)
+            except TimeLimit:
+                # You can still retrieve partial results from the collector
+                # after a time limit exception
+                r = col.results()
+
+        If the ``greedy`` keyword is ``True``, the collector will finish adding
+        the most recent hit before raising the ``TimeLimit`` exception.
+        """
+
+        self.limit = limit
+        self.usequality = usequality
+        self.replace = replace
+        self.groupedby = groupedby
+        self.scored = scored
+        self.timelimit = timelimit
+        self.greedy = greedy
+        self.reverse = reverse
+
+        self.reset()
+
+    def reset(self):
+        if self.should_add_all():
+            self._items = deque()
+        else:
+            self._items = []
+        self.groups = {}
+        self.docset = set()
+        self.done = False
+        self.minquality = None
+        self.doc_offset = 0
+        self.timesup = False
+        self.timer = None
+
+    def search(self, searcher, q, filter=None):
+        """Top-level method call which uses the given :class:`Searcher` and
+        :class:`whoosh.query.Query` objects to return a :class:`Results`
+        object.
+
+        This method takes care of calling :meth:`Collector.add_searcher`
+        for each sub-searcher in a collective searcher. You should only call
+        this method on a top-level searcher.
+        """
+
+        self.reset()
+        self._searcher = searcher
+        self._q = q
+
+        w = searcher.weighting
+        self.final = w.final if w.use_final else None
+
+        if self.limit and self.limit > searcher.doc_count_all():
+            self.limit = None
+
+        self._comb = None
+        if filter:
+            self.add_filter(filter)
+
+        if self.timelimit:
+            self.timer = threading.Timer(self.timelimit, self._timestop)
+            self.timer.start()
+
+        t = now()
+        if not searcher.is_atomic():
+            for s, offset in searcher.subsearchers:
+                if self.timesup:
+                    raise TimeLimit
+                self.doc_offset = offset
+                done = self.add_searcher(s, q)
+                if done:
+                    break
+        else:
+            self.add_searcher(searcher, q)
+
+        if self.timer:
+            self.timer.cancel()
+
+        runtime = now() - t
+        return self.results(runtime=runtime)
+
+    def _timestop(self):
+        # Called by the Timer when the time limit expires. We could raise the
+        # TimeLimit exception here, but that would probably leave the collector
+        # in an inconsistent state. Instead, we'll set a flag, and check the
+        # flag inside the add_(all|top)_matches loops.
+        self.timesup = True
+
+    def add_filter(self, obj):
+        c = self._searcher._filter_to_comb(obj)
+        if self._comb is None:
+            self._comb = set()
+        self._comb |= c
+
+    def add_searcher(self, searcher, q):
+        """Adds the documents from the given searcher with the given query to
+        the collector. This is called by the :meth:`Collector.search` method.
+        """
+
+        return self.add_matches(searcher, q.matcher(searcher))
+
+    def score(self, searcher, matcher):
+        """Called to compute the score for the current document in the given
+        :class:`whoosh.matching.Matcher`.
+        """
+
+        s = matcher.score()
+        if self.final:
+            s = self.final(searcher, matcher.id(), s)
+        return s
+
+    def collect(self, score, id):
+        """This method is called for each found document. This method is only
+        called by :meth:`Collector.add_all_matches`.
+
+        :param score: the score for this document. This may be None if the
+            collector is not set up to compute scores.
+        :param id: the document number of the document.
+        """
+
+        # This method is only called by add_all_matches
+        self._items.append((score, id))
+        self.docset.add(id)
+
+    def should_add_all(self):
+        """Returns True if this collector needs to add all found documents (for
+        example, if ``limit=None``), or False if this collector should only
+        add the top N found documents.
+        """
+
+        return not self.scored or not self.limit or self.groupedby
+
+    def add_matches(self, searcher, matcher):
+        """Calls either :meth:Collector.add_top_matches` or
+        :meth:`Collector.add_all_matches` depending on whether this collector
+        needs to examine all documents.
+        """
+
+        if self.should_add_all():
+            return self.add_all_matches(searcher, matcher)
+        else:
+            return self.add_top_matches(searcher, matcher)
+
+    def add_top_matches(self, searcher, matcher):
+        """Adds the matched documents from the given matcher to the collector's
+        priority queue.
+        """
+
+        offset = self.doc_offset
+        limit = self.limit
+        items = self._items
+        usequality = self.usequality
+        score = self.score
+        comb = self._comb
+        timelimited = bool(self.timelimit)
+        greedy = self.greedy
+
+        for id, quality in self.pull_matches(matcher, usequality):
+            if timelimited and not greedy and self.timesup:
+                raise TimeLimit
+
+            offsetid = id + offset
+            if comb and offsetid not in comb:
+                continue
+
+            if len(items) < limit:
+                # The heap isn't full, so just add this document
+                heappush(items, (score(searcher, matcher), offsetid, quality))
+
+            elif quality > self.minquality:
+                # The heap is full, but the posting quality indicates
+                # this document is good enough to make the top N, so
+                # calculate its true score and add it to the heap
+
+                s = score(searcher, matcher)
+                if s > items[0][0]:
+                    heapreplace(items, (s, offsetid, quality))
+                    self.minquality = items[0][2]
+
+            if timelimited and self.timesup:
+                raise TimeLimit
+
+    def add_all_matches(self, searcher, matcher):
+        """Adds the matched documents from the given matcher to the collector's
+        list of matched documents.
+        """
+
+        offset = self.doc_offset
+        limit = self.limit
+        items = self._items
+        scored = self.scored
+        score = self.score
+        comb = self._comb
+        timelimited = bool(self.timelimit)
+        greedy = self.greedy
+        reverse = self.reverse
+
+        keyfns = None
+        if self.groupedby:
+            limit = None
+            keyfns = {}
+            for name in self.groupedby:
+                keyfns[name] = searcher.reader().key_fn(name)
+
+        for id, _ in self.pull_matches(matcher, False):
+            if timelimited and not greedy and self.timesup:
+                raise TimeLimit
+
+            offsetid = id + offset
+            if comb and offsetid not in comb:
+                continue
+
+            if keyfns:
+                for name, keyfn in keyfns.iteritems():
+                    if name not in self.groups:
+                        self.groups[name] = defaultdict(list)
+                    key = keyfn(id)
+                    self.groups[name][key].append(id)
+
+            scr = 0
+            if scored:
+                scr = score(searcher, matcher)
+            self.collect(scr, offsetid)
+
+            if limit:
+                if reverse and len(items) > limit:
+                    items.popleft()
+                elif (not reverse) and len(items) >= limit:
+                    return True
+
+            if timelimited and self.timesup:
+                raise TimeLimit
+
+    def pull_matches(self, matcher, usequality):
+        """Low-level method yields (docid, quality) pairs from the given
+        matcher. Called by :meth:`Collector.add_top_matches` and
+        :meth:`Collector.add_all_matches`. If ``usequality`` is False or the
+        matcher doesn't support quality, the second item in each pair will be
+        ``None``.
+        """
+
+        docset = self.docset
+
+        # Can't use quality optimizations if the matcher doesn't support them
+        usequality = usequality and matcher.supports_quality()
+        replace = self.replace
+
+        # A flag to indicate whether we should check block quality at the start
+        # of the next loop
+        checkquality = True
+        replacecounter = 0
+
+        while matcher.is_active():
+            # If we're using quality optimizations, and the checkquality flag
+            # is true, try to skip ahead to the next block with the minimum
+            # required quality
+            if usequality and checkquality and self.minquality is not None:
+                matcher.skip_to_quality(self.minquality)
+                # Skipping ahead might have moved the matcher to the end of the
+                # posting list
+                if not matcher.is_active():
+                    break
+
+            # The current document ID
+            id = matcher.id()
+
+            if not usequality:
+                docset.add(id)
+
+            # If we're using quality optimizations, check whether the current
+            # posting has higher quality than the minimum before yielding it.
+            if usequality:
+                postingquality = matcher.quality()
+                if postingquality > self.minquality:
+                    yield (id, postingquality)
+            else:
+                yield (id, None)
+
+            # Move to the next document. This method returns True if the
+            # matcher has entered a new block, so we should check block quality
+            # again.
+            checkquality = matcher.next()
+
+            # Ask the matcher to replace itself with a more efficient version
+            # if possible
+            if replace and matcher.is_active():
+                replacecounter += 1
+                if replacecounter >= replace:
+                    matcher = matcher.replace()
+
+    def items(self):
+        """Returns the collected hits as a list of (score, docid) pairs.
+        """
+
+        # Turn the heap into a sorted list by sorting by score first (subtract
+        # from 0 to put highest scores first) and then by document number (to
+        # enforce a consistent ordering of documents with equal score)
+        items = self._items
+        if self.scored or self.reverse:
+            items = sorted(self._items, key=lambda x: (0 - x[0], x[1]),
+                           reverse=self.reverse)
+        return items
+
+    def results(self, runtime=None):
+        """Returns the collected hits as a :class:`Results` object.
+        """
+
+        docset = self.docset or None
+        return Results(self._searcher, self._q, self.items(), docset,
+                       groups=self.groups, runtime=runtime)
+
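+# Example usage (illustrative sketch, assuming "searcher" and "myquery"
+# exist): a minimal custom collector that records the order in which
+# documents were collected. The search is started from the collector, as
+# recommended above, so its settings are not overwritten.
+#
+#     class OrderCollector(Collector):
+#         def __init__(self, *args, **kwargs):
+#             Collector.__init__(self, *args, **kwargs)
+#             self.order = []
+#
+#         def collect(self, score, id):
+#             self.order.append(id)
+#             Collector.collect(self, score, id)
+#
+#     col = OrderCollector(limit=None)
+#     results = col.search(searcher, myquery)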
+
+class TermTrackingCollector(Collector):
+    """This collector records which parts of the query matched which documents
+    in the final results. The results for each part of the query are available
+    as a dictionary in the ``catalog`` attribute of the collector after the
+    search, where the keys are representations of the parts of the query and
+    the values are sets of document numbers that matched that part of the
+    query.
+
+    How to choose a key to represent query objects in the ``catalog``
+    dictionary was not entirely clear. The current implementation uses the
+    unicode representation of the query object, which usually returns something
+    at least recognizable (for example, ``unicode(Term("f", u"a")) == u"f:a"``
+    and ``unicode(Prefix("f", "b")) == u"f:b*"``).
+
+    >>> myparser = qparser.QueryParser("content", myindex.schema)
+    >>> myquery = myparser.parse(u"apple OR bear NOT camel")
+    >>> col = TermTrackingCollector()
+    >>> results = searcher.search(myquery, collector=col)
+    >>> # The docnums in the results that contained "apple"
+    >>> col.catalog["content:apple"]
+    set([1, 2, 3])
+    >>> for hit in results:
+    ...     print hit.rank, ":", hit["title"]
+    ...     for key, docset in col.catalog.items():
+    ...         if hit.docnum in docset:
+    ...             print "   - Contains", key
+    """
+
+    # This collector works by rewriting the query with "TaggedQuery" wrappers
+    # around the leaf nodes before it searches. When the base collector generates
+    # a matcher tree from the query tree, these wrappers "phone home" to this
+    # collector and register the leaf matchers. Then, when collecting hits, the
+    # collector checks with the leaf matchers at each hit to see if they are
+    # matching the current document.
+
+    def __init__(self, *args, **kwargs):
+        super(TermTrackingCollector, self).__init__(*args, **kwargs)
+        self.matchers = []
+        self.catalog = {}
+
+    def add_searcher(self, searcher, q):
+        # For each searcher added to the collector, reset the list of matchers
+        # and re-tag the query
+        self.matchers = []
+        q = self._tag(q)
+        return super(TermTrackingCollector, self).add_searcher(searcher, q)
+
+    def should_add_all(self):
+        # If you're using this collector, you need to examine all documents
+        return True
+
+    def collect(self, score, id):
+        # The id passed to this method is rebased for the top-level searcher,
+        # so we need to subtract the doc offset from it before we can compare
+        # it to a matcher's id()
+        offset = self.doc_offset
+
+        # Check the registered matchers, and if they're contributing to the
+        # current match, add the current match to the set of documents
+        # containing them
+        for q, m in self.matchers:
+            if m.is_active() and m.id() == id - offset:
+                key = unicode(q)
+                if key not in self.catalog:
+                    self.catalog[key] = set()
+                self.catalog[key].add(id)
+
+        super(TermTrackingCollector, self).collect(score, id)
+
+    def _tag(self, q):
+        # Takes a query and returns a copy of the query with a TaggedQuery
+        # wrapper around any leaf nodes in the query tree
+        if isinstance(q, query.Not):
+            return q
+        elif q.is_leaf():
+            return TermTrackingCollector.TaggedQuery(q, self)
+        else:
+            return q.apply(self._tag)
+
+    def _tag_matcher(self, q, m):
+        # This method is called from the TaggedQuery wrappers that the _tag
+        # method added to the query
+        self.matchers.append((q, m))
+
+    class TaggedQuery(query.WrappingQuery):
+        # The only purpose of this query wrapper is to "call home" to the
+        # TrackingCollector instance when the child query generates a matcher
+        # so the TrackingCollector can register it
+
+        def __init__(self, child, tracker):
+            self.child = child
+            self.tracker = tracker
+
+        def matcher(self, searcher):
+            m = self.child.matcher(searcher)
+            self.tracker._tag_matcher(self.child, m)
+            return m
+
+
+class Results(object):
+    """This object is returned by a Searcher. This object represents the
+    results of a search query. You can mostly use it as if it was a list of
+    dictionaries, where each dictionary is the stored fields of the document at
+    that position in the results.
+    """
+
+    def __init__(self, searcher, q, top_n, docset, groups=None, runtime=-1):
+        """
+        :param searcher: the :class:`Searcher` object that produced these
+            results.
+        :param q: the original query that created these results.
+        :param top_n: a list of (score, docnum) tuples representing the top
+            N search results.
+        :param docset: a set of the document numbers of the documents that
+            matched the query, or None if it has not been computed yet.
+        :param runtime: the time it took to run this search.
+        """
+
+        self.searcher = searcher
+        self.q = q
+        self.top_n = top_n
+        self.docset = docset
+        self._groups = groups or {}
+        self.runtime = runtime
+        self._terms = None
+
+        self.fragmenter = highlight.ContextFragmenter()
+        self.fragment_scorer = highlight.BasicFragmentScorer()
+        self.formatter = highlight.HtmlFormatter(tagname="b")
+
+    def __repr__(self):
+        return "<Top %s Results for %r runtime=%s>" % (len(self.top_n),
+                                                       self.q,
+                                                       self.runtime)
+
+    def __len__(self):
+        """Returns the total number of documents that matched the query. Note
+        this may be more than the number of scored documents, given the value
+        of the ``limit`` keyword argument to :meth:`Searcher.search`.
+
+        If this Results object was created by searching with a ``limit``
+        keyword, then computing the exact length of the result set may be
+        expensive for large indexes or large result sets. You may consider
+        using :meth:`Results.has_exact_length`,
+        :meth:`Results.estimated_length`, and
+        :meth:`Results.estimated_min_length` to display an estimated size of
+        the result set instead of an exact number.
+        """
+
+        if self.docset is None:
+            self._load_docs()
+        return len(self.docset)
+
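+    # Example usage (illustrative sketch): when the exact count may be
+    # expensive to compute, display an estimate instead.
+    #
+    #     if results.has_exact_length():
+    #         print "Found %s documents" % len(results)
+    #     else:
+    #         print "Found about %s documents" % results.estimated_length()
+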
+    def __getitem__(self, n):
+        if isinstance(n, slice):
+            start, stop, step = n.indices(len(self.top_n))
+            return [Hit(self, self.top_n[i][1], i, self.top_n[i][0])
+                    for i in xrange(start, stop, step)]
+        else:
+            return Hit(self, self.top_n[n][1], n, self.top_n[n][0])
+
+    def __iter__(self):
+        """Yields a :class:`Hit` object for each result in ranked order.
+        """
+
+        for i in xrange(len(self.top_n)):
+            yield Hit(self, self.top_n[i][1], i, self.top_n[i][0])
+
+    def __contains__(self, docnum):
+        """Returns True if the given document number matched the query.
+        """
+
+        if self.docset is None:
+            self._load_docs()
+        return docnum in self.docset
+
+    def items(self):
+        """Returns an iterator of (docnum, score) pairs for the scored
+        documents in the results.
+        """
+
+        return ((docnum, score) for score, docnum in self.top_n)
+
+    def terms(self):
+        if self._terms is None:
+            self._terms = self.q.existing_terms(self.searcher.reader())
+        return self._terms
+
+    def fields(self, n):
+        """Returns the stored fields for the document at the ``n`` th position
+        in the results. Use :meth:`Results.docnum` if you want the raw
+        document number instead of the stored fields.
+        """
+
+        return self.searcher.stored_fields(self.top_n[n][1])
+
+    def groups(self, name):
+        """If you generating groupings for the results by using the `groups`
+        keyword to the `search()` method, you can use this method to retrieve
+        the groups.
+
+        >>> results = searcher.search(my_query, groupedby=["tag"])
+        >>> results.groups("tag")
+
+        Returns a dictionary mapping category names to lists of document IDs.
+        """
+
+        return self._groups[name]
+
+    def _load_docs(self):
+        self.docset = set(self.searcher.docs_for_query(self.q))
+
+    def has_exact_length(self):
+        """True if this results object already knows the exact number of
+        matching documents.
+        """
+
+        return self.docset is not None
+
+    def estimated_length(self):
+        """The estimated maximum number of matching documents, or the
+        exact number of matching documents if it's known.
+        """
+
+        if self.docset is not None:
+            return len(self.docset)
+        return self.q.estimate_size(self.searcher.reader())
+
+    def estimated_min_length(self):
+        """The estimated minimum number of matching documents, or the
+        exact number of matching documents if it's known.
+        """
+
+        if self.docset is not None:
+            return len(self.docset)
+        return self.q.estimate_min_size(self.searcher.reader())
+
+    def scored_length(self):
+        """Returns the number of scored documents in the results, equal to or
+        less than the ``limit`` keyword argument to the search.
+
+        >>> r = mysearcher.search(myquery, limit=20)
+        >>> len(r)
+        1246
+        >>> r.scored_length()
+        20
+
+        This may be fewer than the total number of documents that match the
+        query, which is what ``len(Results)`` returns.
+        """
+
+        return len(self.top_n)
+
+    def docs(self):
+        """Returns a set-like object containing the document numbers that
+        matched the query.
+        """
+
+        if self.docset is None:
+            self._load_docs()
+        return self.docset
+
+    def copy(self):
+        """Returns a copy of this results object.
+        """
+
+        return self.__class__(self.searcher, self.q, self.top_n[:],
+                              copy.copy(self.docset), runtime=self.runtime)
+
+    def score(self, n):
+        """Returns the score for the document at the Nth position in the list
+        of ranked documents. If the search was not scored, this may return None.
+        """
+
+        return self.top_n[n][0]
+
+    def docnum(self, n):
+        """Returns the document number of the result at position n in the list
+        of ranked documents.
+        """
+        return self.top_n[n][1]
+
+    def highlights(self, n, fieldname, text=None, top=3, fragmenter=None,
+                   formatter=None, order=highlight.FIRST):
+        """Returns highlighted snippets for the document in the Nth position
+        in the results. It is usually more convenient to call this method on a
+        Hit object instead of the Results.
+
+        See the docs for the :meth:`Hit.highlights` method.
+        """
+
+        if text is None:
+            d = self.fields(n)
+            if fieldname not in d:
+                raise KeyError("Field %r is not in the stored fields." % fieldname)
+            text = d[fieldname]
+
+        analyzer = self.searcher.schema[fieldname].format.analyzer
+        fragmenter = fragmenter or self.fragmenter
+        formatter = formatter or self.formatter
+
+        terms = set(ttext for fname, ttext in self.terms() if fname == fieldname)
+        return highlight.highlight(text, terms, analyzer, fragmenter,
+                                   formatter, top=top,
+                                   scorer=self.fragment_scorer, order=order)
+
+    def key_terms(self, fieldname, docs=10, numterms=5,
+                  model=classify.Bo1Model, normalize=True):
+        """Returns the 'numterms' most important terms from the top 'numdocs'
+        documents in these results. "Most important" is generally defined as
+        terms that occur frequently in the top hits but relatively infrequently
+        in the collection as a whole.
+
+        :param fieldname: Look at the terms in this field. This field must
+            store vectors.
+        :param docs: Look at this many of the top documents of the results.
+        :param numterms: Return this number of important terms.
+        :param model: The classify.ExpansionModel to use. See the classify
+            module.
+        :returns: list of (term, weight) tuples.
+        """
+
+        if not len(self):
+            return
+        docs = min(docs, len(self))
+
+        reader = self.searcher.reader()
+
+        expander = classify.Expander(reader, fieldname, model=model)
+        for _, docnum in self.top_n[:docs]:
+            expander.add_document(docnum)
+
+        return expander.expanded_terms(numterms, normalize=normalize)
+
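+    # Example usage (illustrative sketch, assuming a query object "myquery"
+    # and a "content" field that stores term vectors): pull key terms from
+    # the top hits of a search.
+    #
+    #     results = searcher.search(myquery)
+    #     keywords = list(results.key_terms("content", docs=10, numterms=5))
+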
+    def extend(self, results):
+        """Appends hits from 'results' (that are not already in this
+        results object) to the end of these results.
+
+        :param results: another results object.
+        """
+
+        docs = self.docs()
+        for item in results.top_n:
+            if item[1] not in docs:
+                self.top_n.append(item)
+        self.docset = docs | results.docs()
+
+    def filter(self, results):
+        """Removes any hits that are not also in the other results object.
+        """
+
+        if not len(results):
+            return
+
+        otherdocs = results.docs()
+        items = [item for item in self.top_n if item[1] in otherdocs]
+        self.docset = self.docs() & otherdocs
+        self.top_n = items
+
+    def upgrade(self, results, reverse=False):
+        """Re-sorts the results so any hits that are also in 'results' appear
+        before hits not in 'results', otherwise keeping their current relative
+        positions. This does not add the documents in the other results object
+        to this one.
+
+        :param results: another results object.
+        :param reverse: if True, lower the position of hits in the other
+            results object instead of raising them.
+        """
+
+        if not len(results):
+            return
+
+        otherdocs = results.docs()
+        arein = [item for item in self.top_n if item[1] in otherdocs]
+        notin = [item for item in self.top_n if item[1] not in otherdocs]
+
+        if reverse:
+            items = notin + arein
+        else:
+            items = arein + notin
+
+        self.top_n = items
+
+    def upgrade_and_extend(self, results):
+        """Combines the effects of extend() and increase(): hits that are also
+        in 'results' are raised. Then any hits from the other results object
+        that are not in this results object are appended to the end.
+
+        :param results: another results object.
+        """
+
+        if not len(results):
+            return
+
+        docs = self.docs()
+        otherdocs = results.docs()
+
+        arein = [item for item in self.top_n if item[1] in otherdocs]
+        notin = [item for item in self.top_n if item[1] not in otherdocs]
+        other = [item for item in results.top_n if item[1] not in docs]
+
+        self.docset = docs | otherdocs
+        self.top_n = arein + notin + other
+
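+    # Example usage (illustrative sketch; "strict_query" and "broad_query"
+    # are assumed query objects): combine a strict and a broad query so that
+    # strict matches rank first but broader matches still appear.
+    #
+    #     broad = searcher.search(broad_query, limit=None)
+    #     strict = searcher.search(strict_query)
+    #     broad.upgrade_and_extend(strict)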
+
+class Hit(object):
+    """Represents a single search result ("hit") in a Results object.
+
+    This object acts like a dictionary of the matching document's stored
+    fields. If for some reason you need an actual ``dict`` object, use
+    ``Hit.fields()`` to get one.
+
+    >>> r = searcher.search(query.Term("content", "render"))
+    >>> r[0]
+    <Hit {title=u"Rendering the scene"}>
+    >>> r[0].rank
+    0
+    >>> r[0].docnum
+    4592L
+    >>> r[0].score
+    2.52045682
+    >>> r[0]["title"]
+    "Rendering the scene"
+    >>> r[0].keys()
+    ["title"]
+    """
+
+    def __init__(self, results, docnum, pos=None, score=None):
+        """
+        :param results: the Results object this hit belongs to.
+        :param pos: the position in the results list of this hit, for example
+            pos=0 means this is the first (highest scoring) hit.
+        :param docnum: the document number of this hit.
+        :param score: the score of this hit.
+        """
+
+        self.results = results
+        self.searcher = results.searcher
+        self.pos = self.rank = pos
+        self.docnum = docnum
+        self.score = score
+        self._fields = None
+
+    def fields(self):
+        """Returns a dictionary of the stored fields of the document this
+        object represents.
+        """
+
+        if self._fields is None:
+            self._fields = self.searcher.stored_fields(self.docnum)
+        return self._fields
+
+    def highlights(self, fieldname, text=None, top=3, fragmenter=None,
+                   formatter=None, order=highlight.FIRST):
+        """Returns highlighted snippets from the given field::
+
+            r = searcher.search(myquery)
+            for hit in r:
+                print hit["title"]
+                print hit.highlights("content")
+
+        See :doc:`how to highlight terms in search results </highlight>` for
+        more information.
+
+        You can set the ``fragmenter`` and ``formatter`` attributes on the
+        ``Results`` object instead of specifying the ``fragmenter`` and
+        ``formatter`` arguments to this method. For example, to return larger
+        fragments and highlight them by converting to upper-case instead of
+        with HTML tags::
+
+            from whoosh import highlight
+
+            r = searcher.search(myquery)
+            r.fragmenter = highlight.ContextFragmenter(surround=40)
+            r.formatter = highlight.UppercaseFormatter()
+            for hit in r:
+                print hit["title"]
+                print hit.highlights("content")
+
+        :param fieldname: the name of the field you want to highlight.
+        :param text: by default, the method will attempt to load the contents
+            of the field from the stored fields for the document. If the field
+            you want to highlight isn't stored in the index, but you have
+            access to the text another way (for example, loading from a file or
+            a database), you can supply it using the ``text`` parameter.
+        :param top: the maximum number of fragments to return.
+        :param fragmenter: A :class:`whoosh.highlight.Fragmenter` object. This
+            controls how the text is broken in fragments. The default is
+            :class:`whoosh.highlight.ContextFragmenter`. For some applications
+            you may find that a different fragmenting algorithm, such as
+            :class:`whoosh.highlight.SentenceFragmenter` gives better results.
+            For short fields you could use
+            :class:`whoosh.highlight.WholeFragmenter` which returns the entire
+            field as a single fragment.
+        :param formatter: A :class:`whoosh.highlight.Formatter` object. This
+            controls how the search terms are highlighted in the snippets. The
+            default is :class:`whoosh.highlight.HtmlFormatter` with
+            ``tagname='b'``.
+
+            Note that different formatters may return different objects, e.g.
+            plain text, HTML, a Genshi event stream, a SAX event generator,
+            etc.
+        :param order: the order of the fragments. This should be one of
+            :func:`whoosh.highlight.SCORE`, :func:`whoosh.highlight.FIRST`,
+            :func:`whoosh.highlight.LONGER`,
+            :func:`whoosh.highlight.SHORTER`, or a custom sorting function. The
+            default is ``highlight.FIRST``.
+        """
+
+        return self.results.highlights(self.rank, fieldname, text=text,
+                                       top=top, fragmenter=fragmenter,
+                                       formatter=formatter, order=order)
+
+    def more_like_this(self, fieldname, text=None, top=10, numterms=5,
+                       model=classify.Bo1Model, normalize=True):
+        """Returns a new Results object containing documents similar to this
+        hit, based on "key terms" in the given field::
+
+            r = searcher.search(myquery)
+            for hit in r:
+                print hit["title"]
+                print "Top 3 similar documents:"
+                for subhit in hit.more_like_this("content", top=3):
+                  print "  ", subhit["title"]
+
+        :param fieldname: the name of the field to use to test similarity.
+        :param text: by default, the method will attempt to load the contents
+            of the field from the stored fields for the document, or from a
+            term vector. If the field isn't stored or vectored in the index,
+            but you have access to the text another way (for example, loading
+            from a file or a database), you can supply it using the ``text``
+            parameter.
+        :param top: the number of results to return.
+        :param numterms: the number of "key terms" to extract from the hit and
+            search for. Using more terms is slower but gives potentially more
+            and more accurate results.
+        :param model: (expert) a :class:`whoosh.classify.ExpansionModel` to use
+            to compute "key terms".
+        :param normalize: whether to normalize term weights.
+        """
+
+        return self.searcher.more_like(self.docnum, fieldname, text=text,
+                                       top=top, numterms=numterms,
+                                       model=model, normalize=normalize)
+
+    def __repr__(self):
+        return "<%s %r>" % (self.__class__.__name__, self.fields())
+
+    def __eq__(self, other):
+        if isinstance(other, Hit):
+            return self.fields() == other.fields()
+        elif isinstance(other, dict):
+            return self.fields() == other
+        else:
+            return False
+
+    def __len__(self):
+        return len(self.fields())
+
+    def __iter__(self):
+        return self.fields().iterkeys()
+
+    def __getitem__(self, key):
+        return self.fields().__getitem__(key)
+
+    def __contains__(self, key):
+        return key in self.fields()
+
+    def items(self):
+        return self.fields().items()
+
+    def keys(self):
+        return self.fields().keys()
+
+    def values(self):
+        return self.fields().values()
+
+    def iteritems(self):
+        return self.fields().iteritems()
+
+    def iterkeys(self):
+        return self.fields().iterkeys()
+
+    def itervalues(self):
+        return self.fields().itervalues()
+
+    def get(self, key, default=None):
+        return self.fields().get(key, default)
+
+    def __setitem__(self, key, value):
+        raise NotImplementedError("You cannot modify a search result")
+
+    def __delitem__(self, key):
+        raise NotImplementedError("You cannot modify a search result")
+
+    def clear(self):
+        raise NotImplementedError("You cannot modify a search result")
+
+    def update(self, dict=None, **kwargs):
+        raise NotImplementedError("You cannot modify a search result")
+
+
+class ResultsPage(object):
+    """Represents a single page out of a longer list of results, as returned
+    by :func:`whoosh.searching.Searcher.search_page`. Supports a subset of the
+    interface of the :class:`~whoosh.searching.Results` object, namely getting
+    stored fields with __getitem__ (square brackets), iterating, and the
+    ``score()`` and ``docnum()`` methods.
+
+    The ``offset`` attribute contains the results number this page starts at
+    (numbered from 0). For example, if the page length is 10, the ``offset``
+    attribute on the second page will be ``10``.
+
+    The ``pagecount`` attribute contains the number of pages available.
+
+    The ``pagenum`` attribute contains the page number. This may be less than
+    the page you requested if the results had too few pages. For example, if
+    you do::
+
+        ResultsPage(results, 5)
+
+    but the results object only contains 3 pages worth of hits, ``pagenum``
+    will be 3.
+
+    The ``pagelen`` attribute contains the number of results on this page
+    (which may be less than the page length you requested if this is the last
+    page of the results).
+
+    The ``total`` attribute contains the total number of hits in the results.
+
+    >>> mysearcher = myindex.searcher()
+    >>> pagenum = 2
+    >>> page = mysearcher.search_page(myquery, pagenum)
+    >>> print("Page %s of %s, results %s to %s of %s" %
+    ...       (pagenum, page.pagecount, page.offset+1,
+    ...        page.offset+page.pagelen, page.total))
+    >>> for i, fields in enumerate(page):
+    ...   print("%s. %r" % (page.offset + i + 1, fields))
+    >>> mysearcher.close()
+    """
+
+    def __init__(self, results, pagenum, pagelen=10):
+        """
+        :param results: a :class:`~whoosh.searching.Results` object.
+        :param pagenum: which page of the results to use, numbered from ``1``.
+        :param pagelen: the number of hits per page.
+        """
+
+        self.results = results
+        self.total = len(results)
+
+        if pagenum < 1:
+            raise ValueError("pagenum must be >= 1")
+
+        self.pagecount = int(ceil(self.total / float(pagelen)))
+        if pagenum > 1 and pagenum > self.pagecount:
+            raise ValueError("Asked for page %s of %s" % (pagenum, self.pagecount))
+
+        self.pagenum = pagenum
+
+        offset = (pagenum - 1) * pagelen
+        if (offset + pagelen) > self.total:
+            pagelen = self.total - offset
+        self.offset = offset
+        self.pagelen = pagelen
+
+    def __getitem__(self, n):
+        offset = self.offset
+        if isinstance(n, slice):
+            start, stop, step = n.indices(self.pagelen)
+            return self.results.__getitem__(slice(start + offset, stop + offset, step))
+        else:
+            return self.results.__getitem__(n + offset)
+
+    def __iter__(self):
+        return iter(self.results[self.offset:self.offset + self.pagelen])
+
+    def __len__(self):
+        return self.total
+
+    def scored_length(self):
+        return self.results.scored_length()
+
+    def score(self, n):
+        """Returns the score of the hit at the nth position on this page.
+        """
+        return self.results.score(n + self.offset)
+
+    def docnum(self, n):
+        """Returns the document number of the hit at the nth position on this
+        page.
+        """
+        return self.results.docnum(n + self.offset)
+
+    def is_last_page(self):
+        """Returns True if this object represents the last page of results.
+        """
+
+        return self.pagecount == 0 or self.pagenum == self.pagecount
+
+
+
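
A minimal paging sketch built on the ResultsPage/``search_page()`` API above, included purely as an illustration: the index ``ix``, its "content" field, and the QueryParser-based query are assumptions, not part of the patched sources.

    from whoosh.qparser import QueryParser

    def print_all_pages(ix, text, pagelen=10):
        searcher = ix.searcher()
        try:
            q = QueryParser("content", schema=ix.schema).parse(text)
            pagenum = 1
            while True:
                page = searcher.search_page(q, pagenum, pagelen=pagelen)
                for i, fields in enumerate(page):
                    # page offsets are 0-based; add 1 for a human-readable rank
                    print("%d. %r" % (page.offset + i + 1, fields))
                if page.is_last_page():
                    break
                pagenum += 1
        finally:
            searcher.close()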
diff --git a/lib/whoosh/whoosh/sorting.py b/lib/whoosh/whoosh/sorting.py
new file mode 100644
index 0000000..a7b8ae4
--- /dev/null
+++ b/lib/whoosh/whoosh/sorting.py
@@ -0,0 +1,257 @@
+# Copyright 2011 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from array import array
+from heapq import nlargest, nsmallest
+
+from whoosh.searching import Results
+from whoosh.util import now
+
+
+class Sorter(object):
+    """This object does the work of sorting search results.
+
+    For simple sorting (where all fields go in the same direction), you can
+    just use the ``sortedby`` and ``reverse`` arguments to
+    :meth:`whoosh.searching.Searcher.search`::
+
+        # Sort by ascending group
+        r = searcher.search(myquery, sortedby="group")
+        # Sort by ascending path and then ascending price
+        r = searcher.search(myquery, sortedby=("path", "price"))
+        # Sort by descending path
+        r = searcher.search(myquery, sortedby="path", reverse=True)
+
+    These are the equivalent of using the sorter directly::
+
+        # Sort by ascending path and then ascending price
+        sorter = searcher.sorter()
+        sorter.add_field("path")
+        sorter.add_field("price")
+        r = sorter.sort_query(myquery)
+
+    For complex sorting (where some fields are ascending and some fields are
+    descending), you must instantiate a sorter object from the searcher and
+    specify the fields to sort by::
+
+        # Sort by ascending group and then descending price
+        sorter = searcher.sorter()
+        sorter.add_field("group")
+        sorter.add_field("price", reverse=True)
+        r = sorter.sort_query(myquery)
+
+    Alternatively, you can set up the sort criteria using a keyword argument::
+
+        # Sort by ascending group and then descending price
+        crits = [("group", False), ("price", True)]
+        sorter = searcher.sorter(criteria=crits)
+        r = sorter.sort_query(myquery)
+
+    Note that complex sorting can be much slower on large indexes than a
+    sort in which all fields are sorted in the same direction. Also, when you
+    do this type of sort on a multi-segment index, the sort cannot reuse field
+    caches and must recreate a field cache-like structure across the entire
+    index, which can effectively double memory usage for cached fields.
+
+    You can re-use a configured sorter with different queries. However, the
+    sorter object always returns results from the searcher it was created with.
+    If the index changes and you refresh the searcher, you need to recreate the
+    sorter object to see the updates.
+    """
+
+    def __init__(self, searcher, criteria=None, sortedby=None):
+        """
+        :param searcher: a :class:`whoosh.searching.Searcher` object to use for
+            searching.
+        :param criteria: a list of ``(fieldname, reversed)`` tuples, where the
+            second value in each tuple is a boolean indicating whether to
+            reverse the order of the sort for that field. Alternatively you can
+            use the :meth:`Sorter.add_field` method on the instantiated sorter.
+        :param sortedby: a convenience that generates a proper "criteria" list
+            from a fieldname string or list of fieldnames, to set up the sorter
+            for a simple search.
+        """
+
+        self.searcher = searcher
+        self.criteria = criteria or []
+        if sortedby:
+            if isinstance(sortedby, basestring):
+                sortedby = [sortedby]
+            for fieldname in sortedby:
+                self.criteria.append((fieldname, False))
+
+        self.arrays = None
+
+    def add_field(self, fieldname, reverse=False):
+        """Adds a field to the sorting criteria. Results are sorted by the
+        fields in the order you add them. For example, if you do::
+
+            sorter.add_field("group")
+            sorter.add_field("price")
+
+        ...the results are sorted by ``group``, and results with the same
+        value of ``group`` are then sorted by ``price``.
+
+        :param fieldname: the name of the field to sort by.
+        :param reverse: if True, reverses the natural ordering of the field.
+        """
+
+        self.criteria.append((fieldname, reverse))
+
+    def is_simple(self):
+        """Returns ``True`` if this is a "simple" sort (all the fields are
+        sorted in the same direction).
+        """
+
+        if len(self.criteria) < 2:
+            return True
+
+        firstdir = self.criteria[0][1]
+        return all(c[1] == firstdir for c in self.criteria)
+
+    def _results(self, q, docnums, docset, runtime):
+        top_n = [(None, docnum) for docnum in docnums]
+        return Results(self.searcher, q, top_n, docset, runtime=runtime)
+
+    def _simple_sort_query(self, q, limit=None, reverse=False, filter=None):
+        # If the direction of all sort fields is the same, we can use field
+        # caches to do the sorting
+
+        t = now()
+        docset = set()
+        sortedby = [c[0] for c in self.criteria]
+        reverse = self.criteria[0][1] ^ reverse
+        comb = self.searcher._filter_to_comb(filter)
+
+        if self.searcher.subsearchers:
+            heap = []
+
+            # I wish I could actually do a heap thing here, but the Python heap
+            # queue only works with greater-than, and I haven't thought of a
+            # smart way to get around that yet, so I'm being dumb and using
+            # nlargest/nsmallest on the heap + each subreader list :(
+            op = nlargest if reverse else nsmallest
+
+            for s, offset in self.searcher.subsearchers:
+                # This searcher is wrapping a MultiReader, so push the sorting
+                # down to the leaf readers and then combine the results.
+                docnums = [docnum for docnum in q.docs(s)
+                           if (not comb) or docnum + offset in comb]
+
+                # Add the docnums to the docset
+                docset.update(docnums)
+
+                # Ask the reader to return a list of (key, docnum) pairs to
+                # sort by. If limit=None, the returned list is not sorted. If
+                # a limit is given, it is sorted.
+                r = s.reader()
+                srt = r.key_docs_by(sortedby, docnums, limit, reverse=reverse,
+                                    offset=offset)
+                if limit:
+                    # Pick the "limit" smallest/largest items from the current
+                    # and new list
+                    heap = op(limit, heap + srt)
+                else:
+                    # If limit=None, we'll just add everything to the "heap"
+                    # and sort it at the end.
+                    heap.extend(srt)
+
+            # Sort the heap and take the docnums
+            docnums = [docnum for _, docnum in sorted(heap, reverse=reverse)]
+
+        else:
+            # This searcher is wrapping an atomic reader, so we don't need to
+            # get tricky combining the results of multiple readers, just ask
+            # the reader to sort the results.
+            r = self.searcher.reader()
+            docnums = [docnum for docnum in q.docs(self.searcher)
+                       if (not comb) or docnum in comb]
+            docnums = r.sort_docs_by(sortedby, docnums, reverse=reverse)
+            docset = set(docnums)
+
+            # I artificially enforce the limit here, even though the current
+            # implementation can't use it, so that the results don't change
+            # based on single- vs- multi-segment.
+            docnums = docnums[:limit]
+
+        runtime = now() - t
+        return self._results(q, docnums, docset, runtime)
+
+    def _complex_cache(self):
+        self.arrays = []
+        r = self.searcher.reader()
+        for name, reverse in self.criteria:
+            arry = array("i", [0] * r.doc_count_all())
+            field = self.searcher.schema[name]
+            for i, (t, _) in enumerate(field.sortable_values(r, name)):
+                if reverse:
+                    i = 0 - i
+                postings = r.postings(name, t)
+                for docid in postings.all_ids():
+                    arry[docid] = i
+            self.arrays.append(arry)
+
+    def _complex_key_fn(self, docnum):
+        return tuple(arry[docnum] for arry in self.arrays)
+
+    def _complex_sort_query(self, q, limit=None, reverse=False, filter=None):
+        t = now()
+        if self.arrays is None:
+            self._complex_cache()
+        comb = self.searcher._filter_to_comb(filter)
+        docnums = [docnum for docnum in self.searcher.docs_for_query(q)
+                   if (not comb) or docnum in comb]
+        docnums.sort(key=self._complex_key_fn, reverse=reverse)
+        docset = set(docnums)
+
+        # I artificially enforce the limit here, even though the current
+        # implementation can't use it, so that the results don't change based
+        # on single- vs- multi-segment.
+        if limit:
+            docnums = docnums[:limit]
+        runtime = now() - t
+        return self._results(q, docnums, docset, runtime)
+
+    def sort_query(self, q, limit=None, reverse=False, filter=None):
+        """Returns a :class:`whoosh.searching.Results` object for the given
+        query, sorted according to the fields set up using the
+        :meth:`Sorter.add_field` method.
+
+        The parameters have the same meaning as for the
+        :meth:`whoosh.searching.Searcher.search` method.
+        """
+
+        if self.is_simple():
+            meth = self._simple_sort_query
+        else:
+            meth = self._complex_sort_query
+
+        return meth(q, limit, reverse, filter)
+
+
+
+
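
The complex (mixed-direction) sort above works by giving every document a tuple of per-field ranks and negating the ranks of reversed fields, so a single ascending sort handles both directions. A standalone sketch of that key trick, with made-up documents and field values:

    docs = [
        {"group": "a", "price": 30},
        {"group": "b", "price": 10},
        {"group": "a", "price": 20},
    ]
    criteria = [("group", False), ("price", True)]  # ascending group, descending price

    def rank_arrays(docs, criteria):
        arrays = []
        for name, reverse in criteria:
            # rank each distinct value in its natural ascending order,
            # negating the rank when the field is reversed
            values = sorted(set(d[name] for d in docs))
            ranks = dict((v, -i if reverse else i) for i, v in enumerate(values))
            arrays.append([ranks[d[name]] for d in docs])
        return arrays

    arrays = rank_arrays(docs, criteria)
    key_fn = lambda docnum: tuple(arry[docnum] for arry in arrays)
    print(sorted(range(len(docs)), key=key_fn))  # [0, 2, 1]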
diff --git a/lib/whoosh/whoosh/spans.py b/lib/whoosh/whoosh/spans.py
new file mode 100644
index 0000000..ae08239
--- /dev/null
+++ b/lib/whoosh/whoosh/spans.py
@@ -0,0 +1,668 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""
+This module contains Query objects that deal with "spans".
+
+Span queries allow for positional constraints on matching documents. For
+example, the :class:`whoosh.spans.SpanNear` query matches documents where one
+term occurs near another. Because you can nest span queries, and wrap them
+around almost any non-span query, you can create very complex constraints.
+
+For example, to find documents containing "whoosh" at most 5 positions before
+"library" in the "text" field::
+
+    from whoosh import query, spans
+    t1 = query.Term("text", "whoosh")
+    t2 = query.Term("text", "library")
+    q = spans.SpanNear(t1, t2, slop=5)
+
+"""
+
+from whoosh.matching import (WrappingMatcher, AndMaybeMatcher, UnionMatcher,
+                             IntersectionMatcher, NullMatcher)
+from whoosh.query import Query, And, AndMaybe, Or, Term
+from whoosh.util import make_binary_tree
+
+
+# Span class
+
+class Span(object):
+    __slots__ = ("start", "end", "startchar", "endchar")
+
+    def __init__(self, start, end=None, startchar=None, endchar=None):
+        if end is None:
+            end = start
+        assert start <= end
+        self.start = start
+        self.end = end
+        self.startchar = startchar
+        self.endchar = endchar
+
+    def __repr__(self):
+        if self.startchar or self.endchar:
+            return "<%d-%d %d:%d>" % (self.start, self.end, self.startchar, self.endchar)
+        else:
+            return "<%d-%d>" % (self.start, self.end)
+
+    def __eq__(self, span):
+        return (self.start == span.start
+                and self.end == span.end
+                and self.startchar == span.startchar
+                and self.endchar == span.endchar)
+
+    def __ne__(self, span):
+        return self.start != span.start or self.end != span.end
+
+    def __lt__(self, span):
+        return self.start < span.start
+
+    def __gt__(self, span):
+        return self.start > span.start
+
+    def __hash__(self):
+        return hash((self.start, self.end))
+
+    @classmethod
+    def merge(cls, spans):
+        """Merges overlapping and touches spans in the given list of spans.
+
+        Note that this modifies the original list.
+
+        >>> spans = [Span(1,2), Span(3)]
+        >>> Span.merge(spans)
+        >>> spans
+        [<1-3>]
+        """
+
+        i = 0
+        while i < len(spans) - 1:
+            here = spans[i]
+            j = i + 1
+            while j < len(spans):
+                there = spans[j]
+                if there.start > here.end + 1:
+                    break
+                if here.touches(there) or here.overlaps(there):
+                    here = here.to(there)
+                    spans[i] = here
+                    del spans[j]
+                else:
+                    j += 1
+            i += 1
+        return spans
+
+    def to(self, span):
+        return self.__class__(min(self.start, span.start), max(self.end, span.end),
+                              min(self.startchar, span.startchar), max(self.endchar, span.endchar))
+
+    def overlaps(self, span):
+        return ((self.start >= span.start and self.start <= span.end)
+                or (self.end >= span.start and self.end <= span.end)
+                or (span.start >= self.start and span.start <= self.end)
+                or (span.end >= self.start and span.end <= self.end))
+
+    def surrounds(self, span):
+        return self.start < span.start and self.end > span.end
+
+    def is_within(self, span):
+        return self.start >= span.start and self.end <= span.end
+
+    def is_before(self, span):
+        return self.end < span.start
+
+    def is_after(self, span):
+        return self.start > span.end
+
+    def touches(self, span):
+        return self.start == span.end + 1 or self.end == span.start - 1
+
+    def distance_to(self, span):
+        if self.overlaps(span):
+            return 0
+        elif self.is_before(span):
+            return span.start - self.end
+        elif self.is_after(span):
+            return self.start - span.end
+
+
+# Base matchers
+
+class SpanWrappingMatcher(WrappingMatcher):
+    """An abstract matcher class that wraps a "regular" matcher. This matcher
+    uses the sub-matcher's matching logic, but only matches documents that have
+    matching spans, i.e. where ``_get_spans()`` returns a non-empty list.
+
+    Subclasses must implement the ``_get_spans()`` method, which returns a list
+    of valid spans for the current document.
+    """
+
+    def __init__(self, child):
+        super(SpanWrappingMatcher, self).__init__(child)
+        self._spans = None
+        if self.is_active():
+            self._find_next()
+
+    def copy(self):
+        m = self.__class__(self.child.copy())
+        m._spans = self._spans
+        return m
+
+    def _replacement(self, newchild):
+        return self.__class__(newchild)
+
+    def _find_next(self):
+        if not self.is_active():
+            return
+
+        child = self.child
+        r = False
+
+        spans = self._get_spans()
+        while child.is_active() and not spans:
+            r = child.next() or r
+            if not child.is_active():
+                return True
+            spans = self._get_spans()
+        self._spans = spans
+
+        return r
+
+    def spans(self):
+        return self._spans
+
+    def next(self):
+        self.child.next()
+        self._find_next()
+
+    def skip_to(self, id):
+        self.child.skip_to(id)
+        self._find_next()
+
+    def all_ids(self):
+        while self.is_active():
+            if self.spans():
+                yield self.id()
+            self.next()
+
+
+class SpanBiMatcher(SpanWrappingMatcher):
+    def copy(self):
+        return self.__class__(self.a.copy(), self.b.copy())
+
+    def replace(self):
+        if not self.is_active():
+            return NullMatcher()
+        return self
+
+
+# Queries
+
+class SpanQuery(Query):
+    """Abstract base class for span-based queries. Each span query type wraps
+    a "regular" query that implements the basic document-matching functionality
+    (for example, SpanNear wraps an And query, because SpanNear requires that
+    the two sub-queries occur in the same documents. The wrapped query is
+    stored in the ``q`` attribute.
+
+    Subclasses usually only need to implement the initializer to set the
+    wrapped query, and ``matcher()`` to return a span-aware matcher object.
+    """
+
+    def _subm(self, s):
+        return self.q.matcher(s)
+
+    def __getattr__(self, name):
+        return getattr(self.q, name)
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, self.q)
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.q == other.q)
+
+    def __hash__(self):
+        return hash(self.__class__.__name__) ^ hash(self.q)
+
+
+class SpanFirst(SpanQuery):
+    """Matches spans that end within the first N positions. This lets you
+    for example only match terms near the beginning of the document.
+    """
+
+    def __init__(self, q, limit=0):
+        """
+        :param q: the query to match.
+        :param limit: the query must match within this position at the start
+            of a document. The default is ``0``, which means the query must
+            match at the first position.
+        """
+
+        self.q = q
+        self.limit = limit
+
+    def __eq__(self, other):
+        return (other and self.__class__ is other.__class__
+                and self.q == other.q and self.limit == other.limit)
+
+    def __hash__(self):
+        return hash(self.q) ^ hash(self.limit)
+
+    def is_leaf(self):
+        return False
+
+    def apply(self, fn):
+        return self.__class__(fn(self.q), limit=self.limit)
+
+    def matcher(self, searcher):
+        return SpanFirst.SpanFirstMatcher(self._subm(searcher),
+                                          limit=self.limit)
+
+    class SpanFirstMatcher(SpanWrappingMatcher):
+        def __init__(self, child, limit=0):
+            self.limit = limit
+            super(SpanFirst.SpanFirstMatcher, self).__init__(child)
+
+        def copy(self):
+            return self.__class__(self.child.copy(), limit=self.limit)
+
+        def _replacement(self, newchild):
+            return self.__class__(newchild, limit=self.limit)
+
+        def _get_spans(self):
+            return [span for span in self.child.spans()
+                    if span.end <= self.limit]
+
+
+class SpanNear(SpanQuery):
+    """Matches queries that occur near each other. By default, only matches
+    queries that occur right next to each other (slop=1) and in order
+    (ordered=True).
+
+    For example, to find documents where "whoosh" occurs next to "library"
+    in the "text" field::
+
+        from whoosh import query, spans
+        t1 = query.Term("text", "whoosh")
+        t2 = query.Term("text", "library")
+        q = spans.SpanNear(t1, t2)
+
+    To find documents where "whoosh" occurs at most 5 positions before
+    "library"::
+
+        q = spans.SpanNear(t1, t2, slop=5)
+
+    To find documents where "whoosh" occurs at most 5 positions before or after
+    "library"::
+
+        q = spans.SpanNear(t1, t2, slop=5, ordered=False)
+
+    You can use the ``phrase()`` class method to create a tree of SpanNear
+    queries to match a list of terms::
+
+        q = spans.SpanNear.phrase("text", [u"whoosh", u"search", u"library"], slop=2)
+    """
+
+    def __init__(self, a, b, slop=1, ordered=True, mindist=1):
+        """
+        :param a: the first query to match.
+        :param b: the second query that must occur within "slop" positions of
+            the first query.
+        :param slop: the number of positions within which the queries must
+            occur. Default is 1, meaning the queries must occur right next
+            to each other.
+        :param ordered: whether a must occur before b. Default is True.
+        :param mindist: the minimum distance allowed between the queries.
+        """
+
+        self.q = And([a, b])
+        self.a = a
+        self.b = b
+        self.slop = slop
+        self.ordered = ordered
+        self.mindist = mindist
+
+    def __repr__(self):
+        return "%s(%r, slop=%d, ordered=%s, mindist=%d)" % (self.__class__.__name__,
+                                                            self.q, self.slop,
+                                                            self.ordered,
+                                                            self.mindist)
+
+    def __eq__(self, other):
+        return (other and self.__class__ == other.__class__
+                and self.q == other.q and self.slop == other.slop
+                and self.ordered == other.ordered
+                and self.mindist == other.mindist)
+
+    def __hash__(self):
+        return (hash(self.a) ^ hash(self.b) ^ hash(self.slop)
+                ^ hash(self.ordered) ^ hash(self.mindist))
+
+    def is_leaf(self):
+        return False
+
+    def apply(self, fn):
+        return self.__class__(fn(self.a), fn(self.b), slop=self.slop,
+                              ordered=self.ordered, mindist=self.mindist)
+
+    def matcher(self, searcher):
+        ma = self.a.matcher(searcher)
+        mb = self.b.matcher(searcher)
+        return SpanNear.SpanNearMatcher(ma, mb, slop=self.slop,
+                                        ordered=self.ordered,
+                                        mindist=self.mindist)
+
+    @classmethod
+    def phrase(cls, fieldname, words, slop=1, ordered=True):
+        """Returns a tree of SpanNear queries to match a list of terms.
+
+        This class method is a convenience for constructing a phrase query
+        using a binary tree of SpanNear queries.
+
+        >>> SpanNear.phrase("f", [u"a", u"b", u"c", u"d"])
+        SpanNear(SpanNear(Term("f", u"a"), Term("f", u"b")), SpanNear(Term("f", u"c"), Term("f", u"d")))
+
+        :param fieldname: the name of the field to search in.
+        :param words: a sequence of token texts to search for.
+        :param slop: the number of positions within which the terms must
+            occur. Default is 1, meaning the terms must occur right next
+            to each other.
+        :param ordered: whether the terms must occur in order. Default is True.
+        """
+
+        terms = [Term(fieldname, word) for word in words]
+        return make_binary_tree(cls, terms, slop=slop, ordered=ordered)
+
+    class SpanNearMatcher(SpanWrappingMatcher):
+        def __init__(self, a, b, slop=1, ordered=True, mindist=1):
+            self.a = a
+            self.b = b
+            self.slop = slop
+            self.ordered = ordered
+            self.mindist = mindist
+            isect = IntersectionMatcher(a, b)
+            super(SpanNear.SpanNearMatcher, self).__init__(isect)
+
+        def copy(self):
+            return self.__class__(self.a.copy(), self.b.copy(), slop=self.slop,
+                                  ordered=self.ordered, mindist=self.mindist)
+
+        def replace(self):
+            if not self.is_active():
+                return NullMatcher()
+            return self
+
+        def _get_spans(self):
+            slop = self.slop
+            mindist = self.mindist
+            ordered = self.ordered
+            spans = set()
+
+            bspans = self.b.spans()
+            for aspan in self.a.spans():
+                for bspan in bspans:
+                    if (bspan.end < aspan.start - slop
+                        or (ordered and aspan.start > bspan.start)):
+                        # B is too far in front of A, or B is in front of A
+                        # *at all* when ordered is True
+                        continue
+                    if bspan.start > aspan.end + slop:
+                        # B is too far from A. Since spans are listed in
+                        # start position order, we know that all spans after
+                        # this one will also be too far.
+                        break
+
+                    # Check the distance between the spans
+                    dist = aspan.distance_to(bspan)
+                    if dist >= mindist and dist <= slop:
+                        spans.add(aspan.to(bspan))
+
+            return sorted(spans)
+
+
+class SpanOr(SpanQuery):
+    """Matches documents that match any of a list of sub-queries. Unlike
+    query.Or, this class merges together matching spans from the different
+    sub-queries when they overlap.
+    """
+
+    def __init__(self, subqs):
+        """
+        :param subqs: a list of queries to match.
+        """
+
+        self.q = Or(subqs)
+        self.subqs = subqs
+
+    def is_leaf(self):
+        return False
+
+    def apply(self, fn):
+        return self.__class__([fn(sq) for sq in self.subqs])
+
+    def matcher(self, searcher):
+        matchers = [q.matcher(searcher) for q in self.subqs]
+        return make_binary_tree(SpanOr.SpanOrMatcher, matchers)
+
+    class SpanOrMatcher(SpanBiMatcher):
+        def __init__(self, a, b):
+            self.a = a
+            self.b = b
+            super(SpanOr.SpanOrMatcher, self).__init__(UnionMatcher(a, b))
+
+        def _get_spans(self):
+            if self.a.is_active() and self.b.is_active() and self.a.id() == self.b.id():
+                spans = sorted(set(self.a.spans()) | set(self.b.spans()))
+            elif not self.b.is_active() or self.a.id() < self.b.id():
+                spans = self.a.spans()
+            else:
+                spans = self.b.spans()
+
+            Span.merge(spans)
+            return spans
+
+
+class SpanBiQuery(SpanQuery):
+    # Intermediate base class for methods common to "a/b" span query types
+
+    def is_leaf(self):
+        return False
+
+    def apply(self, fn):
+        return self.__class__(fn(self.a), fn(self.b))
+
+    def matcher(self, searcher):
+        ma = self.a.matcher(searcher)
+        mb = self.b.matcher(searcher)
+        return self._Matcher(ma, mb)
+
+
+class SpanNot(SpanBiQuery):
+    """Matches spans from the first query only if they don't overlap with
+    spans from the second query. If there are no non-overlapping spans, the
+    document does not match.
+
+    For example, to match documents that contain "bear" at most 2 places after
+    "apple" in the "text" field but don't have "cute" between them::
+
+        from whoosh import query, spans
+        t1 = query.Term("text", "apple")
+        t2 = query.Term("text", "bear")
+        near = spans.SpanNear(t1, t2, slop=2)
+        q = spans.SpanNot(near, query.Term("text", "cute"))
+    """
+
+    def __init__(self, a, b):
+        """
+        :param a: the query to match.
+        :param b: do not match any spans that overlap with spans from this
+            query.
+        """
+
+        self.q = AndMaybe(a, b)
+        self.a = a
+        self.b = b
+
+    class _Matcher(SpanBiMatcher):
+        def __init__(self, a, b):
+            self.a = a
+            self.b = b
+            super(SpanNot._Matcher, self).__init__(AndMaybeMatcher(a, b))
+
+        def _get_spans(self):
+            if self.a.id() == self.b.id():
+                spans = []
+                bspans = self.b.spans()
+                for aspan in self.a.spans():
+                    overlapped = False
+                    for bspan in bspans:
+                        if aspan.overlaps(bspan):
+                            overlapped = True
+                            break
+                    if not overlapped:
+                        spans.append(aspan)
+                return spans
+            else:
+                return self.a.spans()
+
+
+class SpanContains(SpanBiQuery):
+    """Matches documents where the spans of the first query contain any spans
+    of the second query.
+
+    For example, to match documents where "apple" occurs at most 10 places
+    before "bear" in the "text" field and "cute" is between them::
+
+        from whoosh import query, spans
+        t1 = query.Term("text", "apple")
+        t2 = query.Term("text", "bear")
+        near = spans.SpanNear(t1, t2, slop=10)
+        q = spans.SpanContains(near, query.Term("text", "cute"))
+    """
+
+    def __init__(self, a, b):
+        """
+        :param a: the query to match.
+        :param b: the query whose spans must occur within the matching spans
+            of the first query.
+        """
+
+        self.q = And([a, b])
+        self.a = a
+        self.b = b
+
+    class _Matcher(SpanBiMatcher):
+        def __init__(self, a, b):
+            self.a = a
+            self.b = b
+            isect = IntersectionMatcher(a, b)
+            super(SpanContains._Matcher, self).__init__(isect)
+
+        def _get_spans(self):
+            spans = []
+            bspans = self.b.spans()
+            for aspan in self.a.spans():
+                for bspan in bspans:
+                    if aspan.start > bspan.end:
+                        continue
+                    if aspan.end < bspan.start:
+                        break
+
+                    if bspan.is_within(aspan):
+                        spans.append(aspan)
+                        break
+            return spans
+
+
+class SpanBefore(SpanBiQuery):
+    """Matches documents where the spans of the first query occur before any
+    spans of the second query.
+
+    For example, to match documents where "apple" occurs anywhere before
+    "bear"::
+
+        from whoosh import query, spans
+        t1 = query.Term("text", "apple")
+        t2 = query.Term("text", "bear")
+        q = spans.SpanBefore(t1, t2)
+    """
+
+    def __init__(self, a, b):
+        """
+        :param a: the query that must occur before the second.
+        :param b: the query that must occur after the first.
+        """
+
+        self.a = a
+        self.b = b
+        self.q = And([a, b])
+
+    class _Matcher(SpanBiMatcher):
+        def __init__(self, a, b):
+            self.a = a
+            self.b = b
+            isect = IntersectionMatcher(a, b)
+            super(SpanBefore._Matcher, self).__init__(isect)
+
+        def _get_spans(self):
+            bminstart = min(bspan.start for bspan in self.b.spans())
+            return [aspan for aspan in self.a.spans() if aspan.end < bminstart]
+
+
+class SpanCondition(SpanBiQuery):
+    """Matches documents that satisfy both subqueries, but only uses the spans
+    from the first subquery.
+
+    This is useful when you want to place conditions on matches but not have
+    those conditions affect the spans returned.
+
+    For example, to get spans for the term ``alfa`` in documents that also
+    must contain the term ``bravo``::
+
+        SpanCondition(Term("text", u"alfa"), Term("text", u"bravo"))
+
+    """
+
+    def __init__(self, a, b):
+        self.a = a
+        self.b = b
+        self.q = And([a, b])
+
+    class _Matcher(SpanBiMatcher):
+        def __init__(self, a, b):
+            self.a = a
+            isect = IntersectionMatcher(a, b)
+            super(SpanCondition._Matcher, self).__init__(isect)
+
+        def _get_spans(self):
+            return self.a.spans()
+
+
+
+
+
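
A short usage sketch combining the span queries above. The index ``ix``, its "text" field, and the stored "title" field are assumptions for illustration; fields used with span queries must be indexed with positions.

    from whoosh import query, spans

    def apples_near_bears_but_not_cute(ix):
        t1 = query.Term("text", u"apple")
        t2 = query.Term("text", u"bear")
        near = spans.SpanNear(t1, t2, slop=3, ordered=False)
        # drop matches whose apple..bear span overlaps an occurrence of "cute"
        q = spans.SpanNot(near, query.Term("text", u"cute"))
        searcher = ix.searcher()
        try:
            return [hit["title"] for hit in searcher.search(q)]
        finally:
            searcher.close()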
diff --git a/lib/whoosh/whoosh/spelling.py b/lib/whoosh/whoosh/spelling.py
new file mode 100644
index 0000000..be7042c
--- /dev/null
+++ b/lib/whoosh/whoosh/spelling.py
@@ -0,0 +1,246 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""This module contains functions/classes using a Whoosh index as a backend for
+a spell-checking engine.
+"""
+
+from collections import defaultdict
+
+from whoosh import analysis, fields, query, scoring
+from whoosh.support.levenshtein import relative, distance
+
+
+class SpellChecker(object):
+    """Implements a spell-checking engine using a search index for the backend
+    storage and lookup. This class is based on the Lucene contributed spell-
+    checker code.
+
+    To use this object::
+
+        st = store.FileStorage("spelldict")
+        sp = SpellChecker(st)
+
+        sp.add_words([u"aardvark", u"manticore", u"zebra", ...])
+        # or
+        ix = index.open_dir("index")
+        sp.add_field(ix, "content")
+
+        suggestions = sp.suggest(u"ardvark", number = 2)
+    """
+
+    def __init__(self, storage, indexname="SPELL",
+                 booststart=2.0, boostend=1.0,
+                 mingram=3, maxgram=4,
+                 minscore=0.5):
+        """
+        :param storage: The storage object in which to create the
+            spell-checker's dictionary index.
+        :param indexname: The name to use for the spell-checker's dictionary
+            index. You only need to change this if you have multiple spelling
+            indexes in the same storage.
+        :param booststart: How much to boost matches of the first N-gram (the
+            beginning of the word).
+        :param boostend: How much to boost matches of the last N-gram (the end
+            of the word).
+        :param mingram: The minimum gram length to store.
+        :param maxgram: The maximum gram length to store.
+        :param minscore: The minimum score matches must achieve to be returned.
+        """
+
+        self.storage = storage
+        self.indexname = indexname
+
+        self._index = None
+
+        self.booststart = booststart
+        self.boostend = boostend
+        self.mingram = mingram
+        self.maxgram = maxgram
+        self.minscore = minscore
+
+    def index(self, create=False):
+        """Returns the backend index of this object (instantiating it if it
+        didn't already exist).
+        """
+
+        import index
+        if create or not self._index:
+            create = create or not index.exists(self.storage, indexname=self.indexname)
+            if create:
+                self._index = self.storage.create_index(self._schema(), self.indexname)
+            else:
+                self._index = self.storage.open_index(self.indexname)
+        return self._index
+
+    def _schema(self):
+        # Creates a schema given this object's mingram and maxgram attributes.
+
+        from fields import Schema, FieldType, Frequency, ID, STORED
+        from analysis import SimpleAnalyzer
+
+        idtype = ID()
+        freqtype = FieldType(format=Frequency(SimpleAnalyzer()))
+
+        fls = [("word", STORED), ("score", STORED)]
+        for size in xrange(self.mingram, self.maxgram + 1):
+            fls.extend([("start%s" % size, idtype),
+                        ("end%s" % size, idtype),
+                        ("gram%s" % size, freqtype)])
+
+        return Schema(**dict(fls))
+
+    def suggestions_and_scores(self, text, weighting=None):
+        """Returns a list of possible alternative spellings of 'text', as
+        ('word', score, weight) triples, where 'word' is the suggested
+        word, 'score' is the score that was assigned to the word using
+        :meth:`SpellChecker.add_field` or :meth:`SpellChecker.add_scored_words`,
+        and 'weight' is the score the word received in the search for the
+        original word's ngrams.
+
+        You must add words to the dictionary (using add_field, add_words,
+        and/or add_scored_words) before you can use this.
+
+        This is a lower-level method, in case an expert user needs access to
+        the raw scores, for example to implement a custom suggestion ranking
+        algorithm. Most people will want to call :meth:`~SpellChecker.suggest`
+        instead, which simply returns the top N valued words.
+
+        :param text: The word to check.
+        :rtype: list
+        """
+
+        if weighting is None:
+            weighting = scoring.TF_IDF()
+
+        grams = defaultdict(list)
+        for size in xrange(self.mingram, self.maxgram + 1):
+            key = "gram%s" % size
+            nga = analysis.NgramAnalyzer(size)
+            for t in nga(text):
+                grams[key].append(t.text)
+
+        queries = []
+        for size in xrange(self.mingram, min(self.maxgram + 1, len(text))):
+            key = "gram%s" % size
+            gramlist = grams[key]
+            queries.append(query.Term("start%s" % size, gramlist[0],
+                                      boost=self.booststart))
+            queries.append(query.Term("end%s" % size, gramlist[-1],
+                                      boost=self.boostend))
+            for gram in gramlist:
+                queries.append(query.Term(key, gram))
+
+        q = query.Or(queries)
+        ix = self.index()
+        s = ix.searcher(weighting=weighting)
+        try:
+            result = s.search(q, limit=None)
+            return [(fs["word"], fs["score"], result.score(i))
+                    for i, fs in enumerate(result)
+                    if fs["word"] != text]
+        finally:
+            s.close()
+
+    def suggest(self, text, number=3, usescores=False):
+        """Returns a list of suggested alternative spellings of 'text'. You
+        must add words to the dictionary (using add_field, add_words, and/or
+        add_scored_words) before you can use this.
+
+        :param text: The word to check.
+        :param number: The maximum number of suggestions to return.
+        :param usescores: Use the per-word score to influence the suggestions.
+        :rtype: list
+        """
+
+        if usescores:
+            def keyfn(a):
+                return 0 - (1 / distance(text, a[0])) * a[1]
+        else:
+            def keyfn(a):
+                return distance(text, a[0])
+
+        suggestions = self.suggestions_and_scores(text)
+        suggestions.sort(key=keyfn)
+        return [word for word, _, weight in suggestions[:number]
+                if weight >= self.minscore]
+
+    def add_field(self, ix, fieldname):
+        """Adds the terms in a field from another index to the backend
+        dictionary. This method calls add_scored_words() and uses each term's
+        frequency as the score. As a result, more common words will be
+        suggested before rare words. If you want to calculate the scores
+        differently, use add_scored_words() directly.
+
+        :param ix: The index.Index object from which to add terms.
+        :param fieldname: The field name (or number) of a field in the source
+            index. All the indexed terms from this field will be added to the
+            dictionary.
+        """
+
+        r = ix.reader()
+        try:
+            self.add_scored_words((w, freq)
+                                  for w, _, freq in r.iter_field(fieldname))
+        finally:
+            r.close()
+
+    def add_words(self, ws, score=1):
+        """Adds a list of words to the backend dictionary.
+
+        :param ws: A sequence of words (strings) to add to the dictionary.
+        :param score: An optional score to use for ALL the words in 'ws'.
+        """
+        self.add_scored_words((w, score) for w in ws)
+
+    def add_scored_words(self, ws):
+        """Adds a list of ("word", score) tuples to the backend dictionary.
+        Associating words with a score lets you use the 'usescores' keyword
+        argument of the suggest() method to order the suggestions using the
+        scores.
+
+        :param ws: A sequence of ("word", score) tuples.
+        """
+
+        writer = self.index().writer()
+        for text, score in ws:
+            fields = {"word": text, "score": score}
+            for size in xrange(self.mingram, self.maxgram + 1):
+                nga = analysis.NgramAnalyzer(size)
+                gramlist = [t.text for t in nga(text)]
+                if len(gramlist) > 0:
+                    fields["start%s" % size] = gramlist[0]
+                    fields["end%s" % size] = gramlist[-1]
+                    fields["gram%s" % size] = " ".join(gramlist)
+            writer.add_document(**fields)
+        writer.commit()
+
+
+
+
+
+
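
A small end-to-end sketch of the SpellChecker above, using Whoosh's in-memory RamStorage as the backend; the word list is made up.

    from whoosh.filedb.filestore import RamStorage
    from whoosh.spelling import SpellChecker

    st = RamStorage()
    sp = SpellChecker(st)
    sp.add_words([u"aardvark", u"manticore", u"zebra"])
    # the misspelling is deliberately close to "aardvark"
    print(sp.suggest(u"ardvark", number=2))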
diff --git a/lib/whoosh/whoosh/store.py b/lib/whoosh/whoosh/store.py
new file mode 100644
index 0000000..8ad2379
--- /dev/null
+++ b/lib/whoosh/whoosh/store.py
@@ -0,0 +1,86 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+
+class LockError(Exception):
+    pass
+
+
+class Storage(object):
+    """Abstract base class for storage objects.
+    """
+
+    readonly = False
+
+    def __iter__(self):
+        return iter(self.list())
+
+    def create_index(self, schema, indexname=None):
+        raise NotImplementedError
+
+    def open_index(self, indexname=None, schema=None):
+        raise NotImplementedError
+
+    def create_file(self, name):
+        raise NotImplementedError
+
+    def open_file(self, name, *args, **kwargs):
+        raise NotImplementedError
+
+    def list(self):
+        raise NotImplementedError
+
+    def file_exists(self, name):
+        raise NotImplementedError
+
+    def file_modified(self, name):
+        raise NotImplementedError
+
+    def file_length(self, name):
+        raise NotImplementedError
+
+    def delete_file(self, name):
+        raise NotImplementedError
+
+    def rename_file(self, frm, to, safe=False):
+        raise NotImplementedError
+
+    def lock(self, name):
+        raise NotImplementedError
+
+    def close(self):
+        pass
+
+    def optimize(self):
+        pass
+
+
+
+
+
+
+
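
Storage is only the abstract interface; concrete backends such as FileStorage (on disk) and RamStorage (in memory) implement it and hand back index objects. A brief sketch, where the directory name and schema fields are assumptions for illustration:

    import os
    from whoosh.fields import Schema, ID, TEXT
    from whoosh.filedb.filestore import FileStorage

    schema = Schema(path=ID(stored=True), content=TEXT)
    if not os.path.exists("indexdir"):
        os.mkdir("indexdir")
    st = FileStorage("indexdir")
    ix = st.create_index(schema)      # Storage.create_index()
    w = ix.writer()
    w.add_document(path=u"/a", content=u"hello world")
    w.commit()
    print(st.list())                  # segment files the backend created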
diff --git a/lib/whoosh/whoosh/support/__init__.py b/lib/whoosh/whoosh/support/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/whoosh/whoosh/support/__init__.py
diff --git a/lib/whoosh/whoosh/support/bench.py b/lib/whoosh/whoosh/support/bench.py
new file mode 100644
index 0000000..2c50146
--- /dev/null
+++ b/lib/whoosh/whoosh/support/bench.py
@@ -0,0 +1,612 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from __future__ import division
+import os.path
+from optparse import OptionParser
+from shutil import rmtree
+
+from whoosh import index, qparser, query
+from whoosh.util import now, find_object
+
+try:
+    import xappy
+except ImportError:
+    pass
+try:
+    import xapian
+except ImportError:
+    pass
+try:
+    import pysolr
+except ImportError:
+    pass
+
+try:
+    from persistent import Persistent
+
+    class ZDoc(Persistent):
+        def __init__(self, d):
+            self.__dict__.update(d)
+except ImportError:
+    pass
+
+
+class Module(object):
+    def __init__(self, bench, options, args):
+        self.bench = bench
+        self.options = options
+        self.args = args
+
+    def __repr__(self):
+        return self.__class__.__name__
+
+    def indexer(self, **kwargs):
+        pass
+
+    def index_document(self, d):
+        raise NotImplementedError
+
+    def finish(self, **kwargs):
+        pass
+
+    def _process_result(self, d):
+        attrname = "process_result_%s" % self.options.lib
+        if hasattr(self.bench.spec, attrname):
+            method = getattr(self.bench.spec, attrname)
+            self._process_result = method
+            return method(d)
+        else:
+            self._process_result = lambda x: x
+            return d
+
+    def searcher(self):
+        pass
+
+    def query(self):
+        raise NotImplementedError
+
+    def find(self, q):
+        raise NotImplementedError
+
+    def findterms(self, terms):
+        raise NotImplementedError
+
+    def results(self, r):
+        for hit in r:
+            yield self._process_result(hit)
+
+
+class Spec(object):
+    headline_field = "title"
+    main_field = "body"
+
+    def __init__(self, options, args):
+        self.options = options
+        self.args = args
+
+    def documents(self):
+        raise NotImplementedError
+
+    def setup(self):
+        pass
+
+    def print_results(self, ls):
+        showbody = self.options.showbody
+        snippets = self.options.snippets
+        limit = self.options.limit
+        for i, hit in enumerate(ls):
+            if i >= limit:
+                break
+
+            print "%d. %s" % (i + 1, hit.get(self.headline_field))
+            if snippets:
+                print self.show_snippet(hit)
+            if showbody:
+                print hit.get(self.main_field)
+
+
+class WhooshModule(Module):
+    def indexer(self, create=True):
+        schema = self.bench.spec.whoosh_schema()
+        path = os.path.join(self.options.dir, "%s_whoosh" % self.options.indexname)
+
+        if not os.path.exists(path):
+            os.mkdir(path)
+        if create:
+            ix = index.create_in(path, schema)
+        else:
+            ix = index.open_dir(path)
+
+        poolclass = None
+        if self.options.pool:
+            poolclass = find_object(self.options.pool)
+
+        kwargs = dict(limitmb=int(self.options.limitmb), poolclass=poolclass,
+                      dir=self.options.tempdir, procs=int(self.options.procs),
+                      batchsize=int(self.options.batch))
+
+        if self.options.expw:
+            from whoosh.filedb.multiproc import MultiSegmentWriter
+            self.writer = MultiSegmentWriter(ix, **kwargs)
+        else:
+            self.writer = ix.writer(**kwargs)
+
+        self._procdoc = None
+        if hasattr(self.bench.spec, "process_document_whoosh"):
+            self._procdoc = self.bench.spec.process_document_whoosh
+
+    def index_document(self, d):
+        _procdoc = self._procdoc
+        if _procdoc:
+            _procdoc(d)
+        self.writer.add_document(**d)
+
+    def finish(self, merge=True, optimize=False):
+        self.writer.commit(merge=merge, optimize=optimize)
+
+    def searcher(self):
+        path = os.path.join(self.options.dir, "%s_whoosh" % self.options.indexname)
+        ix = index.open_dir(path)
+        self.srch = ix.searcher()
+        self.parser = qparser.QueryParser(self.bench.spec.main_field, schema=ix.schema)
+
+    def query(self):
+        qstring = " ".join(self.args).decode("utf8")
+        return self.parser.parse(qstring)
+
+    def find(self, q):
+        return self.srch.search(q, limit=int(self.options.limit))
+
+    def findterms(self, terms):
+        limit = int(self.options.limit)
+        s = self.srch
+        q = query.Term(self.bench.spec.main_field, None)
+        for term in terms:
+            q.text = term
+            yield s.search(q, limit=limit)
+
+
+class XappyModule(Module):
+    def indexer(self, **kwargs):
+        path = os.path.join(self.options.dir, "%s_xappy" % self.options.indexname)
+        conn = self.bench.spec.xappy_connection(path)
+        return conn
+
+    def index_document(self, conn, d):
+        if hasattr(self.bench, "process_document_xappy"):
+            self.bench.process_document_xappy(d)
+        doc = xappy.UnprocessedDocument()
+        for key, values in d:
+            if not isinstance(values, list):
+                values = [values]
+            for value in values:
+                doc.fields.append(xappy.Field(key, value))
+        conn.add(doc)
+
+    def finish(self, conn):
+        conn.flush()
+
+    def searcher(self):
+        path = os.path.join(self.options.dir, "%s_xappy" % self.options.indexname)
+        return xappy.SearchConnection(path)
+
+    def query(self, conn):
+        return conn.query_parse(" ".join(self.args))
+
+    def find(self, conn, q):
+        return conn.search(q, 0, int(self.options.limit))
+
+    def findterms(self, conn, terms):
+        limit = int(self.options.limit)
+        for term in terms:
+            q = conn.query_field(self.bench.spec.main_field, term)
+            yield conn.search(q, 0, limit)
+
+    def results(self, r):
+        hf = self.bench.spec.headline_field
+        mf = self.bench.spec.main_field
+        for hit in r:
+            yield self._process_result({hf: hit.data[hf], mf: hit.data[mf]})
+
+
+class XapianModule(Module):
+    def indexer(self, **kwargs):
+        path = os.path.join(self.options.dir, "%s_xapian" % self.options.indexname)
+        self.database = xapian.WritableDatabase(path, xapian.DB_CREATE_OR_OPEN)
+        self.ixer = xapian.TermGenerator()
+
+    def index_document(self, d):
+        if hasattr(self.bench, "process_document_xapian"):
+            self.bench.process_document_xapian(d)
+        doc = xapian.Document()
+        doc.add_value(0, d.get(self.bench.spec.headline_field, "-"))
+        doc.set_data(d[self.bench.spec.main_field])
+        self.ixer.set_document(doc)
+        self.ixer.index_text(d[self.bench.spec.main_field])
+        self.database.add_document(doc)
+
+    def finish(self, **kwargs):
+        self.database.flush()
+
+    def searcher(self):
+        path = os.path.join(self.options.dir, "%s_xapian" % self.options.indexname)
+        self.db = xapian.Database(path)
+        self.enq = xapian.Enquire(self.db)
+        self.qp = xapian.QueryParser()
+        self.qp.set_database(self.db)
+
+    def query(self):
+        return self.qp.parse_query(" ".join(self.args))
+
+    def find(self, q):
+        self.enq.set_query(q)
+        return self.enq.get_mset(0, int(self.options.limit))
+
+    def findterms(self, terms):
+        limit = int(self.options.limit)
+        for term in terms:
+            q = self.qp.parse_query(term)
+            self.enq.set_query(q)
+            yield self.enq.get_mset(0, limit)
+
+    def results(self, matches):
+        hf = self.bench.spec.headline_field
+        mf = self.bench.spec.main_field
+        for m in matches:
+            yield self._process_result({hf: m.document.get_value(0),
+                                        mf: m.document.get_data()})
+
+
+class SolrModule(Module):
+    def indexer(self, **kwargs):
+        self.solr_doclist = []
+        self.conn = pysolr.Solr(self.options.url)
+        self.conn.delete("*:*")
+        self.conn.commit()
+
+    def index_document(self, d):
+        self.solr_doclist.append(d)
+        if len(self.solr_doclist) >= int(self.options.batch):
+            self.conn.add(self.solr_doclist, commit=False)
+            self.solr_doclist = []
+
+    def finish(self, **kwargs):
+        if self.solr_doclist:
+            self.conn.add(self.solr_doclist)
+        del self.solr_doclist
+        self.conn.optimize(block=True)
+
+    def searcher(self):
+        self.solr = pysolr.Solr(self.options.url)
+
+    def query(self):
+        return " ".join(self.args)
+
+    def find(self, q):
+        return self.solr.search(q, limit=int(self.options.limit))
+
+    def findterms(self, terms):
+        limit = int(self.options.limit)
+        for term in terms:
+            yield self.solr.search("body:" + term, limit=limit)
+
+
+class ZcatalogModule(Module):
+    def indexer(self, **kwargs):
+        from ZODB.FileStorage import FileStorage
+        from ZODB.DB import DB
+        from zcatalog import catalog
+        from zcatalog import indexes
+        import transaction
+
+        dir = os.path.join(self.options.dir, "%s_zcatalog" % self.options.indexname)
+        if os.path.exists(dir):
+            rmtree(dir)
+        os.mkdir(dir)
+
+        storage = FileStorage(os.path.join(dir, "index"))
+        db = DB(storage)
+        conn = db.open()
+
+        self.cat = catalog.Catalog()
+        self.bench.spec.zcatalog_setup(self.cat)
+        conn.root()["cat"] = self.cat
+        transaction.commit()
+
+        self.zcatalog_count = 0
+
+    def index_document(self, d):
+        if hasattr(self.bench, "process_document_zcatalog"):
+            self.bench.process_document_zcatalog(d)
+        doc = ZDoc(d)
+        self.cat.index_doc(doc)
+        self.zcatalog_count += 1
+        if self.zcatalog_count >= 100:
+            import transaction
+            transaction.commit()
+            self.zcatalog_count = 0
+
+    def finish(self, **kwargs):
+        import transaction
+        transaction.commit()
+        del self.zcatalog_count
+
+    def searcher(self):
+        from ZODB.FileStorage import FileStorage
+        from ZODB.DB import DB
+        from zcatalog import catalog
+        from zcatalog import indexes
+        import transaction
+
+        path = os.path.join(self.options.dir, "%s_zcatalog" % self.options.indexname, "index")
+        storage = FileStorage(path)
+        db = DB(storage)
+        conn = db.open()
+
+        self.cat = conn.root()["cat"]
+
+    def query(self):
+        return " ".join(self.args)
+
+    def find(self, q):
+        return self.cat.searchResults(body=q)
+
+    def findterms(self, terms):
+        for term in terms:
+            yield self.cat.searchResults(body=term)
+
+    def results(self, r):
+        hf = self.bench.spec.headline_field
+        mf = self.bench.spec.main_field
+        for hit in r:
+            # Have to access the attributes for them to be retrieved
+            yield self._process_result({hf: getattr(hit, hf),
+                                        mf: getattr(hit, mf)})
+
+
+class NucularModule(Module):
+    def indexer(self, create=True):
+        import shutil
+        from nucular import Nucular
+
+        dir = os.path.join(self.options.dir, "%s_nucular" % self.options.indexname)
+        if create:
+            if os.path.exists(dir):
+                shutil.rmtree(dir)
+            os.mkdir(dir)
+        self.archive = Nucular.Nucular(dir)
+        if create:
+            self.archive.create()
+        self.count = 0
+
+    def index_document(self, d):
+        try:
+            self.archive.indexDictionary(str(self.count), d)
+        except ValueError:
+            print "d=", d
+            raise
+        self.count += 1
+        if not self.count % int(self.options.batch):
+            t = now()
+            self.archive.store(lazy=True)
+            self.indexer(create=False)
+
+    def finish(self, **kwargs):
+        self.archive.store(lazy=False)
+        self.archive.aggregateRecent(fast=False, verbose=True)
+        self.archive.moveTransientToBase(verbose=True)
+        self.archive.cleanUp()
+
+    def searcher(self):
+        from nucular import Nucular
+
+        dir = os.path.join(self.options.dir, "%s_nucular" % self.options.indexname)
+        self.archive = Nucular.Nucular(dir)
+
+    def query(self):
+        return " ".join(self.args)
+
+    def find(self, q):
+        return self.archive.dictionaries(q)
+
+    def findterms(self, terms):
+        for term in terms:
+            q = self.archive.Query()
+            q.anyWord(term)
+            yield q.resultDictionaries()
+
+
+class Bench(object):
+    libs = {"whoosh": WhooshModule, "xappy": XappyModule,
+            "xapian": XapianModule, "solr": SolrModule,
+            "zcatalog": ZcatalogModule, "nucular": NucularModule}
+
+    def index(self, lib):
+        print "Indexing with %s..." % lib
+
+        options = self.options
+        every = None if options.every is None else int(options.every)
+        merge = options.merge
+        chunk = int(options.chunk)
+        skip = int(options.skip)
+        upto = int(options.upto)
+        count = 0
+        skipc = skip
+
+        starttime = chunkstarttime = now()
+
+        lib.indexer()
+
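+        # --skip indexes only every Nth document, --chunk controls how often a
+        # progress line is printed, --commit-every commits (and reopens the
+        # indexer) periodically, and --upto caps the number of documents indexed.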
+        for d in self.spec.documents():
+            skipc -= 1
+            if not skipc:
+                lib.index_document(d)
+                count += 1
+                skipc = skip
+                if chunk and not count % chunk:
+                    t = now()
+                    sofar = t - starttime
+                    print "Done %d docs, %0.3f secs for %d, %0.3f total, %0.3f docs/s" % (count, t - chunkstarttime, chunk, sofar, count / sofar)
+                    chunkstarttime = t
+                if count > upto:
+                    break
+                if every and not count % every:
+                    print "----Commit"
+                    lib.finish(merge=merge)
+                    lib.indexer(create=False)
+
+        spooltime = now()
+        print "Spool time:", spooltime - starttime
+        lib.finish(merge=merge)
+        committime = now()
+        print "Commit time:", committime - spooltime
+        totaltime = committime - starttime
+        print "Total time to index %d documents: %0.3f secs (%0.3f minutes)" % (count, totaltime, totaltime / 60.0)
+        print "Indexed %0.3f docs/s" % (count / totaltime)
+
+    def search(self, lib):
+        lib.searcher()
+
+        t = now()
+        q = lib.query()
+        print "Query:", q
+        r = lib.find(q)
+        print "Search time:", now() - t
+
+        t = now()
+        self.spec.print_results(lib.results(r))
+        print "Print time:", now() - t
+
+    def search_file(self, lib):
+        f = open(self.options.termfile, "rb")
+        terms = [line.strip() for line in f]
+        f.close()
+
+        print "Searching %d terms with %s" % (len(terms), lib)
+        lib.searcher()
+        starttime = now()
+        for r in lib.findterms(terms):
+            pass
+        searchtime = now() - starttime
+        print "Search time:", searchtime, "searches/s:", float(len(terms)) / searchtime
+
+    def _parser(self, name):
+        p = OptionParser()
+        p.add_option("-x", "--lib", dest="lib",
+                     help="Name of the library to use to index/search.",
+                     default="whoosh")
+        p.add_option("-d", "--dir", dest="dir", metavar="DIRNAME",
+                     help="Directory in which to store index.", default=".")
+        p.add_option("-s", "--setup", dest="setup", action="store_true",
+                     help="Set up any support files or caches.", default=False)
+        p.add_option("-i", "--index", dest="index", action="store_true",
+                     help="Index the documents.", default=False)
+        p.add_option("-n", "--name", dest="indexname", metavar="PREFIX",
+                     help="Index name prefix.", default="%s_index" % name)
+        p.add_option("-U", "--url", dest="url", metavar="URL",
+                     help="Solr URL", default="http://localhost:8983/solr")
+        p.add_option("-m", "--mb", dest="limitmb",
+                     help="Max. memory usage, in MB", default="128")
+        p.add_option("-c", "--chunk", dest="chunk",
+                     help="Number of documents to index between progress messages.",
+                     default=1000)
+        p.add_option("-B", "--batch", dest="batch",
+                     help="Batch size for batch adding documents.",
+                     default=1000)
+        p.add_option("-k", "--skip", dest="skip", metavar="N",
+                     help="Index every Nth document.", default=1)
+        p.add_option("-e", "--commit-every", dest="every", metavar="NUM",
+                      help="Commit every NUM documents", default=None)
+        p.add_option("-M", "--no-merge", dest="merge", action="store_false",
+                     help="Don't merge segments when doing multiple commits",
+                     default=True)
+        p.add_option("-u", "--upto", dest="upto", metavar="N",
+                     help="Index up to this document number.", default=600000)
+        p.add_option("-p", "--procs", dest="procs", metavar="NUMBER",
+                     help="Number of processors to use.", default=0)
+        p.add_option("-l", "--limit", dest="limit", metavar="N",
+                     help="Maximum number of search results to retrieve.",
+                     default=10)
+        p.add_option("-b", "--body", dest="showbody", action="store_true",
+                     help="Show the body text in search results.",
+                     default=False)
+        p.add_option("-g", "--gen", dest="generate", metavar="N",
+                     help="Generate a list of at most N terms present in all libraries.",
+                     default=None)
+        p.add_option("-f", "--file", dest="termfile", metavar="FILENAME",
+                     help="Search using the list of terms in this file.",
+                     default=None)
+        p.add_option("-t", "--tempdir", dest="tempdir", metavar="DIRNAME",
+                     help="Whoosh temp dir", default=None)
+        p.add_option("-P", "--pool", dest="pool", metavar="CLASSNAME",
+                     help="Whoosh pool class", default=None)
+        p.add_option("-X", "--expw", dest="expw", action="store_true",
+                     help="Use experimental whoosh writer", default=False)
+        p.add_option("-Z", "--storebody", dest="storebody", action="store_true",
+                     help="Store the body text in index", default=False)
+        p.add_option("-q", "--snippets", dest="snippets", action="store_true",
+                     help="Show highlighted snippets", default=False)
+
+        return p
+
+    def run(self, specclass):
+        parser = self._parser(specclass.name)
+        options, args = parser.parse_args()
+        self.options = options
+        self.args = args
+
+        if options.lib not in self.libs:
+            raise Exception("Unknown library: %r" % options.lib)
+        lib = self.libs[options.lib](self, options, args)
+
+        self.spec = specclass(options, args)
+
+        if options.setup:
+            self.spec.setup()
+
+        action = self.search
+        if options.index:
+            action = self.index
+        if options.termfile:
+            action = self.search_file
+        if options.generate:
+            action = self.generate_search_file
+
+        action(lib)
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/support/bitvector.py b/lib/whoosh/whoosh/support/bitvector.py
new file mode 100644
index 0000000..d4d107e
--- /dev/null
+++ b/lib/whoosh/whoosh/support/bitvector.py
@@ -0,0 +1,276 @@
+"""
+An implementation of an object that acts like a collection of on/off bits.
+"""
+
+import operator
+from array import array
+
+#: Table of the number of '1' bits in each byte (0-255)
+BYTE_COUNTS = array('B', [
+    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8])
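+
+# For example, 0xB2 == 0b10110010 has four '1' bits, so BYTE_COUNTS[0xB2] == 4;
+# BitVector.count() below sums one table lookup per byte of the bit array to
+# compute a fast population count.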
+
+
+class BitVector(object):
+    """
+    Implements a memory-efficient array of bits.
+
+    >>> bv = BitVector(10)
+    >>> bv
+    <BitVector 0000000000>
+    >>> bv[5] = True
+    >>> bv
+    <BitVector 0000010000>
+
+    You can initialize the BitVector using an iterable of integers representing bit
+    positions to turn on.
+
+    >>> bv2 = BitVector(10, [2, 4, 7])
+    >>> bv2
+    <BitVector 0010100100>
+    >>> bv2[2]
+    True
+
+    BitVector supports bit-wise logic operations & (and), | (or), and ^ (xor)
+    between itself and another BitVector of equal size, or itself and a collection of
+    integers (usually a set() or frozenset()).
+
+    >>> bv | bv2
+    <BitVector 0010110100>
+
+    Note that ``BitVector.__len__()`` returns the number of "on" bits, not
+    the size of the bit array. This is to make BitVector interchangeable with
+    a set()/frozenset() of integers. To get the size, use BitVector.size.
+    """
+
+    def __init__(self, size, source=None, bits=None):
+        self.size = size
+
+        if bits:
+            self.bits = bits
+        else:
+            self.bits = array("B", ([0x00] * ((size >> 3) + 1)))
+
+        if source:
+            set = self.set
+            for num in source:
+                set(num)
+
+        self.bcount = None
+
+    def __eq__(self, other):
+        if isinstance(other, BitVector):
+            return self.bits == other.bits
+        return False
+
+    def __repr__(self):
+        return "<BitVector %s/%s>" % (len(self), self.size)
+
+    def __len__(self):
+        # This returns the count of "on" bits instead of the size to
+        # make BitVector exchangeable with a set() object.
+        return self.count()
+
+    def __contains__(self, index):
+        byte = self.bits[index >> 3]
+        if not byte:
+            return False
+        return byte & (1 << (index & 7)) != 0
+
+    def __iter__(self):
+        get = self.__getitem__
+        for i in xrange(0, self.size):
+            if get(i):
+                yield i
+
+    def __str__(self):
+        get = self.__getitem__
+        return "".join("1" if get(i) else "0"
+                       for i in xrange(0, self.size))
+
+    def __nonzero__(self):
+        return self.count() > 0
+
+    def __getitem__(self, index):
+        return self.bits[index >> 3] & (1 << (index & 7)) != 0
+
+    def __setitem__(self, index, value):
+        if value:
+            self.set(index)
+        else:
+            self.clear(index)
+
+    def _logic(self, op, bitv):
+        if self.size != bitv.size:
+            raise ValueError("Can't combine bitvectors of different sizes")
+        res = BitVector(size=self.size)
+        lpb = map(op, self.bits, bitv.bits)
+        res.bits = array('B', lpb)
+        return res
+
+    def union(self, other):
+        return self.__or__(other)
+
+    def intersection(self, other):
+        return self.__and__(other)
+
+    def __and__(self, other):
+        if not isinstance(other, BitVector):
+            other = BitVector(self.size, source=other)
+        return self._logic(operator.__and__, other)
+
+    def __or__(self, other):
+        if not isinstance(other, BitVector):
+            other = BitVector(self.size, source=other)
+        return self._logic(operator.__or__, other)
+
+    def __ror__(self, other):
+        return self.__or__(other)
+
+    def __rand__(self, other):
+        return self.__and__(other)
+
+    def __xor__(self, other):
+        if not isinstance(other, BitVector):
+            other = BitVector(self.size, source=other)
+        return self._logic(operator.__xor__, other)
+
+    def __invert__(self):
+        return BitVector(self.size, source=(x for x in xrange(self.size) if x not in self))
+
+    def count(self):
+        """Returns the number of "on" bits in the bit array."""
+
+        if self.bcount is None:
+            self.bcount = sum(BYTE_COUNTS[b & 0xFF] for b in self.bits)
+        return self.bcount
+
+    def set(self, index):
+        """Turns the bit at the given position on."""
+
+        if index >= self.size:
+            raise IndexError("Position %r is greater than or equal to the size of the vector" % index)
+        self.bits[index >> 3] |= 1 << (index & 7)
+        self.bcount = None
+
+    def clear(self, index):
+        """Turns the bit at the given position off."""
+
+        self.bits[index >> 3] &= ~(1 << (index & 7))
+        self.bcount = None
+
+    def update(self, iterable):
+        """Takes an iterable of integers representing positions, and turns
+        on the bits at those positions.
+        """
+
+        set = self.set
+        for index in iterable:
+            set(index)
+
+    def copy(self):
+        """Returns a copy of this BitArray."""
+
+        return BitVector(self.size, bits=self.bits)
+
+
+class BitSet(object):
+    """A set-like object for holding positive integers. It is dynamically
+    backed by either a set or BitVector depending on how many numbers are in
+    the set.
+
+    Provides ``add``, ``remove``, ``union``, ``intersection``,
+    ``__contains__``, ``__len__``, ``__iter__``, ``__and__``, ``__or__``, and
+    ``invert`` methods.
+    """
+
+    def __init__(self, size, source=None):
+        self.size = size
+
+        self._back = ()
+        self._switch(size < 256)
+
+        if source:
+            for num in source:
+                self.add(num)
+
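+    # _set_add and _vec_remove below switch the backing store when the other
+    # representation would be cheaper: the heuristic treats the set form as
+    # costing about 4 bytes per member and the BitVector form as size/8 bytes
+    # regardless of membership, with a +/-32 byte cushion so values near the
+    # boundary do not cause repeated conversions.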
+    def _switch(self, toset):
+        if toset:
+            self._back = set(self._back)
+            self.add = self._set_add
+            self.remove = self._back.remove
+        else:
+            self._back = BitVector(self.size, source=self._back)
+            self.add = self._back.set
+            self.remove = self._vec_remove
+
+        self.update = self._back.update
+
+    def __contains__(self, n):
+        return n in self._back
+
+    def __repr__(self):
+        return "<%s %s/%s>" % (self.__class__.__name__, len(self._back), self.size)
+
+    def __len__(self):
+        return len(self._back)
+
+    def __iter__(self):
+        return self._back.__iter__()
+
+    def as_set(self):
+        return frozenset(self._back)
+
+    def union(self, other):
+        return self.__or__(other)
+
+    def intersection(self, other):
+        return self.__and__(other)
+
+    def invert(self):
+        return BitSet(self.size, (x for x in xrange(self.size) if x not in self))
+
+    def __and__(self, other):
+        return BitSet(self.size, self._back.intersection(other))
+
+    def __or__(self, other):
+        return BitSet(self.size, self._back.union(other))
+
+    def __rand__(self, other):
+        return self.__and__(other)
+
+    def __ror__(self, other):
+        return self.__or__(other)
+
+    def __invert__(self):
+        return self.invert()
+
+    def _set_add(self, num):
+        self._back.add(num)
+        if len(self._back) * 4 > self.size // 8 + 32:
+            self._switch(False)
+
+    def _vec_remove(self, num):
+        self._back.clear(num)
+        if len(self._back) * 4 < self.size // 8 - 32:
+            self._switch(True)
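+
+# Illustrative usage (not part of the original module): BitSet presents a
+# set-like interface and chooses its backing representation itself.
+#
+#     >>> bs = BitSet(1000, source=[3, 17, 256])
+#     >>> 17 in bs
+#     True
+#     >>> bs.add(999)
+#     >>> len(bs)
+#     4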
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/support/charset.py b/lib/whoosh/whoosh/support/charset.py
new file mode 100644
index 0000000..32518e9
--- /dev/null
+++ b/lib/whoosh/whoosh/support/charset.py
@@ -0,0 +1,803 @@
+# coding=utf-8
+
+"""This module contains tools for working with Sphinx charset table files. These files
+are useful for doing case and accent folding.
+See :class:`whoosh.analysis.CharsetTokenizer` and :class:`whoosh.analysis.CharsetFilter`.
+"""
+
+from collections import defaultdict
+from itertools import izip
+import re
+
+# This is a straightforward accent-folding charset taken from Carlos Bueno's
+# article "Accent Folding for Auto-Complete", for use with CharsetFilter.
+#
+# http://www.alistapart.com/articles/accent-folding-for-auto-complete/
+#
+# See the article for information and caveats. The code is lifted directly
+# from here:
+#
+# http://github.com/aristus/accent-folding/blob/master/accent_fold.py
+
+accent_map = {u'ẚ': u'a', u'Á': u'a', u'á': u'a', u'À': u'a', u'à': u'a', u'Ă': u'a',
+              u'ă': u'a', u'Ắ': u'a', u'ắ': u'a', u'Ằ': u'a', u'ằ': u'a', u'Ẵ': u'a',
+              u'ẵ': u'a', u'Ẳ': u'a', u'ẳ': u'a', u'Â': u'a', u'â': u'a', u'Ấ': u'a',
+              u'ấ': u'a', u'Ầ': u'a', u'ầ': u'a', u'Ẫ': u'a', u'ẫ': u'a', u'Ẩ': u'a',
+              u'ẩ': u'a', u'Ǎ': u'a', u'ǎ': u'a', u'Å': u'a', u'å': u'a', u'Ǻ': u'a',
+              u'ǻ': u'a', u'Ä': u'a', u'ä': u'a', u'Ǟ': u'a', u'ǟ': u'a', u'Ã': u'a',
+              u'ã': u'a', u'Ȧ': u'a', u'ȧ': u'a', u'Ǡ': u'a', u'ǡ': u'a', u'Ą': u'a',
+              u'ą': u'a', u'Ā': u'a', u'ā': u'a', u'Ả': u'a', u'ả': u'a', u'Ȁ': u'a',
+              u'ȁ': u'a', u'Ȃ': u'a', u'ȃ': u'a', u'Ạ': u'a', u'ạ': u'a', u'Ặ': u'a',
+              u'ặ': u'a', u'Ậ': u'a', u'ậ': u'a', u'Ḁ': u'a', u'ḁ': u'a', u'Ⱥ': u'a',
+              u'ⱥ': u'a', u'Ǽ': u'a', u'ǽ': u'a', u'Ǣ': u'a', u'ǣ': u'a', u'Ḃ': u'b',
+              u'ḃ': u'b', u'Ḅ': u'b', u'ḅ': u'b', u'Ḇ': u'b', u'ḇ': u'b', u'Ƀ': u'b',
+              u'ƀ': u'b', u'ᵬ': u'b', u'Ɓ': u'b', u'ɓ': u'b', u'Ƃ': u'b', u'ƃ': u'b',
+              u'Ć': u'c', u'ć': u'c', u'Ĉ': u'c', u'ĉ': u'c', u'Č': u'c', u'č': u'c',
+              u'Ċ': u'c', u'ċ': u'c', u'Ç': u'c', u'ç': u'c', u'Ḉ': u'c', u'ḉ': u'c',
+              u'Ȼ': u'c', u'ȼ': u'c', u'Ƈ': u'c', u'ƈ': u'c', u'ɕ': u'c', u'Ď': u'd',
+              u'ď': u'd', u'Ḋ': u'd', u'ḋ': u'd', u'Ḑ': u'd', u'ḑ': u'd', u'Ḍ': u'd',
+              u'ḍ': u'd', u'Ḓ': u'd', u'ḓ': u'd', u'Ḏ': u'd', u'ḏ': u'd', u'Đ': u'd',
+              u'đ': u'd', u'ᵭ': u'd', u'Ɖ': u'd', u'ɖ': u'd', u'Ɗ': u'd', u'ɗ': u'd',
+              u'Ƌ': u'd', u'ƌ': u'd', u'ȡ': u'd', u'ð': u'd', u'É': u'e', u'Ə': u'e',
+              u'Ǝ': u'e', u'ǝ': u'e', u'é': u'e', u'È': u'e', u'è': u'e', u'Ĕ': u'e',
+              u'ĕ': u'e', u'Ê': u'e', u'ê': u'e', u'Ế': u'e', u'ế': u'e', u'Ề': u'e',
+              u'ề': u'e', u'Ễ': u'e', u'ễ': u'e', u'Ể': u'e', u'ể': u'e', u'Ě': u'e',
+              u'ě': u'e', u'Ë': u'e', u'ë': u'e', u'Ẽ': u'e', u'ẽ': u'e', u'Ė': u'e',
+              u'ė': u'e', u'Ȩ': u'e', u'ȩ': u'e', u'Ḝ': u'e', u'ḝ': u'e', u'Ę': u'e',
+              u'ę': u'e', u'Ē': u'e', u'ē': u'e', u'Ḗ': u'e', u'ḗ': u'e', u'Ḕ': u'e',
+              u'ḕ': u'e', u'Ẻ': u'e', u'ẻ': u'e', u'Ȅ': u'e', u'ȅ': u'e', u'Ȇ': u'e',
+              u'ȇ': u'e', u'Ẹ': u'e', u'ẹ': u'e', u'Ệ': u'e', u'ệ': u'e', u'Ḙ': u'e',
+              u'ḙ': u'e', u'Ḛ': u'e', u'ḛ': u'e', u'Ɇ': u'e', u'ɇ': u'e', u'ɚ': u'e',
+              u'ɝ': u'e', u'Ḟ': u'f', u'ḟ': u'f', u'ᵮ': u'f', u'Ƒ': u'f', u'ƒ': u'f',
+              u'Ǵ': u'g', u'ǵ': u'g', u'Ğ': u'g', u'ğ': u'g', u'Ĝ': u'g', u'ĝ': u'g',
+              u'Ǧ': u'g', u'ǧ': u'g', u'Ġ': u'g', u'ġ': u'g', u'Ģ': u'g', u'ģ': u'g',
+              u'Ḡ': u'g', u'ḡ': u'g', u'Ǥ': u'g', u'ǥ': u'g', u'Ɠ': u'g', u'ɠ': u'g',
+              u'Ĥ': u'h', u'ĥ': u'h', u'Ȟ': u'h', u'ȟ': u'h', u'Ḧ': u'h', u'ḧ': u'h',
+              u'Ḣ': u'h', u'ḣ': u'h', u'Ḩ': u'h', u'ḩ': u'h', u'Ḥ': u'h', u'ḥ': u'h',
+              u'Ḫ': u'h', u'ḫ': u'h', u'H': u'h', u'̱': u'h', u'ẖ': u'h', u'Ħ': u'h',
+              u'ħ': u'h', u'Ⱨ': u'h', u'ⱨ': u'h', u'Í': u'i', u'í': u'i', u'Ì': u'i',
+              u'ì': u'i', u'Ĭ': u'i', u'ĭ': u'i', u'Î': u'i', u'î': u'i', u'Ǐ': u'i',
+              u'ǐ': u'i', u'Ï': u'i', u'ï': u'i', u'Ḯ': u'i', u'ḯ': u'i', u'Ĩ': u'i',
+              u'ĩ': u'i', u'İ': u'i', u'i': u'i', u'Į': u'i', u'į': u'i', u'Ī': u'i',
+              u'ī': u'i', u'Ỉ': u'i', u'ỉ': u'i', u'Ȉ': u'i', u'ȉ': u'i', u'Ȋ': u'i',
+              u'ȋ': u'i', u'Ị': u'i', u'ị': u'i', u'Ḭ': u'i', u'ḭ': u'i', u'I': u'i',
+              u'ı': u'i', u'Ɨ': u'i', u'ɨ': u'i', u'Ĵ': u'j', u'ĵ': u'j', u'J': u'j',
+              u'̌': u'j', u'ǰ': u'j', u'ȷ': u'j', u'Ɉ': u'j', u'ɉ': u'j', u'ʝ': u'j',
+              u'ɟ': u'j', u'ʄ': u'j', u'Ḱ': u'k', u'ḱ': u'k', u'Ǩ': u'k', u'ǩ': u'k',
+              u'Ķ': u'k', u'ķ': u'k', u'Ḳ': u'k', u'ḳ': u'k', u'Ḵ': u'k', u'ḵ': u'k',
+              u'Ƙ': u'k', u'ƙ': u'k', u'Ⱪ': u'k', u'ⱪ': u'k', u'Ĺ': u'l', u'ĺ': u'l',
+              u'Ľ': u'l', u'ľ': u'l', u'Ļ': u'l', u'ļ': u'l', u'Ḷ': u'l', u'ḷ': u'l',
+              u'Ḹ': u'l', u'ḹ': u'l', u'Ḽ': u'l', u'ḽ': u'l', u'Ḻ': u'l', u'ḻ': u'l',
+              u'Ł': u'l', u'ł': u'l', u'Ł': u'l', u'̣': u'l', u'ł': u'l', u'̣': u'l',
+              u'Ŀ': u'l', u'ŀ': u'l', u'Ƚ': u'l', u'ƚ': u'l', u'Ⱡ': u'l', u'ⱡ': u'l',
+              u'Ɫ': u'l', u'ɫ': u'l', u'ɬ': u'l', u'ɭ': u'l', u'ȴ': u'l', u'Ḿ': u'm',
+              u'ḿ': u'm', u'Ṁ': u'm', u'ṁ': u'm', u'Ṃ': u'm', u'ṃ': u'm', u'ɱ': u'm',
+              u'Ń': u'n', u'ń': u'n', u'Ǹ': u'n', u'ǹ': u'n', u'Ň': u'n', u'ň': u'n',
+              u'Ñ': u'n', u'ñ': u'n', u'Ṅ': u'n', u'ṅ': u'n', u'Ņ': u'n', u'ņ': u'n',
+              u'Ṇ': u'n', u'ṇ': u'n', u'Ṋ': u'n', u'ṋ': u'n', u'Ṉ': u'n', u'ṉ': u'n',
+              u'Ɲ': u'n', u'ɲ': u'n', u'Ƞ': u'n', u'ƞ': u'n', u'ɳ': u'n', u'ȵ': u'n',
+              u'N': u'n', u'̈': u'n', u'n': u'n', u'̈': u'n', u'Ó': u'o', u'ó': u'o',
+              u'Ò': u'o', u'ò': u'o', u'Ŏ': u'o', u'ŏ': u'o', u'Ô': u'o', u'ô': u'o',
+              u'Ố': u'o', u'ố': u'o', u'Ồ': u'o', u'ồ': u'o', u'Ỗ': u'o', u'ỗ': u'o',
+              u'Ổ': u'o', u'ổ': u'o', u'Ǒ': u'o', u'ǒ': u'o', u'Ö': u'o', u'ö': u'o',
+              u'Ȫ': u'o', u'ȫ': u'o', u'Ő': u'o', u'ő': u'o', u'Õ': u'o', u'õ': u'o',
+              u'Ṍ': u'o', u'ṍ': u'o', u'Ṏ': u'o', u'ṏ': u'o', u'Ȭ': u'o', u'ȭ': u'o',
+              u'Ȯ': u'o', u'ȯ': u'o', u'Ȱ': u'o', u'ȱ': u'o', u'Ø': u'o', u'ø': u'o',
+              u'Ǿ': u'o', u'ǿ': u'o', u'Ǫ': u'o', u'ǫ': u'o', u'Ǭ': u'o', u'ǭ': u'o',
+              u'Ō': u'o', u'ō': u'o', u'Ṓ': u'o', u'ṓ': u'o', u'Ṑ': u'o', u'ṑ': u'o',
+              u'Ỏ': u'o', u'ỏ': u'o', u'Ȍ': u'o', u'ȍ': u'o', u'Ȏ': u'o', u'ȏ': u'o',
+              u'Ơ': u'o', u'ơ': u'o', u'Ớ': u'o', u'ớ': u'o', u'Ờ': u'o', u'ờ': u'o',
+              u'Ỡ': u'o', u'ỡ': u'o', u'Ở': u'o', u'ở': u'o', u'Ợ': u'o', u'ợ': u'o',
+              u'Ọ': u'o', u'ọ': u'o', u'Ộ': u'o', u'ộ': u'o', u'Ɵ': u'o', u'ɵ': u'o',
+              u'Ṕ': u'p', u'ṕ': u'p', u'Ṗ': u'p', u'ṗ': u'p', u'Ᵽ': u'p', u'Ƥ': u'p',
+              u'ƥ': u'p', u'P': u'p', u'̃': u'p', u'p': u'p', u'̃': u'p', u'ʠ': u'q',
+              u'Ɋ': u'q', u'ɋ': u'q', u'Ŕ': u'r', u'ŕ': u'r', u'Ř': u'r', u'ř': u'r',
+              u'Ṙ': u'r', u'ṙ': u'r', u'Ŗ': u'r', u'ŗ': u'r', u'Ȑ': u'r', u'ȑ': u'r',
+              u'Ȓ': u'r', u'ȓ': u'r', u'Ṛ': u'r', u'ṛ': u'r', u'Ṝ': u'r', u'ṝ': u'r',
+              u'Ṟ': u'r', u'ṟ': u'r', u'Ɍ': u'r', u'ɍ': u'r', u'ᵲ': u'r', u'ɼ': u'r',
+              u'Ɽ': u'r', u'ɽ': u'r', u'ɾ': u'r', u'ᵳ': u'r', u'ß': u's', u'Ś': u's',
+              u'ś': u's', u'Ṥ': u's', u'ṥ': u's', u'Ŝ': u's', u'ŝ': u's', u'Š': u's',
+              u'š': u's', u'Ṧ': u's', u'ṧ': u's', u'Ṡ': u's', u'ṡ': u's', u'ẛ': u's',
+              u'Ş': u's', u'ş': u's', u'Ṣ': u's', u'ṣ': u's', u'Ṩ': u's', u'ṩ': u's',
+              u'Ș': u's', u'ș': u's', u'ʂ': u's', u'S': u's', u'̩': u's', u's': u's',
+              u'̩': u's', u'Þ': u't', u'þ': u't', u'Ť': u't', u'ť': u't', u'T': u't',
+              u'̈': u't', u'ẗ': u't', u'Ṫ': u't', u'ṫ': u't', u'Ţ': u't', u'ţ': u't',
+              u'Ṭ': u't', u'ṭ': u't', u'Ț': u't', u'ț': u't', u'Ṱ': u't', u'ṱ': u't',
+              u'Ṯ': u't', u'ṯ': u't', u'Ŧ': u't', u'ŧ': u't', u'Ⱦ': u't', u'ⱦ': u't',
+              u'ᵵ': u't', u'ƫ': u't', u'Ƭ': u't', u'ƭ': u't', u'Ʈ': u't', u'ʈ': u't',
+              u'ȶ': u't', u'Ú': u'u', u'ú': u'u', u'Ù': u'u', u'ù': u'u', u'Ŭ': u'u',
+              u'ŭ': u'u', u'Û': u'u', u'û': u'u', u'Ǔ': u'u', u'ǔ': u'u', u'Ů': u'u',
+              u'ů': u'u', u'Ü': u'u', u'ü': u'u', u'Ǘ': u'u', u'ǘ': u'u', u'Ǜ': u'u',
+              u'ǜ': u'u', u'Ǚ': u'u', u'ǚ': u'u', u'Ǖ': u'u', u'ǖ': u'u', u'Ű': u'u',
+              u'ű': u'u', u'Ũ': u'u', u'ũ': u'u', u'Ṹ': u'u', u'ṹ': u'u', u'Ų': u'u',
+              u'ų': u'u', u'Ū': u'u', u'ū': u'u', u'Ṻ': u'u', u'ṻ': u'u', u'Ủ': u'u',
+              u'ủ': u'u', u'Ȕ': u'u', u'ȕ': u'u', u'Ȗ': u'u', u'ȗ': u'u', u'Ư': u'u',
+              u'ư': u'u', u'Ứ': u'u', u'ứ': u'u', u'Ừ': u'u', u'ừ': u'u', u'Ữ': u'u',
+              u'ữ': u'u', u'Ử': u'u', u'ử': u'u', u'Ự': u'u', u'ự': u'u', u'Ụ': u'u',
+              u'ụ': u'u', u'Ṳ': u'u', u'ṳ': u'u', u'Ṷ': u'u', u'ṷ': u'u', u'Ṵ': u'u',
+              u'ṵ': u'u', u'Ʉ': u'u', u'ʉ': u'u', u'Ṽ': u'v', u'ṽ': u'v', u'Ṿ': u'v',
+              u'ṿ': u'v', u'Ʋ': u'v', u'ʋ': u'v', u'Ẃ': u'w', u'ẃ': u'w', u'Ẁ': u'w',
+              u'ẁ': u'w', u'Ŵ': u'w', u'ŵ': u'w', u'W': u'w', u'̊': u'w', u'ẘ': u'w',
+              u'Ẅ': u'w', u'ẅ': u'w', u'Ẇ': u'w', u'ẇ': u'w', u'Ẉ': u'w', u'ẉ': u'w',
+              u'Ẍ': u'x', u'ẍ': u'x', u'Ẋ': u'x', u'ẋ': u'x', u'Ý': u'y', u'ý': u'y',
+              u'Ỳ': u'y', u'ỳ': u'y', u'Ŷ': u'y', u'ŷ': u'y', u'Y': u'y', u'̊': u'y',
+              u'ẙ': u'y', u'Ÿ': u'y', u'ÿ': u'y', u'Ỹ': u'y', u'ỹ': u'y', u'Ẏ': u'y',
+              u'ẏ': u'y', u'Ȳ': u'y', u'ȳ': u'y', u'Ỷ': u'y', u'ỷ': u'y', u'Ỵ': u'y',
+              u'ỵ': u'y', u'ʏ': u'y', u'Ɏ': u'y', u'ɏ': u'y', u'Ƴ': u'y', u'ƴ': u'y',
+              u'Ź': u'z', u'ź': u'z', u'Ẑ': u'z', u'ẑ': u'z', u'Ž': u'z', u'ž': u'z',
+              u'Ż': u'z', u'ż': u'z', u'Ẓ': u'z', u'ẓ': u'z', u'Ẕ': u'z', u'ẕ': u'z',
+              u'Ƶ': u'z', u'ƶ': u'z', u'Ȥ': u'z', u'ȥ': u'z', u'ʐ': u'z', u'ʑ': u'z',
+              u'Ⱬ': u'z', u'ⱬ': u'z', u'Ǯ': u'z', u'ǯ': u'z', u'ƺ': u'z', u'2': u'2',
+              u'6': u'6', u'B': u'B', u'F': u'F', u'J': u'J', u'N': u'N', u'R': u'R',
+              u'V': u'V', u'Z': u'Z', u'b': u'b', u'f': u'f', u'j': u'j', u'n': u'n',
+              u'r': u'r', u'v': u'v', u'z': u'z', u'1': u'1', u'5': u'5', u'9': u'9',
+              u'A': u'A', u'E': u'E', u'I': u'I', u'M': u'M', u'Q': u'Q', u'U': u'U',
+              u'Y': u'Y', u'a': u'a', u'e': u'e', u'i': u'i', u'm': u'm', u'q': u'q',
+              u'u': u'u', u'y': u'y', u'0': u'0', u'4': u'4', u'8': u'8', u'D': u'D',
+              u'H': u'H', u'L': u'L', u'P': u'P', u'T': u'T', u'X': u'X', u'd': u'd',
+              u'h': u'h', u'l': u'l', u'p': u'p', u't': u't', u'x': u'x', u'3': u'3',
+              u'7': u'7', u'C': u'C', u'G': u'G', u'K': u'K', u'O': u'O', u'S': u'S',
+              u'W': u'W', u'c': u'c', u'g': u'g', u'k': u'k', u'o': u'o', u's': u's',
+              u'w': u'w'}
+# The unicode.translate() method requires a dictionary mapping character
+# *ordinals* (integer code points) to replacement characters, so the keys
+# are converted with ord() here.
+accent_map = dict((ord(k), v) for k, v in accent_map.iteritems())
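+
+# Illustrative usage (not part of the original module): with the ord()-keyed
+# map above, accent folding a unicode string is a single translate() call,
+# e.g. (hypothetical input string):
+#
+#     >>> u"C\u00e9cile \u00c5str\u00f6m".lower().translate(accent_map)
+#     u'cecile astrom'
+#
+# The same map is the kind of charmap the module docstring's
+# whoosh.analysis.CharsetFilter expects for accent folding at analysis time.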
+
+
+# This Sphinx charset table taken from http://speeple.com/unicode-maps.txt
+
+default_charset = """
+##################################################
+# Latin
+# A
+U+00C0->a, U+00C1->a, U+00C2->a, U+00C3->a, U+00C4->a, U+00C5->a, U+00E0->a, U+00E1->a, U+00E2->a, U+00E3->a, U+00E4->a, U+00E5->a,
+U+0100->a, U+0101->a, U+0102->a, U+0103->a, U+010300->a, U+0104->a, U+0105->a, U+01CD->a, U+01CE->a, U+01DE->a, U+01DF->a, U+01E0->a,
+U+01E1->a, U+01FA->a, U+01FB->a, U+0200->a, U+0201->a, U+0202->a, U+0203->a, U+0226->a, U+0227->a, U+023A->a, U+0250->a, U+04D0->a,
+U+04D1->a, U+1D2C->a, U+1D43->a, U+1D44->a, U+1D8F->a, U+1E00->a, U+1E01->a, U+1E9A->a, U+1EA0->a, U+1EA1->a, U+1EA2->a, U+1EA3->a,
+U+1EA4->a, U+1EA5->a, U+1EA6->a, U+1EA7->a, U+1EA8->a, U+1EA9->a, U+1EAA->a, U+1EAB->a, U+1EAC->a, U+1EAD->a, U+1EAE->a, U+1EAF->a,
+U+1EB0->a, U+1EB1->a, U+1EB2->a, U+1EB3->a, U+1EB4->a, U+1EB5->a, U+1EB6->a, U+1EB7->a, U+2090->a, U+2C65->a
+
+# B
+U+0180->b, U+0181->b, U+0182->b, U+0183->b, U+0243->b, U+0253->b, U+0299->b, U+16D2->b, U+1D03->b, U+1D2E->b, U+1D2F->b, U+1D47->b,
+U+1D6C->b, U+1D80->b, U+1E02->b, U+1E03->b, U+1E04->b, U+1E05->b, U+1E06->b, U+1E07->b
+
+# C
+U+00C7->c, U+00E7->c, U+0106->c, U+0107->c, U+0108->c, U+0109->c, U+010A->c, U+010B->c, U+010C->c, U+010D->c, U+0187->c, U+0188->c,
+U+023B->c, U+023C->c, U+0255->c, U+0297->c, U+1D9C->c, U+1D9D->c, U+1E08->c, U+1E09->c, U+212D->c, U+2184->c
+
+# D
+U+010E->d, U+010F->d, U+0110->d, U+0111->d, U+0189->d, U+018A->d, U+018B->d, U+018C->d, U+01C5->d, U+01F2->d, U+0221->d, U+0256->d,
+U+0257->d, U+1D05->d, U+1D30->d, U+1D48->d, U+1D6D->d, U+1D81->d, U+1D91->d, U+1E0A->d, U+1E0B->d, U+1E0C->d, U+1E0D->d, U+1E0E->d,
+U+1E0F->d, U+1E10->d, U+1E11->d, U+1E12->d, U+1E13->d
+
+# E
+U+00C8->e, U+00C9->e, U+00CA->e, U+00CB->e, U+00E8->e, U+00E9->e, U+00EA->e, U+00EB->e, U+0112->e, U+0113->e, U+0114->e, U+0115->e,
+U+0116->e, U+0117->e, U+0118->e, U+0119->e, U+011A->e, U+011B->e, U+018E->e, U+0190->e, U+01DD->e, U+0204->e, U+0205->e, U+0206->e,
+U+0207->e, U+0228->e, U+0229->e, U+0246->e, U+0247->e, U+0258->e, U+025B->e, U+025C->e, U+025D->e, U+025E->e, U+029A->e, U+1D07->e,
+U+1D08->e, U+1D31->e, U+1D32->e, U+1D49->e, U+1D4B->e, U+1D4C->e, U+1D92->e, U+1D93->e, U+1D94->e, U+1D9F->e, U+1E14->e, U+1E15->e,
+U+1E16->e, U+1E17->e, U+1E18->e, U+1E19->e, U+1E1A->e, U+1E1B->e, U+1E1C->e, U+1E1D->e, U+1EB8->e, U+1EB9->e, U+1EBA->e, U+1EBB->e,
+U+1EBC->e, U+1EBD->e, U+1EBE->e, U+1EBF->e, U+1EC0->e, U+1EC1->e, U+1EC2->e, U+1EC3->e, U+1EC4->e, U+1EC5->e, U+1EC6->e, U+1EC7->e,
+U+2091->e
+
+# F
+U+0191->f, U+0192->f, U+1D6E->f, U+1D82->f, U+1DA0->f, U+1E1E->f, U+1E1F->f
+
+# G
+U+011C->g, U+011D->g, U+011E->g, U+011F->g, U+0120->g, U+0121->g, U+0122->g, U+0123->g, U+0193->g, U+01E4->g, U+01E5->g, U+01E6->g,
+U+01E7->g, U+01F4->g, U+01F5->g, U+0260->g, U+0261->g, U+0262->g, U+029B->g, U+1D33->g, U+1D4D->g, U+1D77->g, U+1D79->g, U+1D83->g,
+U+1DA2->g, U+1E20->g, U+1E21->g
+
+# H
+U+0124->h, U+0125->h, U+0126->h, U+0127->h, U+021E->h, U+021F->h, U+0265->h, U+0266->h, U+029C->h, U+02AE->h, U+02AF->h, U+02B0->h,
+U+02B1->h, U+1D34->h, U+1DA3->h, U+1E22->h, U+1E23->h, U+1E24->h, U+1E25->h, U+1E26->h, U+1E27->h, U+1E28->h, U+1E29->h, U+1E2A->h,
+U+1E2B->h, U+1E96->h, U+210C->h, U+2C67->h, U+2C68->h, U+2C75->h, U+2C76->h
+
+# I
+U+00CC->i, U+00CD->i, U+00CE->i, U+00CF->i, U+00EC->i, U+00ED->i, U+00EE->i, U+00EF->i, U+010309->i, U+0128->i, U+0129->i, U+012A->i,
+U+012B->i, U+012C->i, U+012D->i, U+012E->i, U+012F->i, U+0130->i, U+0131->i, U+0197->i, U+01CF->i, U+01D0->i, U+0208->i, U+0209->i,
+U+020A->i, U+020B->i, U+0268->i, U+026A->i, U+040D->i, U+0418->i, U+0419->i, U+0438->i, U+0439->i, U+0456->i, U+1D09->i, U+1D35->i,
+U+1D4E->i, U+1D62->i, U+1D7B->i, U+1D96->i, U+1DA4->i, U+1DA6->i, U+1DA7->i, U+1E2C->i, U+1E2D->i, U+1E2E->i, U+1E2F->i, U+1EC8->i,
+U+1EC9->i, U+1ECA->i, U+1ECB->i, U+2071->i, U+2111->i
+
+# J
+U+0134->j, U+0135->j, U+01C8->j, U+01CB->j, U+01F0->j, U+0237->j, U+0248->j, U+0249->j, U+025F->j, U+0284->j, U+029D->j, U+02B2->j,
+U+1D0A->j, U+1D36->j, U+1DA1->j, U+1DA8->j
+
+# K
+U+0136->k, U+0137->k, U+0198->k, U+0199->k, U+01E8->k, U+01E9->k, U+029E->k, U+1D0B->k, U+1D37->k, U+1D4F->k, U+1D84->k, U+1E30->k,
+U+1E31->k, U+1E32->k, U+1E33->k, U+1E34->k, U+1E35->k, U+2C69->k, U+2C6A->k
+
+# L
+U+0139->l, U+013A->l, U+013B->l, U+013C->l, U+013D->l, U+013E->l, U+013F->l, U+0140->l, U+0141->l, U+0142->l, U+019A->l, U+01C8->l,
+U+0234->l, U+023D->l, U+026B->l, U+026C->l, U+026D->l, U+029F->l, U+02E1->l, U+1D0C->l, U+1D38->l, U+1D85->l, U+1DA9->l, U+1DAA->l,
+U+1DAB->l, U+1E36->l, U+1E37->l, U+1E38->l, U+1E39->l, U+1E3A->l, U+1E3B->l, U+1E3C->l, U+1E3D->l, U+2C60->l, U+2C61->l, U+2C62->l
+
+# M
+U+019C->m, U+026F->m, U+0270->m, U+0271->m, U+1D0D->m, U+1D1F->m, U+1D39->m, U+1D50->m, U+1D5A->m, U+1D6F->m, U+1D86->m, U+1DAC->m,
+U+1DAD->m, U+1E3E->m, U+1E3F->m, U+1E40->m, U+1E41->m, U+1E42->m, U+1E43->m
+
+# N
+U+00D1->n, U+00F1->n, U+0143->n, U+0144->n, U+0145->n, U+0146->n, U+0147->n, U+0148->n, U+0149->n, U+019D->n, U+019E->n, U+01CB->n,
+U+01F8->n, U+01F9->n, U+0220->n, U+0235->n, U+0272->n, U+0273->n, U+0274->n, U+1D0E->n, U+1D3A->n, U+1D3B->n, U+1D70->n, U+1D87->n,
+U+1DAE->n, U+1DAF->n, U+1DB0->n, U+1E44->n, U+1E45->n, U+1E46->n, U+1E47->n, U+1E48->n, U+1E49->n, U+1E4A->n, U+1E4B->n, U+207F->n
+
+# O
+U+00D2->o, U+00D3->o, U+00D4->o, U+00D5->o, U+00D6->o, U+00D8->o, U+00F2->o, U+00F3->o, U+00F4->o, U+00F5->o, U+00F6->o, U+00F8->o,
+U+01030F->o, U+014C->o, U+014D->o, U+014E->o, U+014F->o, U+0150->o, U+0151->o, U+0186->o, U+019F->o, U+01A0->o, U+01A1->o, U+01D1->o,
+U+01D2->o, U+01EA->o, U+01EB->o, U+01EC->o, U+01ED->o, U+01FE->o, U+01FF->o, U+020C->o, U+020D->o, U+020E->o, U+020F->o, U+022A->o,
+U+022B->o, U+022C->o, U+022D->o, U+022E->o, U+022F->o, U+0230->o, U+0231->o, U+0254->o, U+0275->o, U+043E->o, U+04E6->o, U+04E7->o,
+U+04E8->o, U+04E9->o, U+04EA->o, U+04EB->o, U+1D0F->o, U+1D10->o, U+1D11->o, U+1D12->o, U+1D13->o, U+1D16->o, U+1D17->o, U+1D3C->o,
+U+1D52->o, U+1D53->o, U+1D54->o, U+1D55->o, U+1D97->o, U+1DB1->o, U+1E4C->o, U+1E4D->o, U+1E4E->o, U+1E4F->o, U+1E50->o, U+1E51->o,
+U+1E52->o, U+1E53->o, U+1ECC->o, U+1ECD->o, U+1ECE->o, U+1ECF->o, U+1ED0->o, U+1ED1->o, U+1ED2->o, U+1ED3->o, U+1ED4->o, U+1ED5->o,
+U+1ED6->o, U+1ED7->o, U+1ED8->o, U+1ED9->o, U+1EDA->o, U+1EDB->o, U+1EDC->o, U+1EDD->o, U+1EDE->o, U+1EDF->o, U+1EE0->o, U+1EE1->o,
+U+1EE2->o, U+1EE3->o, U+2092->o, U+2C9E->o, U+2C9F->o
+
+# P
+U+01A4->p, U+01A5->p, U+1D18->p, U+1D3E->p, U+1D56->p, U+1D71->p, U+1D7D->p, U+1D88->p, U+1E54->p, U+1E55->p, U+1E56->p, U+1E57->p,
+U+2C63->p
+
+# Q
+U+024A->q, U+024B->q, U+02A0->q
+
+# R
+U+0154->r, U+0155->r, U+0156->r, U+0157->r, U+0158->r, U+0159->r, U+0210->r, U+0211->r, U+0212->r, U+0213->r, U+024C->r, U+024D->r,
+U+0279->r, U+027A->r, U+027B->r, U+027C->r, U+027D->r, U+027E->r, U+027F->r, U+0280->r, U+0281->r, U+02B3->r, U+02B4->r, U+02B5->r,
+U+02B6->r, U+1D19->r, U+1D1A->r, U+1D3F->r, U+1D63->r, U+1D72->r, U+1D73->r, U+1D89->r, U+1DCA->r, U+1E58->r, U+1E59->r, U+1E5A->r,
+U+1E5B->r, U+1E5C->r, U+1E5D->r, U+1E5E->r, U+1E5F->r, U+211C->r, U+2C64->r
+
+# S
+U+00DF->s, U+015A->s, U+015B->s, U+015C->s, U+015D->s, U+015E->s, U+015F->s, U+0160->s, U+0161->s, U+017F->s, U+0218->s, U+0219->s,
+U+023F->s, U+0282->s, U+02E2->s, U+1D74->s, U+1D8A->s, U+1DB3->s, U+1E60->s, U+1E61->s, U+1E62->s, U+1E63->s, U+1E64->s, U+1E65->s,
+U+1E66->s, U+1E67->s, U+1E68->s, U+1E69->s, U+1E9B->s
+
+# T
+U+0162->t, U+0163->t, U+0164->t, U+0165->t, U+0166->t, U+0167->t, U+01AB->t, U+01AC->t, U+01AD->t, U+01AE->t, U+021A->t, U+021B->t,
+U+0236->t, U+023E->t, U+0287->t, U+0288->t, U+1D1B->t, U+1D40->t, U+1D57->t, U+1D75->t, U+1DB5->t, U+1E6A->t, U+1E6B->t, U+1E6C->t,
+U+1E6D->t, U+1E6E->t, U+1E6F->t, U+1E70->t, U+1E71->t, U+1E97->t, U+2C66->t
+
+# U
+U+00D9->u, U+00DA->u, U+00DB->u, U+00DC->u, U+00F9->u, U+00FA->u, U+00FB->u, U+00FC->u, U+010316->u, U+0168->u, U+0169->u, U+016A->u,
+U+016B->u, U+016C->u, U+016D->u, U+016E->u, U+016F->u, U+0170->u, U+0171->u, U+0172->u, U+0173->u, U+01AF->u, U+01B0->u, U+01D3->u,
+U+01D4->u, U+01D5->u, U+01D6->u, U+01D7->u, U+01D8->u, U+01D9->u, U+01DA->u, U+01DB->u, U+01DC->u, U+0214->u, U+0215->u, U+0216->u,
+U+0217->u, U+0244->u, U+0289->u, U+1D1C->u, U+1D1D->u, U+1D1E->u, U+1D41->u, U+1D58->u, U+1D59->u, U+1D64->u, U+1D7E->u, U+1D99->u,
+U+1DB6->u, U+1DB8->u, U+1E72->u, U+1E73->u, U+1E74->u, U+1E75->u, U+1E76->u, U+1E77->u, U+1E78->u, U+1E79->u, U+1E7A->u, U+1E7B->u,
+U+1EE4->u, U+1EE5->u, U+1EE6->u, U+1EE7->u, U+1EE8->u, U+1EE9->u, U+1EEA->u, U+1EEB->u, U+1EEC->u, U+1EED->u, U+1EEE->u, U+1EEF->u,
+U+1EF0->u, U+1EF1->u
+
+# V
+U+01B2->v, U+0245->v, U+028B->v, U+028C->v, U+1D20->v, U+1D5B->v, U+1D65->v, U+1D8C->v, U+1DB9->v, U+1DBA->v, U+1E7C->v, U+1E7D->v,
+U+1E7E->v, U+1E7F->v, U+2C74->v
+
+# W
+U+0174->w, U+0175->w, U+028D->w, U+02B7->w, U+1D21->w, U+1D42->w, U+1E80->w, U+1E81->w, U+1E82->w, U+1E83->w, U+1E84->w, U+1E85->w,
+U+1E86->w, U+1E87->w, U+1E88->w, U+1E89->w, U+1E98->w
+
+# X
+U+02E3->x, U+1D8D->x, U+1E8A->x, U+1E8B->x, U+1E8C->x, U+1E8D->x, U+2093->x
+
+# Y
+U+00DD->y, U+00FD->y, U+00FF->y, U+0176->y, U+0177->y, U+0178->y, U+01B3->y, U+01B4->y, U+0232->y, U+0233->y, U+024E->y, U+024F->y,
+U+028E->y, U+028F->y, U+02B8->y, U+1E8E->y, U+1E8F->y, U+1E99->y, U+1EF2->y, U+1EF3->y, U+1EF4->y, U+1EF5->y, U+1EF6->y, U+1EF7->y,
+U+1EF8->y, U+1EF9->y
+
+# Z
+U+0179->z, U+017A->z, U+017B->z, U+017C->z, U+017D->z, U+017E->z, U+01B5->z, U+01B6->z, U+0224->z, U+0225->z, U+0240->z, U+0290->z,
+U+0291->z, U+1D22->z, U+1D76->z, U+1D8E->z, U+1DBB->z, U+1DBC->z, U+1DBD->z, U+1E90->z, U+1E91->z, U+1E92->z, U+1E93->z, U+1E94->z,
+U+1E95->z, U+2128->z, U+2C6B->z, U+2C6C->z
+
+# Latin Extras:
+U+00C6->U+00E6, U+01E2->U+00E6, U+01E3->U+00E6, U+01FC->U+00E6, U+01FD->U+00E6, U+1D01->U+00E6, U+1D02->U+00E6, U+1D2D->U+00E6,
+U+1D46->U+00E6, U+00E6
+
+##################################################
+# Arabic
+U+0622->U+0627, U+0623->U+0627, U+0624->U+0648, U+0625->U+0627, U+0626->U+064A, U+06C0->U+06D5, U+06C2->U+06C1, U+06D3->U+06D2,
+U+FB50->U+0671, U+FB51->U+0671, U+FB52->U+067B, U+FB53->U+067B, U+FB54->U+067B, U+FB56->U+067E, U+FB57->U+067E, U+FB58->U+067E,
+U+FB5A->U+0680, U+FB5B->U+0680, U+FB5C->U+0680, U+FB5E->U+067A, U+FB5F->U+067A, U+FB60->U+067A, U+FB62->U+067F, U+FB63->U+067F,
+U+FB64->U+067F, U+FB66->U+0679, U+FB67->U+0679, U+FB68->U+0679, U+FB6A->U+06A4, U+FB6B->U+06A4, U+FB6C->U+06A4, U+FB6E->U+06A6,
+U+FB6F->U+06A6, U+FB70->U+06A6, U+FB72->U+0684, U+FB73->U+0684, U+FB74->U+0684, U+FB76->U+0683, U+FB77->U+0683, U+FB78->U+0683,
+U+FB7A->U+0686, U+FB7B->U+0686, U+FB7C->U+0686, U+FB7E->U+0687, U+FB7F->U+0687, U+FB80->U+0687, U+FB82->U+068D, U+FB83->U+068D,
+U+FB84->U+068C, U+FB85->U+068C, U+FB86->U+068E, U+FB87->U+068E, U+FB88->U+0688, U+FB89->U+0688, U+FB8A->U+0698, U+FB8B->U+0698,
+U+FB8C->U+0691, U+FB8D->U+0691, U+FB8E->U+06A9, U+FB8F->U+06A9, U+FB90->U+06A9, U+FB92->U+06AF, U+FB93->U+06AF, U+FB94->U+06AF,
+U+FB96->U+06B3, U+FB97->U+06B3, U+FB98->U+06B3, U+FB9A->U+06B1, U+FB9B->U+06B1, U+FB9C->U+06B1, U+FB9E->U+06BA, U+FB9F->U+06BA,
+U+FBA0->U+06BB, U+FBA1->U+06BB, U+FBA2->U+06BB, U+FBA4->U+06C0, U+FBA5->U+06C0, U+FBA6->U+06C1, U+FBA7->U+06C1, U+FBA8->U+06C1,
+U+FBAA->U+06BE, U+FBAB->U+06BE, U+FBAC->U+06BE, U+FBAE->U+06D2, U+FBAF->U+06D2, U+FBB0->U+06D3, U+FBB1->U+06D3, U+FBD3->U+06AD,
+U+FBD4->U+06AD, U+FBD5->U+06AD, U+FBD7->U+06C7, U+FBD8->U+06C7, U+FBD9->U+06C6, U+FBDA->U+06C6, U+FBDB->U+06C8, U+FBDC->U+06C8,
+U+FBDD->U+0677, U+FBDE->U+06CB, U+FBDF->U+06CB, U+FBE0->U+06C5, U+FBE1->U+06C5, U+FBE2->U+06C9, U+FBE3->U+06C9, U+FBE4->U+06D0,
+U+FBE5->U+06D0, U+FBE6->U+06D0, U+FBE8->U+0649, U+FBFC->U+06CC, U+FBFD->U+06CC, U+FBFE->U+06CC, U+0621, U+0627..U+063A, U+0641..U+064A,
+U+0660..U+0669, U+066E, U+066F, U+0671..U+06BF, U+06C1, U+06C3..U+06D2, U+06D5, U+06EE..U+06FC, U+06FF, U+0750..U+076D, U+FB55, U+FB59,
+U+FB5D, U+FB61, U+FB65, U+FB69, U+FB6D, U+FB71, U+FB75, U+FB79, U+FB7D, U+FB81, U+FB91, U+FB95, U+FB99, U+FB9D, U+FBA3, U+FBA9, U+FBAD,
+U+FBD6, U+FBE7, U+FBE9, U+FBFF
+
+##################################################
+# Armenian
+U+0531..U+0556->U+0561..U+0586, U+0561..U+0586, U+0587
+
+#################################################
+# Bengali
+U+09DC->U+09A1, U+09DD->U+09A2, U+09DF->U+09AF, U+09F0->U+09AC, U+09F1->U+09AC, U+0985..U+0990, U+0993..U+09B0, U+09B2, U+09B6..U+09B9,
+U+09CE, U+09E0, U+09E1, U+09E6..U+09EF
+
+#################################################
+# CJK*
+U+F900->U+8C48, U+F901->U+66F4, U+F902->U+8ECA, U+F903->U+8CC8, U+F904->U+6ED1, U+F905->U+4E32, U+F906->U+53E5, U+F907->U+9F9C,
+U+F908->U+9F9C, U+F909->U+5951, U+F90A->U+91D1, U+F90B->U+5587, U+F90C->U+5948, U+F90D->U+61F6, U+F90E->U+7669, U+F90F->U+7F85,
+U+F910->U+863F, U+F911->U+87BA, U+F912->U+88F8, U+F913->U+908F, U+F914->U+6A02, U+F915->U+6D1B, U+F916->U+70D9, U+F917->U+73DE,
+U+F918->U+843D, U+F919->U+916A, U+F91A->U+99F1, U+F91B->U+4E82, U+F91C->U+5375, U+F91D->U+6B04, U+F91E->U+721B, U+F91F->U+862D,
+U+F920->U+9E1E, U+F921->U+5D50, U+F922->U+6FEB, U+F923->U+85CD, U+F924->U+8964, U+F925->U+62C9, U+F926->U+81D8, U+F927->U+881F,
+U+F928->U+5ECA, U+F929->U+6717, U+F92A->U+6D6A, U+F92B->U+72FC, U+F92C->U+90CE, U+F92D->U+4F86, U+F92E->U+51B7, U+F92F->U+52DE,
+U+F930->U+64C4, U+F931->U+6AD3, U+F932->U+7210, U+F933->U+76E7, U+F934->U+8001, U+F935->U+8606, U+F936->U+865C, U+F937->U+8DEF,
+U+F938->U+9732, U+F939->U+9B6F, U+F93A->U+9DFA, U+F93B->U+788C, U+F93C->U+797F, U+F93D->U+7DA0, U+F93E->U+83C9, U+F93F->U+9304,
+U+F940->U+9E7F, U+F941->U+8AD6, U+F942->U+58DF, U+F943->U+5F04, U+F944->U+7C60, U+F945->U+807E, U+F946->U+7262, U+F947->U+78CA,
+U+F948->U+8CC2, U+F949->U+96F7, U+F94A->U+58D8, U+F94B->U+5C62, U+F94C->U+6A13, U+F94D->U+6DDA, U+F94E->U+6F0F, U+F94F->U+7D2F,
+U+F950->U+7E37, U+F951->U+964B, U+F952->U+52D2, U+F953->U+808B, U+F954->U+51DC, U+F955->U+51CC, U+F956->U+7A1C, U+F957->U+7DBE,
+U+F958->U+83F1, U+F959->U+9675, U+F95A->U+8B80, U+F95B->U+62CF, U+F95C->U+6A02, U+F95D->U+8AFE, U+F95E->U+4E39, U+F95F->U+5BE7,
+U+F960->U+6012, U+F961->U+7387, U+F962->U+7570, U+F963->U+5317, U+F964->U+78FB, U+F965->U+4FBF, U+F966->U+5FA9, U+F967->U+4E0D,
+U+F968->U+6CCC, U+F969->U+6578, U+F96A->U+7D22, U+F96B->U+53C3, U+F96C->U+585E, U+F96D->U+7701, U+F96E->U+8449, U+F96F->U+8AAA,
+U+F970->U+6BBA, U+F971->U+8FB0, U+F972->U+6C88, U+F973->U+62FE, U+F974->U+82E5, U+F975->U+63A0, U+F976->U+7565, U+F977->U+4EAE,
+U+F978->U+5169, U+F979->U+51C9, U+F97A->U+6881, U+F97B->U+7CE7, U+F97C->U+826F, U+F97D->U+8AD2, U+F97E->U+91CF, U+F97F->U+52F5,
+U+F980->U+5442, U+F981->U+5973, U+F982->U+5EEC, U+F983->U+65C5, U+F984->U+6FFE, U+F985->U+792A, U+F986->U+95AD, U+F987->U+9A6A,
+U+F988->U+9E97, U+F989->U+9ECE, U+F98A->U+529B, U+F98B->U+66C6, U+F98C->U+6B77, U+F98D->U+8F62, U+F98E->U+5E74, U+F98F->U+6190,
+U+F990->U+6200, U+F991->U+649A, U+F992->U+6F23, U+F993->U+7149, U+F994->U+7489, U+F995->U+79CA, U+F996->U+7DF4, U+F997->U+806F,
+U+F998->U+8F26, U+F999->U+84EE, U+F99A->U+9023, U+F99B->U+934A, U+F99C->U+5217, U+F99D->U+52A3, U+F99E->U+54BD, U+F99F->U+70C8,
+U+F9A0->U+88C2, U+F9A1->U+8AAA, U+F9A2->U+5EC9, U+F9A3->U+5FF5, U+F9A4->U+637B, U+F9A5->U+6BAE, U+F9A6->U+7C3E, U+F9A7->U+7375,
+U+F9A8->U+4EE4, U+F9A9->U+56F9, U+F9AA->U+5BE7, U+F9AB->U+5DBA, U+F9AC->U+601C, U+F9AD->U+73B2, U+F9AE->U+7469, U+F9AF->U+7F9A,
+U+F9B0->U+8046, U+F9B1->U+9234, U+F9B2->U+96F6, U+F9B3->U+9748, U+F9B4->U+9818, U+F9B5->U+4F8B, U+F9B6->U+79AE, U+F9B7->U+91B4,
+U+F9B8->U+96B8, U+F9B9->U+60E1, U+F9BA->U+4E86, U+F9BB->U+50DA, U+F9BC->U+5BEE, U+F9BD->U+5C3F, U+F9BE->U+6599, U+F9BF->U+6A02,
+U+F9C0->U+71CE, U+F9C1->U+7642, U+F9C2->U+84FC, U+F9C3->U+907C, U+F9C4->U+9F8D, U+F9C5->U+6688, U+F9C6->U+962E, U+F9C7->U+5289,
+U+F9C8->U+677B, U+F9C9->U+67F3, U+F9CA->U+6D41, U+F9CB->U+6E9C, U+F9CC->U+7409, U+F9CD->U+7559, U+F9CE->U+786B, U+F9CF->U+7D10,
+U+F9D0->U+985E, U+F9D1->U+516D, U+F9D2->U+622E, U+F9D3->U+9678, U+F9D4->U+502B, U+F9D5->U+5D19, U+F9D6->U+6DEA, U+F9D7->U+8F2A,
+U+F9D8->U+5F8B, U+F9D9->U+6144, U+F9DA->U+6817, U+F9DB->U+7387, U+F9DC->U+9686, U+F9DD->U+5229, U+F9DE->U+540F, U+F9DF->U+5C65,
+U+F9E0->U+6613, U+F9E1->U+674E, U+F9E2->U+68A8, U+F9E3->U+6CE5, U+F9E4->U+7406, U+F9E5->U+75E2, U+F9E6->U+7F79, U+F9E7->U+88CF,
+U+F9E8->U+88E1, U+F9E9->U+91CC, U+F9EA->U+96E2, U+F9EB->U+533F, U+F9EC->U+6EBA, U+F9ED->U+541D, U+F9EE->U+71D0, U+F9EF->U+7498,
+U+F9F0->U+85FA, U+F9F1->U+96A3, U+F9F2->U+9C57, U+F9F3->U+9E9F, U+F9F4->U+6797, U+F9F5->U+6DCB, U+F9F6->U+81E8, U+F9F7->U+7ACB,
+U+F9F8->U+7B20, U+F9F9->U+7C92, U+F9FA->U+72C0, U+F9FB->U+7099, U+F9FC->U+8B58, U+F9FD->U+4EC0, U+F9FE->U+8336, U+F9FF->U+523A,
+U+FA00->U+5207, U+FA01->U+5EA6, U+FA02->U+62D3, U+FA03->U+7CD6, U+FA04->U+5B85, U+FA05->U+6D1E, U+FA06->U+66B4, U+FA07->U+8F3B,
+U+FA08->U+884C, U+FA09->U+964D, U+FA0A->U+898B, U+FA0B->U+5ED3, U+FA0C->U+5140, U+FA0D->U+55C0, U+FA10->U+585A, U+FA12->U+6674,
+U+FA15->U+51DE, U+FA16->U+732A, U+FA17->U+76CA, U+FA18->U+793C, U+FA19->U+795E, U+FA1A->U+7965, U+FA1B->U+798F, U+FA1C->U+9756,
+U+FA1D->U+7CBE, U+FA1E->U+7FBD, U+FA20->U+8612, U+FA22->U+8AF8, U+FA25->U+9038, U+FA26->U+90FD, U+FA2A->U+98EF, U+FA2B->U+98FC,
+U+FA2C->U+9928, U+FA2D->U+9DB4, U+FA30->U+4FAE, U+FA31->U+50E7, U+FA32->U+514D, U+FA33->U+52C9, U+FA34->U+52E4, U+FA35->U+5351,
+U+FA36->U+559D, U+FA37->U+5606, U+FA38->U+5668, U+FA39->U+5840, U+FA3A->U+58A8, U+FA3B->U+5C64, U+FA3C->U+5C6E, U+FA3D->U+6094,
+U+FA3E->U+6168, U+FA3F->U+618E, U+FA40->U+61F2, U+FA41->U+654F, U+FA42->U+65E2, U+FA43->U+6691, U+FA44->U+6885, U+FA45->U+6D77,
+U+FA46->U+6E1A, U+FA47->U+6F22, U+FA48->U+716E, U+FA49->U+722B, U+FA4A->U+7422, U+FA4B->U+7891, U+FA4C->U+793E, U+FA4D->U+7949,
+U+FA4E->U+7948, U+FA4F->U+7950, U+FA50->U+7956, U+FA51->U+795D, U+FA52->U+798D, U+FA53->U+798E, U+FA54->U+7A40, U+FA55->U+7A81,
+U+FA56->U+7BC0, U+FA57->U+7DF4, U+FA58->U+7E09, U+FA59->U+7E41, U+FA5A->U+7F72, U+FA5B->U+8005, U+FA5C->U+81ED, U+FA5D->U+8279,
+U+FA5E->U+8279, U+FA5F->U+8457, U+FA60->U+8910, U+FA61->U+8996, U+FA62->U+8B01, U+FA63->U+8B39, U+FA64->U+8CD3, U+FA65->U+8D08,
+U+FA66->U+8FB6, U+FA67->U+9038, U+FA68->U+96E3, U+FA69->U+97FF, U+FA6A->U+983B, U+FA70->U+4E26, U+FA71->U+51B5, U+FA72->U+5168,
+U+FA73->U+4F80, U+FA74->U+5145, U+FA75->U+5180, U+FA76->U+52C7, U+FA77->U+52FA, U+FA78->U+559D, U+FA79->U+5555, U+FA7A->U+5599,
+U+FA7B->U+55E2, U+FA7C->U+585A, U+FA7D->U+58B3, U+FA7E->U+5944, U+FA7F->U+5954, U+FA80->U+5A62, U+FA81->U+5B28, U+FA82->U+5ED2,
+U+FA83->U+5ED9, U+FA84->U+5F69, U+FA85->U+5FAD, U+FA86->U+60D8, U+FA87->U+614E, U+FA88->U+6108, U+FA89->U+618E, U+FA8A->U+6160,
+U+FA8B->U+61F2, U+FA8C->U+6234, U+FA8D->U+63C4, U+FA8E->U+641C, U+FA8F->U+6452, U+FA90->U+6556, U+FA91->U+6674, U+FA92->U+6717,
+U+FA93->U+671B, U+FA94->U+6756, U+FA95->U+6B79, U+FA96->U+6BBA, U+FA97->U+6D41, U+FA98->U+6EDB, U+FA99->U+6ECB, U+FA9A->U+6F22,
+U+FA9B->U+701E, U+FA9C->U+716E, U+FA9D->U+77A7, U+FA9E->U+7235, U+FA9F->U+72AF, U+FAA0->U+732A, U+FAA1->U+7471, U+FAA2->U+7506,
+U+FAA3->U+753B, U+FAA4->U+761D, U+FAA5->U+761F, U+FAA6->U+76CA, U+FAA7->U+76DB, U+FAA8->U+76F4, U+FAA9->U+774A, U+FAAA->U+7740,
+U+FAAB->U+78CC, U+FAAC->U+7AB1, U+FAAD->U+7BC0, U+FAAE->U+7C7B, U+FAAF->U+7D5B, U+FAB0->U+7DF4, U+FAB1->U+7F3E, U+FAB2->U+8005,
+U+FAB3->U+8352, U+FAB4->U+83EF, U+FAB5->U+8779, U+FAB6->U+8941, U+FAB7->U+8986, U+FAB8->U+8996, U+FAB9->U+8ABF, U+FABA->U+8AF8,
+U+FABB->U+8ACB, U+FABC->U+8B01, U+FABD->U+8AFE, U+FABE->U+8AED, U+FABF->U+8B39, U+FAC0->U+8B8A, U+FAC1->U+8D08, U+FAC2->U+8F38,
+U+FAC3->U+9072, U+FAC4->U+9199, U+FAC5->U+9276, U+FAC6->U+967C, U+FAC7->U+96E3, U+FAC8->U+9756, U+FAC9->U+97DB, U+FACA->U+97FF,
+U+FACB->U+980B, U+FACC->U+983B, U+FACD->U+9B12, U+FACE->U+9F9C, U+FACF->U+2284A, U+FAD0->U+22844, U+FAD1->U+233D5, U+FAD2->U+3B9D,
+U+FAD3->U+4018, U+FAD4->U+4039, U+FAD5->U+25249, U+FAD6->U+25CD0, U+FAD7->U+27ED3, U+FAD8->U+9F43, U+FAD9->U+9F8E, U+2F800->U+4E3D,
+U+2F801->U+4E38, U+2F802->U+4E41, U+2F803->U+20122, U+2F804->U+4F60, U+2F805->U+4FAE, U+2F806->U+4FBB, U+2F807->U+5002, U+2F808->U+507A,
+U+2F809->U+5099, U+2F80A->U+50E7, U+2F80B->U+50CF, U+2F80C->U+349E, U+2F80D->U+2063A, U+2F80E->U+514D, U+2F80F->U+5154, U+2F810->U+5164,
+U+2F811->U+5177, U+2F812->U+2051C, U+2F813->U+34B9, U+2F814->U+5167, U+2F815->U+518D, U+2F816->U+2054B, U+2F817->U+5197,
+U+2F818->U+51A4, U+2F819->U+4ECC, U+2F81A->U+51AC, U+2F81B->U+51B5, U+2F81C->U+291DF, U+2F81D->U+51F5, U+2F81E->U+5203,
+U+2F81F->U+34DF, U+2F820->U+523B, U+2F821->U+5246, U+2F822->U+5272, U+2F823->U+5277, U+2F824->U+3515, U+2F825->U+52C7,
+U+2F826->U+52C9, U+2F827->U+52E4, U+2F828->U+52FA, U+2F829->U+5305, U+2F82A->U+5306, U+2F82B->U+5317, U+2F82C->U+5349,
+U+2F82D->U+5351, U+2F82E->U+535A, U+2F82F->U+5373, U+2F830->U+537D, U+2F831->U+537F, U+2F832->U+537F, U+2F833->U+537F,
+U+2F834->U+20A2C, U+2F835->U+7070, U+2F836->U+53CA, U+2F837->U+53DF, U+2F838->U+20B63, U+2F839->U+53EB, U+2F83A->U+53F1,
+U+2F83B->U+5406, U+2F83C->U+549E, U+2F83D->U+5438, U+2F83E->U+5448, U+2F83F->U+5468, U+2F840->U+54A2, U+2F841->U+54F6,
+U+2F842->U+5510, U+2F843->U+5553, U+2F844->U+5563, U+2F845->U+5584, U+2F846->U+5584, U+2F847->U+5599, U+2F848->U+55AB,
+U+2F849->U+55B3, U+2F84A->U+55C2, U+2F84B->U+5716, U+2F84C->U+5606, U+2F84D->U+5717, U+2F84E->U+5651, U+2F84F->U+5674,
+U+2F850->U+5207, U+2F851->U+58EE, U+2F852->U+57CE, U+2F853->U+57F4, U+2F854->U+580D, U+2F855->U+578B, U+2F856->U+5832,
+U+2F857->U+5831, U+2F858->U+58AC, U+2F859->U+214E4, U+2F85A->U+58F2, U+2F85B->U+58F7, U+2F85C->U+5906, U+2F85D->U+591A,
+U+2F85E->U+5922, U+2F85F->U+5962, U+2F860->U+216A8, U+2F861->U+216EA, U+2F862->U+59EC, U+2F863->U+5A1B, U+2F864->U+5A27,
+U+2F865->U+59D8, U+2F866->U+5A66, U+2F867->U+36EE, U+2F868->U+36FC, U+2F869->U+5B08, U+2F86A->U+5B3E, U+2F86B->U+5B3E,
+U+2F86C->U+219C8, U+2F86D->U+5BC3, U+2F86E->U+5BD8, U+2F86F->U+5BE7, U+2F870->U+5BF3, U+2F871->U+21B18, U+2F872->U+5BFF,
+U+2F873->U+5C06, U+2F874->U+5F53, U+2F875->U+5C22, U+2F876->U+3781, U+2F877->U+5C60, U+2F878->U+5C6E, U+2F879->U+5CC0,
+U+2F87A->U+5C8D, U+2F87B->U+21DE4, U+2F87C->U+5D43, U+2F87D->U+21DE6, U+2F87E->U+5D6E, U+2F87F->U+5D6B, U+2F880->U+5D7C,
+U+2F881->U+5DE1, U+2F882->U+5DE2, U+2F883->U+382F, U+2F884->U+5DFD, U+2F885->U+5E28, U+2F886->U+5E3D, U+2F887->U+5E69,
+U+2F888->U+3862, U+2F889->U+22183, U+2F88A->U+387C, U+2F88B->U+5EB0, U+2F88C->U+5EB3, U+2F88D->U+5EB6, U+2F88E->U+5ECA,
+U+2F88F->U+2A392, U+2F890->U+5EFE, U+2F891->U+22331, U+2F892->U+22331, U+2F893->U+8201, U+2F894->U+5F22, U+2F895->U+5F22,
+U+2F896->U+38C7, U+2F897->U+232B8, U+2F898->U+261DA, U+2F899->U+5F62, U+2F89A->U+5F6B, U+2F89B->U+38E3, U+2F89C->U+5F9A,
+U+2F89D->U+5FCD, U+2F89E->U+5FD7, U+2F89F->U+5FF9, U+2F8A0->U+6081, U+2F8A1->U+393A, U+2F8A2->U+391C, U+2F8A3->U+6094,
+U+2F8A4->U+226D4, U+2F8A5->U+60C7, U+2F8A6->U+6148, U+2F8A7->U+614C, U+2F8A8->U+614E, U+2F8A9->U+614C, U+2F8AA->U+617A,
+U+2F8AB->U+618E, U+2F8AC->U+61B2, U+2F8AD->U+61A4, U+2F8AE->U+61AF, U+2F8AF->U+61DE, U+2F8B0->U+61F2, U+2F8B1->U+61F6,
+U+2F8B2->U+6210, U+2F8B3->U+621B, U+2F8B4->U+625D, U+2F8B5->U+62B1, U+2F8B6->U+62D4, U+2F8B7->U+6350, U+2F8B8->U+22B0C,
+U+2F8B9->U+633D, U+2F8BA->U+62FC, U+2F8BB->U+6368, U+2F8BC->U+6383, U+2F8BD->U+63E4, U+2F8BE->U+22BF1, U+2F8BF->U+6422,
+U+2F8C0->U+63C5, U+2F8C1->U+63A9, U+2F8C2->U+3A2E, U+2F8C3->U+6469, U+2F8C4->U+647E, U+2F8C5->U+649D, U+2F8C6->U+6477,
+U+2F8C7->U+3A6C, U+2F8C8->U+654F, U+2F8C9->U+656C, U+2F8CA->U+2300A, U+2F8CB->U+65E3, U+2F8CC->U+66F8, U+2F8CD->U+6649,
+U+2F8CE->U+3B19, U+2F8CF->U+6691, U+2F8D0->U+3B08, U+2F8D1->U+3AE4, U+2F8D2->U+5192, U+2F8D3->U+5195, U+2F8D4->U+6700,
+U+2F8D5->U+669C, U+2F8D6->U+80AD, U+2F8D7->U+43D9, U+2F8D8->U+6717, U+2F8D9->U+671B, U+2F8DA->U+6721, U+2F8DB->U+675E,
+U+2F8DC->U+6753, U+2F8DD->U+233C3, U+2F8DE->U+3B49, U+2F8DF->U+67FA, U+2F8E0->U+6785, U+2F8E1->U+6852, U+2F8E2->U+6885,
+U+2F8E3->U+2346D, U+2F8E4->U+688E, U+2F8E5->U+681F, U+2F8E6->U+6914, U+2F8E7->U+3B9D, U+2F8E8->U+6942, U+2F8E9->U+69A3,
+U+2F8EA->U+69EA, U+2F8EB->U+6AA8, U+2F8EC->U+236A3, U+2F8ED->U+6ADB, U+2F8EE->U+3C18, U+2F8EF->U+6B21, U+2F8F0->U+238A7,
+U+2F8F1->U+6B54, U+2F8F2->U+3C4E, U+2F8F3->U+6B72, U+2F8F4->U+6B9F, U+2F8F5->U+6BBA, U+2F8F6->U+6BBB, U+2F8F7->U+23A8D,
+U+2F8F8->U+21D0B, U+2F8F9->U+23AFA, U+2F8FA->U+6C4E, U+2F8FB->U+23CBC, U+2F8FC->U+6CBF, U+2F8FD->U+6CCD, U+2F8FE->U+6C67,
+U+2F8FF->U+6D16, U+2F900->U+6D3E, U+2F901->U+6D77, U+2F902->U+6D41, U+2F903->U+6D69, U+2F904->U+6D78, U+2F905->U+6D85,
+U+2F906->U+23D1E, U+2F907->U+6D34, U+2F908->U+6E2F, U+2F909->U+6E6E, U+2F90A->U+3D33, U+2F90B->U+6ECB, U+2F90C->U+6EC7,
+U+2F90D->U+23ED1, U+2F90E->U+6DF9, U+2F90F->U+6F6E, U+2F910->U+23F5E, U+2F911->U+23F8E, U+2F912->U+6FC6, U+2F913->U+7039,
+U+2F914->U+701E, U+2F915->U+701B, U+2F916->U+3D96, U+2F917->U+704A, U+2F918->U+707D, U+2F919->U+7077, U+2F91A->U+70AD,
+U+2F91B->U+20525, U+2F91C->U+7145, U+2F91D->U+24263, U+2F91E->U+719C, U+2F91F->U+243AB, U+2F920->U+7228, U+2F921->U+7235,
+U+2F922->U+7250, U+2F923->U+24608, U+2F924->U+7280, U+2F925->U+7295, U+2F926->U+24735, U+2F927->U+24814, U+2F928->U+737A,
+U+2F929->U+738B, U+2F92A->U+3EAC, U+2F92B->U+73A5, U+2F92C->U+3EB8, U+2F92D->U+3EB8, U+2F92E->U+7447, U+2F92F->U+745C,
+U+2F930->U+7471, U+2F931->U+7485, U+2F932->U+74CA, U+2F933->U+3F1B, U+2F934->U+7524, U+2F935->U+24C36, U+2F936->U+753E,
+U+2F937->U+24C92, U+2F938->U+7570, U+2F939->U+2219F, U+2F93A->U+7610, U+2F93B->U+24FA1, U+2F93C->U+24FB8, U+2F93D->U+25044,
+U+2F93E->U+3FFC, U+2F93F->U+4008, U+2F940->U+76F4, U+2F941->U+250F3, U+2F942->U+250F2, U+2F943->U+25119, U+2F944->U+25133,
+U+2F945->U+771E, U+2F946->U+771F, U+2F947->U+771F, U+2F948->U+774A, U+2F949->U+4039, U+2F94A->U+778B, U+2F94B->U+4046,
+U+2F94C->U+4096, U+2F94D->U+2541D, U+2F94E->U+784E, U+2F94F->U+788C, U+2F950->U+78CC, U+2F951->U+40E3, U+2F952->U+25626,
+U+2F953->U+7956, U+2F954->U+2569A, U+2F955->U+256C5, U+2F956->U+798F, U+2F957->U+79EB, U+2F958->U+412F, U+2F959->U+7A40,
+U+2F95A->U+7A4A, U+2F95B->U+7A4F, U+2F95C->U+2597C, U+2F95D->U+25AA7, U+2F95E->U+25AA7, U+2F95F->U+7AEE, U+2F960->U+4202,
+U+2F961->U+25BAB, U+2F962->U+7BC6, U+2F963->U+7BC9, U+2F964->U+4227, U+2F965->U+25C80, U+2F966->U+7CD2, U+2F967->U+42A0,
+U+2F968->U+7CE8, U+2F969->U+7CE3, U+2F96A->U+7D00, U+2F96B->U+25F86, U+2F96C->U+7D63, U+2F96D->U+4301, U+2F96E->U+7DC7,
+U+2F96F->U+7E02, U+2F970->U+7E45, U+2F971->U+4334, U+2F972->U+26228, U+2F973->U+26247, U+2F974->U+4359, U+2F975->U+262D9,
+U+2F976->U+7F7A, U+2F977->U+2633E, U+2F978->U+7F95, U+2F979->U+7FFA, U+2F97A->U+8005, U+2F97B->U+264DA, U+2F97C->U+26523,
+U+2F97D->U+8060, U+2F97E->U+265A8, U+2F97F->U+8070, U+2F980->U+2335F, U+2F981->U+43D5, U+2F982->U+80B2, U+2F983->U+8103,
+U+2F984->U+440B, U+2F985->U+813E, U+2F986->U+5AB5, U+2F987->U+267A7, U+2F988->U+267B5, U+2F989->U+23393, U+2F98A->U+2339C,
+U+2F98B->U+8201, U+2F98C->U+8204, U+2F98D->U+8F9E, U+2F98E->U+446B, U+2F98F->U+8291, U+2F990->U+828B, U+2F991->U+829D,
+U+2F992->U+52B3, U+2F993->U+82B1, U+2F994->U+82B3, U+2F995->U+82BD, U+2F996->U+82E6, U+2F997->U+26B3C, U+2F998->U+82E5,
+U+2F999->U+831D, U+2F99A->U+8363, U+2F99B->U+83AD, U+2F99C->U+8323, U+2F99D->U+83BD, U+2F99E->U+83E7, U+2F99F->U+8457,
+U+2F9A0->U+8353, U+2F9A1->U+83CA, U+2F9A2->U+83CC, U+2F9A3->U+83DC, U+2F9A4->U+26C36, U+2F9A5->U+26D6B, U+2F9A6->U+26CD5,
+U+2F9A7->U+452B, U+2F9A8->U+84F1, U+2F9A9->U+84F3, U+2F9AA->U+8516, U+2F9AB->U+273CA, U+2F9AC->U+8564, U+2F9AD->U+26F2C,
+U+2F9AE->U+455D, U+2F9AF->U+4561, U+2F9B0->U+26FB1, U+2F9B1->U+270D2, U+2F9B2->U+456B, U+2F9B3->U+8650, U+2F9B4->U+865C,
+U+2F9B5->U+8667, U+2F9B6->U+8669, U+2F9B7->U+86A9, U+2F9B8->U+8688, U+2F9B9->U+870E, U+2F9BA->U+86E2, U+2F9BB->U+8779,
+U+2F9BC->U+8728, U+2F9BD->U+876B, U+2F9BE->U+8786, U+2F9BF->U+45D7, U+2F9C0->U+87E1, U+2F9C1->U+8801, U+2F9C2->U+45F9,
+U+2F9C3->U+8860, U+2F9C4->U+8863, U+2F9C5->U+27667, U+2F9C6->U+88D7, U+2F9C7->U+88DE, U+2F9C8->U+4635, U+2F9C9->U+88FA,
+U+2F9CA->U+34BB, U+2F9CB->U+278AE, U+2F9CC->U+27966, U+2F9CD->U+46BE, U+2F9CE->U+46C7, U+2F9CF->U+8AA0, U+2F9D0->U+8AED,
+U+2F9D1->U+8B8A, U+2F9D2->U+8C55, U+2F9D3->U+27CA8, U+2F9D4->U+8CAB, U+2F9D5->U+8CC1, U+2F9D6->U+8D1B, U+2F9D7->U+8D77,
+U+2F9D8->U+27F2F, U+2F9D9->U+20804, U+2F9DA->U+8DCB, U+2F9DB->U+8DBC, U+2F9DC->U+8DF0, U+2F9DD->U+208DE, U+2F9DE->U+8ED4,
+U+2F9DF->U+8F38, U+2F9E0->U+285D2, U+2F9E1->U+285ED, U+2F9E2->U+9094, U+2F9E3->U+90F1, U+2F9E4->U+9111, U+2F9E5->U+2872E,
+U+2F9E6->U+911B, U+2F9E7->U+9238, U+2F9E8->U+92D7, U+2F9E9->U+92D8, U+2F9EA->U+927C, U+2F9EB->U+93F9, U+2F9EC->U+9415,
+U+2F9ED->U+28BFA, U+2F9EE->U+958B, U+2F9EF->U+4995, U+2F9F0->U+95B7, U+2F9F1->U+28D77, U+2F9F2->U+49E6, U+2F9F3->U+96C3,
+U+2F9F4->U+5DB2, U+2F9F5->U+9723, U+2F9F6->U+29145, U+2F9F7->U+2921A, U+2F9F8->U+4A6E, U+2F9F9->U+4A76, U+2F9FA->U+97E0,
+U+2F9FB->U+2940A, U+2F9FC->U+4AB2, U+2F9FD->U+29496, U+2F9FE->U+980B, U+2F9FF->U+980B, U+2FA00->U+9829, U+2FA01->U+295B6,
+U+2FA02->U+98E2, U+2FA03->U+4B33, U+2FA04->U+9929, U+2FA05->U+99A7, U+2FA06->U+99C2, U+2FA07->U+99FE, U+2FA08->U+4BCE,
+U+2FA09->U+29B30, U+2FA0A->U+9B12, U+2FA0B->U+9C40, U+2FA0C->U+9CFD, U+2FA0D->U+4CCE, U+2FA0E->U+4CED, U+2FA0F->U+9D67,
+U+2FA10->U+2A0CE, U+2FA11->U+4CF8, U+2FA12->U+2A105, U+2FA13->U+2A20E, U+2FA14->U+2A291, U+2FA15->U+9EBB, U+2FA16->U+4D56,
+U+2FA17->U+9EF9, U+2FA18->U+9EFE, U+2FA19->U+9F05, U+2FA1A->U+9F0F, U+2FA1B->U+9F16, U+2FA1C->U+9F3B, U+2FA1D->U+2A600,
+U+2F00->U+4E00, U+2F01->U+4E28, U+2F02->U+4E36, U+2F03->U+4E3F, U+2F04->U+4E59, U+2F05->U+4E85, U+2F06->U+4E8C, U+2F07->U+4EA0,
+U+2F08->U+4EBA, U+2F09->U+513F, U+2F0A->U+5165, U+2F0B->U+516B, U+2F0C->U+5182, U+2F0D->U+5196, U+2F0E->U+51AB, U+2F0F->U+51E0,
+U+2F10->U+51F5, U+2F11->U+5200, U+2F12->U+529B, U+2F13->U+52F9, U+2F14->U+5315, U+2F15->U+531A, U+2F16->U+5338, U+2F17->U+5341,
+U+2F18->U+535C, U+2F19->U+5369, U+2F1A->U+5382, U+2F1B->U+53B6, U+2F1C->U+53C8, U+2F1D->U+53E3, U+2F1E->U+56D7, U+2F1F->U+571F,
+U+2F20->U+58EB, U+2F21->U+5902, U+2F22->U+590A, U+2F23->U+5915, U+2F24->U+5927, U+2F25->U+5973, U+2F26->U+5B50, U+2F27->U+5B80,
+U+2F28->U+5BF8, U+2F29->U+5C0F, U+2F2A->U+5C22, U+2F2B->U+5C38, U+2F2C->U+5C6E, U+2F2D->U+5C71, U+2F2E->U+5DDB, U+2F2F->U+5DE5,
+U+2F30->U+5DF1, U+2F31->U+5DFE, U+2F32->U+5E72, U+2F33->U+5E7A, U+2F34->U+5E7F, U+2F35->U+5EF4, U+2F36->U+5EFE, U+2F37->U+5F0B,
+U+2F38->U+5F13, U+2F39->U+5F50, U+2F3A->U+5F61, U+2F3B->U+5F73, U+2F3C->U+5FC3, U+2F3D->U+6208, U+2F3E->U+6236, U+2F3F->U+624B,
+U+2F40->U+652F, U+2F41->U+6534, U+2F42->U+6587, U+2F43->U+6597, U+2F44->U+65A4, U+2F45->U+65B9, U+2F46->U+65E0, U+2F47->U+65E5,
+U+2F48->U+66F0, U+2F49->U+6708, U+2F4A->U+6728, U+2F4B->U+6B20, U+2F4C->U+6B62, U+2F4D->U+6B79, U+2F4E->U+6BB3, U+2F4F->U+6BCB,
+U+2F50->U+6BD4, U+2F51->U+6BDB, U+2F52->U+6C0F, U+2F53->U+6C14, U+2F54->U+6C34, U+2F55->U+706B, U+2F56->U+722A, U+2F57->U+7236,
+U+2F58->U+723B, U+2F59->U+723F, U+2F5A->U+7247, U+2F5B->U+7259, U+2F5C->U+725B, U+2F5D->U+72AC, U+2F5E->U+7384, U+2F5F->U+7389,
+U+2F60->U+74DC, U+2F61->U+74E6, U+2F62->U+7518, U+2F63->U+751F, U+2F64->U+7528, U+2F65->U+7530, U+2F66->U+758B, U+2F67->U+7592,
+U+2F68->U+7676, U+2F69->U+767D, U+2F6A->U+76AE, U+2F6B->U+76BF, U+2F6C->U+76EE, U+2F6D->U+77DB, U+2F6E->U+77E2, U+2F6F->U+77F3,
+U+2F70->U+793A, U+2F71->U+79B8, U+2F72->U+79BE, U+2F73->U+7A74, U+2F74->U+7ACB, U+2F75->U+7AF9, U+2F76->U+7C73, U+2F77->U+7CF8,
+U+2F78->U+7F36, U+2F79->U+7F51, U+2F7A->U+7F8A, U+2F7B->U+7FBD, U+2F7C->U+8001, U+2F7D->U+800C, U+2F7E->U+8012, U+2F7F->U+8033,
+U+2F80->U+807F, U+2F81->U+8089, U+2F82->U+81E3, U+2F83->U+81EA, U+2F84->U+81F3, U+2F85->U+81FC, U+2F86->U+820C, U+2F87->U+821B,
+U+2F88->U+821F, U+2F89->U+826E, U+2F8A->U+8272, U+2F8B->U+8278, U+2F8C->U+864D, U+2F8D->U+866B, U+2F8E->U+8840, U+2F8F->U+884C,
+U+2F90->U+8863, U+2F91->U+897E, U+2F92->U+898B, U+2F93->U+89D2, U+2F94->U+8A00, U+2F95->U+8C37, U+2F96->U+8C46, U+2F97->U+8C55,
+U+2F98->U+8C78, U+2F99->U+8C9D, U+2F9A->U+8D64, U+2F9B->U+8D70, U+2F9C->U+8DB3, U+2F9D->U+8EAB, U+2F9E->U+8ECA, U+2F9F->U+8F9B,
+U+2FA0->U+8FB0, U+2FA1->U+8FB5, U+2FA2->U+9091, U+2FA3->U+9149, U+2FA4->U+91C6, U+2FA5->U+91CC, U+2FA6->U+91D1, U+2FA7->U+9577,
+U+2FA8->U+9580, U+2FA9->U+961C, U+2FAA->U+96B6, U+2FAB->U+96B9, U+2FAC->U+96E8, U+2FAD->U+9751, U+2FAE->U+975E, U+2FAF->U+9762,
+U+2FB0->U+9769, U+2FB1->U+97CB, U+2FB2->U+97ED, U+2FB3->U+97F3, U+2FB4->U+9801, U+2FB5->U+98A8, U+2FB6->U+98DB, U+2FB7->U+98DF,
+U+2FB8->U+9996, U+2FB9->U+9999, U+2FBA->U+99AC, U+2FBB->U+9AA8, U+2FBC->U+9AD8, U+2FBD->U+9ADF, U+2FBE->U+9B25, U+2FBF->U+9B2F,
+U+2FC0->U+9B32, U+2FC1->U+9B3C, U+2FC2->U+9B5A, U+2FC3->U+9CE5, U+2FC4->U+9E75, U+2FC5->U+9E7F, U+2FC6->U+9EA5, U+2FC7->U+9EBB,
+U+2FC8->U+9EC3, U+2FC9->U+9ECD, U+2FCA->U+9ED1, U+2FCB->U+9EF9, U+2FCC->U+9EFD, U+2FCD->U+9F0E, U+2FCE->U+9F13, U+2FCF->U+9F20,
+U+2FD0->U+9F3B, U+2FD1->U+9F4A, U+2FD2->U+9F52, U+2FD3->U+9F8D, U+2FD4->U+9F9C, U+2FD5->U+9FA0, U+3042->U+3041, U+3044->U+3043,
+U+3046->U+3045, U+3048->U+3047, U+304A->U+3049, U+304C->U+304B, U+304E->U+304D, U+3050->U+304F, U+3052->U+3051, U+3054->U+3053,
+U+3056->U+3055, U+3058->U+3057, U+305A->U+3059, U+305C->U+305B, U+305E->U+305D, U+3060->U+305F, U+3062->U+3061, U+3064->U+3063,
+U+3065->U+3063, U+3067->U+3066, U+3069->U+3068, U+3070->U+306F, U+3071->U+306F, U+3073->U+3072, U+3074->U+3072, U+3076->U+3075,
+U+3077->U+3075, U+3079->U+3078, U+307A->U+3078, U+307C->U+307B, U+307D->U+307B, U+3084->U+3083, U+3086->U+3085, U+3088->U+3087,
+U+308F->U+308E, U+3094->U+3046, U+3095->U+304B, U+3096->U+3051, U+30A2->U+30A1, U+30A4->U+30A3, U+30A6->U+30A5, U+30A8->U+30A7,
+U+30AA->U+30A9, U+30AC->U+30AB, U+30AE->U+30AD, U+30B0->U+30AF, U+30B2->U+30B1, U+30B4->U+30B3, U+30B6->U+30B5, U+30B8->U+30B7,
+U+30BA->U+30B9, U+30BC->U+30BB, U+30BE->U+30BD, U+30C0->U+30BF, U+30C2->U+30C1, U+30C5->U+30C4, U+30C7->U+30C6, U+30C9->U+30C8,
+U+30D0->U+30CF, U+30D1->U+30CF, U+30D3->U+30D2, U+30D4->U+30D2, U+30D6->U+30D5, U+30D7->U+30D5, U+30D9->U+30D8, U+30DA->U+30D8,
+U+30DC->U+30DB, U+30DD->U+30DB, U+30E4->U+30E3, U+30E6->U+30E5, U+30E8->U+30E7, U+30EF->U+30EE, U+30F4->U+30A6, U+30AB->U+30F5,
+U+30B1->U+30F6, U+30F7->U+30EF, U+30F8->U+30F0, U+30F9->U+30F1, U+30FA->U+30F2, U+30AF->U+31F0, U+30B7->U+31F1, U+30B9->U+31F2,
+U+30C8->U+31F3, U+30CC->U+31F4, U+30CF->U+31F5, U+30D2->U+31F6, U+30D5->U+31F7, U+30D8->U+31F8, U+30DB->U+31F9, U+30E0->U+31FA,
+U+30E9->U+31FB, U+30EA->U+31FC, U+30EB->U+31FD, U+30EC->U+31FE, U+30ED->U+31FF, U+FF66->U+30F2, U+FF67->U+30A1, U+FF68->U+30A3,
+U+FF69->U+30A5, U+FF6A->U+30A7, U+FF6B->U+30A9, U+FF6C->U+30E3, U+FF6D->U+30E5, U+FF6E->U+30E7, U+FF6F->U+30C3, U+FF71->U+30A1,
+U+FF72->U+30A3, U+FF73->U+30A5, U+FF74->U+30A7, U+FF75->U+30A9, U+FF76->U+30AB, U+FF77->U+30AD, U+FF78->U+30AF, U+FF79->U+30B1,
+U+FF7A->U+30B3, U+FF7B->U+30B5, U+FF7C->U+30B7, U+FF7D->U+30B9, U+FF7E->U+30BB, U+FF7F->U+30BD, U+FF80->U+30BF, U+FF81->U+30C1,
+U+FF82->U+30C3, U+FF83->U+30C6, U+FF84->U+30C8, U+FF85->U+30CA, U+FF86->U+30CB, U+FF87->U+30CC, U+FF88->U+30CD, U+FF89->U+30CE,
+U+FF8A->U+30CF, U+FF8B->U+30D2, U+FF8C->U+30D5, U+FF8D->U+30D8, U+FF8E->U+30DB, U+FF8F->U+30DE, U+FF90->U+30DF, U+FF91->U+30E0,
+U+FF92->U+30E1, U+FF93->U+30E2, U+FF94->U+30E3, U+FF95->U+30E5, U+FF96->U+30E7, U+FF97->U+30E9, U+FF98->U+30EA, U+FF99->U+30EB,
+U+FF9A->U+30EC, U+FF9B->U+30ED, U+FF9C->U+30EF, U+FF9D->U+30F3, U+FFA0->U+3164, U+FFA1->U+3131, U+FFA2->U+3132, U+FFA3->U+3133,
+U+FFA4->U+3134, U+FFA5->U+3135, U+FFA6->U+3136, U+FFA7->U+3137, U+FFA8->U+3138, U+FFA9->U+3139, U+FFAA->U+313A, U+FFAB->U+313B,
+U+FFAC->U+313C, U+FFAD->U+313D, U+FFAE->U+313E, U+FFAF->U+313F, U+FFB0->U+3140, U+FFB1->U+3141, U+FFB2->U+3142, U+FFB3->U+3143,
+U+FFB4->U+3144, U+FFB5->U+3145, U+FFB6->U+3146, U+FFB7->U+3147, U+FFB8->U+3148, U+FFB9->U+3149, U+FFBA->U+314A, U+FFBB->U+314B,
+U+FFBC->U+314C, U+FFBD->U+314D, U+FFBE->U+314E, U+FFC2->U+314F, U+FFC3->U+3150, U+FFC4->U+3151, U+FFC5->U+3152, U+FFC6->U+3153,
+U+FFC7->U+3154, U+FFCA->U+3155, U+FFCB->U+3156, U+FFCC->U+3157, U+FFCD->U+3158, U+FFCE->U+3159, U+FFCF->U+315A, U+FFD2->U+315B,
+U+FFD3->U+315C, U+FFD4->U+315D, U+FFD5->U+315E, U+FFD6->U+315F, U+FFD7->U+3160, U+FFDA->U+3161, U+FFDB->U+3162, U+FFDC->U+3163,
+U+3131->U+1100, U+3132->U+1101, U+3133->U+11AA, U+3134->U+1102, U+3135->U+11AC, U+3136->U+11AD, U+3137->U+1103, U+3138->U+1104,
+U+3139->U+1105, U+313A->U+11B0, U+313B->U+11B1, U+313C->U+11B2, U+313D->U+11B3, U+313E->U+11B4, U+313F->U+11B5, U+3140->U+111A,
+U+3141->U+1106, U+3142->U+1107, U+3143->U+1108, U+3144->U+1121, U+3145->U+1109, U+3146->U+110A, U+3147->U+110B, U+3148->U+110C,
+U+3149->U+110D, U+314A->U+110E, U+314B->U+110F, U+314C->U+1110, U+314D->U+1111, U+314E->U+1112, U+314F->U+1161, U+3150->U+1162,
+U+3151->U+1163, U+3152->U+1164, U+3153->U+1165, U+3154->U+1166, U+3155->U+1167, U+3156->U+1168, U+3157->U+1169, U+3158->U+116A,
+U+3159->U+116B, U+315A->U+116C, U+315B->U+116D, U+315C->U+116E, U+315D->U+116F, U+315E->U+1170, U+315F->U+1171, U+3160->U+1172,
+U+3161->U+1173, U+3162->U+1174, U+3163->U+1175, U+3165->U+1114, U+3166->U+1115, U+3167->U+11C7, U+3168->U+11C8, U+3169->U+11CC,
+U+316A->U+11CE, U+316B->U+11D3, U+316C->U+11D7, U+316D->U+11D9, U+316E->U+111C, U+316F->U+11DD, U+3170->U+11DF, U+3171->U+111D,
+U+3172->U+111E, U+3173->U+1120, U+3174->U+1122, U+3175->U+1123, U+3176->U+1127, U+3177->U+1129, U+3178->U+112B, U+3179->U+112C,
+U+317A->U+112D, U+317B->U+112E, U+317C->U+112F, U+317D->U+1132, U+317E->U+1136, U+317F->U+1140, U+3180->U+1147, U+3181->U+114C,
+U+3182->U+11F1, U+3183->U+11F2, U+3184->U+1157, U+3185->U+1158, U+3186->U+1159, U+3187->U+1184, U+3188->U+1185, U+3189->U+1188,
+U+318A->U+1191, U+318B->U+1192, U+318C->U+1194, U+318D->U+119E, U+318E->U+11A1, U+A490->U+A408, U+A491->U+A1B9, U+4E00..U+9FBB,
+U+3400..U+4DB5, U+20000..U+2A6D6, U+FA0E, U+FA0F, U+FA11, U+FA13, U+FA14, U+FA1F, U+FA21, U+FA23, U+FA24, U+FA27, U+FA28, U+FA29,
+U+3105..U+312C, U+31A0..U+31B7, U+3041, U+3043, U+3045, U+3047, U+3049, U+304B, U+304D, U+304F, U+3051, U+3053, U+3055, U+3057,
+U+3059, U+305B, U+305D, U+305F, U+3061, U+3063, U+3066, U+3068, U+306A..U+306F, U+3072, U+3075, U+3078, U+307B, U+307E..U+3083,
+U+3085, U+3087, U+3089..U+308E, U+3090..U+3093, U+30A1, U+30A3, U+30A5, U+30A7, U+30A9, U+30AD, U+30AF, U+30B3, U+30B5, U+30BB,
+U+30BD, U+30BF, U+30C1, U+30C3, U+30C4, U+30C6, U+30CA, U+30CB, U+30CD, U+30CE, U+30DE, U+30DF, U+30E1, U+30E2, U+30E3, U+30E5,
+U+30E7, U+30EE, U+30F0..U+30F3, U+30F5, U+30F6, U+31F0, U+31F1, U+31F2, U+31F3, U+31F4, U+31F5, U+31F6, U+31F7, U+31F8, U+31F9,
+U+31FA, U+31FB, U+31FC, U+31FD, U+31FE, U+31FF, U+AC00..U+D7A3, U+1100..U+1159, U+1161..U+11A2, U+11A8..U+11F9, U+A000..U+A48C,
+U+A492..U+A4C6
+
+##################################################
+# Coptic
+# Notes: Some shared Greek characters, may require amendments.
+U+2C80->U+2C81, U+2C81, U+2C82->U+2C83, U+2C83, U+2C84->U+2C85, U+2C85, U+2C86->U+2C87, U+2C87, U+2C88->U+2C89, U+2C89, U+2C8A->U+2C8B,
+U+2C8B, U+2C8C->U+2C8D, U+2C8D, U+2C8E->U+2C8F, U+2C8F, U+2C90->U+2C91, U+2C91, U+2C92->U+2C93, U+2C93, U+2C94->U+2C95, U+2C95,
+U+2C96->U+2C97, U+2C97, U+2C98->U+2C99, U+2C99, U+2C9A->U+2C9B, U+2C9B, U+2C9C->U+2C9D, U+2C9D, U+2C9E->U+2C9F, U+2C9F, U+2CA0->U+2CA1,
+U+2CA1, U+2CA2->U+2CA3, U+2CA3, U+2CA4->U+2CA5, U+2CA5, U+2CA6->U+2CA7, U+2CA7, U+2CA8->U+2CA9, U+2CA9, U+2CAA->U+2CAB, U+2CAB,
+U+2CAC->U+2CAD, U+2CAD, U+2CAE->U+2CAF, U+2CAF, U+2CB0->U+2CB1, U+2CB1, U+2CB2->U+2CB3, U+2CB3, U+2CB4->U+2CB5, U+2CB5,
+U+2CB6->U+2CB7, U+2CB7, U+2CB8->U+2CB9, U+2CB9, U+2CBA->U+2CBB, U+2CBB, U+2CBC->U+2CBD, U+2CBD, U+2CBE->U+2CBF, U+2CBF,
+U+2CC0->U+2CC1, U+2CC1, U+2CC2->U+2CC3, U+2CC3, U+2CC4->U+2CC5, U+2CC5, U+2CC6->U+2CC7, U+2CC7, U+2CC8->U+2CC9, U+2CC9,
+U+2CCA->U+2CCB, U+2CCB, U+2CCC->U+2CCD, U+2CCD, U+2CCE->U+2CCF, U+2CCF, U+2CD0->U+2CD1, U+2CD1, U+2CD2->U+2CD3, U+2CD3,
+U+2CD4->U+2CD5, U+2CD5, U+2CD6->U+2CD7, U+2CD7, U+2CD8->U+2CD9, U+2CD9, U+2CDA->U+2CDB, U+2CDB, U+2CDC->U+2CDD, U+2CDD,
+U+2CDE->U+2CDF, U+2CDF, U+2CE0->U+2CE1, U+2CE1, U+2CE2->U+2CE3, U+2CE3
+
+##################################################
+# Cyrillic*
+U+0400->U+0435, U+0401->U+0435, U+0402->U+0452, U+0452, U+0403->U+0433, U+0404->U+0454, U+0454, U+0405->U+0455, U+0455,
+U+0406->U+0456, U+0407->U+0456, U+0457->U+0456, U+0456, U+0408..U+040B->U+0458..U+045B, U+0458..U+045B, U+040C->U+043A,
+U+040D->U+0438, U+040E->U+0443, U+040F->U+045F, U+045F, U+0450->U+0435, U+0451->U+0435, U+0453->U+0433, U+045C->U+043A,
+U+045D->U+0438, U+045E->U+0443, U+0460->U+0461, U+0461, U+0462->U+0463, U+0463, U+0464->U+0465, U+0465, U+0466->U+0467,
+U+0467, U+0468->U+0469, U+0469, U+046A->U+046B, U+046B, U+046C->U+046D, U+046D, U+046E->U+046F, U+046F, U+0470->U+0471,
+U+0471, U+0472->U+0473, U+0473, U+0474->U+0475, U+0476->U+0475, U+0477->U+0475, U+0475, U+0478->U+0479, U+0479, U+047A->U+047B,
+U+047B, U+047C->U+047D, U+047D, U+047E->U+047F, U+047F, U+0480->U+0481, U+0481, U+048A->U+0438, U+048B->U+0438, U+048C->U+044C,
+U+048D->U+044C, U+048E->U+0440, U+048F->U+0440, U+0490->U+0433, U+0491->U+0433, U+0490->U+0433, U+0491->U+0433, U+0492->U+0433,
+U+0493->U+0433, U+0494->U+0433, U+0495->U+0433, U+0496->U+0436, U+0497->U+0436, U+0498->U+0437, U+0499->U+0437, U+049A->U+043A,
+U+049B->U+043A, U+049C->U+043A, U+049D->U+043A, U+049E->U+043A, U+049F->U+043A, U+04A0->U+043A, U+04A1->U+043A, U+04A2->U+043D,
+U+04A3->U+043D, U+04A4->U+043D, U+04A5->U+043D, U+04A6->U+043F, U+04A7->U+043F, U+04A8->U+04A9, U+04A9, U+04AA->U+0441,
+U+04AB->U+0441, U+04AC->U+0442, U+04AD->U+0442, U+04AE->U+0443, U+04AF->U+0443, U+04B0->U+0443, U+04B1->U+0443, U+04B2->U+0445,
+U+04B3->U+0445, U+04B4->U+04B5, U+04B5, U+04B6->U+0447, U+04B7->U+0447, U+04B8->U+0447, U+04B9->U+0447, U+04BA->U+04BB, U+04BB,
+U+04BC->U+04BD, U+04BE->U+04BD, U+04BF->U+04BD, U+04BD, U+04C0->U+04CF, U+04CF, U+04C1->U+0436, U+04C2->U+0436, U+04C3->U+043A,
+U+04C4->U+043A, U+04C5->U+043B, U+04C6->U+043B, U+04C7->U+043D, U+04C8->U+043D, U+04C9->U+043D, U+04CA->U+043D, U+04CB->U+0447,
+U+04CC->U+0447, U+04CD->U+043C, U+04CE->U+043C, U+04D0->U+0430, U+04D1->U+0430, U+04D2->U+0430, U+04D3->U+0430, U+04D4->U+00E6,
+U+04D5->U+00E6, U+04D6->U+0435, U+04D7->U+0435, U+04D8->U+04D9, U+04DA->U+04D9, U+04DB->U+04D9, U+04D9, U+04DC->U+0436,
+U+04DD->U+0436, U+04DE->U+0437, U+04DF->U+0437, U+04E0->U+04E1, U+04E1, U+04E2->U+0438, U+04E3->U+0438, U+04E4->U+0438,
+U+04E5->U+0438, U+04E6->U+043E, U+04E7->U+043E, U+04E8->U+043E, U+04E9->U+043E, U+04EA->U+043E, U+04EB->U+043E, U+04EC->U+044D,
+U+04ED->U+044D, U+04EE->U+0443, U+04EF->U+0443, U+04F0->U+0443, U+04F1->U+0443, U+04F2->U+0443, U+04F3->U+0443, U+04F4->U+0447,
+U+04F5->U+0447, U+04F6->U+0433, U+04F7->U+0433, U+04F8->U+044B, U+04F9->U+044B, U+04FA->U+0433, U+04FB->U+0433, U+04FC->U+0445,
+U+04FD->U+0445, U+04FE->U+0445, U+04FF->U+0445, U+0410..U+0418->U+0430..U+0438, U+0419->U+0438, U+0430..U+0438,
+U+041A..U+042F->U+043A..U+044F, U+043A..U+044F
+
+##################################################
+# Devanagari
+U+0929->U+0928, U+0931->U+0930, U+0934->U+0933, U+0958->U+0915, U+0959->U+0916, U+095A->U+0917, U+095B->U+091C, U+095C->U+0921,
+U+095D->U+0922, U+095E->U+092B, U+095F->U+092F, U+0904..U+0928, U+092A..U+0930, U+0932, U+0933, U+0935..U+0939, U+0960, U+0961,
+U+0966..U+096F, U+097B..U+097F
+
+##################################################
+# Georgian
+U+10FC->U+10DC, U+10D0..U+10FA, U+10A0..U+10C5->U+2D00..U+2D25, U+2D00..U+2D25
+
+##################################################
+# Greek
+U+0386->U+03B1, U+0388->U+03B5, U+0389->U+03B7, U+038A->U+03B9, U+038C->U+03BF, U+038E->U+03C5, U+038F->U+03C9, U+0390->U+03B9,
+U+03AA->U+03B9, U+03AB->U+03C5, U+03AC->U+03B1, U+03AD->U+03B5, U+03AE->U+03B7, U+03AF->U+03B9, U+03B0->U+03C5, U+03CA->U+03B9,
+U+03CB->U+03C5, U+03CC->U+03BF, U+03CD->U+03C5, U+03CE->U+03C9, U+03D0->U+03B2, U+03D1->U+03B8, U+03D2->U+03C5, U+03D3->U+03C5,
+U+03D4->U+03C5, U+03D5->U+03C6, U+03D6->U+03C0, U+03D8->U+03D9, U+03DA->U+03DB, U+03DC->U+03DD, U+03DE->U+03DF, U+03E0->U+03E1,
+U+03E2->U+03E3, U+03E4->U+03E5, U+03E6->U+03E7, U+03E8->U+03E9, U+03EA->U+03EB, U+03EC->U+03ED, U+03EE->U+03EF, U+03F0->U+03BA,
+U+03F1->U+03C1, U+03F2->U+03C3, U+03F4->U+03B8, U+03F5->U+03B5, U+03F6->U+03B5, U+03F7->U+03F8, U+03F9->U+03C3, U+03FA->U+03FB,
+U+1F00->U+03B1, U+1F01->U+03B1, U+1F02->U+03B1, U+1F03->U+03B1, U+1F04->U+03B1, U+1F05->U+03B1, U+1F06->U+03B1, U+1F07->U+03B1,
+U+1F08->U+03B1, U+1F09->U+03B1, U+1F0A->U+03B1, U+1F0B->U+03B1, U+1F0C->U+03B1, U+1F0D->U+03B1, U+1F0E->U+03B1, U+1F0F->U+03B1,
+U+1F10->U+03B5, U+1F11->U+03B5, U+1F12->U+03B5, U+1F13->U+03B5, U+1F14->U+03B5, U+1F15->U+03B5, U+1F18->U+03B5, U+1F19->U+03B5,
+U+1F1A->U+03B5, U+1F1B->U+03B5, U+1F1C->U+03B5, U+1F1D->U+03B5, U+1F20->U+03B7, U+1F21->U+03B7, U+1F22->U+03B7, U+1F23->U+03B7,
+U+1F24->U+03B7, U+1F25->U+03B7, U+1F26->U+03B7, U+1F27->U+03B7, U+1F28->U+03B7, U+1F29->U+03B7, U+1F2A->U+03B7, U+1F2B->U+03B7,
+U+1F2C->U+03B7, U+1F2D->U+03B7, U+1F2E->U+03B7, U+1F2F->U+03B7, U+1F30->U+03B9, U+1F31->U+03B9, U+1F32->U+03B9, U+1F33->U+03B9,
+U+1F34->U+03B9, U+1F35->U+03B9, U+1F36->U+03B9, U+1F37->U+03B9, U+1F38->U+03B9, U+1F39->U+03B9, U+1F3A->U+03B9, U+1F3B->U+03B9,
+U+1F3C->U+03B9, U+1F3D->U+03B9, U+1F3E->U+03B9, U+1F3F->U+03B9, U+1F40->U+03BF, U+1F41->U+03BF, U+1F42->U+03BF, U+1F43->U+03BF,
+U+1F44->U+03BF, U+1F45->U+03BF, U+1F48->U+03BF, U+1F49->U+03BF, U+1F4A->U+03BF, U+1F4B->U+03BF, U+1F4C->U+03BF, U+1F4D->U+03BF,
+U+1F50->U+03C5, U+1F51->U+03C5, U+1F52->U+03C5, U+1F53->U+03C5, U+1F54->U+03C5, U+1F55->U+03C5, U+1F56->U+03C5, U+1F57->U+03C5,
+U+1F59->U+03C5, U+1F5B->U+03C5, U+1F5D->U+03C5, U+1F5F->U+03C5, U+1F60->U+03C9, U+1F61->U+03C9, U+1F62->U+03C9, U+1F63->U+03C9,
+U+1F64->U+03C9, U+1F65->U+03C9, U+1F66->U+03C9, U+1F67->U+03C9, U+1F68->U+03C9, U+1F69->U+03C9, U+1F6A->U+03C9, U+1F6B->U+03C9,
+U+1F6C->U+03C9, U+1F6D->U+03C9, U+1F6E->U+03C9, U+1F6F->U+03C9, U+1F70->U+03B1, U+1F71->U+03B1, U+1F72->U+03B5, U+1F73->U+03B5,
+U+1F74->U+03B7, U+1F75->U+03B7, U+1F76->U+03B9, U+1F77->U+03B9, U+1F78->U+03BF, U+1F79->U+03BF, U+1F7A->U+03C5, U+1F7B->U+03C5,
+U+1F7C->U+03C9, U+1F7D->U+03C9, U+1F80->U+03B1, U+1F81->U+03B1, U+1F82->U+03B1, U+1F83->U+03B1, U+1F84->U+03B1, U+1F85->U+03B1,
+U+1F86->U+03B1, U+1F87->U+03B1, U+1F88->U+03B1, U+1F89->U+03B1, U+1F8A->U+03B1, U+1F8B->U+03B1, U+1F8C->U+03B1, U+1F8D->U+03B1,
+U+1F8E->U+03B1, U+1F8F->U+03B1, U+1F90->U+03B7, U+1F91->U+03B7, U+1F92->U+03B7, U+1F93->U+03B7, U+1F94->U+03B7, U+1F95->U+03B7,
+U+1F96->U+03B7, U+1F97->U+03B7, U+1F98->U+03B7, U+1F99->U+03B7, U+1F9A->U+03B7, U+1F9B->U+03B7, U+1F9C->U+03B7, U+1F9D->U+03B7,
+U+1F9E->U+03B7, U+1F9F->U+03B7, U+1FA0->U+03C9, U+1FA1->U+03C9, U+1FA2->U+03C9, U+1FA3->U+03C9, U+1FA4->U+03C9, U+1FA5->U+03C9,
+U+1FA6->U+03C9, U+1FA7->U+03C9, U+1FA8->U+03C9, U+1FA9->U+03C9, U+1FAA->U+03C9, U+1FAB->U+03C9, U+1FAC->U+03C9, U+1FAD->U+03C9,
+U+1FAE->U+03C9, U+1FAF->U+03C9, U+1FB0->U+03B1, U+1FB1->U+03B1, U+1FB2->U+03B1, U+1FB3->U+03B1, U+1FB4->U+03B1, U+1FB6->U+03B1,
+U+1FB7->U+03B1, U+1FB8->U+03B1, U+1FB9->U+03B1, U+1FBA->U+03B1, U+1FBB->U+03B1, U+1FBC->U+03B1, U+1FC2->U+03B7, U+1FC3->U+03B7,
+U+1FC4->U+03B7, U+1FC6->U+03B7, U+1FC7->U+03B7, U+1FC8->U+03B5, U+1FC9->U+03B5, U+1FCA->U+03B7, U+1FCB->U+03B7, U+1FCC->U+03B7,
+U+1FD0->U+03B9, U+1FD1->U+03B9, U+1FD2->U+03B9, U+1FD3->U+03B9, U+1FD6->U+03B9, U+1FD7->U+03B9, U+1FD8->U+03B9, U+1FD9->U+03B9,
+U+1FDA->U+03B9, U+1FDB->U+03B9, U+1FE0->U+03C5, U+1FE1->U+03C5, U+1FE2->U+03C5, U+1FE3->U+03C5, U+1FE4->U+03C1, U+1FE5->U+03C1,
+U+1FE6->U+03C5, U+1FE7->U+03C5, U+1FE8->U+03C5, U+1FE9->U+03C5, U+1FEA->U+03C5, U+1FEB->U+03C5, U+1FEC->U+03C1, U+1FF2->U+03C9,
+U+1FF3->U+03C9, U+1FF4->U+03C9, U+1FF6->U+03C9, U+1FF7->U+03C9, U+1FF8->U+03BF, U+1FF9->U+03BF, U+1FFA->U+03C9, U+1FFB->U+03C9,
+U+1FFC->U+03C9, U+0391..U+03A1->U+03B1..U+03C1, U+03B1..U+03C1, U+03A3..U+03A9->U+03C3..U+03C9, U+03C3..U+03C9, U+03C2, U+03D9,
+U+03DB, U+03DD, U+03DF, U+03E1, U+03E3, U+03E5, U+03E7, U+03E9, U+03EB, U+03ED, U+03EF, U+03F3, U+03F8, U+03FB
+
+##################################################
+# Gujarati
+U+0A85..U+0A8C, U+0A8F, U+0A90, U+0A93..U+0AB0, U+0AB2, U+0AB3, U+0AB5..U+0AB9, U+0AE0, U+0AE1, U+0AE6..U+0AEF
+
+##################################################
+# Gurmukhi
+U+0A33->U+0A32, U+0A36->U+0A38, U+0A59->U+0A16, U+0A5A->U+0A17, U+0A5B->U+0A1C, U+0A5E->U+0A2B, U+0A05..U+0A0A, U+0A0F, U+0A10,
+U+0A13..U+0A28, U+0A2A..U+0A30, U+0A32, U+0A35, U+0A38, U+0A39, U+0A5C, U+0A66..U+0A6F
+
+#################################################
+# Hebrew*
+U+FB1D->U+05D9, U+FB1F->U+05F2, U+FB20->U+05E2, U+FB21->U+05D0, U+FB22->U+05D3, U+FB23->U+05D4, U+FB24->U+05DB, U+FB25->U+05DC,
+U+FB26->U+05DD, U+FB27->U+05E8, U+FB28->U+05EA, U+FB2A->U+05E9, U+FB2B->U+05E9, U+FB2C->U+05E9, U+FB2D->U+05E9, U+FB2E->U+05D0,
+U+FB2F->U+05D0, U+FB30->U+05D0, U+FB31->U+05D1, U+FB32->U+05D2, U+FB33->U+05D3, U+FB34->U+05D4, U+FB35->U+05D5, U+FB36->U+05D6,
+U+FB38->U+05D8, U+FB39->U+05D9, U+FB3A->U+05DA, U+FB3B->U+05DB, U+FB3C->U+05DC, U+FB3E->U+05DE, U+FB40->U+05E0, U+FB41->U+05E1,
+U+FB43->U+05E3, U+FB44->U+05E4, U+FB46->U+05E6, U+FB47->U+05E7, U+FB48->U+05E8, U+FB49->U+05E9, U+FB4A->U+05EA, U+FB4B->U+05D5,
+U+FB4C->U+05D1, U+FB4D->U+05DB, U+FB4E->U+05E4, U+FB4F->U+05D0, U+05D0..U+05F2
+
+#################################################
+# Kannada
+U+0C85..U+0C8C, U+0C8E..U+0C90, U+0C92..U+0CA8, U+0CAA..U+0CB3, U+0CB5..U+0CB9, U+0CE0, U+0CE1, U+0CE6..U+0CEF
+
+#################################################
+# Limbu
+U+1900..U+191C, U+1930..U+1938, U+1946..U+194F
+
+#################################################
+# Malayalam
+U+0D05..U+0D0C, U+0D0E..U+0D10, U+0D12..U+0D28, U+0D2A..U+0D39, U+0D60, U+0D61, U+0D66..U+0D6F
+
+#################################################
+# Tamil
+U+0B94->U+0B92, U+0B85..U+0B8A, U+0B8E..U+0B90, U+0B92, U+0B93, U+0B95, U+0B99, U+0B9A, U+0B9C, U+0B9E, U+0B9F, U+0BA3, U+0BA4,
+U+0BA8..U+0BAA, U+0BAE..U+0BB9, U+0BE6..U+0BEF
+
+#################################################
+# Thai
+U+0E01..U+0E30, U+0E32, U+0E33, U+0E40..U+0E46, U+0E50..U+0E5B
+
+##################################################
+# Common
+U+FF10..U+FF19->0..9, U+FF21..U+FF3A->a..z, U+FF41..U+FF5A->a..z, 0..9, A..Z->a..z, a..z
+"""
+
+# The expected value format is a comma-separated list of mappings.
+# The two simplest mappings simply declare a character as valid, and map a single character
+# to another single character, respectively. But specifying the whole table in such
+# form would result in bloated and barely manageable specifications. So there are
+# several syntax shortcuts that let you map ranges of characters at once. The complete
+# list is as follows:
+#
+# A->a
+#     Single char mapping, declares source char 'A' as allowed to occur within keywords
+#     and maps it to destination char 'a' (but does not declare 'a' as allowed).
+# A..Z->a..z
+#     Range mapping, declares all chars in source range as allowed and maps them to
+#     the destination range. Does not declare destination range as allowed. Also checks
+#     ranges' lengths (the lengths must be equal).
+# a
+#     Stray char mapping, declares a character as allowed and maps it to itself.
+#     Equivalent to a->a single char mapping.
+# a..z
+#     Stray range mapping, declares all characters in range as allowed and maps them to
+#     themselves. Equivalent to a..z->a..z range mapping.
+# A..Z/2
+#     Checkerboard range map. Maps every pair of chars to the second char.
+#     More formally, declares odd characters in range as allowed and maps them to the
+#     even ones; also declares even characters as allowed and maps them to themselves.
+#     For instance, A..Z/2 is equivalent to A->B, B->B, C->D, D->D, ..., Y->Z, Z->Z.
+#     This mapping shortcut is helpful for a number of Unicode blocks where uppercase
+#     and lowercase letters go in such interleaved order instead of contiguous chunks.
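+#
+# For example, in the table above "U+0386->U+03B1" is a single char mapping
+# that folds Greek capital alpha with tonos to plain lowercase alpha,
+# "U+0410..U+0418->U+0430..U+0438" is a range mapping that folds a run of
+# Cyrillic capitals to their lowercase forms, and the trailing
+# "0..9, A..Z->a..z, a..z" in the Common section keeps digits and Latin
+# letters while lowercasing the capitals.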
+
+_dewhite = re.compile(r"\s")
+_char = r"((?:U\+[0-9A-Fa-f]{4,6})|.)"
+_char_map = re.compile("^" + _char + "->" + _char + "$")
+_range_map = re.compile("^" + _char + r"\.\." + _char + "->" + _char + ".." + _char + "$")
+_stray_char = re.compile("^" + _char + "$")
+_stray_range = re.compile("^" + _char + r"\.\." + _char + "$")
+_checker_range = re.compile("^" + _char + r"\.\." + _char + "/2$")
+
+
+def charspec_to_int(string):
+    # Converts a character specification of the form 'A' or 'U+23BC'
+    # to an integer
+    if string.startswith("U+"):
+        return int(string[2:], 16)
+    elif len(string) == 1:
+        return ord(string)
+    else:
+        raise Exception("Can't convert charspec: %r" % string)
+
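+# For example (editor's note): charspec_to_int("U+0041") == 65, and
+# charspec_to_int("A") == 65 as well.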
+
+def charset_table_to_dict(tablestring):
+    """Takes a string with the contents of a Sphinx charset table file and
+    returns a mapping object (a defaultdict, actually) of the kind expected by
+    the unicode.translate() method: that is, it maps a character number to a unicode
+    character or None if the character is not a valid word character.
+
+    The Sphinx charset table format is described at
+    http://www.sphinxsearch.com/docs/current.html#conf-charset-table.
+    """
+
+    #map = {}
+    map = defaultdict(lambda: None)
+    for line in tablestring.split("\n"):
+        if not line or line.startswith("#"):
+            continue
+        line = _dewhite.sub("", line)
+        for item in line.split(","):
+            if not item:
+                continue
+            match = _range_map.match(item)
+            if match:
+                start1 = charspec_to_int(match.group(1))
+                end1 = charspec_to_int(match.group(2))
+                start2 = charspec_to_int(match.group(3))
+                end2 = charspec_to_int(match.group(4))
+                assert (end1 - start1) == (end2 - start2)
+                try:
+                    for fromord, tooord in izip(xrange(start1, end1 + 1),
+                                                xrange(start2, end2 + 1)):
+                        map[fromord] = unichr(tooord)
+                except ValueError:
+                    pass
+                continue
+
+            match = _char_map.match(item)
+            if match:
+                fromord = charspec_to_int(match.group(1))
+                toord = charspec_to_int(match.group(2))
+                try:
+                    map[fromord] = unichr(toord)
+                except ValueError:
+                    pass
+                continue
+
+            match = _stray_char.match(item)
+            if match:
+                ord = charspec_to_int(match.group(0))
+                try:
+                    map[ord] = unichr(ord)
+                except ValueError:
+                    pass
+                continue
+
+            match = _stray_range.match(item)
+            if match:
+                start = charspec_to_int(match.group(1))
+                end = charspec_to_int(match.group(2))
+                try:
+                    for ord in xrange(start, end + 1):
+                        map[ord] = unichr(ord)
+                except ValueError:
+                    pass
+                continue
+
+            match = _checker_range.match(item)
+            if match:
+                fromord = charspec_to_int(match.group(1))
+                toord = charspec_to_int(match.group(2))
+                # the endpoints must differ by an odd amount so that the range
+                # splits exactly into (odd, even) pairs
+                assert (toord - fromord) % 2 == 1
+                for ord in xrange(fromord, toord + 1, 2):
+                    try:
+                        map[ord] = unichr(ord + 1)
+                        map[ord + 1] = unichr(ord + 1)
+                    except ValueError:
+                        pass
+                continue
+
+            raise Exception("Don't know what to do with %r" % item)
+    return map
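+
+# A rough usage sketch (editor's illustration; it assumes the table string
+# above is bound to a module-level name such as ``default_charset``, an
+# assumed name whose assignment is not shown here):
+#
+#     table = charset_table_to_dict(default_charset)
+#     u"Hello".translate(table)      # -> u"hello" (A..Z folded to a..z)
+#     u"\u0386\u03bb\u03c6\u03b1".translate(table)
+#                                    # -> u"\u03b1\u03bb\u03c6\u03b1"
+#
+# Characters that do not appear in the table map to None and are dropped,
+# which is how non-word characters are stripped.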
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/whoosh/whoosh/support/filelock.py b/lib/whoosh/whoosh/support/filelock.py
new file mode 100644
index 0000000..69c099a
--- /dev/null
+++ b/lib/whoosh/whoosh/support/filelock.py
@@ -0,0 +1,160 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""
+This module contains classes implementing exclusive locks for platforms with
+fcntl (UNIX and Mac OS X) and Windows. Whoosh originally used directory creation
+as a locking method, but it had the problem that if the program crashed the
+lock directory was left behind and would keep the index locked until it was
+cleaned up. Using OS-level file locks fixes this.
+"""
+
+import errno
+import os
+import time
+
+
+def try_for(fn, timeout=5.0, delay=0.1):
+    """Calls ``fn`` every ``delay`` seconds until it returns True or ``timeout``
+    seconds elapse. Returns True if the lock was acquired, or False if the
+    timeout was reached.
+
+    :param timeout: Length of time (in seconds) to keep retrying to acquire the
+        lock. 0 means return immediately. Only used when blocking is False.
+    :param delay: How often (in seconds) to retry acquiring the lock during
+        the timeout period. Only used when blocking is False and timeout > 0.
+    """
+
+    until = time.time() + timeout
+    v = fn()
+    while not v and time.time() < until:
+        time.sleep(delay)
+        v = fn()
+    return v
+
+
+class LockBase(object):
+    """Base class for file locks.
+    """
+
+    def __init__(self, filename):
+        self.fd = None
+        self.filename = filename
+        self.locked = False
+
+    def __del__(self):
+        if hasattr(self, "fd") and self.fd:
+            try:
+                self.release()
+            except:
+                pass
+
+    def acquire(self, blocking=False):
+        """Acquire the lock. Returns True if the lock was acquired.
+
+        :param blocking: if True, call blocks until the lock is acquired.
+            This may not be available on all platforms. On Windows, this is
+            actually just a delay of 10 seconds, rechecking every second.
+        """
+        pass
+
+    def release(self):
+        pass
+
+
+class FcntlLock(LockBase):
+    """File lock based on UNIX-only fcntl module.
+    """
+
+    def acquire(self, blocking=False):
+        import fcntl
+
+        flags = os.O_CREAT | os.O_WRONLY
+        self.fd = os.open(self.filename, flags)
+
+        mode = fcntl.LOCK_EX
+        if not blocking:
+            mode |= fcntl.LOCK_NB
+
+        try:
+            fcntl.flock(self.fd, mode)
+            self.locked = True
+            return True
+        except IOError, e:
+            if e.errno not in (errno.EAGAIN, errno.EACCES):
+                raise
+            os.close(self.fd)
+            self.fd = None
+            return False
+
+    def release(self):
+        import fcntl
+        fcntl.flock(self.fd, fcntl.LOCK_UN)
+        os.close(self.fd)
+        self.fd = None
+
+
+class MsvcrtLock(LockBase):
+    """File lock based on Windows-only msvcrt module.
+    """
+
+    def acquire(self, blocking=False):
+        import msvcrt
+
+        flags = os.O_CREAT | os.O_WRONLY
+        mode = msvcrt.LK_NBLCK
+        if blocking:
+            mode = msvcrt.LK_LOCK
+
+        self.fd = os.open(self.filename, flags)
+        try:
+            msvcrt.locking(self.fd, mode, 1)
+            return True
+        except IOError, e:
+            if e.errno not in (errno.EAGAIN, errno.EACCES, errno.EDEADLK):
+                raise
+            os.close(self.fd)
+            self.fd = None
+            return False
+
+    def release(self):
+        if self.fd is None:
+            raise Exception("Lock was not acquired")
+
+        import msvcrt
+        msvcrt.locking(self.fd, msvcrt.LK_UNLCK, 1)
+        os.close(self.fd)
+        self.fd = None
+
+
+if os.name == "nt":
+    FileLock = MsvcrtLock
+else:
+    FileLock = FcntlLock
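+
+# A rough usage sketch (editor's illustration, not part of Whoosh): acquire an
+# exclusive lock file, retrying for up to five seconds, and always release it.
+# The lock file path below is made up for the example.
+#
+#     lock = FileLock("/tmp/myindex.lock")
+#     if try_for(lock.acquire, timeout=5.0, delay=0.1):
+#         try:
+#             pass  # ... do work while holding the lock ...
+#         finally:
+#             lock.release()
+#     else:
+#         raise Exception("could not acquire the lock")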
+
+
+
diff --git a/lib/whoosh/whoosh/support/levenshtein.py b/lib/whoosh/whoosh/support/levenshtein.py
new file mode 100644
index 0000000..d129788
--- /dev/null
+++ b/lib/whoosh/whoosh/support/levenshtein.py
@@ -0,0 +1,33 @@
+"""
+Contains functions implementing the Levenshtein distance algorithm.
+"""
+
+
+def relative(a, b):
+    """Returns the relative distance between two strings, in the range
+    [0-1] where 1 means total equality.
+    """
+
+    d = distance(a, b)
+    longer = float(max((len(a), len(b))))
+    shorter = float(min((len(a), len(b))))
+    r = ((longer - d) / longer) * (shorter / longer)
+    return r
+
+
+def distance(s, t):
+    """Returns the Levenshtein edit distance between two strings."""
+
+    m, n = len(s), len(t)
+    d = [range(n + 1)]
+    d += [[i] for i in range(1, m + 1)]
+    for i in range(0, m):
+        for j in range(0, n):
+            cost = 1
+            if s[i] == t[j]:
+                cost = 0
+            d[i + 1].append(min(d[i][j + 1] + 1,    # deletion
+                                d[i + 1][j] + 1,    # insertion
+                                d[i][j] + cost))    # substitution
+    return d[m][n]
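+
+# For example (editor's note):
+#
+#     distance("kitten", "sitting")   # -> 3  (two substitutions, one insert)
+#     relative("kitten", "sitting")   # -> roughly 0.49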
+
diff --git a/lib/whoosh/whoosh/support/numeric.py b/lib/whoosh/whoosh/support/numeric.py
new file mode 100644
index 0000000..9a85ad5
--- /dev/null
+++ b/lib/whoosh/whoosh/support/numeric.py
@@ -0,0 +1,317 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+import struct
+from array import array
+
+
+_istruct = struct.Struct(">i")
+_qstruct = struct.Struct(">q")
+_dstruct = struct.Struct(">d")
+_ipack, _iunpack = _istruct.pack, _istruct.unpack
+_qpack, _qunpack = _qstruct.pack, _qstruct.unpack
+_dpack, _dunpack = _dstruct.pack, _dstruct.unpack
+
+_max_sortable_int = 4294967295L
+_max_sortable_long = 18446744073709551615L
+
+
+# Functions for converting numbers to and from sortable representations
+
+def int_to_sortable_int(x, signed=True):
+    if signed:
+        x += 1 << 31
+    assert x >= 0
+    return x
+
+
+def sortable_int_to_int(x, signed=True):
+    if signed:
+        x -= 1 << 31
+    return x
+
+
+def long_to_sortable_long(x, signed=True):
+    if signed:
+        x += 1 << 63
+    assert x >= 0
+    return x
+
+
+def sortable_long_to_long(x, signed=True):
+    if signed:
+        x -= 1 << 63
+    return x
+
+
+def float_to_sortable_long(x, signed=True):
+    x = _qunpack(_dpack(x))[0]
+    if x < 0:
+        x ^= 0x7fffffffffffffff
+    if signed:
+        x += 1 << 63
+    assert x >= 0
+    return x
+
+
+def sortable_long_to_float(x, signed=True):
+    if signed:
+        x -= 1 << 63
+    if x < 0:
+        x ^= 0x7fffffffffffffff
+    x = _dunpack(_qpack(x))[0]
+    return x
+
+
+# Functions for converting numbers to and from text
+
+def int_to_text(x, shift=0, signed=True):
+    x = int_to_sortable_int(x, signed)
+    return sortable_int_to_text(x, shift)
+
+
+def text_to_int(text, signed=True):
+    x = text_to_sortable_int(text)
+    x = sortable_int_to_int(x, signed)
+    return x
+
+
+def long_to_text(x, shift=0, signed=True):
+    x = long_to_sortable_long(x, signed)
+    return sortable_long_to_text(x, shift)
+
+
+def text_to_long(text, signed=True):
+    x = text_to_sortable_long(text)
+    x = sortable_long_to_long(x, signed)
+    return x
+
+
+def float_to_text(x, shift=0, signed=True):
+    x = float_to_sortable_long(x, signed)
+    return sortable_long_to_text(x, shift)
+
+
+def text_to_float(text, signed=True):
+    x = text_to_sortable_long(text)
+    x = sortable_long_to_float(x, signed)
+    return x
+
+
+# Functions for converting sortable representations to and from text.
+#
+# These functions originally encoded the numbers as hexadecimal strings rather
+# than using a 7-bit encoding, because while the hex representation uses more
+# space (8 bytes as opposed to 5 bytes for a 32 bit number), it's 5-10 times
+# faster to encode/decode in Python. The current implementation goes one step
+# further and uses the sortable base-85 helpers defined below (to_base85 and
+# from_base85); the old hex versions remain as commented-out lines.
+#
+# The functions for 7 bit encoding are still available (to_7bit and from_7bit)
+# if needed.
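+#
+# For example (editor's note), the encoded strings compare in the same order
+# as the numbers they encode, so for any values that fit in a signed 32-bit
+# int:
+#
+#     text_to_int(int_to_text(42)) == 42
+#     int_to_text(-1) < int_to_text(0) < int_to_text(1)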
+
+
+def sortable_int_to_text(x, shift=0):
+    if shift:
+        x >>= shift
+    #text = chr(shift) + u"%08x" % x
+    text = chr(shift) + to_base85(x, False)
+    return text
+
+
+def sortable_long_to_text(x, shift=0):
+    if shift:
+        x >>= shift
+    #text = chr(shift) + u"%016x" % x
+    #assert len(text) == 17
+    text = chr(shift) + to_base85(x, True)
+    return text
+
+
+def text_to_sortable_int(text):
+    #assert len(text) == 9
+    #return int(text[1:], 16)
+    return from_base85(text[1:])
+
+
+def text_to_sortable_long(text):
+    #assert len(text) == 17
+    #return long(text[1:], 16)
+    return from_base85(text[1:])
+
+
+# Functions for generating tiered ranges
+
+def split_range(valsize, step, start, end):
+    """Splits a range of numbers (from ``start`` to ``end``, inclusive)
+    into a sequence of trie ranges of the form ``(start, end, shift)``. The
+    consumer of these tuples is expected to shift the ``start`` and ``end``
+    right by ``shift``.
+
+    This is used for generating term ranges for a numeric field. The queries
+    for the edges of the range are generated at high precision and large blocks
+    in the middle are generated at low precision.
+    """
+
+    shift = 0
+    while True:
+        diff = 1 << (shift + step)
+        mask = ((1 << step) - 1) << shift
+        setbits = lambda x: x | ((1 << shift) - 1)
+
+        haslower = (start & mask) != 0
+        hasupper = (end & mask) != mask
+
+        not_mask = ~mask & ((1 << valsize + 1) - 1)
+        nextstart = (start + diff if haslower else start) & not_mask
+        nextend = (end - diff if hasupper else end) & not_mask
+
+        if shift + step >= valsize or nextstart > nextend:
+            yield (start, setbits(end), shift)
+            break
+
+        if haslower:
+            yield (start, setbits(start | mask), shift)
+        if hasupper:
+            yield (end & not_mask, setbits(end), shift)
+
+        start = nextstart
+        end = nextend
+        shift += step
+
+
+def tiered_ranges(numtype, signed, start, end, shift_step, startexcl, endexcl):
+    # First, convert the start and end of the range to sortable representations
+
+    valsize = 32 if numtype is int else 64
+
+    # Convert start and end values to sortable ints
+    if start is None:
+        start = 0
+    else:
+        if numtype is int:
+            start = int_to_sortable_int(start, signed)
+        elif numtype is long:
+            start = long_to_sortable_long(start, signed)
+        elif numtype is float:
+            start = float_to_sortable_long(start, signed)
+        if startexcl:
+            start += 1
+
+    if end is None:
+        end = _max_sortable_int if valsize == 32 else _max_sortable_long
+    else:
+        if numtype is int:
+            end = int_to_sortable_int(end, signed)
+        elif numtype is long:
+            end = long_to_sortable_long(end, signed)
+        elif numtype is float:
+            end = float_to_sortable_long(end, signed)
+        if endexcl:
+            end -= 1
+
+    if numtype is int:
+        to_text = sortable_int_to_text
+    else:
+        to_text = sortable_long_to_text
+
+    if not shift_step:
+        yield (to_text(start), to_text(end))
+        return
+
+    # Yield the term ranges for the different resolutions
+    for rstart, rend, shift in split_range(valsize, shift_step, start, end):
+        starttext = to_text(rstart, shift=shift)
+        endtext = to_text(rend, shift=shift)
+        yield (starttext, endtext)
+
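+# Example call (editor's sketch): term range pairs for a signed int field,
+# split in 4-bit steps with both bounds inclusive; each pair is
+# (low_text, high_text), and later pairs cover the middle of the range at
+# coarser precision (larger shift):
+#
+#     ranges = list(tiered_ranges(int, True, 100, 5000, 4, False, False))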
+
+# Functions for encoding numeric values as sequences of 7-bit ascii characters
+
+# Instead of using the character set from the ascii85 algorithm, I put the
+# characters in order so that the encoded text sorts properly (my life would be
+# a lot easier if they had just done that from the start)
+_b85chars = "!$%&*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ^_abcdefghijklmnopqrstuvwxyz{|}~"
+_b85dec = {}
+for i in range(len(_b85chars)):
+    _b85dec[_b85chars[i]] = i
+
+
+def to_base85(x, islong=False):
+    "Encodes the given integer using base 85."
+
+    size = 10 if islong else 5
+    rems = ""
+    for i in xrange(size):
+        rems = _b85chars[x % 85] + rems
+        x //= 85
+    return rems
+
+
+def from_base85(text):
+    "Decodes the given base 85 text into an integer."
+
+    acc = 0
+    for c in text:
+        acc = acc * 85 + _b85dec[c]
+    return acc
+
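+# For example (editor's note): the output of to_base85 is fixed width (5
+# characters for ints, 10 for longs) and _b85chars is in ascending order, so
+# the encoded strings sort the same way as the numbers:
+#
+#     from_base85(to_base85(12345)) == 12345
+#     to_base85(3) < to_base85(4000)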
+
+# Older, slower number-to-ascii functions
+
+def to_7bit(x, islong):
+    if not islong:
+        shift = 31
+        nchars = 5
+    else:
+        shift = 63
+        nchars = 10
+
+    buffer = array("c", "\x00" * nchars)
+    x += (1 << shift) - 1
+    while x:
+        buffer[nchars - 1] = chr(x & 0x7f)
+        x >>= 7
+        nchars -= 1
+    return buffer.tostring()
+
+
+def from_7bit(text):
+    if len(text) == 5:
+        shift = 31
+    elif len(text) == 10:
+        shift = 63
+    else:
+        raise ValueError("text is not 5 or 10 bytes")
+
+    x = 0
+    for char in text:
+        x <<= 7
+        char = ord(char)
+        if char > 0x7f:
+            raise Exception("invalid character in 7-bit encoded text")
+        x |= char
+    x -= (1 << shift) - 1
+    return int(x)
diff --git a/lib/whoosh/whoosh/support/relativedelta.py b/lib/whoosh/whoosh/support/relativedelta.py
new file mode 100644
index 0000000..3ba6fa4
--- /dev/null
+++ b/lib/whoosh/whoosh/support/relativedelta.py
@@ -0,0 +1,434 @@
+"""
+Copyright (c) 2003-2010  Gustavo Niemeyer <gustavo@niemeyer.net>
+
+This module offers extensions to the standard python 2.3+
+datetime module.
+"""
+__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
+__license__ = "PSF License"
+
+import datetime
+import calendar
+
+__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
+
+
+class weekday(object):
+    __slots__ = ["weekday", "n"]
+
+    def __init__(self, weekday, n=None):
+        self.weekday = weekday
+        self.n = n
+
+    def __call__(self, n):
+        if n == self.n:
+            return self
+        else:
+            return self.__class__(self.weekday, n)
+
+    def __eq__(self, other):
+        try:
+            if self.weekday != other.weekday or self.n != other.n:
+                return False
+        except AttributeError:
+            return False
+        return True
+
+    def __repr__(self):
+        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
+        if not self.n:
+            return s
+        else:
+            return "%s(%+d)" % (s, self.n)
+
+MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
+
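+# For example (editor's note): MO(+2) builds weekday(0, 2), i.e. "the second
+# Monday", and repr(MO(+2)) == "MO(+2)"; a bare MO is treated as "the next
+# Monday (or the current day, if it already is a Monday)".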
+
+class relativedelta:
+    """
+The relativedelta type is based on the specification of the excellent
+work done by M.-A. Lemburg in his mx.DateTime extension. However,
+notice that this type does *NOT* implement the same algorithm as
+his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
+
+There are two different ways to build a relativedelta instance. The
+first one is passing it two date/datetime classes:
+
+    relativedelta(datetime1, datetime2)
+
+And the other way is to use the following keyword arguments:
+
+    year, month, day, hour, minute, second, microsecond:
+        Absolute information.
+
+    years, months, weeks, days, hours, minutes, seconds, microseconds:
+        Relative information, may be negative.
+
+    weekday:
+        One of the weekday instances (MO, TU, etc). These instances may
+        receive a parameter N, specifying the Nth weekday, which could
+        be positive or negative (like MO(+1) or MO(-2)). Not specifying
+        it is the same as specifying +1. You can also use an integer,
+        where 0=MO.
+
+    leapdays:
+        Will add the given days to the date found, if the year is a leap
+        year and the date found is after the 28th of February.
+
+    yearday, nlyearday:
+        Set the yearday or the non-leap year day (jump leap days).
+        These are converted to day/month/leapdays information.
+
+Here is the behavior of operations with relativedelta:
+
+1) Calculate the absolute year, using the 'year' argument, or the
+   original datetime year, if the argument is not present.
+
+2) Add the relative 'years' argument to the absolute year.
+
+3) Do steps 1 and 2 for month/months.
+
+4) Calculate the absolute day, using the 'day' argument, or the
+   original datetime day, if the argument is not present. Then,
+   subtract from the day until it fits in the year and month
+   found after their operations.
+
+5) Add the relative 'days' argument to the absolute day. Notice
+   that the 'weeks' argument is multiplied by 7 and added to
+   'days'.
+
+6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
+   microsecond/microseconds.
+
+7) If the 'weekday' argument is present, calculate the weekday,
+   with the given (wday, nth) tuple. wday is the index of the
+   weekday (0-6, 0=Mon), and nth is the number of weeks to add
+   forward or backward, depending on its signal. Notice that if
+   the calculated date is already Monday, for example, using
+   (0, 1) or (0, -1) won't change the day.
+    """
+
+    def __init__(self, dt1=None, dt2=None,
+                 years=0, months=0, days=0, leapdays=0, weeks=0,
+                 hours=0, minutes=0, seconds=0, microseconds=0,
+                 year=None, month=None, day=None, weekday=None,
+                 yearday=None, nlyearday=None,
+                 hour=None, minute=None, second=None, microsecond=None):
+        if dt1 and dt2:
+            if not isinstance(dt1, datetime.date) or \
+               not isinstance(dt2, datetime.date):
+                raise TypeError("relativedelta only diffs datetime/date")
+            if type(dt1) is not type(dt2):
+                if not isinstance(dt1, datetime.datetime):
+                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
+                elif not isinstance(dt2, datetime.datetime):
+                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
+            self.years = 0
+            self.months = 0
+            self.days = 0
+            self.leapdays = 0
+            self.hours = 0
+            self.minutes = 0
+            self.seconds = 0
+            self.microseconds = 0
+            self.year = None
+            self.month = None
+            self.day = None
+            self.weekday = None
+            self.hour = None
+            self.minute = None
+            self.second = None
+            self.microsecond = None
+            self._has_time = 0
+
+            months = (dt1.year * 12 + dt1.month) - (dt2.year * 12 + dt2.month)
+            self._set_months(months)
+            dtm = self.__radd__(dt2)
+            if dt1 < dt2:
+                while dt1 > dtm:
+                    months += 1
+                    self._set_months(months)
+                    dtm = self.__radd__(dt2)
+            else:
+                while dt1 < dtm:
+                    months -= 1
+                    self._set_months(months)
+                    dtm = self.__radd__(dt2)
+            delta = dt1 - dtm
+            self.seconds = delta.seconds + delta.days * 86400
+            self.microseconds = delta.microseconds
+        else:
+            self.years = years
+            self.months = months
+            self.days = days + weeks * 7
+            self.leapdays = leapdays
+            self.hours = hours
+            self.minutes = minutes
+            self.seconds = seconds
+            self.microseconds = microseconds
+            self.year = year
+            self.month = month
+            self.day = day
+            self.hour = hour
+            self.minute = minute
+            self.second = second
+            self.microsecond = microsecond
+
+            if type(weekday) is int:
+                self.weekday = weekdays[weekday]
+            else:
+                self.weekday = weekday
+
+            yday = 0
+            if nlyearday:
+                yday = nlyearday
+            elif yearday:
+                yday = yearday
+                if yearday > 59:
+                    self.leapdays = -1
+            if yday:
+                ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366]
+                for idx, ydays in enumerate(ydayidx):
+                    if yday <= ydays:
+                        self.month = idx + 1
+                        if idx == 0:
+                            self.day = yday
+                        else:
+                            self.day = yday - ydayidx[idx - 1]
+                        break
+                else:
+                    raise ValueError("invalid year day (%d)" % yday)
+
+        self._fix()
+
+    def _fix(self):
+        if abs(self.microseconds) > 999999:
+            s = self.microseconds // abs(self.microseconds)
+            div, mod = divmod(self.microseconds * s, 1000000)
+            self.microseconds = mod * s
+            self.seconds += div * s
+        if abs(self.seconds) > 59:
+            s = self.seconds // abs(self.seconds)
+            div, mod = divmod(self.seconds * s, 60)
+            self.seconds = mod * s
+            self.minutes += div * s
+        if abs(self.minutes) > 59:
+            s = self.minutes // abs(self.minutes)
+            div, mod = divmod(self.minutes * s, 60)
+            self.minutes = mod * s
+            self.hours += div * s
+        if abs(self.hours) > 23:
+            s = self.hours // abs(self.hours)
+            div, mod = divmod(self.hours * s, 24)
+            self.hours = mod * s
+            self.days += div * s
+        if abs(self.months) > 11:
+            s = self.months // abs(self.months)
+            div, mod = divmod(self.months * s, 12)
+            self.months = mod * s
+            self.years += div * s
+        if (self.hours or self.minutes or self.seconds or self.microseconds or
+            self.hour is not None or self.minute is not None or
+            self.second is not None or self.microsecond is not None):
+            self._has_time = 1
+        else:
+            self._has_time = 0
+
+    def _set_months(self, months):
+        self.months = months
+        if abs(self.months) > 11:
+            s = self.months // abs(self.months)
+            div, mod = divmod(self.months * s, 12)
+            self.months = mod * s
+            self.years = div * s
+        else:
+            self.years = 0
+
+    def __radd__(self, other):
+        if not isinstance(other, datetime.date):
+            raise TypeError("unsupported type for add operation")
+        elif self._has_time and not isinstance(other, datetime.datetime):
+            other = datetime.datetime.fromordinal(other.toordinal())
+        year = (self.year or other.year) + self.years
+        month = self.month or other.month
+        if self.months:
+            assert 1 <= abs(self.months) <= 12
+            month += self.months
+            if month > 12:
+                year += 1
+                month -= 12
+            elif month < 1:
+                year -= 1
+                month += 12
+        day = min(calendar.monthrange(year, month)[1],
+                  self.day or other.day)
+        repl = {"year": year, "month": month, "day": day}
+        for attr in ["hour", "minute", "second", "microsecond"]:
+            value = getattr(self, attr)
+            if value is not None:
+                repl[attr] = value
+        days = self.days
+        if self.leapdays and month > 2 and calendar.isleap(year):
+            days += self.leapdays
+        ret = (other.replace(**repl)
+               + datetime.timedelta(days=days,
+                                    hours=self.hours,
+                                    minutes=self.minutes,
+                                    seconds=self.seconds,
+                                    microseconds=self.microseconds))
+        if self.weekday:
+            weekday, nth = self.weekday.weekday, self.weekday.n or 1
+            jumpdays = (abs(nth) - 1) * 7
+            if nth > 0:
+                jumpdays += (7 - ret.weekday() + weekday) % 7
+            else:
+                jumpdays += (ret.weekday() - weekday) % 7
+                jumpdays *= -1
+            ret += datetime.timedelta(days=jumpdays)
+        return ret
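+
+    # Editor's illustration (not part of the vendored module): __radd__ first
+    # overlays the absolute fields (year, month, day, ...) onto the other
+    # date, then applies the relative offsets, and finally resolves any
+    # weekday. For example:
+    #
+    #   >>> import datetime
+    #   >>> datetime.date(2011, 7, 31) + relativedelta(months=+1)
+    #   datetime.date(2011, 8, 31)
+    #   >>> datetime.date(2011, 7, 31) + relativedelta(months=+2)
+    #   datetime.date(2011, 9, 30)   # day clamped to the length of September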
+
+    def __rsub__(self, other):
+        return self.__neg__().__radd__(other)
+
+    def __add__(self, other):
+        if not isinstance(other, relativedelta):
+            raise TypeError("unsupported type for add operation")
+        return relativedelta(years=other.years + self.years,
+                             months=other.months + self.months,
+                             days=other.days + self.days,
+                             hours=other.hours + self.hours,
+                             minutes=other.minutes + self.minutes,
+                             seconds=other.seconds + self.seconds,
+                             microseconds=other.microseconds + self.microseconds,
+                             leapdays=other.leapdays or self.leapdays,
+                             year=other.year or self.year,
+                             month=other.month or self.month,
+                             day=other.day or self.day,
+                             weekday=other.weekday or self.weekday,
+                             hour=other.hour or self.hour,
+                             minute=other.minute or self.minute,
+                             second=other.second or self.second,
+                             microsecond=other.microsecond or self.microsecond)
+
+    def __sub__(self, other):
+        if not isinstance(other, relativedelta):
+            raise TypeError("unsupported type for sub operation")
+        return relativedelta(years=other.years - self.years,
+                             months=other.months - self.months,
+                             days=other.days - self.days,
+                             hours=other.hours - self.hours,
+                             minutes=other.minutes - self.minutes,
+                             seconds=other.seconds - self.seconds,
+                             microseconds=other.microseconds - self.microseconds,
+                             leapdays=other.leapdays or self.leapdays,
+                             year=other.year or self.year,
+                             month=other.month or self.month,
+                             day=other.day or self.day,
+                             weekday=other.weekday or self.weekday,
+                             hour=other.hour or self.hour,
+                             minute=other.minute or self.minute,
+                             second=other.second or self.second,
+                             microsecond=other.microsecond or self.microsecond)
+
+    def __neg__(self):
+        return relativedelta(years=-self.years,
+                             months=-self.months,
+                             days=-self.days,
+                             hours=-self.hours,
+                             minutes=-self.minutes,
+                             seconds=-self.seconds,
+                             microseconds=-self.microseconds,
+                             leapdays=self.leapdays,
+                             year=self.year,
+                             month=self.month,
+                             day=self.day,
+                             weekday=self.weekday,
+                             hour=self.hour,
+                             minute=self.minute,
+                             second=self.second,
+                             microsecond=self.microsecond)
+
+    def __nonzero__(self):
+        return not (not self.years and
+                    not self.months and
+                    not self.days and
+                    not self.hours and
+                    not self.minutes and
+                    not self.seconds and
+                    not self.microseconds and
+                    not self.leapdays and
+                    self.year is None and
+                    self.month is None and
+                    self.day is None and
+                    self.weekday is None and
+                    self.hour is None and
+                    self.minute is None and
+                    self.second is None and
+                    self.microsecond is None)
+
+    def __mul__(self, other):
+        f = float(other)
+        return relativedelta(years=self.years * f,
+                             months=self.months * f,
+                             days=self.days * f,
+                             hours=self.hours * f,
+                             minutes=self.minutes * f,
+                             seconds=self.seconds * f,
+                             microseconds=self.microseconds * f,
+                             leapdays=self.leapdays,
+                             year=self.year,
+                             month=self.month,
+                             day=self.day,
+                             weekday=self.weekday,
+                             hour=self.hour,
+                             minute=self.minute,
+                             second=self.second,
+                             microsecond=self.microsecond)
+
+    def __eq__(self, other):
+        if not isinstance(other, relativedelta):
+            return False
+        if self.weekday or other.weekday:
+            if not self.weekday or not other.weekday:
+                return False
+            if self.weekday.weekday != other.weekday.weekday:
+                return False
+            n1, n2 = self.weekday.n, other.weekday.n
+            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
+                return False
+        return (self.years == other.years and
+                self.months == other.months and
+                self.days == other.days and
+                self.hours == other.hours and
+                self.minutes == other.minutes and
+                self.seconds == other.seconds and
+                self.microseconds == other.microseconds and
+                self.leapdays == other.leapdays and
+                self.year == other.year and
+                self.month == other.month and
+                self.day == other.day and
+                self.hour == other.hour and
+                self.minute == other.minute and
+                self.second == other.second and
+                self.microsecond == other.microsecond)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __div__(self, other):
+        return self.__mul__(1 / float(other))
+
+    def __repr__(self):
+        l = []
+        for attr in ["years", "months", "days", "leapdays",
+                     "hours", "minutes", "seconds", "microseconds"]:
+            value = getattr(self, attr)
+            if value:
+                l.append("%s=%+d" % (attr, value))
+        for attr in ["year", "month", "day", "weekday",
+                     "hour", "minute", "second", "microsecond"]:
+            value = getattr(self, attr)
+            if value is not None:
+                l.append("%s=%s" % (attr, repr(value)))
+        return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
+
+# vim:ts=4:sw=4:et
diff --git a/lib/whoosh/whoosh/support/testing.py b/lib/whoosh/whoosh/support/testing.py
new file mode 100644
index 0000000..f8660e3
--- /dev/null
+++ b/lib/whoosh/whoosh/support/testing.py
@@ -0,0 +1,85 @@
+import shutil
+import tempfile
+from functools import wraps
+
+from whoosh.filedb.filestore import FileStorage
+
+
+class TempStorage(object):
+    def __init__(self, basename='', parentdir=None, suppress=frozenset(),
+                 keepdir=False):
+        self.basename = basename
+        self.parentdir = parentdir
+        self.suppress = suppress
+        self.keepdir = keepdir
+        self.dir = None
+
+    def __enter__(self):
+        self.dir = tempfile.mkdtemp(prefix=self.basename, suffix=".tmpix",
+                                    dir=self.parentdir)
+        return FileStorage(self.dir)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.keepdir:
+            try:
+                shutil.rmtree(self.dir)
+            except OSError, e:
+                print "Can't remove temp dir: " + str(e)
+
+        if exc_type is not None:
+            if self.keepdir:
+                print "Temp dir=", self.dir
+            if exc_type not in self.suppress:
+                return False
+
+
+class TempIndex(TempStorage):
+    def __init__(self, schema, ixname='', **kwargs):
+        super(TempIndex, self).__init__(basename=ixname, **kwargs)
+        self.schema = schema
+
+    def __enter__(self):
+        fstore = super(TempIndex, self).__enter__()
+        return fstore.create_index(self.schema, indexname=self.basename)
+
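+# Editor's usage sketch (my_schema is a hypothetical schema object, not from
+# the original file): both classes are context managers around a temporary
+# index directory.
+#
+#   with TempIndex(my_schema, "testidx") as ix:
+#       w = ix.writer()
+#       # ... add documents and run assertions ...
+#       w.commit()
+#   # the temporary directory is removed on exit unless keepdir=True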
+
+def skip_if(cond):
+    """A Nose test decorator that skips the decorated test if the given
+    function returns True at runtime.
+    """
+
+    def decorating_function(testfn):
+        @wraps(testfn)
+        def wrapper(*args, **kwargs):
+            if cond():
+                from nose.plugins.skip import SkipTest
+                raise SkipTest
+            else:
+                return testfn(*args, **kwargs)
+
+        return wrapper
+    return decorating_function
+
+
+def skip_if_unavailable(modulename):
+    """A Nose test decorator that only runs the decorated test if a module
+    can be imported::
+
+        @skip_if_unavailable("multiprocessing")
+        def test_mp():
+
+    Raises ``SkipTest`` if the module cannot be imported.
+    """
+
+    def cantimport():
+        try:
+            __import__(modulename)
+        except ImportError:
+            return True
+        else:
+            return False
+
+    return skip_if(cantimport)
+
+
+
diff --git a/lib/whoosh/whoosh/support/times.py b/lib/whoosh/whoosh/support/times.py
new file mode 100644
index 0000000..a7d6ce4
--- /dev/null
+++ b/lib/whoosh/whoosh/support/times.py
@@ -0,0 +1,459 @@
+# Copyright 2010 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+import calendar
+import copy
+from datetime import date, datetime, timedelta
+
+
+class TimeError(Exception):
+    pass
+
+
+def relative_days(current_wday, wday, dir):
+    """Returns the number of days (positive or negative) to the "next" or
+    "last" of a certain weekday. ``current_wday`` and ``wday`` are numbers,
+    i.e. 0 = monday, 1 = tuesday, 2 = wednesday, etc.
+
+    >>> # Get the number of days to the next tuesday, if today is Sunday
+    >>> relative_days(6, 1, 1)
+    2
+
+    :param current_wday: the number of the current weekday.
+    :param wday: the target weekday.
+    :param dir: -1 for the "last" (past) weekday, 1 for the "next" (future)
+        weekday.
+    """
+
+    if current_wday == wday:
+        return 7 * dir
+
+    if dir == 1:
+        return (wday + 7 - current_wday) % 7
+    else:
+        return (current_wday + 7 - wday) % 7 * -1
+
+
+def datetime_to_long(dt):
+    """Converts a datetime object to a long integer representing the number
+    of microseconds since ``datetime.min``.
+    """
+
+    td = dt - dt.min
+    total = td.days * 86400000000  # Microseconds in a day
+    total += td.seconds * 1000000  # Microseconds in a second
+    total += td.microseconds
+    return total
+
+
+def long_to_datetime(x):
+    """Converts a long integer representing the number of microseconds since
+    ``datetime.min`` to a datetime object.
+    """
+
+    days = x // 86400000000  # Microseconds in a day
+    x -= days * 86400000000
+
+    seconds = x // 1000000  # Microseconds in a second
+    x -= seconds * 1000000
+
+    return datetime.min + timedelta(days=days, seconds=seconds, microseconds=x)
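+
+
+# Editor's note (not in the original source): the two helpers above are exact
+# inverses for naive datetimes, e.g. long_to_datetime(datetime_to_long(dt))
+# == dt, since both measure whole microseconds from datetime.min.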
+
+
+# Ambiguous datetime object
+
+class adatetime(object):
+    """An "ambiguous" datetime object. This object acts like a
+    ``datetime.datetime`` object but can have any of its attributes set to
+    None, meaning unspecified.
+    """
+
+    units = frozenset(("year", "month", "day", "hour", "minute", "second", "microsecond"))
+
+    def __init__(self, year=None, month=None, day=None, hour=None, minute=None,
+                 second=None, microsecond=None):
+        if isinstance(year, datetime):
+            self.year, self.month, self.day = year.year, year.month, year.day
+            self.hour, self.minute, self.second = year.hour, year.minute, year.second
+            self.microsecond = year.microsecond
+        else:
+            if month is not None and (month < 1 or month > 12):
+                raise TimeError("month must be in 1..12")
+
+            if day is not None and day < 1:
+                raise TimeError("day must be at least 1")
+            if (year is not None and month is not None and day is not None
+                and day > calendar.monthrange(year, month)[1]):
+                raise TimeError("day is out of range for month")
+
+            if hour is not None and (hour < 0 or hour > 23):
+                raise TimeError("hour must be in 0..23")
+            if minute is not None and (minute < 0 or minute > 59):
+                raise TimeError("minute must be in 0..59")
+            if second is not None and (second < 0 or second > 59):
+                raise TimeError("second must be in 0..59")
+            if microsecond is not None and (microsecond < 0 or microsecond > 999999):
+                raise TimeError("microsecond must be in 0..999999")
+
+            self.year, self.month, self.day = year, month, day
+            self.hour, self.minute, self.second = hour, minute, second
+            self.microsecond = microsecond
+
+    def __eq__(self, other):
+        if not other.__class__ is self.__class__:
+            if not is_ambiguous(self) and isinstance(other, datetime):
+                return fix(self) == other
+            else:
+                return False
+        return all(getattr(self, unit) == getattr(other, unit)
+                   for unit in self.units)
+
+    def __repr__(self):
+        return "%s%r" % (self.__class__.__name__, self.tuple())
+
+    def tuple(self):
+        """Returns the attributes of the ``adatetime`` object as a tuple of
+        ``(year, month, day, hour, minute, second, microsecond)``.
+        """
+
+        return (self.year, self.month, self.day, self.hour, self.minute,
+                self.second, self.microsecond)
+
+    def date(self):
+        return date(self.year, self.month, self.day)
+
+    def copy(self):
+        return adatetime(year=self.year, month=self.month, day=self.day,
+                     hour=self.hour, minute=self.minute, second=self.second,
+                     microsecond=self.microsecond)
+
+    def replace(self, **kwargs):
+        """Returns a copy of this object with the attributes given as keyword
+        arguments replaced.
+
+        >>> adt = adatetime(year=2009, month=10, day=31)
+        >>> adt.replace(year=2010)
+        adatetime(2010, 10, 31, None, None, None, None)
+        """
+
+        newadatetime = self.copy()
+        for key, value in kwargs.iteritems():
+            if key in self.units:
+                setattr(newadatetime, key, value)
+            else:
+                raise KeyError("Unknown argument %r" % key)
+        return newadatetime
+
+    def floor(self):
+        """Returns a ``datetime`` version of this object with all unspecified
+        (None) attributes replaced by their lowest values.
+
+        This method raises an error if the ``adatetime`` object has no year.
+
+        >>> adt = adatetime(year=2009, month=5)
+        >>> adt.floor()
+        datetime.datetime(2009, 5, 1, 0, 0, 0, 0)
+        """
+
+        year, month, day, hour, minute, second, microsecond = (
+            self.year, self.month, self.day, self.hour, self.minute,
+            self.second, self.microsecond)
+
+        if year is None:
+            raise ValueError("Date has no year")
+
+        if month is None:
+            month = 1
+        if day is None:
+            day = 1
+        if hour is None:
+            hour = 0
+        if minute is None:
+            minute = 0
+        if second is None:
+            second = 0
+        if microsecond is None:
+            microsecond = 0
+        return datetime(year, month, day, hour, minute, second, microsecond)
+
+    def ceil(self):
+        """Returns a ``datetime`` version of this object with all unspecified
+        (None) attributes replaced by their highest values.
+
+        This method raises an error if the ``adatetime`` object has no year.
+
+        >>> adt = adatetime(year=2009, month=5)
+        >>> adt.ceil()
+        datetime.datetime(2009, 5, 31, 23, 59, 59, 999999)
+        """
+
+        year, month, day, hour, minute, second, microsecond = (
+            self.year, self.month, self.day, self.hour, self.minute,
+            self.second, self.microsecond)
+
+        if year is None:
+            raise ValueError("Date has no year")
+
+        if month is None:
+            month = 12
+        if day is None:
+            day = calendar.monthrange(year, month)[1]
+        if hour is None:
+            hour = 23
+        if minute is None:
+            minute = 59
+        if second is None:
+            second = 59
+        if microsecond is None:
+            microsecond = 999999
+        return datetime(year, month, day, hour, minute, second, microsecond)
+
+    def disambiguated(self, basedate):
+        """Returns either a ``datetime`` or unambiguous ``timespan`` version
+        of this object.
+
+        Unless this ``adatetime`` object is fully specified down to the
+        microsecond, this method will return a timespan built from the "floor"
+        and "ceil" of this object.
+
+        This method raises an error if the ``adatetime`` object has no year.
+
+        >>> adt = adatetime(year=2009, month=10, day=31)
+        >>> adt.disambiguated(datetime.now())
+        timespan(datetime.datetime(2009, 10, 31, 0, 0, 0, 0), datetime.datetime(2009, 10, 31, 23, 59, 59, 999999))
+        """
+
+        dt = self
+        if not is_ambiguous(dt):
+            return fix(dt)
+        return timespan(dt, dt).disambiguated(basedate)
+
+
+# Time span class
+
+class timespan(object):
+    """A span of time between two ``datetime`` or ``adatetime`` objects.
+    """
+
+    def __init__(self, start, end):
+        """
+        :param start: a ``datetime`` or ``adatetime`` object representing the
+            start of the time span.
+        :param end: a ``datetime`` or ``adatetime`` object representing the
+            end of the time span.
+        """
+
+        if not isinstance(start, (datetime, adatetime)):
+            raise TimeError("%r is not a datetime object" % start)
+        if not isinstance(end, (datetime, adatetime)):
+            raise TimeError("%r is not a datetime object" % end)
+
+        self.start = copy.copy(start)
+        self.end = copy.copy(end)
+
+    def __eq__(self, other):
+        if not other.__class__ is self.__class__:
+            return False
+        return self.start == other.start and self.end == other.end
+
+    def __repr__(self):
+        return "%s(%r, %r)" % (self.__class__.__name__, self.start, self.end)
+
+    def disambiguated(self, basedate, debug=0):
+        """Returns an unambiguous version of this object.
+
+        >>> start = adatetime(year=2009, month=2)
+        >>> end = adatetime(year=2009, month=10)
+        >>> ts = timespan(start, end)
+        >>> ts
+        timespan(adatetime(2009, 2, None, None, None, None, None), adatetime(2009, 10, None, None, None, None, None))
+        >>> ts.disambiguated(datetime.now())
+        timespan(datetime.datetime(2009, 2, 1, 0, 0, 0, 0), datetime.datetime(2009, 10, 31, 23, 59, 59, 999999))
+        """
+
+        #- If year is in start but not end, use basedate.year for end
+        #-- If year is in start but not end, but startdate is > basedate,
+        #   use "next <monthname>" to get end month/year
+        #- If year is in end but not start, copy year from end to start
+        #- Support "next february", "last april", etc.
+
+        start, end = copy.copy(self.start), copy.copy(self.end)
+        start_year_was_amb = start.year is None
+        end_year_was_amb = end.year is None
+
+        if has_no_date(start) and has_no_date(end):
+            # The start and end points are just times, so use the basedate
+            # for the date information.
+            by, bm, bd = basedate.year, basedate.month, basedate.day
+            start = start.replace(year=by, month=bm, day=bd)
+            end = end.replace(year=by, month=bm, day=bd)
+        else:
+            # If one side has a year and the other doesn't, the decision
+            # of what year to assign to the ambiguous side is kind of
+            # arbitrary. I've used a heuristic here based on how the range
+            # "reads", but it may only be reasonable in English. And maybe
+            # even just to me.
+
+            if start.year is None and end.year is None:
+                # No year on either side, use the basedate
+                start.year = end.year = basedate.year
+            elif start.year is None:
+                # No year in the start, use the year from the end
+                start.year = end.year
+            elif end.year is None:
+                end.year = max(start.year, basedate.year)
+
+        if start.year == end.year:
+            # Once again, if one side has a month and day but the other side
+            # doesn't, the disambiguation is arbitrary. Does "3 am to 5 am
+            # tomorrow" mean 3 AM today to 5 AM tomorrow, or 3am tomorrow to
+            # 5 am tomorrow? What I picked is similar to the year: if the
+            # end has a month+day and the start doesn't, copy the month+day
+            # from the end to the start UNLESS that would make the end come
+            # before the start on that day, in which case use the basedate
+            # instead. If the start has a month+day and the end doesn't, use
+            # the basedate.
+            start_dm = not (start.month is None and start.day is None)
+            end_dm = not (end.month is None and end.day is None)
+            if end_dm and not start_dm:
+                if start.floor().time() > end.ceil().time():
+                    start.month = basedate.month
+                    start.day = basedate.day
+                else:
+                    start.month = end.month
+                    start.day = end.day
+            elif start_dm and not end_dm:
+                end.month = basedate.month
+                end.day = basedate.day
+
+        if floor(start).date() > ceil(end).date():
+            # If the disambiguated dates are out of order:
+            # - If no start year was given, reduce the start year to put the
+            #   start before the end
+            # - If no end year was given, increase the end year to put the end
+            #   after the start
+            # - If a year was specified for both, just swap the start and end
+            if start_year_was_amb:
+                start.year = end.year - 1
+            elif end_year_was_amb:
+                end.year = start.year + 1
+            else:
+                start, end = end, start
+
+        start = floor(start)
+        end = ceil(end)
+
+        if start.date() == end.date() and start.time() > end.time():
+            # If the start and end are on the same day, but the start time
+            # is after the end time, move the end time to the next day
+            end += timedelta(days=1)
+
+        return timespan(start, end)
+
+
+# Functions for working with datetime/adatetime objects
+
+def floor(at):
+    if isinstance(at, datetime):
+        return at
+    return at.floor()
+
+
+def ceil(at):
+    if isinstance(at, datetime):
+        return at
+    return at.ceil()
+
+
+def fill_in(at, basedate, units=adatetime.units):
+    """Returns a copy of ``at`` with any unspecified (None) units filled in
+    with values from ``basedate``.
+    """
+
+    if isinstance(at, datetime):
+        return at
+
+    args = {}
+    for unit in units:
+        v = getattr(at, unit)
+        if v is None:
+            v = getattr(basedate, unit)
+        args[unit] = v
+    return fix(adatetime(**args))
+
+
+def has_no_date(at):
+    """Returns True if the given object is an ``adatetime`` where ``year``,
+    ``month``, and ``day`` are all None.
+    """
+
+    if isinstance(at, datetime):
+        return False
+    return at.year is None and at.month is None and at.day is None
+
+
+def has_no_time(at):
+    """Returns True if the given object is an ``adatetime`` where ``hour``,
+    ``minute``, ``second`` and ``microsecond`` are all None.
+    """
+
+    if isinstance(at, datetime):
+        return False
+    return at.hour is None and at.minute is None and at.second is None and at.microsecond is None
+
+
+def is_ambiguous(at):
+    """Returns True if the given object is an ``adatetime`` with any of its
+    attributes equal to None.
+    """
+
+    if isinstance(at, datetime):
+        return False
+    return any((getattr(at, attr) is None) for attr in adatetime.units)
+
+
+def is_void(at):
+    """Returns True if the given object is an ``adatetime`` with all of its
+    attributes equal to None.
+    """
+
+    if isinstance(at, datetime):
+        return False
+    return all((getattr(at, attr) is None) for attr in adatetime.units)
+
+
+def fix(at):
+    """If the given object is an ``adatetime`` that is unambiguous (because
+    all its attributes are specified, that is, not equal to None), returns a
+    ``datetime`` version of it. Otherwise returns the ``adatetime`` object
+    unchanged.
+    """
+
+    if is_ambiguous(at) or isinstance(at, datetime):
+        return at
+    return datetime(year=at.year, month=at.month, day=at.day, hour=at.hour,
+                    minute=at.minute, second=at.second, microsecond=at.microsecond)
+
+
diff --git a/lib/whoosh/whoosh/support/unicode.py b/lib/whoosh/whoosh/support/unicode.py
new file mode 100644
index 0000000..99bd4fb
--- /dev/null
+++ b/lib/whoosh/whoosh/support/unicode.py
@@ -0,0 +1,279 @@
+import re
+from bisect import bisect_right
+
+
+# http://unicode.org/Public/UNIDATA/Blocks.txt
+_blockdata = '''
+# Blocks-5.1.0.txt
+# Date: 2008-03-20, 17:41:00 PDT [KW]
+#
+# Unicode Character Database
+# Copyright (c) 1991-2008 Unicode, Inc.
+# For terms of use, see http://www.unicode.org/terms_of_use.html
+# For documentation, see UCD.html
+#
+# Note:   The casing of block names is not normative.
+#         For example, "Basic Latin" and "BASIC LATIN" are equivalent.
+#
+# Format:
+# Start Code..End Code; Block Name
+
+# ================================================
+
+# Note:   When comparing block names, casing, whitespace, hyphens,
+#         and underbars are ignored.
+#         For example, "Latin Extended-A" and "latin extended a" are equivalent.
+#         For more information on the comparison of property values,
+#            see UCD.html.
+#
+#  All code points not explicitly listed for Block
+#  have the value No_Block.
+
+# Property: Block
+#
+# @missing: 0000..10FFFF; No_Block
+
+0000..007F; Basic Latin
+0080..00FF; Latin-1 Supplement
+0100..017F; Latin Extended-A
+0180..024F; Latin Extended-B
+0250..02AF; IPA Extensions
+02B0..02FF; Spacing Modifier Letters
+0300..036F; Combining Diacritical Marks
+0370..03FF; Greek and Coptic
+0400..04FF; Cyrillic
+0500..052F; Cyrillic Supplement
+0530..058F; Armenian
+0590..05FF; Hebrew
+0600..06FF; Arabic
+0700..074F; Syriac
+0750..077F; Arabic Supplement
+0780..07BF; Thaana
+07C0..07FF; NKo
+0900..097F; Devanagari
+0980..09FF; Bengali
+0A00..0A7F; Gurmukhi
+0A80..0AFF; Gujarati
+0B00..0B7F; Oriya
+0B80..0BFF; Tamil
+0C00..0C7F; Telugu
+0C80..0CFF; Kannada
+0D00..0D7F; Malayalam
+0D80..0DFF; Sinhala
+0E00..0E7F; Thai
+0E80..0EFF; Lao
+0F00..0FFF; Tibetan
+1000..109F; Myanmar
+10A0..10FF; Georgian
+1100..11FF; Hangul Jamo
+1200..137F; Ethiopic
+1380..139F; Ethiopic Supplement
+13A0..13FF; Cherokee
+1400..167F; Unified Canadian Aboriginal Syllabics
+1680..169F; Ogham
+16A0..16FF; Runic
+1700..171F; Tagalog
+1720..173F; Hanunoo
+1740..175F; Buhid
+1760..177F; Tagbanwa
+1780..17FF; Khmer
+1800..18AF; Mongolian
+1900..194F; Limbu
+1950..197F; Tai Le
+1980..19DF; New Tai Lue
+19E0..19FF; Khmer Symbols
+1A00..1A1F; Buginese
+1B00..1B7F; Balinese
+1B80..1BBF; Sundanese
+1C00..1C4F; Lepcha
+1C50..1C7F; Ol Chiki
+1D00..1D7F; Phonetic Extensions
+1D80..1DBF; Phonetic Extensions Supplement
+1DC0..1DFF; Combining Diacritical Marks Supplement
+1E00..1EFF; Latin Extended Additional
+1F00..1FFF; Greek Extended
+2000..206F; General Punctuation
+2070..209F; Superscripts and Subscripts
+20A0..20CF; Currency Symbols
+20D0..20FF; Combining Diacritical Marks for Symbols
+2100..214F; Letterlike Symbols
+2150..218F; Number Forms
+2190..21FF; Arrows
+2200..22FF; Mathematical Operators
+2300..23FF; Miscellaneous Technical
+2400..243F; Control Pictures
+2440..245F; Optical Character Recognition
+2460..24FF; Enclosed Alphanumerics
+2500..257F; Box Drawing
+2580..259F; Block Elements
+25A0..25FF; Geometric Shapes
+2600..26FF; Miscellaneous Symbols
+2700..27BF; Dingbats
+27C0..27EF; Miscellaneous Mathematical Symbols-A
+27F0..27FF; Supplemental Arrows-A
+2800..28FF; Braille Patterns
+2900..297F; Supplemental Arrows-B
+2980..29FF; Miscellaneous Mathematical Symbols-B
+2A00..2AFF; Supplemental Mathematical Operators
+2B00..2BFF; Miscellaneous Symbols and Arrows
+2C00..2C5F; Glagolitic
+2C60..2C7F; Latin Extended-C
+2C80..2CFF; Coptic
+2D00..2D2F; Georgian Supplement
+2D30..2D7F; Tifinagh
+2D80..2DDF; Ethiopic Extended
+2DE0..2DFF; Cyrillic Extended-A
+2E00..2E7F; Supplemental Punctuation
+2E80..2EFF; CJK Radicals Supplement
+2F00..2FDF; Kangxi Radicals
+2FF0..2FFF; Ideographic Description Characters
+3000..303F; CJK Symbols and Punctuation
+3040..309F; Hiragana
+30A0..30FF; Katakana
+3100..312F; Bopomofo
+3130..318F; Hangul Compatibility Jamo
+3190..319F; Kanbun
+31A0..31BF; Bopomofo Extended
+31C0..31EF; CJK Strokes
+31F0..31FF; Katakana Phonetic Extensions
+3200..32FF; Enclosed CJK Letters and Months
+3300..33FF; CJK Compatibility
+3400..4DBF; CJK Unified Ideographs Extension A
+4DC0..4DFF; Yijing Hexagram Symbols
+4E00..9FFF; CJK Unified Ideographs
+A000..A48F; Yi Syllables
+A490..A4CF; Yi Radicals
+A500..A63F; Vai
+A640..A69F; Cyrillic Extended-B
+A700..A71F; Modifier Tone Letters
+A720..A7FF; Latin Extended-D
+A800..A82F; Syloti Nagri
+A840..A87F; Phags-pa
+A880..A8DF; Saurashtra
+A900..A92F; Kayah Li
+A930..A95F; Rejang
+AA00..AA5F; Cham
+AC00..D7AF; Hangul Syllables
+D800..DB7F; High Surrogates
+DB80..DBFF; High Private Use Surrogates
+DC00..DFFF; Low Surrogates
+E000..F8FF; Private Use Area
+F900..FAFF; CJK Compatibility Ideographs
+FB00..FB4F; Alphabetic Presentation Forms
+FB50..FDFF; Arabic Presentation Forms-A
+FE00..FE0F; Variation Selectors
+FE10..FE1F; Vertical Forms
+FE20..FE2F; Combining Half Marks
+FE30..FE4F; CJK Compatibility Forms
+FE50..FE6F; Small Form Variants
+FE70..FEFF; Arabic Presentation Forms-B
+FF00..FFEF; Halfwidth and Fullwidth Forms
+FFF0..FFFF; Specials
+10000..1007F; Linear B Syllabary
+10080..100FF; Linear B Ideograms
+10100..1013F; Aegean Numbers
+10140..1018F; Ancient Greek Numbers
+10190..101CF; Ancient Symbols
+101D0..101FF; Phaistos Disc
+10280..1029F; Lycian
+102A0..102DF; Carian
+10300..1032F; Old Italic
+10330..1034F; Gothic
+10380..1039F; Ugaritic
+103A0..103DF; Old Persian
+10400..1044F; Deseret
+10450..1047F; Shavian
+10480..104AF; Osmanya
+10800..1083F; Cypriot Syllabary
+10900..1091F; Phoenician
+10920..1093F; Lydian
+10A00..10A5F; Kharoshthi
+12000..123FF; Cuneiform
+12400..1247F; Cuneiform Numbers and Punctuation
+1D000..1D0FF; Byzantine Musical Symbols
+1D100..1D1FF; Musical Symbols
+1D200..1D24F; Ancient Greek Musical Notation
+1D300..1D35F; Tai Xuan Jing Symbols
+1D360..1D37F; Counting Rod Numerals
+1D400..1D7FF; Mathematical Alphanumeric Symbols
+1F000..1F02F; Mahjong Tiles
+1F030..1F09F; Domino Tiles
+20000..2A6DF; CJK Unified Ideographs Extension B
+2F800..2FA1F; CJK Compatibility Ideographs Supplement
+E0000..E007F; Tags
+E0100..E01EF; Variation Selectors Supplement
+F0000..FFFFF; Supplementary Private Use Area-A
+100000..10FFFF; Supplementary Private Use Area-B
+
+# EOF
+'''
+
+
+pattern = re.compile(r'([0-9A-F]+)\.\.([0-9A-F]+);\ (\S.*\S)')
+_starts = []
+_ends = []
+_names = []
+
+
+class blocks(object):
+    pass
+
+
+def _init():
+    count = 0
+    for line in _blockdata.splitlines():
+        m = pattern.match(line)
+        if m:
+            start, end, name = m.groups()
+            _starts.append(int(start, 16))
+            _ends.append(int(end, 16))
+            _names.append(name)
+            setattr(blocks, name.replace(" ", "_"), count)
+            count += 1
+_init()
+
+
+def blockname(ch):
+    """Return the Unicode block name for ch, or None if ch has no block.
+
+    >>> blockname(u'a')
+    'Basic Latin'
+    >>> blockname(unichr(0x0b80))
+    'Tamil'
+    >>> blockname(unichr(2048))
+    None
+    """
+
+    assert isinstance(ch, unicode) and len(ch) == 1, repr(ch)
+    cp = ord(ch)
+    i = bisect_right(_starts, cp) - 1
+    end = _ends[i]
+    if cp > end:
+        return None
+    return _names[i]
+
+
+def blocknum(ch):
+    """Returns the unicode block number for ch, or None if ch has no block.
+
+    >>> blocknum(u'a')
+    0
+    >>> blocknum(unichr(0x0b80))
+    22
+    >>> blocknum(unichr(2048))
+    None
+    """
+
+    cp = ord(ch)
+    i = bisect_right(_starts, cp) - 1
+    end = _ends[i]
+    if cp > end:
+        return None
+    return i
+
+
diff --git a/lib/whoosh/whoosh/system.py b/lib/whoosh/whoosh/system.py
new file mode 100644
index 0000000..785864a
--- /dev/null
+++ b/lib/whoosh/whoosh/system.py
@@ -0,0 +1,58 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+import sys
+from struct import Struct, calcsize
+
+IS_LITTLE = sys.byteorder == "little"
+
+_INT_SIZE = calcsize("!i")
+_SHORT_SIZE = calcsize("!H")
+_LONG_SIZE = calcsize("!Q")
+_FLOAT_SIZE = calcsize("!f")
+
+_sbyte_struct = Struct("!b")
+_ushort_struct = Struct("!H")
+_int_struct = Struct("!i")
+_uint_struct = Struct("!I")
+_long_struct = Struct("!q")
+_float_struct = Struct("!f")
+
+pack_sbyte = _sbyte_struct.pack
+pack_ushort = _ushort_struct.pack
+pack_int = _int_struct.pack
+pack_uint = _uint_struct.pack
+pack_long = _long_struct.pack
+pack_float = _float_struct.pack
+
+unpack_sbyte = _sbyte_struct.unpack
+unpack_ushort = _ushort_struct.unpack
+unpack_int = _int_struct.unpack
+unpack_uint = _uint_struct.unpack
+unpack_long = _long_struct.unpack
+unpack_float = _float_struct.unpack
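+
+# Editor's note (not part of the original module): every Struct above uses
+# network byte order ("!"), so for example pack_int(1) == "\x00\x00\x00\x01"
+# and unpack_ushort("\x01\x00") == (256,).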
+
diff --git a/lib/whoosh/whoosh/util.py b/lib/whoosh/whoosh/util.py
new file mode 100644
index 0000000..a0c49b3
--- /dev/null
+++ b/lib/whoosh/whoosh/util.py
@@ -0,0 +1,610 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+"""Miscellaneous utility functions and classes.
+"""
+
+from __future__ import with_statement
+import codecs
+import re
+import sys
+import time
+from array import array
+from bisect import insort
+from copy import copy
+from functools import wraps
+from math import log
+from struct import pack, unpack
+from threading import Lock
+
+from whoosh.system import IS_LITTLE
+
+
+try:
+    from itertools import permutations
+except ImportError:
+    # This function was only added to itertools in 2.6...
+    def permutations(iterable, r=None):
+        pool = tuple(iterable)
+        n = len(pool)
+        r = n if r is None else r
+        if r > n:
+            return
+        indices = range(n)
+        cycles = range(n, n - r, -1)
+        yield tuple(pool[i] for i in indices[:r])
+        while n:
+            for i in reversed(range(r)):
+                cycles[i] -= 1
+                if cycles[i] == 0:
+                    indices[i:] = indices[i + 1:] + indices[i:i + 1]
+                    cycles[i] = n - i
+                else:
+                    j = cycles[i]
+                    indices[i], indices[-j] = indices[-j], indices[i]
+                    yield tuple(pool[i] for i in indices[:r])
+                    break
+            else:
+                return
+
+
+try:
+    from operator import methodcaller
+except ImportError:
+    def methodcaller(name, *args, **kwargs):
+        def caller(obj):
+            return getattr(obj, name)(*args, **kwargs)
+        return caller
+
+
+if sys.platform == 'win32':
+    now = time.clock
+else:
+    now = time.time
+
+
+# Note: these functions return a tuple of (text, length), so when you call
+# them, you have to add [0] on the end, e.g. str = utf8encode(unicode)[0]
+
+utf8encode = codecs.getencoder("utf_8")
+utf8decode = codecs.getdecoder("utf_8")
+
+
+# Functions
+
+def array_to_string(a):
+    if IS_LITTLE:
+        a = copy(a)
+        a.byteswap()
+    return a.tostring()
+
+
+def string_to_array(typecode, s):
+    a = array(typecode)
+    a.fromstring(s)
+    if IS_LITTLE:
+        a.byteswap()
+    return a
+
+
+def make_binary_tree(fn, args, **kwargs):
+    """Takes a function/class that takes two positional arguments and a list of
+    arguments and returns a binary tree of results/instances.
+
+    >>> make_binary_tree(UnionMatcher, [matcher1, matcher2, matcher3])
+    UnionMatcher(matcher1, UnionMatcher(matcher2, matcher3))
+
+    Any keyword arguments given to this function are passed to the class
+    initializer.
+    """
+
+    count = len(args)
+    if not count:
+        raise ValueError("Called make_binary_tree with empty list")
+    elif count == 1:
+        return args[0]
+
+    half = count // 2
+    return fn(make_binary_tree(fn, args[:half], **kwargs),
+              make_binary_tree(fn, args[half:], **kwargs), **kwargs)
+
+
+def make_weighted_tree(fn, ls, **kwargs):
+    """Takes a function/class that takes two positional arguments and a list of
+    (weight, argument) tuples and returns a huffman-like weighted tree of
+    results/instances.
+    """
+
+    if not ls:
+        raise ValueError("Called make_weighted_tree with empty list")
+
+    ls.sort()
+    while len(ls) > 1:
+        a = ls.pop(0)
+        b = ls.pop(0)
+        insort(ls, (a[0] + b[0], fn(a[1], b[1])))
+    return ls[0][1]
+
+
+# Varint cache
+
+# Build a cache of the varint byte sequences for the first N integers, so we
+# don't have to constantly recalculate them on the fly. This makes a small but
+# noticeable difference.
+
+def _varint(i):
+    s = ""
+    while (i & ~0x7F) != 0:
+        s += chr((i & 0x7F) | 0x80)
+        i = i >> 7
+    s += chr(i)
+    return s
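+
+# Editor's worked example (not in the original source): 300 is 0b100101100;
+# the low 7 bits are emitted first with the high (continuation) bit set, so
+# _varint(300) == "\xac\x02".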
+
+
+_varint_cache_size = 512
+_varint_cache = []
+for i in xrange(0, _varint_cache_size):
+    _varint_cache.append(_varint(i))
+_varint_cache = tuple(_varint_cache)
+
+
+def varint(i):
+    """Encodes the given integer into a string of the minimum number  of bytes.
+    """
+    if i < len(_varint_cache):
+        return _varint_cache[i]
+    return _varint(i)
+
+
+def varint_to_int(vi):
+    b = ord(vi[0])
+    p = 1
+    i = b & 0x7f
+    shift = 7
+    while b & 0x80 != 0:
+        b = ord(vi[p])
+        p += 1
+        i |= (b & 0x7F) << shift
+        shift += 7
+    return i
+
+
+def signed_varint(i):
+    """Zig-zag encodes a signed integer into a varint.
+    """
+
+    if i >= 0:
+        return varint(i << 1)
+    return varint((i << 1) ^ (~0))
+
+
+def decode_signed_varint(i):
+    """Zig-zag decodes an integer value.
+    """
+
+    if not i & 1:
+        return i >> 1
+    return (i >> 1) ^ (~0)
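+
+# Editor's note: the zig-zag mapping sends 0, -1, 1, -2, 2, ... to
+# 0, 1, 2, 3, 4, ..., so decode_signed_varint(varint_to_int(signed_varint(i)))
+# round-trips any integer i.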
+
+
+def read_varint(readfn):
+    """
+    Reads a variable-length encoded integer.
+
+    :param readfn: a callable that reads a given number of bytes,
+        like file.read().
+    """
+
+    b = ord(readfn(1))
+    i = b & 0x7F
+
+    shift = 7
+    while b & 0x80 != 0:
+        b = ord(readfn(1))
+        i |= (b & 0x7F) << shift
+        shift += 7
+    return i
+
+
+# Fibonacci function
+
+_fib_cache = {}
+
+
+def fib(n):
+    """Returns the nth value in the Fibonacci sequence.
+    """
+
+    if n <= 2:
+        return n
+    if n in _fib_cache:
+        return _fib_cache[n]
+    result = fib(n - 1) + fib(n - 2)
+    _fib_cache[n] = result
+    return result
+
+
+# Float-to-byte encoding/decoding
+
+def float_to_byte(value, mantissabits=5, zeroexp=2):
+    """Encodes a floating point number in a single byte.
+    """
+
+    # Assume int size == float size
+
+    fzero = (63 - zeroexp) << mantissabits
+    bits = unpack("i", pack("f", value))[0]
+    smallfloat = bits >> (24 - mantissabits)
+    if smallfloat < fzero:
+        # Map negative numbers and 0 to 0
+        # Map underflow to next smallest non-zero number
+        if bits <= 0:
+            return chr(0)
+        else:
+            return chr(1)
+    elif smallfloat >= fzero + 0x100:
+        # Map overflow to largest number
+        return chr(255)
+    else:
+        return chr(smallfloat - fzero)
+
+
+def byte_to_float(b, mantissabits=5, zeroexp=2):
+    """Decodes a floating point number stored in a single byte.
+    """
+    b = ord(b)
+    if b == 0:
+        return 0.0
+
+    bits = (b & 0xff) << (24 - mantissabits)
+    bits += (63 - zeroexp) << 24
+    return unpack("f", pack("i", bits))[0]
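+
+# Editor's note (assumption, not stated in the original file): this one-byte
+# float encoding is lossy but stable for simple values, e.g.
+# byte_to_float(float_to_byte(1.0)) == 1.0, while most other values only
+# round-trip approximately.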
+
+
+# Length-to-byte approximation functions
+
+def length_to_byte(length):
+    """Returns a logarithmic approximation of the given number, in the range
+    0-255. The approximation has high precision at the low end (e.g.
+    1 -> 0, 2 -> 1, 3 -> 2 ...) and low precision at the high end. Numbers
+    equal to or greater than 108116 all approximate to 255.
+
+    This is useful for storing field lengths, where the general case is small
+    documents and very large documents are more rare.
+    """
+
+    # This encoding formula works up to 108116 -> 255, so if the length is
+    # equal to or greater than that limit, just return 255.
+    if length >= 108116:
+        return 255
+
+    # The parameters of this formula were chosen heuristically so that low
+    # numbers would approximate closely, and the byte range 0-255 would cover
+    # a decent range of document lengths (i.e. 1 to ~100000).
+    return int(round(log((length / 27.0) + 1, 1.033)))
+
+
+def _byte_to_length(n):
+    return int(round((pow(1.033, n) - 1) * 27))
+
+_length_byte_cache = array("i", (_byte_to_length(i) for i in xrange(256)))
+byte_to_length = _length_byte_cache.__getitem__
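+
+# Editor's note: byte_to_length is the approximate inverse of length_to_byte;
+# for example byte_to_length(length_to_byte(1000)) is roughly 1000 rather than
+# the exact length, which is the accepted loss of this one-byte encoding.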
+
+
+# Prefix encoding functions
+
+def first_diff(a, b):
+    """Returns the position of the first differing character in the strings
+    a and b. For example, first_diff('render', 'rending') == 4. This function
+    limits the return value to 255 so the difference can be encoded in a single
+    byte.
+    """
+
+    i = -1
+    for i in xrange(0, len(a)):
+        if a[i] != b[i]:
+            return i
+        if i == 255:
+            return i
+    return i + 1
+
+
+def prefix_encode(a, b):
+    """Compresses string b as an integer (encoded in a byte) representing
+    the prefix it shares with a, followed by the suffix encoded as UTF-8.
+    """
+    i = first_diff(a, b)
+    return chr(i) + b[i:].encode("utf8")
+
+
+def prefix_encode_all(ls):
+    """Compresses the given list of (unicode) strings by storing each string
+    (except the first one) as an integer (encoded in a byte) representing
+    the prefix it shares with its predecessor, followed by the suffix encoded
+    as UTF-8.
+    """
+
+    last = u''
+    for w in ls:
+        i = first_diff(last, w)
+        yield chr(i) + w[i:].encode("utf8")
+        last = w
+
+
+def prefix_decode_all(ls):
+    """Decompresses a list of strings compressed by prefix_encode().
+    """
+
+    last = u''
+    for w in ls:
+        i = ord(w[0])
+        decoded = last[:i] + w[1:].decode("utf8")
+        yield decoded
+        last = decoded
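+
+# Editor's worked example (not in the original source): for the sorted terms
+# [u"render", u"rending"], prefix_encode_all yields chr(0) + "render" (nothing
+# shared with the empty seed) and then chr(4) + "ing", because "rending"
+# shares its first four characters with "render"; prefix_decode_all reverses
+# the process.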
+
+
+# Natural key sorting function
+
+_nkre = re.compile(r"\D+|\d+", re.UNICODE)
+
+
+def _nkconv(i):
+    try:
+        return int(i)
+    except ValueError:
+        return i.lower()
+
+
+def natural_key(s):
+    """Converts string ``s`` into a tuple that will sort "naturally" (i.e.,
+    ``name5`` will come before ``name10`` and ``1`` will come before ``A``).
+    This function is designed to be used as the ``key`` argument to sorting
+    functions.
+
+    :param s: the str/unicode string to convert.
+    :rtype: tuple
+    """
+
+    # Use _nkre to split the input string into a sequence of
+    # digit runs and non-digit runs. Then use _nkconv() to convert
+    # the digit runs into ints and the non-digit runs to lowercase.
+    return tuple(_nkconv(m) for m in _nkre.findall(s))
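+
+# Editor's example: natural_key(u"Name10") == (u"name", 10), which sorts after
+# natural_key(u"name5") == (u"name", 5).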
+
+
+# Mixins and decorators
+
+class ClosableMixin(object):
+    """Mix-in for classes with a close() method to allow them to be used as a
+    context manager.
+    """
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *exc_info):
+        self.close()
+
+
+def protected(func):
+    """Decorator for storage-access methods. This decorator (a) checks if the
+    object has already been closed, and (b) synchronizes on a threading lock.
+    The parent object must have 'is_closed' and '_sync_lock' attributes.
+    """
+
+    @wraps(func)
+    def protected_wrapper(self, *args, **kwargs):
+        if self.is_closed:
+            raise Exception("%r has been closed" % self)
+        with self._sync_lock:
+            return func(self, *args, **kwargs)
+
+    return protected_wrapper
+
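+# Editor's sketch of the host object protected() expects (hypothetical names):
+#
+#   class Table(object):
+#       def __init__(self):
+#           self.is_closed = False
+#           self._sync_lock = Lock()
+#           self.rows = {}
+#
+#       @protected
+#       def read_row(self, n):
+#           # runs under self._sync_lock and raises once is_closed is True
+#           return self.rows[n]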
+
+def synchronized(func):
+    """Decorator for storage-access methods, which synchronizes on a threading
+    lock. The parent object must have 'is_closed' and '_sync_lock' attributes.
+    """
+
+    @wraps(func)
+    def synchronized_wrapper(self, *args, **kwargs):
+        with self._sync_lock:
+            return func(self, *args, **kwargs)
+
+    return synchronized_wrapper
+
+
+def unbound_cache(func):
+    """Caching decorator with an unbounded cache size.
+    """
+
+    cache = {}
+
+    @wraps(func)
+    def caching_wrapper(*args):
+        try:
+            return cache[args]
+        except KeyError:
+            result = func(*args)
+            cache[args] = result
+            return result
+
+    return caching_wrapper
+
+
+def lru_cache(maxsize=100):
+    """Least-recently-used cache decorator.
+
+    This function duplicates (more-or-less) the protocol of the
+    ``functools.lru_cache`` decorator in the Python 3.2 standard library, but
+    uses the clock face LRU algorithm instead of an ordered dictionary.
+
+    If *maxsize* is set to None, the LRU features are disabled and the cache
+    can grow without bound.
+
+    Arguments to the cached function must be hashable.
+
+    View the cache statistics named tuple (hits, misses, maxsize, currsize)
+    with f.cache_info().  Clear the cache and statistics with f.cache_clear().
+    Access the underlying function with f.__wrapped__.
+    """
+
+    def decorating_function(user_function):
+
+        stats = [0, 0, 0]  # hits, misses, clock hand position
+        data = {}
+
+        if maxsize:
+            # The keys at each point on the clock face
+            clock_keys = [None] * maxsize
+            # The "referenced" bits at each point on the clock face
+            clock_refs = array("B", (0 for _ in xrange(maxsize)))
+            lock = Lock()
+
+            @wraps(user_function)
+            def wrapper(*args):
+                key = args
+                try:
+                    with lock:
+                        pos, result = data[key]
+                        # The key is in the cache. Set the key's reference bit
+                        clock_refs[pos] = 1
+                        # Record a cache hit
+                        stats[0] += 1
+                except KeyError:
+                    # Compute the value
+                    result = user_function(*args)
+                    with lock:
+                        # Current position of the clock hand
+                        hand = stats[2]
+                        # Remember to stop here after a full revolution
+                        end = hand
+                        # Sweep around the clock looking for a position with
+                        # the reference bit off
+                        while True:
+                            hand = (hand + 1) % maxsize
+                            current_ref = clock_refs[hand]
+                            if current_ref:
+                                # This position's "referenced" bit is set. Turn
+                                # the bit off and move on.
+                                clock_refs[hand] = 0
+                            elif not current_ref or hand == end:
+                                # We've either found a position with the
+                                # "reference" bit off or reached the end of the
+                                # circular cache. So we'll replace this
+                                # position with the new key
+                                current_key = clock_keys[hand]
+                                if current_key in data:
+                                    del data[current_key]
+                                clock_keys[hand] = key
+                                clock_refs[hand] = 1
+                                break
+                        # Put the key and result in the cache
+                        data[key] = (hand, result)
+                        # Save the new hand position
+                        stats[2] = hand
+                        # Record a cache miss
+                        stats[1] += 1
+                return result
+
+        else:
+            @wraps(user_function)
+            def wrapper(*args):
+                key = args
+                try:
+                    result = data[key]
+                    stats[0] += 1
+                except KeyError:
+                    result = user_function(*args)
+                    data[key] = result
+                    stats[1] += 1
+                return result
+
+        def cache_info():
+            """Report cache statistics"""
+            return (stats[0], stats[1], maxsize, len(data))
+
+        def cache_clear():
+            """Clear the cache and cache statistics"""
+            data.clear()
+            stats[0] = stats[1] = stats[2] = 0
+            if maxsize:
+                for i in xrange(maxsize):
+                    clock_keys[i] = None
+                    clock_refs[i] = 0
+
+        wrapper.cache_info = cache_info
+        wrapper.cache_clear = cache_clear
+        return wrapper
+
+    return decorating_function
+
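+# Editor's usage sketch (not part of the original module): caching is keyed on
+# positional arguments only, mirroring Python 3.2's functools.lru_cache.
+#
+#   @lru_cache(maxsize=2)
+#   def square(x):
+#       return x * x
+#
+#   square(2); square(2)
+#   square.cache_info()   # -> (1, 1, 2, 1): one hit, one miss, maxsize, size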
+
+def find_object(name, blacklist=None, whitelist=None):
+    """Imports and returns an object given a fully qualified name.
+
+    >>> find_object("whoosh.analysis.StopFilter")
+    <class 'whoosh.analysis.StopFilter'>
+    """
+
+    if blacklist:
+        for pre in blacklist:
+            if name.startswith(pre):
+                raise TypeError("%r: can't instantiate names starting with %r" % (name, pre))
+    if whitelist:
+        passes = False
+        for pre in whitelist:
+            if name.startswith(pre):
+                passes = True
+                break
+        if not passes:
+            raise TypeError("Can't instantiate %r" % name)
+
+    lastdot = name.rfind(".")
+
+    assert lastdot > -1, "Name %r must be fully qualified" % name
+    modname = name[:lastdot]
+    clsname = name[lastdot + 1:]
+
+    mod = __import__(modname, fromlist=[clsname])
+    cls = getattr(mod, clsname)
+    return cls
+
+
diff --git a/lib/whoosh/whoosh/writing.py b/lib/whoosh/whoosh/writing.py
new file mode 100644
index 0000000..2be84a3
--- /dev/null
+++ b/lib/whoosh/whoosh/writing.py
@@ -0,0 +1,518 @@
+# Copyright 2007 Matt Chaput. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#    1. Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#
+#    2. Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and documentation are
+# those of the authors and should not be interpreted as representing official
+# policies, either expressed or implied, of Matt Chaput.
+
+from __future__ import with_statement
+import threading
+import time
+
+from whoosh.store import LockError
+from whoosh.util import synchronized
+
+
+# Exceptions
+
+class IndexingError(Exception):
+    pass
+
+
+# Base class
+
+class IndexWriter(object):
+    """High-level object for writing to an index.
+
+    To get a writer for a particular index, call
+    :meth:`~whoosh.index.Index.writer` on the Index object.
+
+    >>> writer = my_index.writer()
+
+    You can use this object as a context manager. If an exception is thrown
+    from within the context it calls cancel(), otherwise it calls commit() when
+    the context exits.
+    """
+
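+    # Illustrative use of the context-manager behaviour described above (a
+    # sketch, not part of the original source; ``my_index`` is assumed to be an
+    # open whoosh.index.Index with "title" and "content" fields):
+    #
+    #   with my_index.writer() as writer:
+    #       writer.add_document(title=u"Hello", content=u"First document")
+    #   # commit() has run here; an exception inside the block calls cancel()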
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type:
+            self.cancel()
+        else:
+            self.commit()
+
+    def add_field(self, fieldname, fieldtype, **kwargs):
+        """Adds a field to the index's schema.
+
+        :param fieldname: the name of the field to add.
+        :param fieldtype: an instantiated :class:`whoosh.fields.FieldType`
+            object.
+        """
+
+        self.schema.add(fieldname, fieldtype, **kwargs)
+
+    def remove_field(self, fieldname, **kwargs):
+        """Removes the named field from the index's schema. Depending on the
+        backend implementation, this may or may not actually remove existing
+        data for the field from the index. Optimizing the index should always
+        clear out existing data for a removed field.
+        """
+
+        self.schema.remove(fieldname, **kwargs)
+
+    def reader(self, **kwargs):
+        """Returns a reader for the existing index.
+        """
+
+        raise NotImplementedError
+
+    def searcher(self, **kwargs):
+        from whoosh.searching import Searcher
+
+        return Searcher(self.reader(), **kwargs)
+
+    def delete_by_term(self, fieldname, text, searcher=None):
+        """Deletes any documents containing "term" in the "fieldname" field.
+        This is useful when you have an indexed field containing a unique ID
+        (such as "pathname") for each document.
+
+        :returns: the number of documents deleted.
+        """
+
+        from whoosh.query import Term
+
+        q = Term(fieldname, text)
+        return self.delete_by_query(q, searcher=searcher)
+
+    def delete_by_query(self, q, searcher=None):
+        """Deletes any documents matching a query object.
+
+        :returns: the number of documents deleted.
+        """
+
+        if searcher:
+            s = searcher
+        else:
+            s = self.searcher()
+
+        try:
+            count = 0
+            for docnum in s.docs_for_query(q):
+                if not self.is_deleted(docnum):
+                    self.delete_document(docnum)
+                    count += 1
+        finally:
+            if not searcher:
+                s.close()
+
+        return count
+
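+    # Illustrative sketch (field and value names are examples only): removing a
+    # stale document by its unique ID field before re-indexing it.
+    #
+    #   writer.delete_by_term("path", u"/docs/old.html")
+    #   # or, equivalently, via an arbitrary query:
+    #   #   from whoosh.query import Term
+    #   #   writer.delete_by_query(Term("path", u"/docs/old.html"))
+    #   writer.commit()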
+    def delete_document(self, docnum, delete=True):
+        """Deletes a document by number.
+        """
+        raise NotImplementedError
+
+    def add_document(self, **fields):
+        """The keyword arguments map field names to the values to index/store.
+
+        For fields that are both indexed and stored, you can specify an
+        alternate value to store using a keyword argument in the form
+        "_stored_<fieldname>". For example, if you have a field named "title"
+        and you want to index the text "a b c" but store the text "e f g", use
+        keyword arguments like this::
+
+            writer.add_document(title=u"a b c", _stored_title=u"e f g")
+        """
+        raise NotImplementedError
+
+    def _unique_fields(self, fields):
+        # Check which of the supplied fields are unique
+        unique_fields = [name for name, field in self.schema.items()
+                         if name in fields and field.unique]
+        if not unique_fields:
+            raise IndexingError("None of the fields in %r"
+                                " are unique" % fields.keys())
+        return unique_fields
+
+    def update_document(self, **fields):
+        """The keyword arguments map field names to the values to index/store.
+
+        Note that this method will only replace a *committed* document;
+        currently it cannot replace documents you've added to the IndexWriter
+        but haven't yet committed. For example, if you do this:
+
+        >>> writer.update_document(unique_id=u"1", content=u"Replace me")
+        >>> writer.update_document(unique_id=u"1", content=u"Replacement")
+
+        ...this will add two documents with the same value of ``unique_id``,
+        instead of the second document replacing the first.
+
+        For fields that are both indexed and stored, you can specify an
+        alternate value to store using a keyword argument in the form
+        "_stored_<fieldname>". For example, if you have a field named "title"
+        and you want to index the text "a b c" but store the text "e f g", use
+        keyword arguments like this::
+
+            writer.update_document(title=u"a b c", _stored_title=u"e f g")
+        """
+
+        # Delete the set of documents matching the unique terms
+        unique_fields = self._unique_fields(fields)
+        with self.searcher() as s:
+            for docnum in s._find_unique([(name, fields[name])
+                                          for name in unique_fields]):
+                self.delete_document(docnum)
+
+        # Add the given fields
+        self.add_document(**fields)
+
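+    # Illustrative sketch of the update flow above (assumes a schema declared
+    # elsewhere with a unique field, e.g. path=ID(unique=True, stored=True)):
+    #
+    #   writer = ix.writer()
+    #   writer.update_document(path=u"/a", content=u"replacement text")
+    #   writer.commit()   # the previously committed "/a" document is replaced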
+    def commit(self):
+        """Finishes writing and unlocks the index.
+        """
+        pass
+
+    def cancel(self):
+        """Cancels any documents/deletions added by this object
+        and unlocks the index.
+        """
+        pass
+
+
+class PostingWriter(object):
+    def start(self, format):
+        """Start a new set of postings for a new term. Implementations may
+        raise an exception if this is called without a corresponding call to
+        finish().
+        """
+        raise NotImplementedError
+
+    def write(self, id, weight, valuestring):
+        """Add a posting with the given ID and value.
+        """
+        raise NotImplementedError
+
+    def finish(self):
+        """Finish writing the postings for the current term. Implementations
+        may raise an exception if this is called without a preceding call to
+        start().
+        """
+        pass
+
+    def close(self):
+        """Finish writing all postings and close the underlying file.
+        """
+        pass
+
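+# Illustrative call sequence for a PostingWriter implementation (a sketch, not
+# part of the original source): postings for each term are bracketed by start()
+# and finish(), and close() ends the whole posting output.
+#
+#   pw.start(field_format)
+#   pw.write(docnum, weight, valuestring)   # repeated once per matching document
+#   pw.finish()
+#   ...                                     # further terms follow the same pattern
+#   pw.close()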
+
+class AsyncWriter(threading.Thread, IndexWriter):
+    """Convenience wrapper for a writer object that might fail due to locking
+    (i.e. the ``filedb`` writer). This object will attempt once to obtain the
+    underlying writer, and if it's successful, will simply pass method calls on
+    to it.
+
+    If this object *can't* obtain a writer immediately, it will *buffer*
+    delete, add, and update method calls in memory until you call ``commit()``.
+    At that point, this object will start running in a separate thread, trying
+    to obtain the writer over and over, and once it obtains it, "replay" all
+    the buffered method calls on it.
+
+    In a typical scenario where you're adding a single or a few documents to
+    the index as the result of a Web transaction, this lets you just create the
+    writer, add, and commit, without having to worry about index locks,
+    retries, etc.
+
+    For example, to get an asynchronous writer, instead of this:
+
+    >>> writer = myindex.writer(postlimitmb=128)
+
+    Do this:
+
+    >>> from whoosh.writing import AsyncWriter
+    >>> writer = AsyncWriter(myindex)
+    """
+
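+    # Illustrative usage in a request handler (a sketch, not part of the original
+    # source): the calling code is the same whether or not the index lock was free.
+    #
+    #   from whoosh.writing import AsyncWriter
+    #   writer = AsyncWriter(myindex)
+    #   writer.add_document(title=u"New post", content=u"body text")
+    #   writer.commit()   # commits now, or replays the buffered calls in a thread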
+    def __init__(self, index, delay=0.25, writerargs=None):
+        """
+        :param index: the :class:`whoosh.index.Index` to write to.
+        :param delay: the delay (in seconds) between attempts to instantiate
+            the actual writer.
+        :param writerargs: an optional dictionary specifying keyword arguments
+            to be passed to the index's ``writer()`` method.
+        """
+
+        threading.Thread.__init__(self)
+        self.running = False
+        self.index = index
+        self.writerargs = writerargs or {}
+        self.delay = delay
+        self.events = []
+        try:
+            self.writer = self.index.writer(**self.writerargs)
+        except LockError:
+            self.writer = None
+
+    def reader(self):
+        return self.index.reader()
+
+    def searcher(self, **kwargs):
+        from whoosh.searching import Searcher
+        return Searcher(self.reader(), fromindex=self.index, **kwargs)
+
+    def _record(self, method, args, kwargs):
+        if self.writer:
+            getattr(self.writer, method)(*args, **kwargs)
+        else:
+            self.events.append((method, args, kwargs))
+
+    def run(self):
+        self.running = True
+        writer = self.writer
+        while writer is None:
+            try:
+                writer = self.index.writer(**self.writerargs)
+            except LockError:
+                time.sleep(self.delay)
+        for method, args, kwargs in self.events:
+            getattr(writer, method)(*args, **kwargs)
+        writer.commit(*self.commitargs, **self.commitkwargs)
+
+    def delete_document(self, *args, **kwargs):
+        self._record("delete_document", args, kwargs)
+
+    def add_document(self, *args, **kwargs):
+        self._record("add_document", args, kwargs)
+
+    def update_document(self, *args, **kwargs):
+        self._record("update_document", args, kwargs)
+
+    def add_field(self, *args, **kwargs):
+        self._record("add_field", args, kwargs)
+
+    def remove_field(self, *args, **kwargs):
+        self._record("remove_field", args, kwargs)
+
+    def delete_by_term(self, *args, **kwargs):
+        self._record("delete_by_term", args, kwargs)
+
+    def commit(self, *args, **kwargs):
+        if self.writer:
+            self.writer.commit(*args, **kwargs)
+        else:
+            self.commitargs, self.commitkwargs = args, kwargs
+            self.start()
+
+    def cancel(self, *args, **kwargs):
+        if self.writer:
+            self.writer.cancel(*args, **kwargs)
+
+
+class BufferedWriter(IndexWriter):
+    """Convenience class that acts like a writer but buffers added documents to
+    a :class:`~whoosh.ramindex.RamIndex` before dumping the buffered documents
+    as a batch into the actual index.
+
+    In scenarios where you are continuously adding single documents very
+    rapidly (for example a web application where lots of users are adding
+    content simultaneously), using a BufferedWriter is *much* faster than
+    opening and committing a writer for each document you add.
+
+    (This class may also be useful for batches of ``update_document`` calls. In
+    a normal writer, ``update_document`` calls cannot update documents you've
+    added *in that writer*. With ``BufferedWriter``, this will work.)
+
+    If you're adding batches of documents at a time, you can just use a
+    regular writer -- you're already committing a "batch" of documents, so you
+    don't need this class.
+
+    To use this class, create it from your index and *keep it open*, sharing
+    it between threads.
+
+    >>> from whoosh.writing import BufferedWriter
+    >>> writer = BufferedWriter(myindex, period=120, limit=100)
+
+    You can control how often the ``BufferedWriter`` flushes the in-memory
+    index to disk using the ``period`` and ``limit`` arguments. ``period`` is
+    the maximum number of seconds between commits. ``limit`` is the maximum
+    number of additions to buffer between commits.
+
+    You can read/search the combination of the on-disk index and the buffered
+    documents in memory by calling ``BufferedWriter.reader()`` or
+    ``BufferedWriter.searcher()``. This allows quasi-real-time search, where
+    documents are available for searching as soon as they are buffered in
+    memory, before they are committed to disk.
+
+    >>> searcher = writer.searcher()
+
+    .. tip::
+        By using a searcher from the shared writer, multiple *threads* can
+        search the buffered documents. Of course, other *processes* will only
+        see the documents that have been written to disk. If you want indexed
+        documents to become available to other processes as soon as possible,
+        you have to use a traditional writer instead of a ``BufferedWriter``.
+
+    Calling ``commit()`` on the ``BufferedWriter`` manually commits any batched
+    up changes. You can continue to make changes after calling ``commit()``,
+    and you can call ``commit()`` multiple times.
+
+    .. note::
+        This object keeps an underlying writer open and stores documents in
+        memory, so you must explicitly call the :meth:`~BufferedWriter.close()`
+        method on this object before it goes out of scope to release the
+        write lock and make sure any uncommitted changes are saved.
+    """
+
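+    # Illustrative usage (a sketch; field names are examples only): share a single
+    # BufferedWriter between request threads and close it on shutdown.
+    #
+    #   writer = BufferedWriter(myindex, period=60, limit=20)
+    #   try:
+    #       writer.add_document(path=u"/a", content=u"alpha beta")
+    #       results = writer.searcher().search(somequery)  # sees buffered docs too
+    #   finally:
+    #       writer.close()  # flush buffered documents and release the write lock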
+    def __init__(self, index, period=60, limit=10, writerargs=None,
+                 commitargs=None, tempixclass=None):
+        """
+        :param index: the :class:`whoosh.index.Index` to write to.
+        :param period: the maximum amount of time (in seconds) between commits.
+            Set this to ``0`` or ``None`` to not use a timer. Do not set this
+            any lower than a few seconds.
+        :param limit: the maximum number of documents to buffer before
+            committing.
+        :param writerargs: dictionary specifying keyword arguments to be passed
+            to the index's ``writer()`` method when creating a writer.
+        :param commitargs: dictionary specifying keyword arguments to be passed
+            to the writer's ``commit()`` method when committing a writer.
+        :param tempixclass: the class to use for the temporary in-memory buffer
+            index; defaults to :class:`whoosh.ramindex.RamIndex`.
+        """
+
+        self.index = index
+        self.period = period
+        self.limit = limit
+        self.writerargs = writerargs or {}
+        self.commitargs = commitargs or {}
+        self._sync_lock = threading.RLock()
+        self._write_lock = threading.Lock()
+
+        if tempixclass is None:
+            from whoosh.ramindex import RamIndex as tempixclass
+        self.tempixclass = tempixclass
+
+        self.writer = None
+        self.base = self.index.doc_count_all()
+        self.bufferedcount = 0
+        self.commitcount = 0
+        self.ramindex = self._create_ramindex()
+        if self.period:
+            self.timer = threading.Timer(self.period, self.commit)
+            self.timer.start()
+
+    def __del__(self):
+        if hasattr(self, "writer") and self.writer:
+            if not self.writer.is_closed:
+                self.writer.cancel()
+            del self.writer
+
+    def _create_ramindex(self):
+        return self.tempixclass(self.index.schema)
+
+    def _get_writer(self):
+        if self.writer is None:
+            self.writer = self.index.writer(**self.writerargs)
+            self.schema = self.writer.schema
+            self.base = self.index.doc_count_all()
+            self.bufferedcount = 0
+        return self.writer
+
+    @synchronized
+    def reader(self, **kwargs):
+        from whoosh.reading import MultiReader
+
+        writer = self._get_writer()
+        ramreader = self.ramindex
+        if self.index.is_empty():
+            return ramreader
+        else:
+            reader = writer.reader(**kwargs)
+            if reader.is_atomic():
+                reader = MultiReader([reader, ramreader])
+            else:
+                reader.add_reader(ramreader)
+            return reader
+
+    def searcher(self, **kwargs):
+        from whoosh.searching import Searcher
+
+        return Searcher(self.reader(), fromindex=self.index, **kwargs)
+
+    def close(self):
+        self.commit(restart=False)
+
+    def commit(self, restart=True):
+        if self.period:
+            self.timer.cancel()
+
+        # Replace the RAM index
+        with self._sync_lock:
+            oldramindex = self.ramindex
+            self.ramindex = self._create_ramindex()
+
+        with self._write_lock:
+            if self.bufferedcount:
+                self._get_writer().add_reader(oldramindex.reader())
+
+            if self.writer:
+                self.writer.commit(**self.commitargs)
+                self.writer = None
+                self.commitcount += 1
+
+            if restart:
+                if self.period:
+                    self.timer = threading.Timer(self.period, self.commit)
+                    self.timer.start()
+
+    def add_reader(self, reader):
+        with self._write_lock:
+            self._get_writer().add_reader(reader)
+
+    def add_document(self, **fields):
+        with self._sync_lock:
+            self.ramindex.add_document(**fields)
+            self.bufferedcount += 1
+        if self.bufferedcount >= self.limit:
+            self.commit()
+
+    @synchronized
+    def update_document(self, **fields):
+        self._get_writer()
+        super(BufferedWriter, self).update_document(**fields)
+
+    @synchronized
+    def delete_document(self, docnum, delete=True):
+        if docnum < self.base:
+            return self._get_writer().delete_document(docnum, delete=delete)
+        else:
+            return self.ramindex.delete_document(docnum - self.base, delete=delete)
+
+    @synchronized
+    def is_deleted(self, docnum):
+        if docnum < self.base:
+            return self._get_writer().is_deleted(docnum)
+        else:
+            return self.ramindex.is_deleted(docnum - self.base)
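+    # Note on the document-number arithmetic above (illustrative, not from the
+    # original source): numbers below self.base address the on-disk index, while
+    # numbers at or above it address documents still buffered in RAM, shifted
+    # down by self.base. For example, with base == 100, docnum 103 maps to
+    # buffered document 3.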
+
+# Backwards compatibility with old name
+BatchWriter = BufferedWriter
+
+
+
diff --git a/remote_api_shell.py b/remote_api_shell.py
index e8360c1..8f18e20 100755
--- a/remote_api_shell.py
+++ b/remote_api_shell.py
@@ -47,6 +47,7 @@
   os.path.join(DIR_PATH, 'lib', 'ipaddr'),
   os.path.join(DIR_PATH, 'lib', 'protorpc'),
   os.path.join(DIR_PATH, 'lib', 'webob'),
+  os.path.join(DIR_PATH, 'lib', 'whoosh'),
   os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
   os.path.join(DIR_PATH, 'lib', 'simplejson'),
   os.path.join(DIR_PATH, 'lib', 'graphy'),