Update BOTO to revision 3968
git-svn-id: svn://svn.chromium.org/boto@5 4f2e627c-b00b-48dd-b1fb-2c643665b734
diff --git a/MANIFEST.in b/MANIFEST.in
index fceffb7..d5e4f61 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1 +1,12 @@
include boto/cacerts/cacerts.txt
+include README.rst
+include Changelog.rst
+include boto/file/README
+include .gitignore
+include pylintrc
+include boto/pyami/copybot.cfg
+include boto/services/sonofmmm.cfg
+include boto/mturk/test/*.doctest
+include boto/mturk/test/.gitignore
+recursive-include tests *.py *.txt
+recursive-include docs *
diff --git a/README.chromium b/README.chromium
index 14982ac..054e7e4 100644
--- a/README.chromium
+++ b/README.chromium
@@ -1,22 +1,5 @@
URL: http://github.com/boto/boto
-Version: 2.1.1
+Version: 2.6.0
License: MIT License
-This is a forked copy of boto v2.1.1.
-
-
-Fix checksum support to be compatible with Windows.
-See http://bugs.python.org/issue1735418 for more info.
-
-index 5492e14..d7d2aa0 100644
---- a/boto/s3/resumable_download_handler.py
-+++ b/boto/s3/resumable_download_handler.py
-@@ -220,7 +220,7 @@ class ResumableDownloadHandler(object):
- gsutil runs), and the user could change some of the file and not
- realize they have inconsistent data.
- """
-- fp = open(file_name, 'r')
-+ fp = open(file_name, 'rb')
- if key.bucket.connection.debug >= 1:
- print 'Checking md5 against etag.'
- hex_md5 = key.compute_md5(fp)[0]
+This is a forked copy of boto at revision 3968.
diff --git a/README.markdown b/README.markdown
deleted file mode 100644
index 95d32f0..0000000
--- a/README.markdown
+++ /dev/null
@@ -1,72 +0,0 @@
-# boto
-boto 2.1.1
-31-Oct-2011
-
-## Introduction
-
-Boto is a Python package that provides interfaces to Amazon Web Services.
-At the moment, boto supports:
-
- * Simple Storage Service (S3)
- * SimpleQueue Service (SQS)
- * Elastic Compute Cloud (EC2)
- * Mechanical Turk
- * SimpleDB
- * CloudFront
- * CloudWatch
- * AutoScale
- * Elastic Load Balancer (ELB)
- * Virtual Private Cloud (VPC)
- * Elastic Map Reduce (EMR)
- * Relational Data Service (RDS)
- * Simple Notification Server (SNS)
- * Google Storage
- * Identity and Access Management (IAM)
- * Route53 DNS Service (route53)
- * Simple Email Service (SES)
- * Flexible Payment Service (FPS)
- * CloudFormation
-
-The goal of boto is to support the full breadth and depth of Amazon
-Web Services. In addition, boto provides support for other public
-services such as Google Storage in addition to private cloud systems
-like Eucalyptus, OpenStack and Open Nebula.
-
-Boto is developed mainly using Python 2.6.6 and Python 2.7.1 on Mac OSX
-and Ubuntu Maverick. It is known to work on other Linux distributions
-and on Windows. Boto requires no additional libraries or packages
-other than those that are distributed with Python. Efforts are made
-to keep boto compatible with Python 2.5.x but no guarantees are made.
-
-## Finding Out More About Boto
-
-The main source code repository for boto can be found on
-[github.com](http://github.com/boto/boto)
-
-[Online documentation](http://readthedocs.org/docs/boto/) is also
-available. The online documentation includes full API documentation
-as well as Getting Started Guides for many of the boto modules.
-
-Boto releases can be found on the [Google Project
-page](http://code.google.com/p/boto/downloads/list) or on the [Python
-Cheese Shop](http://pypi.python.org/).
-
-Join our `IRC channel`_ (#boto on FreeNode).
- IRC channel: http://webchat.freenode.net/?channels=boto
-
-## Getting Started with Boto
-
-Your credentials can be passed into the methods that create
-connections. Alternatively, boto will check for the existance of the
-following environment variables to ascertain your credentials:
-
-AWS_ACCESS_KEY_ID - Your AWS Access Key ID
-AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key
-
-Credentials and other boto-related settings can also be stored in a
-boto config file. See
-[this](http://code.google.com/p/boto/wiki/BotoConfig) for details.
-
-Copyright (c) 2006-2011 Mitch Garnaat <mitch@garnaat.com>
-Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
-All rights reserved.
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..3499ad4
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,143 @@
+####
+boto
+####
+boto 2.6.0
+19-Sep-2012
+
+.. image:: https://secure.travis-ci.org/boto/boto.png?branch=develop
+ :target: https://secure.travis-ci.org/boto/boto
+
+************
+Introduction
+************
+
+Boto is a Python package that provides interfaces to Amazon Web Services.
+At the moment, boto supports:
+
+* Compute
+ * Amazon Elastic Compute Cloud (EC2)
+ * Amazon Elastic Map Reduce (EMR)
+ * AutoScaling
+ * Elastic Load Balancing (ELB)
+* Content Delivery
+ * Amazon CloudFront
+* Database
+ * Amazon Relational Data Service (RDS)
+ * Amazon DynamoDB
+ * Amazon SimpleDB
+* Deployment and Management
+ * AWS Identity and Access Management (IAM)
+ * Amazon CloudWatch
+ * AWS Elastic Beanstalk
+ * AWS CloudFormation
+* Application Services
+ * Amazon CloudSearch
+ * Amazon Simple Workflow Service (SWF)
+ * Amazon Simple Queue Service (SQS)
+ * Amazon Simple Notification Server (SNS)
+ * Amazon Simple Email Service (SES)
+* Networking
+ * Amazon Route53
+ * Amazon Virtual Private Cloud (VPC)
+* Payments and Billing
+ * Amazon Flexible Payment Service (FPS)
+* Storage
+ * Amazon Simple Storage Service (S3)
+ * Amazon Glacier
+ * Amazon Elastic Block Store (EBS)
+ * Google Cloud Storage
+* Workforce
+ * Amazon Mechanical Turk
+* Other
+ * Marketplace Web Services
+
+The goal of boto is to support the full breadth and depth of Amazon
+Web Services. In addition, boto provides support for other public
+services such as Google Storage in addition to private cloud systems
+like Eucalyptus, OpenStack and Open Nebula.
+
+Boto is developed mainly using Python 2.6.6 and Python 2.7.1 on Mac OSX
+and Ubuntu Maverick. It is known to work on other Linux distributions
+and on Windows. Boto requires no additional libraries or packages
+other than those that are distributed with Python. Efforts are made
+to keep boto compatible with Python 2.5.x but no guarantees are made.
+
+************
+Installation
+************
+
+Install via `pip`_:
+
+::
+
+ $ pip install boto
+
+Install from source:
+
+::
+
+ $ git clone git://github.com/boto/boto.git
+ $ cd boto
+ $ python setup.py install
+
+**********
+ChangeLogs
+**********
+
+To see what has changed over time in boto, you can check out the
+`release notes`_ in the wiki.
+
+*********************************
+Special Note for Python 3.x Users
+*********************************
+
+If you are interested in trying out boto with Python 3.x, check out the
+`neo`_ branch. This is under active development and the goal is a version
+of boto that works in Python 2.6, 2.7, and 3.x. Not everything is working
+just yet but many things are and it's worth a look if you are an active
+Python 3.x user.
+
+***************************
+Finding Out More About Boto
+***************************
+
+The main source code repository for boto can be found on `github.com`_.
+The boto project uses the `gitflow`_ model for branching.
+
+`Online documentation`_ is also available. The online documentation includes
+full API documentation as well as Getting Started Guides for many of the boto
+modules.
+
+Boto releases can be found on the `Python Cheese Shop`_.
+
+Join our IRC channel `#boto` on FreeNode.
+Webchat IRC channel: http://webchat.freenode.net/?channels=boto
+
+*************************
+Getting Started with Boto
+*************************
+
+Your credentials can be passed into the methods that create
+connections. Alternatively, boto will check for the existence of the
+following environment variables to ascertain your credentials:
+
+**AWS_ACCESS_KEY_ID** - Your AWS Access Key ID
+
+**AWS_SECRET_ACCESS_KEY** - Your AWS Secret Access Key
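+
+For example (a minimal sketch; the placeholder keys are assumptions)::
+
+ >>> import boto
+ >>> # Pass credentials explicitly...
+ >>> s3 = boto.connect_s3('<access key>', '<secret key>')
+ >>> # ...or let boto read the environment variables above.
+ >>> s3 = boto.connect_s3()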
+
+Credentials and other boto-related settings can also be stored in a
+boto config file. See `this`_ for details.
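+
+A sketch of such a config file, again with placeholder keys::
+
+ [Credentials]
+ aws_access_key_id = <your access key>
+ aws_secret_access_key = <your secret key>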
+
+Copyright (c) 2006-2012 Mitch Garnaat <mitch@garnaat.com>
+Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
+Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+All rights reserved.
+
+.. _pip: http://www.pip-installer.org/
+.. _release notes: https://github.com/boto/boto/wiki
+.. _github.com: http://github.com/boto/boto
+.. _Online documentation: http://docs.pythonboto.org
+.. _Python Cheese Shop: http://pypi.python.org/pypi/boto
+.. _this: http://code.google.com/p/boto/wiki/BotoConfig
+.. _gitflow: http://nvie.com/posts/a-successful-git-branching-model/
+.. _neo: https://github.com/boto/boto/tree/neo
diff --git a/bin/cq b/bin/cq
index dd9b914..242d0d2 100755
--- a/bin/cq
+++ b/bin/cq
@@ -31,8 +31,8 @@
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'hcq:o:t:r:',
- ['help', 'clear', 'queue',
- 'output', 'timeout', 'region'])
+ ['help', 'clear', 'queue=',
+ 'output=', 'timeout=', 'region='])
except:
usage()
sys.exit(2)
diff --git a/bin/elbadmin b/bin/elbadmin
index a5ec6bb..6c8a8c7 100755
--- a/bin/elbadmin
+++ b/bin/elbadmin
@@ -15,136 +15,172 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
#
# Elastic Load Balancer Tool
#
-VERSION="0.1"
+VERSION = "0.2"
usage = """%prog [options] [command]
Commands:
- list|ls List all Elastic Load Balancers
- delete <name> Delete ELB <name>
- get <name> Get all instances associated with <name>
- create <name> Create an ELB
- add <name> <instance> Add <instance> in ELB <name>
- remove|rm <name> <instance> Remove <instance> from ELB <name>
- enable|en <name> <zone> Enable Zone <zone> for ELB <name>
- disable <name> <zone> Disable Zone <zone> for ELB <name>
- addl <name> Add listeners (specified by -l) to the ELB <name>
- rml <name> <port> Remove Listener(s) specified by the port on the ELB
+ list|ls List all Elastic Load Balancers
+ delete <name> Delete ELB <name>
+ get <name> Get all instances associated with <name>
+ create <name> Create an ELB; -z and -l are required
+ add <name> <instance> Add <instance> in ELB <name>
+ remove|rm <name> <instance> Remove <instance> from ELB <name>
+ reap <name> Remove terminated instances from ELB <name>
+ enable|en <name> <zone> Enable Zone <zone> for ELB <name>
+ disable <name> <zone> Disable Zone <zone> for ELB <name>
+ addl <name> Add listeners (specified by -l) to the ELB
+ <name>
+ rml <name> <port> Remove Listener(s) specified by the port on
+ the ELB <name>
"""
+
+def find_elb(elb, name):
+ try:
+ elbs = elb.get_all_load_balancers(name)
+ except boto.exception.BotoServerError as se:
+ if se.code == 'LoadBalancerNotFound':
+ elbs = []
+ else:
+ raise
+
+ if len(elbs) < 1:
+ print "No load balancer by the name of %s found" % name
+ return None
+ elif len(elbs) > 1:
+ print "More than one elb matches %s?" % name
+ return None
+
+ # Should not happen
+ if name not in elbs[0].name:
+ print "No load balancer by the name of %s found" % name
+ return None
+
+ return elbs[0]
+
+
def list(elb):
"""List all ELBs"""
- print "%-20s %s" % ("Name", "DNS Name")
- print "-"*80
+ print "%-20s %s" % ("Name", "DNS Name")
+ print "-" * 80
for b in elb.get_all_load_balancers():
print "%-20s %s" % (b.name, b.dns_name)
+
def get(elb, name):
"""Get details about ELB <name>"""
- elbs = elb.get_all_load_balancers(name)
- if len(elbs) < 1:
- print "No load balancer by the name of %s found" % name
- return
- for b in elbs:
- if name in b.name:
- print "="*80
- print "Name: %s" % b.name
- print "DNS Name: %s" % b.dns_name
- print
+ b = find_elb(elb, name)
+ if b:
+ print "=" * 80
+ print "Name: %s" % b.name
+ print "DNS Name: %s" % b.dns_name
+ if b.canonical_hosted_zone_name:
+ chzn = b.canonical_hosted_zone_name
+ print "Canonical hosted zone name: %s" % chzn
+ if b.canonical_hosted_zone_name_id:
+ chznid = b.canonical_hosted_zone_name_id
+ print "Canonical hosted zone name id: %s" % chznid
+ print
- print "Listeners"
- print "---------"
- print "%-8s %-8s %s" % ("IN", "OUT", "PROTO")
- for l in b.listeners:
- print "%-8s %-8s %s" % (l[0], l[1], l[2])
+ print "Health Check: %s" % b.health_check
+ print
- print
+ print "Listeners"
+ print "---------"
+ print "%-8s %-8s %s" % ("IN", "OUT", "PROTO")
+ for l in b.listeners:
+ print "%-8s %-8s %s" % (l[0], l[1], l[2])
- print " Zones "
- print "---------"
- for z in b.availability_zones:
- print z
+ print
- print
+ print " Zones "
+ print "---------"
+ for z in b.availability_zones:
+ print z
- print "Instances"
- print "---------"
- for i in b.instances:
- print i.id
+ print
- print
+ print "Instances"
+ print "---------"
+ print "%-12s %-15s %s" % ("ID", "STATE", "DESCRIPTION")
+ for state in b.get_instance_health():
+ print "%-12s %-15s %s" % (state.instance_id, state.state,
+ state.description)
+
+ print
+
def create(elb, name, zones, listeners):
"""Create an ELB named <name>"""
l_list = []
for l in listeners:
l = l.split(",")
- if l[2]=='HTTPS':
+ if l[2] == 'HTTPS':
l_list.append((int(l[0]), int(l[1]), l[2], l[3]))
- else : l_list.append((int(l[0]), int(l[1]), l[2]))
-
+ else:
+ l_list.append((int(l[0]), int(l[1]), l[2]))
+
b = elb.create_load_balancer(name, zones, l_list)
return get(elb, name)
+
def delete(elb, name):
"""Delete this ELB"""
- b = elb.get_all_load_balancers(name)
- if len(b) < 1:
- print "No load balancer by the name of %s found" % name
- return
- for i in b:
- if name in i.name:
- i.delete()
- print "Load Balancer %s deleted" % name
+ b = find_elb(elb, name)
+ if b:
+ b.delete()
+ print "Load Balancer %s deleted" % name
+
def add_instance(elb, name, instance):
"""Add <instance> to ELB <name>"""
- b = elb.get_all_load_balancers(name)
- if len(b) < 1:
- print "No load balancer by the name of %s found" % name
- return
- for i in b:
- if name in i.name:
- i.register_instances([instance])
- return get(elb, name)
+ b = find_elb(elb, name)
+ if b:
+ b.register_instances([instance])
+ return get(elb, name)
def remove_instance(elb, name, instance):
"""Remove instance from elb <name>"""
- b = elb.get_all_load_balancers(name)
- if len(b) < 1:
- print "No load balancer by the name of %s found" % name
- return
- for i in b:
- if name in i.name:
- i.deregister_instances([instance])
- return get(elb, name)
+ b = find_elb(elb, name)
+ if b:
+ b.deregister_instances([instance])
+ return get(elb, name)
+
+
+def reap_instances(elb, name):
+ """Remove terminated instances from elb <name>"""
+ b = find_elb(elb, name)
+ if b:
+ for state in b.get_instance_health():
+ if (state.state == 'OutOfService' and
+ state.description == 'Instance is in terminated state.'):
+ b.deregister_instances([state.instance_id])
+ return get(elb, name)
+
def enable_zone(elb, name, zone):
"""Enable <zone> for elb"""
- b = elb.get_all_load_balancers(name)
- if len(b) < 1:
- print "No load balancer by the name of %s found" % name
- return
- b = b[0]
- b.enable_zones([zone])
- return get(elb, name)
+ b = find_elb(elb, name)
+ if b:
+ b.enable_zones([zone])
+ return get(elb, name)
+
def disable_zone(elb, name, zone):
"""Disable <zone> for elb"""
- b = elb.get_all_load_balancers(name)
- if len(b) < 1:
- print "No load balancer by the name of %s found" % name
- return
- b = b[0]
- b.disable_zones([zone])
- return get(elb, name)
+ b = find_elb(elb, name)
+ if b:
+ b.disable_zones([zone])
+ return get(elb, name)
+
def add_listener(elb, name, listeners):
"""Add listeners to a given load balancer"""
@@ -152,25 +188,18 @@
for l in listeners:
l = l.split(",")
l_list.append((int(l[0]), int(l[1]), l[2]))
- b = elb.get_all_load_balancers(name)
- if len(b) < 1:
- print "No load balancer by the name of %s found" % name
- return
- b = b[0]
- b.create_listeners(l_list)
- return get(elb, name)
+ b = find_elb(elb, name)
+ if b:
+ b.create_listeners(l_list)
+ return get(elb, name)
+
def rm_listener(elb, name, ports):
"""Remove listeners from a given load balancer"""
- b = elb.get_all_load_balancers(name)
- if len(b) < 1:
- print "No load balancer by the name of %s found" % name
- return
- b = b[0]
- b.delete_listeners(ports)
- return get(elb, name)
-
-
+ b = find_elb(elb, name)
+ if b:
+ b.delete_listeners(ports)
+ return get(elb, name)
if __name__ == "__main__":
@@ -183,8 +212,12 @@
from optparse import OptionParser
from boto.mashups.iobject import IObject
parser = OptionParser(version=VERSION, usage=usage)
- parser.add_option("-z", "--zone", help="Operate on zone", action="append", default=[], dest="zones")
- parser.add_option("-l", "--listener", help="Specify Listener in,out,proto", action="append", default=[], dest="listeners")
+ parser.add_option("-z", "--zone",
+ help="Operate on zone",
+ action="append", default=[], dest="zones")
+ parser.add_option("-l", "--listener",
+ help="Specify Listener in,out,proto",
+ action="append", default=[], dest="listeners")
(options, args) = parser.parse_args()
@@ -202,6 +235,12 @@
elif command == "get":
get(elb, args[1])
elif command == "create":
+ if not options.listeners:
+ print "-l option required for command create"
+ sys.exit(1)
+ if not options.zones:
+ print "-z option required for command create"
+ sys.exit(1)
create(elb, args[1], options.zones, options.listeners)
elif command == "delete":
delete(elb, args[1])
@@ -209,11 +248,19 @@
add_instance(elb, args[1], args[2])
elif command in ("rm", "remove"):
remove_instance(elb, args[1], args[2])
+ elif command == "reap":
+ reap_instances(elb, args[1])
elif command in ("en", "enable"):
enable_zone(elb, args[1], args[2])
elif command == "disable":
disable_zone(elb, args[1], args[2])
elif command == "addl":
+ if not options.listeners:
+ print "-l option required for command addl"
+ sys.exit(1)
add_listener(elb, args[1], options.listeners)
elif command == "rml":
+ if not args[2:]:
+ print "port required"
+ sys.exit(2)
rm_listener(elb, args[1], args[2:])
diff --git a/bin/glacier b/bin/glacier
new file mode 100755
index 0000000..aad1e8b
--- /dev/null
+++ b/bin/glacier
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Miguel Olivares http://moliware.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+"""
+ glacier
+ ~~~~~~~
+
+ Amazon Glacier tool built on top of boto. Look at the usage method to see
+ how to use it.
+
+ Author: Miguel Olivares <miguel@moliware.com>
+"""
+import sys
+
+from boto.glacier import connect_to_region
+from getopt import getopt, GetoptError
+from os.path import isfile
+
+
+COMMANDS = ('vaults', 'jobs', 'upload')
+
+
+def usage():
+ print """
+glacier <command> [args]
+
+ Commands
+ vaults - Operations with vaults
+ jobs - Operations with jobs
+ upload - Upload files to a vault. If the vault doesn't exist, it is
+ created
+
+ Common args:
+ access_key - Your AWS Access Key ID. If not supplied, boto will
+ use the value of the environment variable
+ AWS_ACCESS_KEY_ID
+ secret_key - Your AWS Secret Access Key. If not supplied, boto
+ will use the value of the environment variable
+ AWS_SECRET_ACCESS_KEY
+ region - AWS region to use. Possible values: us-east-1, us-west-1,
+ us-west-2, ap-northeast-1, eu-west-1.
+ Default: us-east-1
+
+ Vaults operations:
+
+ List vaults:
+ glacier vaults
+
+ Jobs operations:
+
+ List jobs:
+ glacier jobs <vault name>
+
+ Uploading files:
+
+ glacier upload <vault name> <files>
+
+ Examples :
+ glacier upload pics *.jpg
+ glacier upload pics a.jpg b.jpg
+"""
+ sys.exit()
+
+
+def connect(region, debug_level=0, access_key=None, secret_key=None):
+ """ Connect to a specific region """
+ return connect_to_region(region,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ debug=debug_level)
+
+
+def list_vaults(region, access_key=None, secret_key=None):
+ layer2 = connect(region, access_key=access_key, secret_key=secret_key)
+ for vault in layer2.list_vaults():
+ print vault.arn
+
+
+def list_jobs(vault_name, region, access_key=None, secret_key=None):
+ layer2 = connect(region, access_key=access_key, secret_key=secret_key)
+ print layer2.layer1.list_jobs(vault_name)
+
+
+def upload_files(vault_name, filenames, region, access_key=None, secret_key=None):
+ layer2 = connect(region, access_key=access_key, secret_key=secret_key)
+ layer2.create_vault(vault_name)
+ glacier_vault = layer2.get_vault(vault_name)
+ for filename in filenames:
+ if isfile(filename):
+ print 'Uploading %s to %s' % (filename, vault_name)
+ glacier_vault.upload_archive(filename)
+
+
+def main():
+ if len(sys.argv) < 2:
+ usage()
+
+ command = sys.argv[1]
+ if command not in COMMANDS:
+ usage()
+
+ argv = sys.argv[2:]
+ options = 'a:s:r:'
+ long_options = ['access_key=', 'secret_key=', 'region=']
+ try:
+ opts, args = getopt(argv, options, long_options)
+ except GetoptError, e:
+ usage()
+
+ # Parse arguments
+ access_key = secret_key = None
+ region = 'us-east-1'
+ for option, value in opts:
+ if option in ('-a', '--access_key'):
+ access_key = value
+ elif option in ('-s', '--secret_key'):
+ secret_key = value
+ elif option in ('-r', '--region'):
+ region = value
+ # handle each command
+ if command == 'vaults':
+ list_vaults(region, access_key, secret_key)
+ elif command == 'jobs':
+ if len(args) != 1:
+ usage()
+ list_jobs(args[0], region, access_key, secret_key)
+ elif command == 'upload':
+ if len(args) < 2:
+ usage()
+ upload_files(args[0], args[1:], region, access_key, secret_key)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/instance_events b/bin/instance_events
new file mode 100755
index 0000000..b36a480
--- /dev/null
+++ b/bin/instance_events
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Jim Browne http://www.42lines.net
+# Borrows heavily from boto/bin/list_instances which has no attribution
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+
+VERSION="0.1"
+usage = """%prog [options]
+Options:
+ -h, --help show help message (including options list) and exit
+"""
+
+from operator import itemgetter
+
+HEADERS = {
+ 'ID': {'get': itemgetter('id'), 'length':14},
+ 'Zone': {'get': itemgetter('zone'), 'length':14},
+ 'Hostname': {'get': itemgetter('dns'), 'length':20},
+ 'Code': {'get': itemgetter('code'), 'length':18},
+ 'Description': {'get': itemgetter('description'), 'length':30},
+ 'NotBefore': {'get': itemgetter('not_before'), 'length':25},
+ 'NotAfter': {'get': itemgetter('not_after'), 'length':25},
+ 'T:': {'length': 30},
+}
+
+def get_column(name, event=None):
+ if name.startswith('T:'):
+ return event[name]
+ return HEADERS[name]['get'](event)
+
+def list(region, headers, order, completed):
+ """List status events for all instances in a given region"""
+
+ import re
+
+ ec2 = boto.connect_ec2(region=region)
+
+ reservations = ec2.get_all_instances()
+
+ instanceinfo = {}
+ events = {}
+
+ displaytags = [ x for x in headers if x.startswith('T:') ]
+
+ # Collect the tag for every possible instance
+ for res in reservations:
+ for instance in res.instances:
+ iid = instance.id
+ instanceinfo[iid] = {}
+ for tagname in displaytags:
+ _, tag = tagname.split(':', 1)
+ instanceinfo[iid][tagname] = instance.tags.get(tag,'')
+ instanceinfo[iid]['dns'] = instance.public_dns_name
+
+ stats = ec2.get_all_instance_status()
+
+ for stat in stats:
+ if stat.events:
+ for event in stat.events:
+ events[stat.id] = {}
+ events[stat.id]['id'] = stat.id
+ events[stat.id]['dns'] = instanceinfo[stat.id]['dns']
+ events[stat.id]['zone'] = stat.zone
+ for tag in displaytags:
+ events[stat.id][tag] = instanceinfo[stat.id][tag]
+ events[stat.id]['code'] = event.code
+ events[stat.id]['description'] = event.description
+ events[stat.id]['not_before'] = event.not_before
+ events[stat.id]['not_after'] = event.not_after
+ if completed and re.match('^\[Completed\]',event.description):
+ events[stat.id]['not_before'] = 'Completed'
+ events[stat.id]['not_after'] = 'Completed'
+
+ # Create format string
+ format_string = ""
+ for h in headers:
+ if h.startswith('T:'):
+ format_string += "%%-%ds" % HEADERS['T:']['length']
+ else:
+ format_string += "%%-%ds" % HEADERS[h]['length']
+
+
+ print format_string % headers
+ print "-" * len(format_string % headers)
+
+ for instance in sorted(events,
+ key=lambda ev: get_column(order, events[ev])):
+ e = events[instance]
+ print format_string % tuple(get_column(h, e) for h in headers)
+
+if __name__ == "__main__":
+ import boto
+ import sys
+ from optparse import OptionParser
+ from boto.ec2 import regions
+
+ parser = OptionParser(version=VERSION, usage=usage)
+ parser.add_option("-a", "--all", help="check all regions", dest="all", default=False,action="store_true")
+ parser.add_option("-r", "--region", help="region to check (default us-east-1)", dest="region", default="us-east-1")
+ parser.add_option("-H", "--headers", help="Set headers (use 'T:tagname' for including tags)", default=None, action="store", dest="headers", metavar="ID,Zone,Hostname,Code,Description,NotBefore,NotAfter,T:Name")
+ parser.add_option("-S", "--sort", help="Header for sort order", default=None, action="store", dest="order",metavar="HeaderName")
+ parser.add_option("-c", "--completed", help="List time fields as \"Completed\" for completed events (Default: false)", default=False, action="store_true", dest="completed")
+
+ (options, args) = parser.parse_args()
+
+ if options.headers:
+ headers = tuple(options.headers.split(','))
+ else:
+ headers = ('ID', 'Zone', 'Hostname', 'Code', 'NotBefore', 'NotAfter')
+
+ if options.order:
+ order = options.order
+ else:
+ order = 'ID'
+
+ if options.all:
+ for r in regions():
+ print "Region %s" % r.name
+ list(r, headers, order, options.completed)
+ else:
+ # Connect the region
+ for r in regions():
+ if r.name == options.region:
+ region = r
+ break
+ else:
+ print "Region %s not found." % options.region
+ sys.exit(1)
+
+ list(r, headers, order, options.completed)
diff --git a/bin/launch_instance b/bin/launch_instance
index 53032ad..77a5419 100755
--- a/bin/launch_instance
+++ b/bin/launch_instance
@@ -133,6 +133,7 @@
parser.add_option("-d", "--dns", help="Returns public and private DNS (implicates --wait)", default=False, action="store_true", dest="dns")
parser.add_option("-T", "--tag", help="Set tag", default=None, action="append", dest="tags", metavar="key:value")
parser.add_option("-s", "--scripts", help="Pass in a script or a folder containing scripts to be run when the instance starts up, assumes cloud-init. Specify scripts in a list specified by commas. If multiple scripts are specified, they are run lexically (A good way to ensure they run in the order is to prefix filenames with numbers)", type='string', action="callback", callback=scripts_callback)
+ parser.add_option("--role", help="IAM Role to use, this implies --no-add-cred", dest="role")
(options, args) = parser.parse_args()
@@ -152,7 +153,7 @@
print "Region %s not found." % options.region
sys.exit(1)
ec2 = boto.connect_ec2(region=region)
- if not options.nocred:
+ if not options.nocred and not options.role:
cfg.add_creds(ec2)
iobj = IObject()
@@ -214,10 +215,15 @@
if options.save_ebs:
shutdown_proc = "save"
+ instance_profile_name = None
+ if options.role:
+ instance_profile_name = options.role
+
r = ami.run(min_count=int(options.min_count), max_count=int(options.max_count),
key_name=key_name, user_data=user_data,
security_groups=groups, instance_type=options.type,
- placement=options.zone, instance_initiated_shutdown_behavior=shutdown_proc)
+ placement=options.zone, instance_initiated_shutdown_behavior=shutdown_proc,
+ instance_profile_name=instance_profile_name)
instance = r.instances[0]
diff --git a/bin/list_instances b/bin/list_instances
index 5abe9b6..4da5596 100755
--- a/bin/list_instances
+++ b/bin/list_instances
@@ -13,6 +13,7 @@
'Zone': {'get': attrgetter('placement'), 'length':15},
'Groups': {'get': attrgetter('groups'), 'length':30},
'Hostname': {'get': attrgetter('public_dns_name'), 'length':50},
+ 'PrivateHostname': {'get': attrgetter('private_dns_name'), 'length':50},
'State': {'get': attrgetter('state'), 'length':15},
'Image': {'get': attrgetter('image_id'), 'length':15},
'Type': {'get': attrgetter('instance_type'), 'length':15},
@@ -33,6 +34,7 @@
parser = OptionParser()
parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
parser.add_option("-H", "--headers", help="Set headers (use 'T:tagname' for including tags)", default=None, action="store", dest="headers", metavar="ID,Zone,Groups,Hostname,State,T:Name")
+ parser.add_option("-t", "--tab", help="Tab delimited, skip header - useful in shell scripts", action="store_true", default=False)
(options, args) = parser.parse_args()
# Connect the region
@@ -61,13 +63,20 @@
# List and print
- print format_string % headers
- print "-" * len(format_string % headers)
+
+ if not options.tab:
+ print format_string % headers
+ print "-" * len(format_string % headers)
+
for r in ec2.get_all_instances():
- groups = [g.id for g in r.groups]
+ groups = [g.name for g in r.groups]
for i in r.instances:
i.groups = ','.join(groups)
- print format_string % tuple(get_column(h, i) for h in headers)
+ if options.tab:
+ print "\t".join(tuple(get_column(h, i) for h in headers))
+ else:
+ print format_string % tuple(get_column(h, i) for h in headers)
+
if __name__ == "__main__":
main()
diff --git a/bin/lss3 b/bin/lss3
index 377a5a5..497d084 100755
--- a/bin/lss3
+++ b/bin/lss3
@@ -64,7 +64,7 @@
pairs.append([name, None])
if pairs[-1][0].lower() != pairs[-1][0]:
mixedCase = True
-
+
if mixedCase:
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
else:
diff --git a/bin/route53 b/bin/route53
index c2f2cb4..488a9ca 100755
--- a/bin/route53
+++ b/bin/route53
@@ -3,6 +3,34 @@
#
# route53 is similar to sdbadmin for Route53, it's a simple
# console utility to perform the most frequent tasks with Route53
+#
+# Example usage. Use route53 get after each command to see how the
+# zone changes.
+#
+# Add a non-weighted record, change its value, then delete. Default TTL:
+#
+# route53 add_record ZPO9LGHZ43QB9 rr.example.com A 4.3.2.1
+# route53 change_record ZPO9LGHZ43QB9 rr.example.com A 9.8.7.6
+# route53 del_record ZPO9LGHZ43QB9 rr.example.com A 9.8.7.6
+#
+# Add a weighted record with two different weights. Note that the TTL
+# must be specified as route53 uses positional parameters rather than
+# option flags:
+#
+# route53 add_record ZPO9LGHZ43QB9 wrr.example.com A 1.2.3.4 600 foo9 10
+# route53 add_record ZPO9LGHZ43QB9 wrr.example.com A 4.3.2.1 600 foo8 10
+#
+# route53 change_record ZPO9LGHZ43QB9 wrr.example.com A 9.9.9.9 600 foo8 10
+#
+# route53 del_record ZPO9LGHZ43QB9 wrr.example.com A 1.2.3.4 600 foo9 10
+# route53 del_record ZPO9LGHZ43QB9 wrr.example.com A 9.9.9.9 600 foo8 10
+#
+# Add a non-weighted alias, change its value, then delete. Aliases inherit
+# their TTLs from the backing ELB:
+#
+# route53 add_alias ZPO9LGHZ43QB9 alias.example.com A Z3DZXE0Q79N41H lb-1218761514.us-east-1.elb.amazonaws.com.
+# route53 change_alias ZPO9LGHZ43QB9 alias.example.com. A Z3DZXE0Q79N41H lb2-1218761514.us-east-1.elb.amazonaws.com.
+# route53 delete_alias ZPO9LGHZ43QB9 alias.example.com. A Z3DZXE0Q79N41H lb2-1218761514.us-east-1.elb.amazonaws.com.
def _print_zone_info(zoneinfo):
print "="*80
@@ -12,7 +40,7 @@
print "="*80
print zoneinfo['Config']
print
-
+
def create(conn, hostname, caller_reference=None, comment=''):
"""Create a hosted zone, returning the nameservers"""
@@ -44,63 +72,88 @@
for record in response:
print '%-40s %-5s %-20s %s' % (record.name, record.type, record.ttl, record.to_print())
-
-def add_record(conn, hosted_zone_id, name, type, values, ttl=600, comment=""):
- """Add a new record to a zone"""
+def _add_del(conn, hosted_zone_id, change, name, type, identifier, weight, values, ttl, comment):
from boto.route53.record import ResourceRecordSets
changes = ResourceRecordSets(conn, hosted_zone_id, comment)
- change = changes.add_change("CREATE", name, type, ttl)
+ change = changes.add_change(change, name, type, ttl,
+ identifier=identifier, weight=weight)
for value in values.split(','):
change.add_value(value)
print changes.commit()
-def del_record(conn, hosted_zone_id, name, type, values, ttl=600, comment=""):
- """Delete a record from a zone"""
+def _add_del_alias(conn, hosted_zone_id, change, name, type, identifier, weight, alias_hosted_zone_id, alias_dns_name, comment):
from boto.route53.record import ResourceRecordSets
changes = ResourceRecordSets(conn, hosted_zone_id, comment)
- change = changes.add_change("DELETE", name, type, ttl)
- for value in values.split(','):
- change.add_value(value)
- print changes.commit()
-
-def add_alias(conn, hosted_zone_id, name, type, alias_hosted_zone_id, alias_dns_name, comment=""):
- """Add a new alias to a zone"""
- from boto.route53.record import ResourceRecordSets
- changes = ResourceRecordSets(conn, hosted_zone_id, comment)
- change = changes.add_change("CREATE", name, type)
+ change = changes.add_change(change, name, type,
+ identifier=identifier, weight=weight)
change.set_alias(alias_hosted_zone_id, alias_dns_name)
print changes.commit()
-def del_alias(conn, hosted_zone_id, name, type, alias_hosted_zone_id, alias_dns_name, comment=""):
- """Delete an alias from a zone"""
- from boto.route53.record import ResourceRecordSets
- changes = ResourceRecordSets(conn, hosted_zone_id, comment)
- change = changes.add_change("DELETE", name, type)
- change.set_alias(alias_hosted_zone_id, alias_dns_name)
- print changes.commit()
+def add_record(conn, hosted_zone_id, name, type, values, ttl=600,
+ identifier=None, weight=None, comment=""):
+ """Add a new record to a zone. identifier and weight are optional."""
+ _add_del(conn, hosted_zone_id, "CREATE", name, type, identifier,
+ weight, values, ttl, comment)
-def change_record(conn, hosted_zone_id, name, type, values, ttl=600, comment=""):
- """Delete and then add a record to a zone"""
+def del_record(conn, hosted_zone_id, name, type, values, ttl=600,
+ identifier=None, weight=None, comment=""):
+ """Delete a record from a zone: name, type, ttl, identifier, and weight must match."""
+ _add_del(conn, hosted_zone_id, "DELETE", name, type, identifier,
+ weight, values, ttl, comment)
+
+def add_alias(conn, hosted_zone_id, name, type, alias_hosted_zone_id,
+ alias_dns_name, identifier=None, weight=None, comment=""):
+ """Add a new alias to a zone. identifier and weight are optional."""
+ _add_del_alias(conn, hosted_zone_id, "CREATE", name, type, identifier,
+ weight, alias_hosted_zone_id, alias_dns_name, comment)
+
+def del_alias(conn, hosted_zone_id, name, type, alias_hosted_zone_id,
+ alias_dns_name, identifier=None, weight=None, comment=""):
+ """Delete an alias from a zone: name, type, alias_hosted_zone_id, alias_dns_name, weight and identifier must match."""
+ _add_del_alias(conn, hosted_zone_id, "DELETE", name, type, identifier,
+ weight, alias_hosted_zone_id, alias_dns_name, comment)
+
+def change_record(conn, hosted_zone_id, name, type, newvalues, ttl=600,
+ identifier=None, weight=None, comment=""):
+ """Delete and then add a record to a zone. identifier and weight are optional."""
from boto.route53.record import ResourceRecordSets
changes = ResourceRecordSets(conn, hosted_zone_id, comment)
- response = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=1)[0]
- change1 = changes.add_change("DELETE", name, type, response.ttl)
- for old_value in response.resource_records:
- change1.add_value(old_value)
- change2 = changes.add_change("CREATE", name, type, ttl)
- for new_value in values.split(','):
+ # Assume there are not more than 10 WRRs for a given (name, type)
+ responses = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=10)
+ for response in responses:
+ if response.name != name or response.type != type:
+ continue
+ if response.identifier != identifier or response.weight != weight:
+ continue
+ change1 = changes.add_change("DELETE", name, type, response.ttl,
+ identifier=response.identifier,
+ weight=response.weight)
+ for old_value in response.resource_records:
+ change1.add_value(old_value)
+
+ change2 = changes.add_change("CREATE", name, type, ttl,
+ identifier=identifier, weight=weight)
+ for new_value in newvalues.split(','):
change2.add_value(new_value)
print changes.commit()
-def change_alias(conn, hosted_zone_id, name, type, alias_hosted_zone_id, alias_dns_name, comment=""):
- """Delete and then add an alias to a zone"""
+def change_alias(conn, hosted_zone_id, name, type, new_alias_hosted_zone_id, new_alias_dns_name, identifier=None, weight=None, comment=""):
+ """Delete and then add an alias to a zone. identifier and weight are optional."""
from boto.route53.record import ResourceRecordSets
changes = ResourceRecordSets(conn, hosted_zone_id, comment)
- response = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=1)[0]
- change1 = changes.add_change("DELETE", name, type)
- change1.set_alias(response.alias_hosted_zone_id, response.alias_dns_name)
- change2 = changes.add_change("CREATE", name, type)
- change2.set_alias(alias_hosted_zone_id, alias_dns_name)
+ # Assume there are not more than 10 WRRs for a given (name, type)
+ responses = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=10)
+ for response in responses:
+ if response.name != name or response.type != type:
+ continue
+ if response.identifier != identifier or response.weight != weight:
+ continue
+ change1 = changes.add_change("DELETE", name, type,
+ identifier=response.identifier,
+ weight=response.weight)
+ change1.set_alias(response.alias_hosted_zone_id, response.alias_dns_name)
+ change2 = changes.add_change("CREATE", name, type, identifier=identifier, weight=weight)
+ change2.set_alias(new_alias_hosted_zone_id, new_alias_dns_name)
print changes.commit()
def help(conn, fnc=None):
diff --git a/bin/s3multiput b/bin/s3multiput
index df6e9fe..7631174 100755
--- a/bin/s3multiput
+++ b/bin/s3multiput
@@ -41,8 +41,8 @@
s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
-b/--bucket <bucket_name> [-c/--callback <num_cb>]
[-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
- [-n/--no_op] [-p/--prefix <prefix>] [-q/--quiet]
- [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced] path
+ [-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
+ [-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced] path
Where
access_key - Your AWS Access Key ID. If not supplied, boto will
@@ -76,6 +76,9 @@
/bar/fie.baz
The prefix must end in a trailing separator and if it
does not then one will be added.
+ key_prefix - A prefix to be added to the S3 key name, after any
+ stripping of the file path is done based on the
+ "-p/--prefix" option.
reduced - Use Reduced Redundancy storage
grant - A canned ACL policy that will be granted on each file
transferred to S3. The value of provided must be one
@@ -98,10 +101,10 @@
def submit_cb(bytes_so_far, total_bytes):
print '%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes)
-def get_key_name(fullpath, prefix):
+def get_key_name(fullpath, prefix, key_prefix):
key_name = fullpath[len(prefix):]
l = key_name.split(os.sep)
- return '/'.join(l)
+ return key_prefix + '/'.join(l)
def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
source_path, offset, bytes, debug, cb, num_cb, amount_of_retries=10):
@@ -189,15 +192,16 @@
quiet = False
no_op = False
prefix = '/'
+ key_prefix = ''
grant = None
no_overwrite = False
reduced = False
try:
- opts, args = getopt.getopt(sys.argv[1:], 'a:b:c::d:g:hi:np:qs:wr',
- ['access_key', 'bucket', 'callback', 'debug', 'help', 'grant',
- 'ignore', 'no_op', 'prefix', 'quiet', 'secret_key', 'no_overwrite',
- 'reduced'])
+ opts, args = getopt.getopt(sys.argv[1:], 'a:b:c::d:g:hi:k:np:qs:wr',
+ ['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
+ 'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet', 'secret_key=',
+ 'no_overwrite', 'reduced'])
except:
usage()
@@ -226,6 +230,8 @@
prefix = a
if prefix[-1] != os.sep:
prefix = prefix + os.sep
+ if o in ('-k', '--key_prefix'):
+ key_prefix = a
if o in ('-q', '--quiet'):
quiet = True
if o in ('-s', '--secret_key'):
@@ -256,7 +262,7 @@
if not quiet:
print 'Getting list of existing keys to check against'
keys = []
- for key in b.list():
+ for key in b.list(get_key_name(path, prefix, key_prefix)):
keys.append(key.name)
for root, dirs, files in os.walk(path):
for ignore in ignore_dirs:
@@ -264,7 +270,7 @@
dirs.remove(ignore)
for file in files:
fullpath = os.path.join(root, file)
- key_name = get_key_name(fullpath, prefix)
+ key_name = get_key_name(fullpath, prefix, key_prefix)
copy_file = True
if no_overwrite:
if key_name in keys:
@@ -285,12 +291,12 @@
else:
upload(bucket_name, aws_access_key_id,
aws_secret_access_key, fullpath, key_name,
- reduced, debug, cb, num_cb)
+ reduced, debug, cb, num_cb, grant or 'private')
total += 1
# upload a single file
elif os.path.isfile(path):
- key_name = get_key_name(os.path.abspath(path), prefix)
+ key_name = get_key_name(os.path.abspath(path), prefix, key_prefix)
copy_file = True
if no_overwrite:
if b.get_key(key_name):
@@ -311,7 +317,7 @@
else:
upload(bucket_name, aws_access_key_id,
aws_secret_access_key, path, key_name,
- reduced, debug, cb, num_cb)
+ reduced, debug, cb, num_cb, grant or 'private')
if __name__ == "__main__":
- main()
+ main()
\ No newline at end of file
diff --git a/bin/s3put b/bin/s3put
index a748ec3..9e5c5f2 100755
--- a/bin/s3put
+++ b/bin/s3put
@@ -96,9 +96,9 @@
try:
opts, args = getopt.getopt(
sys.argv[1:], 'a:b:c::d:g:hi:np:qs:vwr',
- ['access_key', 'bucket', 'callback', 'debug', 'help', 'grant',
- 'ignore', 'no_op', 'prefix', 'quiet', 'secret_key',
- 'no_overwrite', 'reduced']
+ ['access_key=', 'bucket=', 'callback=', 'debug=', 'help',
+ 'grant=', 'ignore=', 'no_op', 'prefix=', 'quiet',
+ 'secret_key=', 'no_overwrite', 'reduced', "header="]
)
except:
usage()
@@ -116,6 +116,7 @@
grant = None
no_overwrite = False
reduced = False
+ headers = {}
for o, a in opts:
if o in ('-h', '--help'):
usage()
@@ -147,6 +148,9 @@
quiet = True
if o in ('-s', '--secret_key'):
aws_secret_access_key = a
+ if o == '--header':
+ (k,v) = a.split("=")
+ headers[k] = v
if len(args) != 1:
print usage()
path = os.path.expanduser(args[0])
@@ -162,13 +166,15 @@
if not quiet:
print 'Getting list of existing keys to check against'
keys = []
- for key in b.list():
+ for key in b.list(get_key_name(path, prefix)):
keys.append(key.name)
for root, dirs, files in os.walk(path):
for ignore in ignore_dirs:
if ignore in dirs:
dirs.remove(ignore)
for file in files:
+ if file.startswith("."):
+ continue
fullpath = os.path.join(root, file)
key_name = get_key_name(fullpath, prefix)
copy_file = True
@@ -185,10 +191,11 @@
k.set_contents_from_filename(
fullpath, cb=cb, num_cb=num_cb,
policy=grant, reduced_redundancy=reduced,
+ headers=headers
)
total += 1
elif os.path.isfile(path):
- key_name = os.path.split(path)[1]
+ key_name = get_key_name(path, prefix)
copy_file = True
if no_overwrite:
if b.get_key(key_name):
@@ -199,7 +206,7 @@
k = b.new_key(key_name)
k.set_contents_from_filename(path, cb=cb, num_cb=num_cb,
policy=grant,
- reduced_redundancy=reduced)
+ reduced_redundancy=reduced, headers=headers)
else:
print usage()
diff --git a/bin/sdbadmin b/bin/sdbadmin
index e8ff9b5..7e87c7b 100755
--- a/bin/sdbadmin
+++ b/bin/sdbadmin
@@ -27,6 +27,15 @@
import time
from boto import sdb
+# Allow support for JSON
+try:
+ import simplejson as json
+except:
+ try:
+ import json
+ except:
+ json = False
+
def choice_input(options, default=None, title=None):
"""
Choice input
@@ -50,11 +59,17 @@
return choice and len(choice) > 0 and choice[0].lower() == "y"
-def dump_db(domain, file_name):
+def dump_db(domain, file_name, use_json=False):
"""
Dump SDB domain to file
"""
- doc = domain.to_xml(open(file_name, "w"))
+ f = open(file_name, "w")
+ if use_json:
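+ # Write one JSON object per line, e.g. {"name": "item1", "attributes": {...}}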
+ for item in domain:
+ data = {"name": item.name, "attributes": item}
+ print >> f, json.dumps(data)
+ else:
+ doc = domain.to_xml(f)
def empty_db(domain):
"""
@@ -63,7 +78,7 @@
for item in domain:
item.delete()
-def load_db(domain, file):
+def load_db(domain, file, use_json=False):
"""
Load a domain from a file, this doesn't overwrite any existing
data in the file so if you want to do a full recovery and restore
@@ -72,7 +87,16 @@
:param domain: The SDB Domain object to load to
:param file: The File to load the DB from
"""
- domain.from_xml(file)
+ if use_json:
+ for line in file.readlines():
+ if line:
+ data = json.loads(line)
+ item = domain.new_item(data['name'])
+ item.update(data['attributes'])
+ item.save()
+
+ else:
+ domain.from_xml(file)
def create_db(domain_name, region_name):
"""Create a new DB
@@ -95,6 +119,8 @@
parser.add_option("-c", "--create", help="Create domain", dest="create", default=False, action="store_true")
parser.add_option("-a", "--all-domains", help="Operate on all domains", action="store_true", default=False, dest="all_domains")
+ if json:
+ parser.add_option("-j", "--use-json", help="Load/Store as JSON instead of XML", action="store_true", default=False, dest="json")
parser.add_option("-d", "--domain", help="Do functions on domain (may be more then one)", action="append", dest="domains")
parser.add_option("-f", "--file", help="Input/Output file we're operating on", dest="file_name")
parser.add_option("-r", "--region", help="Region (e.g. us-east-1[default] or eu-west-1)", default="us-east-1", dest="region_name")
@@ -152,7 +178,7 @@
file_name = options.file_name
else:
file_name = "%s.db" % domain.name
- dump_db(domain, file_name)
+ dump_db(domain, file_name, options.json)
if options.load:
for domain in domains:
@@ -161,7 +187,7 @@
file_name = options.file_name
else:
file_name = "%s.db" % domain.name
- load_db(domain, open(file_name, "rb"))
+ load_db(domain, open(file_name, "rb"), options.json)
total_time = round(time.time() - stime, 2)
diff --git a/boto/__init__.py b/boto/__init__.py
index 00e2fc8..b0eb6bd 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -1,6 +1,7 @@
-# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,17 +26,22 @@
from boto.pyami.config import Config, BotoConfigLocations
from boto.storage_uri import BucketStorageUri, FileStorageUri
import boto.plugin
-import os, re, sys
+import os
+import platform
+import re
+import sys
import logging
import logging.config
+import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.1.1'
-Version = __version__ # for backware compatibility
+__version__ = '2.6.0-dev'
+Version = __version__ # for backward compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
config = Config()
+
def init_logging():
for file in BotoConfigLocations:
try:
@@ -43,15 +49,20 @@
except:
pass
+
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('boto')
+perflog = logging.getLogger('boto.perf')
log.addHandler(NullHandler())
+perflog.addHandler(NullHandler())
init_logging()
# convenience function to set logging to a particular file
+
+
def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
global log
if not format_string:
@@ -65,6 +76,7 @@
logger.addHandler(fh)
log = logger
+
def set_stream_logger(name, level=logging.DEBUG, format_string=None):
global log
if not format_string:
@@ -78,6 +90,7 @@
logger.addHandler(fh)
log = logger
+
def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -92,6 +105,7 @@
from boto.sqs.connection import SQSConnection
return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -106,6 +120,7 @@
from boto.s3.connection import S3Connection
return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
"""
@type gs_access_key_id: string
@@ -120,6 +135,7 @@
from boto.gs.connection import GSConnection
return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
+
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -134,6 +150,7 @@
from boto.ec2.connection import EC2Connection
return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -148,7 +165,9 @@
from boto.ec2.elb import ELBConnection
return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
-def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+
+def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -160,9 +179,12 @@
:return: A connection to Amazon's Auto Scaling Service
"""
from boto.ec2.autoscale import AutoScaleConnection
- return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+ return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
-def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+
+def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -174,7 +196,9 @@
:return: A connection to Amazon's EC2 Monitoring service
"""
from boto.ec2.cloudwatch import CloudWatchConnection
- return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+ return CloudWatchConnection(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
@@ -190,6 +214,7 @@
from boto.sdb.connection import SDBConnection
return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -204,7 +229,9 @@
from boto.fps.connection import FPSConnection
return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
-def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+
+def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -218,7 +245,9 @@
from boto.mturk.connection import MTurkConnection
return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
-def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+
+def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -230,7 +259,9 @@
:return: A connection to FPS
"""
from boto.cloudfront import CloudFrontConnection
- return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+ return CloudFrontConnection(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
@@ -246,6 +277,7 @@
from boto.vpc import VPCConnection
return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -260,6 +292,7 @@
from boto.rds import RDSConnection
return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -274,6 +307,7 @@
from boto.emr import EmrConnection
return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -303,7 +337,9 @@
from boto.iam import IAMConnection
return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
-def connect_route53(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+
+def connect_route53(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -315,7 +351,26 @@
:return: A connection to Amazon's Route53 DNS Service
"""
from boto.route53 import Route53Connection
- return Route53Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+ return Route53Connection(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
+
+def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.cloudformation.CloudFormationConnection`
+ :return: A connection to Amazon's CloudFormation Service
+ """
+ from boto.cloudformation import CloudFormationConnection
+ return CloudFormationConnection(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Eucalyptus', is_secure=False,
@@ -355,7 +410,62 @@
region=reg, port=port, path=path,
is_secure=is_secure, **kwargs)
-def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_key=None,
+
+def connect_glacier(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.glacier.layer2.Layer2`
+ :return: A connection to Amazon's Glacier Service
+ """
+ from boto.glacier.layer2 import Layer2
+ return Layer2(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
+
+def connect_ec2_endpoint(url, aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to an EC2 Api endpoint. Additional arguments are passed
+ through to connect_ec2.
+
+ :type url: string
+ :param url: A url for the ec2 api endpoint to connect to
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ec2.connection.EC2Connection`
+ :return: A connection to Eucalyptus server
+ """
+ from boto.ec2.regioninfo import RegionInfo
+
+ purl = urlparse.urlparse(url)
+ kwargs['port'] = purl.port
+ kwargs['host'] = purl.hostname
+ kwargs['path'] = purl.path
+    if 'is_secure' not in kwargs:
+ kwargs['is_secure'] = (purl.scheme == "https")
+
+ kwargs['region'] = RegionInfo(name=purl.hostname,
+ endpoint=purl.hostname)
+ kwargs['aws_access_key_id'] = aws_access_key_id
+ kwargs['aws_secret_access_key'] = aws_secret_access_key
+
+    return connect_ec2(**kwargs)
+
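connect_ec2_endpoint derives host, port, path and scheme from the URL, so a private EC2-compatible cloud can be reached with a single argument. A sketch, with an invented hostname and placeholder credentials:

    import boto

    conn = boto.connect_ec2_endpoint(
        'https://cloud.example.com:8773/services/Eucalyptus',
        aws_access_key_id='my-access-key',
        aws_secret_access_key='my-secret-key')
    # is_secure was inferred from the https scheme; pass is_secure=False
    # explicitly to override the inference.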
+
+def connect_walrus(host=None, aws_access_key_id=None,
+ aws_secret_access_key=None,
port=8773, path='/services/Walrus', is_secure=False,
**kwargs):
"""
@@ -387,12 +497,13 @@
None)
if not host:
host = config.get('Boto', 'walrus_host', None)
-
+
return S3Connection(aws_access_key_id, aws_secret_access_key,
host=host, port=port, path=path,
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
+
def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -407,6 +518,7 @@
from boto.ses import SESConnection
return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -421,21 +533,21 @@
from boto.sts import STSConnection
return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_ia(ia_access_key_id=None, ia_secret_access_key=None,
is_secure=False, **kwargs):
"""
Connect to the Internet Archive via their S3-like API.
:type ia_access_key_id: string
- :param ia_access_key_id: Your IA Access Key ID. This will also look in your
- boto config file for an entry in the Credentials
- section called "ia_access_key_id"
+ :param ia_access_key_id: Your IA Access Key ID. This will also look
+ in your boto config file for an entry in the Credentials
+ section called "ia_access_key_id"
:type ia_secret_access_key: string
:param ia_secret_access_key: Your IA Secret Access Key. This will also
- look in your boto config file for an entry
- in the Credentials section called
- "ia_secret_access_key"
+ look in your boto config file for an entry in the Credentials
+ section called "ia_secret_access_key"
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to the Internet Archive
@@ -453,45 +565,79 @@
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
-def check_extensions(module_name, module_path):
+
+def connect_dynamodb(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
"""
- This function checks for extensions to boto modules. It should be called in the
- __init__.py file of all boto modules. See:
- http://code.google.com/p/boto/wiki/ExtendModules
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
- for details.
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.dynamodb.layer2.Layer2`
+ :return: A connection to the Layer2 interface for DynamoDB.
"""
- option_name = '%s_extend' % module_name
- version = config.get('Boto', option_name, None)
- if version:
- dirname = module_path[0]
- path = os.path.join(dirname, version)
- if os.path.isdir(path):
- log.info('extending module %s with: %s' % (module_name, path))
- module_path.insert(0, path)
+ from boto.dynamodb.layer2 import Layer2
+ return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
-_aws_cache = {}
-def _get_aws_conn(service):
- global _aws_cache
- conn = _aws_cache.get(service)
- if not conn:
- meth = getattr(sys.modules[__name__], 'connect_' + service)
- conn = meth()
- _aws_cache[service] = conn
- return conn
+def connect_swf(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
-def lookup(service, name):
- global _aws_cache
- conn = _get_aws_conn(service)
- obj = _aws_cache.get('.'.join((service, name)), None)
- if not obj:
- obj = conn.lookup(name)
- _aws_cache['.'.join((service, name))] = obj
- return obj
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.swf.layer1.Layer1`
+ :return: A connection to the Layer1 interface for SWF.
+ """
+ from boto.swf.layer1 import Layer1
+ return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_cloudsearch(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cloudsearch.layer2.Layer2`
+ :return: A connection to Amazon's CloudSearch service
+ """
+ from boto.cloudsearch.layer2 import Layer2
+ return Layer2(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
+
+def connect_beanstalk(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.beanstalk.layer1.Layer1`
+ :return: A connection to Amazon's Elastic Beanstalk service
+ """
+ from boto.beanstalk.layer1 import Layer1
+ return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs)
+
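The dynamodb, swf, cloudsearch and beanstalk helpers above all return a Layer1/Layer2 wrapper rather than a raw query connection. A sketch with placeholder credentials and an invented table name (get_table() is the DynamoDB Layer2 lookup):

    import boto

    ddb = boto.connect_dynamodb('my-access-key', 'my-secret-key')
    table = ddb.get_table('my-table')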
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
- bucket_storage_uri_class=BucketStorageUri):
+ bucket_storage_uri_class=BucketStorageUri,
+ suppress_consec_slashes=True):
"""
Instantiate a StorageUri from a URI string.
@@ -505,6 +651,8 @@
:param validate: whether to check for bucket name validity.
:type bucket_storage_uri_class: BucketStorageUri interface.
:param bucket_storage_uri_class: Allows mocking for unit tests.
+    :type suppress_consec_slashes: bool
+    :param suppress_consec_slashes: If provided, controls whether
+        consecutive slashes will be suppressed in key paths.
We allow validate to be disabled to allow caller
to implement bucket-level wildcarding (outside the boto library;
@@ -519,7 +667,8 @@
* s3://bucket/name
* gs://bucket
* s3://bucket
- * filename
+ * filename (which could be a Unix path like /a/b/c or a Windows path like
+ C:\a\b\c)
The last example uses the default scheme ('file', unless overridden)
"""
@@ -532,8 +681,14 @@
# Check for common error: user specifies gs:bucket instead
# of gs://bucket. Some URI parsers allow this, but it can cause
# confusion for callers, so we don't.
- if uri_str.find(':') != -1:
- raise InvalidUriError('"%s" contains ":" instead of "://"' % uri_str)
+ colon_pos = uri_str.find(':')
+ if colon_pos != -1:
+ # Allow Windows path names including drive letter (C: etc.)
+ drive_char = uri_str[0].lower()
+ if not (platform.system().lower().startswith('windows')
+ and colon_pos == 1
+                and 'a' <= drive_char <= 'z'):
+ raise InvalidUriError('"%s" contains ":" instead of "://"' % uri_str)
scheme = default_scheme.lower()
path = uri_str
else:
@@ -566,7 +721,10 @@
object_name = ''
if len(path_parts) > 1:
object_name = path_parts[1]
- return bucket_storage_uri_class(scheme, bucket_name, object_name, debug)
+ return bucket_storage_uri_class(
+ scheme, bucket_name, object_name, debug,
+ suppress_consec_slashes=suppress_consec_slashes)
+
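A sketch of the URI forms storage_uri() now accepts, covering the new suppress_consec_slashes flag and the Windows drive-letter allowance (bucket and paths invented):

    import boto

    uri = boto.storage_uri('gs://mybucket/a//b')    # '//' collapsed by default
    raw = boto.storage_uri('gs://mybucket/a//b',
                           suppress_consec_slashes=False)
    # On Windows only, a drive-letter path no longer trips the
    # '":" instead of "://"' check:
    local = boto.storage_uri(r'C:\tmp\data.bin')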
def storage_uri_for_key(key):
"""Returns a StorageUri for the given key.
diff --git a/boto/auth.py b/boto/auth.py
index 084dde9..29f9ac5 100644
--- a/boto/auth.py
+++ b/boto/auth.py
@@ -35,6 +35,9 @@
import hmac
import sys
import urllib
+import time
+import datetime
+import copy
from email.utils import formatdate
from boto.auth_handler import AuthHandler
@@ -68,12 +71,17 @@
import sha
sha256 = None
+
class HmacKeys(object):
"""Key based Auth handler helper."""
def __init__(self, host, config, provider):
if provider.access_key is None or provider.secret_key is None:
raise boto.auth_handler.NotReadyToAuthenticate()
+ self.host = host
+ self.update_provider(provider)
+
+ def update_provider(self, provider):
self._provider = provider
self._hmac = hmac.new(self._provider.secret_key, digestmod=sha)
if sha256:
@@ -88,57 +96,97 @@
else:
return 'HmacSHA1'
- def sign_string(self, string_to_sign):
- boto.log.debug('Canonical: %s' % string_to_sign)
+ def _get_hmac(self):
if self._hmac_256:
- hmac = self._hmac_256.copy()
+ digestmod = sha256
else:
- hmac = self._hmac.copy()
- hmac.update(string_to_sign)
- return base64.encodestring(hmac.digest()).strip()
+ digestmod = sha
+ return hmac.new(self._provider.secret_key,
+ digestmod=digestmod)
+
+ def sign_string(self, string_to_sign):
+ new_hmac = self._get_hmac()
+ new_hmac.update(string_to_sign)
+ return base64.encodestring(new_hmac.digest()).strip()
+
+ def __getstate__(self):
+ pickled_dict = copy.copy(self.__dict__)
+ del pickled_dict['_hmac']
+ del pickled_dict['_hmac_256']
+ return pickled_dict
+
+ def __setstate__(self, dct):
+ self.__dict__ = dct
+ self.update_provider(self._provider)
+
+
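The __getstate__/__setstate__ pair exists because live hmac objects cannot be pickled; handlers drop them on dump and rebuild them from the stored provider on load. A rough sketch, with placeholder credentials:

    import pickle
    from boto.auth import HmacAuthV1Handler
    from boto.provider import Provider

    provider = Provider('aws', 'my-access-key', 'my-secret-key')
    handler = HmacAuthV1Handler('s3.amazonaws.com', None, provider)
    # _hmac/_hmac_256 are discarded by __getstate__ and recreated by
    # update_provider() inside __setstate__.
    clone = pickle.loads(pickle.dumps(handler))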
+class AnonAuthHandler(AuthHandler, HmacKeys):
+ """
+ Implements Anonymous requests.
+ """
+
+ capability = ['anon']
+
+ def __init__(self, host, config, provider):
+ AuthHandler.__init__(self, host, config, provider)
+
+ def add_auth(self, http_request, **kwargs):
+ pass
+
class HmacAuthV1Handler(AuthHandler, HmacKeys):
""" Implements the HMAC request signing used by S3 and GS."""
-
+
capability = ['hmac-v1', 's3']
-
+
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
self._hmac_256 = None
-
+
+ def update_provider(self, provider):
+ super(HmacAuthV1Handler, self).update_provider(provider)
+ self._hmac_256 = None
+
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
method = http_request.method
auth_path = http_request.auth_path
- if not headers.has_key('Date'):
+ if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
if self._provider.security_token:
key = self._provider.security_token_header
headers[key] = self._provider.security_token
- c_string = boto.utils.canonical_string(method, auth_path, headers,
- None, self._provider)
- b64_hmac = self.sign_string(c_string)
+ string_to_sign = boto.utils.canonical_string(method, auth_path,
+ headers, None,
+ self._provider)
+ boto.log.debug('StringToSign:\n%s' % string_to_sign)
+ b64_hmac = self.sign_string(string_to_sign)
auth_hdr = self._provider.auth_header
headers['Authorization'] = ("%s %s:%s" %
(auth_hdr,
self._provider.access_key, b64_hmac))
+
class HmacAuthV2Handler(AuthHandler, HmacKeys):
"""
Implements the simplified HMAC authorization used by CloudFront.
"""
capability = ['hmac-v2', 'cloudfront']
-
+
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
self._hmac_256 = None
-
+
+ def update_provider(self, provider):
+ super(HmacAuthV2Handler, self).update_provider(provider)
+ self._hmac_256 = None
+
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
- if not headers.has_key('Date'):
+ if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
b64_hmac = self.sign_string(headers['Date'])
@@ -146,28 +194,275 @@
headers['Authorization'] = ("%s %s:%s" %
(auth_hdr,
self._provider.access_key, b64_hmac))
-
+
+
class HmacAuthV3Handler(AuthHandler, HmacKeys):
"""Implements the new Version 3 HMAC authorization used by Route53."""
-
+
capability = ['hmac-v3', 'route53', 'ses']
-
+
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
-
+
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
- if not headers.has_key('Date'):
+ if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
+ if self._provider.security_token:
+ key = self._provider.security_token_header
+ headers[key] = self._provider.security_token
+
b64_hmac = self.sign_string(headers['Date'])
s = "AWS3-HTTPS AWSAccessKeyId=%s," % self._provider.access_key
s += "Algorithm=%s,Signature=%s" % (self.algorithm(), b64_hmac)
headers['X-Amzn-Authorization'] = s
+
+class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys):
+ """
+ Implements the new Version 3 HMAC authorization used by DynamoDB.
+ """
+
+ capability = ['hmac-v3-http']
+
+ def __init__(self, host, config, provider):
+ AuthHandler.__init__(self, host, config, provider)
+ HmacKeys.__init__(self, host, config, provider)
+
+ def headers_to_sign(self, http_request):
+ """
+ Select the headers from the request that need to be included
+ in the StringToSign.
+ """
+        headers_to_sign = {'Host': self.host}
+ for name, value in http_request.headers.items():
+ lname = name.lower()
+ if lname.startswith('x-amz'):
+ headers_to_sign[name] = value
+ return headers_to_sign
+
+ def canonical_headers(self, headers_to_sign):
+ """
+ Return the headers that need to be included in the StringToSign
+ in their canonical form by converting all header keys to lower
+ case, sorting them in alphabetical order and then joining
+ them into a string, separated by newlines.
+ """
+ l = sorted(['%s:%s' % (n.lower().strip(),
+ headers_to_sign[n].strip()) for n in headers_to_sign])
+ return '\n'.join(l)
+
+ def string_to_sign(self, http_request):
+ """
+ Return the canonical StringToSign as well as a dict
+ containing the original version of all headers that
+ were included in the StringToSign.
+ """
+ headers_to_sign = self.headers_to_sign(http_request)
+ canonical_headers = self.canonical_headers(headers_to_sign)
+ string_to_sign = '\n'.join([http_request.method,
+ http_request.path,
+ '',
+ canonical_headers,
+ '',
+ http_request.body])
+ return string_to_sign, headers_to_sign
+
+ def add_auth(self, req, **kwargs):
+ """
+ Add AWS3 authentication to a request.
+
+        :type req: :class:`boto.connection.HTTPRequest`
+ :param req: The HTTPRequest object.
+ """
+ # This could be a retry. Make sure the previous
+ # authorization header is removed first.
+ if 'X-Amzn-Authorization' in req.headers:
+ del req.headers['X-Amzn-Authorization']
+ req.headers['X-Amz-Date'] = formatdate(usegmt=True)
+ if self._provider.security_token:
+ req.headers['X-Amz-Security-Token'] = self._provider.security_token
+ string_to_sign, headers_to_sign = self.string_to_sign(req)
+ boto.log.debug('StringToSign:\n%s' % string_to_sign)
+ hash_value = sha256(string_to_sign).digest()
+ b64_hmac = self.sign_string(hash_value)
+ s = "AWS3 AWSAccessKeyId=%s," % self._provider.access_key
+ s += "Algorithm=%s," % self.algorithm()
+ s += "SignedHeaders=%s," % ';'.join(headers_to_sign)
+ s += "Signature=%s" % b64_hmac
+ req.headers['X-Amzn-Authorization'] = s
+
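For reference, the StringToSign assembled by string_to_sign() above has this shape; the values are illustrative, and the empty strings are emitted literally by the join:

    string_to_sign = '\n'.join([
        'POST',                                       # http_request.method
        '/',                                          # http_request.path
        '',                                           # empty component
        'host:dynamodb.us-east-1.amazonaws.com\n'
        'x-amz-date:Mon, 01 Oct 2012 00:00:00 GMT',   # canonical headers
        '',                                           # separator
        '{"TableName": "my-table"}',                  # http_request.body
    ])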
+
+class HmacAuthV4Handler(AuthHandler, HmacKeys):
+ """
+ Implements the new Version 4 HMAC authorization.
+ """
+
+ capability = ['hmac-v4']
+
+ def __init__(self, host, config, provider):
+ AuthHandler.__init__(self, host, config, provider)
+ HmacKeys.__init__(self, host, config, provider)
+
+ def _sign(self, key, msg, hex=False):
+ if hex:
+ sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
+ else:
+ sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
+ return sig
+
+ def headers_to_sign(self, http_request):
+ """
+ Select the headers from the request that need to be included
+ in the StringToSign.
+ """
+        headers_to_sign = {'Host': self.host}
+ for name, value in http_request.headers.items():
+ lname = name.lower()
+ if lname.startswith('x-amz'):
+ headers_to_sign[name] = value
+ return headers_to_sign
+
+ def query_string(self, http_request):
+ parameter_names = sorted(http_request.params.keys())
+ pairs = []
+ for pname in parameter_names:
+ pval = str(http_request.params[pname]).encode('utf-8')
+ pairs.append(urllib.quote(pname, safe='') + '=' +
+ urllib.quote(pval, safe='-_~'))
+ return '&'.join(pairs)
+
+ def canonical_query_string(self, http_request):
+ l = []
+ for param in http_request.params:
+ value = str(http_request.params[param])
+ l.append('%s=%s' % (urllib.quote(param, safe='-_.~'),
+ urllib.quote(value, safe='-_.~')))
+ l = sorted(l)
+ return '&'.join(l)
+
+ def canonical_headers(self, headers_to_sign):
+ """
+ Return the headers that need to be included in the StringToSign
+ in their canonical form by converting all header keys to lower
+ case, sorting them in alphabetical order and then joining
+ them into a string, separated by newlines.
+ """
+ l = ['%s:%s' % (n.lower().strip(),
+ headers_to_sign[n].strip()) for n in headers_to_sign]
+ l = sorted(l)
+ return '\n'.join(l)
+
+ def signed_headers(self, headers_to_sign):
+ l = ['%s' % n.lower().strip() for n in headers_to_sign]
+ l = sorted(l)
+ return ';'.join(l)
+
+ def canonical_uri(self, http_request):
+ return http_request.path
+
+ def payload(self, http_request):
+ body = http_request.body
+ # If the body is a file like object, we can use
+ # boto.utils.compute_hash, which will avoid reading
+ # the entire body into memory.
+ if hasattr(body, 'seek') and hasattr(body, 'read'):
+ return boto.utils.compute_hash(body, hash_algorithm=sha256)[0]
+ return sha256(http_request.body).hexdigest()
+
+ def canonical_request(self, http_request):
+ cr = [http_request.method.upper()]
+ cr.append(self.canonical_uri(http_request))
+ cr.append(self.canonical_query_string(http_request))
+ headers_to_sign = self.headers_to_sign(http_request)
+ cr.append(self.canonical_headers(headers_to_sign) + '\n')
+ cr.append(self.signed_headers(headers_to_sign))
+ cr.append(self.payload(http_request))
+ return '\n'.join(cr)
+
+ def scope(self, http_request):
+ scope = [self._provider.access_key]
+ scope.append(http_request.timestamp)
+ scope.append(http_request.region_name)
+ scope.append(http_request.service_name)
+ scope.append('aws4_request')
+ return '/'.join(scope)
+
+ def credential_scope(self, http_request):
+ scope = []
+ http_request.timestamp = http_request.headers['X-Amz-Date'][0:8]
+ scope.append(http_request.timestamp)
+ parts = http_request.host.split('.')
+ if len(parts) == 3:
+ http_request.region_name = 'us-east-1'
+ else:
+ http_request.region_name = parts[1]
+ scope.append(http_request.region_name)
+ http_request.service_name = parts[0]
+ scope.append(http_request.service_name)
+ scope.append('aws4_request')
+ return '/'.join(scope)
+
+ def string_to_sign(self, http_request, canonical_request):
+ """
+ Return the canonical StringToSign as well as a dict
+ containing the original version of all headers that
+ were included in the StringToSign.
+ """
+ sts = ['AWS4-HMAC-SHA256']
+ sts.append(http_request.headers['X-Amz-Date'])
+ sts.append(self.credential_scope(http_request))
+ sts.append(sha256(canonical_request).hexdigest())
+ return '\n'.join(sts)
+
+ def signature(self, http_request, string_to_sign):
+ key = self._provider.secret_key
+ k_date = self._sign(('AWS4' + key).encode('utf-8'),
+ http_request.timestamp)
+ k_region = self._sign(k_date, http_request.region_name)
+ k_service = self._sign(k_region, http_request.service_name)
+ k_signing = self._sign(k_service, 'aws4_request')
+ return self._sign(k_signing, string_to_sign, hex=True)
+
+ def add_auth(self, req, **kwargs):
+ """
+ Add AWS4 authentication to a request.
+
+        :type req: :class:`boto.connection.HTTPRequest`
+ :param req: The HTTPRequest object.
+ """
+ # This could be a retry. Make sure the previous
+ # authorization header is removed first.
+ if 'X-Amzn-Authorization' in req.headers:
+ del req.headers['X-Amzn-Authorization']
+ now = datetime.datetime.utcnow()
+ req.headers['X-Amz-Date'] = now.strftime('%Y%m%dT%H%M%SZ')
+ if self._provider.security_token:
+ req.headers['X-Amz-Security-Token'] = self._provider.security_token
+ canonical_request = self.canonical_request(req)
+ boto.log.debug('CanonicalRequest:\n%s' % canonical_request)
+ string_to_sign = self.string_to_sign(req, canonical_request)
+ boto.log.debug('StringToSign:\n%s' % string_to_sign)
+ signature = self.signature(req, string_to_sign)
+ boto.log.debug('Signature:\n%s' % signature)
+ headers_to_sign = self.headers_to_sign(req)
+ l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(req)]
+ l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))
+ l.append('Signature=%s' % signature)
+ req.headers['Authorization'] = ','.join(l)
+ qs = self.query_string(req)
+ if qs:
+ req.path = req.path.split('?')[0]
+ req.path = req.path + '?' + qs
+
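The signature() method above implements the standard AWS4 key-derivation chain. A standalone sketch of the same computation, for clarity:

    import hmac
    from hashlib import sha256

    def derive_sigv4_key(secret_key, date_stamp, region, service):
        # date_stamp is the YYYYMMDD prefix of X-Amz-Date; region and
        # service are the values credential_scope() pulls from the host.
        k_date = hmac.new(('AWS4' + secret_key).encode('utf-8'),
                          date_stamp.encode('utf-8'), sha256).digest()
        k_region = hmac.new(k_date, region.encode('utf-8'), sha256).digest()
        k_service = hmac.new(k_region, service.encode('utf-8'),
                             sha256).digest()
        return hmac.new(k_service, b'aws4_request', sha256).digest()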
+
class QuerySignatureHelper(HmacKeys):
- """Helper for Query signature based Auth handler.
+ """
+    Helper for query-signature-based auth handlers.
    Concrete subclasses need to implement the _calc_signature method.
"""
@@ -184,7 +479,7 @@
boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
if http_request.method == 'POST':
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
- http_request.body = qs + '&Signature=' + urllib.quote(signature)
+ http_request.body = qs + '&Signature=' + urllib.quote_plus(signature)
http_request.headers['Content-Length'] = str(len(http_request.body))
else:
http_request.body = ''
@@ -192,7 +487,8 @@
# already be there, we need to get rid of that and rebuild it
http_request.path = http_request.path.split('?')[0]
http_request.path = (http_request.path + '?' + qs +
- '&Signature=' + urllib.quote(signature))
+ '&Signature=' + urllib.quote_plus(signature))
+
class QuerySignatureV0AuthHandler(QuerySignatureHelper, AuthHandler):
"""Provides Signature V0 Signing"""
@@ -202,11 +498,11 @@
def _calc_signature(self, params, *args):
boto.log.debug('using _calc_signature_0')
- hmac = self._hmac.copy()
+ hmac = self._get_hmac()
s = params['Action'] + params['Timestamp']
hmac.update(s)
keys = params.keys()
- keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
+ keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
pairs = []
for key in keys:
val = boto.utils.get_utf8_value(params[key])
@@ -214,6 +510,7 @@
qs = '&'.join(pairs)
return (qs, base64.b64encode(hmac.digest()))
+
class QuerySignatureV1AuthHandler(QuerySignatureHelper, AuthHandler):
"""
Provides Query Signature V1 Authentication.
@@ -224,9 +521,9 @@
def _calc_signature(self, params, *args):
boto.log.debug('using _calc_signature_1')
- hmac = self._hmac.copy()
+ hmac = self._get_hmac()
keys = params.keys()
- keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
+ keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
pairs = []
for key in keys:
hmac.update(key)
@@ -236,6 +533,7 @@
qs = '&'.join(pairs)
return (qs, base64.b64encode(hmac.digest()))
+
class QuerySignatureV2AuthHandler(QuerySignatureHelper, AuthHandler):
"""Provides Query Signature V2 Authentication."""
@@ -246,16 +544,11 @@
def _calc_signature(self, params, verb, path, server_name):
boto.log.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_name.lower(), path)
- if self._hmac_256:
- hmac = self._hmac_256.copy()
- params['SignatureMethod'] = 'HmacSHA256'
- else:
- hmac = self._hmac.copy()
- params['SignatureMethod'] = 'HmacSHA1'
+ hmac = self._get_hmac()
+ params['SignatureMethod'] = self.algorithm()
if self._provider.security_token:
params['SecurityToken'] = self._provider.security_token
- keys = params.keys()
- keys.sort()
+ keys = sorted(params.keys())
pairs = []
for key in keys:
val = boto.utils.get_utf8_value(params[key])
@@ -272,6 +565,34 @@
return (qs, b64)
+class POSTPathQSV2AuthHandler(QuerySignatureV2AuthHandler, AuthHandler):
+ """
+    Query Signature V2 Authentication that relocates the signed query
+    string into the path and allows POST requests with arbitrary Content-Types.
+ """
+
+ capability = ['mws']
+
+ def add_auth(self, req, **kwargs):
+ req.params['AWSAccessKeyId'] = self._provider.access_key
+ req.params['SignatureVersion'] = self.SignatureVersion
+ req.params['Timestamp'] = boto.utils.get_ts()
+ qs, signature = self._calc_signature(req.params, req.method,
+ req.auth_path, req.host)
+ boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
+ if req.method == 'POST':
+ req.headers['Content-Length'] = str(len(req.body))
+ req.headers['Content-Type'] = req.headers.get('Content-Type',
+ 'text/plain')
+ else:
+ req.body = ''
+ # if this is a retried req, the qs from the previous try will
+ # already be there, we need to get rid of that and rebuild it
+ req.path = req.path.split('?')[0]
+ req.path = (req.path + '?' + qs +
+ '&Signature=' + urllib.quote_plus(signature))
+
+
def get_auth_handler(host, config, provider, requested_capability=None):
"""Finds an AuthHandler that is ready to authenticate.
@@ -281,7 +602,7 @@
:type host: string
:param host: The name of the host
- :type config:
+ :type config:
:param config:
:type provider:
@@ -302,13 +623,13 @@
ready_handlers.append(handler(host, config, provider))
except boto.auth_handler.NotReadyToAuthenticate:
pass
-
+
if not ready_handlers:
checked_handlers = auth_handlers
names = [handler.__name__ for handler in checked_handlers]
raise boto.exception.NoAuthHandlerFound(
'No handler was ready to authenticate. %d handlers were checked.'
- ' %s '
+ ' %s '
'Check your credentials' % (len(names), str(names)))
if len(ready_handlers) > 1:
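A sketch of resolving a handler through get_auth_handler(), the way AWSAuthConnection does internally; the host and credentials are invented:

    import boto
    import boto.auth
    from boto.provider import Provider

    provider = Provider('aws', 'my-access-key', 'my-secret-key')
    handler = boto.auth.get_auth_handler(
        'dynamodb.us-east-1.amazonaws.com', boto.config, provider,
        requested_capability=['hmac-v4'])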
diff --git a/boto/fps/test/__init__.py b/boto/beanstalk/__init__.py
similarity index 100%
rename from boto/fps/test/__init__.py
rename to boto/beanstalk/__init__.py
diff --git a/boto/beanstalk/exception.py b/boto/beanstalk/exception.py
new file mode 100644
index 0000000..c209cef
--- /dev/null
+++ b/boto/beanstalk/exception.py
@@ -0,0 +1,64 @@
+import sys
+import json
+from boto.exception import BotoServerError
+
+
+def simple(e):
+ err = json.loads(e.error_message)
+ code = err['Error']['Code']
+
+ try:
+ # Dynamically get the error class.
+ simple_e = getattr(sys.modules[__name__], code)(e, err)
+ except AttributeError:
+ # Return original exception on failure.
+ return e
+
+ return simple_e
+
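A sketch of how simple() is meant to be used at call sites; the try/except wrapper and the Layer1 call are assumptions of this note, not part of the module:

    import boto.beanstalk.exception as beanstalk_exc
    from boto.exception import BotoServerError

    try:
        layer1.create_application('my-app')    # hypothetical Layer1 call
    except BotoServerError as e:
        # Re-raised as, e.g., TooManyApplications when the error code
        # maps to a class below; otherwise the original error is raised.
        raise beanstalk_exc.simple(e)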
+
+class SimpleException(BotoServerError):
+ def __init__(self, e, err):
+ super(SimpleException, self).__init__(e.status, e.reason, e.body)
+ self.body = e.error_message
+ self.request_id = err['RequestId']
+ self.error_code = err['Error']['Code']
+ self.error_message = err['Error']['Message']
+
+    def __repr__(self):
+        return self.__class__.__name__ + ': ' + self.error_message
+
+    def __str__(self):
+        return self.__class__.__name__ + ': ' + self.error_message
+
+
+class ValidationError(SimpleException): pass
+
+# Common beanstalk exceptions.
+class IncompleteSignature(SimpleException): pass
+class InternalFailure(SimpleException): pass
+class InvalidAction(SimpleException): pass
+class InvalidClientTokenId(SimpleException): pass
+class InvalidParameterCombination(SimpleException): pass
+class InvalidParameterValue(SimpleException): pass
+class InvalidQueryParameter(SimpleException): pass
+class MalformedQueryString(SimpleException): pass
+class MissingAction(SimpleException): pass
+class MissingAuthenticationToken(SimpleException): pass
+class MissingParameter(SimpleException): pass
+class OptInRequired(SimpleException): pass
+class RequestExpired(SimpleException): pass
+class ServiceUnavailable(SimpleException): pass
+class Throttling(SimpleException): pass
+
+
+# Action specific exceptions.
+class TooManyApplications(SimpleException): pass
+class InsufficientPrivileges(SimpleException): pass
+class S3LocationNotInServiceRegion(SimpleException): pass
+class TooManyApplicationVersions(SimpleException): pass
+class TooManyConfigurationTemplates(SimpleException): pass
+class TooManyEnvironments(SimpleException): pass
+class S3SubscriptionRequired(SimpleException): pass
+class TooManyBuckets(SimpleException): pass
+class OperationInProgress(SimpleException): pass
+class SourceBundleDeletion(SimpleException): pass
diff --git a/boto/beanstalk/layer1.py b/boto/beanstalk/layer1.py
new file mode 100644
index 0000000..5e994e1
--- /dev/null
+++ b/boto/beanstalk/layer1.py
@@ -0,0 +1,1167 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import json
+
+import boto
+import boto.jsonresponse
+from boto.regioninfo import RegionInfo
+from boto.connection import AWSQueryConnection
+
+
+class Layer1(AWSQueryConnection):
+
+ APIVersion = '2010-12-01'
+ DefaultRegionName = 'us-east-1'
+ DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com'
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None,
+ proxy=None, proxy_port=None,
+ proxy_user=None, proxy_pass=None, debug=0,
+ https_connection_factory=None, region=None, path='/',
+ api_version=None, security_token=None):
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+ self.region = region
+ AWSQueryConnection.__init__(self, aws_access_key_id,
+ aws_secret_access_key,
+ is_secure, port, proxy, proxy_port,
+ proxy_user, proxy_pass,
+ self.region.endpoint, debug,
+ https_connection_factory, path,
+ security_token)
+
+ def _required_auth_capability(self):
+ return ['sign-v2']
+
+ def _encode_bool(self, v):
+ v = bool(v)
+ return {True: "true", False: "false"}[v]
+
+ def _get_response(self, action, params, path='/', verb='GET'):
+ params['ContentType'] = 'JSON'
+ response = self.make_request(action, params, path, verb)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def check_dns_availability(self, cname_prefix):
+ """Checks if the specified CNAME is available.
+
+ :type cname_prefix: string
+ :param cname_prefix: The prefix used when this CNAME is
+ reserved.
+ """
+ params = {'CNAMEPrefix': cname_prefix}
+ return self._get_response('CheckDNSAvailability', params)
+
+ def create_application(self, application_name, description=None):
+ """
+ Creates an application that has one configuration template
+ named default and no application versions.
+
+ :type application_name: string
+ :param application_name: The name of the application.
+ Constraint: This name must be unique within your account. If the
+ specified name already exists, the action returns an
+ InvalidParameterValue error.
+
+ :type description: string
+ :param description: Describes the application.
+
+ :raises: TooManyApplicationsException
+ """
+ params = {'ApplicationName': application_name}
+ if description:
+ params['Description'] = description
+ return self._get_response('CreateApplication', params)
+
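A usage sketch for the calls above and below; the application name, bucket and key are invented:

    from boto.beanstalk.layer1 import Layer1

    beanstalk = Layer1('my-access-key', 'my-secret-key')
    beanstalk.create_application('my-app', description='demo app')
    beanstalk.create_application_version(
        'my-app', 'v1', s3_bucket='my-bucket', s3_key='bundle.zip')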
+ def create_application_version(self, application_name, version_label,
+ description=None, s3_bucket=None,
+ s3_key=None, auto_create_application=None):
+ """Creates an application version for the specified application.
+
+ :type application_name: string
+ :param application_name: The name of the application. If no
+ application is found with this name, and AutoCreateApplication
+ is false, returns an InvalidParameterValue error.
+
+ :type version_label: string
+ :param version_label: A label identifying this
+            version. Constraint: Must be unique per application. If an
+ application version already exists with this label for the
+ specified application, AWS Elastic Beanstalk returns an
+ InvalidParameterValue error.
+
+ :type description: string
+ :param description: Describes this version.
+
+ :type s3_bucket: string
+ :param s3_bucket: The Amazon S3 bucket where the data is
+ located.
+
+ :type s3_key: string
+ :param s3_key: The Amazon S3 key where the data is located.
+ Both s3_bucket and s3_key must be specified in order to use
+ a specific source bundle. If both of these values are not specified
+ the sample application will be used.
+
+ :type auto_create_application: boolean
+ :param auto_create_application: Determines how the system
+ behaves if the specified application for this version does not
+ already exist: true: Automatically creates the specified
+ application for this version if it does not already exist.
+ false: Returns an InvalidParameterValue if the specified
+ application for this version does not already exist. Default:
+ false Valid Values: true | false
+
+ :raises: TooManyApplicationsException,
+ TooManyApplicationVersionsException,
+ InsufficientPrivilegesException,
+ S3LocationNotInServiceRegionException
+
+ """
+ params = {'ApplicationName': application_name,
+ 'VersionLabel': version_label}
+ if description:
+ params['Description'] = description
+ if s3_bucket and s3_key:
+ params['SourceBundle.S3Bucket'] = s3_bucket
+ params['SourceBundle.S3Key'] = s3_key
+ if auto_create_application:
+ params['AutoCreateApplication'] = self._encode_bool(
+ auto_create_application)
+ return self._get_response('CreateApplicationVersion', params)
+
+ def create_configuration_template(self, application_name, template_name,
+ solution_stack_name=None,
+ source_configuration_application_name=None,
+ source_configuration_template_name=None,
+ environment_id=None, description=None,
+ option_settings=None):
+ """Creates a configuration template.
+
+ Templates are associated with a specific application and are used to
+ deploy different versions of the application with the same
+ configuration settings.
+
+ :type application_name: string
+ :param application_name: The name of the application to
+ associate with this configuration template. If no application is
+ found with this name, AWS Elastic Beanstalk returns an
+ InvalidParameterValue error.
+
+ :type template_name: string
+ :param template_name: The name of the configuration
+            template. Constraint: This name must be unique per application.
+ Default: If a configuration template already exists with this
+ name, AWS Elastic Beanstalk returns an InvalidParameterValue
+ error.
+
+ :type solution_stack_name: string
+ :param solution_stack_name: The name of the solution stack used
+ by this configuration. The solution stack specifies the
+ operating system, architecture, and application server for a
+ configuration template. It determines the set of configuration
+ options as well as the possible and default values. Use
+ ListAvailableSolutionStacks to obtain a list of available
+ solution stacks. Default: If the SolutionStackName is not
+ specified and the source configuration parameter is blank, AWS
+ Elastic Beanstalk uses the default solution stack. If not
+ specified and the source configuration parameter is specified,
+ AWS Elastic Beanstalk uses the same solution stack as the source
+ configuration template.
+
+ :type source_configuration_application_name: string
+ :param source_configuration_application_name: The name of the
+ application associated with the configuration.
+
+ :type source_configuration_template_name: string
+ :param source_configuration_template_name: The name of the
+ configuration template.
+
+ :type environment_id: string
+ :param environment_id: The ID of the environment used with this
+ configuration template.
+
+ :type description: string
+ :param description: Describes this configuration.
+
+ :type option_settings: list
+ :param option_settings: If specified, AWS Elastic Beanstalk sets
+ the specified configuration option to the requested value. The
+ new value overrides the value obtained from the solution stack
+ or the source configuration template.
+
+ :raises: InsufficientPrivilegesException,
+ TooManyConfigurationTemplatesException
+ """
+ params = {'ApplicationName': application_name,
+ 'TemplateName': template_name}
+ if solution_stack_name:
+ params['SolutionStackName'] = solution_stack_name
+        if source_configuration_application_name:
+            params['SourceConfiguration.ApplicationName'] = \
+                source_configuration_application_name
+        if source_configuration_template_name:
+            params['SourceConfiguration.TemplateName'] = \
+                source_configuration_template_name
+ if environment_id:
+ params['EnvironmentId'] = environment_id
+ if description:
+ params['Description'] = description
+ if option_settings:
+ self._build_list_params(params, option_settings,
+ 'OptionSettings.member',
+ ('Namespace', 'OptionName', 'Value'))
+ return self._get_response('CreateConfigurationTemplate', params)
+
+ def create_environment(self, application_name, environment_name,
+ version_label=None, template_name=None,
+ solution_stack_name=None, cname_prefix=None,
+ description=None, option_settings=None,
+ options_to_remove=None):
+ """Launches an environment for the application using a configuration.
+
+ :type application_name: string
+ :param application_name: The name of the application that
+ contains the version to be deployed. If no application is found
+ with this name, CreateEnvironment returns an
+ InvalidParameterValue error.
+
+ :type version_label: string
+ :param version_label: The name of the application version to
+ deploy. If the specified application has no associated
+ application versions, AWS Elastic Beanstalk UpdateEnvironment
+ returns an InvalidParameterValue error. Default: If not
+ specified, AWS Elastic Beanstalk attempts to launch the most
+ recently created application version.
+
+ :type environment_name: string
+ :param environment_name: A unique name for the deployment
+ environment. Used in the application URL. Constraint: Must be
+ from 4 to 23 characters in length. The name can contain only
+ letters, numbers, and hyphens. It cannot start or end with a
+ hyphen. This name must be unique in your account. If the
+ specified name already exists, AWS Elastic Beanstalk returns an
+ InvalidParameterValue error. Default: If the CNAME parameter is
+ not specified, the environment name becomes part of the CNAME,
+ and therefore part of the visible URL for your application.
+
+ :type template_name: string
+ :param template_name: The name of the configuration template to
+ use in deployment. If no configuration template is found with
+ this name, AWS Elastic Beanstalk returns an
+ InvalidParameterValue error. Condition: You must specify either
+ this parameter or a SolutionStackName, but not both. If you
+ specify both, AWS Elastic Beanstalk returns an
+ InvalidParameterCombination error. If you do not specify either,
+ AWS Elastic Beanstalk returns a MissingRequiredParameter error.
+
+ :type solution_stack_name: string
+ :param solution_stack_name: This is an alternative to specifying
+ a configuration name. If specified, AWS Elastic Beanstalk sets
+ the configuration values to the default values associated with
+ the specified solution stack. Condition: You must specify
+ either this or a TemplateName, but not both. If you specify
+ both, AWS Elastic Beanstalk returns an
+ InvalidParameterCombination error. If you do not specify either,
+ AWS Elastic Beanstalk returns a MissingRequiredParameter error.
+
+ :type cname_prefix: string
+ :param cname_prefix: If specified, the environment attempts to
+ use this value as the prefix for the CNAME. If not specified,
+ the environment uses the environment name.
+
+ :type description: string
+ :param description: Describes this environment.
+
+ :type option_settings: list
+ :param option_settings: If specified, AWS Elastic Beanstalk sets
+ the specified configuration options to the requested value in
+ the configuration set for the new environment. These override
+ the values obtained from the solution stack or the configuration
+ template. Each element in the list is a tuple of (Namespace,
+ OptionName, Value), for example::
+
+ [('aws:autoscaling:launchconfiguration',
+ 'Ec2KeyName', 'mykeypair')]
+
+ :type options_to_remove: list
+ :param options_to_remove: A list of custom user-defined
+ configuration options to remove from the configuration set for
+ this new environment.
+
+ :raises: TooManyEnvironmentsException, InsufficientPrivilegesException
+
+ """
+ params = {'ApplicationName': application_name,
+ 'EnvironmentName': environment_name}
+ if version_label:
+ params['VersionLabel'] = version_label
+ if template_name:
+ params['TemplateName'] = template_name
+ if solution_stack_name:
+ params['SolutionStackName'] = solution_stack_name
+ if cname_prefix:
+ params['CNAMEPrefix'] = cname_prefix
+ if description:
+ params['Description'] = description
+ if option_settings:
+ self._build_list_params(params, option_settings,
+ 'OptionSettings.member',
+ ('Namespace', 'OptionName', 'Value'))
+ if options_to_remove:
+ self.build_list_params(params, options_to_remove,
+ 'OptionsToRemove.member')
+ return self._get_response('CreateEnvironment', params)
+
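Continuing the sketch above, launching an environment for that version; the solution stack name is illustrative (use list_available_solution_stacks() for real values):

    beanstalk.create_environment(
        'my-app', 'my-app-env', version_label='v1',
        solution_stack_name='32bit Amazon Linux running Python',
        option_settings=[('aws:autoscaling:launchconfiguration',
                          'Ec2KeyName', 'mykeypair')])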
+ def create_storage_location(self):
+ """
+ Creates the Amazon S3 storage location for the account. This
+ location is used to store user log files.
+
+ :raises: TooManyBucketsException,
+ S3SubscriptionRequiredException,
+ InsufficientPrivilegesException
+
+ """
+ return self._get_response('CreateStorageLocation', params={})
+
+ def delete_application(self, application_name,
+ terminate_env_by_force=None):
+ """
+ Deletes the specified application along with all associated
+ versions and configurations. The application versions will not
+ be deleted from your Amazon S3 bucket.
+
+ :type application_name: string
+ :param application_name: The name of the application to delete.
+
+ :type terminate_env_by_force: boolean
+ :param terminate_env_by_force: When set to true, running
+ environments will be terminated before deleting the application.
+
+ :raises: OperationInProgressException
+
+ """
+ params = {'ApplicationName': application_name}
+ if terminate_env_by_force:
+ params['TerminateEnvByForce'] = self._encode_bool(
+ terminate_env_by_force)
+ return self._get_response('DeleteApplication', params)
+
+ def delete_application_version(self, application_name, version_label,
+ delete_source_bundle=None):
+ """Deletes the specified version from the specified application.
+
+ :type application_name: string
+ :param application_name: The name of the application to delete
+ releases from.
+
+ :type version_label: string
+ :param version_label: The label of the version to delete.
+
+ :type delete_source_bundle: boolean
+ :param delete_source_bundle: Indicates whether to delete the
+ associated source bundle from Amazon S3. Valid Values: true | false
+
+ :raises: SourceBundleDeletionException,
+ InsufficientPrivilegesException,
+ OperationInProgressException,
+ S3LocationNotInServiceRegionException
+ """
+ params = {'ApplicationName': application_name,
+ 'VersionLabel': version_label}
+ if delete_source_bundle:
+ params['DeleteSourceBundle'] = self._encode_bool(
+ delete_source_bundle)
+ return self._get_response('DeleteApplicationVersion', params)
+
+ def delete_configuration_template(self, application_name, template_name):
+ """Deletes the specified configuration template.
+
+ :type application_name: string
+ :param application_name: The name of the application to delete
+ the configuration template from.
+
+ :type template_name: string
+ :param template_name: The name of the configuration template to
+ delete.
+
+ :raises: OperationInProgressException
+
+ """
+ params = {'ApplicationName': application_name,
+ 'TemplateName': template_name}
+ return self._get_response('DeleteConfigurationTemplate', params)
+
+ def delete_environment_configuration(self, application_name,
+ environment_name):
+ """
+ Deletes the draft configuration associated with the running
+ environment. Updating a running environment with any
+ configuration changes creates a draft configuration set. You can
+ get the draft configuration using DescribeConfigurationSettings
+ while the update is in progress or if the update fails. The
+ DeploymentStatus for the draft configuration indicates whether
+ the deployment is in process or has failed. The draft
+ configuration remains in existence until it is deleted with this
+ action.
+
+ :type application_name: string
+ :param application_name: The name of the application the
+ environment is associated with.
+
+ :type environment_name: string
+ :param environment_name: The name of the environment to delete
+ the draft configuration from.
+
+ """
+ params = {'ApplicationName': application_name,
+ 'EnvironmentName': environment_name}
+ return self._get_response('DeleteEnvironmentConfiguration', params)
+
+ def describe_application_versions(self, application_name=None,
+ version_labels=None):
+ """Returns descriptions for existing application versions.
+
+ :type application_name: string
+ :param application_name: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to only include ones that
+ are associated with the specified application.
+
+ :type version_labels: list
+ :param version_labels: If specified, restricts the returned
+ descriptions to only include ones that have the specified
+ version labels.
+
+ """
+ params = {}
+ if application_name:
+ params['ApplicationName'] = application_name
+ if version_labels:
+ self.build_list_params(params, version_labels,
+ 'VersionLabels.member')
+ return self._get_response('DescribeApplicationVersions', params)
+
+ def describe_applications(self, application_names=None):
+ """Returns the descriptions of existing applications.
+
+ :type application_names: list
+ :param application_names: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to only include those with
+ the specified names.
+
+ """
+ params = {}
+ if application_names:
+ self.build_list_params(params, application_names,
+ 'ApplicationNames.member')
+ return self._get_response('DescribeApplications', params)
+
+ def describe_configuration_options(self, application_name=None,
+ template_name=None,
+ environment_name=None,
+ solution_stack_name=None, options=None):
+ """Describes configuration options used in a template or environment.
+
+ Describes the configuration options that are used in a
+ particular configuration template or environment, or that a
+ specified solution stack defines. The description includes the
+        values the options can take, their default values, and an indication of
+ the required action on a running environment if an option value
+ is changed.
+
+ :type application_name: string
+ :param application_name: The name of the application associated
+ with the configuration template or environment. Only needed if
+ you want to describe the configuration options associated with
+ either the configuration template or environment.
+
+ :type template_name: string
+ :param template_name: The name of the configuration template
+ whose configuration options you want to describe.
+
+ :type environment_name: string
+ :param environment_name: The name of the environment whose
+ configuration options you want to describe.
+
+ :type solution_stack_name: string
+ :param solution_stack_name: The name of the solution stack whose
+ configuration options you want to describe.
+
+ :type options: list
+ :param options: If specified, restricts the descriptions to only
+ the specified options.
+ """
+ params = {}
+ if application_name:
+ params['ApplicationName'] = application_name
+ if template_name:
+ params['TemplateName'] = template_name
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ if solution_stack_name:
+ params['SolutionStackName'] = solution_stack_name
+ if options:
+ self.build_list_params(params, options, 'Options.member')
+ return self._get_response('DescribeConfigurationOptions', params)
+
+ def describe_configuration_settings(self, application_name,
+ template_name=None,
+ environment_name=None):
+ """
+ Returns a description of the settings for the specified
+ configuration set, that is, either a configuration template or
+ the configuration set associated with a running environment.
+ When describing the settings for the configuration set
+ associated with a running environment, it is possible to receive
+ two sets of setting descriptions. One is the deployed
+ configuration set, and the other is a draft configuration of an
+ environment that is either in the process of deployment or that
+ failed to deploy.
+
+ :type application_name: string
+ :param application_name: The application for the environment or
+ configuration template.
+
+ :type template_name: string
+ :param template_name: The name of the configuration template to
+ describe. Conditional: You must specify either this parameter
+ or an EnvironmentName, but not both. If you specify both, AWS
+ Elastic Beanstalk returns an InvalidParameterCombination error.
+ If you do not specify either, AWS Elastic Beanstalk returns a
+ MissingRequiredParameter error.
+
+ :type environment_name: string
+ :param environment_name: The name of the environment to
+ describe. Condition: You must specify either this or a
+ TemplateName, but not both. If you specify both, AWS Elastic
+ Beanstalk returns an InvalidParameterCombination error. If you
+        do not specify either, AWS Elastic Beanstalk returns a
+ MissingRequiredParameter error.
+ """
+ params = {'ApplicationName': application_name}
+ if template_name:
+ params['TemplateName'] = template_name
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ return self._get_response('DescribeConfigurationSettings', params)
+
+ def describe_environment_resources(self, environment_id=None,
+ environment_name=None):
+ """Returns AWS resources for this environment.
+
+ :type environment_id: string
+ :param environment_id: The ID of the environment to retrieve AWS
+ resource usage data. Condition: You must specify either this or
+ an EnvironmentName, or both. If you do not specify either, AWS
+        Elastic Beanstalk returns a MissingRequiredParameter error.
+
+ :type environment_name: string
+ :param environment_name: The name of the environment to retrieve
+ AWS resource usage data. Condition: You must specify either
+ this or an EnvironmentId, or both. If you do not specify either,
+        AWS Elastic Beanstalk returns a MissingRequiredParameter error.
+
+ :raises: InsufficientPrivilegesException
+ """
+ params = {}
+ if environment_id:
+ params['EnvironmentId'] = environment_id
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ return self._get_response('DescribeEnvironmentResources', params)
+
+ def describe_environments(self, application_name=None, version_label=None,
+ environment_ids=None, environment_names=None,
+ include_deleted=None,
+ included_deleted_back_to=None):
+ """Returns descriptions for existing environments.
+
+ :type application_name: string
+ :param application_name: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to include only those that
+ are associated with this application.
+
+ :type version_label: string
+ :param version_label: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to include only those that
+ are associated with this application version.
+
+ :type environment_ids: list
+ :param environment_ids: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to include only those that
+ have the specified IDs.
+
+ :type environment_names: list
+ :param environment_names: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to include only those that
+ have the specified names.
+
+ :type include_deleted: boolean
+ :param include_deleted: Indicates whether to include deleted
+ environments: true: Environments that have been deleted after
+ IncludedDeletedBackTo are displayed. false: Do not include
+ deleted environments.
+
+ :type included_deleted_back_to: timestamp
+ :param included_deleted_back_to: If specified when
+ IncludeDeleted is set to true, then environments deleted after
+ this date are displayed.
+ """
+ params = {}
+ if application_name:
+ params['ApplicationName'] = application_name
+ if version_label:
+ params['VersionLabel'] = version_label
+ if environment_ids:
+ self.build_list_params(params, environment_ids,
+ 'EnvironmentIds.member')
+ if environment_names:
+ self.build_list_params(params, environment_names,
+ 'EnvironmentNames.member')
+ if include_deleted:
+ params['IncludeDeleted'] = self._encode_bool(include_deleted)
+ if included_deleted_back_to:
+ params['IncludedDeletedBackTo'] = included_deleted_back_to
+ return self._get_response('DescribeEnvironments', params)
+
+ def describe_events(self, application_name=None, version_label=None,
+ template_name=None, environment_id=None,
+ environment_name=None, request_id=None, severity=None,
+ start_time=None, end_time=None, max_records=None,
+ next_token=None):
+ """Returns event descriptions matching criteria up to the last 6 weeks.
+
+ :type application_name: string
+ :param application_name: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to include only those
+ associated with this application.
+
+ :type version_label: string
+ :param version_label: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to those associated with
+ this application version.
+
+ :type template_name: string
+ :param template_name: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to those that are associated
+ with this environment configuration.
+
+ :type environment_id: string
+ :param environment_id: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to those associated with
+ this environment.
+
+ :type environment_name: string
+ :param environment_name: If specified, AWS Elastic Beanstalk
+ restricts the returned descriptions to those associated with
+ this environment.
+
+ :type request_id: string
+ :param request_id: If specified, AWS Elastic Beanstalk restricts
+ the described events to include only those associated with this
+ request ID.
+
+ :type severity: string
+ :param severity: If specified, limits the events returned from
+ this call to include only those with the specified severity or
+ higher.
+
+ :type start_time: timestamp
+ :param start_time: If specified, AWS Elastic Beanstalk restricts
+ the returned descriptions to those that occur on or after this
+ time.
+
+ :type end_time: timestamp
+ :param end_time: If specified, AWS Elastic Beanstalk restricts
+ the returned descriptions to those that occur up to, but not
+ including, the EndTime.
+
+ :type max_records: integer
+ :param max_records: Specifies the maximum number of events that
+ can be returned, beginning with the most recent event.
+
+ :type next_token: string
+    :param next_token: Pagination token. If specified, the call
+        returns the next batch of results.
+ """
+ params = {}
+ if application_name:
+ params['ApplicationName'] = application_name
+ if version_label:
+ params['VersionLabel'] = version_label
+ if template_name:
+ params['TemplateName'] = template_name
+ if environment_id:
+ params['EnvironmentId'] = environment_id
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ if request_id:
+ params['RequestId'] = request_id
+ if severity:
+ params['Severity'] = severity
+ if start_time:
+ params['StartTime'] = start_time
+ if end_time:
+ params['EndTime'] = end_time
+ if max_records:
+ params['MaxRecords'] = max_records
+ if next_token:
+ params['NextToken'] = next_token
+ return self._get_response('DescribeEvents', params)
+
+ def list_available_solution_stacks(self):
+ """Returns a list of the available solution stack names."""
+ return self._get_response('ListAvailableSolutionStacks', params={})
+
+ def rebuild_environment(self, environment_id=None, environment_name=None):
+ """
+ Deletes and recreates all of the AWS resources (for example:
+ the Auto Scaling group, load balancer, etc.) for a specified
+ environment and forces a restart.
+
+ :type environment_id: string
+ :param environment_id: The ID of the environment to rebuild.
+ Condition: You must specify either this or an EnvironmentName,
+ or both. If you do not specify either, AWS Elastic Beanstalk
+        returns a MissingRequiredParameter error.
+
+ :type environment_name: string
+ :param environment_name: The name of the environment to rebuild.
+ Condition: You must specify either this or an EnvironmentId, or
+ both. If you do not specify either, AWS Elastic Beanstalk
+        returns a MissingRequiredParameter error.
+
+ :raises: InsufficientPrivilegesException
+ """
+ params = {}
+ if environment_id:
+ params['EnvironmentId'] = environment_id
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ return self._get_response('RebuildEnvironment', params)
+
+ def request_environment_info(self, info_type='tail', environment_id=None,
+ environment_name=None):
+ """
+ Initiates a request to compile the specified type of
+ information of the deployed environment. Setting the InfoType
+ to tail compiles the last lines from the application server log
+ files of every Amazon EC2 instance in your environment. Use
+ RetrieveEnvironmentInfo to access the compiled information.
+
+ :type info_type: string
+ :param info_type: The type of information to request.
+
+ :type environment_id: string
+ :param environment_id: The ID of the environment of the
+ requested data. If no such environment is found,
+ RequestEnvironmentInfo returns an InvalidParameterValue error.
+ Condition: You must specify either this or an EnvironmentName,
+ or both. If you do not specify either, AWS Elastic Beanstalk
+            returns a MissingRequiredParameter error.
+
+ :type environment_name: string
+ :param environment_name: The name of the environment of the
+ requested data. If no such environment is found,
+ RequestEnvironmentInfo returns an InvalidParameterValue error.
+ Condition: You must specify either this or an EnvironmentId, or
+ both. If you do not specify either, AWS Elastic Beanstalk
+            returns a MissingRequiredParameter error.
+ """
+ params = {'InfoType': info_type}
+ if environment_id:
+ params['EnvironmentId'] = environment_id
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ return self._get_response('RequestEnvironmentInfo', params)
+
+ def restart_app_server(self, environment_id=None, environment_name=None):
+ """
+ Causes the environment to restart the application container
+ server running on each Amazon EC2 instance.
+
+ :type environment_id: string
+ :param environment_id: The ID of the environment to restart the
+ server for. Condition: You must specify either this or an
+ EnvironmentName, or both. If you do not specify either, AWS
+            Elastic Beanstalk returns a MissingRequiredParameter error.
+
+ :type environment_name: string
+ :param environment_name: The name of the environment to restart
+ the server for. Condition: You must specify either this or an
+ EnvironmentId, or both. If you do not specify either, AWS
+            Elastic Beanstalk returns a MissingRequiredParameter error.
+ """
+ params = {}
+ if environment_id:
+ params['EnvironmentId'] = environment_id
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ return self._get_response('RestartAppServer', params)
+
+ def retrieve_environment_info(self, info_type='tail', environment_id=None,
+ environment_name=None):
+ """
+ Retrieves the compiled information from a RequestEnvironmentInfo
+ request.
+
+ :type info_type: string
+ :param info_type: The type of information to retrieve.
+
+ :type environment_id: string
+ :param environment_id: The ID of the data's environment. If no
+ such environment is found, returns an InvalidParameterValue
+ error. Condition: You must specify either this or an
+ EnvironmentName, or both. If you do not specify either, AWS
+            Elastic Beanstalk returns a MissingRequiredParameter error.
+
+ :type environment_name: string
+ :param environment_name: The name of the data's environment. If
+ no such environment is found, returns an InvalidParameterValue
+ error. Condition: You must specify either this or an
+ EnvironmentId, or both. If you do not specify either, AWS
+            Elastic Beanstalk returns a MissingRequiredParameter error.
+ """
+ params = {'InfoType': info_type}
+ if environment_id:
+ params['EnvironmentId'] = environment_id
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ return self._get_response('RetrieveEnvironmentInfo', params)
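+
+    # Illustrative request/retrieve flow (``conn`` and the environment name
+    # are hypothetical). The logs are compiled asynchronously, so request
+    # first, wait, then retrieve:
+    #
+    #   conn.request_environment_info(environment_name='myapp-env')
+    #   # ... poll or wait for compilation ...
+    #   info = conn.retrieve_environment_info(environment_name='myapp-env')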
+
+ def swap_environment_cnames(self, source_environment_id=None,
+ source_environment_name=None,
+ destination_environment_id=None,
+ destination_environment_name=None):
+ """Swaps the CNAMEs of two environments.
+
+ :type source_environment_id: string
+ :param source_environment_id: The ID of the source environment.
+            Condition: You must specify at least the SourceEnvironmentId or
+ the SourceEnvironmentName. You may also specify both. If you
+ specify the SourceEnvironmentId, you must specify the
+ DestinationEnvironmentId.
+
+ :type source_environment_name: string
+ :param source_environment_name: The name of the source
+ environment. Condition: You must specify at least the
+            SourceEnvironmentId or the SourceEnvironmentName. You may also
+ specify both. If you specify the SourceEnvironmentName, you must
+ specify the DestinationEnvironmentName.
+
+ :type destination_environment_id: string
+ :param destination_environment_id: The ID of the destination
+ environment. Condition: You must specify at least the
+            DestinationEnvironmentId or the DestinationEnvironmentName. You
+ may also specify both. You must specify the SourceEnvironmentId
+ with the DestinationEnvironmentId.
+
+ :type destination_environment_name: string
+ :param destination_environment_name: The name of the destination
+ environment. Condition: You must specify at least the
+            DestinationEnvironmentId or the DestinationEnvironmentName. You
+ may also specify both. You must specify the
+ SourceEnvironmentName with the DestinationEnvironmentName.
+ """
+ params = {}
+ if source_environment_id:
+ params['SourceEnvironmentId'] = source_environment_id
+ if source_environment_name:
+ params['SourceEnvironmentName'] = source_environment_name
+ if destination_environment_id:
+ params['DestinationEnvironmentId'] = destination_environment_id
+ if destination_environment_name:
+ params['DestinationEnvironmentName'] = destination_environment_name
+ return self._get_response('SwapEnvironmentCNAMEs', params)
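+
+    # Illustrative call (environment names are hypothetical): swap the
+    # CNAMEs of a blue/green pair by name.
+    #
+    #   conn.swap_environment_cnames(
+    #       source_environment_name='myapp-blue',
+    #       destination_environment_name='myapp-green')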
+
+ def terminate_environment(self, environment_id=None, environment_name=None,
+ terminate_resources=None):
+ """Terminates the specified environment.
+
+ :type environment_id: string
+ :param environment_id: The ID of the environment to terminate.
+ Condition: You must specify either this or an EnvironmentName,
+ or both. If you do not specify either, AWS Elastic Beanstalk
+            returns a MissingRequiredParameter error.
+
+ :type environment_name: string
+ :param environment_name: The name of the environment to
+ terminate. Condition: You must specify either this or an
+ EnvironmentId, or both. If you do not specify either, AWS
+            Elastic Beanstalk returns a MissingRequiredParameter error.
+
+ :type terminate_resources: boolean
+ :param terminate_resources: Indicates whether the associated AWS
+ resources should shut down when the environment is terminated:
+            true (default): the associated AWS resources (for example, the
+            Auto Scaling group, LoadBalancer, etc.) are terminated along
+            with the environment. false: the environment is removed from
+            AWS Elastic Beanstalk, but the AWS resources continue to
+            operate. For more information, see the AWS Elastic Beanstalk
+            User Guide. Default: true. Valid Values: true | false
+
+ :raises: InsufficientPrivilegesException
+ """
+ params = {}
+ if environment_id:
+ params['EnvironmentId'] = environment_id
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ if terminate_resources:
+ params['TerminateResources'] = self._encode_bool(
+ terminate_resources)
+ return self._get_response('TerminateEnvironment', params)
+
+ def update_application(self, application_name, description=None):
+ """
+ Updates the specified application to have the specified
+ properties.
+
+ :type application_name: string
+ :param application_name: The name of the application to update.
+ If no such application is found, UpdateApplication returns an
+ InvalidParameterValue error.
+
+ :type description: string
+ :param description: A new description for the application.
+ Default: If not specified, AWS Elastic Beanstalk does not update
+ the description.
+ """
+ params = {'ApplicationName': application_name}
+ if description:
+ params['Description'] = description
+ return self._get_response('UpdateApplication', params)
+
+ def update_application_version(self, application_name, version_label,
+ description=None):
+        """
+        Updates the specified application version to have the
+        specified properties.
+
+ :type application_name: string
+ :param application_name: The name of the application associated
+ with this version. If no application is found with this name,
+            UpdateApplicationVersion returns an InvalidParameterValue error.
+
+ :type version_label: string
+ :param version_label: The name of the version to update. If no
+            application version is found with this label,
+            UpdateApplicationVersion returns an InvalidParameterValue error.
+
+ :type description: string
+ :param description: A new description for this release.
+ """
+ params = {'ApplicationName': application_name,
+ 'VersionLabel': version_label}
+ if description:
+ params['Description'] = description
+ return self._get_response('UpdateApplicationVersion', params)
+
+ def update_configuration_template(self, application_name, template_name,
+ description=None, option_settings=None,
+ options_to_remove=None):
+ """
+ Updates the specified configuration template to have the
+ specified properties or configuration option values.
+
+ :type application_name: string
+ :param application_name: The name of the application associated
+ with the configuration template to update. If no application is
+ found with this name, UpdateConfigurationTemplate returns an
+ InvalidParameterValue error.
+
+ :type template_name: string
+ :param template_name: The name of the configuration template to
+ update. If no configuration template is found with this name,
+ UpdateConfigurationTemplate returns an InvalidParameterValue
+ error.
+
+ :type description: string
+ :param description: A new description for the configuration.
+
+ :type option_settings: list
+ :param option_settings: A list of configuration option settings
+ to update with the new specified option value.
+
+ :type options_to_remove: list
+ :param options_to_remove: A list of configuration options to
+ remove from the configuration set. Constraint: You can remove
+ only UserDefined configuration options.
+
+ :raises: InsufficientPrivilegesException
+ """
+ params = {'ApplicationName': application_name,
+ 'TemplateName': template_name}
+ if description:
+ params['Description'] = description
+ if option_settings:
+ self._build_list_params(params, option_settings,
+ 'OptionSettings.member',
+ ('Namespace', 'OptionName', 'Value'))
+ if options_to_remove:
+ self.build_list_params(params, options_to_remove,
+ 'OptionsToRemove.member')
+ return self._get_response('UpdateConfigurationTemplate', params)
+
+ def update_environment(self, environment_id=None, environment_name=None,
+ version_label=None, template_name=None,
+ description=None, option_settings=None,
+ options_to_remove=None):
+ """
+ Updates the environment description, deploys a new application
+ version, updates the configuration settings to an entirely new
+ configuration template, or updates select configuration option
+ values in the running environment. Attempting to update both
+ the release and configuration is not allowed and AWS Elastic
+ Beanstalk returns an InvalidParameterCombination error. When
+ updating the configuration settings to a new template or
+ individual settings, a draft configuration is created and
+ DescribeConfigurationSettings for this environment returns two
+ setting descriptions with different DeploymentStatus values.
+
+ :type environment_id: string
+ :param environment_id: The ID of the environment to update. If
+ no environment with this ID exists, AWS Elastic Beanstalk
+ returns an InvalidParameterValue error. Condition: You must
+ specify either this or an EnvironmentName, or both. If you do
+ not specify either, AWS Elastic Beanstalk returns
+            a MissingRequiredParameter error.
+
+ :type environment_name: string
+ :param environment_name: The name of the environment to update.
+ If no environment with this name exists, AWS Elastic Beanstalk
+ returns an InvalidParameterValue error. Condition: You must
+ specify either this or an EnvironmentId, or both. If you do not
+ specify either, AWS Elastic Beanstalk returns
+            a MissingRequiredParameter error.
+
+ :type version_label: string
+ :param version_label: If this parameter is specified, AWS
+ Elastic Beanstalk deploys the named application version to the
+ environment. If no such application version is found, returns an
+ InvalidParameterValue error.
+
+ :type template_name: string
+ :param template_name: If this parameter is specified, AWS
+ Elastic Beanstalk deploys this configuration template to the
+ environment. If no such configuration template is found, AWS
+ Elastic Beanstalk returns an InvalidParameterValue error.
+
+ :type description: string
+ :param description: If this parameter is specified, AWS Elastic
+ Beanstalk updates the description of this environment.
+
+ :type option_settings: list
+ :param option_settings: If specified, AWS Elastic Beanstalk
+ updates the configuration set associated with the running
+ environment and sets the specified configuration options to the
+ requested value.
+
+ :type options_to_remove: list
+ :param options_to_remove: A list of custom user-defined
+ configuration options to remove from the configuration set for
+ this environment.
+
+ :raises: InsufficientPrivilegesException
+ """
+ params = {}
+ if environment_id:
+ params['EnvironmentId'] = environment_id
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ if version_label:
+ params['VersionLabel'] = version_label
+ if template_name:
+ params['TemplateName'] = template_name
+ if description:
+ params['Description'] = description
+ if option_settings:
+ self._build_list_params(params, option_settings,
+ 'OptionSettings.member',
+ ('Namespace', 'OptionName', 'Value'))
+ if options_to_remove:
+ self.build_list_params(params, options_to_remove,
+ 'OptionsToRemove.member')
+ return self._get_response('UpdateEnvironment', params)
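+
+    # Illustrative call (``conn`` and all values are hypothetical): set a
+    # single configuration option in place; each option setting is a
+    # (Namespace, OptionName, Value) tuple.
+    #
+    #   conn.update_environment(
+    #       environment_name='myapp-env',
+    #       option_settings=[('aws:autoscaling:asg', 'MaxSize', '4')])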
+
+ def validate_configuration_settings(self, application_name,
+ option_settings, template_name=None,
+ environment_name=None):
+ """
+ Takes a set of configuration settings and either a
+ configuration template or environment, and determines whether
+ those values are valid. This action returns a list of messages
+ indicating any errors or warnings associated with the selection
+ of option values.
+
+ :type application_name: string
+ :param application_name: The name of the application that the
+ configuration template or environment belongs to.
+
+ :type template_name: string
+ :param template_name: The name of the configuration template to
+ validate the settings against. Condition: You cannot specify
+ both this and an environment name.
+
+ :type environment_name: string
+ :param environment_name: The name of the environment to validate
+ the settings against. Condition: You cannot specify both this
+ and a configuration template name.
+
+ :type option_settings: list
+ :param option_settings: A list of the options and desired values
+ to evaluate.
+
+ :raises: InsufficientPrivilegesException
+ """
+ params = {'ApplicationName': application_name}
+ self._build_list_params(params, option_settings,
+ 'OptionSettings.member',
+ ('Namespace', 'OptionName', 'Value'))
+ if template_name:
+ params['TemplateName'] = template_name
+ if environment_name:
+ params['EnvironmentName'] = environment_name
+ return self._get_response('ValidateConfigurationSettings', params)
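+
+    # Illustrative call (values hypothetical): validate one option setting
+    # against a running environment.
+    #
+    #   result = conn.validate_configuration_settings(
+    #       'myapp',
+    #       [('aws:autoscaling:asg', 'MaxSize', '4')],
+    #       environment_name='myapp-env')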
+
+ def _build_list_params(self, params, user_values, prefix, tuple_names):
+ # For params such as the ConfigurationOptionSettings,
+ # they can specify a list of tuples where each tuple maps to a specific
+ # arg. For example:
+        # user_values = [('foo', 'bar', 'baz')]
+ # prefix=MyOption.member
+ # tuple_names=('One', 'Two', 'Three')
+ # would result in:
+ # MyOption.member.1.One = foo
+ # MyOption.member.1.Two = bar
+ # MyOption.member.1.Three = baz
+ for i, user_value in enumerate(user_values, 1):
+ current_prefix = '%s.%s' % (prefix, i)
+ for key, value in zip(tuple_names, user_value):
+ full_key = '%s.%s' % (current_prefix, key)
+ params[full_key] = value
diff --git a/boto/beanstalk/response.py b/boto/beanstalk/response.py
new file mode 100644
index 0000000..22bc102
--- /dev/null
+++ b/boto/beanstalk/response.py
@@ -0,0 +1,703 @@
+"""Classify responses from layer1 and strict type values."""
+from datetime import datetime
+
+
+class BaseObject(object):
+
+ def __repr__(self):
+ result = self.__class__.__name__ + '{ '
+ counter = 0
+ for key, value in self.__dict__.iteritems():
+            # No comma before the first key/value pair.
+ counter += 1
+ if counter > 1:
+ result += ', '
+ result += key + ': '
+ result += self._repr_by_type(value)
+ result += ' }'
+ return result
+
+ def _repr_by_type(self, value):
+ # Everything is either a 'Response', 'list', or 'None/str/int/bool'.
+ result = ''
+ if isinstance(value, Response):
+ result += value.__repr__()
+ elif isinstance(value, list):
+ result += self._repr_list(value)
+ else:
+ result += str(value)
+ return result
+
+ def _repr_list(self, array):
+ result = '['
+ for value in array:
+ result += ' ' + self._repr_by_type(value) + ','
+        # If any items were appended, replace the trailing comma with a space.
+ if len(result) > 1:
+ result = result[:-1] + ' '
+ result += ']'
+ return result
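+
+    # Repr sketch (hypothetical attribute value): an Instance whose id is
+    # 'i-12345678' renders as
+    #   Instance{ id: i-12345678 }
+    # Nested Response objects and lists are rendered recursively.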
+
+
+class Response(BaseObject):
+ def __init__(self, response):
+ super(Response, self).__init__()
+
+ if response['ResponseMetadata']:
+ self.response_metadata = ResponseMetadata(response['ResponseMetadata'])
+ else:
+ self.response_metadata = None
+
+
+class ResponseMetadata(BaseObject):
+ def __init__(self, response):
+ super(ResponseMetadata, self).__init__()
+
+ self.request_id = str(response['RequestId'])
+
+
+class ApplicationDescription(BaseObject):
+ def __init__(self, response):
+ super(ApplicationDescription, self).__init__()
+
+ self.application_name = str(response['ApplicationName'])
+ self.configuration_templates = []
+ if response['ConfigurationTemplates']:
+ for member in response['ConfigurationTemplates']:
+ configuration_template = str(member)
+ self.configuration_templates.append(configuration_template)
+ self.date_created = datetime.fromtimestamp(response['DateCreated'])
+ self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
+ self.description = str(response['Description'])
+ self.versions = []
+ if response['Versions']:
+ for member in response['Versions']:
+ version = str(member)
+ self.versions.append(version)
+
+
+class ApplicationVersionDescription(BaseObject):
+ def __init__(self, response):
+ super(ApplicationVersionDescription, self).__init__()
+
+ self.application_name = str(response['ApplicationName'])
+ self.date_created = datetime.fromtimestamp(response['DateCreated'])
+ self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
+ self.description = str(response['Description'])
+ if response['SourceBundle']:
+ self.source_bundle = S3Location(response['SourceBundle'])
+ else:
+ self.source_bundle = None
+ self.version_label = str(response['VersionLabel'])
+
+
+class AutoScalingGroup(BaseObject):
+ def __init__(self, response):
+ super(AutoScalingGroup, self).__init__()
+
+ self.name = str(response['Name'])
+
+
+class ConfigurationOptionDescription(BaseObject):
+ def __init__(self, response):
+ super(ConfigurationOptionDescription, self).__init__()
+
+ self.change_severity = str(response['ChangeSeverity'])
+ self.default_value = str(response['DefaultValue'])
+ self.max_length = int(response['MaxLength']) if response['MaxLength'] else None
+ self.max_value = int(response['MaxValue']) if response['MaxValue'] else None
+ self.min_value = int(response['MinValue']) if response['MinValue'] else None
+ self.name = str(response['Name'])
+ self.namespace = str(response['Namespace'])
+ if response['Regex']:
+ self.regex = OptionRestrictionRegex(response['Regex'])
+ else:
+ self.regex = None
+ self.user_defined = str(response['UserDefined'])
+ self.value_options = []
+ if response['ValueOptions']:
+ for member in response['ValueOptions']:
+ value_option = str(member)
+ self.value_options.append(value_option)
+ self.value_type = str(response['ValueType'])
+
+
+class ConfigurationOptionSetting(BaseObject):
+ def __init__(self, response):
+ super(ConfigurationOptionSetting, self).__init__()
+
+ self.namespace = str(response['Namespace'])
+ self.option_name = str(response['OptionName'])
+ self.value = str(response['Value'])
+
+
+class ConfigurationSettingsDescription(BaseObject):
+ def __init__(self, response):
+ super(ConfigurationSettingsDescription, self).__init__()
+
+ self.application_name = str(response['ApplicationName'])
+ self.date_created = datetime.fromtimestamp(response['DateCreated'])
+ self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
+ self.deployment_status = str(response['DeploymentStatus'])
+ self.description = str(response['Description'])
+ self.environment_name = str(response['EnvironmentName'])
+ self.option_settings = []
+ if response['OptionSettings']:
+ for member in response['OptionSettings']:
+ option_setting = ConfigurationOptionSetting(member)
+ self.option_settings.append(option_setting)
+ self.solution_stack_name = str(response['SolutionStackName'])
+ self.template_name = str(response['TemplateName'])
+
+
+class EnvironmentDescription(BaseObject):
+ def __init__(self, response):
+ super(EnvironmentDescription, self).__init__()
+
+ self.application_name = str(response['ApplicationName'])
+ self.cname = str(response['CNAME'])
+ self.date_created = datetime.fromtimestamp(response['DateCreated'])
+ self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
+ self.description = str(response['Description'])
+ self.endpoint_url = str(response['EndpointURL'])
+ self.environment_id = str(response['EnvironmentId'])
+ self.environment_name = str(response['EnvironmentName'])
+ self.health = str(response['Health'])
+ if response['Resources']:
+ self.resources = EnvironmentResourcesDescription(response['Resources'])
+ else:
+ self.resources = None
+ self.solution_stack_name = str(response['SolutionStackName'])
+ self.status = str(response['Status'])
+ self.template_name = str(response['TemplateName'])
+ self.version_label = str(response['VersionLabel'])
+
+
+class EnvironmentInfoDescription(BaseObject):
+ def __init__(self, response):
+        super(EnvironmentInfoDescription, self).__init__()
+
+ self.ec2_instance_id = str(response['Ec2InstanceId'])
+ self.info_type = str(response['InfoType'])
+ self.message = str(response['Message'])
+ self.sample_timestamp = datetime.fromtimestamp(response['SampleTimestamp'])
+
+
+class EnvironmentResourceDescription(BaseObject):
+ def __init__(self, response):
+ super(EnvironmentResourceDescription, self).__init__()
+
+ self.auto_scaling_groups = []
+ if response['AutoScalingGroups']:
+ for member in response['AutoScalingGroups']:
+ auto_scaling_group = AutoScalingGroup(member)
+ self.auto_scaling_groups.append(auto_scaling_group)
+ self.environment_name = str(response['EnvironmentName'])
+ self.instances = []
+ if response['Instances']:
+ for member in response['Instances']:
+ instance = Instance(member)
+ self.instances.append(instance)
+ self.launch_configurations = []
+ if response['LaunchConfigurations']:
+ for member in response['LaunchConfigurations']:
+ launch_configuration = LaunchConfiguration(member)
+ self.launch_configurations.append(launch_configuration)
+ self.load_balancers = []
+ if response['LoadBalancers']:
+ for member in response['LoadBalancers']:
+ load_balancer = LoadBalancer(member)
+ self.load_balancers.append(load_balancer)
+ self.triggers = []
+ if response['Triggers']:
+ for member in response['Triggers']:
+ trigger = Trigger(member)
+ self.triggers.append(trigger)
+
+
+class EnvironmentResourcesDescription(BaseObject):
+ def __init__(self, response):
+ super(EnvironmentResourcesDescription, self).__init__()
+
+ if response['LoadBalancer']:
+ self.load_balancer = LoadBalancerDescription(response['LoadBalancer'])
+ else:
+ self.load_balancer = None
+
+
+class EventDescription(BaseObject):
+ def __init__(self, response):
+ super(EventDescription, self).__init__()
+
+ self.application_name = str(response['ApplicationName'])
+ self.environment_name = str(response['EnvironmentName'])
+ self.event_date = datetime.fromtimestamp(response['EventDate'])
+ self.message = str(response['Message'])
+ self.request_id = str(response['RequestId'])
+ self.severity = str(response['Severity'])
+ self.template_name = str(response['TemplateName'])
+ self.version_label = str(response['VersionLabel'])
+
+
+class Instance(BaseObject):
+ def __init__(self, response):
+ super(Instance, self).__init__()
+
+ self.id = str(response['Id'])
+
+
+class LaunchConfiguration(BaseObject):
+ def __init__(self, response):
+ super(LaunchConfiguration, self).__init__()
+
+ self.name = str(response['Name'])
+
+
+class Listener(BaseObject):
+ def __init__(self, response):
+ super(Listener, self).__init__()
+
+ self.port = int(response['Port']) if response['Port'] else None
+ self.protocol = str(response['Protocol'])
+
+
+class LoadBalancer(BaseObject):
+ def __init__(self, response):
+ super(LoadBalancer, self).__init__()
+
+ self.name = str(response['Name'])
+
+
+class LoadBalancerDescription(BaseObject):
+ def __init__(self, response):
+ super(LoadBalancerDescription, self).__init__()
+
+ self.domain = str(response['Domain'])
+ self.listeners = []
+ if response['Listeners']:
+ for member in response['Listeners']:
+ listener = Listener(member)
+ self.listeners.append(listener)
+ self.load_balancer_name = str(response['LoadBalancerName'])
+
+
+class OptionRestrictionRegex(BaseObject):
+ def __init__(self, response):
+ super(OptionRestrictionRegex, self).__init__()
+
+ self.label = response['Label']
+ self.pattern = response['Pattern']
+
+
+class SolutionStackDescription(BaseObject):
+ def __init__(self, response):
+ super(SolutionStackDescription, self).__init__()
+
+ self.permitted_file_types = []
+ if response['PermittedFileTypes']:
+ for member in response['PermittedFileTypes']:
+ permitted_file_type = str(member)
+ self.permitted_file_types.append(permitted_file_type)
+ self.solution_stack_name = str(response['SolutionStackName'])
+
+
+class S3Location(BaseObject):
+ def __init__(self, response):
+ super(S3Location, self).__init__()
+
+ self.s3_bucket = str(response['S3Bucket'])
+ self.s3_key = str(response['S3Key'])
+
+
+class Trigger(BaseObject):
+ def __init__(self, response):
+ super(Trigger, self).__init__()
+
+ self.name = str(response['Name'])
+
+
+class ValidationMessage(BaseObject):
+ def __init__(self, response):
+ super(ValidationMessage, self).__init__()
+
+ self.message = str(response['Message'])
+ self.namespace = str(response['Namespace'])
+ self.option_name = str(response['OptionName'])
+ self.severity = str(response['Severity'])
+
+
+# These are the response objects layer2 uses, one for each layer1 api call.
+class CheckDNSAvailabilityResponse(Response):
+ def __init__(self, response):
+ response = response['CheckDNSAvailabilityResponse']
+ super(CheckDNSAvailabilityResponse, self).__init__(response)
+
+ response = response['CheckDNSAvailabilityResult']
+ self.fully_qualified_cname = str(response['FullyQualifiedCNAME'])
+ self.available = bool(response['Available'])
+
+
+# Our naming convention produces this class name, but the API names it with
+# more capital letters.
+class CheckDnsAvailabilityResponse(CheckDNSAvailabilityResponse): pass
+
+
+class CreateApplicationResponse(Response):
+ def __init__(self, response):
+ response = response['CreateApplicationResponse']
+ super(CreateApplicationResponse, self).__init__(response)
+
+ response = response['CreateApplicationResult']
+ if response['Application']:
+ self.application = ApplicationDescription(response['Application'])
+ else:
+ self.application = None
+
+
+class CreateApplicationVersionResponse(Response):
+ def __init__(self, response):
+ response = response['CreateApplicationVersionResponse']
+ super(CreateApplicationVersionResponse, self).__init__(response)
+
+ response = response['CreateApplicationVersionResult']
+ if response['ApplicationVersion']:
+ self.application_version = ApplicationVersionDescription(response['ApplicationVersion'])
+ else:
+ self.application_version = None
+
+
+class CreateConfigurationTemplateResponse(Response):
+ def __init__(self, response):
+ response = response['CreateConfigurationTemplateResponse']
+ super(CreateConfigurationTemplateResponse, self).__init__(response)
+
+ response = response['CreateConfigurationTemplateResult']
+ self.application_name = str(response['ApplicationName'])
+ self.date_created = datetime.fromtimestamp(response['DateCreated'])
+ self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
+ self.deployment_status = str(response['DeploymentStatus'])
+ self.description = str(response['Description'])
+ self.environment_name = str(response['EnvironmentName'])
+ self.option_settings = []
+ if response['OptionSettings']:
+ for member in response['OptionSettings']:
+ option_setting = ConfigurationOptionSetting(member)
+ self.option_settings.append(option_setting)
+ self.solution_stack_name = str(response['SolutionStackName'])
+ self.template_name = str(response['TemplateName'])
+
+
+class CreateEnvironmentResponse(Response):
+ def __init__(self, response):
+ response = response['CreateEnvironmentResponse']
+ super(CreateEnvironmentResponse, self).__init__(response)
+
+ response = response['CreateEnvironmentResult']
+ self.application_name = str(response['ApplicationName'])
+ self.cname = str(response['CNAME'])
+ self.date_created = datetime.fromtimestamp(response['DateCreated'])
+ self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
+ self.description = str(response['Description'])
+ self.endpoint_url = str(response['EndpointURL'])
+ self.environment_id = str(response['EnvironmentId'])
+ self.environment_name = str(response['EnvironmentName'])
+ self.health = str(response['Health'])
+ if response['Resources']:
+ self.resources = EnvironmentResourcesDescription(response['Resources'])
+ else:
+ self.resources = None
+ self.solution_stack_name = str(response['SolutionStackName'])
+ self.status = str(response['Status'])
+ self.template_name = str(response['TemplateName'])
+ self.version_label = str(response['VersionLabel'])
+
+
+class CreateStorageLocationResponse(Response):
+ def __init__(self, response):
+ response = response['CreateStorageLocationResponse']
+ super(CreateStorageLocationResponse, self).__init__(response)
+
+ response = response['CreateStorageLocationResult']
+ self.s3_bucket = str(response['S3Bucket'])
+
+
+class DeleteApplicationResponse(Response):
+ def __init__(self, response):
+ response = response['DeleteApplicationResponse']
+ super(DeleteApplicationResponse, self).__init__(response)
+
+
+class DeleteApplicationVersionResponse(Response):
+ def __init__(self, response):
+ response = response['DeleteApplicationVersionResponse']
+ super(DeleteApplicationVersionResponse, self).__init__(response)
+
+
+class DeleteConfigurationTemplateResponse(Response):
+ def __init__(self, response):
+ response = response['DeleteConfigurationTemplateResponse']
+ super(DeleteConfigurationTemplateResponse, self).__init__(response)
+
+
+class DeleteEnvironmentConfigurationResponse(Response):
+ def __init__(self, response):
+ response = response['DeleteEnvironmentConfigurationResponse']
+ super(DeleteEnvironmentConfigurationResponse, self).__init__(response)
+
+
+class DescribeApplicationVersionsResponse(Response):
+ def __init__(self, response):
+ response = response['DescribeApplicationVersionsResponse']
+ super(DescribeApplicationVersionsResponse, self).__init__(response)
+
+ response = response['DescribeApplicationVersionsResult']
+ self.application_versions = []
+ if response['ApplicationVersions']:
+ for member in response['ApplicationVersions']:
+ application_version = ApplicationVersionDescription(member)
+ self.application_versions.append(application_version)
+
+
+class DescribeApplicationsResponse(Response):
+ def __init__(self, response):
+ response = response['DescribeApplicationsResponse']
+ super(DescribeApplicationsResponse, self).__init__(response)
+
+ response = response['DescribeApplicationsResult']
+ self.applications = []
+ if response['Applications']:
+ for member in response['Applications']:
+ application = ApplicationDescription(member)
+ self.applications.append(application)
+
+
+class DescribeConfigurationOptionsResponse(Response):
+ def __init__(self, response):
+ response = response['DescribeConfigurationOptionsResponse']
+ super(DescribeConfigurationOptionsResponse, self).__init__(response)
+
+ response = response['DescribeConfigurationOptionsResult']
+ self.options = []
+ if response['Options']:
+ for member in response['Options']:
+ option = ConfigurationOptionDescription(member)
+ self.options.append(option)
+ self.solution_stack_name = str(response['SolutionStackName'])
+
+
+class DescribeConfigurationSettingsResponse(Response):
+ def __init__(self, response):
+ response = response['DescribeConfigurationSettingsResponse']
+ super(DescribeConfigurationSettingsResponse, self).__init__(response)
+
+ response = response['DescribeConfigurationSettingsResult']
+ self.configuration_settings = []
+ if response['ConfigurationSettings']:
+ for member in response['ConfigurationSettings']:
+ configuration_setting = ConfigurationSettingsDescription(member)
+ self.configuration_settings.append(configuration_setting)
+
+
+class DescribeEnvironmentResourcesResponse(Response):
+ def __init__(self, response):
+ response = response['DescribeEnvironmentResourcesResponse']
+ super(DescribeEnvironmentResourcesResponse, self).__init__(response)
+
+ response = response['DescribeEnvironmentResourcesResult']
+ if response['EnvironmentResources']:
+ self.environment_resources = EnvironmentResourceDescription(response['EnvironmentResources'])
+ else:
+ self.environment_resources = None
+
+
+class DescribeEnvironmentsResponse(Response):
+ def __init__(self, response):
+ response = response['DescribeEnvironmentsResponse']
+ super(DescribeEnvironmentsResponse, self).__init__(response)
+
+ response = response['DescribeEnvironmentsResult']
+ self.environments = []
+ if response['Environments']:
+ for member in response['Environments']:
+ environment = EnvironmentDescription(member)
+ self.environments.append(environment)
+
+
+class DescribeEventsResponse(Response):
+ def __init__(self, response):
+ response = response['DescribeEventsResponse']
+ super(DescribeEventsResponse, self).__init__(response)
+
+ response = response['DescribeEventsResult']
+ self.events = []
+ if response['Events']:
+ for member in response['Events']:
+ event = EventDescription(member)
+ self.events.append(event)
+        self.next_token = str(response['NextToken'])
+
+
+class ListAvailableSolutionStacksResponse(Response):
+ def __init__(self, response):
+ response = response['ListAvailableSolutionStacksResponse']
+ super(ListAvailableSolutionStacksResponse, self).__init__(response)
+
+ response = response['ListAvailableSolutionStacksResult']
+ self.solution_stack_details = []
+ if response['SolutionStackDetails']:
+ for member in response['SolutionStackDetails']:
+ solution_stack_detail = SolutionStackDescription(member)
+ self.solution_stack_details.append(solution_stack_detail)
+ self.solution_stacks = []
+ if response['SolutionStacks']:
+ for member in response['SolutionStacks']:
+ solution_stack = str(member)
+ self.solution_stacks.append(solution_stack)
+
+
+class RebuildEnvironmentResponse(Response):
+ def __init__(self, response):
+ response = response['RebuildEnvironmentResponse']
+ super(RebuildEnvironmentResponse, self).__init__(response)
+
+
+class RequestEnvironmentInfoResponse(Response):
+ def __init__(self, response):
+ response = response['RequestEnvironmentInfoResponse']
+ super(RequestEnvironmentInfoResponse, self).__init__(response)
+
+
+class RestartAppServerResponse(Response):
+ def __init__(self, response):
+ response = response['RestartAppServerResponse']
+ super(RestartAppServerResponse, self).__init__(response)
+
+
+class RetrieveEnvironmentInfoResponse(Response):
+ def __init__(self, response):
+ response = response['RetrieveEnvironmentInfoResponse']
+ super(RetrieveEnvironmentInfoResponse, self).__init__(response)
+
+ response = response['RetrieveEnvironmentInfoResult']
+ self.environment_info = []
+ if response['EnvironmentInfo']:
+ for member in response['EnvironmentInfo']:
+ environment_info = EnvironmentInfoDescription(member)
+ self.environment_info.append(environment_info)
+
+
+class SwapEnvironmentCNAMEsResponse(Response):
+ def __init__(self, response):
+ response = response['SwapEnvironmentCNAMEsResponse']
+ super(SwapEnvironmentCNAMEsResponse, self).__init__(response)
+
+
+class SwapEnvironmentCnamesResponse(SwapEnvironmentCNAMEsResponse): pass
+
+
+class TerminateEnvironmentResponse(Response):
+ def __init__(self, response):
+ response = response['TerminateEnvironmentResponse']
+ super(TerminateEnvironmentResponse, self).__init__(response)
+
+ response = response['TerminateEnvironmentResult']
+ self.application_name = str(response['ApplicationName'])
+ self.cname = str(response['CNAME'])
+ self.date_created = datetime.fromtimestamp(response['DateCreated'])
+ self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
+ self.description = str(response['Description'])
+ self.endpoint_url = str(response['EndpointURL'])
+ self.environment_id = str(response['EnvironmentId'])
+ self.environment_name = str(response['EnvironmentName'])
+ self.health = str(response['Health'])
+ if response['Resources']:
+ self.resources = EnvironmentResourcesDescription(response['Resources'])
+ else:
+ self.resources = None
+ self.solution_stack_name = str(response['SolutionStackName'])
+ self.status = str(response['Status'])
+ self.template_name = str(response['TemplateName'])
+ self.version_label = str(response['VersionLabel'])
+
+
+class UpdateApplicationResponse(Response):
+ def __init__(self, response):
+ response = response['UpdateApplicationResponse']
+ super(UpdateApplicationResponse, self).__init__(response)
+
+ response = response['UpdateApplicationResult']
+ if response['Application']:
+ self.application = ApplicationDescription(response['Application'])
+ else:
+ self.application = None
+
+
+class UpdateApplicationVersionResponse(Response):
+ def __init__(self, response):
+ response = response['UpdateApplicationVersionResponse']
+ super(UpdateApplicationVersionResponse, self).__init__(response)
+
+ response = response['UpdateApplicationVersionResult']
+ if response['ApplicationVersion']:
+ self.application_version = ApplicationVersionDescription(response['ApplicationVersion'])
+ else:
+ self.application_version = None
+
+
+class UpdateConfigurationTemplateResponse(Response):
+ def __init__(self, response):
+ response = response['UpdateConfigurationTemplateResponse']
+ super(UpdateConfigurationTemplateResponse, self).__init__(response)
+
+ response = response['UpdateConfigurationTemplateResult']
+ self.application_name = str(response['ApplicationName'])
+ self.date_created = datetime.fromtimestamp(response['DateCreated'])
+ self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
+ self.deployment_status = str(response['DeploymentStatus'])
+ self.description = str(response['Description'])
+ self.environment_name = str(response['EnvironmentName'])
+ self.option_settings = []
+ if response['OptionSettings']:
+ for member in response['OptionSettings']:
+ option_setting = ConfigurationOptionSetting(member)
+ self.option_settings.append(option_setting)
+ self.solution_stack_name = str(response['SolutionStackName'])
+ self.template_name = str(response['TemplateName'])
+
+
+class UpdateEnvironmentResponse(Response):
+ def __init__(self, response):
+ response = response['UpdateEnvironmentResponse']
+ super(UpdateEnvironmentResponse, self).__init__(response)
+
+ response = response['UpdateEnvironmentResult']
+ self.application_name = str(response['ApplicationName'])
+ self.cname = str(response['CNAME'])
+ self.date_created = datetime.fromtimestamp(response['DateCreated'])
+ self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
+ self.description = str(response['Description'])
+ self.endpoint_url = str(response['EndpointURL'])
+ self.environment_id = str(response['EnvironmentId'])
+ self.environment_name = str(response['EnvironmentName'])
+ self.health = str(response['Health'])
+ if response['Resources']:
+ self.resources = EnvironmentResourcesDescription(response['Resources'])
+ else:
+ self.resources = None
+ self.solution_stack_name = str(response['SolutionStackName'])
+ self.status = str(response['Status'])
+ self.template_name = str(response['TemplateName'])
+ self.version_label = str(response['VersionLabel'])
+
+
+class ValidateConfigurationSettingsResponse(Response):
+ def __init__(self, response):
+ response = response['ValidateConfigurationSettingsResponse']
+ super(ValidateConfigurationSettingsResponse, self).__init__(response)
+
+ response = response['ValidateConfigurationSettingsResult']
+ self.messages = []
+ if response['Messages']:
+ for member in response['Messages']:
+ message = ValidationMessage(member)
+ self.messages.append(message)
diff --git a/boto/beanstalk/wrapper.py b/boto/beanstalk/wrapper.py
new file mode 100644
index 0000000..aa9a7d2
--- /dev/null
+++ b/boto/beanstalk/wrapper.py
@@ -0,0 +1,29 @@
+"""Wraps layer1 api methods and converts layer1 dict responses to objects."""
+from boto.beanstalk.layer1 import Layer1
+import boto.beanstalk.response
+from boto.exception import BotoServerError
+import boto.beanstalk.exception as exception
+
+
+def beanstalk_wrapper(func, name):
+ def _wrapped_low_level_api(*args, **kwargs):
+ try:
+ response = func(*args, **kwargs)
+ except BotoServerError, e:
+ raise exception.simple(e)
+ # Turn 'this_is_a_function_name' into 'ThisIsAFunctionNameResponse'.
+ cls_name = ''.join([part.capitalize() for part in name.split('_')]) + 'Response'
+ cls = getattr(boto.beanstalk.response, cls_name)
+ return cls(response)
+ return _wrapped_low_level_api
+
+
+class Layer1Wrapper(object):
+ def __init__(self, *args, **kwargs):
+ self.api = Layer1(*args, **kwargs)
+
+ def __getattr__(self, name):
+ try:
+ return beanstalk_wrapper(getattr(self.api, name), name)
+ except AttributeError:
+ raise AttributeError("%s has no attribute %r" % (self, name))
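+
+
+# A minimal usage sketch (assumes valid AWS credentials are available to
+# boto; the data returned is whatever the account contains):
+#
+#   wrapper = Layer1Wrapper()
+#   # The dict returned by Layer1.describe_applications is converted to a
+#   # boto.beanstalk.response.DescribeApplicationsResponse instance.
+#   apps = wrapper.describe_applications()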
diff --git a/boto/cloudformation/__init__.py b/boto/cloudformation/__init__.py
index 4f8e090..53a02e5 100644
--- a/boto/cloudformation/__init__.py
+++ b/boto/cloudformation/__init__.py
@@ -15,11 +15,52 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-# this is here for backward compatibility
-# originally, the SNSConnection class was defined here
from connection import CloudFormationConnection
+from boto.regioninfo import RegionInfo
+
+RegionData = {
+ 'us-east-1': 'cloudformation.us-east-1.amazonaws.com',
+ 'us-west-1': 'cloudformation.us-west-1.amazonaws.com',
+ 'us-west-2': 'cloudformation.us-west-2.amazonaws.com',
+ 'sa-east-1': 'cloudformation.sa-east-1.amazonaws.com',
+ 'eu-west-1': 'cloudformation.eu-west-1.amazonaws.com',
+ 'ap-northeast-1': 'cloudformation.ap-northeast-1.amazonaws.com',
+ 'ap-southeast-1': 'cloudformation.ap-southeast-1.amazonaws.com'}
+
+
+def regions():
+ """
+ Get all available regions for the CloudFormation service.
+
+ :rtype: list
+ :return: A list of :class:`boto.RegionInfo` instances
+ """
+ regions = []
+ for region_name in RegionData:
+ region = RegionInfo(name=region_name,
+ endpoint=RegionData[region_name],
+ connection_cls=CloudFormationConnection)
+ regions.append(region)
+ return regions
+
+
+def connect_to_region(region_name, **kw_params):
+ """
+ Given a valid region name, return a
+ :class:`boto.cloudformation.CloudFormationConnection`.
+
+ :param str region_name: The name of the region to connect to.
+
+ :rtype: :class:`boto.cloudformation.CloudFormationConnection` or ``None``
+ :return: A connection to the given region, or None if an invalid region
+ name is given
+ """
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
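+
+
+# A minimal usage sketch (illustrative):
+#
+#   import boto.cloudformation
+#   conn = boto.cloudformation.connect_to_region('us-west-2')
+#   if conn is None:
+#       raise ValueError('unknown CloudFormation region')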
diff --git a/boto/cloudformation/connection.py b/boto/cloudformation/connection.py
index 59640bd..816066c 100644
--- a/boto/cloudformation/connection.py
+++ b/boto/cloudformation/connection.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -31,14 +31,16 @@
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
+
class CloudFormationConnection(AWSQueryConnection):
"""
A Connection to the CloudFormation Service.
"""
- DefaultRegionName = 'us-east-1'
- DefaultRegionEndpoint = 'cloudformation.us-east-1.amazonaws.com'
- APIVersion = '2010-05-15'
+ APIVersion = boto.config.get('Boto', 'cfn_version', '2010-05-15')
+ DefaultRegionName = boto.config.get('Boto', 'cfn_region_name', 'us-east-1')
+ DefaultRegionEndpoint = boto.config.get('Boto', 'cfn_region_endpoint',
+ 'cloudformation.us-east-1.amazonaws.com')
valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE",
"ROLLBACK_IN_PROGRESS", "ROLLBACK_FAILED", "ROLLBACK_COMPLETE",
@@ -47,27 +49,35 @@
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
- https_connection_factory=None, region=None, path='/', converter=None):
+ https_connection_factory=None, region=None, path='/',
+ converter=None, security_token=None, validate_certs=True):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint, CloudFormationConnection)
self.region = region
- AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
- is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
- self.region.endpoint, debug, https_connection_factory, path)
+ AWSQueryConnection.__init__(self, aws_access_key_id,
+ aws_secret_access_key,
+ is_secure, port, proxy, proxy_port,
+ proxy_user, proxy_pass,
+ self.region.endpoint, debug,
+ https_connection_factory, path,
+ security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
- return ['cloudformation']
+ return ['hmac-v4']
def encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
- def create_stack(self, stack_name, template_body=None, template_url=None,
- parameters=[], notification_arns=[], disable_rollback=False,
- timeout_in_minutes=None):
+ def _build_create_or_update_params(self, stack_name, template_body,
+ template_url, parameters,
+ notification_arns, disable_rollback,
+ timeout_in_minutes, capabilities, tags):
"""
- Creates a CloudFormation Stack as specified by the template.
+ Helper that creates JSON parameters needed by a Stack Create or
+ Stack Update call.
:type stack_name: string
        :param stack_name: The name of the Stack, must be unique among running
@@ -78,28 +88,37 @@
:type template_url: string
:param template_url: An S3 URL of a stored template JSON document. If
- both the template_body and template_url are
- specified, the template_body takes precedence
+ both the template_body and template_url are
+ specified, the template_body takes precedence
:type parameters: list of tuples
:param parameters: A list of (key, value) pairs for template input
- parameters.
+ parameters.
:type notification_arns: list of strings
:param notification_arns: A list of SNS topics to send Stack event
- notifications to
+ notifications to.
:type disable_rollback: bool
:param disable_rollback: Indicates whether or not to rollback on
- failure
+ failure.
:type timeout_in_minutes: int
:param timeout_in_minutes: Maximum amount of time to let the Stack
- spend creating itself. If this timeout is exceeded,
- the Stack will enter the CREATE_FAILED state
+ spend creating itself. If this timeout is exceeded,
+ the Stack will enter the CREATE_FAILED state.
- :rtype: string
- :return: The unique Stack ID
+ :type capabilities: list
+ :param capabilities: The list of capabilities you want to allow in
+ the stack. Currently, the only valid capability is
+ 'CAPABILITY_IAM'.
+
+ :type tags: dict
+ :param tags: A dictionary of (key, value) pairs of tags to
+ associate with this stack.
+
+ :rtype: dict
+ :return: JSON parameters represented as a Python dict.
"""
params = {'ContentType': "JSON", 'StackName': stack_name,
'DisableRollback': self.encode_bool(disable_rollback)}
@@ -112,13 +131,72 @@
" specified, only TemplateBody will be honored by the API")
if len(parameters) > 0:
for i, (key, value) in enumerate(parameters):
- params['Parameters.member.%d.ParameterKey' % (i+1)] = key
- params['Parameters.member.%d.ParameterValue' % (i+1)] = value
+ params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
+ params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
+ if capabilities:
+ for i, value in enumerate(capabilities):
+ params['Capabilities.member.%d' % (i + 1)] = value
+ if tags:
+ for i, (key, value) in enumerate(tags.items()):
+ params['Tags.member.%d.Key' % (i + 1)] = key
+ params['Tags.member.%d.Value' % (i + 1)] = value
if len(notification_arns) > 0:
- self.build_list_params(params, notification_arns, "NotificationARNs.member")
+ self.build_list_params(params, notification_arns,
+ "NotificationARNs.member")
if timeout_in_minutes:
params['TimeoutInMinutes'] = int(timeout_in_minutes)
+ return params
+ def create_stack(self, stack_name, template_body=None, template_url=None,
+ parameters=[], notification_arns=[], disable_rollback=False,
+ timeout_in_minutes=None, capabilities=None, tags=None):
+ """
+ Creates a CloudFormation Stack as specified by the template.
+
+ :type stack_name: string
+        :param stack_name: The name of the Stack, must be unique among running
+ Stacks
+
+ :type template_body: string
+ :param template_body: The template body (JSON string)
+
+ :type template_url: string
+ :param template_url: An S3 URL of a stored template JSON document. If
+ both the template_body and template_url are
+ specified, the template_body takes precedence
+
+ :type parameters: list of tuples
+ :param parameters: A list of (key, value) pairs for template input
+ parameters.
+
+ :type notification_arns: list of strings
+ :param notification_arns: A list of SNS topics to send Stack event
+ notifications to.
+
+ :type disable_rollback: bool
+ :param disable_rollback: Indicates whether or not to rollback on
+ failure.
+
+ :type timeout_in_minutes: int
+ :param timeout_in_minutes: Maximum amount of time to let the Stack
+ spend creating itself. If this timeout is exceeded,
+ the Stack will enter the CREATE_FAILED state.
+
+ :type capabilities: list
+ :param capabilities: The list of capabilities you want to allow in
+ the stack. Currently, the only valid capability is
+ 'CAPABILITY_IAM'.
+
+ :type tags: dict
+ :param tags: A dictionary of (key, value) pairs of tags to
+ associate with this stack.
+
+ :rtype: string
+ :return: The unique Stack ID.
+ """
+ params = self._build_create_or_update_params(stack_name,
+ template_body, template_url, parameters, notification_arns,
+ disable_rollback, timeout_in_minutes, capabilities, tags)
response = self.make_request('CreateStack', params, '/', 'POST')
body = response.read()
if response.status == 200:
@@ -129,6 +207,66 @@
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
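+
+    # Illustrative call (bucket URL and all values are hypothetical):
+    #
+    #   stack_id = conn.create_stack(
+    #       'my-stack',
+    #       template_url='https://s3.amazonaws.com/mybucket/template.json',
+    #       parameters=[('KeyName', 'mykey')],
+    #       tags={'env': 'test'})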
+ def update_stack(self, stack_name, template_body=None, template_url=None,
+ parameters=[], notification_arns=[], disable_rollback=False,
+ timeout_in_minutes=None, capabilities=None, tags=None):
+ """
+ Updates a CloudFormation Stack as specified by the template.
+
+ :type stack_name: string
+        :param stack_name: The name of the Stack, must be unique among running
+ Stacks.
+
+ :type template_body: string
+ :param template_body: The template body (JSON string)
+
+ :type template_url: string
+ :param template_url: An S3 URL of a stored template JSON document. If
+ both the template_body and template_url are
+ specified, the template_body takes precedence.
+
+ :type parameters: list of tuples
+ :param parameters: A list of (key, value) pairs for template input
+ parameters.
+
+ :type notification_arns: list of strings
+ :param notification_arns: A list of SNS topics to send Stack event
+ notifications to.
+
+ :type disable_rollback: bool
+ :param disable_rollback: Indicates whether or not to rollback on
+ failure.
+
+ :type timeout_in_minutes: int
+ :param timeout_in_minutes: Maximum amount of time to let the Stack
+ spend creating itself. If this timeout is exceeded,
+ the Stack will enter the CREATE_FAILED state
+
+ :type capabilities: list
+ :param capabilities: The list of capabilities you want to allow in
+ the stack. Currently, the only valid capability is
+ 'CAPABILITY_IAM'.
+
+ :type tags: dict
+ :param tags: A dictionary of (key, value) pairs of tags to
+ associate with this stack.
+
+ :rtype: string
+ :return: The unique Stack ID.
+ """
+ params = self._build_create_or_update_params(stack_name,
+ template_body, template_url, parameters, notification_arns,
+ disable_rollback, timeout_in_minutes, capabilities, tags)
+ response = self.make_request('UpdateStack', params, '/', 'POST')
+ body = response.read()
+ if response.status == 200:
+ body = json.loads(body)
+ return body['UpdateStackResponse']['UpdateStackResult']['StackId']
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
def delete_stack(self, stack_name_or_id):
params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
# TODO: change this to get_status ?
@@ -153,7 +291,8 @@
def describe_stack_resource(self, stack_name_or_id, logical_resource_id):
params = {'ContentType': "JSON", 'StackName': stack_name_or_id,
'LogicalResourceId': logical_resource_id}
- response = self.make_request('DescribeStackResource', params, '/', 'GET')
+ response = self.make_request('DescribeStackResource', params,
+ '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
@@ -172,8 +311,8 @@
params['LogicalResourceId'] = logical_resource_id
if physical_resource_id:
params['PhysicalResourceId'] = physical_resource_id
- return self.get_list('DescribeStackResources', params, [('member',
- StackResource)])
+ return self.get_list('DescribeStackResources', params,
+ [('member', StackResource)])
def describe_stacks(self, stack_name_or_id=None):
params = {}
@@ -196,8 +335,8 @@
params = {'StackName': stack_name_or_id}
if next_token:
params['NextToken'] = next_token
- return self.get_list('ListStackResources', params, [('member',
- StackResourceSummary)])
+ return self.get_list('ListStackResources', params,
+ [('member', StackResourceSummary)])
def list_stacks(self, stack_status_filters=[], next_token=None):
params = {}
@@ -207,15 +346,15 @@
self.build_list_params(params, stack_status_filters,
"StackStatusFilter.member")
- return self.get_list('ListStacks', params, [('member',
- StackSummary)])
+ return self.get_list('ListStacks', params,
+ [('member', StackSummary)])
def validate_template(self, template_body=None, template_url=None):
params = {}
if template_body:
params['TemplateBody'] = template_body
if template_url:
- params['TemplateUrl'] = template_url
+ params['TemplateURL'] = template_url
if template_body and template_url:
boto.log.warning("If both TemplateBody and TemplateURL are"
" specified, only TemplateBody will be honored by the API")
diff --git a/boto/cloudformation/stack.py b/boto/cloudformation/stack.py
index 8b9e115..9a9d63b 100644
--- a/boto/cloudformation/stack.py
+++ b/boto/cloudformation/stack.py
@@ -2,7 +2,8 @@
from boto.resultset import ResultSet
-class Stack:
+
+class Stack(object):
def __init__(self, connection=None):
self.connection = connection
self.creation_time = None
@@ -11,6 +12,8 @@
self.notification_arns = []
self.outputs = []
self.parameters = []
+ self.capabilities = []
+ self.tags = []
self.stack_id = None
self.stack_status = None
self.stack_name = None
@@ -24,6 +27,15 @@
elif name == "Outputs":
self.outputs = ResultSet([('member', Output)])
return self.outputs
+ elif name == "Capabilities":
+ self.capabilities = ResultSet([('member', Capability)])
+ return self.capabilities
+ elif name == "Tags":
+ self.tags = Tag()
+ return self.tags
+ elif name == 'NotificationARNs':
+ self.notification_arns = ResultSet([('member', NotificationARN)])
+ return self.notification_arns
else:
return None
@@ -34,8 +46,6 @@
self.description = value
elif name == "DisableRollback":
self.disable_rollback = bool(value)
- elif name == "NotificationARNs":
- self.notification_arns = value
elif name == 'StackId':
self.stack_id = value
elif name == 'StackName':
@@ -91,7 +101,8 @@
def get_template(self):
return self.connection.get_template(stack_name_or_id=self.stack_id)
-class StackSummary:
+
+class StackSummary(object):
def __init__(self, connection=None):
self.connection = connection
self.stack_id = None
@@ -122,7 +133,8 @@
else:
setattr(self, name, value)
-class Parameter:
+
+class Parameter(object):
def __init__(self, connection=None):
self.connection = None
self.key = None
@@ -142,7 +154,8 @@
def __repr__(self):
return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value)
-class Output:
+
+class Output(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
@@ -165,7 +178,57 @@
def __repr__(self):
return "Output:\"%s\"=\"%s\"" % (self.key, self.value)
-class StackResource:
+
+class Capability(object):
+ def __init__(self, connection=None):
+ self.connection = None
+ self.value = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ self.value = value
+
+ def __repr__(self):
+ return "Capability:\"%s\"" % (self.value)
+
+
+class Tag(dict):
+
+ def __init__(self, connection=None):
+ dict.__init__(self)
+ self.connection = connection
+ self._current_key = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == "Key":
+ self._current_key = value
+ elif name == "Value":
+ self[self._current_key] = value
+ else:
+ setattr(self, name, value)
+
+
+class NotificationARN(object):
+ def __init__(self, connection=None):
+ self.connection = None
+ self.value = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ self.value = value
+
+ def __repr__(self):
+ return "NotificationARN:\"%s\"" % (self.value)
+
+
+class StackResource(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
@@ -207,7 +270,8 @@
return "StackResource:%s (%s)" % (self.logical_resource_id,
self.resource_type)
-class StackResourceSummary:
+
+class StackResourceSummary(object):
def __init__(self, connection=None):
self.connection = connection
self.last_updated_timestamp = None
@@ -222,7 +286,7 @@
def endElement(self, name, value, connection):
if name == "LastUpdatedTimestamp":
- self.last_updated_timestampe = datetime.strptime(value,
+ self.last_updated_timestamp = datetime.strptime(value,
'%Y-%m-%dT%H:%M:%SZ')
elif name == "LogicalResourceId":
self.logical_resource_id = value
@@ -241,7 +305,8 @@
return "StackResourceSummary:%s (%s)" % (self.logical_resource_id,
self.resource_type)
-class StackEvent:
+
+class StackEvent(object):
valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE",
"DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETE")
def __init__(self, connection=None):
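
A small sketch of how the new Tag class accumulates Key/Value pairs from the SAX events it receives; the events are hand-fed here for illustration:

    tag = Tag()
    tag.endElement('Key', 'env', None)     # remembers the pending key
    tag.endElement('Value', 'prod', None)  # stores {'env': 'prod'}
    assert tag == {'env': 'prod'}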
diff --git a/boto/cloudfront/__init__.py b/boto/cloudfront/__init__.py
index 7f98b70..9888f50 100644
--- a/boto/cloudfront/__init__.py
+++ b/boto/cloudfront/__init__.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -30,10 +30,11 @@
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.identity import OriginAccessIdentitySummary
from boto.cloudfront.identity import OriginAccessIdentityConfig
-from boto.cloudfront.invalidation import InvalidationBatch
+from boto.cloudfront.invalidation import InvalidationBatch, InvalidationSummary, InvalidationListResultSet
from boto.resultset import ResultSet
from boto.cloudfront.exception import CloudFrontServerError
+
class CloudFrontConnection(AWSAuthConnection):
DefaultHost = 'cloudfront.amazonaws.com'
@@ -41,10 +42,13 @@
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
- host=DefaultHost, debug=0):
+ host=DefaultHost, debug=0, security_token=None,
+ validate_certs=True):
AWSAuthConnection.__init__(self, host,
- aws_access_key_id, aws_secret_access_key,
- True, port, proxy, proxy_port, debug=debug)
+ aws_access_key_id, aws_secret_access_key,
+ True, port, proxy, proxy_port, debug=debug,
+ security_token=security_token,
+ validate_certs=validate_certs)
def get_etag(self, response):
response_headers = response.msg
@@ -57,16 +61,20 @@
return ['cloudfront']
# Generics
-
- def _get_all_objects(self, resource, tags):
+
+ def _get_all_objects(self, resource, tags, result_set_class=None,
+ result_set_kwargs=None):
if not tags:
- tags=[('DistributionSummary', DistributionSummary)]
- response = self.make_request('GET', '/%s/%s' % (self.Version, resource))
+ tags = [('DistributionSummary', DistributionSummary)]
+ response = self.make_request('GET', '/%s/%s' % (self.Version,
+ resource))
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise CloudFrontServerError(response.status, response.reason, body)
- rs = ResultSet(tags)
+ rs_class = result_set_class or ResultSet
+ rs_kwargs = result_set_kwargs or dict()
+ rs = rs_class(tags, **rs_kwargs)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
@@ -99,24 +107,26 @@
h = handler.XmlHandler(d, self)
xml.sax.parseString(body, h)
return d
-
+
def _set_config(self, distribution_id, etag, config):
if isinstance(config, StreamingDistributionConfig):
resource = 'streaming-distribution'
else:
resource = 'distribution'
uri = '/%s/%s/%s/config' % (self.Version, resource, distribution_id)
- headers = {'If-Match' : etag, 'Content-Type' : 'text/xml'}
+ headers = {'If-Match': etag, 'Content-Type': 'text/xml'}
response = self.make_request('PUT', uri, headers, config.to_xml())
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise CloudFrontServerError(response.status, response.reason, body)
return self.get_etag(response)
-
+
def _create_object(self, config, resource, dist_class):
- response = self.make_request('POST', '/%s/%s' % (self.Version, resource),
- {'Content-Type' : 'text/xml'}, data=config.to_xml())
+ response = self.make_request('POST', '/%s/%s' % (self.Version,
+ resource),
+ {'Content-Type': 'text/xml'},
+ data=config.to_xml())
body = response.read()
boto.log.debug(body)
if response.status == 201:
@@ -127,19 +137,19 @@
return d
else:
raise CloudFrontServerError(response.status, response.reason, body)
-
+
def _delete_object(self, id, etag, resource):
uri = '/%s/%s/%s' % (self.Version, resource, id)
- response = self.make_request('DELETE', uri, {'If-Match' : etag})
+ response = self.make_request('DELETE', uri, {'If-Match': etag})
body = response.read()
boto.log.debug(body)
if response.status != 204:
raise CloudFrontServerError(response.status, response.reason, body)
# Distributions
-
+
def get_all_distributions(self):
- tags=[('DistributionSummary', DistributionSummary)]
+ tags = [('DistributionSummary', DistributionSummary)]
return self._get_all_objects('distribution', tags)
def get_distribution_info(self, distribution_id):
@@ -148,10 +158,10 @@
def get_distribution_config(self, distribution_id):
return self._get_config(distribution_id, 'distribution',
DistributionConfig)
-
+
def set_distribution_config(self, distribution_id, etag, config):
return self._set_config(distribution_id, etag, config)
-
+
def create_distribution(self, origin, enabled, caller_reference='',
cnames=None, comment='', trusted_signers=None):
config = DistributionConfig(origin=origin, enabled=enabled,
@@ -159,14 +169,14 @@
cnames=cnames, comment=comment,
trusted_signers=trusted_signers)
return self._create_object(config, 'distribution', Distribution)
-
+
def delete_distribution(self, distribution_id, etag):
return self._delete_object(distribution_id, etag, 'distribution')
# Streaming Distributions
-
+
def get_all_streaming_distributions(self):
- tags=[('StreamingDistributionSummary', StreamingDistributionSummary)]
+ tags = [('StreamingDistributionSummary', StreamingDistributionSummary)]
return self._get_all_objects('streaming-distribution', tags)
def get_streaming_distribution_info(self, distribution_id):
@@ -176,10 +186,10 @@
def get_streaming_distribution_config(self, distribution_id):
return self._get_config(distribution_id, 'streaming-distribution',
StreamingDistributionConfig)
-
+
def set_streaming_distribution_config(self, distribution_id, etag, config):
return self._set_config(distribution_id, etag, config)
-
+
def create_streaming_distribution(self, origin, enabled,
caller_reference='',
cnames=None, comment='',
@@ -190,14 +200,15 @@
trusted_signers=trusted_signers)
return self._create_object(config, 'streaming-distribution',
StreamingDistribution)
-
+
def delete_streaming_distribution(self, distribution_id, etag):
- return self._delete_object(distribution_id, etag, 'streaming-distribution')
+ return self._delete_object(distribution_id, etag,
+ 'streaming-distribution')
# Origin Access Identity
def get_all_origin_access_identity(self):
- tags=[('CloudFrontOriginAccessIdentitySummary',
+ tags = [('CloudFrontOriginAccessIdentitySummary',
OriginAccessIdentitySummary)]
return self._get_all_objects('origin-access-identity/cloudfront', tags)
@@ -209,23 +220,23 @@
return self._get_config(access_id,
'origin-access-identity/cloudfront',
OriginAccessIdentityConfig)
-
+
def set_origin_access_identity_config(self, access_id,
etag, config):
return self._set_config(access_id, etag, config)
-
+
def create_origin_access_identity(self, caller_reference='', comment=''):
config = OriginAccessIdentityConfig(caller_reference=caller_reference,
comment=comment)
return self._create_object(config, 'origin-access-identity/cloudfront',
OriginAccessIdentity)
-
+
def delete_origin_access_identity(self, access_id, etag):
return self._delete_object(access_id, etag,
'origin-access-identity/cloudfront')
# Object Invalidation
-
+
def create_invalidation_request(self, distribution_id, paths,
caller_reference=None):
"""Creates a new invalidation request
@@ -239,7 +250,7 @@
uri = '/%s/distribution/%s/invalidation' % (self.Version,
distribution_id)
response = self.make_request('POST', uri,
- {'Content-Type' : 'text/xml'},
+ {'Content-Type': 'text/xml'},
data=paths.to_xml())
body = response.read()
if response.status == 201:
@@ -249,9 +260,12 @@
else:
raise CloudFrontServerError(response.status, response.reason, body)
- def invalidation_request_status (self, distribution_id, request_id, caller_reference=None):
- uri = '/%s/distribution/%s/invalidation/%s' % (self.Version, distribution_id, request_id )
- response = self.make_request('GET', uri, {'Content-Type' : 'text/xml'})
+ def invalidation_request_status(self, distribution_id,
+ request_id, caller_reference=None):
+ uri = '/%s/distribution/%s/invalidation/%s' % (self.Version,
+ distribution_id,
+ request_id)
+ response = self.make_request('GET', uri, {'Content-Type': 'text/xml'})
body = response.read()
if response.status == 200:
paths = InvalidationBatch([])
@@ -261,4 +275,50 @@
else:
raise CloudFrontServerError(response.status, response.reason, body)
+ def get_invalidation_requests(self, distribution_id, marker=None,
+ max_items=None):
+ """
+ Get all invalidation requests for a given CloudFront distribution.
+ This returns an instance of an InvalidationListResultSet that
+ transparently handles all of the result paging from CloudFront; you
+ just need to keep iterating until there are no more results.
+
+ :type distribution_id: string
+ :param distribution_id: The id of the CloudFront distribution
+
+ :type marker: string
+ :param marker: Use this only when paginating results and only in
+ follow-up request after you've received a response where
+ the results are truncated. Set this to the value of the
+ Marker element in the response you just received.
+
+ :type max_items: int
+ :param max_items: Use this only when paginating results and only in a
+ follow-up request to indicate the maximum number of
+ invalidation requests you want in the response. You
+ will need to pass the next_marker property from the
+ previous InvalidationListResultSet response in the
+ follow-up request in order to get the next 'page' of
+ results.
+
+ :rtype: :class:`boto.cloudfront.invalidation.InvalidationListResultSet`
+ :returns: An InvalidationListResultSet iterator that lists invalidation
+ requests for a given CloudFront distribution. Automatically
+ handles paging the results.
+ """
+ uri = 'distribution/%s/invalidation' % distribution_id
+ params = dict()
+ if marker:
+ params['Marker'] = marker
+ if max_items:
+ params['MaxItems'] = max_items
+ if params:
+ uri += '?%s=%s' % params.popitem()
+ for k, v in params.items():
+ uri += '&%s=%s' % (k, v)
+ tags = [('InvalidationSummary', InvalidationSummary)]
+ rs_class = InvalidationListResultSet
+ rs_kwargs = dict(connection=self, distribution_id=distribution_id,
+ max_items=max_items, marker=marker)
+ return self._get_all_objects(uri, tags, result_set_class=rs_class,
+ result_set_kwargs=rs_kwargs)
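
A sketch of the new listing call with auto-pagination, assuming a CloudFrontConnection conn; the distribution id is illustrative:

    # Iteration transparently follows NextMarker until IsTruncated
    # comes back false, so this visits every invalidation request.
    for inval in conn.get_invalidation_requests('EDFDVBD6EXAMPLE'):
        print inval.id, inval.status  # InvalidationSummary objects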
diff --git a/boto/cloudfront/distribution.py b/boto/cloudfront/distribution.py
index 01ceed4..718f2c2 100644
--- a/boto/cloudfront/distribution.py
+++ b/boto/cloudfront/distribution.py
@@ -21,7 +21,11 @@
import uuid
import base64
-import json
+import time
+try:
+ import simplejson as json
+except ImportError:
+ import json
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.object import Object, StreamingObject
from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
@@ -508,46 +512,44 @@
:type keypair_id: str
:param keypair_id: The keypair ID of the Amazon KeyPair used to sign
- theURL. This ID MUST correspond to the private key
- specified with private_key_file or
- private_key_string.
+ the URL. This ID MUST correspond to the private key
+ specified with private_key_file or private_key_string.
:type expire_time: int
:param expire_time: The expiry time of the URL. If provided, the URL
- will expire after the time has passed. If not
- provided the URL will never expire. Format is a
- unix epoch. Use time.time() + duration_in_sec.
+ will expire after the time has passed. If not provided the URL will
+ never expire. Format is a unix epoch.
+ Use time.time() + duration_in_sec.
:type valid_after_time: int
:param valid_after_time: If provided, the URL will not be valid until
- after valid_after_time. Format is a unix
- epoch. Use time.time() + secs_until_valid.
+ after valid_after_time. Format is a unix epoch.
+ Use time.time() + secs_until_valid.
:type ip_address: str
:param ip_address: If provided, only allows access from the specified
- IP address. Use '192.168.0.10' for a single IP or
- use '192.168.0.0/24' CIDR notation for a subnet.
+ IP address. Use '192.168.0.10' for a single IP or
+ use '192.168.0.0/24' CIDR notation for a subnet.
:type policy_url: str
:param policy_url: If provided, allows the signature to contain
- wildcard globs in the URL. For example, you could
- provide: 'http://example.com/media/*' and the policy
- and signature would allow access to all contents of
- the media subdirectory. If not specified, only
- allow access to the exact url provided in 'url'.
+ wildcard globs in the URL. For example, you could
+ provide: 'http://example.com/media/\*' and the policy
+ and signature would allow access to all contents of
+ the media subdirectory. If not specified, only
+ allow access to the exact url provided in 'url'.
:type private_key_file: str or file object.
:param private_key_file: If provided, contains the filename of the
- private key file used for signing or an open
- file object containing the private key
- contents. Only one of private_key_file or
- private_key_string can be provided.
+ private key file used for signing or an open
+ file object containing the private key
+ contents. Only one of private_key_file or
+ private_key_string can be provided.
:type private_key_string: str
:param private_key_string: If provided, contains the private key string
- used for signing. Only one of
- private_key_file or private_key_string can
- be provided.
+ used for signing. Only one of private_key_file or
+ private_key_string can be provided.
:rtype: str
:return: The signed URL.
@@ -591,9 +593,10 @@
if policy_url is None:
policy_url = url
# Can't use canned policy
- policy = self._custom_policy(policy_url, expires=None,
- valid_after=None,
- ip_address=None)
+ policy = self._custom_policy(policy_url, expires=expire_time,
+ valid_after=valid_after_time,
+ ip_address=ip_address)
+
encoded_policy = self._url_base64_encode(policy)
params["Policy"] = encoded_policy
#sign the policy
@@ -620,8 +623,12 @@
Creates a custom policy string based on the supplied parameters.
"""
condition = {}
- if expires:
- condition["DateLessThan"] = {"AWS:EpochTime": expires}
+ # SEE: http://docs.amazonwebservices.com/AmazonCloudFront/latest/DeveloperGuide/RestrictingAccessPrivateContent.html#CustomPolicy
+ # The 'DateLessThan' property is required.
+ if not expires:
+ # Defaults to ONE day
+ expires = int(time.time()) + 86400
+ condition["DateLessThan"] = {"AWS:EpochTime": expires}
if valid_after:
condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after}
if ip_address:
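
With the fix above, expire_time, valid_after_time and ip_address are actually forwarded into the custom policy, and the DateLessThan condition is always emitted, defaulting to one day out. A short sketch, assuming the surrounding create_signed_url method and illustrative values:

    import time

    url = dist.create_signed_url(
        'http://d12345example.cloudfront.net/image.jpg',
        'PK12345EXAMPLE',
        expire_time=int(time.time()) + 3600,  # one hour from now
        private_key_file='/path/to/private-key.pem')
    # With expire_time omitted, the policy's DateLessThan condition now
    # defaults to int(time.time()) + 86400 (one day).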
diff --git a/boto/cloudfront/invalidation.py b/boto/cloudfront/invalidation.py
index b213e65..dcc3c4c 100644
--- a/boto/cloudfront/invalidation.py
+++ b/boto/cloudfront/invalidation.py
@@ -22,6 +22,9 @@
import uuid
import urllib
+from boto.resultset import ResultSet
+
+
class InvalidationBatch(object):
"""A simple invalidation request.
:see: http://docs.amazonwebservices.com/AmazonCloudFront/2010-08-01/APIReference/index.html?InvalidationBatchDatatype.html
@@ -40,10 +43,13 @@
# If we passed in a distribution,
# then we use that as the connection object
if distribution:
- self.connection = connection
+ self.connection = distribution
else:
self.connection = connection
+ def __repr__(self):
+ return '<InvalidationBatch: %s>' % self.id
+
def add(self, path):
"""Add another path to this invalidation request"""
return self.paths.append(path)
@@ -95,3 +101,116 @@
elif name == "CallerReference":
self.caller_reference = value
return None
+
+
+class InvalidationListResultSet(object):
+ """
+ A resultset for listing invalidations on a given CloudFront distribution.
+ Implements the iterator interface and transparently handles paging
+ results from CloudFront, so even if you have many thousands of
+ invalidations on the distribution you can iterate over all of them
+ reasonably efficiently.
+ """
+ def __init__(self, markers=None, connection=None, distribution_id=None,
+ invalidations=None, marker='', next_marker=None,
+ max_items=None, is_truncated=False):
+ self.markers = markers or []
+ self.connection = connection
+ self.distribution_id = distribution_id
+ self.marker = marker
+ self.next_marker = next_marker
+ self.max_items = max_items
+ self.auto_paginate = max_items is None
+ self.is_truncated = is_truncated
+ self._inval_cache = invalidations or []
+
+ def __iter__(self):
+ """
+ A generator function for listing invalidation requests for a given
+ CloudFront distribution.
+ """
+ conn = self.connection
+ distribution_id = self.distribution_id
+ result_set = self
+ for inval in result_set._inval_cache:
+ yield inval
+ if not self.auto_paginate:
+ return
+ while result_set.is_truncated:
+ result_set = conn.get_invalidation_requests(distribution_id,
+ marker=result_set.next_marker,
+ max_items=result_set.max_items)
+ for i in result_set._inval_cache:
+ yield i
+
+ def startElement(self, name, attrs, connection):
+ for root_elem, handler in self.markers:
+ if name == root_elem:
+ obj = handler(connection, distribution_id=self.distribution_id)
+ self._inval_cache.append(obj)
+ return obj
+
+ def endElement(self, name, value, connection):
+ if name == 'IsTruncated':
+ self.is_truncated = self.to_boolean(value)
+ elif name == 'Marker':
+ self.marker = value
+ elif name == 'NextMarker':
+ self.next_marker = value
+ elif name == 'MaxItems':
+ self.max_items = int(value)
+
+ def to_boolean(self, value, true_value='true'):
+ if value == true_value:
+ return True
+ else:
+ return False
+
+
+class InvalidationSummary(object):
+ """
+ Represents InvalidationSummary complex type in CloudFront API that lists
+ the id and status of a given invalidation request.
+ """
+ def __init__(self, connection=None, distribution_id=None, id='',
+ status=''):
+ self.connection = connection
+ self.distribution_id = distribution_id
+ self.id = id
+ self.status = status
+
+ def __repr__(self):
+ return '<InvalidationSummary: %s>' % self.id
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'Id':
+ self.id = value
+ elif name == 'Status':
+ self.status = value
+
+ def get_distribution(self):
+ """
+ Returns a Distribution object representing the parent CloudFront
+ distribution of the invalidation request listed in the
+ InvalidationSummary.
+
+ :rtype: :class:`boto.cloudfront.distribution.Distribution`
+ :returns: A Distribution object representing the parent CloudFront
+ distribution of the invalidation request listed in the
+ InvalidationSummary
+ """
+ return self.connection.get_distribution_info(self.distribution_id)
+
+ def get_invalidation_request(self):
+ """
+ Returns an InvalidationBatch object representing the invalidation
+ request referred to in the InvalidationSummary.
+
+ :rtype: :class:`boto.cloudfront.invalidation.InvalidationBatch`
+ :returns: An InvalidationBatch object representing the invalidation
+ request referred to by the InvalidationSummary
+ """
+ return self.connection.invalidation_request_status(
+ self.distribution_id, self.id)
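
Setting max_items disables auto-pagination (auto_paginate is only True when max_items is None), so callers page manually with next_marker; a sketch with an illustrative distribution id:

    rs = conn.get_invalidation_requests('EDFDVBD6EXAMPLE', max_items=100)
    while True:
        for inval in rs:
            print inval.id, inval.status
        if not rs.is_truncated:
            break
        rs = conn.get_invalidation_requests('EDFDVBD6EXAMPLE',
                                            marker=rs.next_marker,
                                            max_items=100)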
diff --git a/boto/cloudsearch/__init__.py b/boto/cloudsearch/__init__.py
new file mode 100644
index 0000000..9c8157a
--- /dev/null
+++ b/boto/cloudsearch/__init__.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.ec2.regioninfo import RegionInfo
+
+
+def regions():
+ """
+ Get all available regions for the Amazon CloudSearch service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ import boto.cloudsearch.layer1
+ return [RegionInfo(name='us-east-1',
+ endpoint='cloudsearch.us-east-1.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
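
A sketch of the new region helpers, assuming credentials are available from the environment; the domain name is illustrative:

    import boto.cloudsearch

    conn = boto.cloudsearch.connect_to_region('us-east-1')
    # Layer1.create_domain returns the parsed DomainStatus as a dict.
    status = conn.create_domain('mydomain')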
diff --git a/boto/cloudsearch/document.py b/boto/cloudsearch/document.py
new file mode 100644
index 0000000..64a11e0
--- /dev/null
+++ b/boto/cloudsearch/document.py
@@ -0,0 +1,150 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+import boto.exception
+import requests
+import boto
+
+
+class SearchServiceException(Exception):
+ pass
+
+
+class CommitMismatchError(Exception):
+ pass
+
+
+class DocumentServiceConnection(object):
+
+ def __init__(self, domain=None, endpoint=None):
+ self.domain = domain
+ self.endpoint = endpoint
+ if not self.endpoint:
+ self.endpoint = domain.doc_service_endpoint
+ self.documents_batch = []
+ self._sdf = None
+
+ def add(self, _id, version, fields, lang='en'):
+ d = {'type': 'add', 'id': _id, 'version': version, 'lang': lang,
+ 'fields': fields}
+ self.documents_batch.append(d)
+
+ def delete(self, _id, version):
+ d = {'type': 'delete', 'id': _id, 'version': version}
+ self.documents_batch.append(d)
+
+ def get_sdf(self):
+ return self._sdf if self._sdf else json.dumps(self.documents_batch)
+
+ def clear_sdf(self):
+ self._sdf = None
+ self.documents_batch = []
+
+ def add_sdf_from_s3(self, key_obj):
+ """@todo (lucas) would be nice if this could just take an s3://uri..."""
+ self._sdf = key_obj.get_contents_as_string()
+
+ def commit(self):
+ sdf = self.get_sdf()
+
+ if ': null' in sdf:
+ boto.log.error('null value in sdf detected. This will probably raise '
+ 'a 500 error.')
+ index = sdf.index(': null')
+ boto.log.error(sdf[index - 100:index + 100])
+
+ url = "http://%s/2011-02-01/documents/batch" % (self.endpoint)
+
+ request_config = {
+ 'pool_connections': 20,
+ 'keep_alive': True,
+ 'max_retries': 5,
+ 'pool_maxsize': 50
+ }
+
+ r = requests.post(url, data=sdf, config=request_config,
+ headers={'Content-Type': 'application/json'})
+
+ return CommitResponse(r, self, sdf)
+
+
+class CommitResponse(object):
+ """Wrapper for response to Cloudsearch document batch commit.
+
+ :type response: :class:`requests.models.Response`
+ :param response: Response from Cloudsearch /documents/batch API
+
+ :type doc_service: :class:`boto.cloudsearch.document.DocumentServiceConnection`
+ :param doc_service: Object containing the documents posted and methods to
+ retry
+
+ :raises: :class:`boto.exception.BotoServerError`
+ :raises: :class:`boto.cloudsearch.document.CommitMismatchError`
+ """
+ def __init__(self, response, doc_service, sdf):
+ self.response = response
+ self.doc_service = doc_service
+ self.sdf = sdf
+
+ try:
+ self.content = json.loads(response.content)
+ except Exception:
+ boto.log.error('Error indexing documents.\nResponse Content:\n{}\n\n'
+ 'SDF:\n{}'.format(response.content, self.sdf))
+ raise boto.exception.BotoServerError(self.response.status_code, '',
+ body=response.content)
+
+ self.status = self.content['status']
+ if self.status == 'error':
+ self.errors = [e.get('message') for e in self.content.get('errors',
+ [])]
+ else:
+ self.errors = []
+
+ self.adds = self.content['adds']
+ self.deletes = self.content['deletes']
+ self._check_num_ops('add', self.adds)
+ self._check_num_ops('delete', self.deletes)
+
+ def _check_num_ops(self, type_, response_num):
+ """Raise exception if number of ops in response doesn't match commit
+
+ :type type_: str
+ :param type_: Type of commit operation: 'add' or 'delete'
+
+ :type response_num: int
+ :param response_num: Number of adds or deletes in the response.
+
+ :raises: :class:`boto.cloudsearch.document.CommitMismatchError`
+ """
+ commit_num = len([d for d in self.doc_service.documents_batch
+ if d['type'] == type_])
+
+ if response_num != commit_num:
+ raise CommitMismatchError(
+ 'Incorrect number of {}s returned. Commit: {} Response: {}'\
+ .format(type_, commit_num, response_num))
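
A sketch of batching documents through DocumentServiceConnection; the endpoint, document ids, and fields are illustrative:

    doc_service = DocumentServiceConnection(
        endpoint='doc-mydomain-abc123.us-east-1.cloudsearch.amazonaws.com')
    doc_service.add('doc-1', 1, {'title': 'Hello', 'body': 'First document'})
    doc_service.delete('doc-0', 2)
    # commit() posts the SDF batch; the CommitResponse raises
    # CommitMismatchError if the add/delete counts do not match.
    response = doc_service.commit()
    print response.status, response.adds, response.deletes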
diff --git a/boto/cloudsearch/domain.py b/boto/cloudsearch/domain.py
new file mode 100644
index 0000000..43fcac8
--- /dev/null
+++ b/boto/cloudsearch/domain.py
@@ -0,0 +1,397 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+try:
+ import simplejson as json
+except ImportError:
+ import json
+from .optionstatus import OptionStatus
+from .optionstatus import IndexFieldStatus
+from .optionstatus import ServicePoliciesStatus
+from .optionstatus import RankExpressionStatus
+from .document import DocumentServiceConnection
+from .search import SearchConnection
+
+
+def handle_bool(value):
+ if value in [True, 'true', 'True', 'TRUE', 1]:
+ return True
+ return False
+
+
+class Domain(object):
+ """
+ A Cloudsearch domain.
+
+ :ivar name: The name of the domain.
+
+ :ivar id: The internally generated unique identifier for the domain.
+
+ :ivar created: A boolean which is True if the domain is
+ created. It can take several minutes to initialize a domain
+ when CreateDomain is called. Newly created search domains are
+ returned with a False value for Created until domain creation
+ is complete.
+
+ :ivar deleted: A boolean which is True if the search domain has
+ been deleted. The system must clean up resources dedicated to
+ the search domain when delete is called. Newly deleted
+ search domains are returned from list_domains with a True
+ value for deleted for several minutes until resource cleanup
+ is complete.
+
+ :ivar processing: True if processing is being done to activate the
+ current domain configuration.
+
+ :ivar num_searchable_docs: The number of documents that have been
+ submitted to the domain and indexed.
+
+ :ivar requires_index_document: True if index_documents needs to be
+ called to activate the current domain configuration.
+
+ :ivar search_instance_count: The number of search instances that are
+ available to process search requests.
+
+ :ivar search_instance_type: The instance type that is being used to
+ process search requests.
+
+ :ivar search_partition_count: The number of partitions across which
+ the search index is spread.
+ """
+
+ def __init__(self, layer1, data):
+ self.layer1 = layer1
+ self.update_from_data(data)
+
+ def update_from_data(self, data):
+ self.created = data['created']
+ self.deleted = data['deleted']
+ self.processing = data['processing']
+ self.requires_index_documents = data['requires_index_documents']
+ self.domain_id = data['domain_id']
+ self.domain_name = data['domain_name']
+ self.num_searchable_docs = data['num_searchable_docs']
+ self.search_instance_count = data['search_instance_count']
+ self.search_instance_type = data.get('search_instance_type', None)
+ self.search_partition_count = data['search_partition_count']
+ self._doc_service = data['doc_service']
+ self._search_service = data['search_service']
+
+ @property
+ def doc_service_arn(self):
+ return self._doc_service['arn']
+
+ @property
+ def doc_service_endpoint(self):
+ return self._doc_service['endpoint']
+
+ @property
+ def search_service_arn(self):
+ return self._search_service['arn']
+
+ @property
+ def search_service_endpoint(self):
+ return self._search_service['endpoint']
+
+ @property
+ def created(self):
+ return self._created
+
+ @created.setter
+ def created(self, value):
+ self._created = handle_bool(value)
+
+ @property
+ def deleted(self):
+ return self._deleted
+
+ @deleted.setter
+ def deleted(self, value):
+ self._deleted = handle_bool(value)
+
+ @property
+ def processing(self):
+ return self._processing
+
+ @processing.setter
+ def processing(self, value):
+ self._processing = handle_bool(value)
+
+ @property
+ def requires_index_documents(self):
+ return self._requires_index_documents
+
+ @requires_index_documents.setter
+ def requires_index_documents(self, value):
+ self._requires_index_documents = handle_bool(value)
+
+ @property
+ def search_partition_count(self):
+ return self._search_partition_count
+
+ @search_partition_count.setter
+ def search_partition_count(self, value):
+ self._search_partition_count = int(value)
+
+ @property
+ def search_instance_count(self):
+ return self._search_instance_count
+
+ @search_instance_count.setter
+ def search_instance_count(self, value):
+ self._search_instance_count = int(value)
+
+ @property
+ def num_searchable_docs(self):
+ return self._num_searchable_docs
+
+ @num_searchable_docs.setter
+ def num_searchable_docs(self, value):
+ self._num_searchable_docs = int(value)
+
+ @property
+ def name(self):
+ return self.domain_name
+
+ @property
+ def id(self):
+ return self.domain_id
+
+ def delete(self):
+ """
+ Delete this domain and all index data associated with it.
+ """
+ return self.layer1.delete_domain(self.name)
+
+ def get_stemming(self):
+ """
+ Return a :class:`boto.cloudsearch.optionstatus.OptionStatus` object
+ representing the currently defined stemming options for
+ the domain.
+ """
+ return OptionStatus(self, None,
+ self.layer1.describe_stemming_options,
+ self.layer1.update_stemming_options)
+
+ def get_stopwords(self):
+ """
+ Return a :class:`boto.cloudsearch.optionstatus.OptionStatus` object
+ representing the currently defined stopword options for
+ the domain.
+ """
+ return OptionStatus(self, None,
+ self.layer1.describe_stopword_options,
+ self.layer1.update_stopword_options)
+
+ def get_synonyms(self):
+ """
+ Return a :class:`boto.cloudsearch.optionstatus.OptionStatus` object
+ representing the currently defined synonym options for
+ the domain.
+ """
+ return OptionStatus(self, None,
+ self.layer1.describe_synonym_options,
+ self.layer1.update_synonym_options)
+
+ def get_access_policies(self):
+ """
+ Return a :class:`boto.cloudsearch.optionstatus.ServicePoliciesStatus`
+ object representing the currently defined access policies for
+ the domain.
+ """
+ return ServicePoliciesStatus(self, None,
+ self.layer1.describe_service_access_policies,
+ self.layer1.update_service_access_policies)
+
+ def index_documents(self):
+ """
+ Tells the search domain to start indexing its documents using
+ the latest text processing options and IndexFields. This
+ operation must be invoked to make options whose OptionStatus
+ has an OptionState of RequiresIndexDocuments visible in search
+ results.
+ """
+ self.layer1.index_documents(self.name)
+
+ def get_index_fields(self, field_names=None):
+ """
+ Return a list of index fields defined for this domain.
+ """
+ data = self.layer1.describe_index_fields(self.name, field_names)
+ return [IndexFieldStatus(self, d) for d in data]
+
+ def create_index_field(self, field_name, field_type,
+ default='', facet=False, result=False, searchable=False,
+ source_attributes=None):
+ """
+ Defines an ``IndexField``, either replacing an existing
+ definition or creating a new one.
+
+ :type field_name: string
+ :param field_name: The name of a field in the search index.
+
+ :type field_type: string
+ :param field_type: The type of field. Valid values are
+ uint | literal | text
+
+ :type default: string or int
+ :param default: The default value for the field. If the
+ field is of type ``uint`` this should be an integer value.
+ Otherwise, it's a string.
+
+ :type facet: bool
+ :param facet: A boolean to indicate whether facets
+ are enabled for this field or not. Does not apply to
+ fields of type ``uint``.
+
+ :type results: bool
+ :param results: A boolean to indicate whether values
+ of this field can be returned in search results or
+ used in ranking. Does not apply to fields of type ``uint``.
+
+ :type searchable: bool
+ :param searchable: A boolean to indicate whether search
+ is enabled for this field or not. Applies only to fields
+ of type ``literal``.
+
+ :type source_attributes: list of dicts
+ :param source_attributes: An optional list of dicts that
+ provide information about attributes for this index field.
+ A maximum of 20 source attributes can be configured for
+ each index field.
+
+ Each item in the list is a dict with the following keys:
+
+ * data_copy - The value is a dict with the following keys:
+ * default - Optional default value if the source attribute
+ is not specified in a document.
+ * name - The name of the document source field to add
+ to this ``IndexField``.
+ * data_function - Identifies the transformation to apply
+ when copying data from a source attribute.
+ * data_map - The value is a dict with the following keys:
+ * cases - A dict that translates source field values
+ to custom values.
+ * default - An optional default value to use if the
+ source attribute is not specified in a document.
+ * name - the name of the document source field to add
+ to this ``IndexField``
+ * data_trim_title - Trims common title words from a source
+ document attribute when populating an ``IndexField``.
+ This can be used to create an ``IndexField`` you can
+ use for sorting. The value is a dict with the following
+ fields:
+ * default - An optional default value.
+ * language - an IETF RFC 4646 language code.
+ * separator - The separator that follows the text to trim.
+ * name - The name of the document source field to add.
+
+ :raises: BaseException, InternalException, LimitExceededException,
+ InvalidTypeException, ResourceNotFoundException
+ """
+ data = self.layer1.define_index_field(self.name, field_name,
+ field_type, default=default,
+ facet=facet, result=result,
+ searchable=searchable,
+ source_attributes=source_attributes)
+ return IndexFieldStatus(self, data,
+ self.layer1.describe_index_fields)
+
+ def get_rank_expressions(self, rank_names=None):
+ """
+ Return a list of rank expressions defined for this domain.
+ """
+ fn = self.layer1.describe_rank_expressions
+ data = fn(self.name, rank_names)
+ return [RankExpressionStatus(self, d, fn) for d in data]
+
+ def create_rank_expression(self, name, expression):
+ """
+ Create a new rank expression.
+
+ :type rank_name: string
+ :param rank_name: The name of an expression computed for ranking
+ while processing a search request.
+
+ :type rank_expression: string
+ :param rank_expression: The expression to evaluate for ranking
+ or thresholding while processing a search request. The
+ RankExpression syntax is based on JavaScript expressions
+ and supports:
+
+ * Integer, floating point, hex and octal literals
+ * Shortcut evaluation of logical operators such that an
+ expression a || b evaluates to the value a if a is
+ true without evaluating b at all
+ * JavaScript order of precedence for operators
+ * Arithmetic operators: + - * / %
+ * Boolean operators (including the ternary operator)
+ * Bitwise operators
+ * Comparison operators
+ * Common mathematic functions: abs ceil erf exp floor
+ lgamma ln log2 log10 max min sqrt pow
+ * Trigonometric library functions: acosh acos asinh asin
+ atanh atan cosh cos sinh sin tanh tan
+ * Random generation of a number between 0 and 1: rand
+ * Current time in epoch: time
+ * The min max functions that operate on a variable argument list
+
+ Intermediate results are calculated as double precision
+ floating point values. The final return value of a
+ RankExpression is automatically converted from floating
+ point to a 32-bit unsigned integer by rounding to the
+ nearest integer, with a natural floor of 0 and a ceiling
+ of max(uint32_t), 4294967295. Mathematical errors such as
+ dividing by 0 will fail during evaluation and return a
+ value of 0.
+
+ The source data for a RankExpression can be the name of an
+ IndexField of type uint, another RankExpression or the
+ reserved name text_relevance. The text_relevance source is
+ defined to return an integer from 0 to 1000 (inclusive) to
+ indicate how relevant a document is to the search request,
+ taking into account repetition of search terms in the
+ document and proximity of search terms to each other in
+ each matching IndexField in the document.
+
+ For more information about using rank expressions to
+ customize ranking, see the Amazon CloudSearch Developer
+ Guide.
+
+ :raises: BaseException, InternalException, LimitExceededException,
+ InvalidTypeException, ResourceNotFoundException
+ """
+ data = self.layer1.define_rank_expression(self.name, name, expression)
+ return RankExpressionStatus(self, data,
+ self.layer1.describe_rank_expressions)
+
+ def get_document_service(self):
+ return DocumentServiceConnection(domain=self)
+
+ def get_search_service(self):
+ return SearchConnection(domain=self)
+
+ def __repr__(self):
+ return '<Domain: %s>' % self.domain_name
+
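A sketch of driving the higher-level Domain object on top of Layer1; the domain and field names are illustrative:

    import boto.cloudsearch
    from boto.cloudsearch.domain import Domain

    conn = boto.cloudsearch.connect_to_region('us-east-1')
    domain = Domain(conn, conn.create_domain('mydomain'))
    domain.create_index_field('title', 'text', result=True)
    domain.index_documents()  # activate options pending IndexDocuments
    doc_service = domain.get_document_service()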
diff --git a/boto/cloudsearch/layer1.py b/boto/cloudsearch/layer1.py
new file mode 100644
index 0000000..054fc32
--- /dev/null
+++ b/boto/cloudsearch/layer1.py
@@ -0,0 +1,738 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+import boto.jsonresponse
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+
+#boto.set_stream_logger('cloudsearch')
+
+
+def do_bool(val):
+ return 'true' if val in [True, 1, '1', 'true'] else 'false'
+
+
+class Layer1(AWSQueryConnection):
+
+ APIVersion = '2011-02-01'
+ DefaultRegionName = boto.config.get('Boto', 'cs_region_name', 'us-east-1')
+ DefaultRegionEndpoint = boto.config.get('Boto', 'cs_region_endpoint',
+ 'cloudsearch.us-east-1.amazonaws.com')
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, host=None, port=None,
+ proxy=None, proxy_port=None,
+ proxy_user=None, proxy_pass=None, debug=0,
+ https_connection_factory=None, region=None, path='/',
+ api_version=None, security_token=None,
+ validate_certs=True):
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+ self.region = region
+ AWSQueryConnection.__init__(self, aws_access_key_id,
+ aws_secret_access_key,
+ is_secure, port, proxy, proxy_port,
+ proxy_user, proxy_pass,
+ self.region.endpoint, debug,
+ https_connection_factory, path,
+ security_token,
+ validate_certs=validate_certs)
+
+ def _required_auth_capability(self):
+ return ['sign-v2']
+
+ def get_response(self, doc_path, action, params, path='/',
+ parent=None, verb='GET', list_marker=None):
+ if not parent:
+ parent = self
+ response = self.make_request(action, params, path, verb)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ e = boto.jsonresponse.Element(
+ list_marker=list_marker if list_marker else 'Set',
+ pythonize_name=True)
+ h = boto.jsonresponse.XmlHandler(e, parent)
+ h.parse(body)
+ inner = e
+ for p in doc_path:
+ inner = inner.get(p)
+ if not inner:
+ return None if list_marker is None else []
+ if isinstance(inner, list):
+ return [dict(**i) for i in inner]
+ else:
+ return dict(**inner)
+ else:
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def create_domain(self, domain_name):
+ """
+ Create a new search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :raises: BaseException, InternalException, LimitExceededException
+ """
+ doc_path = ('create_domain_response',
+ 'create_domain_result',
+ 'domain_status')
+ params = {'DomainName': domain_name}
+ return self.get_response(doc_path, 'CreateDomain',
+ params, verb='POST')
+
+ def define_index_field(self, domain_name, field_name, field_type,
+ default='', facet=False, result=False,
+ searchable=False, source_attributes=None):
+ """
+ Defines an ``IndexField``, either replacing an existing
+ definition or creating a new one.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type field_name: string
+ :param field_name: The name of a field in the search index.
+
+ :type field_type: string
+ :param field_type: The type of field. Valid values are
+ uint | literal | text
+
+ :type default: string or int
+ :param default: The default value for the field. If the
+ field is of type ``uint`` this should be an integer value.
+ Otherwise, it's a string.
+
+ :type facet: bool
+ :param facet: A boolean to indicate whether facets
+ are enabled for this field or not. Does not apply to
+ fields of type ``uint``.
+
+ :type results: bool
+ :param results: A boolean to indicate whether values
+ of this field can be returned in search results or
+ used in ranking. Does not apply to fields of type ``uint``.
+
+ :type searchable: bool
+ :param searchable: A boolean to indicate whether search
+ is enabled for this field or not. Applies only to fields
+ of type ``literal``.
+
+ :type source_attributes: list of dicts
+ :param source_attributes: An optional list of dicts that
+ provide information about attributes for this index field.
+ A maximum of 20 source attributes can be configured for
+ each index field.
+
+ Each item in the list is a dict with the following keys:
+
+ * data_copy - The value is a dict with the following keys:
+ * default - Optional default value if the source attribute
+ is not specified in a document.
+ * name - The name of the document source field to add
+ to this ``IndexField``.
+ * data_function - Identifies the transformation to apply
+ when copying data from a source attribute.
+ * data_map - The value is a dict with the following keys:
+ * cases - A dict that translates source field values
+ to custom values.
+ * default - An optional default value to use if the
+ source attribute is not specified in a document.
+ * name - the name of the document source field to add
+ to this ``IndexField``
+ * data_trim_title - Trims common title words from a source
+ document attribute when populating an ``IndexField``.
+ This can be used to create an ``IndexField`` you can
+ use for sorting. The value is a dict with the following
+ fields:
+ * default - An optional default value.
+ * language - an IETF RFC 4646 language code.
+ * separator - The separator that follows the text to trim.
+ * name - The name of the document source field to add.
+
+ :raises: BaseException, InternalException, LimitExceededException,
+ InvalidTypeException, ResourceNotFoundException
+ """
+ doc_path = ('define_index_field_response',
+ 'define_index_field_result',
+ 'index_field')
+ params = {'DomainName': domain_name,
+ 'IndexField.IndexFieldName': field_name,
+ 'IndexField.IndexFieldType': field_type}
+ if field_type == 'literal':
+ params['IndexField.LiteralOptions.DefaultValue'] = default
+ params['IndexField.LiteralOptions.FacetEnabled'] = do_bool(facet)
+ params['IndexField.LiteralOptions.ResultEnabled'] = do_bool(result)
+ params['IndexField.LiteralOptions.SearchEnabled'] = do_bool(searchable)
+ elif field_type == 'uint':
+ params['IndexField.UIntOptions.DefaultValue'] = default
+ elif field_type == 'text':
+ params['IndexField.TextOptions.DefaultValue'] = default
+ params['IndexField.TextOptions.FacetEnabled'] = do_bool(facet)
+ params['IndexField.TextOptions.ResultEnabled'] = do_bool(result)
+
+ return self.get_response(doc_path, 'DefineIndexField',
+ params, verb='POST')
+
+ def define_rank_expression(self, domain_name, rank_name, rank_expression):
+ """
+ Defines a RankExpression, either replacing an existing
+ definition or creating a new one.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type rank_name: string
+ :param rank_name: The name of an expression computed for ranking
+ while processing a search request.
+
+ :type rank_expression: string
+ :param rank_expression: The expression to evaluate for ranking
+ or thresholding while processing a search request. The
+ RankExpression syntax is based on JavaScript expressions
+ and supports:
+
+ * Integer, floating point, hex and octal literals
+ * Shortcut evaluation of logical operators such that an
+ expression a || b evaluates to the value a if a is
+ true without evaluating b at all
+ * JavaScript order of precedence for operators
+ * Arithmetic operators: + - * / %
+ * Boolean operators (including the ternary operator)
+ * Bitwise operators
+ * Comparison operators
+ * Common mathematic functions: abs ceil erf exp floor
+ lgamma ln log2 log10 max min sqrt pow
+ * Trigonometric library functions: acosh acos asinh asin
+ atanh atan cosh cos sinh sin tanh tan
+ * Random generation of a number between 0 and 1: rand
+ * Current time in epoch: time
+ * The min max functions that operate on a variable argument list
+
+ Intermediate results are calculated as double precision
+ floating point values. The final return value of a
+ RankExpression is automatically converted from floating
+ point to a 32-bit unsigned integer by rounding to the
+ nearest integer, with a natural floor of 0 and a ceiling
+ of max(uint32_t), 4294967295. Mathematical errors such as
+ dividing by 0 will fail during evaluation and return a
+ value of 0.
+
+ The source data for a RankExpression can be the name of an
+ IndexField of type uint, another RankExpression or the
+ reserved name text_relevance. The text_relevance source is
+ defined to return an integer from 0 to 1000 (inclusive) to
+ indicate how relevant a document is to the search request,
+ taking into account repetition of search terms in the
+ document and proximity of search terms to each other in
+ each matching IndexField in the document.
+
+ For more information about using rank expressions to
+ customize ranking, see the Amazon CloudSearch Developer
+ Guide.
+
+ :raises: BaseException, InternalException, LimitExceededException,
+ InvalidTypeException, ResourceNotFoundException
+ """
+ doc_path = ('define_rank_expression_response',
+ 'define_rank_expression_result',
+ 'rank_expression')
+ params = {'DomainName': domain_name,
+ 'RankExpression.RankExpression': rank_expression,
+ 'RankExpression.RankName': rank_name}
+ return self.get_response(doc_path, 'DefineRankExpression',
+ params, verb='POST')
+
+ def delete_domain(self, domain_name):
+ """
+ Delete a search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :raises: BaseException, InternalException
+ """
+ doc_path = ('delete_domain_response',
+ 'delete_domain_result',
+ 'domain_status')
+ params = {'DomainName': domain_name}
+ return self.get_response(doc_path, 'DeleteDomain',
+ params, verb='POST')
+
+ def delete_index_field(self, domain_name, field_name):
+ """
+ Deletes an existing ``IndexField`` from the search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type field_name: string
+ :param field_name: A string that represents the name of
+ an index field. Field names must begin with a letter and
+ can contain the following characters: a-z (lowercase),
+ 0-9, and _ (underscore). Uppercase letters and hyphens are
+ not allowed. The names "body", "docid", and
+ "text_relevance" are reserved and cannot be specified as
+ field or rank expression names.
+
+ :raises: BaseException, InternalException, ResourceNotFoundException
+ """
+ doc_path = ('delete_index_field_response',
+ 'delete_index_field_result',
+ 'index_field')
+ params = {'DomainName': domain_name,
+ 'IndexFieldName': field_name}
+ return self.get_response(doc_path, 'DeleteIndexField',
+ params, verb='POST')
+
+ def delete_rank_expression(self, domain_name, rank_name):
+ """
+ Deletes an existing ``RankExpression`` from the search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type rank_name: string
+ :param rank_name: Name of the ``RankExpression`` to delete.
+
+ :raises: BaseException, InternalException, ResourceNotFoundException
+ """
+ doc_path = ('delete_rank_expression_response',
+ 'delete_rank_expression_result',
+ 'rank_expression')
+ params = {'DomainName': domain_name, 'RankName': rank_name}
+ return self.get_response(doc_path, 'DeleteRankExpression',
+ params, verb='POST')
+
+ def describe_default_search_field(self, domain_name):
+ """
+ Describes options defining the default search field used by
+ indexing for the search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :raises: BaseException, InternalException, ResourceNotFoundException
+ """
+ doc_path = ('describe_default_search_field_response',
+ 'describe_default_search_field_result',
+ 'default_search_field')
+ params = {'DomainName': domain_name}
+ return self.get_response(doc_path, 'DescribeDefaultSearchField',
+ params, verb='POST')
+
+ def describe_domains(self, domain_names=None):
+ """
+ Describes the domains (optionally limited to one or more
+ domains by name) owned by this account.
+
+ :type domain_names: list
+ :param domain_names: Limits the response to the specified domains.
+
+ :raises: BaseException, InternalException
+ """
+ doc_path = ('describe_domains_response',
+ 'describe_domains_result',
+ 'domain_status_list')
+ params = {}
+ if domain_names:
+ for i, domain_name in enumerate(domain_names, 1):
+ params['DomainNames.member.%d' % i] = domain_name
+ return self.get_response(doc_path, 'DescribeDomains',
+ params, verb='POST',
+ list_marker='DomainStatusList')
+
+ def describe_index_fields(self, domain_name, field_names=None):
+ """
+ Describes index fields in the search domain, optionally
+ limited to a single ``IndexField``.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type field_names: list
+ :param field_names: Limits the response to the specified fields.
+
+ :raises: BaseException, InternalException, ResourceNotFoundException
+ """
+ doc_path = ('describe_index_fields_response',
+ 'describe_index_fields_result',
+ 'index_fields')
+ params = {'DomainName': domain_name}
+ if field_names:
+ for i, field_name in enumerate(field_names, 1):
+ params['FieldNames.member.%d' % i] = field_name
+ return self.get_response(doc_path, 'DescribeIndexFields',
+ params, verb='POST',
+ list_marker='IndexFields')
+
+ def describe_rank_expressions(self, domain_name, rank_names=None):
+ """
+ Describes RankExpressions in the search domain, optionally
+ limited to a single expression.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type rank_names: list
+ :param rank_names: Limit response to the specified rank names.
+
+ :raises: BaseException, InternalException, ResourceNotFoundException
+ """
+ doc_path = ('describe_rank_expressions_response',
+ 'describe_rank_expressions_result',
+ 'rank_expressions')
+ params = {'DomainName': domain_name}
+ if rank_names:
+ for i, rank_name in enumerate(rank_names, 1):
+ params['RankNames.member.%d' % i] = rank_name
+ return self.get_response(doc_path, 'DescribeRankExpressions',
+ params, verb='POST',
+ list_marker='RankExpressions')
+
+ def describe_service_access_policies(self, domain_name):
+ """
+ Describes the resource-based policies controlling access to
+ the services in this search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :raises: BaseException, InternalException, ResourceNotFoundException
+ """
+ doc_path = ('describe_service_access_policies_response',
+ 'describe_service_access_policies_result',
+ 'access_policies')
+ params = {'DomainName': domain_name}
+ return self.get_response(doc_path, 'DescribeServiceAccessPolicies',
+ params, verb='POST')
+
+ def describe_stemming_options(self, domain_name):
+ """
+ Describes stemming options used by indexing for the search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :raises: BaseException, InternalException, ResourceNotFoundException
+ """
+ doc_path = ('describe_stemming_options_response',
+ 'describe_stemming_options_result',
+ 'stems')
+ params = {'DomainName': domain_name}
+ return self.get_response(doc_path, 'DescribeStemmingOptions',
+ params, verb='POST')
+
+ def describe_stopword_options(self, domain_name):
+ """
+ Describes stopword options used by indexing for the search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :raises: BaseException, InternalException, ResourceNotFoundException
+ """
+ doc_path = ('describe_stopword_options_response',
+ 'describe_stopword_options_result',
+ 'stopwords')
+ params = {'DomainName': domain_name}
+ return self.get_response(doc_path, 'DescribeStopwordOptions',
+ params, verb='POST')
+
+ def describe_synonym_options(self, domain_name):
+ """
+ Describes synonym options used by indexing for the search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :raises: BaseException, InternalException, ResourceNotFoundException
+ """
+ doc_path = ('describe_synonym_options_response',
+ 'describe_synonym_options_result',
+ 'synonyms')
+ params = {'DomainName': domain_name}
+ return self.get_response(doc_path, 'DescribeSynonymOptions',
+ params, verb='POST')
+
+ def index_documents(self, domain_name):
+ """
+ Tells the search domain to start scanning its documents using
+ the latest text processing options and ``IndexFields``. This
+ operation must be invoked to make visible in searches any
+        options whose ``OptionStatus`` has ``OptionState`` of
+ ``RequiresIndexDocuments``.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :raises: BaseException, InternalException, ResourceNotFoundException
+ """
+ doc_path = ('index_documents_response',
+ 'index_documents_result',
+ 'field_names')
+ params = {'DomainName': domain_name}
+ return self.get_response(doc_path, 'IndexDocuments', params,
+ verb='POST', list_marker='FieldNames')
+
+ def update_default_search_field(self, domain_name, default_search_field):
+ """
+ Updates options defining the default search field used by
+ indexing for the search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type default_search_field: string
+ :param default_search_field: The IndexField to use for search
+ requests issued with the q parameter. The default is an
+ empty string, which automatically searches all text
+ fields.
+
+ :raises: BaseException, InternalException, InvalidTypeException,
+ ResourceNotFoundException
+ """
+ doc_path = ('update_default_search_field_response',
+ 'update_default_search_field_result',
+ 'default_search_field')
+ params = {'DomainName': domain_name,
+ 'DefaultSearchField': default_search_field}
+ return self.get_response(doc_path, 'UpdateDefaultSearchField',
+ params, verb='POST')
+
+ def update_service_access_policies(self, domain_name, access_policies):
+ """
+ Updates the policies controlling access to the services in
+ this search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type access_policies: string
+ :param access_policies: An IAM access policy as described in
+ The Access Policy Language in Using AWS Identity and
+ Access Management. The maximum size of an access policy
+ document is 100KB.
+
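+            A minimal policy document (illustrative; the ARN and CIDR
+            block are placeholders):
+            {"Statement": [{"Effect": "Allow", "Action": "*",
+            "Resource": "arn:aws:cs:...", "Condition":
+            {"IpAddress": {"aws:SourceIp": ["192.0.2.0/24"]}}}]}
+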
+ :raises: BaseException, InternalException, LimitExceededException,
+ ResourceNotFoundException, InvalidTypeException
+ """
+ doc_path = ('update_service_access_policies_response',
+ 'update_service_access_policies_result',
+ 'access_policies')
+ params = {'AccessPolicies': access_policies,
+ 'DomainName': domain_name}
+ return self.get_response(doc_path, 'UpdateServiceAccessPolicies',
+ params, verb='POST')
+
+ def update_stemming_options(self, domain_name, stems):
+ """
+ Updates stemming options used by indexing for the search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type stems: string
+ :param stems: Maps terms to their stems. The JSON object
+ has a single key called "stems" whose value is a
+ dict mapping terms to their stems. The maximum size
+ of a stemming document is 500KB.
+ Example: {"stems":{"people": "person", "walking":"walk"}}
+
+ :raises: BaseException, InternalException, InvalidTypeException,
+ LimitExceededException, ResourceNotFoundException
+ """
+ doc_path = ('update_stemming_options_response',
+ 'update_stemming_options_result',
+ 'stems')
+ params = {'DomainName': domain_name,
+ 'Stems': stems}
+ return self.get_response(doc_path, 'UpdateStemmingOptions',
+ params, verb='POST')
+
+ def update_stopword_options(self, domain_name, stopwords):
+ """
+ Updates stopword options used by indexing for the search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type stopwords: string
+ :param stopwords: Lists stopwords in a JSON object. The object has a
+ single key called "stopwords" whose value is an array of strings.
+ The maximum size of a stopwords document is 10KB. Example:
+ {"stopwords": ["a", "an", "the", "of"]}
+
+ :raises: BaseException, InternalException, InvalidTypeException,
+ LimitExceededException, ResourceNotFoundException
+ """
+ doc_path = ('update_stopword_options_response',
+ 'update_stopword_options_result',
+ 'stopwords')
+ params = {'DomainName': domain_name,
+ 'Stopwords': stopwords}
+ return self.get_response(doc_path, 'UpdateStopwordOptions',
+ params, verb='POST')
+
+ def update_synonym_options(self, domain_name, synonyms):
+ """
+ Updates synonym options used by indexing for the search domain.
+
+ :type domain_name: string
+ :param domain_name: A string that represents the name of a
+ domain. Domain names must be unique across the domains
+ owned by an account within an AWS region. Domain names
+ must start with a letter or number and can contain the
+ following characters: a-z (lowercase), 0-9, and -
+ (hyphen). Uppercase letters and underscores are not
+ allowed.
+
+ :type synonyms: string
+ :param synonyms: Maps terms to their synonyms. The JSON object
+ has a single key "synonyms" whose value is a dict mapping terms
+ to their synonyms. Each synonym is a simple string or an
+ array of strings. The maximum size of a stopwords document
+ is 100KB. Example:
+ {"synonyms": {"cat": ["feline", "kitten"], "puppy": "dog"}}
+
+ :raises: BaseException, InternalException, InvalidTypeException,
+ LimitExceededException, ResourceNotFoundException
+ """
+ doc_path = ('update_synonym_options_response',
+ 'update_synonym_options_result',
+ 'synonyms')
+ params = {'DomainName': domain_name,
+ 'Synonyms': synonyms}
+ return self.get_response(doc_path, 'UpdateSynonymOptions',
+ params, verb='POST')
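+
+# A minimal usage sketch (editor's illustration; the domain name 'demo'
+# and implicit credential discovery from the boto config/environment are
+# assumptions):
+#
+#     from boto.cloudsearch.layer1 import Layer1
+#     conn = Layer1()
+#     conn.update_stopword_options('demo',
+#         '{"stopwords": ["a", "an", "the", "of"]}')
+#     conn.index_documents('demo')  # make the new options searchable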
diff --git a/boto/cloudsearch/layer2.py b/boto/cloudsearch/layer2.py
new file mode 100644
index 0000000..af5c4d1
--- /dev/null
+++ b/boto/cloudsearch/layer2.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from .layer1 import Layer1
+from .domain import Domain
+
+
+class Layer2(object):
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None, proxy=None, proxy_port=None,
+ host=None, debug=0, session_token=None, region=None,
+ validate_certs=True):
+ self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key,
+ is_secure, port, proxy, proxy_port,
+ host, debug, session_token, region,
+ validate_certs=validate_certs)
+
+ def list_domains(self, domain_names=None):
+ """
+ Return a list of :class:`boto.cloudsearch.domain.Domain`
+ objects for each domain defined in the current account.
+ """
+ domain_data = self.layer1.describe_domains(domain_names)
+ return [Domain(self.layer1, data) for data in domain_data]
+
+ def create_domain(self, domain_name):
+ """
+ Create a new CloudSearch domain and return the corresponding
+ :class:`boto.cloudsearch.domain.Domain` object.
+ """
+ data = self.layer1.create_domain(domain_name)
+ return Domain(self.layer1, data)
+
+ def lookup(self, domain_name):
+ """
+        Lookup a single domain.
+
+ :param domain_name: The name of the domain to look up
+ :type domain_name: str
+
+ :return: Domain object, or None if the domain isn't found
+ :rtype: :class:`boto.cloudsearch.domain.Domain`
+ """
+ domains = self.list_domains(domain_names=[domain_name])
+ if len(domains) > 0:
+ return domains[0]
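+
+# A usage sketch (illustrative; 'demo' is an assumed domain name):
+#
+#     from boto.cloudsearch.layer2 import Layer2
+#     cs = Layer2()
+#     domain = cs.lookup('demo') or cs.create_domain('demo')
+#     print domain.name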
diff --git a/boto/cloudsearch/optionstatus.py b/boto/cloudsearch/optionstatus.py
new file mode 100644
index 0000000..869d82f
--- /dev/null
+++ b/boto/cloudsearch/optionstatus.py
@@ -0,0 +1,249 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import time
+
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+class OptionStatus(dict):
+ """
+ Presents a combination of status field (defined below) which are
+ accessed as attributes and option values which are stored in the
+ native Python dictionary. In this class, the option values are
+ merged from a JSON object that is stored as the Option part of
+ the object.
+
+ :ivar domain_name: The name of the domain this option is associated with.
+ :ivar create_date: A timestamp for when this option was created.
+ :ivar state: The state of processing a change to an option.
+ Possible values:
+
+ * RequiresIndexDocuments: the option's latest value will not
+ be visible in searches until IndexDocuments has been called
+ and indexing is complete.
+ * Processing: the option's latest value is not yet visible in
+ all searches but is in the process of being activated.
+ * Active: the option's latest value is completely visible.
+
+ :ivar update_date: A timestamp for when this option was updated.
+ :ivar update_version: A unique integer that indicates when this
+ option was last updated.
+ """
+
+ def __init__(self, domain, data=None, refresh_fn=None, save_fn=None):
+ self.domain = domain
+ self.refresh_fn = refresh_fn
+ self.save_fn = save_fn
+ self.refresh(data)
+
+ def _update_status(self, status):
+ self.creation_date = status['creation_date']
+        self.state = status['state']
+ self.update_date = status['update_date']
+ self.update_version = int(status['update_version'])
+
+ def _update_options(self, options):
+ if options:
+ self.update(json.loads(options))
+
+ def refresh(self, data=None):
+ """
+ Refresh the local state of the object. You can either pass
+ new state data in as the parameter ``data`` or, if that parameter
+ is omitted, the state data will be retrieved from CloudSearch.
+ """
+ if not data:
+ if self.refresh_fn:
+ data = self.refresh_fn(self.domain.name)
+ if data:
+ self._update_status(data['status'])
+ self._update_options(data['options'])
+
+ def to_json(self):
+ """
+ Return the JSON representation of the options as a string.
+ """
+ return json.dumps(self)
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'CreationDate':
+ self.created = value
+ elif name == 'State':
+ self.state = value
+ elif name == 'UpdateDate':
+ self.updated = value
+ elif name == 'UpdateVersion':
+ self.update_version = int(value)
+ elif name == 'Options':
+            self._update_options(value)
+ else:
+ setattr(self, name, value)
+
+ def save(self):
+ """
+ Write the current state of the local object back to the
+ CloudSearch service.
+ """
+ if self.save_fn:
+ data = self.save_fn(self.domain.name, self.to_json())
+ self.refresh(data)
+
+ def wait_for_state(self, state):
+ """
+ Performs polling of CloudSearch to wait for the ``state``
+ of this object to change to the provided state.
+ """
+ while self.state != state:
+ time.sleep(5)
+ self.refresh()
+
+
+class IndexFieldStatus(OptionStatus):
+
+ def _update_options(self, options):
+ self.update(options)
+
+ def save(self):
+ pass
+
+
+class RankExpressionStatus(IndexFieldStatus):
+
+ pass
+
+class ServicePoliciesStatus(OptionStatus):
+
+ def new_statement(self, arn, ip):
+ """
+        Returns a new policy statement that will allow
+        access to the service described by ``arn`` from the
+        IP address or CIDR block specified in ``ip``.
+
+ :type arn: string
+        :param arn: The Amazon Resource Name (ARN) of the
+ service you wish to provide access to. This would be
+ either the search service or the document service.
+
+ :type ip: string
+ :param ip: An IP address or CIDR block you wish to grant access
+ to.
+ """
+ return {
+ "Effect":"Allow",
+ "Action":"*", # Docs say use GET, but denies unless *
+ "Resource": arn,
+ "Condition": {
+ "IpAddress": {
+ "aws:SourceIp": [ip]
+ }
+ }
+ }
+
+ def _allow_ip(self, arn, ip):
+ if 'Statement' not in self:
+ s = self.new_statement(arn, ip)
+ self['Statement'] = [s]
+ self.save()
+ else:
+ add_statement = True
+ for statement in self['Statement']:
+ if statement['Resource'] == arn:
+ for condition_name in statement['Condition']:
+ if condition_name == 'IpAddress':
+ add_statement = False
+ condition = statement['Condition'][condition_name]
+ if ip not in condition['aws:SourceIp']:
+ condition['aws:SourceIp'].append(ip)
+
+ if add_statement:
+ s = self.new_statement(arn, ip)
+ self['Statement'].append(s)
+ self.save()
+
+ def allow_search_ip(self, ip):
+ """
+        Add the provided IP address or CIDR block to the list of
+        allowed addresses for the search service.
+
+ :type ip: string
+ :param ip: An IP address or CIDR block you wish to grant access
+ to.
+ """
+ arn = self.domain.search_service_arn
+ self._allow_ip(arn, ip)
+
+ def allow_doc_ip(self, ip):
+ """
+        Add the provided IP address or CIDR block to the list of
+        allowed addresses for the document service.
+
+ :type ip: string
+ :param ip: An IP address or CIDR block you wish to grant access
+ to.
+ """
+ arn = self.domain.doc_service_arn
+ self._allow_ip(arn, ip)
+
+ def _disallow_ip(self, arn, ip):
+ if 'Statement' not in self:
+ return
+ need_update = False
+ for statement in self['Statement']:
+ if statement['Resource'] == arn:
+ for condition_name in statement['Condition']:
+ if condition_name == 'IpAddress':
+ condition = statement['Condition'][condition_name]
+ if ip in condition['aws:SourceIp']:
+ condition['aws:SourceIp'].remove(ip)
+ need_update = True
+ if need_update:
+ self.save()
+
+ def disallow_search_ip(self, ip):
+ """
+        Remove the provided IP address or CIDR block from the list of
+        allowed addresses for the search service.
+
+        :type ip: string
+        :param ip: An IP address or CIDR block you wish to revoke
+            access for.
+ """
+ arn = self.domain.search_service_arn
+ self._disallow_ip(arn, ip)
+
+ def disallow_doc_ip(self, ip):
+ """
+        Remove the provided IP address or CIDR block from the list of
+        allowed addresses for the document service.
+
+        :type ip: string
+        :param ip: An IP address or CIDR block you wish to revoke
+            access for.
+ """
+ arn = self.domain.doc_service_arn
+ self._disallow_ip(arn, ip)
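+
+# A usage sketch (illustrative; assumes a layer2 Domain helper that
+# returns a ServicePoliciesStatus wired up with refresh/save functions):
+#
+#     policies = domain.get_access_policies()
+#     policies.allow_search_ip('192.0.2.0/24')
+#     policies.allow_doc_ip('192.0.2.0/24')  # each call saves via save_fn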
diff --git a/boto/cloudsearch/search.py b/boto/cloudsearch/search.py
new file mode 100644
index 0000000..f1b16e4
--- /dev/null
+++ b/boto/cloudsearch/search.py
@@ -0,0 +1,298 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from math import ceil
+import time
+import json
+import boto
+import requests
+
+
+class SearchServiceException(Exception):
+ pass
+
+
+class CommitMismatchError(Exception):
+ pass
+
+
+class SearchResults(object):
+
+ def __init__(self, **attrs):
+ self.rid = attrs['info']['rid']
+ # self.doc_coverage_pct = attrs['info']['doc-coverage-pct']
+ self.cpu_time_ms = attrs['info']['cpu-time-ms']
+ self.time_ms = attrs['info']['time-ms']
+ self.hits = attrs['hits']['found']
+ self.docs = attrs['hits']['hit']
+ self.start = attrs['hits']['start']
+ self.rank = attrs['rank']
+ self.match_expression = attrs['match-expr']
+ self.query = attrs['query']
+ self.search_service = attrs['search_service']
+
+        self.num_pages_needed = ceil(float(self.hits) / self.query.real_size)
+
+ def __len__(self):
+ return len(self.docs)
+
+ def __iter__(self):
+ return iter(self.docs)
+
+ def next_page(self):
+ """Call Cloudsearch to get the next page of search results
+
+ :rtype: :class:`exfm.cloudsearch.SearchResults`
+ :return: A cloudsearch SearchResults object
+ """
+ if self.query.page <= self.num_pages_needed:
+ self.query.start += self.query.real_size
+ self.query.page += 1
+ return self.search_service(self.query)
+ else:
+ raise StopIteration
+
+
+class Query(object):
+
+ RESULTS_PER_PAGE = 500
+
+ def __init__(self, q=None, bq=None, rank=None,
+ return_fields=None, size=10,
+ start=0, facet=None, facet_constraints=None,
+ facet_sort=None, facet_top_n=None, t=None):
+
+ self.q = q
+ self.bq = bq
+ self.rank = rank or []
+ self.return_fields = return_fields or []
+ self.start = start
+ self.facet = facet or []
+ self.facet_constraints = facet_constraints or {}
+ self.facet_sort = facet_sort or {}
+ self.facet_top_n = facet_top_n or {}
+ self.t = t or {}
+ self.page = 0
+ self.update_size(size)
+
+ def update_size(self, new_size):
+ self.size = new_size
+ self.real_size = Query.RESULTS_PER_PAGE if (self.size >
+ Query.RESULTS_PER_PAGE or self.size == 0) else self.size
+
+ def to_params(self):
+ """Transform search parameters from instance properties to a dictionary
+
+ :rtype: dict
+ :return: search parameters
+ """
+ params = {'start': self.start, 'size': self.real_size}
+
+ if self.q:
+ params['q'] = self.q
+
+ if self.bq:
+ params['bq'] = self.bq
+
+ if self.rank:
+ params['rank'] = ','.join(self.rank)
+
+ if self.return_fields:
+ params['return-fields'] = ','.join(self.return_fields)
+
+ if self.facet:
+ params['facet'] = ','.join(self.facet)
+
+ if self.facet_constraints:
+ for k, v in self.facet_constraints.iteritems():
+ params['facet-%s-constraints' % k] = v
+
+ if self.facet_sort:
+ for k, v in self.facet_sort.iteritems():
+ params['facet-%s-sort' % k] = v
+
+ if self.facet_top_n:
+ for k, v in self.facet_top_n.iteritems():
+ params['facet-%s-top-n' % k] = v
+
+ if self.t:
+ for k, v in self.t.iteritems():
+ params['t-%s' % k] = v
+ return params
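+
+    # For example (illustrative), a facet query serializes roughly as:
+    #
+    #     Query(q='tarzan', facet=['genre'],
+    #           facet_top_n={'genre': 5}).to_params()
+    #     => {'start': 0, 'size': 10, 'q': 'tarzan',
+    #         'facet': 'genre', 'facet-genre-top-n': 5}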
+
+
+class SearchConnection(object):
+
+ def __init__(self, domain=None, endpoint=None):
+ self.domain = domain
+ self.endpoint = endpoint
+ if not endpoint:
+ self.endpoint = domain.search_service_endpoint
+
+ def build_query(self, q=None, bq=None, rank=None, return_fields=None,
+ size=10, start=0, facet=None, facet_constraints=None,
+ facet_sort=None, facet_top_n=None, t=None):
+ return Query(q=q, bq=bq, rank=rank, return_fields=return_fields,
+ size=size, start=start, facet=facet,
+ facet_constraints=facet_constraints,
+ facet_sort=facet_sort, facet_top_n=facet_top_n, t=t)
+
+ def search(self, q=None, bq=None, rank=None, return_fields=None,
+ size=10, start=0, facet=None, facet_constraints=None,
+ facet_sort=None, facet_top_n=None, t=None):
+ """
+        Query CloudSearch
+
+        :type q: string
+        :param q: A free-form text query to run against the default
+            search field(s)
+
+        :type bq: string
+        :param bq: A structured boolean query expression
+
+        :type rank: list
+        :param rank: The fields or rank expressions to rank results by;
+            prefix a name with ``-`` to rank in descending order
+
+        :type return_fields: list
+        :param return_fields: The fields to include with each result
+
+        :type size: int
+        :param size: The number of results to return per page
+
+        :type start: int
+        :param start: The offset of the first result to return
+
+        :type facet: list
+        :param facet: The fields for which facet counts should be returned
+
+        :type facet_constraints: dict
+        :param facet_constraints: Maps a field name to the facet values
+            to count for that field
+
+        :type facet_sort: dict
+        :param facet_sort: Maps a field name to the sort order for its
+            facet values
+
+        :type facet_top_n: dict
+        :param facet_top_n: Maps a field name to the maximum number of
+            facet values to return for that field
+
+        :type t: dict
+        :param t: Maps a uint field name to a value or range filter,
+            e.g. ``{'year': '2000..2010'}``
+
+        :rtype: :class:`boto.cloudsearch.search.SearchResults`
+        :return: A cloudsearch SearchResults object
+ """
+
+ query = self.build_query(q=q, bq=bq, rank=rank,
+ return_fields=return_fields,
+ size=size, start=start, facet=facet,
+ facet_constraints=facet_constraints,
+ facet_sort=facet_sort,
+ facet_top_n=facet_top_n, t=t)
+ return self(query)
+
+ def __call__(self, query):
+ """Make a call to CloudSearch
+
+        :type query: :class:`boto.cloudsearch.search.Query`
+        :param query: A fully specified Query instance
+
+        :rtype: :class:`boto.cloudsearch.search.SearchResults`
+ :return: A cloudsearch SearchResults object
+ """
+ url = "http://%s/2011-02-01/search" % (self.endpoint)
+ params = query.to_params()
+
+ r = requests.get(url, params=params)
+ data = json.loads(r.content)
+ data['query'] = query
+ data['search_service'] = self
+
+ if 'messages' in data and 'error' in data:
+ for m in data['messages']:
+ if m['severity'] == 'fatal':
+ raise SearchServiceException("Error processing search %s "
+ "=> %s" % (params, m['message']), query)
+ elif 'error' in data:
+ raise SearchServiceException("Unknown error processing search %s"
+ % (params), query)
+
+ return SearchResults(**data)
+
+ def get_all_paged(self, query, per_page):
+ """Get a generator to iterate over all pages of search results
+
+        :type query: :class:`boto.cloudsearch.search.Query`
+        :param query: A fully specified Query instance
+
+        :type per_page: int
+        :param per_page: Number of docs in each SearchResults object.
+
+        :rtype: generator
+        :return: Generator containing :class:`boto.cloudsearch.search.SearchResults`
+ """
+ query.update_size(per_page)
+ page = 0
+ num_pages_needed = 0
+ while page <= num_pages_needed:
+ results = self(query)
+ num_pages_needed = results.num_pages_needed
+ yield results
+ query.start += query.real_size
+ page += 1
+
+ def get_all_hits(self, query):
+ """Get a generator to iterate over all search results
+
+        Transparently handles paging through CloudSearch results, so
+        even queries with many thousands of hits can be iterated over
+        in a reasonably efficient manner.
+
+        :type query: :class:`boto.cloudsearch.search.Query`
+ :param query: A fully specified Query instance
+
+ :rtype: generator
+ :return: All docs matching query
+ """
+ page = 0
+ num_pages_needed = 0
+ while page <= num_pages_needed:
+ results = self(query)
+ num_pages_needed = results.num_pages_needed
+ for doc in results:
+ yield doc
+ query.start += query.real_size
+ page += 1
+
+ def get_num_hits(self, query):
+ """Return the total number of hits for query
+
+        :type query: :class:`boto.cloudsearch.search.Query`
+ :param query: A fully specified Query instance
+
+ :rtype: int
+ :return: Total number of hits for query
+ """
+ query.update_size(1)
+ return self(query).hits
+
+
+
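+# A usage sketch (illustrative; the endpoint value is a placeholder):
+#
+#     from boto.cloudsearch.search import SearchConnection
+#     conn = SearchConnection(
+#         endpoint='search-demo-xxxxxxxxxxxx.us-east-1.cloudsearch.amazonaws.com')
+#     results = conn.search(q='tarzan', return_fields=['title'])
+#     for doc in results:
+#         print doc
+#     print conn.get_num_hits(conn.build_query(q='tarzan'))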
diff --git a/boto/cloudsearch/sourceattribute.py b/boto/cloudsearch/sourceattribute.py
new file mode 100644
index 0000000..c343507
--- /dev/null
+++ b/boto/cloudsearch/sourceattribute.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class SourceAttribute(object):
+ """
+ Provide information about attributes for an index field.
+ A maximum of 20 source attributes can be configured for
+ each index field.
+
+ :ivar default: Optional default value if the source attribute
+ is not specified in a document.
+
+ :ivar name: The name of the document source field to add
+ to this ``IndexField``.
+
+ :ivar data_function: Identifies the transformation to apply
+ when copying data from a source attribute.
+
+ :ivar data_map: The value is a dict with the following keys:
+ * cases - A dict that translates source field values
+ to custom values.
+ * default - An optional default value to use if the
+ source attribute is not specified in a document.
+        * name - The name of the document source field to add
+          to this ``IndexField``.
+ :ivar data_trim_title: Trims common title words from a source
+ document attribute when populating an ``IndexField``.
+ This can be used to create an ``IndexField`` you can
+ use for sorting. The value is a dict with the following
+ fields:
+ * default - An optional default value.
+ * language - an IETF RFC 4646 language code.
+ * separator - The separator that follows the text to trim.
+ * name - The name of the document source field to add.
+ """
+
+ ValidDataFunctions = ('Copy', 'TrimTitle', 'Map')
+
+ def __init__(self):
+ self.data_copy = {}
+ self._data_function = self.ValidDataFunctions[0]
+ self.data_map = {}
+ self.data_trim_title = {}
+
+ @property
+ def data_function(self):
+ return self._data_function
+
+ @data_function.setter
+ def data_function(self, value):
+ if value not in self.ValidDataFunctions:
+ valid = '|'.join(self.ValidDataFunctions)
+ raise ValueError('data_function must be one of: %s' % valid)
+ self._data_function = value
+
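+# A usage sketch (illustrative):
+#
+#     sa = SourceAttribute()
+#     sa.data_function = 'TrimTitle'
+#     sa.data_trim_title = {'separator': ':', 'language': 'en'}
+#     sa.data_function = 'Bogus'  # raises ValueError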
diff --git a/boto/connection.py b/boto/connection.py
index 3c9f237..080ff5e 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -1,4 +1,5 @@
-# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010 Google
# Copyright (c) 2008 rPath, Inc.
# Copyright (c) 2009 The Echo Nest Corporation
@@ -53,8 +54,10 @@
import socket
import sys
import time
-import urllib, urlparse
+import urllib
+import urlparse
import xml.sax
+import copy
import auth
import auth_handler
@@ -64,7 +67,8 @@
import boto.cacerts
from boto import config, UserAgent
-from boto.exception import AWSConnectionError, BotoClientError, BotoServerError
+from boto.exception import AWSConnectionError, BotoClientError
+from boto.exception import BotoServerError
from boto.provider import Provider
from boto.resultset import ResultSet
@@ -86,10 +90,11 @@
ON_APP_ENGINE = all(key in os.environ for key in (
'USER_IS_ADMIN', 'CURRENT_VERSION_ID', 'APPLICATION_ID'))
-PORTS_BY_SECURITY = { True: 443, False: 80 }
+PORTS_BY_SECURITY = {True: 443,
+ False: 80}
-DEFAULT_CA_CERTS_FILE = os.path.join(
- os.path.dirname(os.path.abspath(boto.cacerts.__file__ )), "cacerts.txt")
+DEFAULT_CA_CERTS_FILE = os.path.join(os.path.dirname(os.path.abspath(boto.cacerts.__file__ )), "cacerts.txt")
+
class HostConnectionPool(object):
@@ -127,7 +132,7 @@
ready to be returned by get().
"""
return len(self.queue)
-
+
def put(self, conn):
"""
Adds a connection to the pool, along with the time it was
@@ -169,13 +174,13 @@
state we care about isn't available in any public methods.
"""
if ON_APP_ENGINE:
- # Google App Engine implementation of HTTPConnection doesn't contain
+ # Google AppEngine implementation of HTTPConnection doesn't contain
# _HTTPConnection__response attribute. Moreover, it's not possible
# to determine if given connection is ready. Reusing connections
# simply doesn't make sense with App Engine urlfetch service.
return False
else:
- response = conn._HTTPConnection__response
+ response = getattr(conn, '_HTTPConnection__response', None)
return (response is None) or response.isclosed()
def clean(self):
@@ -196,6 +201,7 @@
now = time.time()
return return_time + ConnectionPool.STALE_DURATION < now
+
class ConnectionPool(object):
"""
@@ -209,7 +215,7 @@
#
# The amout of time between calls to clean.
#
-
+
CLEAN_INTERVAL = 5.0
#
@@ -232,6 +238,18 @@
# The last time the pool was cleaned.
self.last_clean_time = 0.0
self.mutex = threading.Lock()
+ ConnectionPool.STALE_DURATION = \
+ config.getfloat('Boto', 'connection_stale_duration',
+ ConnectionPool.STALE_DURATION)
+
+ def __getstate__(self):
+ pickled_dict = copy.copy(self.__dict__)
+ pickled_dict['host_to_pool'] = {}
+ del pickled_dict['mutex']
+ return pickled_dict
+
+ def __setstate__(self, dct):
+ self.__init__()
def size(self):
"""
@@ -242,7 +260,9 @@
def get_http_connection(self, host, is_secure):
"""
Gets a connection from the pool for the named host. Returns
- None if there is no connection that can be reused.
+ None if there is no connection that can be reused. It's the caller's
+ responsibility to call close() on the connection when it's no longer
+ needed.
"""
self.clean()
with self.mutex:
@@ -268,7 +288,7 @@
get rid of empty pools. Pools clean themselves every time a
connection is fetched; this cleaning takes care of pools that
aren't being used any more, so nothing is being gotten from
- them.
+ them.
"""
with self.mutex:
now = time.time()
@@ -282,6 +302,7 @@
del self.host_to_pool[host]
self.last_clean_time = now
+
class HTTPRequest(object):
def __init__(self, method, protocol, host, port, path, auth_path,
@@ -299,26 +320,26 @@
:type port: int
:param port: port on which the request is being sent. Zero means unset,
- in which case default port will be chosen.
+ in which case default port will be chosen.
:type path: string
- :param path: URL path that is bein accessed.
+ :param path: URL path that is being accessed.
:type auth_path: string
:param path: The part of the URL path used when creating the
- authentication string.
+ authentication string.
:type params: dict
- :param params: HTTP url query parameters, with key as name of the param,
- and value as value of param.
+ :param params: HTTP url query parameters, with key as name of
+ the param, and value as value of param.
:type headers: dict
:param headers: HTTP headers, with key as name of the header and value
- as value of header.
+ as value of header.
:type body: string
:param body: Body of the HTTP request. If not present, will be None or
- empty string ('').
+ empty string ('').
"""
self.method = method
self.protocol = protocol
@@ -356,17 +377,50 @@
self.headers['User-Agent'] = UserAgent
# I'm not sure if this is still needed, now that add_auth is
# setting the content-length for POST requests.
- if not self.headers.has_key('Content-Length'):
- if not self.headers.has_key('Transfer-Encoding') or \
+ if 'Content-Length' not in self.headers:
+ if 'Transfer-Encoding' not in self.headers or \
self.headers['Transfer-Encoding'] != 'chunked':
self.headers['Content-Length'] = str(len(self.body))
+
+class HTTPResponse(httplib.HTTPResponse):
+
+ def __init__(self, *args, **kwargs):
+ httplib.HTTPResponse.__init__(self, *args, **kwargs)
+ self._cached_response = ''
+
+ def read(self, amt=None):
+ """Read the response.
+
+ This method does not have the same behavior as
+ httplib.HTTPResponse.read. Instead, if this method is called with
+ no ``amt`` arg, then the response body will be cached. Subsequent
+ calls to ``read()`` with no args **will return the cached response**.
+
+ """
+ if amt is None:
+ # The reason for doing this is that many places in boto call
+ # response.read() and except to get the response body that they
+ # can then process. To make sure this always works as they expect
+ # we're caching the response so that multiple calls to read()
+ # will return the full body. Note that this behavior only
+ # happens if the amt arg is not specified.
+ if not self._cached_response:
+ self._cached_response = httplib.HTTPResponse.read(self)
+ return self._cached_response
+ else:
+ return httplib.HTTPResponse.read(self, amt)
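+
+    # For example (illustrative): with this response class installed,
+    # repeated whole-body reads return the same cached bytes instead of '':
+    #
+    #     body = response.read()   # reads and caches the full body
+    #     assert response.read() == body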
+
+
class AWSAuthConnection(object):
- def __init__(self, host, aws_access_key_id=None, aws_secret_access_key=None,
+ def __init__(self, host, aws_access_key_id=None,
+ aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, path='/',
- provider='aws', security_token=None):
+ provider='aws', security_token=None,
+ suppress_consec_slashes=True,
+ validate_certs=True):
"""
:type host: str
:param host: The host to make the connection to
@@ -383,9 +437,8 @@
:type https_connection_factory: list or tuple
:param https_connection_factory: A pair of an HTTP connection
- factory and the exceptions to catch.
- The factory should have a similar
- interface to L{httplib.HTTPSConnection}.
+ factory and the exceptions to catch. The factory should have
+ a similar interface to L{httplib.HTTPSConnection}.
:param str proxy: Address/hostname for a proxy server
@@ -400,16 +453,28 @@
:type port: int
:param port: The port to use to connect
+
+ :type suppress_consec_slashes: bool
+ :param suppress_consec_slashes: If provided, controls whether
+ consecutive slashes will be suppressed in key paths.
+
+ :type validate_certs: bool
+ :param validate_certs: Controls whether SSL certificates
+ will be validated or not. Defaults to True.
"""
+ self.suppress_consec_slashes = suppress_consec_slashes
self.num_retries = 6
# Override passed-in is_secure setting if value was defined in config.
if config.has_option('Boto', 'is_secure'):
is_secure = config.getboolean('Boto', 'is_secure')
self.is_secure = is_secure
- # Whether or not to validate server certificates. At some point in the
- # future, the default should be flipped to true.
+ # Whether or not to validate server certificates.
+ # The default is now to validate certificates. This can be
+        # overridden in the boto config file or by passing an
+ # explicit validate_certs parameter to the class constructor.
self.https_validate_certificates = config.getbool(
- 'Boto', 'https_validate_certificates', False)
+ 'Boto', 'https_validate_certificates',
+ validate_certs)
if self.https_validate_certificates and not HAVE_HTTPS_CONNECTION:
raise BotoClientError(
"SSL server certificate validation is enabled in boto "
@@ -426,7 +491,6 @@
# define subclasses of the above that are not retryable.
self.http_unretryable_exceptions = []
if HAVE_HTTPS_CONNECTION:
- self.http_unretryable_exceptions.append(ssl.SSLError)
self.http_unretryable_exceptions.append(
https_connection.InvalidCertificateException)
@@ -443,10 +507,10 @@
self.protocol = 'http'
self.host = host
self.path = path
- if debug:
- self.debug = debug
- else:
- self.debug = config.getint('Boto', 'debug', debug)
+        # If the value passed in for debug is not an int, fall back to 0.
+ if not isinstance(debug, (int, long)):
+ debug = 0
+ self.debug = config.getint('Boto', 'debug', debug)
if port:
self.port = port
else:
@@ -463,10 +527,15 @@
timeout = config.getint('Boto', 'http_socket_timeout')
self.http_connection_kwargs['timeout'] = timeout
- self.provider = Provider(provider,
- aws_access_key_id,
- aws_secret_access_key,
- security_token)
+ if isinstance(provider, Provider):
+ # Allow overriding Provider
+ self.provider = provider
+ else:
+ self._provider_type = provider
+ self.provider = Provider(self._provider_type,
+ aws_access_key_id,
+ aws_secret_access_key,
+ security_token)
# allow config file to override default host
if self.provider.host:
@@ -501,6 +570,12 @@
secret_key = aws_secret_access_key
def get_path(self, path='/'):
+ # The default behavior is to suppress consecutive slashes for reasons
+ # discussed at
+ # https://groups.google.com/forum/#!topic/boto-dev/-ft0XPUy0y8
+ # You can override that behavior with the suppress_consec_slashes param.
+ if not self.suppress_consec_slashes:
+ return self.path + re.sub('^/*', "", path)
pos = path.find('?')
if pos >= 0:
params = path[pos:]
@@ -546,7 +621,7 @@
self.proxy_port = proxy_port
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
- if os.environ.has_key('http_proxy') and not self.proxy:
+ if 'http_proxy' in os.environ and not self.proxy:
pattern = re.compile(
'(?:http://)?' \
'(?:(?P<user>\w+):(?P<pass>.*)@)?' \
@@ -605,7 +680,13 @@
else:
boto.log.debug('establishing HTTP connection: kwargs=%s' %
self.http_connection_kwargs)
- connection = httplib.HTTPConnection(host,
+ if self.https_connection_factory:
+ # even though the factory says https, this is too handy
+ # to not be able to allow overriding for http also.
+ connection = self.https_connection_factory(host,
+ **self.http_connection_kwargs)
+ else:
+ connection = httplib.HTTPConnection(host,
**self.http_connection_kwargs)
if self.debug > 1:
connection.set_debuglevel(self.debug)
@@ -614,6 +695,9 @@
# set a private variable which will enable that
if host.split(':')[0] == self.host and is_secure == self.is_secure:
self._connection = (host, is_secure)
+ # Set the response class of the http connection to use our custom
+ # class.
+ connection.response_class = HTTPResponse
return connection
def put_http_connection(self, host, is_secure, connection):
@@ -632,7 +716,12 @@
if self.proxy_user and self.proxy_pass:
for k, v in self.get_proxy_auth_header().items():
sock.sendall("%s: %s\r\n" % (k, v))
- sock.sendall("\r\n")
+ # See discussion about this config option at
+ # https://groups.google.com/forum/?fromgroups#!topic/boto-dev/teenFvOq2Cc
+ if config.getbool('Boto', 'send_crlf_after_proxy_auth_headers', False):
+ sock.sendall("\r\n")
+ else:
+ sock.sendall("\r\n")
resp = httplib.HTTPResponse(sock, strict=True, debuglevel=self.debug)
resp.begin()
@@ -641,7 +730,8 @@
# been generated by the socket library
raise socket.error(-71,
"Error talking to HTTP proxy %s:%s: %s (%s)" %
- (self.proxy, self.proxy_port, resp.status, resp.reason))
+ (self.proxy, self.proxy_port,
+ resp.status, resp.reason))
# We can safely close the response, it duped the original socket
resp.close()
@@ -683,7 +773,8 @@
auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
return {'Proxy-Authorization': 'Basic %s' % auth}
- def _mexe(self, request, sender=None, override_num_retries=None):
+ def _mexe(self, request, sender=None, override_num_retries=None,
+ retry_handler=None):
"""
mexe - Multi-execute inside a loop, retrying multiple times to handle
transient Internet errors by simply trying again.
@@ -691,6 +782,7 @@
This code was inspired by the S3Utils classes posted to the boto-users
Google group by Larry Bates. Thanks!
+
"""
boto.log.debug('Method: %s' % request.method)
boto.log.debug('Path: %s' % request.path)
@@ -711,35 +803,51 @@
next_sleep = random.random() * (2 ** i)
try:
# we now re-sign each request before it is retried
+ boto.log.debug('Token: %s' % self.provider.security_token)
request.authorize(connection=self)
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
else:
- connection.request(request.method, request.path, request.body,
- request.headers)
+ connection.request(request.method, request.path,
+ request.body, request.headers)
response = connection.getresponse()
location = response.getheader('location')
# -- gross hack --
# httplib gets confused with chunked responses to HEAD requests
# so I have to fake it out
- if request.method == 'HEAD' and getattr(response, 'chunked', False):
+ if request.method == 'HEAD' and getattr(response,
+ 'chunked', False):
response.chunked = 0
+ if callable(retry_handler):
+ status = retry_handler(response, i, next_sleep)
+ if status:
+ msg, i, next_sleep = status
+ if msg:
+ boto.log.debug(msg)
+ time.sleep(next_sleep)
+ continue
if response.status == 500 or response.status == 503:
- boto.log.debug('received %d response, retrying in %3.1f seconds' %
- (response.status, next_sleep))
+ msg = 'Received %d response. ' % response.status
+ msg += 'Retrying in %3.1f seconds' % next_sleep
+ boto.log.debug(msg)
body = response.read()
elif response.status < 300 or response.status >= 400 or \
not location:
- self.put_http_connection(request.host, self.is_secure, connection)
+ self.put_http_connection(request.host, self.is_secure,
+ connection)
return response
else:
- scheme, request.host, request.path, params, query, fragment = \
- urlparse.urlparse(location)
+ scheme, request.host, request.path, \
+ params, query, fragment = urlparse.urlparse(location)
if query:
request.path += '?' + query
- boto.log.debug('Redirecting: %s' % scheme + '://' + request.host + request.path)
- connection = self.get_http_connection(request.host, scheme == 'https')
+ msg = 'Redirecting: %s' % scheme + '://'
+ msg += request.host + request.path
+ boto.log.debug(msg)
+ connection = self.get_http_connection(request.host,
+ scheme == 'https')
+ response = None
continue
except self.http_exceptions, e:
for unretryable in self.http_unretryable_exceptions:
@@ -750,18 +858,21 @@
raise e
boto.log.debug('encountered %s exception, reconnecting' % \
e.__class__.__name__)
- connection = self.new_http_connection(request.host, self.is_secure)
+ connection = self.new_http_connection(request.host,
+ self.is_secure)
time.sleep(next_sleep)
i += 1
- # If we made it here, it's because we have exhausted our retries and stil haven't
- # succeeded. So, if we have a response object, use it to raise an exception.
- # Otherwise, raise the exception that must have already happened.
+        # If we made it here, it's because we have exhausted our retries
+        # and still haven't succeeded. So, if we have a response object,
+        # use it to raise an exception.
+        # Otherwise, raise the exception that must have already happened.
if response:
raise BotoServerError(response.status, response.reason, body)
elif e:
raise e
else:
- raise BotoClientError('Please report this exception as a Boto Issue!')
+ msg = 'Please report this exception as a Boto Issue!'
+ raise BotoClientError(msg)
def build_base_http_request(self, method, path, auth_path,
params=None, headers=None, data='', host=None):
@@ -789,10 +900,13 @@
path, auth_path, params, headers, data)
def make_request(self, method, path, headers=None, data='', host=None,
- auth_path=None, sender=None, override_num_retries=None):
+ auth_path=None, sender=None, override_num_retries=None,
+ params=None):
"""Makes a request to the server, with stock multiple-retry logic."""
+ if params is None:
+ params = {}
http_request = self.build_base_http_request(method, path, auth_path,
- {}, headers, data, host)
+ params, headers, data, host)
return self._mexe(http_request, sender, override_num_retries)
def close(self):
@@ -800,7 +914,8 @@
and making a new request will open a connection again."""
boto.log.debug('closing all HTTP connections')
- self.connection = None # compat field
+ self._connection = None # compat field
+
class AWSQueryConnection(AWSAuthConnection):
@@ -810,13 +925,15 @@
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host=None, debug=0,
- https_connection_factory=None, path='/', security_token=None):
+ https_connection_factory=None, path='/', security_token=None,
+ validate_certs=True):
AWSAuthConnection.__init__(self, host, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
debug, https_connection_factory, path,
- security_token=security_token)
+ security_token=security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return []
@@ -830,7 +947,8 @@
self.server_name())
if action:
http_request.params['Action'] = action
- http_request.params['Version'] = self.APIVersion
+ if self.APIVersion:
+ http_request.params['Version'] = self.APIVersion
return self._mexe(http_request)
def build_list_params(self, params, items, label):
diff --git a/boto/core/README b/boto/core/README
new file mode 100644
index 0000000..9c3f217
--- /dev/null
+++ b/boto/core/README
@@ -0,0 +1,58 @@
+What's This All About?
+======================
+
+This directory contains the beginnings of what is hoped will be the
+new core of boto. We want to move from using httplib to using
+requests. We also want to offer full support for Python 2.6, 2.7, and
+3.x. This is a pretty big change and will require some time to roll
+out but this module provides a starting point.
+
+What you will find in this module:
+
+* auth.py provides a SigV2 authentication package as an args hook for requests.
+* credentials.py provides a way of finding AWS credentials (see below).
+* dictresponse.py provides a generic response handler that parses XML responses
+ and returns them as nested Python data structures.
+* service.py provides a simple example of a service that actually makes an EC2
+ request and returns a response.
+
+Credentials
+===========
+
+Credentials are being handled a bit differently here. The following
+describes the order of search for credentials:
+
+1. If your local environment has ACCESS_KEY and SECRET_KEY variables
+ defined, these will be used.
+
+2. If your local environment has AWS_CREDENTIAL_FILE defined, it is assumed
+ that it will be a config file with entries like this:
+
+ [default]
+ access_key = xxxxxxxxxxxxxxxx
+    secret_key = xxxxxxxxxxxxxxxxxx
+
+ [test]
+ access_key = yyyyyyyyyyyyyy
+ secret_key = yyyyyyyyyyyyyyyyyy
+
+ Each section in the config file is called a persona and you can reference
+ a particular persona by name when instantiating a Service class.
+
+3. If a standard boto config file is found that contains credentials, those
+ will be used.
+
+4. If temporary credentials for an IAM Role are found in the instance
+ metadata of an EC2 instance, these credentials will be used.
+
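+For example, to use the [test] persona from the file above (assuming
+the Service constructor accepts the persona name, as described):
+
+    >>> import core.service
+    >>> s = core.service.Service(persona='test')
+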
+Trying Things Out
+=================
+To try this code out, cd to the directory containing the core module.
+
+ >>> import core.service
+ >>> s = core.service.Service()
+ >>> s.describe_instances()
+
+This code should return a Python data structure containing information
+about your currently running EC2 instances. This example should run in
+Python 2.6.x, 2.7.x and Python 3.x.
\ No newline at end of file
diff --git a/boto/core/__init__.py b/boto/core/__init__.py
new file mode 100644
index 0000000..e27666d
--- /dev/null
+++ b/boto/core/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/boto/core/auth.py b/boto/core/auth.py
new file mode 100644
index 0000000..890faa5
--- /dev/null
+++ b/boto/core/auth.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import requests.packages.urllib3
+import hmac
+import base64
+from hashlib import sha256
+import sys
+import datetime
+
+try:
+ from urllib.parse import quote
+except ImportError:
+ from urllib import quote
+
+
+class SigV2Auth(object):
+ """
+    Sign a Query Signature V2 request.
+ """
+ def __init__(self, credentials, api_version=''):
+ self.credentials = credentials
+ self.api_version = api_version
+ self.hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
+ digestmod=sha256)
+
+ def calc_signature(self, args):
+ scheme, host, port = requests.packages.urllib3.get_host(args['url'])
+ string_to_sign = '%s\n%s\n%s\n' % (args['method'], host, '/')
+ hmac = self.hmac.copy()
+ args['params']['SignatureMethod'] = 'HmacSHA256'
+ if self.credentials.token:
+ args['params']['SecurityToken'] = self.credentials.token
+ sorted_params = sorted(args['params'])
+ pairs = []
+ for key in sorted_params:
+ value = args['params'][key]
+ pairs.append(quote(key, safe='') + '=' +
+ quote(value, safe='-_~'))
+ qs = '&'.join(pairs)
+ string_to_sign += qs
+ hmac.update(string_to_sign.encode('utf-8'))
+ b64 = base64.b64encode(hmac.digest()).strip().decode('utf-8')
+ return (qs, b64)
+
+ def add_auth(self, args):
+ args['params']['Action'] = 'DescribeInstances'
+ args['params']['AWSAccessKeyId'] = self.credentials.access_key
+ args['params']['SignatureVersion'] = '2'
+ args['params']['Timestamp'] = datetime.datetime.utcnow().isoformat()
+ args['params']['Version'] = self.api_version
+ qs, signature = self.calc_signature(args)
+ args['params']['Signature'] = signature
+ if args['method'] == 'POST':
+ args['data'] = args['params']
+ args['params'] = {}
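
The signing scheme implemented by SigV2Auth above can be reproduced on
its own. A minimal sketch of the Signature Version 2 string-to-sign and
HMAC-SHA256 digest (the host, parameters, and secret key are
illustrative):

    import base64
    import hmac
    from hashlib import sha256
    try:
        from urllib.parse import quote
    except ImportError:
        from urllib import quote

    params = {'Action': 'DescribeInstances',
              'SignatureMethod': 'HmacSHA256',
              'SignatureVersion': '2'}
    # Canonical query string: keys sorted, URL-encoded as in calc_signature.
    pairs = ['%s=%s' % (quote(k, safe=''), quote(v, safe='-_~'))
             for k, v in sorted(params.items())]
    string_to_sign = 'POST\nec2.us-east-1.amazonaws.com\n/\n' + '&'.join(pairs)
    digest = hmac.new(b'illustrative-secret',
                      string_to_sign.encode('utf-8'), sha256).digest()
    signature = base64.b64encode(digest).strip()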
diff --git a/boto/core/credentials.py b/boto/core/credentials.py
new file mode 100644
index 0000000..1f315a3
--- /dev/null
+++ b/boto/core/credentials.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import os
+from six.moves import configparser
+import requests
+import json
+
+
+class Credentials(object):
+ """
+ Holds the credentials needed to authenticate requests. In addition
+ the Credential object knows how to search for credentials and how
+ to choose the right credentials when multiple credentials are found.
+ """
+
+ def __init__(self, access_key=None, secret_key=None, token=None):
+ self.access_key = access_key
+ self.secret_key = secret_key
+ self.token = token
+
+
+def _search_md(url='http://169.254.169.254/latest/meta-data/iam/'):
+ d = {}
+ try:
+ r = requests.get(url, timeout=.1)
+ if r.content:
+ fields = r.content.split('\n')
+ for field in fields:
+ if field.endswith('/'):
+                    d[field[0:-1]] = _search_md(url + field)
+ else:
+ val = requests.get(url + field).content
+ if val[0] == '{':
+ val = json.loads(val)
+ else:
+ p = val.find('\n')
+ if p > 0:
+                            val = val.split('\n')
+ d[field] = val
+ except (requests.Timeout, requests.ConnectionError):
+ pass
+ return d
+
+
+def search_metadata(**kwargs):
+ credentials = None
+ metadata = _search_md()
+ # Assuming there's only one role on the instance profile.
+ if metadata:
+        metadata = list(metadata['iam']['security-credentials'].values())[0]
+ credentials = Credentials(metadata['AccessKeyId'],
+ metadata['SecretAccessKey'],
+ metadata['Token'])
+ return credentials
+
+
+def search_environment(**kwargs):
+ """
+ Search for credentials in explicit environment variables.
+ """
+ credentials = None
+ access_key = os.environ.get(kwargs['access_key_name'].upper(), None)
+ secret_key = os.environ.get(kwargs['secret_key_name'].upper(), None)
+ if access_key and secret_key:
+ credentials = Credentials(access_key, secret_key)
+ return credentials
+
+
+def search_file(**kwargs):
+ """
+ If the 'AWS_CREDENTIAL_FILE' environment variable exists, parse that
+ file for credentials.
+ """
+ credentials = None
+ if 'AWS_CREDENTIAL_FILE' in os.environ:
+ persona = kwargs.get('persona', 'default')
+ access_key_name = kwargs['access_key_name']
+ secret_key_name = kwargs['secret_key_name']
+ access_key = secret_key = None
+ path = os.getenv('AWS_CREDENTIAL_FILE')
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ cp = configparser.RawConfigParser()
+ cp.read(path)
+ if not cp.has_section(persona):
+ raise ValueError('Persona: %s not found' % persona)
+ if cp.has_option(persona, access_key_name):
+ access_key = cp.get(persona, access_key_name)
+ else:
+ access_key = None
+ if cp.has_option(persona, secret_key_name):
+ secret_key = cp.get(persona, secret_key_name)
+ else:
+ secret_key = None
+ if access_key and secret_key:
+ credentials = Credentials(access_key, secret_key)
+ return credentials
+
+
+def search_boto_config(**kwargs):
+ """
+ Look for credentials in boto config file.
+ """
+ credentials = access_key = secret_key = None
+ if 'BOTO_CONFIG' in os.environ:
+ paths = [os.environ['BOTO_CONFIG']]
+ else:
+ paths = ['/etc/boto.cfg', '~/.boto']
+ paths = [os.path.expandvars(p) for p in paths]
+ paths = [os.path.expanduser(p) for p in paths]
+ cp = configparser.RawConfigParser()
+ cp.read(paths)
+ if cp.has_section('Credentials'):
+ access_key = cp.get('Credentials', 'aws_access_key_id')
+ secret_key = cp.get('Credentials', 'aws_secret_access_key')
+ if access_key and secret_key:
+ credentials = Credentials(access_key, secret_key)
+ return credentials
+
+AllCredentialFunctions = [search_environment,
+ search_file,
+ search_boto_config,
+ search_metadata]
+
+
+def get_credentials(persona='default'):
+ for cred_fn in AllCredentialFunctions:
+ credentials = cred_fn(persona=persona,
+ access_key_name='access_key',
+ secret_key_name='secret_key')
+ if credentials:
+ break
+ return credentials
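
Because the search functions are collected in AllCredentialFunctions,
additional credential sources can be spliced into the search order. A
sketch (search_static and its hard-coded keys are purely illustrative):

    from boto.core import credentials

    def search_static(**kwargs):
        # Illustrative source that always wins; real code would look
        # the keys up somewhere sensible.
        return credentials.Credentials('AKIDEXAMPLE', 'SECRETEXAMPLE')

    credentials.AllCredentialFunctions.insert(0, search_static)
    creds = credentials.get_credentials()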
diff --git a/boto/core/dictresponse.py b/boto/core/dictresponse.py
new file mode 100644
index 0000000..3518834
--- /dev/null
+++ b/boto/core/dictresponse.py
@@ -0,0 +1,178 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import xml.sax
+
+
+def pythonize_name(name, sep='_'):
+ s = ''
+    if name[0].isupper():
+ s = name[0].lower()
+ for c in name[1:]:
+ if c.isupper():
+ s += sep + c.lower()
+ else:
+ s += c
+ return s
+
+
+class XmlHandler(xml.sax.ContentHandler):
+
+ def __init__(self, root_node, connection):
+ self.connection = connection
+ self.nodes = [('root', root_node)]
+ self.current_text = ''
+
+ def startElement(self, name, attrs):
+ self.current_text = ''
+ t = self.nodes[-1][1].startElement(name, attrs, self.connection)
+        if t is not None:
+ if isinstance(t, tuple):
+ self.nodes.append(t)
+ else:
+ self.nodes.append((name, t))
+
+ def endElement(self, name):
+ self.nodes[-1][1].endElement(name, self.current_text, self.connection)
+ if self.nodes[-1][0] == name:
+ self.nodes.pop()
+ self.current_text = ''
+
+ def characters(self, content):
+ self.current_text += content
+
+ def parse(self, s):
+ xml.sax.parseString(s, self)
+
+
+class Element(dict):
+
+ def __init__(self, connection=None, element_name=None,
+ stack=None, parent=None, list_marker=None,
+ item_marker=None, pythonize_name=False):
+ dict.__init__(self)
+ self.connection = connection
+ self.element_name = element_name
+ self.list_marker = list_marker or ['Set']
+ self.item_marker = item_marker or ['member', 'item']
+ if stack is None:
+ self.stack = []
+ else:
+ self.stack = stack
+ self.pythonize_name = pythonize_name
+ self.parent = parent
+
+ def __getattr__(self, key):
+ if key in self:
+ return self[key]
+ for k in self:
+ e = self[k]
+ if isinstance(e, Element):
+ try:
+ return getattr(e, key)
+ except AttributeError:
+ pass
+ raise AttributeError
+
+ def get_name(self, name):
+ if self.pythonize_name:
+ name = pythonize_name(name)
+ return name
+
+ def startElement(self, name, attrs, connection):
+ self.stack.append(name)
+ for lm in self.list_marker:
+ if name.endswith(lm):
+ l = ListElement(self.connection, name, self.list_marker,
+ self.item_marker, self.pythonize_name)
+ self[self.get_name(name)] = l
+ return l
+ if len(self.stack) > 0:
+ element_name = self.stack[-1]
+ e = Element(self.connection, element_name, self.stack, self,
+ self.list_marker, self.item_marker,
+ self.pythonize_name)
+ self[self.get_name(element_name)] = e
+ return (element_name, e)
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if len(self.stack) > 0:
+ self.stack.pop()
+ value = value.strip()
+ if value:
+ if isinstance(self.parent, Element):
+ self.parent[self.get_name(name)] = value
+ elif isinstance(self.parent, ListElement):
+ self.parent.append(value)
+
+
+class ListElement(list):
+
+ def __init__(self, connection=None, element_name=None,
+ list_marker=['Set'], item_marker=('member', 'item'),
+ pythonize_name=False):
+ list.__init__(self)
+ self.connection = connection
+ self.element_name = element_name
+ self.list_marker = list_marker
+ self.item_marker = item_marker
+ self.pythonize_name = pythonize_name
+
+ def get_name(self, name):
+ if self.pythonize_name:
+            name = pythonize_name(name)
+ return name
+
+ def startElement(self, name, attrs, connection):
+ for lm in self.list_marker:
+ if name.endswith(lm):
+ l = ListElement(self.connection, name,
+ self.list_marker, self.item_marker,
+ self.pythonize_name)
+ setattr(self, self.get_name(name), l)
+ return l
+ if name in self.item_marker:
+ e = Element(self.connection, name, parent=self,
+ list_marker=self.list_marker,
+ item_marker=self.item_marker,
+ pythonize_name=self.pythonize_name)
+ self.append(e)
+ return e
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == self.element_name:
+ if len(self) > 0:
+ empty = []
+ for e in self:
+ if isinstance(e, Element):
+ if len(e) == 0:
+ empty.append(e)
+ for e in empty:
+ self.remove(e)
+ else:
+ setattr(self, self.get_name(name), value)
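
The Element/XmlHandler pair above turns an XML response body into
nested dictionaries with attribute-style access. A minimal sketch (the
XML snippet is illustrative):

    from boto.core.dictresponse import Element, XmlHandler

    xml = b'<Response><requestId>a1b2c3</requestId></Response>'
    root = Element(pythonize_name=True)
    XmlHandler(root, None).parse(xml)
    # Attribute lookup searches nested elements, so both forms work:
    print(root['response']['request_id'])
    print(root.request_id)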
diff --git a/boto/core/service.py b/boto/core/service.py
new file mode 100644
index 0000000..53c53c5
--- /dev/null
+++ b/boto/core/service.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import requests
+from .auth import SigV2Auth
+from .credentials import get_credentials
+from .dictresponse import Element, XmlHandler
+
+
+class Service(object):
+ """
+ This is a simple example service that connects to the EC2 endpoint
+ and supports a single request (DescribeInstances) to show how to
+ use the requests-based code rather than the standard boto code which
+ is based on httplib. At the moment, the only auth mechanism
+ supported is SigV2.
+ """
+
+ def __init__(self, host='https://ec2.us-east-1.amazonaws.com',
+ path='/', api_version='2012-03-01', persona=None):
+ self.credentials = get_credentials(persona)
+ self.auth = SigV2Auth(self.credentials, api_version=api_version)
+ self.host = host
+ self.path = path
+
+ def get_response(self, params, list_marker=None):
+ r = requests.post(self.host, params=params,
+ hooks={'args': self.auth.add_auth})
+ r.encoding = 'utf-8'
+ body = r.text.encode('utf-8')
+ e = Element(list_marker=list_marker, pythonize_name=True)
+ h = XmlHandler(e, self)
+ h.parse(body)
+ return e
+
+ def build_list_params(self, params, items, label):
+ if isinstance(items, str):
+ items = [items]
+ for i in range(1, len(items) + 1):
+ params['%s.%d' % (label, i)] = items[i - 1]
+
+ def describe_instances(self, instance_ids=None):
+ params = {}
+ if instance_ids:
+ self.build_list_params(params, instance_ids, 'InstanceId')
+ return self.get_response(params)
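
build_list_params flattens a Python list into the indexed query
parameters the EC2 API expects, e.g. InstanceId.1, InstanceId.2, and so
on. A sketch of the resulting dict (the instance ids are illustrative):

    params = {}
    for i, instance_id in enumerate(['i-12345678', 'i-87654321'], 1):
        params['InstanceId.%d' % i] = instance_id
    # params == {'InstanceId.1': 'i-12345678', 'InstanceId.2': 'i-87654321'}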
diff --git a/boto/dynamodb/__init__.py b/boto/dynamodb/__init__.py
new file mode 100644
index 0000000..c60b5c3
--- /dev/null
+++ b/boto/dynamodb/__init__.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.regioninfo import RegionInfo
+
+
+def regions():
+ """
+ Get all available regions for the Amazon DynamoDB service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ import boto.dynamodb.layer2
+ return [RegionInfo(name='us-east-1',
+ endpoint='dynamodb.us-east-1.amazonaws.com',
+ connection_cls=boto.dynamodb.layer2.Layer2),
+ RegionInfo(name='us-west-1',
+ endpoint='dynamodb.us-west-1.amazonaws.com',
+ connection_cls=boto.dynamodb.layer2.Layer2),
+ RegionInfo(name='us-west-2',
+ endpoint='dynamodb.us-west-2.amazonaws.com',
+ connection_cls=boto.dynamodb.layer2.Layer2),
+ RegionInfo(name='ap-northeast-1',
+ endpoint='dynamodb.ap-northeast-1.amazonaws.com',
+ connection_cls=boto.dynamodb.layer2.Layer2),
+ RegionInfo(name='ap-southeast-1',
+ endpoint='dynamodb.ap-southeast-1.amazonaws.com',
+ connection_cls=boto.dynamodb.layer2.Layer2),
+ RegionInfo(name='eu-west-1',
+ endpoint='dynamodb.eu-west-1.amazonaws.com',
+ connection_cls=boto.dynamodb.layer2.Layer2),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
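
connect_to_region is the usual entry point; it returns a Layer2
connection bound to the matching endpoint, or None for an unknown
region name. A sketch (the keys are illustrative and may also come
from the standard boto credential search):

    import boto.dynamodb

    conn = boto.dynamodb.connect_to_region(
        'us-west-2',
        aws_access_key_id='AKIDEXAMPLE',
        aws_secret_access_key='SECRETEXAMPLE')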
diff --git a/boto/dynamodb/batch.py b/boto/dynamodb/batch.py
new file mode 100644
index 0000000..87c84fc
--- /dev/null
+++ b/boto/dynamodb/batch.py
@@ -0,0 +1,249 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
+class Batch(object):
+ """
+ Used to construct a BatchGet request.
+
+ :ivar table: The Table object from which the item is retrieved.
+
+ :ivar keys: A list of scalar or tuple values. Each element in the
+ list represents one Item to retrieve. If the schema for the
+ table has both a HashKey and a RangeKey, each element in the
+ list should be a tuple consisting of (hash_key, range_key). If
+ the schema for the table contains only a HashKey, each element
+ in the list should be a scalar value of the appropriate type
+ for the table schema. NOTE: The maximum number of items that
+ can be retrieved for a single operation is 100. Also, the
+ number of items retrieved is constrained by a 1 MB size limit.
+
+ :ivar attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+ """
+
+ def __init__(self, table, keys, attributes_to_get=None):
+ self.table = table
+ self.keys = keys
+ self.attributes_to_get = attributes_to_get
+
+ def to_dict(self):
+ """
+ Convert the Batch object into the format required for Layer1.
+ """
+ batch_dict = {}
+ key_list = []
+ for key in self.keys:
+ if isinstance(key, tuple):
+ hash_key, range_key = key
+ else:
+ hash_key = key
+ range_key = None
+ k = self.table.layer2.build_key_from_values(self.table.schema,
+ hash_key, range_key)
+ key_list.append(k)
+ batch_dict['Keys'] = key_list
+ if self.attributes_to_get:
+ batch_dict['AttributesToGet'] = self.attributes_to_get
+ return batch_dict
+
+class BatchWrite(object):
+ """
+ Used to construct a BatchWrite request. Each BatchWrite object
+ represents a collection of PutItem and DeleteItem requests for
+ a single Table.
+
+ :ivar table: The Table object from which the item is retrieved.
+
+ :ivar puts: A list of :class:`boto.dynamodb.item.Item` objects
+ that you want to write to DynamoDB.
+
+ :ivar deletes: A list of scalar or tuple values. Each element in the
+ list represents one Item to delete. If the schema for the
+ table has both a HashKey and a RangeKey, each element in the
+ list should be a tuple consisting of (hash_key, range_key). If
+ the schema for the table contains only a HashKey, each element
+ in the list should be a scalar value of the appropriate type
+ for the table schema.
+ """
+
+ def __init__(self, table, puts=None, deletes=None):
+ self.table = table
+ self.puts = puts or []
+ self.deletes = deletes or []
+
+ def to_dict(self):
+ """
+ Convert the Batch object into the format required for Layer1.
+ """
+ op_list = []
+ for item in self.puts:
+ d = {'Item': self.table.layer2.dynamize_item(item)}
+ d = {'PutRequest': d}
+ op_list.append(d)
+ for key in self.deletes:
+ if isinstance(key, tuple):
+ hash_key, range_key = key
+ else:
+ hash_key = key
+ range_key = None
+ k = self.table.layer2.build_key_from_values(self.table.schema,
+ hash_key, range_key)
+ d = {'Key': k}
+ op_list.append({'DeleteRequest': d})
+ return (self.table.name, op_list)
+
+
+class BatchList(list):
+ """
+ A subclass of a list object that contains a collection of
+ :class:`boto.dynamodb.batch.Batch` objects.
+ """
+
+ def __init__(self, layer2):
+ list.__init__(self)
+ self.unprocessed = None
+ self.layer2 = layer2
+
+ def add_batch(self, table, keys, attributes_to_get=None):
+ """
+ Add a Batch to this BatchList.
+
+ :type table: :class:`boto.dynamodb.table.Table`
+ :param table: The Table object in which the items are contained.
+
+ :type keys: list
+ :param keys: A list of scalar or tuple values. Each element in the
+ list represents one Item to retrieve. If the schema for the
+ table has both a HashKey and a RangeKey, each element in the
+ list should be a tuple consisting of (hash_key, range_key). If
+ the schema for the table contains only a HashKey, each element
+ in the list should be a scalar value of the appropriate type
+ for the table schema. NOTE: The maximum number of items that
+ can be retrieved for a single operation is 100. Also, the
+ number of items retrieved is constrained by a 1 MB size limit.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+ """
+ self.append(Batch(table, keys, attributes_to_get))
+
+ def resubmit(self):
+ """
+ Resubmit the batch to get the next result set. The request object is
+        rebuilt from scratch, meaning that all batches added between ``submit``
+        and ``resubmit`` will be lost.
+
+        Note: This method is experimental and subject to change in future releases.
+ """
+ del self[:]
+
+ if not self.unprocessed:
+ return None
+
+ for table_name, table_req in self.unprocessed.iteritems():
+ table_keys = table_req['Keys']
+ table = self.layer2.get_table(table_name)
+
+ keys = []
+ for key in table_keys:
+ h = key['HashKeyElement']
+ r = None
+ if 'RangeKeyElement' in key:
+ r = key['RangeKeyElement']
+ keys.append((h, r))
+
+ attributes_to_get = None
+ if 'AttributesToGet' in table_req:
+ attributes_to_get = table_req['AttributesToGet']
+
+ self.add_batch(table, keys, attributes_to_get=attributes_to_get)
+
+ return self.submit()
+
+ def submit(self):
+ res = self.layer2.batch_get_item(self)
+ if 'UnprocessedKeys' in res:
+ self.unprocessed = res['UnprocessedKeys']
+ return res
+
+ def to_dict(self):
+ """
+ Convert a BatchList object into format required for Layer1.
+ """
+ d = {}
+ for batch in self:
+ b = batch.to_dict()
+ if b['Keys']:
+ d[batch.table.name] = b
+ return d
+
+class BatchWriteList(list):
+ """
+ A subclass of a list object that contains a collection of
+ :class:`boto.dynamodb.batch.BatchWrite` objects.
+ """
+
+ def __init__(self, layer2):
+ list.__init__(self)
+ self.layer2 = layer2
+
+ def add_batch(self, table, puts=None, deletes=None):
+ """
+ Add a BatchWrite to this BatchWriteList.
+
+ :type table: :class:`boto.dynamodb.table.Table`
+ :param table: The Table object in which the items are contained.
+
+ :type puts: list of :class:`boto.dynamodb.item.Item` objects
+ :param puts: A list of items that you want to write to DynamoDB.
+
+ :type deletes: A list
+ :param deletes: A list of scalar or tuple values. Each element
+ in the list represents one Item to delete. If the schema
+ for the table has both a HashKey and a RangeKey, each
+ element in the list should be a tuple consisting of
+ (hash_key, range_key). If the schema for the table
+ contains only a HashKey, each element in the list should
+ be a scalar value of the appropriate type for the table
+ schema.
+ """
+ self.append(BatchWrite(table, puts, deletes))
+
+ def submit(self):
+ return self.layer2.batch_write_item(self)
+
+ def to_dict(self):
+ """
+ Convert a BatchWriteList object into format required for Layer1.
+ """
+ d = {}
+ for batch in self:
+ table_name, batch_dict = batch.to_dict()
+ d[table_name] = batch_dict
+ return d
+
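
Putting Batch and BatchList together, a multi-item read looks roughly
like this (a sketch assuming an existing Layer2 connection ``conn`` and
a Table ``table`` whose schema has a HashKey and a RangeKey):

    from boto.dynamodb.batch import BatchList

    batch_list = BatchList(conn)
    batch_list.add_batch(table, keys=[('hash-1', 'range-1'),
                                      ('hash-2', 'range-2')])
    result = batch_list.submit()
    if batch_list.unprocessed:
        # Rebuilds the request from UnprocessedKeys and submits again.
        result = batch_list.resubmit()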
diff --git a/boto/dynamodb/condition.py b/boto/dynamodb/condition.py
new file mode 100644
index 0000000..0b76790
--- /dev/null
+++ b/boto/dynamodb/condition.py
@@ -0,0 +1,170 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.dynamodb.types import dynamize_value
+
+
+class Condition(object):
+ """
+ Base class for conditions. Doesn't do a darn thing but allows
+    us to test if something is a Condition instance or not.
+ """
+
+    def __eq__(self, other):
+        if isinstance(other, Condition):
+            return self.to_dict() == other.to_dict()
+        return NotImplemented
+
+class ConditionNoArgs(Condition):
+ """
+ Abstract class for Conditions that require no arguments, such
+ as NULL or NOT_NULL.
+ """
+
+ def __repr__(self):
+ return '%s' % self.__class__.__name__
+
+ def to_dict(self):
+ return {'ComparisonOperator': self.__class__.__name__}
+
+
+class ConditionOneArg(Condition):
+ """
+ Abstract class for Conditions that require a single argument
+ such as EQ or NE.
+ """
+
+ def __init__(self, v1):
+ self.v1 = v1
+
+ def __repr__(self):
+ return '%s:%s' % (self.__class__.__name__, self.v1)
+
+ def to_dict(self):
+ return {'AttributeValueList': [dynamize_value(self.v1)],
+ 'ComparisonOperator': self.__class__.__name__}
+
+
+class ConditionTwoArgs(Condition):
+ """
+ Abstract class for Conditions that require two arguments.
+ The only example of this currently is BETWEEN.
+ """
+
+ def __init__(self, v1, v2):
+ self.v1 = v1
+ self.v2 = v2
+
+ def __repr__(self):
+ return '%s(%s, %s)' % (self.__class__.__name__, self.v1, self.v2)
+
+ def to_dict(self):
+ values = (self.v1, self.v2)
+ return {'AttributeValueList': [dynamize_value(v) for v in values],
+ 'ComparisonOperator': self.__class__.__name__}
+
+
+class ConditionSeveralArgs(Condition):
+ """
+ Abstract class for conditions that require several argument (ex: IN).
+ """
+
+ def __init__(self, values):
+ self.values = values
+
+ def __repr__(self):
+ return '{}({})'.format(self.__class__.__name__,
+ ', '.join(self.values))
+
+ def to_dict(self):
+ return {'AttributeValueList': [dynamize_value(v) for v in self.values],
+ 'ComparisonOperator': self.__class__.__name__}
+
+
+class EQ(ConditionOneArg):
+
+ pass
+
+
+class NE(ConditionOneArg):
+
+ pass
+
+
+class LE(ConditionOneArg):
+
+ pass
+
+
+class LT(ConditionOneArg):
+
+ pass
+
+
+class GE(ConditionOneArg):
+
+ pass
+
+
+class GT(ConditionOneArg):
+
+ pass
+
+
+class NULL(ConditionNoArgs):
+
+ pass
+
+
+class NOT_NULL(ConditionNoArgs):
+
+ pass
+
+
+class CONTAINS(ConditionOneArg):
+
+ pass
+
+
+class NOT_CONTAINS(ConditionOneArg):
+
+ pass
+
+
+class BEGINS_WITH(ConditionOneArg):
+
+ pass
+
+
+class IN(ConditionSeveralArgs):
+
+ pass
+
+
+class BETWEEN(ConditionTwoArgs):
+
+ pass
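
Each condition class serializes itself into the comparison structure
DynamoDB expects, using its own class name as the ComparisonOperator. A
sketch (the numeric bounds are illustrative):

    from boto.dynamodb.condition import BETWEEN

    cond = BETWEEN(25, 35)
    cond.to_dict()
    # {'AttributeValueList': [{'N': '25'}, {'N': '35'}],
    #  'ComparisonOperator': 'BETWEEN'}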
diff --git a/boto/dynamodb/exceptions.py b/boto/dynamodb/exceptions.py
new file mode 100644
index 0000000..b60d5aa
--- /dev/null
+++ b/boto/dynamodb/exceptions.py
@@ -0,0 +1,45 @@
+"""
+Exceptions that are specific to the dynamodb module.
+"""
+from boto.exception import BotoServerError, BotoClientError
+from boto.exception import DynamoDBResponseError
+
+class DynamoDBExpiredTokenError(BotoServerError):
+ """
+ Raised when a DynamoDB security token expires. This is generally boto's
+ (or the user's) notice to renew their DynamoDB security tokens.
+ """
+ pass
+
+
+class DynamoDBKeyNotFoundError(BotoClientError):
+ """
+ Raised when attempting to retrieve or interact with an item whose key
+ can't be found.
+ """
+ pass
+
+
+class DynamoDBItemError(BotoClientError):
+ """
+ Raised when invalid parameters are passed when creating a
+ new Item in DynamoDB.
+ """
+ pass
+
+
+class DynamoDBConditionalCheckFailedError(DynamoDBResponseError):
+ """
+ Raised when a ConditionalCheckFailedException response is received.
+ This happens when a conditional check, expressed via the expected_value
+    parameter, fails.
+ """
+ pass
+
+class DynamoDBValidationError(DynamoDBResponseError):
+ """
+ Raised when a ValidationException response is received. This happens
+ when one or more required parameter values are missing, or if the item
+    has exceeded the 64KB size limit.
+ """
+ pass
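
These exceptions let callers treat conditional-write failures as a
normal control-flow case rather than a generic server error. A sketch
(assuming an Item ``item`` as defined in boto.dynamodb.item):

    from boto.dynamodb.exceptions import DynamoDBConditionalCheckFailedError

    try:
        # Only succeed if the 'views' attribute does not already exist.
        item.put(expected_value={'views': False})
    except DynamoDBConditionalCheckFailedError:
        pass  # the item already had a 'views' attribute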
diff --git a/boto/dynamodb/item.py b/boto/dynamodb/item.py
new file mode 100644
index 0000000..4d4abda
--- /dev/null
+++ b/boto/dynamodb/item.py
@@ -0,0 +1,196 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.dynamodb.exceptions import DynamoDBItemError
+
+
+class Item(dict):
+ """
+ An item in Amazon DynamoDB.
+
+ :ivar hash_key: The HashKey of this item.
+ :ivar range_key: The RangeKey of this item or None if no RangeKey
+ is defined.
+ :ivar hash_key_name: The name of the HashKey associated with this item.
+ :ivar range_key_name: The name of the RangeKey associated with this item.
+ :ivar table: The Table this item belongs to.
+ """
+
+ def __init__(self, table, hash_key=None, range_key=None, attrs=None):
+ self.table = table
+ self._updates = None
+ self._hash_key_name = self.table.schema.hash_key_name
+ self._range_key_name = self.table.schema.range_key_name
+        if attrs is None:
+            attrs = {}
+        if hash_key is None:
+            hash_key = attrs.get(self._hash_key_name, None)
+        self[self._hash_key_name] = hash_key
+        if self._range_key_name:
+            if range_key is None:
+                range_key = attrs.get(self._range_key_name, None)
+            self[self._range_key_name] = range_key
+ for key, value in attrs.items():
+ if key != self._hash_key_name and key != self._range_key_name:
+ self[key] = value
+ self.consumed_units = 0
+ self._updates = {}
+
+ @property
+ def hash_key(self):
+ return self[self._hash_key_name]
+
+ @property
+ def range_key(self):
+ return self.get(self._range_key_name)
+
+ @property
+ def hash_key_name(self):
+ return self._hash_key_name
+
+ @property
+ def range_key_name(self):
+ return self._range_key_name
+
+ def add_attribute(self, attr_name, attr_value):
+ """
+ Queue the addition of an attribute to an item in DynamoDB.
+ This will eventually result in an UpdateItem request being issued
+ with an update action of ADD when the save method is called.
+
+ :type attr_name: str
+ :param attr_name: Name of the attribute you want to alter.
+
+ :type attr_value: int|long|float|set
+ :param attr_value: Value which is to be added to the attribute.
+ """
+ self._updates[attr_name] = ("ADD", attr_value)
+
+ def delete_attribute(self, attr_name, attr_value=None):
+ """
+ Queue the deletion of an attribute from an item in DynamoDB.
+ This call will result in a UpdateItem request being issued
+ with update action of DELETE when the save method is called.
+
+ :type attr_name: str
+ :param attr_name: Name of the attribute you want to alter.
+
+ :type attr_value: set
+ :param attr_value: A set of values to be removed from the attribute.
+ This parameter is optional. If None, the whole attribute is
+ removed from the item.
+ """
+ self._updates[attr_name] = ("DELETE", attr_value)
+
+ def put_attribute(self, attr_name, attr_value):
+ """
+ Queue the putting of an attribute to an item in DynamoDB.
+ This call will result in an UpdateItem request being issued
+ with the update action of PUT when the save method is called.
+
+ :type attr_name: str
+ :param attr_name: Name of the attribute you want to alter.
+
+ :type attr_value: int|long|float|str|set
+ :param attr_value: New value of the attribute.
+ """
+ self._updates[attr_name] = ("PUT", attr_value)
+
+ def save(self, expected_value=None, return_values=None):
+ """
+ Commits pending updates to Amazon DynamoDB.
+
+ :type expected_value: dict
+ :param expected_value: A dictionary of name/value pairs that
+ you expect. This dictionary should have name/value pairs
+ where the name is the name of the attribute and the value is
+ either the value you are expecting or False if you expect
+ the attribute not to exist.
+
+ :type return_values: str
+ :param return_values: Controls the return of attribute name/value pairs
+ before they were updated. Possible values are: None, 'ALL_OLD',
+ 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
+ specified and the item is overwritten, the content of the old item
+ is returned. If 'ALL_NEW' is specified, then all the attributes of
+ the new version of the item are returned. If 'UPDATED_NEW' is
+ specified, the new versions of only the updated attributes are
+ returned.
+ """
+ return self.table.layer2.update_item(self, expected_value,
+ return_values)
+
+ def delete(self, expected_value=None, return_values=None):
+ """
+ Delete the item from DynamoDB.
+
+ :type expected_value: dict
+ :param expected_value: A dictionary of name/value pairs that
+ you expect. This dictionary should have name/value pairs
+ where the name is the name of the attribute and the value
+ is either the value you are expecting or False if you expect
+ the attribute not to exist.
+
+ :type return_values: str
+ :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+ values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+ specified and the item is overwritten, the content
+ of the old item is returned.
+ """
+ return self.table.layer2.delete_item(self, expected_value,
+ return_values)
+
+ def put(self, expected_value=None, return_values=None):
+ """
+ Store a new item or completely replace an existing item
+ in Amazon DynamoDB.
+
+ :type expected_value: dict
+ :param expected_value: A dictionary of name/value pairs that
+ you expect. This dictionary should have name/value pairs
+ where the name is the name of the attribute and the value
+ is either the value you are expecting or False if you expect
+ the attribute not to exist.
+
+ :type return_values: str
+ :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+ values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+ specified and the item is overwritten, the content
+ of the old item is returned.
+ """
+ return self.table.layer2.put_item(self, expected_value, return_values)
+
+ def __setitem__(self, key, value):
+        """Overwrite the setter to also queue a PUT update in
+        _updates so this can act like a normal dict."""
+ if self._updates is not None:
+ self.put_attribute(key, value)
+ dict.__setitem__(self, key, value)
+
+ def __delitem__(self, key):
+ """Remove this key from the items"""
+ if self._updates is not None:
+ self.delete_attribute(key)
+ dict.__delitem__(self, key)
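
In practice, attribute changes queue UpdateItem actions until save() is
called. A sketch (assuming an existing Table ``table`` whose schema has
only a HashKey; the attribute names are illustrative):

    from boto.dynamodb.item import Item

    item = Item(table, hash_key='user-1', attrs={'views': 1})
    item.put()                       # PutItem: store the full item
    item.add_attribute('views', 5)   # queued as an ADD action
    item.save()                      # UpdateItem with the queued actions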
diff --git a/boto/dynamodb/layer1.py b/boto/dynamodb/layer1.py
new file mode 100644
index 0000000..40dac5c
--- /dev/null
+++ b/boto/dynamodb/layer1.py
@@ -0,0 +1,554 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.connection import AWSAuthConnection
+from boto.exception import DynamoDBResponseError
+from boto.provider import Provider
+from boto.dynamodb import exceptions as dynamodb_exceptions
+
+import time
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+#
+# To get full debug output, uncomment the following line and set the
+# value of Debug to be 2
+#
+#boto.set_stream_logger('dynamodb')
+Debug = 0
+
+
+class Layer1(AWSAuthConnection):
+ """
+ This is the lowest-level interface to DynamoDB. Methods at this
+ layer map directly to API requests and parameters to the methods
+ are either simple, scalar values or they are the Python equivalent
+ of the JSON input as defined in the DynamoDB Developer's Guide.
+ All responses are direct decoding of the JSON response bodies to
+ Python data structures via the json or simplejson modules.
+
+ :ivar throughput_exceeded_events: An integer variable that
+ keeps a running total of the number of ThroughputExceeded
+ responses this connection has received from Amazon DynamoDB.
+ """
+
+ DefaultRegionName = 'us-east-1'
+ """The default region name for DynamoDB API."""
+
+ ServiceName = 'DynamoDB'
+ """The name of the Service"""
+
+ Version = '20111205'
+ """DynamoDB API version."""
+
+ ThruputError = "ProvisionedThroughputExceededException"
+ """The error response returned when provisioned throughput is exceeded"""
+
+ SessionExpiredError = 'com.amazon.coral.service#ExpiredTokenException'
+ """The error response returned when session token has expired"""
+
+ ConditionalCheckFailedError = 'ConditionalCheckFailedException'
+ """The error response returned when a conditional check fails"""
+
+ ValidationError = 'ValidationException'
+ """The error response returned when an item is invalid in some way"""
+
+ ResponseError = DynamoDBResponseError
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None, proxy=None, proxy_port=None,
+ debug=0, security_token=None, region=None,
+ validate_certs=True):
+ if not region:
+ region_name = boto.config.get('DynamoDB', 'region',
+ self.DefaultRegionName)
+ for reg in boto.dynamodb.regions():
+ if reg.name == region_name:
+ region = reg
+ break
+
+ self.region = region
+ AWSAuthConnection.__init__(self, self.region.endpoint,
+ aws_access_key_id,
+ aws_secret_access_key,
+ is_secure, port, proxy, proxy_port,
+ debug=debug, security_token=security_token,
+ validate_certs=validate_certs)
+ self.throughput_exceeded_events = 0
+
+ def _get_session_token(self):
+ self.provider = Provider(self._provider_type)
+ self._auth_handler.update_provider(self.provider)
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def make_request(self, action, body='', object_hook=None):
+ """
+ :raises: ``DynamoDBExpiredTokenError`` if the security token expires.
+ """
+ headers = {'X-Amz-Target': '%s_%s.%s' % (self.ServiceName,
+ self.Version, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.0',
+ 'Content-Length': str(len(body))}
+ http_request = self.build_base_http_request('POST', '/', '/',
+ {}, headers, body, None)
+ start = time.time()
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10,
+ retry_handler=self._retry_handler)
+ elapsed = (time.time() - start)*1000
+ request_id = response.getheader('x-amzn-RequestId')
+ boto.log.debug('RequestId: %s' % request_id)
+ boto.perflog.info('%s: id=%s time=%sms',
+ headers['X-Amz-Target'], request_id, int(elapsed))
+ response_body = response.read()
+ boto.log.debug(response_body)
+ return json.loads(response_body, object_hook=object_hook)
+
+ def _retry_handler(self, response, i, next_sleep):
+ status = None
+ if response.status == 400:
+ response_body = response.read()
+ boto.log.debug(response_body)
+ data = json.loads(response_body)
+            if self.ThruputError in data.get('__type', ''):
+                self.throughput_exceeded_events += 1
+                msg = "%s, retry attempt %s" % (self.ThruputError, i)
+                if i == 0:
+                    next_sleep = 0
+                else:
+                    next_sleep = 0.05 * (2 ** i)
+                i += 1
+                status = (msg, i, next_sleep)
+            elif self.SessionExpiredError in data.get('__type', ''):
+                msg = 'Renewing Session Token'
+                self._get_session_token()
+                status = (msg, i + self.num_retries - 1, 0)
+            elif self.ConditionalCheckFailedError in data.get('__type', ''):
+                raise dynamodb_exceptions.DynamoDBConditionalCheckFailedError(
+                    response.status, response.reason, data)
+            elif self.ValidationError in data.get('__type', ''):
+ raise dynamodb_exceptions.DynamoDBValidationError(
+ response.status, response.reason, data)
+ else:
+ raise self.ResponseError(response.status, response.reason,
+ data)
+ return status
+
+ def list_tables(self, limit=None, start_table=None):
+ """
+ Returns a dictionary of results. The dictionary contains
+ a **TableNames** key whose value is a list of the table names.
+ The dictionary could also contain a **LastEvaluatedTableName**
+ key whose value would be the last table name returned if
+ the complete list of table names was not returned. This
+ value would then be passed as the ``start_table`` parameter on
+ a subsequent call to this method.
+
+ :type limit: int
+ :param limit: The maximum number of tables to return.
+
+ :type start_table: str
+ :param start_table: The name of the table that starts the
+ list. If you ran a previous list_tables and not
+ all results were returned, the response dict would
+ include a LastEvaluatedTableName attribute. Use
+ that value here to continue the listing.
+ """
+ data = {}
+ if limit:
+ data['Limit'] = limit
+ if start_table:
+ data['ExclusiveStartTableName'] = start_table
+ json_input = json.dumps(data)
+ return self.make_request('ListTables', json_input)
+
+ def describe_table(self, table_name):
+ """
+ Returns information about the table including current
+ state of the table, primary key schema and when the
+ table was created.
+
+ :type table_name: str
+ :param table_name: The name of the table to describe.
+ """
+ data = {'TableName': table_name}
+ json_input = json.dumps(data)
+ return self.make_request('DescribeTable', json_input)
+
+ def create_table(self, table_name, schema, provisioned_throughput):
+ """
+ Add a new table to your account. The table name must be unique
+ among those associated with the account issuing the request.
+ This request triggers an asynchronous workflow to begin creating
+ the table. When the workflow is complete, the state of the
+ table will be ACTIVE.
+
+ :type table_name: str
+ :param table_name: The name of the table to create.
+
+ :type schema: dict
+ :param schema: A Python version of the KeySchema data structure
+ as defined by DynamoDB
+
+ :type provisioned_throughput: dict
+ :param provisioned_throughput: A Python version of the
+ ProvisionedThroughput data structure defined by
+ DynamoDB.
+ """
+ data = {'TableName': table_name,
+ 'KeySchema': schema,
+ 'ProvisionedThroughput': provisioned_throughput}
+ json_input = json.dumps(data)
+ response_dict = self.make_request('CreateTable', json_input)
+ return response_dict
+
+ def update_table(self, table_name, provisioned_throughput):
+ """
+ Updates the provisioned throughput for a given table.
+
+ :type table_name: str
+ :param table_name: The name of the table to update.
+
+ :type provisioned_throughput: dict
+ :param provisioned_throughput: A Python version of the
+ ProvisionedThroughput data structure defined by
+ DynamoDB.
+ """
+ data = {'TableName': table_name,
+ 'ProvisionedThroughput': provisioned_throughput}
+ json_input = json.dumps(data)
+ return self.make_request('UpdateTable', json_input)
+
+ def delete_table(self, table_name):
+ """
+        Deletes the table and all of its data. After this request
+ the table will be in the DELETING state until DynamoDB
+ completes the delete operation.
+
+ :type table_name: str
+ :param table_name: The name of the table to delete.
+ """
+ data = {'TableName': table_name}
+ json_input = json.dumps(data)
+ return self.make_request('DeleteTable', json_input)
+
+ def get_item(self, table_name, key, attributes_to_get=None,
+ consistent_read=False, object_hook=None):
+ """
+ Return a set of attributes for an item that matches
+ the supplied key.
+
+ :type table_name: str
+ :param table_name: The name of the table containing the item.
+
+ :type key: dict
+ :param key: A Python version of the Key data structure
+ defined by DynamoDB.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+
+ :type consistent_read: bool
+ :param consistent_read: If True, a consistent read
+ request is issued. Otherwise, an eventually consistent
+ request is issued.
+ """
+ data = {'TableName': table_name,
+ 'Key': key}
+ if attributes_to_get:
+ data['AttributesToGet'] = attributes_to_get
+ if consistent_read:
+ data['ConsistentRead'] = True
+ json_input = json.dumps(data)
+ response = self.make_request('GetItem', json_input,
+ object_hook=object_hook)
+ if 'Item' not in response:
+ raise dynamodb_exceptions.DynamoDBKeyNotFoundError(
+ "Key does not exist."
+ )
+ return response
+
+ def batch_get_item(self, request_items, object_hook=None):
+ """
+ Return a set of attributes for a multiple items in
+ multiple tables using their primary keys.
+
+ :type request_items: dict
+ :param request_items: A Python version of the RequestItems
+ data structure defined by DynamoDB.
+ """
+ # If the list is empty, return empty response
+ if not request_items:
+ return {}
+ data = {'RequestItems': request_items}
+ json_input = json.dumps(data)
+ return self.make_request('BatchGetItem', json_input,
+ object_hook=object_hook)
+
+ def batch_write_item(self, request_items, object_hook=None):
+ """
+ This operation enables you to put or delete several items
+ across multiple tables in a single API call.
+
+ :type request_items: dict
+ :param request_items: A Python version of the RequestItems
+ data structure defined by DynamoDB.
+ """
+ data = {'RequestItems': request_items}
+ json_input = json.dumps(data)
+ return self.make_request('BatchWriteItem', json_input,
+ object_hook=object_hook)
+
+ def put_item(self, table_name, item,
+ expected=None, return_values=None,
+ object_hook=None):
+ """
+ Create a new item or replace an old item with a new
+ item (including all attributes). If an item already
+ exists in the specified table with the same primary
+ key, the new item will completely replace the old item.
+ You can perform a conditional put by specifying an
+ expected rule.
+
+ :type table_name: str
+ :param table_name: The name of the table in which to put the item.
+
+ :type item: dict
+ :param item: A Python version of the Item data structure
+ defined by DynamoDB.
+
+ :type expected: dict
+ :param expected: A Python version of the Expected
+ data structure defined by DynamoDB.
+
+ :type return_values: str
+ :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+ values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+ specified and the item is overwritten, the content
+ of the old item is returned.
+ """
+ data = {'TableName': table_name,
+ 'Item': item}
+ if expected:
+ data['Expected'] = expected
+ if return_values:
+ data['ReturnValues'] = return_values
+ json_input = json.dumps(data)
+ return self.make_request('PutItem', json_input,
+ object_hook=object_hook)
+
+ def update_item(self, table_name, key, attribute_updates,
+ expected=None, return_values=None,
+ object_hook=None):
+ """
+ Edits an existing item's attributes. You can perform a conditional
+ update (insert a new attribute name-value pair if it doesn't exist,
+ or replace an existing name-value pair if it has certain expected
+ attribute values).
+
+ :type table_name: str
+ :param table_name: The name of the table.
+
+ :type key: dict
+ :param key: A Python version of the Key data structure
+ defined by DynamoDB which identifies the item to be updated.
+
+ :type attribute_updates: dict
+ :param attribute_updates: A Python version of the AttributeUpdates
+ data structure defined by DynamoDB.
+
+ :type expected: dict
+ :param expected: A Python version of the Expected
+ data structure defined by DynamoDB.
+
+ :type return_values: str
+ :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+ values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+ specified and the item is overwritten, the content
+ of the old item is returned.
+ """
+ data = {'TableName': table_name,
+ 'Key': key,
+ 'AttributeUpdates': attribute_updates}
+ if expected:
+ data['Expected'] = expected
+ if return_values:
+ data['ReturnValues'] = return_values
+ json_input = json.dumps(data)
+ return self.make_request('UpdateItem', json_input,
+ object_hook=object_hook)
+
+ def delete_item(self, table_name, key,
+ expected=None, return_values=None,
+ object_hook=None):
+ """
+        Delete an item and all of its attributes by primary key.
+ You can perform a conditional delete by specifying an
+ expected rule.
+
+ :type table_name: str
+ :param table_name: The name of the table containing the item.
+
+ :type key: dict
+ :param key: A Python version of the Key data structure
+ defined by DynamoDB.
+
+ :type expected: dict
+ :param expected: A Python version of the Expected
+ data structure defined by DynamoDB.
+
+ :type return_values: str
+ :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+ values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+ specified and the item is overwritten, the content
+ of the old item is returned.
+ """
+ data = {'TableName': table_name,
+ 'Key': key}
+ if expected:
+ data['Expected'] = expected
+ if return_values:
+ data['ReturnValues'] = return_values
+ json_input = json.dumps(data)
+ return self.make_request('DeleteItem', json_input,
+ object_hook=object_hook)
+
+ def query(self, table_name, hash_key_value, range_key_conditions=None,
+ attributes_to_get=None, limit=None, consistent_read=False,
+ scan_index_forward=True, exclusive_start_key=None,
+ object_hook=None):
+ """
+        Perform a query of DynamoDB. This version does no conversion
+        for you: the key and condition structures you supply must
+        already be in the JSON wire format defined by DynamoDB and
+        are passed through as-is.
+
+ :type table_name: str
+ :param table_name: The name of the table to query.
+
+ :type hash_key_value: dict
+        :param hash_key_value: A DynamoDB-style HashKeyValue.
+
+ :type range_key_conditions: dict
+ :param range_key_conditions: A Python version of the
+ RangeKeyConditions data structure.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+
+ :type limit: int
+ :param limit: The maximum number of items to return.
+
+ :type consistent_read: bool
+ :param consistent_read: If True, a consistent read
+ request is issued. Otherwise, an eventually consistent
+ request is issued.
+
+ :type scan_index_forward: bool
+ :param scan_index_forward: Specified forward or backward
+ traversal of the index. Default is forward (True).
+
+        :type exclusive_start_key: dict
+        :param exclusive_start_key: A DynamoDB-style Key structure
+            identifying the item from which to continue an earlier
+            query. This is the LastEvaluatedKey returned by that query.
+ """
+ data = {'TableName': table_name,
+ 'HashKeyValue': hash_key_value}
+ if range_key_conditions:
+ data['RangeKeyCondition'] = range_key_conditions
+ if attributes_to_get:
+ data['AttributesToGet'] = attributes_to_get
+ if limit:
+ data['Limit'] = limit
+ if consistent_read:
+ data['ConsistentRead'] = True
+ if scan_index_forward:
+ data['ScanIndexForward'] = True
+ else:
+ data['ScanIndexForward'] = False
+ if exclusive_start_key:
+ data['ExclusiveStartKey'] = exclusive_start_key
+ json_input = json.dumps(data)
+ return self.make_request('Query', json_input,
+ object_hook=object_hook)
+
+ def scan(self, table_name, scan_filter=None,
+ attributes_to_get=None, limit=None,
+ count=False, exclusive_start_key=None,
+ object_hook=None):
+ """
+        Perform a scan of DynamoDB. The arguments are assembled
+        into the JSON request body, so the data structures supplied
+        (e.g. ScanFilter) must already be in the low-level format
+        defined by DynamoDB.
+
+ :type table_name: str
+ :param table_name: The name of the table to scan.
+
+ :type scan_filter: dict
+ :param scan_filter: A Python version of the
+ ScanFilter data structure.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+
+ :type limit: int
+ :param limit: The maximum number of items to return.
+
+ :type count: bool
+ :param count: If True, Amazon DynamoDB returns a total
+ number of items for the Scan operation, even if the
+ operation has no matching items for the assigned filter.
+
+        :type exclusive_start_key: dict
+        :param exclusive_start_key: A DynamoDB-style Key structure
+            identifying the item from which to continue an earlier
+            scan. This is the LastEvaluatedKey returned by that scan.
+ """
+ data = {'TableName': table_name}
+ if scan_filter:
+ data['ScanFilter'] = scan_filter
+ if attributes_to_get:
+ data['AttributesToGet'] = attributes_to_get
+ if limit:
+ data['Limit'] = limit
+ if count:
+ data['Count'] = True
+ if exclusive_start_key:
+ data['ExclusiveStartKey'] = exclusive_start_key
+ json_input = json.dumps(data)
+ return self.make_request('Scan', json_input, object_hook=object_hook)
diff --git a/boto/dynamodb/layer2.py b/boto/dynamodb/layer2.py
new file mode 100644
index 0000000..45fd069
--- /dev/null
+++ b/boto/dynamodb/layer2.py
@@ -0,0 +1,726 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import base64
+
+from boto.dynamodb.layer1 import Layer1
+from boto.dynamodb.table import Table
+from boto.dynamodb.schema import Schema
+from boto.dynamodb.item import Item
+from boto.dynamodb.batch import BatchList, BatchWriteList
+from boto.dynamodb.types import get_dynamodb_type, dynamize_value, \
+ convert_num, convert_binary
+
+
+def item_object_hook(dct):
+ """
+    A custom object hook for use when decoding JSON item bodies.
+ This hook will transform Amazon DynamoDB JSON responses to something
+ that maps directly to native Python types.
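+
+    For example (a sketch of the mapping; set ordering is arbitrary)::
+
+        >>> item_object_hook({'N': '42'})
+        42
+        >>> item_object_hook({'SS': ['a', 'b']})
+        set(['a', 'b'])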
+ """
+ if len(dct.keys()) > 1:
+ return dct
+ if 'S' in dct:
+ return dct['S']
+ if 'N' in dct:
+ return convert_num(dct['N'])
+ if 'SS' in dct:
+ return set(dct['SS'])
+ if 'NS' in dct:
+ return set(map(convert_num, dct['NS']))
+ if 'B' in dct:
+ return base64.b64decode(dct['B'])
+ if 'BS' in dct:
+ return set(map(convert_binary, dct['BS']))
+ return dct
+
+
+def table_generator(tgen):
+ """
+ A low-level generator used to page through results from
+ query and scan operations. This is used by
+ :class:`boto.dynamodb.layer2.TableGenerator` and is not intended
+ to be used outside of that context.
+ """
+ response = True
+ n = 0
+ while response:
+ if tgen.max_results and n == tgen.max_results:
+ break
+ if response is True:
+ pass
+ elif 'LastEvaluatedKey' in response:
+ lek = response['LastEvaluatedKey']
+ esk = tgen.table.layer2.dynamize_last_evaluated_key(lek)
+ tgen.kwargs['exclusive_start_key'] = esk
+ else:
+ break
+ response = tgen.callable(**tgen.kwargs)
+ if 'ConsumedCapacityUnits' in response:
+ tgen.consumed_units += response['ConsumedCapacityUnits']
+ for item in response['Items']:
+ if tgen.max_results and n == tgen.max_results:
+ break
+ yield tgen.item_class(tgen.table, attrs=item)
+ n += 1
+
+
+class TableGenerator:
+ """
+ This is an object that wraps up the table_generator function.
+ The only real reason to have this is that we want to be able
+ to accumulate and return the ConsumedCapacityUnits element that
+ is part of each response.
+
+    :ivar consumed_units: The number of ConsumedCapacityUnits
+        accumulated thus far for this generator.
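+
+    A typical (illustrative) use, assuming ``table`` is an existing
+    :class:`boto.dynamodb.table.Table`::
+
+        >>> gen = table.scan()
+        >>> for item in gen:
+        ...     print item
+        >>> print gen.consumed_units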
+ """
+
+ def __init__(self, table, callable, max_results, item_class, kwargs):
+ self.table = table
+ self.callable = callable
+ self.max_results = max_results
+ self.item_class = item_class
+ self.kwargs = kwargs
+ self.consumed_units = 0
+
+ def __iter__(self):
+ return table_generator(self)
+
+
+class Layer2(object):
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None, proxy=None, proxy_port=None,
+ debug=0, security_token=None, region=None,
+ validate_certs=True):
+ self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key,
+ is_secure, port, proxy, proxy_port,
+ debug, security_token, region,
+ validate_certs=validate_certs)
+
+ def dynamize_attribute_updates(self, pending_updates):
+ """
+ Convert a set of pending item updates into the structure
+ required by Layer1.
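+
+        For example (a sketch; the attribute name is illustrative)::
+
+            >>> layer2.dynamize_attribute_updates({'views': ('ADD', 2)})
+            {'views': {'Action': 'ADD', 'Value': {'N': '2'}}}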
+ """
+ d = {}
+ for attr_name in pending_updates:
+ action, value = pending_updates[attr_name]
+ if value is None:
+ # DELETE without an attribute value
+ d[attr_name] = {"Action": action}
+ else:
+ d[attr_name] = {"Action": action,
+ "Value": dynamize_value(value)}
+ return d
+
+ def dynamize_item(self, item):
+ d = {}
+ for attr_name in item:
+ d[attr_name] = dynamize_value(item[attr_name])
+ return d
+
+ def dynamize_range_key_condition(self, range_key_condition):
+ """
+ Convert a layer2 range_key_condition parameter into the
+ structure required by Layer1.
+ """
+ return range_key_condition.to_dict()
+
+ def dynamize_scan_filter(self, scan_filter):
+ """
+ Convert a layer2 scan_filter parameter into the
+ structure required by Layer1.
+ """
+ d = None
+ if scan_filter:
+ d = {}
+ for attr_name in scan_filter:
+ condition = scan_filter[attr_name]
+ d[attr_name] = condition.to_dict()
+ return d
+
+ def dynamize_expected_value(self, expected_value):
+ """
+ Convert an expected_value parameter into the data structure
+ required for Layer1.
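+
+        For example (a sketch; the attribute names are illustrative)::
+
+            >>> layer2.dynamize_expected_value({'deleted': False})
+            {'deleted': {'Exists': False}}
+            >>> layer2.dynamize_expected_value({'name': 'foo'})
+            {'name': {'Value': {'S': 'foo'}}}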
+ """
+ d = None
+ if expected_value:
+ d = {}
+ for attr_name in expected_value:
+ attr_value = expected_value[attr_name]
+ if attr_value is True:
+ attr_value = {'Exists': True}
+ elif attr_value is False:
+ attr_value = {'Exists': False}
+ else:
+ val = dynamize_value(expected_value[attr_name])
+ attr_value = {'Value': val}
+ d[attr_name] = attr_value
+ return d
+
+ def dynamize_last_evaluated_key(self, last_evaluated_key):
+ """
+ Convert a last_evaluated_key parameter into the data structure
+ required for Layer1.
+ """
+ d = None
+ if last_evaluated_key:
+ hash_key = last_evaluated_key['HashKeyElement']
+ d = {'HashKeyElement': dynamize_value(hash_key)}
+ if 'RangeKeyElement' in last_evaluated_key:
+ range_key = last_evaluated_key['RangeKeyElement']
+ d['RangeKeyElement'] = dynamize_value(range_key)
+ return d
+
+ def build_key_from_values(self, schema, hash_key, range_key=None):
+ """
+ Build a Key structure to be used for accessing items
+ in Amazon DynamoDB. This method takes the supplied hash_key
+ and optional range_key and validates them against the
+ schema. If there is a mismatch, a TypeError is raised.
+        Otherwise, a Python dict version of an Amazon DynamoDB Key
+ data structure is returned.
+
+ :type hash_key: int, float, str, or unicode
+ :param hash_key: The hash key of the item you are looking for.
+ The type of the hash key should match the type defined in
+ the schema.
+
+ :type range_key: int, float, str or unicode
+        :param range_key: The range key of the item you are looking for.
+ This should be supplied only if the schema requires a
+ range key. The type of the range key should match the
+ type defined in the schema.
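+
+        For example, for a schema with a string hash key and a numeric
+        range key, a (schematic) call would look like::
+
+            >>> layer2.build_key_from_values(table.schema, 'foo', 1)
+            {'HashKeyElement': {'S': 'foo'}, 'RangeKeyElement': {'N': '1'}}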
+ """
+ dynamodb_key = {}
+ dynamodb_value = dynamize_value(hash_key)
+ if dynamodb_value.keys()[0] != schema.hash_key_type:
+ msg = 'Hashkey must be of type: %s' % schema.hash_key_type
+ raise TypeError(msg)
+ dynamodb_key['HashKeyElement'] = dynamodb_value
+ if range_key is not None:
+ dynamodb_value = dynamize_value(range_key)
+ if dynamodb_value.keys()[0] != schema.range_key_type:
+ msg = 'RangeKey must be of type: %s' % schema.range_key_type
+ raise TypeError(msg)
+ dynamodb_key['RangeKeyElement'] = dynamodb_value
+ return dynamodb_key
+
+ def new_batch_list(self):
+ """
+ Return a new, empty :class:`boto.dynamodb.batch.BatchList`
+ object.
+ """
+ return BatchList(self)
+
+ def new_batch_write_list(self):
+ """
+ Return a new, empty :class:`boto.dynamodb.batch.BatchWriteList`
+ object.
+ """
+ return BatchWriteList(self)
+
+ def list_tables(self, limit=None):
+ """
+ Return a list of the names of all tables associated with the
+ current account and region.
+
+ :type limit: int
+ :param limit: The maximum number of tables to return.
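+
+        Example (illustrative; the table names are hypothetical)::
+
+            >>> conn.list_tables()
+            ['messages', 'users']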
+ """
+ tables = []
+ start_table = None
+ while not limit or len(tables) < limit:
+ this_round_limit = None
+ if limit:
+ this_round_limit = limit - len(tables)
+ this_round_limit = min(this_round_limit, 100)
+            result = self.layer1.list_tables(limit=this_round_limit,
+                                             start_table=start_table)
+ tables.extend(result.get('TableNames', []))
+ start_table = result.get('LastEvaluatedTableName', None)
+ if not start_table:
+ break
+ return tables
+
+ def describe_table(self, name):
+ """
+ Retrieve information about an existing table.
+
+ :type name: str
+ :param name: The name of the desired table.
+
+ """
+ return self.layer1.describe_table(name)
+
+ def get_table(self, name):
+ """
+ Retrieve the Table object for an existing table.
+
+ :type name: str
+ :param name: The name of the desired table.
+
+ :rtype: :class:`boto.dynamodb.table.Table`
+ :return: A Table object representing the table.
+ """
+ response = self.layer1.describe_table(name)
+ return Table(self, response)
+
+ lookup = get_table
+
+ def create_table(self, name, schema, read_units, write_units):
+ """
+ Create a new Amazon DynamoDB table.
+
+ :type name: str
+ :param name: The name of the desired table.
+
+ :type schema: :class:`boto.dynamodb.schema.Schema`
+ :param schema: The Schema object that defines the schema used
+ by this table.
+
+ :type read_units: int
+ :param read_units: The value for ReadCapacityUnits.
+
+ :type write_units: int
+ :param write_units: The value for WriteCapacityUnits.
+
+ :rtype: :class:`boto.dynamodb.table.Table`
+ :return: A Table object representing the new Amazon DynamoDB table.
+ """
+ response = self.layer1.create_table(name, schema.dict,
+ {'ReadCapacityUnits': read_units,
+ 'WriteCapacityUnits': write_units})
+ return Table(self, response)
+
+ def update_throughput(self, table, read_units, write_units):
+ """
+ Update the ProvisionedThroughput for the Amazon DynamoDB Table.
+
+ :type table: :class:`boto.dynamodb.table.Table`
+ :param table: The Table object whose throughput is being updated.
+
+ :type read_units: int
+ :param read_units: The new value for ReadCapacityUnits.
+
+ :type write_units: int
+ :param write_units: The new value for WriteCapacityUnits.
+ """
+ response = self.layer1.update_table(table.name,
+ {'ReadCapacityUnits': read_units,
+ 'WriteCapacityUnits': write_units})
+ table.update_from_response(response)
+
+ def delete_table(self, table):
+ """
+        Delete this table and all items in it. After calling this,
+        the Table object's status attribute will be set to 'DELETING'.
+
+ :type table: :class:`boto.dynamodb.table.Table`
+ :param table: The Table object that is being deleted.
+ """
+ response = self.layer1.delete_table(table.name)
+ table.update_from_response(response)
+
+ def create_schema(self, hash_key_name, hash_key_proto_value,
+ range_key_name=None, range_key_proto_value=None):
+ """
+ Create a Schema object used when creating a Table.
+
+ :type hash_key_name: str
+ :param hash_key_name: The name of the HashKey for the schema.
+
+ :type hash_key_proto_value: int|long|float|str|unicode
+ :param hash_key_proto_value: A sample or prototype of the type
+ of value you want to use for the HashKey. Alternatively,
+ you can also just pass in the Python type (e.g. int, float, etc.).
+
+ :type range_key_name: str
+ :param range_key_name: The name of the RangeKey for the schema.
+ This parameter is optional.
+
+ :type range_key_proto_value: int|long|float|str|unicode
+ :param range_key_proto_value: A sample or prototype of the type
+ of value you want to use for the RangeKey. Alternatively,
+ you can also pass in the Python type (e.g. int, float, etc.)
+ This parameter is optional.
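+
+        Example (illustrative)::
+
+            >>> schema = conn.create_schema('id', int, 'timestamp', float)
+            >>> schema.hash_key_type, schema.range_key_type
+            ('N', 'N')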
+ """
+ schema = {}
+ hash_key = {}
+ hash_key['AttributeName'] = hash_key_name
+ hash_key_type = get_dynamodb_type(hash_key_proto_value)
+ hash_key['AttributeType'] = hash_key_type
+ schema['HashKeyElement'] = hash_key
+ if range_key_name and range_key_proto_value is not None:
+ range_key = {}
+ range_key['AttributeName'] = range_key_name
+ range_key_type = get_dynamodb_type(range_key_proto_value)
+ range_key['AttributeType'] = range_key_type
+ schema['RangeKeyElement'] = range_key
+ return Schema(schema)
+
+ def get_item(self, table, hash_key, range_key=None,
+ attributes_to_get=None, consistent_read=False,
+ item_class=Item):
+ """
+ Retrieve an existing item from the table.
+
+ :type table: :class:`boto.dynamodb.table.Table`
+ :param table: The Table object from which the item is retrieved.
+
+ :type hash_key: int|long|float|str|unicode
+ :param hash_key: The HashKey of the requested item. The
+ type of the value must match the type defined in the
+ schema for the table.
+
+ :type range_key: int|long|float|str|unicode
+ :param range_key: The optional RangeKey of the requested item.
+ The type of the value must match the type defined in the
+ schema for the table.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+
+ :type consistent_read: bool
+ :param consistent_read: If True, a consistent read
+ request is issued. Otherwise, an eventually consistent
+ request is issued.
+
+ :type item_class: Class
+ :param item_class: Allows you to override the class used
+ to generate the items. This should be a subclass of
+ :class:`boto.dynamodb.item.Item`
+ """
+ key = self.build_key_from_values(table.schema, hash_key, range_key)
+ response = self.layer1.get_item(table.name, key,
+ attributes_to_get, consistent_read,
+ object_hook=item_object_hook)
+ item = item_class(table, hash_key, range_key, response['Item'])
+ if 'ConsumedCapacityUnits' in response:
+ item.consumed_units = response['ConsumedCapacityUnits']
+ return item
+
+ def batch_get_item(self, batch_list):
+ """
+        Return a set of attributes for multiple items in
+ multiple tables using their primary keys.
+
+ :type batch_list: :class:`boto.dynamodb.batch.BatchList`
+ :param batch_list: A BatchList object which consists of a
+            list of :class:`boto.dynamodb.batch.Batch` objects.
+ Each Batch object contains the information about one
+ batch of objects that you wish to retrieve in this
+ request.
+ """
+ request_items = batch_list.to_dict()
+ return self.layer1.batch_get_item(request_items,
+ object_hook=item_object_hook)
+
+ def batch_write_item(self, batch_list):
+ """
+ Performs multiple Puts and Deletes in one batch.
+
+ :type batch_list: :class:`boto.dynamodb.batch.BatchWriteList`
+ :param batch_list: A BatchWriteList object which consists of a
+            list of :class:`boto.dynamodb.batch.BatchWrite` objects.
+            Each BatchWrite object contains the information about one
+ batch of objects that you wish to put or delete.
+ """
+ request_items = batch_list.to_dict()
+ return self.layer1.batch_write_item(request_items,
+ object_hook=item_object_hook)
+
+ def put_item(self, item, expected_value=None, return_values=None):
+ """
+ Store a new item or completely replace an existing item
+ in Amazon DynamoDB.
+
+ :type item: :class:`boto.dynamodb.item.Item`
+ :param item: The Item to write to Amazon DynamoDB.
+
+ :type expected_value: dict
+ :param expected_value: A dictionary of name/value pairs that you expect.
+ This dictionary should have name/value pairs where the name
+ is the name of the attribute and the value is either the value
+ you are expecting or False if you expect the attribute not to
+ exist.
+
+ :type return_values: str
+ :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+ values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+ specified and the item is overwritten, the content
+ of the old item is returned.
+ """
+ expected_value = self.dynamize_expected_value(expected_value)
+ response = self.layer1.put_item(item.table.name,
+ self.dynamize_item(item),
+ expected_value, return_values,
+ object_hook=item_object_hook)
+ if 'ConsumedCapacityUnits' in response:
+ item.consumed_units = response['ConsumedCapacityUnits']
+ return response
+
+ def update_item(self, item, expected_value=None, return_values=None):
+ """
+ Commit pending item updates to Amazon DynamoDB.
+
+ :type item: :class:`boto.dynamodb.item.Item`
+ :param item: The Item to update in Amazon DynamoDB. It is expected
+ that you would have called the add_attribute, put_attribute
+ and/or delete_attribute methods on this Item prior to calling
+ this method. Those queued changes are what will be updated.
+
+ :type expected_value: dict
+ :param expected_value: A dictionary of name/value pairs that you
+ expect. This dictionary should have name/value pairs where the
+ name is the name of the attribute and the value is either the
+ value you are expecting or False if you expect the attribute
+ not to exist.
+
+ :type return_values: str
+ :param return_values: Controls the return of attribute name/value pairs
+ before they were updated. Possible values are: None, 'ALL_OLD',
+ 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
+ specified and the item is overwritten, the content of the old item
+ is returned. If 'ALL_NEW' is specified, then all the attributes of
+ the new version of the item are returned. If 'UPDATED_NEW' is
+ specified, the new versions of only the updated attributes are
+ returned.
+
+ """
+ expected_value = self.dynamize_expected_value(expected_value)
+ key = self.build_key_from_values(item.table.schema,
+ item.hash_key, item.range_key)
+ attr_updates = self.dynamize_attribute_updates(item._updates)
+
+ response = self.layer1.update_item(item.table.name, key,
+ attr_updates,
+ expected_value, return_values,
+ object_hook=item_object_hook)
+ item._updates.clear()
+ if 'ConsumedCapacityUnits' in response:
+ item.consumed_units = response['ConsumedCapacityUnits']
+ return response
+
+ def delete_item(self, item, expected_value=None, return_values=None):
+ """
+ Delete the item from Amazon DynamoDB.
+
+ :type item: :class:`boto.dynamodb.item.Item`
+ :param item: The Item to delete from Amazon DynamoDB.
+
+ :type expected_value: dict
+ :param expected_value: A dictionary of name/value pairs that you expect.
+ This dictionary should have name/value pairs where the name
+ is the name of the attribute and the value is either the value
+ you are expecting or False if you expect the attribute not to
+ exist.
+
+ :type return_values: str
+ :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+            specified, the content of the deleted item is
+            returned.
+ """
+ expected_value = self.dynamize_expected_value(expected_value)
+ key = self.build_key_from_values(item.table.schema,
+ item.hash_key, item.range_key)
+ return self.layer1.delete_item(item.table.name, key,
+ expected=expected_value,
+ return_values=return_values,
+ object_hook=item_object_hook)
+
+ def query(self, table, hash_key, range_key_condition=None,
+ attributes_to_get=None, request_limit=None,
+ max_results=None, consistent_read=False,
+ scan_index_forward=True, exclusive_start_key=None,
+ item_class=Item):
+ """
+ Perform a query on the table.
+
+ :type table: :class:`boto.dynamodb.table.Table`
+ :param table: The Table object that is being queried.
+
+ :type hash_key: int|long|float|str|unicode
+ :param hash_key: The HashKey of the requested item. The
+ type of the value must match the type defined in the
+ schema for the table.
+
+ :type range_key_condition: :class:`boto.dynamodb.condition.Condition`
+ :param range_key_condition: A Condition object.
+            The Condition object can be one of the following types:
+
+ EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN
+
+ The only condition which expects or will accept two
+ values is 'BETWEEN', otherwise a single value should
+ be passed to the Condition constructor.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+
+ :type request_limit: int
+ :param request_limit: The maximum number of items to retrieve
+ from Amazon DynamoDB on each request. You may want to set
+ a specific request_limit based on the provisioned throughput
+ of your table. The default behavior is to retrieve as many
+ results as possible per request.
+
+ :type max_results: int
+ :param max_results: The maximum number of results that will
+ be retrieved from Amazon DynamoDB in total. For example,
+ if you only wanted to see the first 100 results from the
+ query, regardless of how many were actually available, you
+ could set max_results to 100 and the generator returned
+            from the query method will only yield 100 results max.
+
+ :type consistent_read: bool
+ :param consistent_read: If True, a consistent read
+ request is issued. Otherwise, an eventually consistent
+ request is issued.
+
+ :type scan_index_forward: bool
+        :param scan_index_forward: Specifies forward or backward
+ traversal of the index. Default is forward (True).
+
+ :type exclusive_start_key: list or tuple
+ :param exclusive_start_key: Primary key of the item from
+ which to continue an earlier query. This would be
+ provided as the LastEvaluatedKey in that query.
+
+ :type item_class: Class
+ :param item_class: Allows you to override the class used
+ to generate the items. This should be a subclass of
+ :class:`boto.dynamodb.item.Item`
+
+ :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
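+
+        Example (illustrative; assumes a table with a numeric range key)::
+
+            >>> from boto.dynamodb.condition import GT
+            >>> for item in conn.query(table, 'my-hash-key',
+            ...         range_key_condition=GT(100)):
+            ...     print item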
+ """
+ if range_key_condition:
+ rkc = self.dynamize_range_key_condition(range_key_condition)
+ else:
+ rkc = None
+ if exclusive_start_key:
+ esk = self.build_key_from_values(table.schema,
+ *exclusive_start_key)
+ else:
+ esk = None
+ kwargs = {'table_name': table.name,
+ 'hash_key_value': dynamize_value(hash_key),
+ 'range_key_conditions': rkc,
+ 'attributes_to_get': attributes_to_get,
+ 'limit': request_limit,
+ 'consistent_read': consistent_read,
+ 'scan_index_forward': scan_index_forward,
+ 'exclusive_start_key': esk,
+ 'object_hook': item_object_hook}
+ return TableGenerator(table, self.layer1.query,
+ max_results, item_class, kwargs)
+
+ def scan(self, table, scan_filter=None,
+ attributes_to_get=None, request_limit=None, max_results=None,
+ count=False, exclusive_start_key=None, item_class=Item):
+ """
+ Perform a scan of DynamoDB.
+
+ :type table: :class:`boto.dynamodb.table.Table`
+ :param table: The Table object that is being scanned.
+
+        :type scan_filter: dict
+ :param scan_filter: A dictionary where the key is the
+ attribute name and the value is a
+ :class:`boto.dynamodb.condition.Condition` object.
+ Valid Condition objects include:
+
+ * EQ - equal (1)
+ * NE - not equal (1)
+ * LE - less than or equal (1)
+ * LT - less than (1)
+ * GE - greater than or equal (1)
+ * GT - greater than (1)
+ * NOT_NULL - attribute exists (0, use None)
+ * NULL - attribute does not exist (0, use None)
+ * CONTAINS - substring or value in list (1)
+ * NOT_CONTAINS - absence of substring or value in list (1)
+ * BEGINS_WITH - substring prefix (1)
+ * IN - exact match in list (N)
+ * BETWEEN - >= first value, <= second value (2)
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+
+ :type request_limit: int
+ :param request_limit: The maximum number of items to retrieve
+ from Amazon DynamoDB on each request. You may want to set
+ a specific request_limit based on the provisioned throughput
+ of your table. The default behavior is to retrieve as many
+ results as possible per request.
+
+ :type max_results: int
+ :param max_results: The maximum number of results that will
+ be retrieved from Amazon DynamoDB in total. For example,
+ if you only wanted to see the first 100 results from the
+            scan, regardless of how many were actually available, you
+            could set max_results to 100 and the generator returned
+            from the scan method will only yield 100 results max.
+
+ :type count: bool
+ :param count: If True, Amazon DynamoDB returns a total
+ number of items for the Scan operation, even if the
+ operation has no matching items for the assigned filter.
+
+ :type exclusive_start_key: list or tuple
+ :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier scan. This would be
+            provided as the LastEvaluatedKey in that scan.
+
+ :type item_class: Class
+ :param item_class: Allows you to override the class used
+ to generate the items. This should be a subclass of
+ :class:`boto.dynamodb.item.Item`
+
+ :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
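+
+        Example (illustrative; assumes a numeric ``rating`` attribute)::
+
+            >>> from boto.dynamodb.condition import GT
+            >>> for item in conn.scan(table, scan_filter={'rating': GT(3)}):
+            ...     print item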
+ """
+ if exclusive_start_key:
+ esk = self.build_key_from_values(table.schema,
+ *exclusive_start_key)
+ else:
+ esk = None
+ kwargs = {'table_name': table.name,
+ 'scan_filter': self.dynamize_scan_filter(scan_filter),
+ 'attributes_to_get': attributes_to_get,
+ 'limit': request_limit,
+ 'count': count,
+ 'exclusive_start_key': esk,
+ 'object_hook': item_object_hook}
+ return TableGenerator(table, self.layer1.scan,
+ max_results, item_class, kwargs)
diff --git a/boto/dynamodb/schema.py b/boto/dynamodb/schema.py
new file mode 100644
index 0000000..34ff212
--- /dev/null
+++ b/boto/dynamodb/schema.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
+class Schema(object):
+ """
+ Represents a DynamoDB schema.
+
+ :ivar hash_key_name: The name of the hash key of the schema.
+ :ivar hash_key_type: The DynamoDB type specification for the
+ hash key of the schema.
+ :ivar range_key_name: The name of the range key of the schema
+ or None if no range key is defined.
+ :ivar range_key_type: The DynamoDB type specification for the
+ range key of the schema or None if no range key is defined.
+ :ivar dict: The underlying Python dictionary that needs to be
+ passed to Layer1 methods.
+ """
+
+ def __init__(self, schema_dict):
+ self._dict = schema_dict
+
+ def __repr__(self):
+ if self.range_key_name:
+ s = 'Schema(%s:%s)' % (self.hash_key_name, self.range_key_name)
+ else:
+ s = 'Schema(%s)' % self.hash_key_name
+ return s
+
+ @property
+ def dict(self):
+ return self._dict
+
+ @property
+ def hash_key_name(self):
+ return self._dict['HashKeyElement']['AttributeName']
+
+ @property
+ def hash_key_type(self):
+ return self._dict['HashKeyElement']['AttributeType']
+
+ @property
+ def range_key_name(self):
+ name = None
+ if 'RangeKeyElement' in self._dict:
+ name = self._dict['RangeKeyElement']['AttributeName']
+ return name
+
+ @property
+ def range_key_type(self):
+        rk_type = None
+        if 'RangeKeyElement' in self._dict:
+            rk_type = self._dict['RangeKeyElement']['AttributeType']
+        return rk_type
diff --git a/boto/dynamodb/table.py b/boto/dynamodb/table.py
new file mode 100644
index 0000000..ee73b1a
--- /dev/null
+++ b/boto/dynamodb/table.py
@@ -0,0 +1,490 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.dynamodb.batch import BatchList
+from boto.dynamodb.schema import Schema
+from boto.dynamodb.item import Item
+from boto.dynamodb import exceptions as dynamodb_exceptions
+import time
+
+class TableBatchGenerator(object):
+ """
+ A low-level generator used to page through results from
+ batch_get_item operations.
+
+    :ivar consumed_units: The number of ConsumedCapacityUnits
+        accumulated thus far for this generator.
+ """
+
+ def __init__(self, table, keys, attributes_to_get=None):
+ self.table = table
+ self.keys = keys
+ self.consumed_units = 0
+ self.attributes_to_get = attributes_to_get
+
+ def _queue_unprocessed(self, res):
+        if u'UnprocessedKeys' not in res:
+            return
+        if self.table.name not in res[u'UnprocessedKeys']:
+            return
+
+ keys = res[u'UnprocessedKeys'][self.table.name][u'Keys']
+
+ for key in keys:
+ h = key[u'HashKeyElement']
+ r = key[u'RangeKeyElement'] if u'RangeKeyElement' in key else None
+ self.keys.append((h, r))
+
+ def __iter__(self):
+ while self.keys:
+ # Build the next batch
+ batch = BatchList(self.table.layer2)
+ batch.add_batch(self.table, self.keys[:100], self.attributes_to_get)
+ res = batch.submit()
+
+ # parse the results
+            if self.table.name not in res[u'Responses']:
+ continue
+ self.consumed_units += res[u'Responses'][self.table.name][u'ConsumedCapacityUnits']
+ for elem in res[u'Responses'][self.table.name][u'Items']:
+ yield elem
+
+            # re-queue unprocessed keys
+ self.keys = self.keys[100:]
+ self._queue_unprocessed(res)
+
+
+class Table(object):
+ """
+ An Amazon DynamoDB table.
+
+ :ivar name: The name of the table.
+ :ivar create_time: The date and time that the table was created.
+ :ivar status: The current status of the table. One of:
+ 'ACTIVE', 'UPDATING', 'DELETING'.
+ :ivar schema: A :class:`boto.dynamodb.schema.Schema` object representing
+ the schema defined for the table.
+ :ivar item_count: The number of items in the table. This value is
+ set only when the Table object is created or refreshed and
+ may not reflect the actual count.
+ :ivar size_bytes: Total size of the specified table, in bytes.
+ Amazon DynamoDB updates this value approximately every six hours.
+ Recent changes might not be reflected in this value.
+    :ivar read_units: The ReadCapacityUnits of the table's
+        ProvisionedThroughput.
+    :ivar write_units: The WriteCapacityUnits of the table's
+        ProvisionedThroughput.
+ """
+
+ def __init__(self, layer2, response):
+ self.layer2 = layer2
+ self._dict = {}
+ self.update_from_response(response)
+
+ def __repr__(self):
+ return 'Table(%s)' % self.name
+
+ @property
+ def name(self):
+ return self._dict['TableName']
+
+ @property
+ def create_time(self):
+ return self._dict['CreationDateTime']
+
+ @property
+ def status(self):
+ return self._dict['TableStatus']
+
+ @property
+ def item_count(self):
+ return self._dict.get('ItemCount', 0)
+
+ @property
+ def size_bytes(self):
+ return self._dict.get('TableSizeBytes', 0)
+
+ @property
+ def schema(self):
+ return self._schema
+
+ @property
+ def read_units(self):
+ return self._dict['ProvisionedThroughput']['ReadCapacityUnits']
+
+ @property
+ def write_units(self):
+ return self._dict['ProvisionedThroughput']['WriteCapacityUnits']
+
+ def update_from_response(self, response):
+ """
+ Update the state of the Table object based on the response
+ data received from Amazon DynamoDB.
+ """
+ if 'Table' in response:
+ self._dict.update(response['Table'])
+ elif 'TableDescription' in response:
+ self._dict.update(response['TableDescription'])
+ if 'KeySchema' in self._dict:
+ self._schema = Schema(self._dict['KeySchema'])
+
+ def refresh(self, wait_for_active=False, retry_seconds=5):
+ """
+ Refresh all of the fields of the Table object by calling
+ the underlying DescribeTable request.
+
+ :type wait_for_active: bool
+ :param wait_for_active: If True, this command will not return
+ until the table status, as returned from Amazon DynamoDB, is
+ 'ACTIVE'.
+
+ :type retry_seconds: int
+ :param retry_seconds: If wait_for_active is True, this
+ parameter controls the number of seconds of delay between
+            calls to DescribeTable in Amazon DynamoDB. Default is 5 seconds.
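+
+        Example (illustrative)::
+
+            >>> table.refresh(wait_for_active=True, retry_seconds=10)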
+ """
+ done = False
+ while not done:
+ response = self.layer2.describe_table(self.name)
+ self.update_from_response(response)
+ if wait_for_active:
+ if self.status == 'ACTIVE':
+ done = True
+ else:
+ time.sleep(retry_seconds)
+ else:
+ done = True
+
+ def update_throughput(self, read_units, write_units):
+ """
+ Update the ProvisionedThroughput for the Amazon DynamoDB Table.
+
+ :type read_units: int
+ :param read_units: The new value for ReadCapacityUnits.
+
+ :type write_units: int
+ :param write_units: The new value for WriteCapacityUnits.
+ """
+ self.layer2.update_throughput(self, read_units, write_units)
+
+ def delete(self):
+ """
+        Delete this table and all items in it. After calling this,
+        the Table object's status attribute will be set to 'DELETING'.
+ """
+ self.layer2.delete_table(self)
+
+ def get_item(self, hash_key, range_key=None,
+ attributes_to_get=None, consistent_read=False,
+ item_class=Item):
+ """
+ Retrieve an existing item from the table.
+
+ :type hash_key: int|long|float|str|unicode
+ :param hash_key: The HashKey of the requested item. The
+ type of the value must match the type defined in the
+ schema for the table.
+
+ :type range_key: int|long|float|str|unicode
+ :param range_key: The optional RangeKey of the requested item.
+ The type of the value must match the type defined in the
+ schema for the table.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+
+ :type consistent_read: bool
+ :param consistent_read: If True, a consistent read
+ request is issued. Otherwise, an eventually consistent
+ request is issued.
+
+ :type item_class: Class
+ :param item_class: Allows you to override the class used
+ to generate the items. This should be a subclass of
+ :class:`boto.dynamodb.item.Item`
+ """
+ return self.layer2.get_item(self, hash_key, range_key,
+ attributes_to_get, consistent_read,
+ item_class)
+ lookup = get_item
+
+ def has_item(self, hash_key, range_key=None, consistent_read=False):
+ """
+ Checks the table to see if the Item with the specified ``hash_key``
+ exists. This may save a tiny bit of time/bandwidth over a
+ straight :py:meth:`get_item` if you have no intention to touch
+ the data that is returned, since this method specifically tells
+ Amazon not to return anything but the Item's key.
+
+ :type hash_key: int|long|float|str|unicode
+ :param hash_key: The HashKey of the requested item. The
+ type of the value must match the type defined in the
+ schema for the table.
+
+ :type range_key: int|long|float|str|unicode
+ :param range_key: The optional RangeKey of the requested item.
+ The type of the value must match the type defined in the
+ schema for the table.
+
+ :type consistent_read: bool
+ :param consistent_read: If True, a consistent read
+ request is issued. Otherwise, an eventually consistent
+ request is issued.
+
+ :rtype: bool
+ :returns: ``True`` if the Item exists, ``False`` if not.
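+
+        Example (illustrative; the key value is hypothetical)::
+
+            >>> if table.has_item('my-hash-key'):
+            ...     print 'found it'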
+ """
+ try:
+ # Attempt to get the key. If it can't be found, it'll raise
+ # an exception.
+ self.get_item(hash_key, range_key=range_key,
+ # This minimizes the size of the response body.
+                          attributes_to_get=[self.schema.hash_key_name],
+ consistent_read=consistent_read)
+ except dynamodb_exceptions.DynamoDBKeyNotFoundError:
+ # Key doesn't exist.
+ return False
+ return True
+
+ def new_item(self, hash_key=None, range_key=None, attrs=None,
+ item_class=Item):
+ """
+        Return a new, unsaved Item which can later be PUT to
+ Amazon DynamoDB.
+
+ This method has explicit (but optional) parameters for
+ the hash_key and range_key values of the item. You can use
+ these explicit parameters when calling the method, such as::
+
+ >>> my_item = my_table.new_item(hash_key='a', range_key=1,
+        ...     attrs={'key1': 'val1', 'key2': 'val2'})
+ >>> my_item
+ {u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
+
+ Or, if you prefer, you can simply put the hash_key and range_key
+ in the attrs dictionary itself, like this::
+
+ >>> attrs = {'foo': 'a', 'bar': 1, 'key1': 'val1', 'key2': 'val2'}
+ >>> my_item = my_table.new_item(attrs=attrs)
+ >>> my_item
+ {u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
+
+ The effect is the same.
+
+        .. note::
+ The explicit parameters take priority over the values in
+ the attrs dict. So, if you have a hash_key or range_key
+ in the attrs dict and you also supply either or both using
+ the explicit parameters, the values in the attrs will be
+ ignored.
+
+ :type hash_key: int|long|float|str|unicode
+ :param hash_key: The HashKey of the new item. The
+ type of the value must match the type defined in the
+ schema for the table.
+
+ :type range_key: int|long|float|str|unicode
+ :param range_key: The optional RangeKey of the new item.
+ The type of the value must match the type defined in the
+ schema for the table.
+
+ :type attrs: dict
+ :param attrs: A dictionary of key value pairs used to
+ populate the new item.
+
+ :type item_class: Class
+ :param item_class: Allows you to override the class used
+ to generate the items. This should be a subclass of
+ :class:`boto.dynamodb.item.Item`
+ """
+ return item_class(self, hash_key, range_key, attrs)
+
+ def query(self, hash_key, range_key_condition=None,
+ attributes_to_get=None, request_limit=None,
+ max_results=None, consistent_read=False,
+ scan_index_forward=True, exclusive_start_key=None,
+ item_class=Item):
+ """
+ Perform a query on the table.
+
+ :type hash_key: int|long|float|str|unicode
+ :param hash_key: The HashKey of the requested item. The
+ type of the value must match the type defined in the
+ schema for the table.
+
+ :type range_key_condition: :class:`boto.dynamodb.condition.Condition`
+ :param range_key_condition: A Condition object.
+            The Condition object can be one of the following types:
+
+ EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN
+
+ The only condition which expects or will accept two
+ values is 'BETWEEN', otherwise a single value should
+ be passed to the Condition constructor.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+
+ :type request_limit: int
+ :param request_limit: The maximum number of items to retrieve
+ from Amazon DynamoDB on each request. You may want to set
+ a specific request_limit based on the provisioned throughput
+ of your table. The default behavior is to retrieve as many
+ results as possible per request.
+
+ :type max_results: int
+ :param max_results: The maximum number of results that will
+ be retrieved from Amazon DynamoDB in total. For example,
+ if you only wanted to see the first 100 results from the
+ query, regardless of how many were actually available, you
+ could set max_results to 100 and the generator returned
+            from the query method will only yield 100 results max.
+
+ :type consistent_read: bool
+ :param consistent_read: If True, a consistent read
+ request is issued. Otherwise, an eventually consistent
+ request is issued.
+
+ :type scan_index_forward: bool
+        :param scan_index_forward: Specifies forward or backward
+ traversal of the index. Default is forward (True).
+
+ :type exclusive_start_key: list or tuple
+ :param exclusive_start_key: Primary key of the item from
+ which to continue an earlier query. This would be
+ provided as the LastEvaluatedKey in that query.
+
+ :type item_class: Class
+ :param item_class: Allows you to override the class used
+ to generate the items. This should be a subclass of
+ :class:`boto.dynamodb.item.Item`
+ """
+ return self.layer2.query(self, hash_key, range_key_condition,
+ attributes_to_get, request_limit,
+ max_results, consistent_read,
+ scan_index_forward, exclusive_start_key,
+ item_class=item_class)
+
+ def scan(self, scan_filter=None,
+ attributes_to_get=None, request_limit=None, max_results=None,
+ count=False, exclusive_start_key=None, item_class=Item):
+ """
+        Scan through this table. This is a long and potentially
+        expensive operation and should be avoided if
+        at all possible.
+
+        :type scan_filter: dict
+        :param scan_filter: A dictionary where the key is the
+            attribute name and the value is a
+            :class:`boto.dynamodb.condition.Condition` object.
+            Valid Condition objects are shown below along with
+            the expected number of values that should be supplied.
+
+ * EQ - equal (1)
+ * NE - not equal (1)
+ * LE - less than or equal (1)
+ * LT - less than (1)
+ * GE - greater than or equal (1)
+ * GT - greater than (1)
+ * NOT_NULL - attribute exists (0, use None)
+ * NULL - attribute does not exist (0, use None)
+ * CONTAINS - substring or value in list (1)
+ * NOT_CONTAINS - absence of substring or value in list (1)
+ * BEGINS_WITH - substring prefix (1)
+ * IN - exact match in list (N)
+ * BETWEEN - >= first value, <= second value (2)
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+
+ :type request_limit: int
+ :param request_limit: The maximum number of items to retrieve
+ from Amazon DynamoDB on each request. You may want to set
+ a specific request_limit based on the provisioned throughput
+ of your table. The default behavior is to retrieve as many
+ results as possible per request.
+
+ :type max_results: int
+ :param max_results: The maximum number of results that will
+ be retrieved from Amazon DynamoDB in total. For example,
+ if you only wanted to see the first 100 results from the
+            scan, regardless of how many were actually available, you
+            could set max_results to 100 and the generator returned
+            from the scan method will only yield 100 results max.
+
+ :type count: bool
+ :param count: If True, Amazon DynamoDB returns a total
+ number of items for the Scan operation, even if the
+ operation has no matching items for the assigned filter.
+
+ :type exclusive_start_key: list or tuple
+ :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier scan. This would be
+            provided as the LastEvaluatedKey in that scan.
+
+ :type item_class: Class
+ :param item_class: Allows you to override the class used
+ to generate the items. This should be a subclass of
+ :class:`boto.dynamodb.item.Item`
+
+ :return: A TableGenerator (generator) object which will iterate over all results
+ :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
+ """
+ return self.layer2.scan(self, scan_filter, attributes_to_get,
+ request_limit, max_results, count,
+ exclusive_start_key, item_class=item_class)
+
+ def batch_get_item(self, keys, attributes_to_get=None):
+ """
+        Return a set of attributes for multiple items from a single table
+        using their primary keys. This abstraction removes the 100-item
+        per-batch limit as well as the "UnprocessedKeys" handling.
+
+ :type keys: list
+ :param keys: A list of scalar or tuple values. Each element in the
+ list represents one Item to retrieve. If the schema for the
+ table has both a HashKey and a RangeKey, each element in the
+ list should be a tuple consisting of (hash_key, range_key). If
+ the schema for the table contains only a HashKey, each element
+ in the list should be a scalar value of the appropriate type
+ for the table schema. NOTE: The maximum number of items that
+ can be retrieved for a single operation is 100. Also, the
+ number of items retrieved is constrained by a 1 MB size limit.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: A list of attribute names.
+ If supplied, only the specified attribute names will
+ be returned. Otherwise, all attributes will be returned.
+
+ :return: A TableBatchGenerator (generator) object which will iterate over all results
+ :rtype: :class:`boto.dynamodb.table.TableBatchGenerator`
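+
+        Example (illustrative; assumes a hash-and-range table)::
+
+            >>> keys = [('hash1', 1), ('hash2', 2)]
+            >>> for item in table.batch_get_item(keys):
+            ...     print item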
+ """
+ return TableBatchGenerator(self, keys, attributes_to_get)
diff --git a/boto/dynamodb/types.py b/boto/dynamodb/types.py
new file mode 100644
index 0000000..5b33076
--- /dev/null
+++ b/boto/dynamodb/types.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+"""
+Some utility functions to deal with mapping Amazon DynamoDB types to
+Python types and vice-versa.
+"""
+import base64
+
+
+def is_num(n):
+ types = (int, long, float, bool)
+ return isinstance(n, types) or n in types
+
+
+def is_str(n):
+ return isinstance(n, basestring) or (isinstance(n, type) and
+ issubclass(n, basestring))
+
+
+def is_binary(n):
+ return isinstance(n, Binary)
+
+
+def convert_num(s):
+ if '.' in s:
+ n = float(s)
+ else:
+ n = int(s)
+ return n
+
+
+def convert_binary(n):
+ return Binary(base64.b64decode(n))
+
+
+def get_dynamodb_type(val):
+ """
+ Take a scalar Python value and return a string representing
+ the corresponding Amazon DynamoDB type. If the value passed in is
+ not a supported type, raise a TypeError.
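+
+    For example::
+
+        >>> get_dynamodb_type(5)
+        'N'
+        >>> get_dynamodb_type('abc')
+        'S'
+        >>> get_dynamodb_type(set([1, 2]))
+        'NS'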
+ """
+ dynamodb_type = None
+ if is_num(val):
+ dynamodb_type = 'N'
+ elif is_str(val):
+ dynamodb_type = 'S'
+ elif isinstance(val, (set, frozenset)):
+ if False not in map(is_num, val):
+ dynamodb_type = 'NS'
+ elif False not in map(is_str, val):
+ dynamodb_type = 'SS'
+ elif False not in map(is_binary, val):
+ dynamodb_type = 'BS'
+ elif isinstance(val, Binary):
+ dynamodb_type = 'B'
+ if dynamodb_type is None:
+ msg = 'Unsupported type "%s" for value "%s"' % (type(val), val)
+ raise TypeError(msg)
+ return dynamodb_type
+
+
+def dynamize_value(val):
+ """
+ Take a scalar Python value and return a dict consisting
+ of the Amazon DynamoDB type specification and the value that
+ needs to be sent to Amazon DynamoDB. If the type of the value
+    is not supported, raise a TypeError.
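+
+    For example (a sketch of the wire format)::
+
+        >>> dynamize_value('hello')
+        {'S': 'hello'}
+        >>> dynamize_value(42)
+        {'N': '42'}
+        >>> dynamize_value(True)
+        {'N': '1'}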
+ """
+ def _str(val):
+ """
+ DynamoDB stores booleans as numbers. True is 1, False is 0.
+ This function converts Python booleans into DynamoDB friendly
+ representation.
+ """
+ if isinstance(val, bool):
+ return str(int(val))
+ return str(val)
+
+ dynamodb_type = get_dynamodb_type(val)
+ if dynamodb_type == 'N':
+ val = {dynamodb_type: _str(val)}
+ elif dynamodb_type == 'S':
+ val = {dynamodb_type: val}
+ elif dynamodb_type == 'NS':
+ val = {dynamodb_type: [str(n) for n in val]}
+ elif dynamodb_type == 'SS':
+ val = {dynamodb_type: [n for n in val]}
+ elif dynamodb_type == 'B':
+ val = {dynamodb_type: val.encode()}
+ elif dynamodb_type == 'BS':
+ val = {dynamodb_type: [n.encode() for n in val]}
+ return val
+
+
+class Binary(object):
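+    """
+    A wrapper for binary attribute values. Wrapping a byte string
+    in :class:`Binary` causes it to be sent to Amazon DynamoDB as
+    a 'B' (binary) type, base64-encoded on the wire via
+    :meth:`encode`.
+    """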
+ def __init__(self, value):
+ self.value = value
+
+ def encode(self):
+ return base64.b64encode(self.value)
+
+ def __eq__(self, other):
+ if isinstance(other, Binary):
+ return self.value == other.value
+ else:
+ return self.value == other
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __repr__(self):
+ return 'Binary(%s)' % self.value
+
+ def __str__(self):
+ return self.value
+
+ def __hash__(self):
+ return hash(self.value)
diff --git a/boto/ec2/__init__.py b/boto/ec2/__init__.py
index ff9422b..963b6d9 100644
--- a/boto/ec2/__init__.py
+++ b/boto/ec2/__init__.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -25,29 +25,31 @@
"""
from boto.ec2.connection import EC2Connection
+
def regions(**kw_params):
"""
Get all available regions for the EC2 service.
You may pass any of the arguments accepted by the EC2Connection
object's constructor as keyword arguments and they will be
passed along to the EC2Connection object.
-
+
:rtype: list
:return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
"""
c = EC2Connection(**kw_params)
return c.get_all_regions()
+
def connect_to_region(region_name, **kw_params):
"""
- Given a valid region name, return a
+ Given a valid region name, return a
:class:`boto.ec2.connection.EC2Connection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object.
:type: str
:param region_name: The name of the region to connect to.
-
+
:rtype: :class:`boto.ec2.connection.EC2Connection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
@@ -56,7 +58,8 @@
if region.name == region_name:
return region.connect(**kw_params)
return None
-
+
+
def get_region(region_name, **kw_params):
"""
Find and return a :class:`boto.ec2.regioninfo.RegionInfo` object
@@ -73,4 +76,3 @@
if region.name == region_name:
return region
return None
-
diff --git a/boto/ec2/address.py b/boto/ec2/address.py
index 770a904..9eadfaa 100644
--- a/boto/ec2/address.py
+++ b/boto/ec2/address.py
@@ -19,13 +19,22 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-"""
-Represents an EC2 Elastic IP Address
-"""
from boto.ec2.ec2object import EC2Object
class Address(EC2Object):
+ """
+ Represents an EC2 Elastic IP Address
+
+ :ivar public_ip: The Elastic IP address.
+ :ivar instance_id: The instance the address is associated with (if any).
+    :ivar domain: Indicates whether the address is an EC2 address or a VPC address (standard|vpc).
+ :ivar allocation_id: The allocation ID for the address (VPC addresses only).
+ :ivar association_id: The association ID for the address (VPC addresses only).
+ :ivar network_interface_id: The network interface (if any) that the address is associated with (VPC addresses only).
+    :ivar network_interface_owner_id: The owner ID (VPC addresses only).
+ :ivar private_ip_address: The private IP address associated with the Elastic IP address (VPC addresses only).
+ """
def __init__(self, connection=None, public_ip=None, instance_id=None):
EC2Object.__init__(self, connection)
@@ -35,6 +44,9 @@
self.domain = None
self.allocation_id = None
self.association_id = None
+ self.network_interface_id = None
+ self.network_interface_owner_id = None
+ self.private_ip_address = None
def __repr__(self):
return 'Address:%s' % self.public_ip
@@ -50,18 +62,42 @@
self.allocation_id = value
elif name == 'associationId':
self.association_id = value
+ elif name == 'networkInterfaceId':
+ self.network_interface_id = value
+ elif name == 'networkInterfaceOwnerId':
+ self.network_interface_owner_id = value
+ elif name == 'privateIpAddress':
+ self.private_ip_address = value
else:
setattr(self, name, value)
def release(self):
- return self.connection.release_address(self.public_ip)
+ """
+ Free up this Elastic IP address.
+ :see: :meth:`boto.ec2.connection.EC2Connection.release_address`
+ """
+ if self.allocation_id:
+ return self.connection.release_address(None, self.allocation_id)
+ else:
+ return self.connection.release_address(self.public_ip)
delete = release
def associate(self, instance_id):
+ """
+ Associate this Elastic IP address with a currently running instance.
+ :see: :meth:`boto.ec2.connection.EC2Connection.associate_address`
+ """
return self.connection.associate_address(instance_id, self.public_ip)
def disassociate(self):
- return self.connection.disassociate_address(self.public_ip)
+ """
+ Disassociate this Elastic IP address from a currently running instance.
+ :see: :meth:`boto.ec2.connection.EC2Connection.disassociate_address`
+ """
+ if self.association_id:
+ return self.connection.disassociate_address(None, self.association_id)
+ else:
+ return self.connection.disassociate_address(self.public_ip)
diff --git a/boto/ec2/autoscale/__init__.py b/boto/ec2/autoscale/__init__.py
index fceac59..80c3c85 100644
--- a/boto/ec2/autoscale/__init__.py
+++ b/boto/ec2/autoscale/__init__.py
@@ -1,5 +1,7 @@
# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2011 Jann Kleen
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -32,20 +34,25 @@
from boto.ec2.regioninfo import RegionInfo
from boto.ec2.autoscale.request import Request
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
-from boto.ec2.autoscale.group import AutoScalingGroup, ProcessType
+from boto.ec2.autoscale.group import AutoScalingGroup
+from boto.ec2.autoscale.group import ProcessType
from boto.ec2.autoscale.activity import Activity
-from boto.ec2.autoscale.policy import AdjustmentType, MetricCollectionTypes, ScalingPolicy
+from boto.ec2.autoscale.policy import AdjustmentType
+from boto.ec2.autoscale.policy import MetricCollectionTypes
+from boto.ec2.autoscale.policy import ScalingPolicy
from boto.ec2.autoscale.instance import Instance
from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction
-
+from boto.ec2.autoscale.tag import Tag
RegionData = {
- 'us-east-1' : 'autoscaling.us-east-1.amazonaws.com',
- 'us-west-1' : 'autoscaling.us-west-1.amazonaws.com',
- 'us-west-2' : 'autoscaling.us-west-2.amazonaws.com',
- 'eu-west-1' : 'autoscaling.eu-west-1.amazonaws.com',
- 'ap-northeast-1' : 'autoscaling.ap-northeast-1.amazonaws.com',
- 'ap-southeast-1' : 'autoscaling.ap-southeast-1.amazonaws.com'}
+ 'us-east-1': 'autoscaling.us-east-1.amazonaws.com',
+ 'us-west-1': 'autoscaling.us-west-1.amazonaws.com',
+ 'us-west-2': 'autoscaling.us-west-2.amazonaws.com',
+ 'sa-east-1': 'autoscaling.sa-east-1.amazonaws.com',
+ 'eu-west-1': 'autoscaling.eu-west-1.amazonaws.com',
+ 'ap-northeast-1': 'autoscaling.ap-northeast-1.amazonaws.com',
+ 'ap-southeast-1': 'autoscaling.ap-southeast-1.amazonaws.com'}
+
def regions():
"""
@@ -62,6 +69,7 @@
regions.append(region)
return regions
+
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
@@ -82,13 +90,15 @@
class AutoScaleConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'autoscale_version', '2011-01-01')
DefaultRegionEndpoint = boto.config.get('Boto', 'autoscale_endpoint',
- 'autoscaling.amazonaws.com')
- DefaultRegionName = boto.config.get('Boto', 'autoscale_region_name', 'us-east-1')
+ 'autoscaling.us-east-1.amazonaws.com')
+ DefaultRegionName = boto.config.get('Boto', 'autoscale_region_name',
+ 'us-east-1')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
- https_connection_factory=None, region=None, path='/'):
+ https_connection_factory=None, region=None, path='/',
+ security_token=None, validate_certs=True):
"""
Init method to create a new connection to the AutoScaling service.
@@ -105,10 +115,12 @@
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
- https_connection_factory, path=path)
+ https_connection_factory, path=path,
+ security_token=security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
- return ['ec2']
+ return ['hmac-v4']
def build_list_params(self, params, items, label):
"""
@@ -128,28 +140,25 @@
['us-east-1b',...]
"""
# different from EC2 list params
- for i in xrange(1, len(items)+1):
- if isinstance(items[i-1], dict):
- for k, v in items[i-1].iteritems():
+ for i in xrange(1, len(items) + 1):
+ if isinstance(items[i - 1], dict):
+ for k, v in items[i - 1].iteritems():
if isinstance(v, dict):
for kk, vv in v.iteritems():
params['%s.member.%d.%s.%s' % (label, i, k, kk)] = vv
else:
params['%s.member.%d.%s' % (label, i, k)] = v
- elif isinstance(items[i-1], basestring):
- params['%s.member.%d' % (label, i)] = items[i-1]
+ elif isinstance(items[i - 1], basestring):
+ params['%s.member.%d' % (label, i)] = items[i - 1]
def _update_group(self, op, as_group):
- params = {
- 'AutoScalingGroupName' : as_group.name,
- 'LaunchConfigurationName' : as_group.launch_config_name,
- 'MinSize' : as_group.min_size,
- 'MaxSize' : as_group.max_size,
- }
+ params = {'AutoScalingGroupName': as_group.name,
+ 'LaunchConfigurationName': as_group.launch_config_name,
+ 'MinSize': as_group.min_size,
+ 'MaxSize': as_group.max_size}
# get availability zone information (required param)
zones = as_group.availability_zones
- self.build_list_params(params, zones,
- 'AvailabilityZones')
+ self.build_list_params(params, zones, 'AvailabilityZones')
if as_group.desired_capacity:
params['DesiredCapacity'] = as_group.desired_capacity
if as_group.vpc_zone_identifier:
@@ -163,10 +172,14 @@
if as_group.placement_group:
params['PlacementGroup'] = as_group.placement_group
if op.startswith('Create'):
- # you can only associate load balancers with an autoscale group at creation time
+ # you can only associate load balancers with an autoscale
+ # group at creation time
if as_group.load_balancers:
self.build_list_params(params, as_group.load_balancers,
'LoadBalancerNames')
+ if as_group.tags:
+ for i, tag in enumerate(as_group.tags):
+ tag.build_params(params, i + 1)
return self.get_object(op, params, Request)
def create_auto_scaling_group(self, as_group):
@@ -181,9 +194,9 @@
and no scaling activities in progress.
"""
if(force_delete):
- params = {'AutoScalingGroupName' : name, 'ForceDelete' : 'true'}
+ params = {'AutoScalingGroupName': name, 'ForceDelete': 'true'}
else:
- params = {'AutoScalingGroupName' : name}
+ params = {'AutoScalingGroupName': name}
return self.get_object('DeleteAutoScalingGroup', params, Request)
def create_launch_configuration(self, launch_config):
@@ -192,13 +205,10 @@
:type launch_config: :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
:param launch_config: LaunchConfiguration object.
-
"""
- params = {
- 'ImageId' : launch_config.image_id,
- 'LaunchConfigurationName' : launch_config.name,
- 'InstanceType' : launch_config.instance_type,
- }
+ params = {'ImageId': launch_config.image_id,
+ 'LaunchConfigurationName': launch_config.name,
+ 'InstanceType': launch_config.instance_type}
if launch_config.key_name:
params['KeyName'] = launch_config.key_name
if launch_config.user_data:
@@ -214,9 +224,15 @@
self.build_list_params(params, launch_config.security_groups,
'SecurityGroups')
if launch_config.instance_monitoring:
- params['InstanceMonitoring.member.Enabled'] = 'true'
+ params['InstanceMonitoring.Enabled'] = 'true'
+ else:
+ params['InstanceMonitoring.Enabled'] = 'false'
+ if launch_config.spot_price is not None:
+ params['SpotPrice'] = str(launch_config.spot_price)
+ if launch_config.instance_profile_name is not None:
+ params['IamInstanceProfile'] = launch_config.instance_profile_name
return self.get_object('CreateLaunchConfiguration', params,
- Request, verb='POST')
+ Request, verb='POST')
def create_scaling_policy(self, scaling_policy):
"""
@@ -225,11 +241,10 @@
:type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy`
:param scaling_policy: ScalingPolicy object.
"""
- params = {'AdjustmentType' : scaling_policy.adjustment_type,
+ params = {'AdjustmentType': scaling_policy.adjustment_type,
'AutoScalingGroupName': scaling_policy.as_name,
- 'PolicyName' : scaling_policy.name,
- 'ScalingAdjustment' : scaling_policy.scaling_adjustment,}
-
+ 'PolicyName': scaling_policy.name,
+ 'ScalingAdjustment': scaling_policy.scaling_adjustment}
if scaling_policy.cooldown is not None:
params['Cooldown'] = scaling_policy.cooldown
@@ -243,7 +258,7 @@
Scaling group. Once this call completes, the launch configuration is no
longer available for use.
"""
- params = {'LaunchConfigurationName' : launch_config_name}
+ params = {'LaunchConfigurationName': launch_config_name}
return self.get_object('DeleteLaunchConfiguration', params, Request)
def get_all_groups(self, names=None, max_records=None, next_token=None):
@@ -264,7 +279,8 @@
:param max_records: Maximum number of groups to return.
:rtype: list
- :returns: List of :class:`boto.ec2.autoscale.group.AutoScalingGroup` instances.
+ :returns: List of :class:`boto.ec2.autoscale.group.AutoScalingGroup`
+ instances.
"""
params = {}
if max_records:
@@ -291,11 +307,13 @@
:param max_records: Maximum number of configurations to return.
:type next_token: str
- :param next_token: If you have more results than can be returned at once, pass in this
- parameter to page through all results.
+ :param next_token: If you have more results than can be returned
+ at once, pass in this parameter to page through all results.
:rtype: list
- :returns: List of :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration` instances.
+ :returns: List of
+ :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
+ instances.
"""
params = {}
max_records = kwargs.get('max_records', None)
@@ -310,7 +328,8 @@
return self.get_list('DescribeLaunchConfigurations', params,
[('member', LaunchConfiguration)])
- def get_all_activities(self, autoscale_group, activity_ids=None, max_records=None, next_token=None):
+ def get_all_activities(self, autoscale_group, activity_ids=None,
+ max_records=None, next_token=None):
"""
Get all activities for the given autoscaling group.
@@ -318,19 +337,21 @@
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter
- :type autoscale_group: str or :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
+ :type autoscale_group: str or
+ :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
:param autoscale_group: The auto scaling group to get activities on.
:type max_records: int
:param max_records: Maximum number of activities to return.
:rtype: list
- :returns: List of :class:`boto.ec2.autoscale.activity.Activity` instances.
+ :returns: List of
+ :class:`boto.ec2.autoscale.activity.Activity` instances.
"""
name = autoscale_group
if isinstance(autoscale_group, AutoScalingGroup):
name = autoscale_group.name
- params = {'AutoScalingGroupName' : name}
+ params = {'AutoScalingGroupName': name}
if max_records:
params['MaxRecords'] = max_records
if next_token:
@@ -345,11 +366,14 @@
"""
Deletes a previously scheduled action.
- :param str scheduled_action_name: The name of the action you want
+ :type scheduled_action_name: str
+ :param scheduled_action_name: The name of the action you want
to delete.
- :param str autoscale_group: The name of the autoscale group.
+
+ :type autoscale_group: str
+ :param autoscale_group: The name of the autoscale group.
"""
- params = {'ScheduledActionName' : scheduled_action_name}
+ params = {'ScheduledActionName': scheduled_action_name}
if autoscale_group:
params['AutoScalingGroupName'] = autoscale_group
return self.get_status('DeleteScheduledAction', params)
@@ -359,11 +383,14 @@
Terminates the specified instance. The desired group size can
also be adjusted, if desired.
- :param str instance_id: The ID of the instance to be terminated.
- :param bool decrement_capacity: Whether to decrement the size of the
+ :type instance_id: str
+ :param instance_id: The ID of the instance to be terminated.
+
+ :type decrement_capacity: bool
+ :param decrement_capacity: Whether to decrement the size of the
autoscaling group or not.
"""
- params = {'InstanceId' : instance_id}
+ params = {'InstanceId': instance_id}
if decrement_capacity:
params['ShouldDecrementDesiredCapacity'] = 'true'
else:
@@ -387,7 +414,8 @@
return self.get_status('DeletePolicy', params)
def get_all_adjustment_types(self):
- return self.get_list('DescribeAdjustmentTypes', {}, [('member', AdjustmentType)])
+ return self.get_list('DescribeAdjustmentTypes', {},
+ [('member', AdjustmentType)])
def get_all_autoscaling_instances(self, instance_ids=None,
max_records=None, next_token=None):
@@ -402,13 +430,14 @@
:type instance_ids: list
:param instance_ids: List of Autoscaling Instance IDs which should be
- searched for.
+ searched for.
:type max_records: int
:param max_records: Maximum number of results to return.
:rtype: list
- :returns: List of :class:`boto.ec2.autoscale.activity.Activity` instances.
+ :returns: List of
+ :class:`boto.ec2.autoscale.instance.Instance` objects.
"""
params = {}
if instance_ids:
@@ -436,11 +465,12 @@
available. To get the additional records, repeat the request with the
response token as the NextToken parameter.
- If no group name or list of policy names are provided, all available policies
- are returned.
+ If no group name or list of policy names are provided, all
+ available policies are returned.
:type as_name: str
- :param as_name: the name of the :class:`boto.ec2.autoscale.group.AutoScalingGroup` to filter for.
+ :param as_name: The name of the
+ :class:`boto.ec2.autoscale.group.AutoScalingGroup` to filter for.
:type names: list
:param names: List of policy names which should be searched for.
@@ -461,47 +491,53 @@
[('member', ScalingPolicy)])
def get_all_scaling_process_types(self):
- """ Returns scaling process types for use in the ResumeProcesses and
+ """
+ Returns scaling process types for use in the ResumeProcesses and
SuspendProcesses actions.
"""
return self.get_list('DescribeScalingProcessTypes', {},
[('member', ProcessType)])
def suspend_processes(self, as_group, scaling_processes=None):
- """ Suspends Auto Scaling processes for an Auto Scaling group.
+ """
+ Suspends Auto Scaling processes for an Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to suspend processes on.
:type scaling_processes: list
- :param scaling_processes: Processes you want to suspend. If omitted, all
- processes will be suspended.
+ :param scaling_processes: Processes you want to suspend. If omitted,
+ all processes will be suspended.
"""
- params = {'AutoScalingGroupName' : as_group}
+ params = {'AutoScalingGroupName': as_group}
if scaling_processes:
- self.build_list_params(params, scaling_processes, 'ScalingProcesses')
+ self.build_list_params(params, scaling_processes,
+ 'ScalingProcesses')
return self.get_status('SuspendProcesses', params)
def resume_processes(self, as_group, scaling_processes=None):
- """ Resumes Auto Scaling processes for an Auto Scaling group.
+ """
+ Resumes Auto Scaling processes for an Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to resume processes on.
:type scaling_processes: list
:param scaling_processes: Processes you want to resume. If omitted, all
- processes will be resumed.
+ processes will be resumed.
"""
- params = {
- 'AutoScalingGroupName' : as_group
- }
+ params = {'AutoScalingGroupName': as_group}
+
if scaling_processes:
- self.build_list_params(params, scaling_processes, 'ScalingProcesses')
+ self.build_list_params(params, scaling_processes,
+ 'ScalingProcesses')
return self.get_status('ResumeProcesses', params)
- def create_scheduled_group_action(self, as_group, name, time, desired_capacity=None,
+ def create_scheduled_group_action(self, as_group, name, time,
+ desired_capacity=None,
min_size=None, max_size=None):
- """ Creates a scheduled scaling action for a Auto Scaling group. If you
+ """
+ Creates a scheduled scaling action for an Auto Scaling group. If you
leave a parameter unspecified, the corresponding value remains
unchanged in the affected Auto Scaling group.
@@ -515,8 +551,8 @@
:param time: The time for this action to start.
:type desired_capacity: int
- :param desired_capacity: The number of EC2 instances that should be running in
- this group.
+ :param desired_capacity: The number of EC2 instances that should
+ be running in this group.
:type min_size: int
:param min_size: The minimum size for the new auto scaling group.
@@ -524,11 +560,9 @@
:type max_size: int
:param max_size: The maximum size for the new auto scaling group.
"""
- params = {
- 'AutoScalingGroupName' : as_group,
- 'ScheduledActionName' : name,
- 'Time' : time.isoformat(),
- }
+ params = {'AutoScalingGroupName': as_group,
+ 'ScheduledActionName': name,
+ 'Time': time.isoformat()}
if desired_capacity is not None:
params['DesiredCapacity'] = desired_capacity
if min_size is not None:
@@ -537,18 +571,21 @@
params['MaxSize'] = max_size
return self.get_status('PutScheduledUpdateGroupAction', params)
- def get_all_scheduled_actions(self, as_group=None, start_time=None, end_time=None, scheduled_actions=None,
+ def get_all_scheduled_actions(self, as_group=None, start_time=None,
+ end_time=None, scheduled_actions=None,
max_records=None, next_token=None):
params = {}
if as_group:
params['AutoScalingGroupName'] = as_group
if scheduled_actions:
- self.build_list_params(params, scheduled_actions, 'ScheduledActionNames')
+ self.build_list_params(params, scheduled_actions,
+ 'ScheduledActionNames')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
- return self.get_list('DescribeScheduledActions', params, [('member', ScheduledUpdateGroupAction)])
+ return self.get_list('DescribeScheduledActions', params,
+ [('member', ScheduledUpdateGroupAction)])
def disable_metrics_collection(self, as_group, metrics=None):
"""
@@ -556,9 +593,8 @@
specified in AutoScalingGroupName. You can specify the list of affected
metrics with the Metrics parameter.
"""
- params = {
- 'AutoScalingGroupName' : as_group,
- }
+ params = {'AutoScalingGroupName': as_group}
+
if metrics:
self.build_list_params(params, metrics, 'Metrics')
return self.get_status('DisableMetricsCollection', params)
@@ -578,30 +614,54 @@
:type granularity: string
:param granularity: The granularity to associate with the metrics to
- collect. Currently, the only legal granularity is "1Minute".
+ collect. Currently, the only legal granularity is "1Minute".
:type metrics: string list
:param metrics: The list of metrics to collect. If no metrics are
specified, all metrics are enabled.
"""
- params = {
- 'AutoScalingGroupName' : as_group,
- 'Granularity' : granularity,
- }
+ params = {'AutoScalingGroupName': as_group,
+ 'Granularity': granularity}
if metrics:
self.build_list_params(params, metrics, 'Metrics')
return self.get_status('EnableMetricsCollection', params)
def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
- params = {
- 'PolicyName' : policy_name,
- }
+ params = {'PolicyName': policy_name}
if as_group:
params['AutoScalingGroupName'] = as_group
if honor_cooldown:
params['HonorCooldown'] = honor_cooldown
return self.get_status('ExecutePolicy', params)
+ def put_notification_configuration(self, autoscale_group, topic, notification_types):
+ """
+ Configures an Auto Scaling group to send notifications when
+ specified events take place.
+
+ :type autoscale_group: str or
+ :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
+ :param autoscale_group: The Auto Scaling group to put notification
+ configuration on.
+
+ :type topic: str
+ :param topic: The Amazon Resource Name (ARN) of the Amazon Simple
+ Notification Service (SNS) topic.
+
+ :type notification_types: list
+ :param notification_types: The type of events that will trigger
+ the notification.
+ """
+
+ name = autoscale_group
+ if isinstance(autoscale_group, AutoScalingGroup):
+ name = autoscale_group.name
+
+ params = {'AutoScalingGroupName': name,
+ 'TopicARN': topic}
+ self.build_list_params(params, notification_types, 'NotificationTypes')
+ return self.get_status('PutNotificationConfiguration', params)
+
def set_instance_health(self, instance_id, health_status,
should_respect_grace_period=True):
"""
@@ -612,22 +672,72 @@
:type health_status: str
:param health_status: The health status of the instance.
- "Healthy" means that the instance is
- healthy and should remain in service.
- "Unhealthy" means that the instance is
- unhealthy. Auto Scaling should terminate
- and replace it.
+ "Healthy" means that the instance is healthy and should remain
+ in service. "Unhealthy" means that the instance is unhealthy.
+ Auto Scaling should terminate and replace it.
:type should_respect_grace_period: bool
:param should_respect_grace_period: If True, this call should
- respect the grace period
- associated with the group.
+ respect the grace period associated with the group.
"""
- params = {'InstanceId' : instance_id,
- 'HealthStatus' : health_status}
+ params = {'InstanceId': instance_id,
+ 'HealthStatus': health_status}
if should_respect_grace_period:
params['ShouldRespectGracePeriod'] = 'true'
else:
params['ShouldRespectGracePeriod'] = 'false'
return self.get_status('SetInstanceHealth', params)
+ # Tag methods
+
+ def get_all_tags(self, filters=None, max_records=None, next_token=None):
+ """
+ Lists the Auto Scaling group tags.
+
+ This action supports pagination by returning a token if there
+ are more pages to retrieve. To get the next page, call this
+ action again with the returned token as the NextToken
+ parameter.
+
+ :type filters: dict
+ :param filters: The value of the filter type used to identify
+ the tags to be returned. NOT IMPLEMENTED YET.
+
+ :type max_records: int
+ :param max_records: Maximum number of tags to return.
+
+ :rtype: list
+ :returns: List of :class:`boto.ec2.autoscale.tag.Tag`
+ instances.
+ """
+ params = {}
+ if max_records:
+ params['MaxRecords'] = max_records
+ if next_token:
+ params['NextToken'] = next_token
+ return self.get_list('DescribeTags', params,
+ [('member', Tag)])
+
+ def create_or_update_tags(self, tags):
+ """
+ Creates new tags or updates existing tags for an Auto Scaling group.
+
+ :type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
+ :param tags: The new or updated tags.
+ """
+ params = {}
+ for i, tag in enumerate(tags):
+ tag.build_params(params, i + 1)
+ return self.get_status('CreateOrUpdateTags', params, verb='POST')
+
+ def delete_tags(self, tags):
+ """
+ Deletes existing tags for an Auto Scaling group.
+
+ :type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
+ :param tags: The tags to delete.
+ """
+ params = {}
+ for i, tag in enumerate(tags):
+ tag.build_params(params, i + 1)
+ return self.get_status('DeleteTags', params, verb='POST')
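
A sketch of the new tag round trip against an existing group (the group name 'my-asg' and the tag values are hypothetical):

    import boto.ec2.autoscale
    from boto.ec2.autoscale.tag import Tag

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    tag = Tag(key='env', value='production',
              propagate_at_launch=True,
              resource_id='my-asg')  # hypothetical group name
    conn.create_or_update_tags([tag])
    for t in conn.get_all_tags():
        print t.resource_id, t.key, t.value
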
diff --git a/boto/ec2/autoscale/group.py b/boto/ec2/autoscale/group.py
index eb65853..eb72f6f 100644
--- a/boto/ec2/autoscale/group.py
+++ b/boto/ec2/autoscale/group.py
@@ -19,12 +19,12 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-
from boto.ec2.elb.listelement import ListElement
from boto.resultset import ResultSet
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.request import Request
from boto.ec2.autoscale.instance import Instance
+from boto.ec2.autoscale.tag import Tag
class ProcessType(object):
@@ -81,13 +81,24 @@
self.metric = value
+class TerminationPolicies(list):
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'member':
+ self.append(value)
+
+
class AutoScalingGroup(object):
def __init__(self, connection=None, name=None,
launch_config=None, availability_zones=None,
load_balancers=None, default_cooldown=None,
health_check_type=None, health_check_period=None,
- placement_group=None, vpc_zone_identifier=None, desired_capacity=None,
- min_size=None, max_size=None, **kwargs):
+ placement_group=None, vpc_zone_identifier=None,
+ desired_capacity=None, min_size=None, max_size=None,
+ tags=None, **kwargs):
"""
Creates a new AutoScalingGroup with the specified name.
@@ -103,24 +114,23 @@
:param availability_zones: List of availability zones (required).
:type default_cooldown: int
- :param default_cooldown: Number of seconds after a Scaling Activity completes
- before any further scaling activities can start.
+ :param default_cooldown: Number of seconds after a Scaling Activity
+ completes before any further scaling activities can start.
:type desired_capacity: int
:param desired_capacity: The desired capacity for the group.
:type health_check_period: str
- :param health_check_period: Length of time in seconds after a new EC2 instance
- comes into service that Auto Scaling starts checking its
- health.
+ :param health_check_period: Length of time in seconds after a new
+ EC2 instance comes into service that Auto Scaling starts
+ checking its health.
:type health_check_type: str
:param health_check_type: The service you want the health status from,
- Amazon EC2 or Elastic Load Balancer.
+ Amazon EC2 or Elastic Load Balancer.
- :type launch_config: str or LaunchConfiguration
- :param launch_config: Name of launch configuration (required).
-
+ :type launch_config_name: str or LaunchConfiguration
+ :param launch_config_name: Name of launch configuration (required).
:type load_balancers: list
:param load_balancers: List of load balancers.
@@ -133,21 +143,25 @@
:type placement_group: str
:param placement_group: Physical location of your cluster placement
- group created in Amazon EC2.
+ group created in Amazon EC2.
:type vpc_zone_identifier: str
- :param vpc_zone_identifier: The subnet identifier of the Virtual Private Cloud.
+ :param vpc_zone_identifier: The subnet identifier of the Virtual
+ Private Cloud.
:rtype: :class:`boto.ec2.autoscale.group.AutoScalingGroup`
:return: An autoscale group.
"""
- self.name = name or kwargs.get('group_name') # backwards compatibility
+ self.name = name or kwargs.get('group_name') # backwards compat
self.connection = connection
self.min_size = int(min_size) if min_size is not None else None
self.max_size = int(max_size) if max_size is not None else None
self.created_time = None
- default_cooldown = default_cooldown or kwargs.get('cooldown') # backwards compatibility
- self.default_cooldown = int(default_cooldown) if default_cooldown is not None else None
+ # backwards compatibility
+ default_cooldown = default_cooldown or kwargs.get('cooldown')
+ if default_cooldown is not None:
+ default_cooldown = int(default_cooldown)
+ self.default_cooldown = default_cooldown
self.launch_config_name = launch_config
if launch_config and isinstance(launch_config, LaunchConfiguration):
self.launch_config_name = launch_config.name
@@ -162,20 +176,20 @@
self.autoscaling_group_arn = None
self.vpc_zone_identifier = vpc_zone_identifier
self.instances = None
+ self.tags = tags or None
+ self.termination_policies = TerminationPolicies()
# backwards compatible access to 'cooldown' param
def _get_cooldown(self):
return self.default_cooldown
+
def _set_cooldown(self, val):
self.default_cooldown = val
+
cooldown = property(_get_cooldown, _set_cooldown)
def __repr__(self):
- return 'AutoScalingGroup<%s>: created:%s, minsize:%s, maxsize:%s, capacity:%s' % (self.name,
- self.created_time,
- self.min_size,
- self.max_size,
- self.desired_capacity)
+ return 'AutoScaleGroup<%s>' % self.name
def startElement(self, name, attrs, connection):
if name == 'Instances':
@@ -191,6 +205,11 @@
elif name == 'SuspendedProcesses':
self.suspended_processes = ResultSet([('member', SuspendedProcess)])
return self.suspended_processes
+ elif name == 'Tags':
+ self.tags = ResultSet([('member', Tag)])
+ return self.tags
+ elif name == 'TerminationPolicies':
+ return self.termination_policies
else:
return
@@ -214,7 +233,10 @@
elif name == 'PlacementGroup':
self.placement_group = value
elif name == 'HealthCheckGracePeriod':
- self.health_check_period = int(value)
+ try:
+ self.health_check_period = int(value)
+ except ValueError:
+ self.health_check_period = None
elif name == 'HealthCheckType':
self.health_check_type = value
elif name == 'VPCZoneIdentifier':
@@ -223,22 +245,25 @@
setattr(self, name, value)
def set_capacity(self, capacity):
- """ Set the desired capacity for the group. """
- params = {
- 'AutoScalingGroupName' : self.name,
- 'DesiredCapacity' : capacity,
- }
+ """
+ Set the desired capacity for the group.
+ """
+ params = {'AutoScalingGroupName': self.name,
+ 'DesiredCapacity': capacity}
req = self.connection.get_object('SetDesiredCapacity', params,
- Request)
+ Request)
self.connection.last_request = req
return req
def update(self):
- """ Sync local changes with AutoScaling group. """
+ """
+ Sync local changes with AutoScaling group.
+ """
return self.connection._update_group('UpdateAutoScalingGroup', self)
def shutdown_instances(self):
- """ Convenience method which shuts down all instances associated with
+ """
+ Convenience method which shuts down all instances associated with
this group.
"""
self.min_size = 0
@@ -247,23 +272,39 @@
self.update()
def delete(self, force_delete=False):
- """ Delete this auto-scaling group if no instances attached or no
+ """
+ Delete this auto-scaling group if no instances attached or no
scaling activities in progress.
"""
- return self.connection.delete_auto_scaling_group(self.name, force_delete)
+ return self.connection.delete_auto_scaling_group(self.name,
+ force_delete)
def get_activities(self, activity_ids=None, max_records=50):
"""
Get all activities for this group.
"""
- return self.connection.get_all_activities(self, activity_ids, max_records)
+ return self.connection.get_all_activities(self, activity_ids,
+ max_records)
+
+ def put_notification_configuration(self, topic, notification_types):
+ """
+ Configures an Auto Scaling group to send notifications when
+ specified events take place.
+ """
+ return self.connection.put_notification_configuration(self,
+ topic,
+ notification_types)
def suspend_processes(self, scaling_processes=None):
- """ Suspends Auto Scaling processes for an Auto Scaling group. """
+ """
+ Suspends Auto Scaling processes for an Auto Scaling group.
+ """
return self.connection.suspend_processes(self.name, scaling_processes)
def resume_processes(self, scaling_processes=None):
- """ Resumes Auto Scaling processes for an Auto Scaling group. """
+ """
+ Resumes Auto Scaling processes for an Auto Scaling group.
+ """
return self.connection.resume_processes(self.name, scaling_processes)
@@ -287,4 +328,3 @@
self.granularity = value
else:
setattr(self, name, value)
-
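
A sketch of creating a group with the new tags parameter; the tags are serialized into the CreateAutoScalingGroup request by _update_group. All names here are illustrative:

    import boto.ec2.autoscale
    from boto.ec2.autoscale.group import AutoScalingGroup
    from boto.ec2.autoscale.tag import Tag

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    ag = AutoScalingGroup(name='my-asg',
                          launch_config='my-launch-config',
                          availability_zones=['us-east-1a', 'us-east-1b'],
                          min_size=1, max_size=4,
                          tags=[Tag(key='env', value='test',
                                    propagate_at_launch=True,
                                    resource_id='my-asg')])
    conn.create_auto_scaling_group(ag)
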
diff --git a/boto/ec2/autoscale/launchconfig.py b/boto/ec2/autoscale/launchconfig.py
index 2f55b24..e6e38fd 100644
--- a/boto/ec2/autoscale/launchconfig.py
+++ b/boto/ec2/autoscale/launchconfig.py
@@ -1,4 +1,5 @@
# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -19,13 +20,15 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-
from datetime import datetime
-import base64
from boto.resultset import ResultSet
from boto.ec2.elb.listelement import ListElement
+import boto.utils
+import base64
# this should use the corresponding object from boto.ec2
+
+
class Ebs(object):
def __init__(self, connection=None, snapshot_id=None, volume_size=None):
self.connection = connection
@@ -70,7 +73,8 @@
self.ebs = None
def __repr__(self):
- return 'BlockDeviceMapping(%s, %s)' % (self.device_name, self.virtual_name)
+ return 'BlockDeviceMapping(%s, %s)' % (self.device_name,
+ self.virtual_name)
def startElement(self, name, attrs, connection):
if name == 'Ebs':
@@ -89,7 +93,8 @@
key_name=None, security_groups=None, user_data=None,
instance_type='m1.small', kernel_id=None,
ramdisk_id=None, block_device_mappings=None,
- instance_monitoring=False):
+ instance_monitoring=False, spot_price=None,
+ instance_profile_name=None):
"""
A launch configuration.
@@ -98,14 +103,14 @@
:type image_id: str
:param image_id: Unique ID of the Amazon Machine Image (AMI) which was
- assigned during registration.
+ assigned during registration.
:type key_name: str
:param key_name: The name of the EC2 key pair.
:type security_groups: list
:param security_groups: Names of the security groups with which to
- associate the EC2 instances.
+ associate the EC2 instances.
:type user_data: str
:param user_data: The user data available to launched EC2 instances.
@@ -121,11 +126,20 @@
:type block_device_mappings: list
:param block_device_mappings: Specifies how block devices are exposed
- for instances
+ for instances
:type instance_monitoring: bool
:param instance_monitoring: Whether instances in group are launched
- with detailed monitoring.
+ with detailed monitoring.
+
+ :type spot_price: float
+ :param spot_price: The spot price you are bidding. Only applies
+ if you are building an autoscaling group with spot instances.
+
+ :type instance_profile_name: string
+ :param instance_profile_name: The name or the Amazon Resource
+ Name (ARN) of the instance profile associated with the IAM
+ role for the instance.
"""
self.connection = connection
self.name = name
@@ -141,6 +155,8 @@
self.user_data = user_data
self.created_time = None
self.instance_monitoring = instance_monitoring
+ self.spot_price = spot_price
+ self.instance_profile_name = instance_profile_name
self.launch_configuration_arn = None
def __repr__(self):
@@ -150,7 +166,8 @@
if name == 'SecurityGroups':
return self.security_groups
elif name == 'BlockDeviceMappings':
- self.block_device_mappings = ResultSet([('member', BlockDeviceMapping)])
+ self.block_device_mappings = ResultSet([('member',
+ BlockDeviceMapping)])
return self.block_device_mappings
elif name == 'InstanceMonitoring':
self.instance_monitoring = InstanceMonitoring(self)
@@ -166,24 +183,27 @@
elif name == 'ImageId':
self.image_id = value
elif name == 'CreatedTime':
- try:
- self.created_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
- except ValueError:
- self.created_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ self.created_time = boto.utils.parse_ts(value)
elif name == 'KernelId':
self.kernel_id = value
elif name == 'RamdiskId':
self.ramdisk_id = value
elif name == 'UserData':
- self.user_data = base64.b64decode(value)
+ try:
+ self.user_data = base64.b64decode(value)
+ except TypeError:
+ self.user_data = value
elif name == 'LaunchConfigurationARN':
self.launch_configuration_arn = value
elif name == 'InstanceMonitoring':
self.instance_monitoring = value
+ elif name == 'SpotPrice':
+ self.spot_price = float(value)
+ elif name == 'IamInstanceProfile':
+ self.instance_profile_name = value
else:
setattr(self, name, value)
def delete(self):
""" Delete this launch configuration. """
return self.connection.delete_launch_configuration(self.name)
-
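
The new spot_price and instance_profile_name parameters map to the SpotPrice and IamInstanceProfile request parameters built in create_launch_configuration. A sketch, with hypothetical names and an arbitrary bid:

    import boto.ec2.autoscale
    from boto.ec2.autoscale.launchconfig import LaunchConfiguration

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    lc = LaunchConfiguration(name='spot-config',        # hypothetical
                             image_id='ami-12345678',   # hypothetical AMI
                             instance_type='m1.small',
                             spot_price=0.05,           # spot bid in USD
                             instance_profile_name='my-instance-role')
    conn.create_launch_configuration(lc)
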
diff --git a/boto/ec2/autoscale/tag.py b/boto/ec2/autoscale/tag.py
new file mode 100644
index 0000000..ad9641d
--- /dev/null
+++ b/boto/ec2/autoscale/tag.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class Tag(object):
+ """
+ A name/value tag on an AutoScalingGroup resource.
+
+ :ivar key: The key of the tag.
+ :ivar value: The value of the tag.
+ :ivar propagate_at_launch: Boolean value which specifies whether the
+ new tag will be applied to instances launched after the tag is created.
+ :ivar resource_id: The name of the autoscaling group.
+ :ivar resource_type: The only supported resource type at this time
+ is "auto-scaling-group".
+ """
+
+ def __init__(self, connection=None, key=None, value=None,
+ propagate_at_launch=False, resource_id=None,
+ resource_type='auto-scaling-group'):
+ self.connection = connection
+ self.key = key
+ self.value = value
+ self.propagate_at_launch = propagate_at_launch
+ self.resource_id = resource_id
+ self.resource_type = resource_type
+
+ def __repr__(self):
+ return 'Tag(%s=%s)' % (self.key, self.value)
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'Key':
+ self.key = value
+ elif name == 'Value':
+ self.value = value
+ elif name == 'PropagateAtLaunch':
+ if value.lower() == 'true':
+ self.propagate_at_launch = True
+ else:
+ self.propagate_at_launch = False
+ elif name == 'ResourceId':
+ self.resource_id = value
+ elif name == 'ResourceType':
+ self.resource_type = value
+
+ def build_params(self, params, i):
+ """
+ Populates a dictionary with the name/value pairs necessary
+ to identify this Tag in a request.
+ """
+ prefix = 'Tags.member.%d.' % i
+ params[prefix + 'ResourceId'] = self.resource_id
+ params[prefix + 'ResourceType'] = self.resource_type
+ params[prefix + 'Key'] = self.key
+ params[prefix + 'Value'] = self.value
+ if self.propagate_at_launch:
+ params[prefix + 'PropagateAtLaunch'] = 'true'
+ else:
+ params[prefix + 'PropagateAtLaunch'] = 'false'
+
+ def delete(self):
+ return self.connection.delete_tags([self])
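
For reference, build_params flattens a Tag into the member-style request parameters shown below (the key, value and group name are made up):

    from boto.ec2.autoscale.tag import Tag

    tag = Tag(key='env', value='prod', propagate_at_launch=True,
              resource_id='my-asg')
    params = {}
    tag.build_params(params, 1)
    # params == {'Tags.member.1.ResourceId': 'my-asg',
    #            'Tags.member.1.ResourceType': 'auto-scaling-group',
    #            'Tags.member.1.Key': 'env',
    #            'Tags.member.1.Value': 'prod',
    #            'Tags.member.1.PropagateAtLaunch': 'true'}
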
diff --git a/boto/ec2/blockdevicemapping.py b/boto/ec2/blockdevicemapping.py
index 75be2a4..ca0e937 100644
--- a/boto/ec2/blockdevicemapping.py
+++ b/boto/ec2/blockdevicemapping.py
@@ -1,4 +1,5 @@
-# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -14,13 +15,17 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
+
class BlockDeviceType(object):
+ """
+ Represents parameters for a block device.
+ """
def __init__(self,
connection=None,
@@ -31,7 +36,9 @@
status=None,
attach_time=None,
delete_on_termination=False,
- size=None):
+ size=None,
+ volume_type=None,
+ iops=None):
self.connection = connection
self.ephemeral_name = ephemeral_name
self.no_device = no_device
@@ -41,18 +48,20 @@
self.attach_time = attach_time
self.delete_on_termination = delete_on_termination
self.size = size
+ self.volume_type = volume_type
+ self.iops = iops
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
- if name =='volumeId':
+ if name == 'volumeId':
self.volume_id = value
elif name == 'virtualName':
self.ephemeral_name = value
- elif name =='NoDevice':
+ elif name == 'NoDevice':
self.no_device = (value == 'true')
- elif name =='snapshotId':
+ elif name == 'snapshotId':
self.snapshot_id = value
elif name == 'volumeSize':
self.size = int(value)
@@ -61,19 +70,35 @@
elif name == 'attachTime':
self.attach_time = value
elif name == 'deleteOnTermination':
- if value == 'true':
- self.delete_on_termination = True
- else:
- self.delete_on_termination = False
+ self.delete_on_termination = (value == 'true')
+ elif name == 'volumeType':
+ self.volume_type = value
+ elif name == 'iops':
+ self.iops = int(value)
else:
setattr(self, name, value)
# for backwards compatibility
EBSBlockDeviceType = BlockDeviceType
+
class BlockDeviceMapping(dict):
+ """
+ Represents a collection of BlockDeviceTypes when creating ec2 instances.
+
+ Example:
+ dev_sda1 = BlockDeviceType()
+ dev_sda1.size = 100 # change root volume to 100GB instead of default
+ bdm = BlockDeviceMapping()
+ bdm['/dev/sda1'] = dev_sda1
+ reservation = image.run(..., block_device_map=bdm, ...)
+ """
def __init__(self, connection=None):
+ """
+ :type connection: :class:`boto.ec2.EC2Connection`
+ :param connection: Optional connection.
+ """
dict.__init__(self)
self.connection = connection
self.current_name = None
@@ -109,4 +134,8 @@
params['%s.Ebs.DeleteOnTermination' % pre] = 'true'
else:
params['%s.Ebs.DeleteOnTermination' % pre] = 'false'
+ if block_dev.volume_type:
+ params['%s.Ebs.VolumeType' % pre] = block_dev.volume_type
+ if block_dev.iops is not None:
+ params['%s.Ebs.Iops' % pre] = block_dev.iops
i += 1
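
Extending the docstring example above, the new volume_type and iops attributes let a mapping request a provisioned-IOPS volume (the values are illustrative):

    from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType

    dev = BlockDeviceType(size=100, volume_type='io1', iops=500,
                          delete_on_termination=True)
    bdm = BlockDeviceMapping()
    bdm['/dev/sda1'] = dev
    # Passing block_device_map=bdm to image.run() or run_instances()
    # now emits ...Ebs.VolumeType and ...Ebs.Iops request parameters.
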
diff --git a/boto/ec2/cloudwatch/__init__.py b/boto/ec2/cloudwatch/__init__.py
index d301167..5b8db5b 100644
--- a/boto/ec2/cloudwatch/__init__.py
+++ b/boto/ec2/cloudwatch/__init__.py
@@ -22,119 +22,6 @@
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
CloudWatch service from AWS.
-
-The 5 Minute How-To Guide
--------------------------
-First, make sure you have something to monitor. You can either create a
-LoadBalancer or enable monitoring on an existing EC2 instance. To enable
-monitoring, you can either call the monitor_instance method on the
-EC2Connection object or call the monitor method on the Instance object.
-
-It takes a while for the monitoring data to start accumulating but once
-it does, you can do this:
-
->>> import boto
->>> c = boto.connect_cloudwatch()
->>> metrics = c.list_metrics()
->>> metrics
-[Metric:NetworkIn,
- Metric:NetworkOut,
- Metric:NetworkOut(InstanceType,m1.small),
- Metric:NetworkIn(InstanceId,i-e573e68c),
- Metric:CPUUtilization(InstanceId,i-e573e68c),
- Metric:DiskWriteBytes(InstanceType,m1.small),
- Metric:DiskWriteBytes(ImageId,ami-a1ffb63),
- Metric:NetworkOut(ImageId,ami-a1ffb63),
- Metric:DiskWriteOps(InstanceType,m1.small),
- Metric:DiskReadBytes(InstanceType,m1.small),
- Metric:DiskReadOps(ImageId,ami-a1ffb63),
- Metric:CPUUtilization(InstanceType,m1.small),
- Metric:NetworkIn(ImageId,ami-a1ffb63),
- Metric:DiskReadOps(InstanceType,m1.small),
- Metric:DiskReadBytes,
- Metric:CPUUtilization,
- Metric:DiskWriteBytes(InstanceId,i-e573e68c),
- Metric:DiskWriteOps(InstanceId,i-e573e68c),
- Metric:DiskWriteOps,
- Metric:DiskReadOps,
- Metric:CPUUtilization(ImageId,ami-a1ffb63),
- Metric:DiskReadOps(InstanceId,i-e573e68c),
- Metric:NetworkOut(InstanceId,i-e573e68c),
- Metric:DiskReadBytes(ImageId,ami-a1ffb63),
- Metric:DiskReadBytes(InstanceId,i-e573e68c),
- Metric:DiskWriteBytes,
- Metric:NetworkIn(InstanceType,m1.small),
- Metric:DiskWriteOps(ImageId,ami-a1ffb63)]
-
-The list_metrics call will return a list of all of the available metrics
-that you can query against. Each entry in the list is a Metric object.
-As you can see from the list above, some of the metrics are generic metrics
-and some have Dimensions associated with them (e.g. InstanceType=m1.small).
-The Dimension can be used to refine your query. So, for example, I could
-query the metric Metric:CPUUtilization which would create the desired statistic
-by aggregating cpu utilization data across all sources of information available
-or I could refine that by querying the metric
-Metric:CPUUtilization(InstanceId,i-e573e68c) which would use only the data
-associated with the instance identified by the instance ID i-e573e68c.
-
-Because for this example, I'm only monitoring a single instance, the set
-of metrics available to me are fairly limited. If I was monitoring many
-instances, using many different instance types and AMI's and also several
-load balancers, the list of available metrics would grow considerably.
-
-Once you have the list of available metrics, you can actually
-query the CloudWatch system for that metric. Let's choose the CPU utilization
-metric for our instance.
-
->>> m = metrics[5]
->>> m
-Metric:CPUUtilization(InstanceId,i-e573e68c)
-
-The Metric object has a query method that lets us actually perform
-the query against the collected data in CloudWatch. To call that,
-we need a start time and end time to control the time span of data
-that we are interested in. For this example, let's say we want the
-data for the previous hour:
-
->>> import datetime
->>> end = datetime.datetime.now()
->>> start = end - datetime.timedelta(hours=1)
-
-We also need to supply the Statistic that we want reported and
-the Units to use for the results. The Statistic can be one of these
-values:
-
-['Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount']
-
-And Units must be one of the following:
-
-['Seconds', 'Percent', 'Bytes', 'Bits', 'Count',
-'Bytes/Second', 'Bits/Second', 'Count/Second']
-
-The query method also takes an optional parameter, period. This
-parameter controls the granularity (in seconds) of the data returned.
-The smallest period is 60 seconds and the value must be a multiple
-of 60 seconds. So, let's ask for the average as a percent:
-
->>> datapoints = m.query(start, end, 'Average', 'Percent')
->>> len(datapoints)
-60
-
-Our period was 60 seconds and our duration was one hour so
-we should get 60 data points back and we can see that we did.
-Each element in the datapoints list is a DataPoint object
-which is a simple subclass of a Python dict object. Each
-Datapoint object contains all of the information available
-about that particular data point.
-
->>> d = datapoints[0]
->>> d
-{u'Average': 0.0,
- u'SampleCount': 1.0,
- u'Timestamp': u'2009-05-21T19:55:00Z',
- u'Unit': u'Percent'}
-
-My server obviously isn't very busy right now!
"""
try:
import simplejson as json
@@ -143,18 +30,20 @@
from boto.connection import AWSQueryConnection
from boto.ec2.cloudwatch.metric import Metric
-from boto.ec2.cloudwatch.alarm import MetricAlarm, AlarmHistoryItem
+from boto.ec2.cloudwatch.alarm import MetricAlarm, MetricAlarms, AlarmHistoryItem
from boto.ec2.cloudwatch.datapoint import Datapoint
from boto.regioninfo import RegionInfo
import boto
RegionData = {
- 'us-east-1' : 'monitoring.us-east-1.amazonaws.com',
- 'us-west-1' : 'monitoring.us-west-1.amazonaws.com',
- 'us-west-2' : 'monitoring.us-west-2.amazonaws.com',
- 'eu-west-1' : 'monitoring.eu-west-1.amazonaws.com',
- 'ap-northeast-1' : 'monitoring.ap-northeast-1.amazonaws.com',
- 'ap-southeast-1' : 'monitoring.ap-southeast-1.amazonaws.com'}
+ 'us-east-1': 'monitoring.us-east-1.amazonaws.com',
+ 'us-west-1': 'monitoring.us-west-1.amazonaws.com',
+ 'us-west-2': 'monitoring.us-west-2.amazonaws.com',
+ 'sa-east-1': 'monitoring.sa-east-1.amazonaws.com',
+ 'eu-west-1': 'monitoring.eu-west-1.amazonaws.com',
+ 'ap-northeast-1': 'monitoring.ap-northeast-1.amazonaws.com',
+ 'ap-southeast-1': 'monitoring.ap-southeast-1.amazonaws.com'}
+
def regions():
"""
@@ -171,6 +60,7 @@
regions.append(region)
return regions
+
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
@@ -195,13 +85,13 @@
'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto',
'cloudwatch_region_endpoint',
- 'monitoring.amazonaws.com')
-
+ 'monitoring.us-east-1.amazonaws.com')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
- https_connection_factory=None, region=None, path='/'):
+ https_connection_factory=None, region=None, path='/',
+ security_token=None, validate_certs=True):
"""
Init method to create a new connection to EC2 Monitoring Service.
@@ -213,41 +103,55 @@
self.DefaultRegionEndpoint)
self.region = region
+ # Ugly hack to get around both a bug in Python and a
+ # misconfigured SSL cert for the eu-west-1 endpoint
+ if self.region.name == 'eu-west-1':
+ validate_certs = False
+
AWSQueryConnection.__init__(self, aws_access_key_id,
- aws_secret_access_key,
+ aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
- https_connection_factory, path)
+ https_connection_factory, path,
+ security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return ['ec2']
def build_dimension_param(self, dimension, params):
+ prefix = 'Dimensions.member'
+ i = 0
for dim_name in dimension:
dim_value = dimension[dim_name]
- if isinstance(dim_value, basestring):
- dim_value = [dim_value]
- for i, value in enumerate(dim_value):
- params['Dimensions.member.%d.Name' % (i+1)] = dim_name
- params['Dimensions.member.%d.Value' % (i+1)] = value
-
+ if dim_value:
+ if isinstance(dim_value, basestring):
+ dim_value = [dim_value]
+ for value in dim_value:
+ params['%s.%d.Name' % (prefix, i+1)] = dim_name
+ params['%s.%d.Value' % (prefix, i+1)] = value
+ i += 1
+ else:
+ params['%s.%d.Name' % (prefix, i+1)] = dim_name
+ i += 1
+
def build_list_params(self, params, items, label):
if isinstance(items, basestring):
items = [items]
for index, item in enumerate(items):
i = index + 1
if isinstance(item, dict):
- for k,v in item.iteritems():
+ for k, v in item.iteritems():
params[label % (i, 'Name')] = k
if v is not None:
params[label % (i, 'Value')] = v
else:
params[label % i] = item
- def build_put_params(self, params, name, value=None, timestamp=None,
+ def build_put_params(self, params, name, value=None, timestamp=None,
unit=None, dimensions=None, statistics=None):
- args = (name, value, unit, dimensions, statistics)
+ args = (name, value, unit, dimensions, statistics, timestamp)
length = max(map(lambda a: len(a) if isinstance(a, list) else 1, args))
def aslist(a):
@@ -257,18 +161,18 @@
return a
return [a] * length
- for index, (n, v, u, d, s) in enumerate(zip(*map(aslist, args))):
+ for index, (n, v, u, d, s, t) in enumerate(zip(*map(aslist, args))):
metric_data = {'MetricName': n}
if timestamp:
- metric_data['Timestamp'] = timestamp.isoformat()
-
+ metric_data['Timestamp'] = t.isoformat()
+
if unit:
metric_data['Unit'] = u
-
+
if dimensions:
self.build_dimension_param(d, metric_data)
-
+
if statistics:
metric_data['StatisticValues.Maximum'] = s['maximum']
metric_data['StatisticValues.Minimum'] = s['minimum']
@@ -294,20 +198,20 @@
:type period: integer
:param period: The granularity, in seconds, of the returned datapoints.
- Period must be at least 60 seconds and must be a multiple
- of 60. The default value is 60.
+ Period must be at least 60 seconds and must be a multiple
+ of 60. The default value is 60.
:type start_time: datetime
- :param start_time: The time stamp to use for determining the first
- datapoint to return. The value specified is
- inclusive; results include datapoints with the
- time stamp specified.
+ :param start_time: The time stamp to use for determining the
+ first datapoint to return. The value specified is
+ inclusive; results include datapoints with the time stamp
+ specified.
:type end_time: datetime
- :param end_time: The time stamp to use for determining the last
- datapoint to return. The value specified is
- exclusive; results will include datapoints up to
- the time stamp specified.
+ :param end_time: The time stamp to use for determining the
+ last datapoint to return. The value specified is
+ exclusive; results will include datapoints up to the time
+ stamp specified.
:type metric_name: string
:param metric_name: The metric name.
@@ -317,7 +221,7 @@
:type statistics: list
:param statistics: A list of statistic names. Valid values:
- Average | Sum | SampleCount | Maximum | Minimum
+ Average | Sum | SampleCount | Maximum | Minimum
:type dimensions: dict
:param dimensions: A dictionary of dimension key/values where
@@ -325,16 +229,29 @@
is either a scalar value or an iterator
of values to be associated with that
dimension.
+
+ :type unit: string
+ :param unit: The unit for the metric. Valid values are:
+ Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
+ Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
+ Megabits | Gigabits | Terabits | Percent | Count |
+ Bytes/Second | Kilobytes/Second | Megabytes/Second |
+ Gigabytes/Second | Terabytes/Second | Bits/Second |
+ Kilobits/Second | Megabits/Second | Gigabits/Second |
+ Terabits/Second | Count/Second | None
+
:rtype: list
"""
- params = {'Period' : period,
- 'MetricName' : metric_name,
- 'Namespace' : namespace,
- 'StartTime' : start_time.isoformat(),
- 'EndTime' : end_time.isoformat()}
+ params = {'Period': period,
+ 'MetricName': metric_name,
+ 'Namespace': namespace,
+ 'StartTime': start_time.isoformat(),
+ 'EndTime': end_time.isoformat()}
self.build_list_params(params, statistics, 'Statistics.member.%d')
if dimensions:
self.build_dimension_param(dimensions, params)
+ if unit:
+ params['Unit'] = unit
return self.get_list('GetMetricStatistics', params,
[('member', Datapoint)])
@@ -345,30 +262,28 @@
data available.
:type next_token: str
- :param next_token: A maximum of 500 metrics will be returned at one
- time. If more results are available, the
- ResultSet returned will contain a non-Null
- next_token attribute. Passing that token as a
- parameter to list_metrics will retrieve the
- next page of metrics.
+ :param next_token: A maximum of 500 metrics will be returned
+ at one time. If more results are available, the ResultSet
+ returned will contain a non-Null next_token attribute.
+ Passing that token as a parameter to list_metrics will
+ retrieve the next page of metrics.
- :type dimension: dict
- :param dimension_filters: A dictionary containing name/value pairs
- that will be used to filter the results.
- The key in the dictionary is the name of
- a Dimension. The value in the dictionary
- is either a scalar value of that Dimension
- name that you want to filter on, a list
- of values to filter on or None if
- you want all metrics with that Dimension name.
+ :type dimensions: dict
+ :param dimensions: A dictionary containing name/value
+ pairs that will be used to filter the results. The key in
+ the dictionary is the name of a Dimension. The value in
+ the dictionary is either a scalar value of that Dimension
+ name that you want to filter on, a list of values to
+ filter on or None if you want all metrics with that
+ Dimension name.
:type metric_name: str
:param metric_name: The name of the Metric to filter against. If None,
- all Metric names will be returned.
+ all Metric names will be returned.
:type namespace: str
:param namespace: A Metric namespace to filter against (e.g. AWS/EC2).
- If None, Metrics from all namespaces will be returned.
+ If None, Metrics from all namespaces will be returned.
"""
params = {}
if next_token:
@@ -379,16 +294,16 @@
params['MetricName'] = metric_name
if namespace:
params['Namespace'] = namespace
-
+
return self.get_list('ListMetrics', params, [('member', Metric)])
-
- def put_metric_data(self, namespace, name, value=None, timestamp=None,
+
+ def put_metric_data(self, namespace, name, value=None, timestamp=None,
unit=None, dimensions=None, statistics=None):
"""
- Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch
- associates the data points with the specified metric. If the specified
- metric does not exist, Amazon CloudWatch creates the metric. If a list
- is specified for some, but not all, of the arguments, the remaining
+ Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch
+ associates the data points with the specified metric. If the specified
+ metric does not exist, Amazon CloudWatch creates the metric. If a list
+ is specified for some, but not all, of the arguments, the remaining
arguments are repeated a corresponding number of times.
:type namespace: str
@@ -401,11 +316,11 @@
:param value: The value for the metric.
:type timestamp: datetime or list
- :param timestamp: The time stamp used for the metric. If not specified,
+ :param timestamp: The time stamp used for the metric. If not specified,
the default value is set to the time the metric data was received.
-
+
:type unit: string or list
- :param unit: The unit of the metric. Valid Values: Seconds |
+ :param unit: The unit of the metric. Valid Values: Seconds |
Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
@@ -413,12 +328,12 @@
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None
-
+
:type dimensions: dict
- :param dimensions: Add extra name value pairs to associate
+ :param dimensions: Add extra name value pairs to associate
with the metric, i.e.:
{'name1': value1, 'name2': (value2, value3)}
-
+
:type statistics: dict or list
:param statistics: Use a statistic set instead of a value, for example::
@@ -428,8 +343,7 @@
self.build_put_params(params, name, value=value, timestamp=timestamp,
unit=unit, dimensions=dimensions, statistics=statistics)
- return self.get_status('PutMetricData', params)
-
+ return self.get_status('PutMetricData', params, verb="POST")
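
A hedged sketch of both calling styles the docstring above describes, now sent via POST (the namespace and metric name are made up):

    import boto.ec2.cloudwatch

    conn = boto.ec2.cloudwatch.connect_to_region('us-east-1')
    # Publish one raw datapoint.
    conn.put_metric_data(namespace='MyApp', name='RequestLatency',
                         value=123.0, unit='Milliseconds',
                         dimensions={'Stage': 'prod'})
    # Or publish a pre-aggregated statistic set instead of a value.
    conn.put_metric_data(namespace='MyApp', name='RequestLatency',
                         unit='Milliseconds',
                         statistics={'maximum': 30.0, 'minimum': 1.0,
                                     'samplecount': 100.0, 'sum': 1000.0})
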
def describe_alarms(self, action_prefix=None, alarm_name_prefix=None,
alarm_names=None, max_records=None, state_value=None,
@@ -445,21 +359,21 @@
:type alarm_name_prefix: string
:param alarm_name_prefix: The alarm name prefix. AlarmNames cannot
- be specified if this parameter is specified.
+ be specified if this parameter is specified.
:type alarm_names: list
:param alarm_names: A list of alarm names to retrieve information for.
:type max_records: int
:param max_records: The maximum number of alarm descriptions
- to retrieve.
+ to retrieve.
:type state_value: string
:param state_value: The state value to be used in matching alarms.
:type next_token: string
:param next_token: The token returned by a previous call to
- indicate that there is more data.
+ indicate that there is more data.
        :rtype: list
"""
@@ -477,7 +391,7 @@
if state_value:
params['StateValue'] = state_value
return self.get_list('DescribeAlarms', params,
- [('member', MetricAlarm)])
+ [('MetricAlarms', MetricAlarms)])[0]
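
With the change above, ``describe_alarms`` now returns the parsed MetricAlarms list directly; a small sketch (the name prefix is a placeholder):

    import boto.ec2.cloudwatch

    conn = boto.ec2.cloudwatch.connect_to_region('us-east-1')
    for alarm in conn.describe_alarms(alarm_name_prefix='prod-'):
        print alarm.name, alarm.state_value
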
def describe_alarm_history(self, alarm_name=None,
start_date=None, end_date=None,
@@ -503,15 +417,15 @@
:type history_item_type: string
        :param history_item_type: The type of alarm histories to retrieve
- (ConfigurationUpdate | StateUpdate | Action)
+ (ConfigurationUpdate | StateUpdate | Action)
:type max_records: int
:param max_records: The maximum number of alarm descriptions
- to retrieve.
+ to retrieve.
:type next_token: string
:param next_token: The token returned by a previous call to indicate
- that there is more data.
+ that there is more data.
        :rtype: list
"""
@@ -545,26 +459,25 @@
:type period: int
:param period: The period in seconds over which the statistic
- is applied.
+ is applied.
:type statistic: string
:param statistic: The statistic for the metric.
- :param dimension_filters: A dictionary containing name/value pairs
- that will be used to filter the results.
- The key in the dictionary is the name of
- a Dimension. The value in the dictionary
- is either a scalar value of that Dimension
- name that you want to filter on, a list
- of values to filter on or None if
- you want all metrics with that Dimension name.
+        :type dimensions: dict
+        :param dimensions: A dictionary containing name/value
+ pairs that will be used to filter the results. The key in
+ the dictionary is the name of a Dimension. The value in
+ the dictionary is either a scalar value of that Dimension
+ name that you want to filter on, a list of values to
+ filter on or None if you want all metrics with that
+ Dimension name.
:type unit: string
        :rtype: list
"""
- params = {'MetricName' : metric_name,
- 'Namespace' : namespace}
+ params = {'MetricName': metric_name,
+ 'Namespace': namespace}
if period:
params['Period'] = period
if statistic:
@@ -593,14 +506,14 @@
:param alarm: MetricAlarm object.
"""
params = {
- 'AlarmName' : alarm.name,
- 'MetricName' : alarm.metric,
- 'Namespace' : alarm.namespace,
- 'Statistic' : alarm.statistic,
- 'ComparisonOperator' : alarm.comparison,
- 'Threshold' : alarm.threshold,
- 'EvaluationPeriods' : alarm.evaluation_periods,
- 'Period' : alarm.period,
+ 'AlarmName': alarm.name,
+ 'MetricName': alarm.metric,
+ 'Namespace': alarm.namespace,
+ 'Statistic': alarm.statistic,
+ 'ComparisonOperator': alarm.comparison,
+ 'Threshold': alarm.threshold,
+ 'EvaluationPeriods': alarm.evaluation_periods,
+ 'Period': alarm.period,
}
if alarm.actions_enabled is not None:
params['ActionsEnabled'] = alarm.actions_enabled
@@ -657,9 +570,9 @@
:type state_reason_data: string
:param state_reason_data: Reason string (will be jsonified).
"""
- params = {'AlarmName' : alarm_name,
- 'StateReason' : state_reason,
- 'StateValue' : state_value}
+ params = {'AlarmName': alarm_name,
+ 'StateReason': state_reason,
+ 'StateValue': state_value}
if state_reason_data:
params['StateReasonData'] = json.dumps(state_reason_data)
@@ -686,4 +599,3 @@
params = {}
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
return self.get_status('DisableAlarmActions', params)
-
diff --git a/boto/ec2/cloudwatch/alarm.py b/boto/ec2/cloudwatch/alarm.py
index f81157d..b0b9fd0 100644
--- a/boto/ec2/cloudwatch/alarm.py
+++ b/boto/ec2/cloudwatch/alarm.py
@@ -23,11 +23,32 @@
from datetime import datetime
from boto.resultset import ResultSet
from boto.ec2.cloudwatch.listelement import ListElement
+from boto.ec2.cloudwatch.dimension import Dimension
+
try:
import simplejson as json
except ImportError:
import json
+
+class MetricAlarms(list):
+ def __init__(self, connection=None):
+ """
+ Parses a list of MetricAlarms.
+ """
+ list.__init__(self)
+ self.connection = connection
+
+ def startElement(self, name, attrs, connection):
+ if name == 'member':
+ metric_alarm = MetricAlarm(connection)
+ self.append(metric_alarm)
+ return metric_alarm
+
+ def endElement(self, name, value, connection):
+ pass
+
+
class MetricAlarm(object):
OK = 'OK'
@@ -35,10 +56,10 @@
INSUFFICIENT_DATA = 'INSUFFICIENT_DATA'
_cmp_map = {
- '>=' : 'GreaterThanOrEqualToThreshold',
- '>' : 'GreaterThanThreshold',
- '<' : 'LessThanThreshold',
- '<=' : 'LessThanOrEqualToThreshold',
+ '>=': 'GreaterThanOrEqualToThreshold',
+ '>': 'GreaterThanThreshold',
+ '<': 'LessThanThreshold',
+ '<=': 'LessThanOrEqualToThreshold',
}
_rev_cmp_map = dict((v, k) for (k, v) in _cmp_map.iteritems())
@@ -156,6 +177,9 @@
elif name == 'OKActions':
self.ok_actions = ListElement()
return self.ok_actions
+ elif name == 'Dimensions':
+ self.dimensions = Dimension()
+ return self.dimensions
else:
pass
@@ -266,7 +290,7 @@
self.ok_actions.append(action_arn)
def delete(self):
- self.connection.delete_alarms([self])
+ self.connection.delete_alarms([self.name])
class AlarmHistoryItem(object):
def __init__(self, connection=None):
diff --git a/boto/ec2/cloudwatch/dimension.py b/boto/ec2/cloudwatch/dimension.py
new file mode 100644
index 0000000..42c8a88
--- /dev/null
+++ b/boto/ec2/cloudwatch/dimension.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+class Dimension(dict):
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'Name':
+ self._name = value
+ elif name == 'Value':
+ if self._name in self:
+ self[self._name].append(value)
+ else:
+ self[self._name] = [value]
+ else:
+ setattr(self, name, value)
+
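A short sketch of the parsing contract the new class implements: boto's XML handler feeds Name/Value pairs through endElement, and repeated values under one name accumulate into a list (the instance IDs are made up):

    from boto.ec2.cloudwatch.dimension import Dimension

    d = Dimension()
    d.endElement('Name', 'InstanceId', None)
    d.endElement('Value', 'i-11111111', None)
    d.endElement('Value', 'i-22222222', None)
    print d  # {'InstanceId': ['i-11111111', 'i-22222222']}
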
diff --git a/boto/ec2/cloudwatch/metric.py b/boto/ec2/cloudwatch/metric.py
index cda02d8..9c19b94 100644
--- a/boto/ec2/cloudwatch/metric.py
+++ b/boto/ec2/cloudwatch/metric.py
@@ -1,4 +1,6 @@
-# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -21,22 +23,8 @@
#
from boto.ec2.cloudwatch.alarm import MetricAlarm
+from boto.ec2.cloudwatch.dimension import Dimension
-class Dimension(dict):
-
- def startElement(self, name, attrs, connection):
- pass
-
- def endElement(self, name, value, connection):
- if name == 'Name':
- self._name = value
- elif name == 'Value':
- if self._name in self:
- self[self._name].append(value)
- else:
- self[self._name] = [value]
- else:
- setattr(self, name, value)
class Metric(object):
@@ -72,6 +60,46 @@
setattr(self, name, value)
def query(self, start_time, end_time, statistics, unit=None, period=60):
+ """
+ :type start_time: datetime
+ :param start_time: The time stamp to use for determining the
+ first datapoint to return. The value specified is
+ inclusive; results include datapoints with the time stamp
+ specified.
+
+ :type end_time: datetime
+ :param end_time: The time stamp to use for determining the
+ last datapoint to return. The value specified is
+ exclusive; results will include datapoints up to the time
+ stamp specified.
+
+ :type statistics: list
+        :param statistics: A list of statistic names. Valid values:
+ Average | Sum | SampleCount | Maximum | Minimum
+
+ :type dimensions: dict
+ :param dimensions: A dictionary of dimension key/values where
+ the key is the dimension name and the value
+ is either a scalar value or an iterator
+ of values to be associated with that
+ dimension.
+
+ :type unit: string
+        :param unit: The unit for the metric. Valid values are:
+ Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
+ Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
+ Megabits | Gigabits | Terabits | Percent | Count |
+ Bytes/Second | Kilobytes/Second | Megabytes/Second |
+ Gigabytes/Second | Terabytes/Second | Bits/Second |
+ Kilobits/Second | Megabits/Second | Gigabits/Second |
+ Terabits/Second | Count/Second | None
+
+ :type period: integer
+ :param period: The granularity, in seconds, of the returned datapoints.
+ Period must be at least 60 seconds and must be a multiple
+ of 60. The default value is 60.
+
+ """
if not isinstance(statistics, list):
statistics = [statistics]
return self.connection.get_metric_statistics(period,
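
A minimal sketch of the query flow the docstring above documents, assuming a configured connection (the statistic, unit, one-hour window, and 300-second period are all arbitrary):

    from datetime import datetime, timedelta
    import boto.ec2.cloudwatch

    conn = boto.ec2.cloudwatch.connect_to_region('us-east-1')
    metric = conn.list_metrics(namespace='AWS/EC2')[0]
    end = datetime.utcnow()
    start = end - timedelta(hours=1)
    # period must be a multiple of 60; a bare statistic string is
    # wrapped in a list automatically.
    datapoints = metric.query(start, end, 'Average', unit='Percent',
                              period=300)
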
@@ -88,6 +116,21 @@
statistic, enabled=True, description=None,
dimensions=None, alarm_actions=None, ok_actions=None,
insufficient_data_actions=None, unit=None):
+ """
+ Creates or updates an alarm and associates it with this metric.
+ Optionally, this operation can associate one or more
+ Amazon Simple Notification Service resources with the alarm.
+
+ When this operation creates an alarm, the alarm state is immediately
+ set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
+        set appropriately. Any actions associated with the StateValue are
+        then executed.
+
+ When updating an existing alarm, its StateValue is left unchanged.
+
+        :rtype: :class:`boto.ec2.cloudwatch.alarm.MetricAlarm`
+        :return: The newly created or updated MetricAlarm object.
+ """
if not dimensions:
dimensions = self.dimensions
alarm = MetricAlarm(self.connection, name, self.name,
@@ -101,12 +144,32 @@
def describe_alarms(self, period=None, statistic=None,
dimensions=None, unit=None):
+ """
+ Retrieves all alarms for this metric. Specify a statistic, period,
+ or unit to filter the set of alarms further.
+
+ :type period: int
+ :param period: The period in seconds over which the statistic
+ is applied.
+
+ :type statistic: string
+ :param statistic: The statistic for the metric.
+
+        :type dimensions: dict
+        :param dimensions: A dictionary containing name/value
+ pairs that will be used to filter the results. The key in
+ the dictionary is the name of a Dimension. The value in
+ the dictionary is either a scalar value of that Dimension
+ name that you want to filter on, a list of values to
+ filter on or None if you want all metrics with that
+ Dimension name.
+
+ :type unit: string
+
+        :rtype: list
+ """
return self.connection.describe_alarms_for_metric(self.name,
self.namespace,
period,
statistic,
dimensions,
unit)
-
-
-
diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py
index f94f7f2..029c796 100644
--- a/boto/ec2/connection.py
+++ b/boto/ec2/connection.py
@@ -1,5 +1,6 @@
-# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -28,6 +29,7 @@
import warnings
from datetime import datetime
from datetime import timedelta
+
import boto
from boto.connection import AWSQueryConnection
from boto.resultset import ResultSet
@@ -36,7 +38,7 @@
from boto.ec2.instance import ConsoleOutput, InstanceAttribute
from boto.ec2.keypair import KeyPair
from boto.ec2.address import Address
-from boto.ec2.volume import Volume
+from boto.ec2.volume import Volume, VolumeAttribute
from boto.ec2.snapshot import Snapshot
from boto.ec2.snapshot import SnapshotAttribute
from boto.ec2.zone import Zone
@@ -45,22 +47,27 @@
from boto.ec2.instanceinfo import InstanceInfo
from boto.ec2.reservedinstance import ReservedInstancesOffering
from boto.ec2.reservedinstance import ReservedInstance
+from boto.ec2.reservedinstance import ReservedInstanceListing
from boto.ec2.spotinstancerequest import SpotInstanceRequest
from boto.ec2.spotpricehistory import SpotPriceHistory
from boto.ec2.spotdatafeedsubscription import SpotDatafeedSubscription
from boto.ec2.bundleinstance import BundleInstanceTask
from boto.ec2.placementgroup import PlacementGroup
from boto.ec2.tag import Tag
+from boto.ec2.instancestatus import InstanceStatusSet
+from boto.ec2.volumestatus import VolumeStatusSet
+from boto.ec2.networkinterface import NetworkInterface
from boto.exception import EC2ResponseError
#boto.set_stream_logger('ec2')
+
class EC2Connection(AWSQueryConnection):
- APIVersion = boto.config.get('Boto', 'ec2_version', '2011-11-01')
+ APIVersion = boto.config.get('Boto', 'ec2_version', '2012-08-15')
DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint',
- 'ec2.amazonaws.com')
+ 'ec2.us-east-1.amazonaws.com')
ResponseError = EC2ResponseError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
@@ -68,7 +75,8 @@
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
- api_version=None, security_token=None):
+ api_version=None, security_token=None,
+ validate_certs=True):
"""
Init method to create a new connection to EC2.
"""
@@ -82,7 +90,8 @@
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
- security_token)
+ security_token,
+ validate_certs=validate_certs)
if api_version:
self.APIVersion = api_version
@@ -115,7 +124,7 @@
value = [value]
j = 1
for v in value:
- params['Filter.%d.Value.%d' % (i,j)] = v
+ params['Filter.%d.Value.%d' % (i, j)] = v
j += 1
i += 1
@@ -180,7 +189,7 @@
self.build_list_params(params, kernel_ids, 'ImageId')
if owners:
self.build_list_params(params, owners, 'Owner')
- filter = {'image-type' : 'kernel'}
+ filter = {'image-type': 'kernel'}
self.build_filter_params(params, filter)
return self.get_list('DescribeImages', params,
[('item', Image)], verb='POST')
@@ -204,7 +213,7 @@
self.build_list_params(params, ramdisk_ids, 'ImageId')
if owners:
self.build_list_params(params, owners, 'Owner')
- filter = {'image-type' : 'ramdisk'}
+ filter = {'image-type': 'ramdisk'}
self.build_filter_params(params, filter)
return self.get_list('DescribeImages', params,
[('item', Image)], verb='POST')
@@ -221,7 +230,7 @@
"""
try:
return self.get_all_images(image_ids=[image_id])[0]
- except IndexError: # None of those images available
+ except IndexError: # None of those images available
return None
def register_image(self, name=None, description=None, image_location=None,
@@ -238,24 +247,23 @@
:type image_location: string
:param image_location: Full path to your AMI manifest in
- Amazon S3 storage.
- Only used for S3-based AMI's.
+            Amazon S3 storage. Only used for S3-based AMIs.
:type architecture: string
:param architecture: The architecture of the AMI. Valid choices are:
- i386 | x86_64
+ * i386
+ * x86_64
:type kernel_id: string
:param kernel_id: The ID of the kernel with which to launch
- the instances
+ the instances
:type root_device_name: string
:param root_device_name: The root device name (e.g. /dev/sdh)
:type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
:param block_device_map: A BlockDeviceMapping data structure
- describing the EBS volumes associated
- with the Image.
+ describing the EBS volumes associated with the Image.
:rtype: string
:return: The new image id
@@ -336,8 +344,8 @@
:rtype: string
:return: The new image id
"""
- params = {'InstanceId' : instance_id,
- 'Name' : name}
+ params = {'InstanceId': instance_id,
+ 'Name': name}
if description:
params['Description'] = description
if no_reboot:
@@ -365,8 +373,8 @@
:return: An ImageAttribute object representing the value of the
attribute requested
"""
- params = {'ImageId' : image_id,
- 'Attribute' : attribute}
+ params = {'ImageId': image_id,
+ 'Attribute': attribute}
return self.get_object('DescribeImageAttribute', params,
ImageAttribute, verb='POST')
@@ -397,9 +405,9 @@
product code can be associated with an AMI. Once
set, the product code cannot be changed or reset.
"""
- params = {'ImageId' : image_id,
- 'Attribute' : attribute,
- 'OperationType' : operation}
+ params = {'ImageId': image_id,
+ 'Attribute': attribute,
+ 'OperationType': operation}
if user_ids:
self.build_list_params(params, user_ids, 'UserId')
if groups:
@@ -421,8 +429,8 @@
:rtype: bool
:return: Whether the operation succeeded or not
"""
- params = {'ImageId' : image_id,
- 'Attribute' : attribute}
+ params = {'ImageId': image_id,
+ 'Attribute': attribute}
return self.get_status('ResetImageAttribute', params, verb='POST')
# Instance methods
@@ -435,14 +443,12 @@
:param instance_ids: A list of strings of instance IDs
:type filters: dict
- :param filters: Optional filters that can be used to limit
- the results returned. Filters are provided
- in the form of a dictionary consisting of
- filter names as the key and filter values
- as the value. The set of allowable filter
- names/values is dependent on the request
- being performed. Check the EC2 API guide
- for details.
+ :param filters: Optional filters that can be used to limit the
+ results returned. Filters are provided in the form of a
+ dictionary consisting of filter names as the key and
+ filter values as the value. The set of allowable filter
+ names/values is dependent on the request being performed.
+ Check the EC2 API guide for details.
:rtype: list
:return: A list of :class:`boto.ec2.instance.Reservation`
@@ -463,6 +469,48 @@
return self.get_list('DescribeInstances', params,
[('item', Reservation)], verb='POST')
+ def get_all_instance_status(self, instance_ids=None,
+ max_results=None, next_token=None,
+ filters=None):
+ """
+ Retrieve all the instances in your account scheduled for maintenance.
+
+ :type instance_ids: list
+ :param instance_ids: A list of strings of instance IDs
+
+ :type max_results: int
+ :param max_results: The maximum number of paginated instance
+ items per response.
+
+ :type next_token: str
+ :param next_token: A string specifying the next paginated set
+ of results to return.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list
+ :return: A list of instances that have maintenance scheduled.
+ """
+ params = {}
+ if instance_ids:
+ self.build_list_params(params, instance_ids, 'InstanceId')
+ if max_results:
+ params['MaxResults'] = max_results
+ if next_token:
+ params['NextToken'] = next_token
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_object('DescribeInstanceStatus', params,
+ InstanceStatusSet, verb='POST')
+
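A hedged sketch of paging through the new status API (the max_results value is arbitrary):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    status_set = conn.get_all_instance_status(max_results=50)
    for status in status_set:
        print status.id, status.state_name
    # status_set.next_token, when set, can be passed back in to
    # retrieve the next page.
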
def run_instances(self, image_id, min_count=1, max_count=1,
key_name=None, security_groups=None,
user_data=None, addressing_type=None,
@@ -474,25 +522,29 @@
instance_initiated_shutdown_behavior=None,
private_ip_address=None,
placement_group=None, client_token=None,
- security_group_ids=None):
+ security_group_ids=None,
+ additional_info=None, instance_profile_name=None,
+ instance_profile_arn=None, tenancy=None,
+ ebs_optimized=False):
"""
Runs an image on EC2.
:type image_id: string
- :param image_id: The ID of the image to run
+ :param image_id: The ID of the image to run.
:type min_count: int
- :param min_count: The minimum number of instances to launch
+ :param min_count: The minimum number of instances to launch.
:type max_count: int
- :param max_count: The maximum number of instances to launch
+ :param max_count: The maximum number of instances to launch.
:type key_name: string
- :param key_name: The name of the key pair with which to launch instances
+ :param key_name: The name of the key pair with which to
+ launch instances.
:type security_groups: list of strings
:param security_groups: The names of the security groups with which to
- associate instances
+ associate instances
:type user_data: string
:param user_data: The user data passed to the launched instances
@@ -500,81 +552,107 @@
:type instance_type: string
:param instance_type: The type of instance to run:
- * m1.small
- * m1.large
- * m1.xlarge
- * c1.medium
- * c1.xlarge
- * m2.xlarge
- * m2.2xlarge
- * m2.4xlarge
- * cc1.4xlarge
- * t1.micro
+ * t1.micro
+ * m1.small
+ * m1.medium
+ * m1.large
+ * m1.xlarge
+ * c1.medium
+ * c1.xlarge
+ * m2.xlarge
+ * m2.2xlarge
+ * m2.4xlarge
+ * cc1.4xlarge
+ * cg1.4xlarge
+ * cc2.8xlarge
:type placement: string
- :param placement: The availability zone in which to launch the instances
+ :param placement: The availability zone in which to launch
+ the instances.
:type kernel_id: string
:param kernel_id: The ID of the kernel with which to launch the
- instances
+ instances.
:type ramdisk_id: string
:param ramdisk_id: The ID of the RAM disk with which to launch the
- instances
+ instances.
:type monitoring_enabled: bool
- :param monitoring_enabled: Enable CloudWatch monitoring on the instance.
+ :param monitoring_enabled: Enable CloudWatch monitoring on
+ the instance.
:type subnet_id: string
:param subnet_id: The subnet ID within which to launch the instances
- for VPC.
+ for VPC.
:type private_ip_address: string
- :param private_ip_address: If you're using VPC, you can optionally use
- this parameter to assign the instance a
- specific available IP address from the
- subnet (e.g., 10.0.0.25).
+ :param private_ip_address: If you're using VPC, you can
+ optionally use this parameter to assign the instance a
+ specific available IP address from the subnet (e.g.,
+ 10.0.0.25).
:type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
:param block_device_map: A BlockDeviceMapping data structure
- describing the EBS volumes associated
- with the Image.
+ describing the EBS volumes associated with the Image.
:type disable_api_termination: bool
:param disable_api_termination: If True, the instances will be locked
- and will not be able to be terminated
- via the API.
+ and will not be able to be terminated via the API.
:type instance_initiated_shutdown_behavior: string
:param instance_initiated_shutdown_behavior: Specifies whether the
- instance stops or
- terminates on
- instance-initiated
- shutdown.
- Valid values are:
+ instance stops or terminates on instance-initiated shutdown.
+ Valid values are:
- * stop
- * terminate
+ * stop
+ * terminate
:type placement_group: string
:param placement_group: If specified, this is the name of the placement
- group in which the instance(s) will be launched.
+ group in which the instance(s) will be launched.
:type client_token: string
:param client_token: Unique, case-sensitive identifier you provide
- to ensure idempotency of the request.
- Maximum 64 ASCII characters
+ to ensure idempotency of the request. Maximum 64 ASCII characters.
+
+ :type security_group_ids: list of strings
+ :param security_group_ids: The ID of the VPC security groups with
+ which to associate instances.
+
+ :type additional_info: string
+ :param additional_info: Specifies additional information to make
+ available to the instance(s).
+
+ :type tenancy: string
+ :param tenancy: The tenancy of the instance you want to
+ launch. An instance with a tenancy of 'dedicated' runs on
+ single-tenant hardware and can only be launched into a
+            VPC. Valid values are: "default" or "dedicated".
+ NOTE: To use dedicated tenancy you MUST specify a VPC
+ subnet-ID as well.
+
+ :type instance_profile_arn: string
+        :param instance_profile_arn: The Amazon Resource Name (ARN) of
+ the IAM Instance Profile (IIP) to associate with the instances.
+
+ :type instance_profile_name: string
+ :param instance_profile_name: The name of
+ the IAM Instance Profile (IIP) to associate with the instances.
+
+ :type ebs_optimized: bool
+ :param ebs_optimized: Whether the instance is optimized for
+ EBS I/O. This optimization provides dedicated throughput
+ to Amazon EBS and an optimized configuration stack to
+ provide optimal EBS I/O performance. This optimization
+ isn't available with all instance types.
:rtype: Reservation
:return: The :class:`boto.ec2.instance.Reservation` associated with
the request for machines
-
- :type security_group_ids: list of strings
- :param security_group_ids: The ID of the VPC security groups with
- which to associate instances
"""
- params = {'ImageId':image_id,
- 'MinCount':min_count,
+ params = {'ImageId': image_id,
+ 'MinCount': min_count,
'MaxCount': max_count}
if key_name:
params['KeyName'] = key_name
@@ -582,7 +660,7 @@
l = []
for group in security_group_ids:
if isinstance(group, SecurityGroup):
- l.append(group.name)
+ l.append(group.id)
else:
l.append(group)
self.build_list_params(params, l, 'SecurityGroupId')
@@ -604,6 +682,8 @@
params['Placement.AvailabilityZone'] = placement
if placement_group:
params['Placement.GroupName'] = placement_group
+ if tenancy:
+ params['Placement.Tenancy'] = tenancy
if kernel_id:
params['KernelId'] = kernel_id
if ramdisk_id:
@@ -623,7 +703,16 @@
params['InstanceInitiatedShutdownBehavior'] = val
if client_token:
params['ClientToken'] = client_token
- return self.get_object('RunInstances', params, Reservation, verb='POST')
+ if additional_info:
+ params['AdditionalInfo'] = additional_info
+ if instance_profile_name:
+ params['IamInstanceProfile.Name'] = instance_profile_name
+ if instance_profile_arn:
+ params['IamInstanceProfile.Arn'] = instance_profile_arn
+ if ebs_optimized:
+ params['EbsOptimized'] = 'true'
+ return self.get_object('RunInstances', params, Reservation,
+ verb='POST')
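
A sketch tying the new keyword arguments together; the AMI, subnet, and profile names are placeholders, and dedicated tenancy requires the VPC subnet as the docstring notes:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    reservation = conn.run_instances('ami-12345678',
                                     instance_type='m1.small',
                                     subnet_id='subnet-12345678',
                                     tenancy='dedicated',
                                     instance_profile_name='my-role')
    instance = reservation.instances[0]
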
def terminate_instances(self, instance_ids=None):
"""
@@ -706,8 +795,8 @@
return self.get_status('RebootInstances', params)
def confirm_product_instance(self, product_code, instance_id):
- params = {'ProductCode' : product_code,
- 'InstanceId' : instance_id}
+ params = {'ProductCode': product_code,
+ 'InstanceId': instance_id}
rs = self.get_object('ConfirmProductInstance', params,
ResultSet, verb='POST')
return (rs.status, rs.ownerId)
@@ -723,18 +812,26 @@
:type attribute: string
        :param attribute: The attribute you need information about.
- Valid choices are:
+ Valid choices are:
- * instanceType|kernel|ramdisk|userData|
- * disableApiTermination|
- * instanceInitiatedShutdownBehavior|
- * rootDeviceName|blockDeviceMapping
+ * instanceType
+ * kernel
+ * ramdisk
+ * userData
+ * disableApiTermination
+ * instanceInitiatedShutdownBehavior
+ * rootDeviceName
+ * blockDeviceMapping
+ * productCodes
+ * sourceDestCheck
+ * groupSet
+ * ebsOptimized
        :rtype: :class:`boto.ec2.instance.InstanceAttribute`
:return: An InstanceAttribute object representing the value of the
attribute requested
"""
- params = {'InstanceId' : instance_id}
+ params = {'InstanceId': instance_id}
if attribute:
params['Attribute'] = attribute
return self.get_object('DescribeInstanceAttribute', params,
@@ -750,14 +847,15 @@
:type attribute: string
:param attribute: The attribute you wish to change.
- * AttributeName - Expected value (default)
- * instanceType - A valid instance type (m1.small)
- * kernel - Kernel ID (None)
- * ramdisk - Ramdisk ID (None)
- * userData - Base64 encoded String (None)
- * disableApiTermination - Boolean (true)
- * instanceInitiatedShutdownBehavior - stop|terminate
- * rootDeviceName - device name (None)
+ * instanceType - A valid instance type (m1.small)
+ * kernel - Kernel ID (None)
+ * ramdisk - Ramdisk ID (None)
+ * userData - Base64 encoded String (None)
+ * disableApiTermination - Boolean (true)
+ * instanceInitiatedShutdownBehavior - stop|terminate
+ * sourceDestCheck - Boolean (true)
+ * groupSet - Set of Security Groups or IDs
+ * ebsOptimized - Boolean (false)
:type value: string
:param value: The new value for the attribute
@@ -766,15 +864,29 @@
:return: Whether the operation succeeded or not
"""
# Allow a bool to be passed in for value of disableApiTermination
- if attribute == 'disableApiTermination':
+ bool_reqs = ('disableapitermination',
+ 'sourcedestcheck',
+ 'ebsoptimized')
+ if attribute.lower() in bool_reqs:
if isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
- params = {'InstanceId' : instance_id,
- 'Attribute' : attribute,
- 'Value' : value}
+
+ params = {'InstanceId': instance_id}
+
+ # groupSet is handled differently from other arguments
+ if attribute.lower() == 'groupset':
+ for idx, sg in enumerate(value):
+ if isinstance(sg, SecurityGroup):
+ sg = sg.id
+ params['GroupId.%s' % (idx + 1)] = sg
+ else:
+ # for backwards compatibility handle lowercase first letter
+ attribute = attribute[0].upper() + attribute[1:]
+ params['%s.Value' % attribute] = value
+
return self.get_status('ModifyInstanceAttribute', params, verb='POST')
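
A sketch of the two argument shapes the rewritten handler accepts (the instance and group IDs are placeholders):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    # Booleans are normalized for the flag-style attributes.
    conn.modify_instance_attribute('i-12345678', 'sourceDestCheck', False)
    # groupSet takes a list of security group IDs (or SecurityGroup objects).
    conn.modify_instance_attribute('i-12345678', 'groupSet',
                                   ['sg-11111111', 'sg-22222222'])
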
def reset_instance_attribute(self, instance_id, attribute):
@@ -791,8 +903,8 @@
:rtype: bool
:return: Whether the operation succeeded or not
"""
- params = {'InstanceId' : instance_id,
- 'Attribute' : attribute}
+ params = {'InstanceId': instance_id,
+ 'Attribute': attribute}
return self.get_status('ResetInstanceAttribute', params, verb='POST')
# Spot Instances
@@ -806,14 +918,12 @@
:param request_ids: A list of strings of spot instance request IDs
:type filters: dict
- :param filters: Optional filters that can be used to limit
- the results returned. Filters are provided
- in the form of a dictionary consisting of
- filter names as the key and filter values
- as the value. The set of allowable filter
- names/values is dependent on the request
- being performed. Check the EC2 API guide
- for details.
+ :param filters: Optional filters that can be used to limit the
+ results returned. Filters are provided in the form of a
+ dictionary consisting of filter names as the key and
+ filter values as the value. The set of allowable filter
+ names/values is dependent on the request being performed.
+ Check the EC2 API guide for details.
:rtype: list
:return: A list of
@@ -843,23 +953,30 @@
:type start_time: str
:param start_time: An indication of how far back to provide price
- changes for. An ISO8601 DateTime string.
+ changes for. An ISO8601 DateTime string.
:type end_time: str
:param end_time: An indication of how far forward to provide price
- changes for. An ISO8601 DateTime string.
+ changes for. An ISO8601 DateTime string.
:type instance_type: str
:param instance_type: Filter responses to a particular instance type.
:type product_description: str
:param product_description: Filter responses to a particular platform.
- Valid values are currently: "Linux/UNIX",
- "SUSE Linux", and "Windows"
+ Valid values are currently:
+
+ * Linux/UNIX
+ * SUSE Linux
+ * Windows
+ * Linux/UNIX (Amazon VPC)
+ * SUSE Linux (Amazon VPC)
+ * Windows (Amazon VPC)
:type availability_zone: str
:param availability_zone: The availability zone for which prices
- should be returned
+ should be returned. If not specified, data for all
+ availability zones will be returned.
:rtype: list
:return: A list tuples containing price and timestamp.
@@ -886,7 +1003,12 @@
instance_type='m1.small', placement=None,
kernel_id=None, ramdisk_id=None,
monitoring_enabled=False, subnet_id=None,
- block_device_map=None):
+ placement_group=None,
+ block_device_map=None,
+ instance_profile_arn=None,
+ instance_profile_name=None,
+ security_group_ids=None,
+ ebs_optimized=False):
"""
Request instances on the spot market at a particular price.
@@ -911,19 +1033,19 @@
:type launch_group: str
:param launch_group: If supplied, all requests will be fulfilled
- as a group.
+ as a group.
:type availability_zone_group: str
:param availability_zone_group: If supplied, all requests will be
- fulfilled within a single
- availability zone.
+ fulfilled within a single availability zone.
:type key_name: string
- :param key_name: The name of the key pair with which to launch instances
+ :param key_name: The name of the key pair with which to
+ launch instances
:type security_groups: list of strings
:param security_groups: The names of the security groups with which to
- associate instances
+ associate instances
:type user_data: string
:param user_data: The user data passed to the launched instances
@@ -931,47 +1053,72 @@
:type instance_type: string
:param instance_type: The type of instance to run:
- * m1.small
- * m1.large
- * m1.xlarge
- * c1.medium
- * c1.xlarge
- * m2.xlarge
- * m2.2xlarge
- * m2.4xlarge
- * cc1.4xlarge
- * t1.micro
+ * m1.small
+ * m1.large
+ * m1.xlarge
+ * c1.medium
+ * c1.xlarge
+ * m2.xlarge
+ * m2.2xlarge
+ * m2.4xlarge
+ * cc1.4xlarge
+ * t1.micro
:type placement: string
- :param placement: The availability zone in which to launch the instances
+ :param placement: The availability zone in which to launch
+ the instances
:type kernel_id: string
:param kernel_id: The ID of the kernel with which to launch the
- instances
+ instances
:type ramdisk_id: string
:param ramdisk_id: The ID of the RAM disk with which to launch the
- instances
+ instances
:type monitoring_enabled: bool
- :param monitoring_enabled: Enable CloudWatch monitoring on the instance.
+ :param monitoring_enabled: Enable CloudWatch monitoring on
+ the instance.
:type subnet_id: string
:param subnet_id: The subnet ID within which to launch the instances
- for VPC.
+ for VPC.
+
+ :type placement_group: string
+ :param placement_group: If specified, this is the name of the placement
+ group in which the instance(s) will be launched.
:type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
:param block_device_map: A BlockDeviceMapping data structure
- describing the EBS volumes associated
- with the Image.
+ describing the EBS volumes associated with the Image.
+
+ :type security_group_ids: list of strings
+ :param security_group_ids: The ID of the VPC security groups with
+ which to associate instances.
+
+ :type instance_profile_arn: string
+        :param instance_profile_arn: The Amazon Resource Name (ARN) of
+ the IAM Instance Profile (IIP) to associate with the instances.
+
+ :type instance_profile_name: string
+ :param instance_profile_name: The name of
+ the IAM Instance Profile (IIP) to associate with the instances.
+
+ :type ebs_optimized: bool
+ :param ebs_optimized: Whether the instance is optimized for
+ EBS I/O. This optimization provides dedicated throughput
+ to Amazon EBS and an optimized configuration stack to
+ provide optimal EBS I/O performance. This optimization
+ isn't available with all instance types.
        :rtype: list
:return: The :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
associated with the request for machines
"""
- params = {'LaunchSpecification.ImageId':image_id,
- 'Type' : type,
- 'SpotPrice' : price}
+ ls = 'LaunchSpecification'
+ params = {'%s.ImageId' % ls: image_id,
+ 'Type': type,
+ 'SpotPrice': price}
if count:
params['InstanceCount'] = count
if valid_from:
@@ -983,7 +1130,16 @@
if availability_zone_group:
params['AvailabilityZoneGroup'] = availability_zone_group
if key_name:
- params['LaunchSpecification.KeyName'] = key_name
+ params['%s.KeyName' % ls] = key_name
+ if security_group_ids:
+ l = []
+ for group in security_group_ids:
+ if isinstance(group, SecurityGroup):
+ l.append(group.id)
+ else:
+ l.append(group)
+ self.build_list_params(params, l,
+ '%s.SecurityGroupId' % ls)
if security_groups:
l = []
for group in security_groups:
@@ -991,31 +1147,37 @@
l.append(group.name)
else:
l.append(group)
- self.build_list_params(params, l,
- 'LaunchSpecification.SecurityGroup')
+ self.build_list_params(params, l, '%s.SecurityGroup' % ls)
if user_data:
- params['LaunchSpecification.UserData'] = base64.b64encode(user_data)
+ params['%s.UserData' % ls] = base64.b64encode(user_data)
if addressing_type:
- params['LaunchSpecification.AddressingType'] = addressing_type
+ params['%s.AddressingType' % ls] = addressing_type
if instance_type:
- params['LaunchSpecification.InstanceType'] = instance_type
+ params['%s.InstanceType' % ls] = instance_type
if placement:
- params['LaunchSpecification.Placement.AvailabilityZone'] = placement
+ params['%s.Placement.AvailabilityZone' % ls] = placement
if kernel_id:
- params['LaunchSpecification.KernelId'] = kernel_id
+ params['%s.KernelId' % ls] = kernel_id
if ramdisk_id:
- params['LaunchSpecification.RamdiskId'] = ramdisk_id
+ params['%s.RamdiskId' % ls] = ramdisk_id
if monitoring_enabled:
- params['LaunchSpecification.Monitoring.Enabled'] = 'true'
+ params['%s.Monitoring.Enabled' % ls] = 'true'
if subnet_id:
- params['LaunchSpecification.SubnetId'] = subnet_id
+ params['%s.SubnetId' % ls] = subnet_id
+ if placement_group:
+ params['%s.Placement.GroupName' % ls] = placement_group
if block_device_map:
- block_device_map.build_list_params(params, 'LaunchSpecification.')
+ block_device_map.build_list_params(params, '%s.' % ls)
+ if instance_profile_name:
+ params['%s.IamInstanceProfile.Name' % ls] = instance_profile_name
+ if instance_profile_arn:
+ params['%s.IamInstanceProfile.Arn' % ls] = instance_profile_arn
+ if ebs_optimized:
+ params['%s.EbsOptimized' % ls] = 'true'
return self.get_list('RequestSpotInstances', params,
[('item', SpotInstanceRequest)],
verb='POST')
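
A hedged sketch using the new VPC-oriented keywords (all IDs and the price are placeholders):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    requests = conn.request_spot_instances(
        price='0.08', image_id='ami-12345678', count=2,
        instance_type='m1.small', key_name='my-key',
        security_group_ids=['sg-11111111'],  # VPC groups go by ID
        subnet_id='subnet-12345678',
        instance_profile_name='my-role')
    for req in requests:
        print req.id, req.state
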
-
def cancel_spot_instance_requests(self, request_ids):
"""
Cancel the specified Spot Instance Requests.
@@ -1060,7 +1222,7 @@
:rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription`
:return: The datafeed subscription object or None
"""
- params = {'Bucket' : bucket}
+ params = {'Bucket': bucket}
if prefix:
params['Prefix'] = prefix
return self.get_object('CreateSpotDatafeedSubscription',
@@ -1161,12 +1323,64 @@
return self.get_object('AllocateAddress', params, Address, verb='POST')
- def associate_address(self, instance_id, public_ip=None, allocation_id=None):
+ def assign_private_ip_addresses(self, network_interface_id=None,
+ private_ip_addresses=None,
+ secondary_private_ip_address_count=None,
+ allow_reassignment=False):
+ """
+ Assigns one or more secondary private IP addresses to a network
+ interface in Amazon VPC.
+
+ :type network_interface_id: string
+ :param network_interface_id: The network interface to which the IP
+ address will be assigned.
+
+ :type private_ip_addresses: list
+ :param private_ip_addresses: Assigns the specified IP addresses as
+ secondary IP addresses to the network interface.
+
+ :type secondary_private_ip_address_count: int
+ :param secondary_private_ip_address_count: The number of secondary IP
+ addresses to assign to the network interface. You cannot specify
+ this parameter when also specifying private_ip_addresses.
+
+ :type allow_reassignment: bool
+ :param allow_reassignment: Specifies whether to allow an IP address
+ that is already assigned to another network interface or instance
+ to be reassigned to the specified network interface.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {}
+
+ if network_interface_id is not None:
+ params['NetworkInterfaceId'] = network_interface_id
+
+ if private_ip_addresses is not None:
+ self.build_list_params(params, private_ip_addresses,
+ 'PrivateIpAddress')
+ elif secondary_private_ip_address_count is not None:
+ params['SecondaryPrivateIpAddressCount'] = \
+ secondary_private_ip_address_count
+
+ if allow_reassignment:
+ params['AllowReassignment'] = 'true'
+
+ return self.get_status('AssignPrivateIpAddresses', params, verb='POST')
+
+ def associate_address(self, instance_id=None, public_ip=None,
+ allocation_id=None, network_interface_id=None,
+ private_ip_address=None, allow_reassociation=False):
"""
Associate an Elastic IP address with a currently running instance.
This requires one of ``public_ip`` or ``allocation_id`` depending
    on whether you're associating a VPC address or a plain EC2 address.
+ When using an Allocation ID, make sure to pass ``None`` for ``public_ip``
+        as EC2 expects a single parameter and if ``public_ip`` is passed
+        boto will prefer it over ``allocation_id``.
+
:type instance_id: string
:param instance_id: The ID of the instance
@@ -1176,16 +1390,40 @@
:type allocation_id: string
:param allocation_id: The allocation ID for a VPC-based elastic IP.
+ :type network_interface_id: string
+        :param network_interface_id: The network interface ID to which
+            the Elastic IP address is to be assigned.
+
+ :type private_ip_address: string
+ :param private_ip_address: The primary or secondary private IP address
+ to associate with the Elastic IP address.
+
+ :type allow_reassociation: bool
+ :param allow_reassociation: Specify this option to allow an Elastic IP
+ address that is already associated with another network interface
+ or instance to be re-associated with the specified instance or
+ interface.
+
:rtype: bool
:return: True if successful
"""
- params = { 'InstanceId' : instance_id }
+ params = {}
+ if instance_id is not None:
+ params['InstanceId'] = instance_id
+ elif network_interface_id is not None:
+ params['NetworkInterfaceId'] = network_interface_id
if public_ip is not None:
params['PublicIp'] = public_ip
elif allocation_id is not None:
params['AllocationId'] = allocation_id
+ if private_ip_address is not None:
+ params['PrivateIpAddress'] = private_ip_address
+
+ if allow_reassociation:
+ params['AllowReassociation'] = 'true'
+
return self.get_status('AssociateAddress', params, verb='POST')
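
A sketch of the two association styles the docstring distinguishes (the IDs are placeholders):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    # EC2-Classic: associate by public IP.
    conn.associate_address(instance_id='i-12345678',
                           public_ip='192.0.2.10')
    # VPC: pass the allocation ID and leave public_ip as None.
    conn.associate_address(instance_id='i-12345678',
                           allocation_id='eipalloc-12345678',
                           allow_reassociation=True)
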
def disassociate_address(self, public_ip=None, association_id=None):
@@ -1212,13 +1450,23 @@
def release_address(self, public_ip=None, allocation_id=None):
"""
- Free up an Elastic IP address.
+ Free up an Elastic IP address. Pass a public IP address to
+ release an EC2 Elastic IP address and an AllocationId to
+ release a VPC Elastic IP address. You should only pass
+ one value.
+
+ This requires one of ``public_ip`` or ``allocation_id`` depending
+        on whether you're releasing a VPC address or a plain EC2 address.
+
+ When using an Allocation ID, make sure to pass ``None`` for ``public_ip``
+        as EC2 expects a single parameter and if ``public_ip`` is passed
+        boto will prefer it over ``allocation_id``.
:type public_ip: string
:param public_ip: The public IP address for EC2 elastic IPs.
:type allocation_id: string
- :param allocation_id: The ID for VPC elastic IPs.
+ :param allocation_id: The Allocation ID for VPC elastic IPs.
:rtype: bool
:return: True if successful
@@ -1232,6 +1480,35 @@
return self.get_status('ReleaseAddress', params, verb='POST')
+ def unassign_private_ip_addresses(self, network_interface_id=None,
+ private_ip_addresses=None):
+ """
+ Unassigns one or more secondary private IP addresses from a network
+ interface in Amazon VPC.
+
+ :type network_interface_id: string
+ :param network_interface_id: The network interface from which the
+ secondary private IP address will be unassigned.
+
+ :type private_ip_addresses: list
+ :param private_ip_addresses: Specifies the secondary private IP
+ addresses that you want to unassign from the network interface.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {}
+
+ if network_interface_id is not None:
+ params['NetworkInterfaceId'] = network_interface_id
+
+ if private_ip_addresses is not None:
+ self.build_list_params(params, private_ip_addresses,
+ 'PrivateIpAddress')
+
+ return self.get_status('UnassignPrivateIpAddresses', params,
+ verb='POST')
+
# Volume methods
def get_all_volumes(self, volume_ids=None, filters=None):
@@ -1264,7 +1541,103 @@
return self.get_list('DescribeVolumes', params,
[('item', Volume)], verb='POST')
- def create_volume(self, size, zone, snapshot=None):
+ def get_all_volume_status(self, volume_ids=None,
+ max_results=None, next_token=None,
+ filters=None):
+ """
+ Retrieve the status of one or more volumes.
+
+ :type volume_ids: list
+ :param volume_ids: A list of strings of volume IDs
+
+ :type max_results: int
+ :param max_results: The maximum number of paginated instance
+ items per response.
+
+ :type next_token: str
+ :param next_token: A string specifying the next paginated set
+ of results to return.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list
+        :return: A list of volume statuses.
+ """
+ params = {}
+ if volume_ids:
+ self.build_list_params(params, volume_ids, 'VolumeId')
+ if max_results:
+ params['MaxResults'] = max_results
+ if next_token:
+ params['NextToken'] = next_token
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_object('DescribeVolumeStatus', params,
+ VolumeStatusSet, verb='POST')
+
+ def enable_volume_io(self, volume_id):
+ """
+ Enables I/O operations for a volume that had I/O operations
+ disabled because the data on the volume was potentially inconsistent.
+
+ :type volume_id: str
+ :param volume_id: The ID of the volume.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VolumeId': volume_id}
+ return self.get_status('EnableVolumeIO', params, verb='POST')
+
+ def get_volume_attribute(self, volume_id,
+ attribute='autoEnableIO'):
+ """
+ Describes attribute of the volume.
+
+ :type volume_id: str
+ :param volume_id: The ID of the volume.
+
+ :type attribute: str
+ :param attribute: The requested attribute. Valid values are:
+
+ * autoEnableIO
+
+ :rtype: list of :class:`boto.ec2.volume.VolumeAttribute`
+ :return: The requested Volume attribute
+ """
+ params = {'VolumeId': volume_id, 'Attribute': attribute}
+ return self.get_object('DescribeVolumeAttribute', params,
+ VolumeAttribute, verb='POST')
+
+ def modify_volume_attribute(self, volume_id, attribute, new_value):
+ """
+        Changes an attribute of a Volume.
+
+ :type volume_id: string
+ :param volume_id: The volume id you wish to change
+
+ :type attribute: string
+ :param attribute: The attribute you wish to change. Valid values are:
+ AutoEnableIO.
+
+ :type new_value: string
+ :param new_value: The new value of the attribute.
+ """
+ params = {'VolumeId': volume_id}
+ if attribute == 'AutoEnableIO':
+ params['AutoEnableIO.Value'] = new_value
+ return self.get_status('ModifyVolumeAttribute', params, verb='POST')
+
+ def create_volume(self, size, zone, snapshot=None,
+ volume_type=None, iops=None):
"""
Create a new EBS Volume.
@@ -1275,17 +1648,30 @@
:param zone: The availability zone in which the Volume will be created.
:type snapshot: string or :class:`boto.ec2.snapshot.Snapshot`
- :param snapshot: The snapshot from which the new Volume will be created.
+ :param snapshot: The snapshot from which the new Volume will be
+ created.
+
+ :type volume_type: string
+        :param volume_type: The type of the volume (optional). Valid
+ values are: standard | io1.
+
+ :type iops: int
+        :param iops: The provisioned IOPS you want to associate with
+            this volume (optional).
"""
if isinstance(zone, Zone):
zone = zone.name
- params = {'AvailabilityZone' : zone}
+ params = {'AvailabilityZone': zone}
if size:
params['Size'] = size
if snapshot:
if isinstance(snapshot, Snapshot):
snapshot = snapshot.id
params['SnapshotId'] = snapshot
+ if volume_type:
+ params['VolumeType'] = volume_type
+ if iops:
+ params['Iops'] = str(iops)
return self.get_object('CreateVolume', params, Volume, verb='POST')
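
A minimal sketch of the new provisioned-IOPS path plus the companion attribute call added above (the zone, size, and IOPS figure are arbitrary):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    # iops only applies when volume_type is 'io1'.
    vol = conn.create_volume(100, 'us-east-1a',
                             volume_type='io1', iops=1000)
    conn.modify_volume_attribute(vol.id, 'AutoEnableIO', 'true')
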
def delete_volume(self, volume_id):
@@ -1319,9 +1705,9 @@
:rtype: bool
:return: True if successful
"""
- params = {'InstanceId' : instance_id,
- 'VolumeId' : volume_id,
- 'Device' : device}
+ params = {'InstanceId': instance_id,
+ 'VolumeId': volume_id,
+ 'Device': device}
return self.get_status('AttachVolume', params, verb='POST')
def detach_volume(self, volume_id, instance_id=None,
@@ -1334,25 +1720,26 @@
:type instance_id: str
:param instance_id: The ID of the EC2 instance from which it will
- be detached.
+ be detached.
:type device: str
:param device: The device on the instance through which the
- volume is exposted (e.g. /dev/sdh)
+            volume is exposed (e.g. /dev/sdh)
:type force: bool
- :param force: Forces detachment if the previous detachment attempt did
- not occur cleanly. This option can lead to data loss or
- a corrupted file system. Use this option only as a last
- resort to detach a volume from a failed instance. The
- instance will not have an opportunity to flush file system
- caches nor file system meta data. If you use this option,
- you must perform file system check and repair procedures.
+ :param force: Forces detachment if the previous detachment
+ attempt did not occur cleanly. This option can lead to
+ data loss or a corrupted file system. Use this option only
+ as a last resort to detach a volume from a failed
+ instance. The instance will not have an opportunity to
+ flush file system caches nor file system meta data. If you
+ use this option, you must perform file system check and
+ repair procedures.
:rtype: bool
:return: True if successful
"""
- params = {'VolumeId' : volume_id}
+ params = {'VolumeId': volume_id}
if instance_id:
params['InstanceId'] = instance_id
if device:
@@ -1425,7 +1812,7 @@
:rtype: bool
:return: True if successful
"""
- params = {'VolumeId' : volume_id}
+ params = {'VolumeId': volume_id}
if description:
params['Description'] = description[0:255]
snapshot = self.get_object('CreateSnapshot', params,
@@ -1440,8 +1827,8 @@
params = {'SnapshotId': snapshot_id}
return self.get_status('DeleteSnapshot', params, verb='POST')
- def trim_snapshots(self, hourly_backups = 8, daily_backups = 7,
- weekly_backups = 4):
+ def trim_snapshots(self, hourly_backups=8, daily_backups=7,
+ weekly_backups=4):
"""
Trim excess snapshots, based on when they were taken. More current
snapshots are retained, with the number retained decreasing as you
@@ -1521,10 +1908,9 @@
if temp.__contains__(t) == False:
temp.append(t)
- target_backup_times = temp
- # make the oldeest dates first, and make sure the month start
+ # sort to make the oldest dates first, and make sure the month start
# and last four week's start are in the proper order
- target_backup_times.sort()
+ target_backup_times = sorted(temp)
# get all the snapshots, sort them by date and time, and
# organize them into one array for each volume:
@@ -1587,7 +1973,6 @@
time_period_number += 1
snap_found_for_this_time_period = False
-
def get_snapshot_attribute(self, snapshot_id,
attribute='createVolumePermission'):
"""
@@ -1605,7 +1990,7 @@
:rtype: list of :class:`boto.ec2.snapshotattribute.SnapshotAttribute`
:return: The requested Snapshot attribute
"""
- params = {'Attribute' : attribute}
+ params = {'Attribute': attribute}
if snapshot_id:
params['SnapshotId'] = snapshot_id
return self.get_object('DescribeSnapshotAttribute', params,
@@ -1622,23 +2007,23 @@
:type attribute: string
:param attribute: The attribute you wish to change. Valid values are:
- createVolumePermission
+ createVolumePermission
:type operation: string
:param operation: Either add or remove (this is required for changing
- snapshot ermissions)
+            snapshot permissions)
:type user_ids: list
:param user_ids: The Amazon IDs of users to add/remove attributes
:type groups: list
:param groups: The groups to add/remove attributes. The only valid
- value at this time is 'all'.
+ value at this time is 'all'.
"""
- params = {'SnapshotId' : snapshot_id,
- 'Attribute' : attribute,
- 'OperationType' : operation}
+ params = {'SnapshotId': snapshot_id,
+ 'Attribute': attribute,
+ 'OperationType': operation}
if user_ids:
self.build_list_params(params, user_ids, 'UserId')
if groups:
@@ -1659,8 +2044,8 @@
:rtype: bool
:return: Whether the operation succeeded or not
"""
- params = {'SnapshotId' : snapshot_id,
- 'Attribute' : attribute}
+ params = {'SnapshotId': snapshot_id,
+ 'Attribute': attribute}
return self.get_status('ResetSnapshotAttribute', params, verb='POST')
# Keypair methods
@@ -1671,17 +2056,15 @@
:type keynames: list
:param keynames: A list of the names of keypairs to retrieve.
- If not provided, all key pairs will be returned.
+ If not provided, all key pairs will be returned.
:type filters: dict
- :param filters: Optional filters that can be used to limit
- the results returned. Filters are provided
- in the form of a dictionary consisting of
- filter names as the key and filter values
- as the value. The set of allowable filter
- names/values is dependent on the request
- being performed. Check the EC2 API guide
- for details.
+ :param filters: Optional filters that can be used to limit the
+ results returned. Filters are provided in the form of a
+ dictionary consisting of filter names as the key and
+ filter values as the value. The set of allowable filter
+ names/values is dependent on the request being performed.
+ Check the EC2 API guide for details.
:rtype: list
:return: A list of :class:`boto.ec2.keypair.KeyPair`
@@ -1726,7 +2109,7 @@
The material attribute of the new KeyPair object
        will contain the unencrypted PEM encoded RSA private key.
"""
- params = {'KeyName':key_name}
+ params = {'KeyName': key_name}
return self.get_object('CreateKeyPair', params, KeyPair, verb='POST')
def delete_key_pair(self, key_name):
@@ -1736,7 +2119,7 @@
:type key_name: string
:param key_name: The name of the keypair to delete
"""
- params = {'KeyName':key_name}
+ params = {'KeyName': key_name}
return self.get_status('DeleteKeyPair', params, verb='POST')
def import_key_pair(self, key_name, public_key_material):
@@ -1772,13 +2155,14 @@
        will contain the unencrypted PEM encoded RSA private key.
"""
public_key_material = base64.b64encode(public_key_material)
- params = {'KeyName' : key_name,
- 'PublicKeyMaterial' : public_key_material}
+ params = {'KeyName': key_name,
+ 'PublicKeyMaterial': public_key_material}
return self.get_object('ImportKeyPair', params, KeyPair, verb='POST')
# SecurityGroup methods
- def get_all_security_groups(self, groupnames=None, group_ids=None, filters=None):
+ def get_all_security_groups(self, groupnames=None, group_ids=None,
+ filters=None):
"""
Get all security groups associated with your account in a region.
@@ -1834,10 +2218,8 @@
:rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
        :return: The newly created :class:`boto.ec2.securitygroup.SecurityGroup`.
"""
- params = {
- 'GroupName': name,
- 'GroupDescription': description
- }
+ params = {'GroupName': name,
+ 'GroupDescription': description}
if vpc_id is not None:
params['VpcId'] = vpc_id
@@ -1846,6 +2228,8 @@
SecurityGroup, verb='POST')
group.name = name
group.description = description
+ if vpc_id is not None:
+ group.vpc_id = vpc_id
return group
def delete_security_group(self, name=None, group_id=None):
@@ -1884,15 +2268,15 @@
:type group_name: string
:param group_name: The name of the security group you are adding
- the rule to.
+ the rule to.
:type src_security_group_name: string
:param src_security_group_name: The name of the security group you are
- granting access to.
+ granting access to.
:type src_security_group_owner_id: string
:param src_security_group_owner_id: The ID of the owner of the security
- group you are granting access to.
+ group you are granting access to.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
@@ -1905,7 +2289,7 @@
        :type to_port: int
        :param to_port: The ending port number you are enabling.
- See http://goo.gl/Yj5QC
+ See http://goo.gl/Yj5QC
:rtype: bool
:return: True if successful.
@@ -1928,7 +2312,8 @@
def authorize_security_group(self, group_name=None,
src_security_group_name=None,
src_security_group_owner_id=None,
- ip_protocol=None, from_port=None, to_port=None,
+ ip_protocol=None,
+ from_port=None, to_port=None,
cidr_ip=None, group_id=None,
src_security_group_group_id=None):
"""
@@ -1940,15 +2325,15 @@
:type group_name: string
:param group_name: The name of the security group you are adding
- the rule to.
+ the rule to.
:type src_security_group_name: string
:param src_security_group_name: The name of the security group you are
- granting access to.
+ granting access to.
:type src_security_group_owner_id: string
:param src_security_group_owner_id: The ID of the owner of the security
- group you are granting access to.
+ group you are granting access to.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
@@ -1959,21 +2344,19 @@
:type to_port: int
:param to_port: The ending port number you are enabling
- :type cidr_ip: string
+ :type cidr_ip: string or list of strings
:param cidr_ip: The CIDR block you are providing access to.
- See http://goo.gl/Yj5QC
+ See http://goo.gl/Yj5QC
:type group_id: string
- :param group_id: ID of the EC2 or VPC security group to modify.
- This is required for VPC security groups and
- can be used instead of group_name for EC2
- security groups.
+ :param group_id: ID of the EC2 or VPC security group to
+ modify. This is required for VPC security groups and can
+ be used instead of group_name for EC2 security groups.
- :type group_id: string
- :param group_id: ID of the EC2 or VPC source security group.
- This is required for VPC security groups and
- can be used instead of group_name for EC2
- security groups.
+ :type src_security_group_group_id: string
+ :param src_security_group_group_id: The ID of the security
+ group you are granting access to. Can be used instead of
+ src_security_group_name.
:rtype: bool
:return: True if successful.
@@ -2006,7 +2389,11 @@
if to_port is not None:
params['IpPermissions.1.ToPort'] = to_port
if cidr_ip:
- params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
+ if not isinstance(cidr_ip, list):
+ cidr_ip = [cidr_ip]
+ for i, single_cidr_ip in enumerate(cidr_ip):
+ params['IpPermissions.1.IpRanges.%d.CidrIp' % (i+1)] = \
+ single_cidr_ip
return self.get_status('AuthorizeSecurityGroupIngress',
params, verb='POST')
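
The list handling added above means a single call can now authorize
several CIDR blocks at once; a hedged sketch (group and blocks are
examples)::

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    # Emits IpPermissions.1.IpRanges.1..N.CidrIp, one entry per block.
    conn.authorize_security_group(group_name='web-sg',
                                  ip_protocol='tcp',
                                  from_port=443, to_port=443,
                                  cidr_ip=['10.0.0.0/16', '10.1.0.0/16'])
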
@@ -2037,7 +2424,7 @@
if src_group_id is not None:
params['IpPermissions.1.Groups.1.GroupId'] = src_group_id
if cidr_ip is not None:
- params['IpPermissions.1.Groups.1.CidrIp'] = cidr_ip
+ params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
return self.get_status('AuthorizeSecurityGroupEgress',
params, verb='POST')
@@ -2084,18 +2471,6 @@
:param cidr_ip: The CIDR block you are revoking access to.
http://goo.gl/Yj5QC
- :type group_id: string
- :param group_id: ID of the EC2 or VPC security group to modify.
- This is required for VPC security groups and
- can be used instead of group_name for EC2
- security groups.
-
- :type group_id: string
- :param group_id: ID of the EC2 or VPC source security group.
- This is required for VPC security groups and
- can be used instead of group_name for EC2
- security groups.
-
:rtype: bool
:return: True if successful.
"""
@@ -2114,7 +2489,8 @@
params['CidrIp'] = cidr_ip
return self.get_status('RevokeSecurityGroupIngress', params)
- def revoke_security_group(self, group_name=None, src_security_group_name=None,
+ def revoke_security_group(self, group_name=None,
+ src_security_group_name=None,
src_security_group_owner_id=None,
ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None, group_id=None,
@@ -2128,15 +2504,15 @@
:type group_name: string
:param group_name: The name of the security group you are removing
- the rule from.
+ the rule from.
:type src_security_group_name: string
:param src_security_group_name: The name of the security group you are
- revoking access to.
+ revoking access to.
:type src_security_group_owner_id: string
:param src_security_group_owner_id: The ID of the owner of the security
- group you are revoking access to.
+ group you are revoking access to.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
@@ -2149,7 +2525,17 @@
:type cidr_ip: string
:param cidr_ip: The CIDR block you are revoking access to.
- See http://goo.gl/Yj5QC
+ See http://goo.gl/Yj5QC
+
+ :type group_id: string
+ :param group_id: ID of the EC2 or VPC security group to
+ modify. This is required for VPC security groups and can
+ be used instead of group_name for EC2 security groups.
+
+ :type src_security_group_group_id: string
+ :param src_security_group_group_id: The ID of the security group
+ for which you are revoking access. Can be used instead
+ of src_security_group_name.
:rtype: bool
:return: True if successful.
@@ -2192,14 +2578,15 @@
src_group_id=None,
cidr_ip=None):
"""
- Remove an existing egress rule from an existing VPC security group.
- You need to pass in an ip_protocol, from_port and to_port range only
- if the protocol you are using is port-based. You also need to pass in either
- a src_group_id or cidr_ip.
+ Remove an existing egress rule from an existing VPC security
+ group. You need to pass in an ip_protocol, from_port and
+ to_port range only if the protocol you are using is
+ port-based. You also need to pass in either a src_group_id or
+ cidr_ip.
:type group_id: string
:param group_id: The ID of the security group you are removing
- the rule from.
+ the rule from.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp | -1
@@ -2211,16 +2598,17 @@
:param to_port: The ending port number you are disabling
:type src_group_id: string
- :param src_group_id: The source security group you are revoking access to.
+ :param src_group_id: The source security group you are
+ revoking access to.
:type cidr_ip: string
:param cidr_ip: The CIDR block you are revoking access to.
- See http://goo.gl/Yj5QC
+ See http://goo.gl/Yj5QC
:rtype: bool
:return: True if successful.
"""
-
+
params = {}
if group_id:
params['GroupId'] = group_id
@@ -2266,7 +2654,7 @@
self.build_list_params(params, region_names, 'RegionName')
if filters:
self.build_filter_params(params, filters)
- regions = self.get_list('DescribeRegions', params,
+ regions = self.get_list('DescribeRegions', params,
[('item', RegionInfo)], verb='POST')
for region in regions:
region.connection_cls = EC2Connection
@@ -2276,17 +2664,26 @@
# Reservation methods
#
- def get_all_reserved_instances_offerings(self, reserved_instances_id=None,
+ def get_all_reserved_instances_offerings(self,
+ reserved_instances_offering_ids=None,
instance_type=None,
availability_zone=None,
product_description=None,
- filters=None):
+ filters=None,
+ instance_tenancy=None,
+ offering_type=None,
+ include_marketplace=None,
+ min_duration=None,
+ max_duration=None,
+ max_instance_count=None,
+ next_token=None,
+ max_results=None):
"""
Describes Reserved Instance offerings that are available for purchase.
- :type reserved_instances_id: str
- :param reserved_instances_id: Displays Reserved Instances with the
- specified offering IDs.
+ :type reserved_instances_offering_ids: list
+ :param reserved_instances_offering_ids: One or more Reserved
+ Instances offering IDs.
:type instance_type: str
:param instance_type: Displays Reserved Instances of the specified
@@ -2310,12 +2707,49 @@
being performed. Check the EC2 API guide
for details.
+ :type instance_tenancy: string
+ :param instance_tenancy: The tenancy of the Reserved Instance offering.
+ A Reserved Instance with tenancy of dedicated will run on
+ single-tenant hardware and can only be launched within a VPC.
+
+ :type offering_type: string
+ :param offering_type: The Reserved Instance offering type.
+ Valid Values:
+ * Heavy Utilization
+ * Medium Utilization
+ * Light Utilization
+
+ :type include_marketplace: bool
+ :param include_marketplace: Include Marketplace offerings in the
+ response.
+
+ :type min_duration: int
+ :param min_duration: Minimum duration (in seconds) to filter when
+ searching for offerings.
+
+ :type max_duration: int
+ :param max_duration: Maximum duration (in seconds) to filter when
+ searching for offerings.
+
+ :type max_instance_count: int
+ :param max_instance_count: Maximum number of instances to filter when
+ searching for offerings.
+
+ :type next_token: string
+ :param next_token: Token to use when requesting the next paginated set
+ of offerings.
+
+ :type max_results: int
+ :param max_results: Maximum number of offerings to return per call.
+
:rtype: list
- :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstancesOffering`
+ :return: A list of
+ :class:`boto.ec2.reservedinstance.ReservedInstancesOffering`.
+
"""
params = {}
- if reserved_instances_id:
- params['ReservedInstancesId'] = reserved_instances_id
+ if reserved_instances_offering_ids is not None:
+ self.build_list_params(params, reserved_instances_offering_ids,
+ 'ReservedInstancesOfferingId')
if instance_type:
params['InstanceType'] = instance_type
if availability_zone:
@@ -2324,6 +2758,25 @@
params['ProductDescription'] = product_description
if filters:
self.build_filter_params(params, filters)
+ if instance_tenancy is not None:
+ params['InstanceTenancy'] = instance_tenancy
+ if offering_type is not None:
+ params['OfferingType'] = offering_type
+ if include_marketplace is not None:
+ if include_marketplace:
+ params['IncludeMarketplace'] = 'true'
+ else:
+ params['IncludeMarketplace'] = 'false'
+ if min_duration is not None:
+ params['MinDuration'] = str(min_duration)
+ if max_duration is not None:
+ params['MaxDuration'] = str(max_duration)
+ if max_instance_count is not None:
+ params['MaxInstanceCount'] = str(max_instance_count)
+ if next_token is not None:
+ params['NextToken'] = next_token
+ if max_results is not None:
+ params['MaxResults'] = str(max_results)
return self.get_list('DescribeReservedInstancesOfferings',
params, [('item', ReservedInstancesOffering)],
@@ -2363,7 +2816,7 @@
def purchase_reserved_instance_offering(self,
reserved_instances_offering_id,
- instance_count=1):
+ instance_count=1, limit_price=None):
"""
Purchase a Reserved Instance for use with your account.
** CAUTION **
@@ -2378,15 +2831,107 @@
:param instance_count: The number of Reserved Instances to purchase.
Default value is 1.
+ :type limit_price: tuple
+ :param limit_price: Limit the price on the total order.
+ Must be a tuple of (amount, currency_code), for example:
+ (100.0, 'USD').
+
:rtype: :class:`boto.ec2.reservedinstance.ReservedInstance`
:return: The newly created Reserved Instance
"""
params = {
- 'ReservedInstancesOfferingId' : reserved_instances_offering_id,
- 'InstanceCount' : instance_count}
+ 'ReservedInstancesOfferingId': reserved_instances_offering_id,
+ 'InstanceCount': instance_count}
+ if limit_price is not None:
+ params['LimitPrice.Amount'] = str(limit_price[0])
+ params['LimitPrice.CurrencyCode'] = str(limit_price[1])
return self.get_object('PurchaseReservedInstancesOffering', params,
ReservedInstance, verb='POST')
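
A sketch of the new limit_price tuple (the offering lookup is
illustrative, and note the CAUTION above: this call bills the account)::

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    offerings = conn.get_all_reserved_instances_offerings(
        instance_type='m1.small', availability_zone='us-east-1a')
    # Cap the total order at 100.0 USD via (amount, currency_code).
    ri = conn.purchase_reserved_instance_offering(
        offerings[0].id, instance_count=1, limit_price=(100.0, 'USD'))
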
+ def create_reserved_instances_listing(self, reserved_instances_id,
+ instance_count, price_schedules,
+ client_token):
+ """Creates a new listing for Reserved Instances.
+
+ Creates a new listing for Amazon EC2 Reserved Instances that will be
+ sold in the Reserved Instance Marketplace. You can submit one Reserved
+ Instance listing at a time.
+
+ The Reserved Instance Marketplace matches sellers who want to resell
+ Reserved Instance capacity that they no longer need with buyers who
+ want to purchase additional capacity. Reserved Instances bought and
+ sold through the Reserved Instance Marketplace work like any other
+ Reserved Instances.
+
+ If you want to sell your Reserved Instances, you must first register as
+ a Seller in the Reserved Instance Marketplace. After completing the
+ registration process, you can create a Reserved Instance Marketplace
+ listing of some or all of your Reserved Instances, and specify the
+ upfront price you want to receive for them. Your Reserved Instance
+ listings then become available for purchase.
+
+ :type reserved_instances_id: string
+ :param reserved_instances_id: The ID of the Reserved Instance that
+ will be listed.
+
+ :type instance_count: int
+ :param instance_count: The number of instances that are a part of a
+ Reserved Instance account that will be listed in the Reserved
+ Instance Marketplace. This number should be less than or equal to
+ the instance count associated with the Reserved Instance ID
+ specified in this call.
+
+ :type price_schedules: List of tuples
+ :param price_schedules: A list specifying the price of the Reserved
+ Instance for each month remaining in the Reserved Instance term.
+ Each tuple contains two elements, the price and the term. For
+ example, for an instance that has 11 months remaining in its term,
+ we can have a price schedule with an upfront price of $2.50.
+ At 8 months remaining we can drop the price down to $2.00.
+ This would be expressed as::
+
+ price_schedules=[('2.50', 11), ('2.00', 8)]
+
+ :type client_token: string
+ :param client_token: Unique, case-sensitive identifier you provide
+ to ensure idempotency of the request. Maximum 64 ASCII characters.
+
+ :rtype: list
+ :return: A list of
+ :class:`boto.ec2.reservedinstance.ReservedInstanceListing`
+
+ """
+ params = {
+ 'ReservedInstancesId': reserved_instances_id,
+ 'InstanceCount': str(instance_count),
+ 'ClientToken': client_token,
+ }
+ for i, schedule in enumerate(price_schedules):
+ price, term = schedule
+ params['PriceSchedules.%s.Price' % i] = str(price)
+ params['PriceSchedules.%s.Term' % i] = str(term)
+ return self.get_list('CreateReservedInstancesListing',
+ params, [('item', ReservedInstanceListing)], verb='POST')
+
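
Building on the price_schedules example in the docstring, a hedged
end-to-end sketch (the Reserved Instance ID is a placeholder)::

    import uuid
    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    listing = conn.create_reserved_instances_listing(
        reserved_instances_id='b847fa93-example',
        instance_count=1,
        price_schedules=[('2.50', 11), ('2.00', 8)],
        # A random token keeps retries of this request idempotent.
        client_token=str(uuid.uuid4()))
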
+ def cancel_reserved_instances_listing(
+ self, reserved_instances_listing_ids=None):
+ """Cancels the specified Reserved Instance listing.
+
+ :type reserved_instances_listing_ids: List of strings
+ :param reserved_instances_listing_ids: The IDs of the
+ Reserved Instance listings to be cancelled.
+
+ :rtype: list
+ :return: A list of
+ :class:`boto.ec2.reservedinstance.ReservedInstanceListing`
+
+ """
+ params = {}
+ if reserved_instances_listing_ids is not None:
+ self.build_list_params(params, reserved_instances_listing_ids,
+ 'ReservedInstancesListingId')
+ return self.get_list('CancelReservedInstancesListing',
+ params, [('item', ReservedInstanceListing)], verb='POST')
+
#
# Monitoring
#
@@ -2473,10 +3018,10 @@
user's image into Amazon S3.
"""
- params = {'InstanceId' : instance_id,
- 'Storage.S3.Bucket' : s3_bucket,
- 'Storage.S3.Prefix' : s3_prefix,
- 'Storage.S3.UploadPolicy' : s3_upload_policy}
+ params = {'InstanceId': instance_id,
+ 'Storage.S3.Bucket': s3_bucket,
+ 'Storage.S3.Prefix': s3_prefix,
+ 'Storage.S3.UploadPolicy': s3_upload_policy}
s3auth = boto.auth.get_auth_handler(None, boto.config,
self.provider, ['s3'])
params['Storage.S3.AWSAccessKeyId'] = self.aws_access_key_id
@@ -2522,7 +3067,7 @@
:param bundle_id: The identifier of the bundle task to cancel.
"""
- params = {'BundleId' : bundle_id}
+ params = {'BundleId': bundle_id}
return self.get_object('CancelBundleTask', params,
BundleInstanceTask, verb='POST')
@@ -2535,7 +3080,7 @@
password for.
"""
- params = {'InstanceId' : instance_id}
+ params = {'InstanceId': instance_id}
rs = self.get_object('GetPasswordData', params, ResultSet, verb='POST')
return rs.passwordData
@@ -2606,8 +3151,7 @@
# Tag methods
def build_tag_param_list(self, params, tags):
- keys = tags.keys()
- keys.sort()
+ keys = sorted(tags.keys())
i = 1
for key in keys:
value = tags[key]
@@ -2682,3 +3226,117 @@
self.build_tag_param_list(params, tags)
return self.get_status('DeleteTags', params, verb='POST')
+ # Network Interface methods
+
+ def get_all_network_interfaces(self, filters=None):
+ """
+ Retrieve all of the Elastic Network Interfaces (ENIs)
+ associated with your account.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.networkinterface.NetworkInterface`
+ """
+ params = {}
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_list('DescribeNetworkInterfaces', params,
+ [('item', NetworkInterface)], verb='POST')
+
+ def create_network_interface(self, subnet_id, private_ip_address=None,
+ description=None, groups=None):
+ """
+ Creates a network interface in the specified subnet.
+
+ :type subnet_id: str
+ :param subnet_id: The ID of the subnet to associate with the
+ network interface.
+
+ :type private_ip_address: str
+ :param private_ip_address: The private IP address of the
+ network interface. If not supplied, one will be chosen
+ for you.
+
+ :type description: str
+ :param description: The description of the network interface.
+
+ :type groups: list
+ :param groups: Lists the groups for use by the network interface.
+ This can be either a list of group IDs or a list of
+ :class:`boto.ec2.securitygroup.SecurityGroup` objects.
+
+ :rtype: :class:`boto.ec2.networkinterface.NetworkInterface`
+ :return: The newly created network interface.
+ """
+ params = {'SubnetId': subnet_id}
+ if private_ip_address:
+ params['PrivateIpAddress'] = private_ip_address
+ if description:
+ params['Description'] = description
+ if groups:
+ ids = []
+ for group in groups:
+ if isinstance(group, SecurityGroup):
+ ids.append(group.id)
+ else:
+ ids.append(group)
+ self.build_list_params(params, ids, 'SecurityGroupId')
+ return self.get_object('CreateNetworkInterface', params,
+ NetworkInterface, verb='POST')
+
+ def attach_network_interface(self, network_interface_id,
+ instance_id, device_index):
+ """
+ Attaches a network interface to an instance.
+
+ :type network_interface_id: str
+ :param network_interface_id: The ID of the network interface to attach.
+
+ :type instance_id: str
+ :param instance_id: The ID of the instance to which the
+ network interface will be attached.
+
+ :type device_index: int
+ :param device_index: The index of the device for the network
+ interface attachment on the instance.
+ """
+ params = {'NetworkInterfaceId': network_interface_id,
+ 'InstanceId': instance_id,
+ 'DeviceIndex': device_index}
+ return self.get_status('AttachNetworkInterface', params, verb='POST')
+
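
A combined sketch of the two ENI calls above: create an interface in a
subnet, then attach it to a running instance as its second device (the
subnet and instance IDs are placeholders)::

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    eni = conn.create_network_interface('subnet-12345678',
                                        description='secondary interface')
    # device_index=1 makes this eth1 on the target instance.
    conn.attach_network_interface(eni.id, 'i-12345678', 1)
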
+ def detach_network_interface(self, attachment_id, force=False):
+ """
+ Detaches a network interface from an instance.
+
+ :type attachment_id: str
+ :param attachment_id: The ID of the attachment.
+
+ :type force: bool
+ :param force: Set to true to force a detachment.
+
+ """
+ params = {'AttachmentId': attachment_id}
+ if force:
+ params['Force'] = 'true'
+ return self.get_status('DetachNetworkInterface', params, verb='POST')
+
+ def delete_network_interface(self, network_interface_id):
+ """
+ Delete the specified network interface.
+
+ :type network_interface_id: str
+ :param network_interface_id: The ID of the network interface to delete.
+
+ """
+ params = {'NetworkInterfaceId': network_interface_id}
+ return self.get_status('DeleteNetworkInterface', params, verb='POST')
diff --git a/boto/ec2/elb/__init__.py b/boto/ec2/elb/__init__.py
index def3631..9a5e324 100644
--- a/boto/ec2/elb/__init__.py
+++ b/boto/ec2/elb/__init__.py
@@ -32,16 +32,18 @@
import boto
RegionData = {
- 'us-east-1' : 'elasticloadbalancing.us-east-1.amazonaws.com',
- 'us-west-1' : 'elasticloadbalancing.us-west-1.amazonaws.com',
- 'us-west-2' : 'elasticloadbalancing.us-west-2.amazonaws.com',
- 'eu-west-1' : 'elasticloadbalancing.eu-west-1.amazonaws.com',
- 'ap-northeast-1' : 'elasticloadbalancing.ap-northeast-1.amazonaws.com',
- 'ap-southeast-1' : 'elasticloadbalancing.ap-southeast-1.amazonaws.com'}
+ 'us-east-1': 'elasticloadbalancing.us-east-1.amazonaws.com',
+ 'us-west-1': 'elasticloadbalancing.us-west-1.amazonaws.com',
+ 'us-west-2': 'elasticloadbalancing.us-west-2.amazonaws.com',
+ 'sa-east-1': 'elasticloadbalancing.sa-east-1.amazonaws.com',
+ 'eu-west-1': 'elasticloadbalancing.eu-west-1.amazonaws.com',
+ 'ap-northeast-1': 'elasticloadbalancing.ap-northeast-1.amazonaws.com',
+ 'ap-southeast-1': 'elasticloadbalancing.ap-southeast-1.amazonaws.com'}
+
def regions():
"""
- Get all available regions for the SDB service.
+ Get all available regions for the ELB service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
@@ -54,6 +56,7 @@
regions.append(region)
return regions
+
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
@@ -70,22 +73,24 @@
return region.connect(**kw_params)
return None
+
class ELBConnection(AWSQueryConnection):
- APIVersion = boto.config.get('Boto', 'elb_version', '2011-04-05')
+ APIVersion = boto.config.get('Boto', 'elb_version', '2011-11-15')
DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'elb_region_endpoint',
- 'elasticloadbalancing.amazonaws.com')
+ 'elasticloadbalancing.us-east-1.amazonaws.com')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=False, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
- https_connection_factory=None, region=None, path='/'):
+ https_connection_factory=None, region=None, path='/',
+ security_token=None, validate_certs=True):
"""
Init method to create a new connection to EC2 Load Balancing Service.
- B{Note:} The region argument is overridden by the region specified in
- the boto configuration file.
+ .. note:: The region argument is overridden by the region specified in
+ the boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
@@ -96,7 +101,9 @@
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
- https_connection_factory, path)
+ https_connection_factory, path,
+ security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return ['ec2']
@@ -112,10 +119,11 @@
Retrieve all load balancers associated with your account.
:type load_balancer_names: list
- :param load_balancer_names: An optional list of load balancer names
+ :keyword load_balancer_names: An optional list of load balancer names.
- :rtype: list
- :return: A list of :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
+ :rtype: :py:class:`boto.resultset.ResultSet`
+ :return: A ResultSet containing instances of
+ :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
params = {}
if load_balancer_names:
@@ -124,9 +132,14 @@
return self.get_list('DescribeLoadBalancers', params,
[('member', LoadBalancer)])
- def create_load_balancer(self, name, zones, listeners):
+ def create_load_balancer(self, name, zones, listeners, subnets=None,
+ security_groups=None, scheme='internet-facing'):
"""
- Create a new load balancer for your account.
+ Create a new load balancer for your account. By default the load
+ balancer will be created in EC2. To create a load balancer inside a
+ VPC, parameter zones must be set to None and subnets must not be None.
+ The load balancer will be automatically created under the VPC that
+ contains the subnet(s) specified.
:type name: string
:param name: The mnemonic name associated with the new load balancer
@@ -136,63 +149,96 @@
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
- (LoadBalancerPortNumber, InstancePortNumber,
- Protocol, [SSLCertificateId])
- where LoadBalancerPortNumber and InstancePortNumber
- are integer values between 1 and 65535, Protocol is a
- string containing either 'TCP', 'HTTP' or 'HTTPS';
- SSLCertificateID is the ARN of a AWS AIM certificate,
- and must be specified when doing HTTPS.
+ (LoadBalancerPortNumber, InstancePortNumber, Protocol,
+ [SSLCertificateId]) where LoadBalancerPortNumber and
+ InstancePortNumber are integer values between 1 and 65535,
+ Protocol is a string containing either 'TCP', 'HTTP' or
+ 'HTTPS'; SSLCertificateID is the ARN of an AWS IAM
+ certificate, and must be specified when doing HTTPS.
+
+ :type subnets: list of strings
+ :param subnets: A list of subnet IDs in your VPC to attach to
+ your LoadBalancer.
+
+ :type security_groups: list of strings
+ :param security_groups: The security groups assigned to your
+ LoadBalancer within your VPC.
+
+ :type scheme: string
+ :param scheme: The type of a LoadBalancer. By default, Elastic
+ Load Balancing creates an internet-facing LoadBalancer with
+ a publicly resolvable DNS name, which resolves to public IP
+ addresses.
+
+ Specify the value internal for this option to create an
+ internal LoadBalancer with a DNS name that resolves to
+ private IP addresses.
+
+ This option is only available for LoadBalancers attached
+ to an Amazon VPC.
:rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
:return: The newly created :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
- params = {'LoadBalancerName' : name}
+ params = {'LoadBalancerName': name}
for index, listener in enumerate(listeners):
i = index + 1
+ protocol = listener[2].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
- if listener[2]=='HTTPS':
+ if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
- self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
+ if zones:
+ self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
+
+ if subnets:
+ self.build_list_params(params, subnets, 'Subnets.member.%d')
+
+ if security_groups:
+ self.build_list_params(params, security_groups,
+ 'SecurityGroups.member.%d')
+
load_balancer = self.get_object('CreateLoadBalancer',
params, LoadBalancer)
load_balancer.name = name
load_balancer.listeners = listeners
load_balancer.availability_zones = zones
+ load_balancer.subnets = subnets
+ load_balancer.security_groups = security_groups
return load_balancer
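
A sketch of the new VPC path described above: zones is passed as None and
the balancer lands inside the subnets' VPC (all IDs and the certificate
ARN are placeholders)::

    import boto.ec2.elb

    conn = boto.ec2.elb.connect_to_region('us-east-1')
    cert = 'arn:aws:iam::123456789012:server-certificate/example'
    lb = conn.create_load_balancer(
        'internal-lb', None,
        [(80, 8080, 'HTTP'), (443, 8443, 'HTTPS', cert)],
        subnets=['subnet-12345678'],
        security_groups=['sg-12345678'],
        scheme='internal')
    print lb.dns_name
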
def create_load_balancer_listeners(self, name, listeners):
"""
- Creates a Listener (or group of listeners) for an existing Load Balancer
+ Creates a Listener (or group of listeners) for an existing
+ Load Balancer
:type name: string
:param name: The name of the load balancer to create the listeners for
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
- (LoadBalancerPortNumber, InstancePortNumber, Protocol,
- [SSLCertificateId])
- where LoadBalancerPortNumber and InstancePortNumber are
- integer values between 1 and 65535, Protocol is a
- string containing either 'TCP', 'HTTP' or 'HTTPS';
- SSLCertificateID is the ARN of a AWS AIM certificate,
- and must be specified when doing HTTPS.
+ (LoadBalancerPortNumber, InstancePortNumber, Protocol,
+ [SSLCertificateId]) where LoadBalancerPortNumber and
+ InstancePortNumber are integer values between 1 and 65535,
+ Protocol is a string containing either 'TCP', 'HTTP',
+ 'HTTPS', or 'SSL'; SSLCertificateID is the ARN of an AWS
+ IAM certificate, and must be specified when doing HTTPS or
+ SSL.
:return: The status of the request
"""
- params = {'LoadBalancerName' : name}
+ params = {'LoadBalancerName': name}
for index, listener in enumerate(listeners):
i = index + 1
+ protocol = listener[2].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
- if listener[2]=='HTTPS':
+ if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
return self.get_status('CreateLoadBalancerListeners', params)
-
def delete_load_balancer(self, name):
"""
Delete a Load Balancer from your account.
@@ -215,7 +261,7 @@
:return: The status of the request
"""
- params = {'LoadBalancerName' : name}
+ params = {'LoadBalancerName': name}
for index, port in enumerate(ports):
params['LoadBalancerPorts.member.%d' % (index + 1)] = port
return self.get_status('DeleteLoadBalancerListeners', params)
@@ -237,7 +283,7 @@
:return: An updated list of zones for this Load Balancer.
"""
- params = {'LoadBalancerName' : load_balancer_name}
+ params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, zones_to_add,
'AvailabilityZones.member.%d')
return self.get_list('EnableAvailabilityZonesForLoadBalancer',
@@ -261,7 +307,7 @@
:return: An updated list of zones for this Load Balancer.
"""
- params = {'LoadBalancerName' : load_balancer_name}
+ params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, zones_to_remove,
'AvailabilityZones.member.%d')
return self.get_list('DisableAvailabilityZonesForLoadBalancer',
@@ -281,7 +327,7 @@
:return: An updated list of instances for this Load Balancer.
"""
- params = {'LoadBalancerName' : load_balancer_name}
+ params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('RegisterInstancesWithLoadBalancer',
@@ -301,7 +347,7 @@
:return: An updated list of instances for this Load Balancer.
"""
- params = {'LoadBalancerName' : load_balancer_name}
+ params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DeregisterInstancesFromLoadBalancer',
@@ -323,7 +369,7 @@
:return: list of state info for instances in this Load Balancer.
"""
- params = {'LoadBalancerName' : load_balancer_name}
+ params = {'LoadBalancerName': load_balancer_name}
if instances:
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
@@ -344,12 +390,12 @@
:rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck`
"""
- params = {'LoadBalancerName' : name,
- 'HealthCheck.Timeout' : health_check.timeout,
- 'HealthCheck.Target' : health_check.target,
- 'HealthCheck.Interval' : health_check.interval,
- 'HealthCheck.UnhealthyThreshold' : health_check.unhealthy_threshold,
- 'HealthCheck.HealthyThreshold' : health_check.healthy_threshold}
+ params = {'LoadBalancerName': name,
+ 'HealthCheck.Timeout': health_check.timeout,
+ 'HealthCheck.Target': health_check.target,
+ 'HealthCheck.Interval': health_check.interval,
+ 'HealthCheck.UnhealthyThreshold': health_check.unhealthy_threshold,
+ 'HealthCheck.HealthyThreshold': health_check.healthy_threshold}
return self.get_object('ConfigureHealthCheck', params, HealthCheck)
def set_lb_listener_SSL_certificate(self, lb_name, lb_port,
@@ -359,11 +405,9 @@
connections. The specified certificate replaces any prior certificate
that was used on the same LoadBalancer and port.
"""
- params = {
- 'LoadBalancerName' : lb_name,
- 'LoadBalancerPort' : lb_port,
- 'SSLCertificateId' : ssl_certificate_id,
- }
+ params = {'LoadBalancerName': lb_name,
+ 'LoadBalancerPort': lb_port,
+ 'SSLCertificateId': ssl_certificate_id}
return self.get_status('SetLoadBalancerListenerSSLCertificate', params)
def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name):
@@ -382,11 +426,9 @@
If the application cookie is explicitly removed or expires, the session
stops being sticky until a new application cookie is issued.
"""
- params = {
- 'CookieName' : name,
- 'LoadBalancerName' : lb_name,
- 'PolicyName' : policy_name,
- }
+ params = {'CookieName': name,
+ 'LoadBalancerName': lb_name,
+ 'PolicyName': policy_name}
return self.get_status('CreateAppCookieStickinessPolicy', params)
def create_lb_cookie_stickiness_policy(self, cookie_expiration_period,
@@ -409,11 +451,9 @@
on the cookie expiration time, which is specified in the policy
configuration.
"""
- params = {
- 'CookieExpirationPeriod' : cookie_expiration_period,
- 'LoadBalancerName' : lb_name,
- 'PolicyName' : policy_name,
- }
+ params = {'CookieExpirationPeriod': cookie_expiration_period,
+ 'LoadBalancerName': lb_name,
+ 'PolicyName': policy_name}
return self.get_status('CreateLBCookieStickinessPolicy', params)
def delete_lb_policy(self, lb_name, policy_name):
@@ -421,10 +461,8 @@
Deletes a policy from the LoadBalancer. The specified policy must not
be enabled for any listeners.
"""
- params = {
- 'LoadBalancerName' : lb_name,
- 'PolicyName' : policy_name,
- }
+ params = {'LoadBalancerName': lb_name,
+ 'PolicyName': policy_name}
return self.get_status('DeleteLoadBalancerPolicy', params)
def set_lb_policies_of_listener(self, lb_name, lb_port, policies):
@@ -433,11 +471,71 @@
balancer. Currently only zero (0) or one (1) policy can be associated
with a listener.
"""
- params = {
- 'LoadBalancerName' : lb_name,
- 'LoadBalancerPort' : lb_port,
- }
+ params = {'LoadBalancerName': lb_name,
+ 'LoadBalancerPort': lb_port}
self.build_list_params(params, policies, 'PolicyNames.member.%d')
return self.get_status('SetLoadBalancerPoliciesOfListener', params)
+ def apply_security_groups_to_lb(self, name, security_groups):
+ """
+ Applies security groups to the load balancer.
+ Applying security groups that are already registered with the
+ Load Balancer has no effect.
+ :type name: string
+ :param name: The name of the Load Balancer
+
+ :type security_groups: List of strings
+ :param security_groups: The name of the security group(s) to add.
+
+ :rtype: List of strings
+ :return: An updated list of security groups for this Load Balancer.
+
+ """
+ params = {'LoadBalancerName': name}
+ self.build_list_params(params, security_groups,
+ 'SecurityGroups.member.%d')
+ return self.get_list('ApplySecurityGroupsToLoadBalancer',
+ params, None)
+
+ def attach_lb_to_subnets(self, name, subnets):
+ """
+ Attaches load balancer to one or more subnets.
+ Attaching subnets that are already registered with the
+ Load Balancer has no effect.
+
+ :type name: string
+ :param name: The name of the Load Balancer
+
+ :type subnets: List of strings
+ :param subnets: The name of the subnet(s) to add.
+
+ :rtype: List of strings
+ :return: An updated list of subnets for this Load Balancer.
+
+ """
+ params = {'LoadBalancerName': name}
+ self.build_list_params(params, subnets,
+ 'Subnets.member.%d')
+ return self.get_list('AttachLoadBalancerToSubnets',
+ params, None)
+
+ def detach_lb_from_subnets(self, name, subnets):
+ """
+ Detaches load balancer from one or more subnets.
+
+ :type name: string
+ :param name: The name of the Load Balancer
+
+ :type subnets: List of strings
+ :param subnets: The name of the subnet(s) to detach.
+
+ :rtype: List of strings
+ :return: An updated list of subnets for this Load Balancer.
+
+ """
+ params = {'LoadBalancerName': name}
+ self.build_list_params(params, subnets,
+ 'Subnets.member.%d')
+ return self.get_list('DetachLoadBalancerFromSubnets',
+ params, None)
diff --git a/boto/ec2/elb/healthcheck.py b/boto/ec2/elb/healthcheck.py
index 5b47d62..6661ea1 100644
--- a/boto/ec2/elb/healthcheck.py
+++ b/boto/ec2/elb/healthcheck.py
@@ -21,11 +21,22 @@
class HealthCheck(object):
"""
- Represents an EC2 Access Point Health Check
+ Represents an EC2 Access Point Health Check. See
+ :ref:`elb-configuring-a-health-check` for a walkthrough on configuring
+ load balancer health checks.
"""
-
def __init__(self, access_point=None, interval=30, target=None,
healthy_threshold=3, timeout=5, unhealthy_threshold=5):
+ """
+ :ivar str access_point: The name of the load balancer this
+ health check is associated with.
+ :ivar int interval: Specifies how many seconds there are between
+ health checks.
+ :ivar str target: Determines what to check on an instance. See the
+ Amazon HealthCheck_ documentation for possible Target values.
+
+ .. _HealthCheck: http://docs.amazonwebservices.com/ElasticLoadBalancing/latest/APIReference/API_HealthCheck.html
+ """
self.access_point = access_point
self.interval = interval
self.target = target
@@ -54,6 +65,15 @@
setattr(self, name, value)
def update(self):
+ """
+ In the case where you have accessed an existing health check on a
+ load balancer, this method applies this instance's health check
+ values to the load balancer it is attached to.
+
+ .. note:: This method will not do anything if the :py:attr:`access_point`
+ attribute isn't set, as is the case with a newly instantiated
+ HealthCheck instance.
+ """
if not self.access_point:
return
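
A hedged sketch of the walkthrough the class docstring references: build
a HealthCheck and apply it to an existing balancer (the balancer name and
target URL are examples)::

    import boto.ec2.elb
    from boto.ec2.elb import HealthCheck

    conn = boto.ec2.elb.connect_to_region('us-east-1')
    hc = HealthCheck(interval=20, target='HTTP:8080/health',
                     healthy_threshold=3, timeout=5,
                     unhealthy_threshold=5)
    lb = conn.get_all_load_balancers(['internal-lb'])[0]
    lb.configure_health_check(hc)
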
diff --git a/boto/ec2/elb/instancestate.py b/boto/ec2/elb/instancestate.py
index 4a9b0d4..37a4727 100644
--- a/boto/ec2/elb/instancestate.py
+++ b/boto/ec2/elb/instancestate.py
@@ -26,6 +26,17 @@
def __init__(self, load_balancer=None, description=None,
state=None, instance_id=None, reason_code=None):
+ """
+ :ivar boto.ec2.elb.loadbalancer.LoadBalancer load_balancer: The
+ load balancer this instance is registered to.
+ :ivar str description: A description of the instance.
+ :ivar str instance_id: The EC2 instance ID.
+ :ivar str reason_code: Provides information about the cause of
+ an OutOfService instance. Specifically, it indicates whether the
+ cause is Elastic Load Balancing or the instance behind the
+ LoadBalancer.
+ :ivar str state: Specifies the current state of the instance.
+ """
self.load_balancer = load_balancer
self.description = description
self.state = state
diff --git a/boto/ec2/elb/listelement.py b/boto/ec2/elb/listelement.py
index 5be4599..3529041 100644
--- a/boto/ec2/elb/listelement.py
+++ b/boto/ec2/elb/listelement.py
@@ -20,6 +20,10 @@
# IN THE SOFTWARE.
class ListElement(list):
+ """
+ A :py:class:`list` subclass that has some additional methods for interacting
+ with Amazon's XML API.
+ """
def startElement(self, name, attrs, connection):
pass
diff --git a/boto/ec2/elb/listener.py b/boto/ec2/elb/listener.py
index a8807c0..bbb49d0 100644
--- a/boto/ec2/elb/listener.py
+++ b/boto/ec2/elb/listener.py
@@ -19,6 +19,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+from boto.ec2.elb.listelement import ListElement
+
class Listener(object):
"""
Represents an EC2 Load Balancer Listener tuple
@@ -31,6 +33,7 @@
self.instance_port = instance_port
self.protocol = protocol
self.ssl_certificate_id = ssl_certificate_id
+ self.policy_names = ListElement()
def __repr__(self):
r = "(%d, %d, '%s'" % (self.load_balancer_port, self.instance_port, self.protocol)
@@ -40,6 +43,8 @@
return r
def startElement(self, name, attrs, connection):
+ if name == 'PolicyNames':
+ return self.policy_names
return None
def endElement(self, name, value, connection):
diff --git a/boto/ec2/elb/loadbalancer.py b/boto/ec2/elb/loadbalancer.py
index df360ec..efb7151 100644
--- a/boto/ec2/elb/loadbalancer.py
+++ b/boto/ec2/elb/loadbalancer.py
@@ -1,4 +1,5 @@
-# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -27,12 +28,43 @@
from boto.ec2.instanceinfo import InstanceInfo
from boto.resultset import ResultSet
+
class LoadBalancer(object):
"""
- Represents an EC2 Load Balancer
+ Represents an EC2 Load Balancer.
"""
def __init__(self, connection=None, name=None, endpoints=None):
+ """
+ :ivar boto.ec2.elb.ELBConnection connection: The connection this load
+ balancer instance was instantiated from.
+ :ivar list listeners: A list of tuples in the form of
+ ``(<Inbound port>, <Outbound port>, <Protocol>)``
+ :ivar boto.ec2.elb.healthcheck.HealthCheck health_check: The health
+ check policy for this load balancer.
+ :ivar boto.ec2.elb.policies.Policies policies: Cookie stickiness and
+ other policies.
+ :ivar str dns_name: The external DNS name for the balancer.
+ :ivar str created_time: A date+time string showing when the
+ load balancer was created.
+ :ivar list instances: A list of :py:class:`boto.ec2.instanceinfo.InstanceInfo`
+ instances, representing the EC2 instances this load balancer is
+ distributing requests to.
+ :ivar list availability_zones: The availability zones this balancer
+ covers.
+ :ivar str canonical_hosted_zone_name: Current CNAME for the balancer.
+ :ivar str canonical_hosted_zone_name_id: The Route 53 hosted zone
+ ID of this balancer. Needed when creating an Alias record in a
+ Route 53 hosted zone.
+ :ivar boto.ec2.elb.securitygroup.SecurityGroup source_security_group:
+ The security group that you can use as part of your inbound rules
+ for your load balancer back-end instances to disallow traffic
+ from sources other than your load balancer.
+ :ivar list subnets: A list of subnets this balancer is on.
+ :ivar list security_groups: A list of additional security groups that
+ have been applied.
+ :ivar str vpc_id: The ID of the VPC that this ELB resides within.
+ """
self.connection = connection
self.name = name
self.listeners = None
@@ -45,6 +77,10 @@
self.canonical_hosted_zone_name = None
self.canonical_hosted_zone_name_id = None
self.source_security_group = None
+ self.subnets = ListElement()
+ self.security_groups = ListElement()
+ self.vpc_id = None
+ self.scheme = None
def __repr__(self):
return 'LoadBalancer:%s' % self.name
@@ -67,6 +103,12 @@
elif name == 'SourceSecurityGroup':
self.source_security_group = SecurityGroup()
return self.source_security_group
+ elif name == 'Subnets':
+ return self.subnets
+ elif name == 'SecurityGroups':
+ return self.security_groups
+ elif name == 'VPCId':
+ pass
else:
return None
@@ -83,6 +125,10 @@
self.canonical_hosted_zone_name = value
elif name == 'CanonicalHostedZoneNameID':
self.canonical_hosted_zone_name_id = value
+ elif name == 'VPCId':
+ self.vpc_id = value
+ elif name == 'Scheme':
+ self.scheme = value
else:
setattr(self, name, value)
@@ -115,49 +161,70 @@
def register_instances(self, instances):
"""
- Add instances to this Load Balancer
- All instances must be in the same region as the Load Balancer.
- Adding endpoints that are already registered with the Load Balancer
- has no effect.
+ Adds instances to this load balancer. All instances must be in the same
+ region as the load balancer. Adding endpoints that are already
+ registered with the load balancer has no effect.
- :type zones: string or List of instance id's
- :param zones: The name of the endpoint(s) to add.
+ :param list instances: List of instance IDs (strings) that you'd like
+ to add to this load balancer.
"""
if isinstance(instances, str) or isinstance(instances, unicode):
instances = [instances]
- new_instances = self.connection.register_instances(self.name, instances)
+ new_instances = self.connection.register_instances(self.name,
+ instances)
self.instances = new_instances
def deregister_instances(self, instances):
"""
- Remove instances from this Load Balancer.
- Removing instances that are not registered with the Load Balancer
- has no effect.
+ Remove instances from this load balancer. Removing instances that are
+ not registered with the load balancer has no effect.
- :type zones: string or List of instance id's
- :param zones: The name of the endpoint(s) to add.
+ :param list instances: List of instance IDs (strings) that you'd like
+ to remove from this load balancer.
"""
if isinstance(instances, str) or isinstance(instances, unicode):
instances = [instances]
- new_instances = self.connection.deregister_instances(self.name, instances)
+ new_instances = self.connection.deregister_instances(self.name,
+ instances)
self.instances = new_instances
def delete(self):
"""
- Delete this load balancer
+ Delete this load balancer.
"""
return self.connection.delete_load_balancer(self.name)
def configure_health_check(self, health_check):
+ """
+ Configures the health check behavior for the instances behind this
+ load balancer. See :ref:`elb-configuring-a-health-check` for a
+ walkthrough.
+
+ :param boto.ec2.elb.healthcheck.HealthCheck health_check: A
+ HealthCheck instance that tells the load balancer how to check
+ its instances for health.
+ """
return self.connection.configure_health_check(self.name, health_check)
def get_instance_health(self, instances=None):
+ """
+ Returns a list of :py:class:`boto.ec2.elb.instancestate.InstanceState`
+ objects, which show the health of the instances attached to this
+ load balancer.
+
+ :rtype: list
+ :returns: A list of
+ :py:class:`InstanceState <boto.ec2.elb.instancestate.InstanceState>`
+ instances, representing the instances
+ attached to this load balancer.
+ """
return self.connection.describe_instance_health(self.name, instances)
def create_listeners(self, listeners):
- return self.connection.create_load_balancer_listeners(self.name, listeners)
+ return self.connection.create_load_balancer_listeners(self.name,
+ listeners)
def create_listener(self, inPort, outPort=None, proto="tcp"):
if outPort == None:
@@ -165,7 +232,8 @@
return self.create_listeners([(inPort, outPort, proto)])
def delete_listeners(self, listeners):
- return self.connection.delete_load_balancer_listeners(self.name, listeners)
+ return self.connection.delete_load_balancer_listeners(self.name,
+ listeners)
def delete_listener(self, inPort):
return self.delete_listeners([inPort])
@@ -178,14 +246,65 @@
return self.connection.delete_lb_policy(self.name, policy_name)
def set_policies_of_listener(self, lb_port, policies):
- return self.connection.set_lb_policies_of_listener(self.name, lb_port, policies)
+ return self.connection.set_lb_policies_of_listener(self.name,
+ lb_port,
+ policies)
- def create_cookie_stickiness_policy(self, cookie_expiration_period, policy_name):
+ def create_cookie_stickiness_policy(self, cookie_expiration_period,
+ policy_name):
return self.connection.create_lb_cookie_stickiness_policy(cookie_expiration_period, self.name, policy_name)
def create_app_cookie_stickiness_policy(self, name, policy_name):
- return self.connection.create_app_cookie_stickiness_policy(name, self.name, policy_name)
+ return self.connection.create_app_cookie_stickiness_policy(name,
+ self.name,
+ policy_name)
def set_listener_SSL_certificate(self, lb_port, ssl_certificate_id):
- return self.connection.set_lb_listener_SSL_certificate(self.name, lb_port, ssl_certificate_id)
+ return self.connection.set_lb_listener_SSL_certificate(self.name,
+ lb_port,
+ ssl_certificate_id)
+ def attach_subnets(self, subnets):
+ """
+ Attaches load balancer to one or more subnets.
+ Attaching subnets that are already registered with the
+ Load Balancer has no effect.
+
+ :type subnets: string or List of strings
+ :param subnets: The name of the subnet(s) to add.
+
+ """
+ if isinstance(subnets, str) or isinstance(subnets, unicode):
+ subnets = [subnets]
+ new_subnets = self.connection.attach_lb_to_subnets(self.name, subnets)
+ self.subnets = new_subnets
+
+ def detach_subnets(self, subnets):
+ """
+ Detaches load balancer from one or more subnets.
+
+ :type subnets: string or List of strings
+ :param subnets: The name of the subnet(s) to detach.
+
+ """
+ if isinstance(subnets, str) or isinstance(subnets, unicode):
+ subnets = [subnets]
+ new_subnets = self.connection.detach_lb_from_subnets(self.name, subnets)
+ self.subnets = new_subnets
+
+ def apply_security_groups(self, security_groups):
+ """
+ Applies security groups to the load balancer.
+ Applying security groups that are already registered with the
+ Load Balancer has no effect.
+
+ :type security_groups: string or List of strings
+ :param security_groups: The name of the security group(s) to add.
+
+ """
+ if isinstance(security_groups, str) or \
+ isinstance(security_groups, unicode):
+ security_groups = [security_groups]
+ new_sgs = self.connection.apply_security_groups_to_lb(
+ self.name, security_groups)
+ self.security_groups = new_sgs
diff --git a/boto/ec2/elb/policies.py b/boto/ec2/elb/policies.py
index 7bf5455..c25a51f 100644
--- a/boto/ec2/elb/policies.py
+++ b/boto/ec2/elb/policies.py
@@ -78,9 +78,11 @@
if name == 'AppCookieStickinessPolicies':
rs = ResultSet([('member', AppCookieStickinessPolicy)])
self.app_cookie_stickiness_policies = rs
+ return rs
elif name == 'LBCookieStickinessPolicies':
rs = ResultSet([('member', LBCookieStickinessPolicy)])
self.lb_cookie_stickiness_policies = rs
+ return rs
def endElement(self, name, value, connection):
return
diff --git a/boto/ec2/group.py b/boto/ec2/group.py
new file mode 100644
index 0000000..9e017b8
--- /dev/null
+++ b/boto/ec2/group.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class Group:
+
+ def __init__(self, parent=None):
+ self.id = None
+ self.name = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'groupId':
+ self.id = value
+ elif name == 'groupName':
+ self.name = value
+ else:
+ setattr(self, name, value)
+
diff --git a/boto/ec2/image.py b/boto/ec2/image.py
index de1b5d2..f00e55a 100644
--- a/boto/ec2/image.py
+++ b/boto/ec2/image.py
@@ -160,7 +160,10 @@
disable_api_termination=False,
instance_initiated_shutdown_behavior=None,
private_ip_address=None,
- placement_group=None, security_group_ids=None):
+ placement_group=None, security_group_ids=None,
+ additional_info=None, instance_profile_name=None,
+ instance_profile_arn=None, tenancy=None):
+
"""
Runs this instance.
@@ -229,11 +232,30 @@
:param placement_group: If specified, this is the name of the placement
group in which the instance(s) will be launched.
- :rtype: Reservation
- :return: The :class:`boto.ec2.instance.Reservation` associated with the request for machines
+ :type additional_info: string
+ :param additional_info: Specifies additional information to make
+ available to the instance(s)
:type security_group_ids: list
:param security_group_ids: A list of security group IDs with which to
associate the instance(s).
+
+ :type instance_profile_name: string
+ :param instance_profile_name: The name of an IAM instance profile to use.
+
+ :type instance_profile_arn: string
+ :param instance_profile_arn: The ARN of an IAM instance profile to use.
+
+ :type tenancy: string
+ :param tenancy: The tenancy of the instance you want to launch. An
+ instance with a tenancy of 'dedicated' runs on
+ single-tenant hardware and can only be launched into a
+ VPC. Valid values are: "default" or "dedicated".
+ NOTE: To use dedicated tenancy you MUST specify a VPC
+ subnet-ID as well.
+
+ :rtype: Reservation
+ :return: The :class:`boto.ec2.instance.Reservation` associated with
+ the request for machines
+
"""
return self.connection.run_instances(self.id, min_count, max_count,
@@ -245,7 +267,11 @@
block_device_map, disable_api_termination,
instance_initiated_shutdown_behavior,
private_ip_address, placement_group,
- security_group_ids=security_group_ids)
+ security_group_ids=security_group_ids,
+ additional_info=additional_info,
+ instance_profile_name=instance_profile_name,
+ instance_profile_arn=instance_profile_arn,
+ tenancy=tenancy)
def deregister(self, delete_snapshot=False):
return self.connection.deregister_image(self.id, delete_snapshot)
@@ -300,17 +326,17 @@
if name == 'launchPermission':
self.name = 'launch_permission'
elif name == 'group':
- if self.attrs.has_key('groups'):
+ if 'groups' in self.attrs:
self.attrs['groups'].append(value)
else:
self.attrs['groups'] = [value]
elif name == 'userId':
- if self.attrs.has_key('user_ids'):
+ if 'user_ids' in self.attrs:
self.attrs['user_ids'].append(value)
else:
self.attrs['user_ids'] = [value]
elif name == 'productCode':
- if self.attrs.has_key('product_codes'):
+ if 'product_codes' in self.attrs:
self.attrs['product_codes'].append(value)
else:
self.attrs['product_codes'] = [value]
diff --git a/boto/ec2/instance.py b/boto/ec2/instance.py
index a397f95..7435788 100644
--- a/boto/ec2/instance.py
+++ b/boto/ec2/instance.py
@@ -1,5 +1,6 @@
-# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -15,7 +16,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -29,8 +30,86 @@
from boto.ec2.address import Address
from boto.ec2.blockdevicemapping import BlockDeviceMapping
from boto.ec2.image import ProductCodes
+from boto.ec2.networkinterface import NetworkInterface
+from boto.ec2.group import Group
import base64
+
+class InstanceState(object):
+ """
+ The state of the instance.
+
+ :ivar code: The low byte represents the state. The high byte is an
+ opaque internal value and should be ignored. Valid values:
+
+ * 0 (pending)
+ * 16 (running)
+ * 32 (shutting-down)
+ * 48 (terminated)
+ * 64 (stopping)
+ * 80 (stopped)
+
+ :ivar name: The name of the state of the instance. Valid values:
+
+ * "pending"
+ * "running"
+ * "shutting-down"
+ * "terminated"
+ * "stopping"
+ * "stopped"
+ """
+ def __init__(self, code=0, name=None):
+ self.code = code
+ self.name = name
+
+ def __repr__(self):
+ return '%s(%d)' % (self.name, self.code)
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'code':
+ self.code = int(value)
+ elif name == 'name':
+ self.name = value
+ else:
+ setattr(self, name, value)
+
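
A small sketch of the "low byte" note in the docstring above (the helper
name is hypothetical)::

    def is_running(state):
        # Mask off the opaque high byte before comparing against 16.
        return (state.code & 0xFF) == 16
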
+
+class InstancePlacement(object):
+ """
+ The location where the instance launched.
+
+ :ivar zone: The Availability Zone of the instance.
+ :ivar group_name: The name of the placement group the instance is
+ in (for cluster compute instances).
+ :ivar tenancy: The tenancy of the instance (if the instance is
+ running within a VPC). An instance with a tenancy of dedicated
+ runs on single-tenant hardware.
+ """
+ def __init__(self, zone=None, group_name=None, tenancy=None):
+ self.zone = zone
+ self.group_name = group_name
+ self.tenancy = tenancy
+
+ def __repr__(self):
+ return self.zone
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'availabilityZone':
+ self.zone = value
+ elif name == 'groupName':
+ self.group_name = value
+ elif name == 'tenancy':
+ self.tenancy = value
+ else:
+ setattr(self, name, value)
+
+
class Reservation(EC2Object):
"""
Represents a Reservation response object.
@@ -42,7 +121,6 @@
:ivar instances: A list of Instance objects launched in this
Reservation.
"""
-
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
self.id = None
@@ -74,31 +152,77 @@
def stop_all(self):
for instance in self.instances:
instance.stop()
-
+
+
class Instance(TaggedEC2Object):
-
+ """
+ Represents an instance.
+
+ :ivar id: The unique ID of the Instance.
+ :ivar groups: A list of Group objects representing the security
+ groups associated with the instance.
+ :ivar public_dns_name: The public dns name of the instance.
+ :ivar private_dns_name: The private dns name of the instance.
+ :ivar state: The string representation of the instance's current state.
+ :ivar state_code: An integer representation of the instance's
+ current state.
+ :ivar previous_state: The string representation of the instance's
+ previous state.
+ :ivar previous_state_code: An integer representation of the
+ instance's previous state.
+ :ivar key_name: The name of the SSH key associated with the instance.
+ :ivar instance_type: The type of instance (e.g. m1.small).
+ :ivar launch_time: The time the instance was launched.
+ :ivar image_id: The ID of the AMI used to launch this instance.
+ :ivar placement: The availability zone in which the instance is running.
+ :ivar placement_group: The name of the placement group the instance
+ is in (for cluster compute instances).
+ :ivar placement_tenancy: The tenancy of the instance, if the instance
+ is running within a VPC. An instance with a tenancy of dedicated
+        runs on single-tenant hardware.
+ :ivar kernel: The kernel associated with the instance.
+ :ivar ramdisk: The ramdisk associated with the instance.
+ :ivar architecture: The architecture of the image (i386|x86_64).
+ :ivar hypervisor: The hypervisor used.
+ :ivar virtualization_type: The type of virtualization used.
+ :ivar product_codes: A list of product codes associated with this instance.
+    :ivar ami_launch_index: This instance's position within its launch group.
+ :ivar monitored: A boolean indicating whether monitoring is enabled or not.
+ :ivar spot_instance_request_id: The ID of the spot instance request
+ if this is a spot instance.
+ :ivar subnet_id: The VPC Subnet ID, if running in VPC.
+ :ivar vpc_id: The VPC ID, if running in VPC.
+ :ivar private_ip_address: The private IP address of the instance.
+ :ivar ip_address: The public IP address of the instance.
+ :ivar platform: Platform of the instance (e.g. Windows)
+ :ivar root_device_name: The name of the root device.
+ :ivar root_device_type: The root device type (ebs|instance-store).
+ :ivar block_device_mapping: The Block Device Mapping for the instance.
+ :ivar state_reason: The reason for the most recent state transition.
+ :ivar interfaces: List of Elastic Network Interfaces associated with
+ this instance.
+    :ivar ebs_optimized: Whether or not the instance is using optimized
+        EBS volumes.
+ :ivar instance_profile: A Python dict containing the instance
+ profile id and arn associated with this instance.
+ """
+
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
self.id = None
self.dns_name = None
self.public_dns_name = None
self.private_dns_name = None
- self.state = None
- self.state_code = None
self.key_name = None
- self.shutdown_state = None
- self.previous_state = None
self.instance_type = None
- self.instance_class = None
self.launch_time = None
self.image_id = None
- self.placement = None
self.kernel = None
self.ramdisk = None
self.product_codes = ProductCodes()
self.ami_launch_index = None
self.monitored = False
- self.instance_class = None
self.spot_instance_request_id = None
self.subnet_id = None
self.vpc_id = None
@@ -113,11 +237,57 @@
self.state_reason = None
self.group_name = None
self.client_token = None
+ self.eventsSet = None
self.groups = []
+ self.platform = None
+ self.interfaces = []
+ self.hypervisor = None
+ self.virtualization_type = None
+ self.architecture = None
+        self.instance_profile = None
+        self.ebs_optimized = False
+ self._previous_state = None
+ self._state = InstanceState()
+ self._placement = InstancePlacement()
def __repr__(self):
return 'Instance:%s' % self.id
+ @property
+ def state(self):
+ return self._state.name
+
+ @property
+ def state_code(self):
+ return self._state.code
+
+ @property
+ def previous_state(self):
+ if self._previous_state:
+ return self._previous_state.name
+ return None
+
+ @property
+ def previous_state_code(self):
+ if self._previous_state:
+ return self._previous_state.code
+ return 0
+
+ @property
+ def placement(self):
+ return self._placement.zone
+
+ @property
+ def placement_group(self):
+ return self._placement.group_name
+
+ @property
+ def placement_tenancy(self):
+ return self._placement.tenancy
+
def startElement(self, name, attrs, connection):
retval = TaggedEC2Object.startElement(self, name, attrs, connection)
if retval is not None:
@@ -130,11 +300,28 @@
elif name == 'productCodes':
return self.product_codes
elif name == 'stateReason':
- self.state_reason = StateReason()
+ self.state_reason = SubParse('stateReason')
return self.state_reason
elif name == 'groupSet':
self.groups = ResultSet([('item', Group)])
return self.groups
+ elif name == "eventsSet":
+ self.eventsSet = SubParse('eventsSet')
+ return self.eventsSet
+ elif name == 'networkInterfaceSet':
+            self.interfaces = ResultSet([('item', NetworkInterface)])
+            return self.interfaces
+ elif name == 'iamInstanceProfile':
+ self.instance_profile = SubParse('iamInstanceProfile')
+ return self.instance_profile
+ elif name == 'currentState':
+ return self._state
+ elif name == 'previousState':
+ self._previous_state = InstanceState()
+ return self._previous_state
+ elif name == 'instanceState':
+ return self._state
+ elif name == 'placement':
+ return self._placement
return None
def endElement(self, name, value, connection):
@@ -151,8 +338,6 @@
self.key_name = value
elif name == 'amiLaunchIndex':
self.ami_launch_index = value
- elif name == 'shutdownState':
- self.shutdown_state = value
elif name == 'previousState':
self.previous_state = value
elif name == 'name':
@@ -165,18 +350,14 @@
self.state_code = value
elif name == 'instanceType':
self.instance_type = value
- elif name == 'instanceClass':
- self.instance_class = value
elif name == 'rootDeviceName':
self.root_device_name = value
elif name == 'rootDeviceType':
self.root_device_type = value
elif name == 'launchTime':
self.launch_time = value
- elif name == 'availabilityZone':
- self.placement = value
- elif name == 'placement':
- pass
+ elif name == 'platform':
+ self.platform = value
elif name == 'kernelId':
self.kernel = value
elif name == 'ramdiskId':
@@ -186,8 +367,6 @@
if value == 'enabled':
self.monitored = True
self._in_monitoring_element = False
- elif name == 'instanceClass':
- self.instance_class = value
elif name == 'spotInstanceRequestId':
self.spot_instance_request_id = value
elif name == 'subnetId':
@@ -210,6 +389,16 @@
self.group_name = value
elif name == 'clientToken':
self.client_token = value
+ elif name == "eventsSet":
+ self.events = value
+ elif name == 'hypervisor':
+ self.hypervisor = value
+ elif name == 'virtualizationType':
+ self.virtualization_type = value
+ elif name == 'architecture':
+ self.architecture = value
+ elif name == 'ebsOptimized':
+ self.ebs_optimized = (value == 'true')
else:
setattr(self, name, value)
@@ -252,7 +441,7 @@
:type force: bool
:param force: Forces the instance to stop
-
+
:rtype: list
:return: A list of the instances stopped
"""
@@ -300,11 +489,20 @@
:type attribute: string
:param attribute: The attribute you need information about
- Valid choices are:
- instanceType|kernel|ramdisk|userData|
- disableApiTermination|
- instanceInitiatedShutdownBehavior|
- rootDeviceName|blockDeviceMapping
+ Valid choices are:
+
+ * instanceType
+ * kernel
+ * ramdisk
+ * userData
+ * disableApiTermination
+ * instanceInitiatedShutdownBehavior
+ * rootDeviceName
+ * blockDeviceMapping
+ * productCodes
+ * sourceDestCheck
+ * groupSet
+ * ebsOptimized
:rtype: :class:`boto.ec2.image.InstanceAttribute`
:return: An InstanceAttribute object representing the value of the
@@ -318,14 +516,16 @@
:type attribute: string
:param attribute: The attribute you wish to change.
- AttributeName - Expected value (default)
- instanceType - A valid instance type (m1.small)
- kernel - Kernel ID (None)
- ramdisk - Ramdisk ID (None)
- userData - Base64 encoded String (None)
- disableApiTermination - Boolean (true)
- instanceInitiatedShutdownBehavior - stop|terminate
- rootDeviceName - device name (None)
+
+ * instanceType - A valid instance type (m1.small)
+ * kernel - Kernel ID (None)
+ * ramdisk - Ramdisk ID (None)
+ * userData - Base64 encoded String (None)
+ * disableApiTermination - Boolean (true)
+ * instanceInitiatedShutdownBehavior - stop|terminate
+ * sourceDestCheck - Boolean (true)
+ * groupSet - Set of Security Groups or IDs
+ * ebsOptimized - Boolean (false)
:type value: string
:param value: The new value for the attribute
@@ -349,23 +549,7 @@
"""
return self.connection.reset_instance_attribute(self.id, attribute)
-class Group:
- def __init__(self, parent=None):
- self.id = None
- self.name = None
-
- def startElement(self, name, attrs, connection):
- return None
-
- def endElement(self, name, value, connection):
- if name == 'groupId':
- self.id = value
- elif name == 'groupName':
- self.name = value
- else:
- setattr(self, name, value)
-
class ConsoleOutput:
def __init__(self, parent=None):
@@ -387,10 +571,12 @@
else:
setattr(self, name, value)
+
class InstanceAttribute(dict):
ValidValues = ['instanceType', 'kernel', 'ramdisk', 'userData',
- 'disableApiTermination', 'instanceInitiatedShutdownBehavior',
+ 'disableApiTermination',
+ 'instanceInitiatedShutdownBehavior',
'rootDeviceName', 'blockDeviceMapping', 'sourceDestCheck',
'groupSet']
@@ -416,19 +602,24 @@
elif name == 'requestId':
self.request_id = value
elif name == 'value':
+ if value == 'true':
+ value = True
+ elif value == 'false':
+ value = False
self._current_value = value
elif name in self.ValidValues:
self[name] = self._current_value
-class StateReason(dict):
- def __init__(self, parent=None):
+class SubParse(dict):
+
+ def __init__(self, section, parent=None):
dict.__init__(self)
+ self.section = section
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
- if name != 'stateReason':
+ if name != self.section:
self[name] = value
-
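A minimal usage sketch for the reworked Instance state and placement
properties above, assuming valid AWS credentials and a reachable region
(the region name is a placeholder):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    for reservation in conn.get_all_instances():
        for instance in reservation.instances:
            # state/state_code/placement now proxy the private
            # InstanceState and InstancePlacement helper objects.
            print '%s %s(%d) in %s' % (instance.id, instance.state,
                                       instance.state_code,
                                       instance.placement)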
diff --git a/boto/ec2/instanceinfo.py b/boto/ec2/instanceinfo.py
index 6efbaed..623ba17 100644
--- a/boto/ec2/instanceinfo.py
+++ b/boto/ec2/instanceinfo.py
@@ -25,6 +25,10 @@
"""
def __init__(self, connection=None, id=None, state=None):
+ """
+ :ivar str id: The instance's EC2 ID.
+ :ivar str state: Specifies the current status of the instance.
+ """
self.connection = connection
self.id = id
self.state = state
diff --git a/boto/ec2/instancestatus.py b/boto/ec2/instancestatus.py
new file mode 100644
index 0000000..3a9b543
--- /dev/null
+++ b/boto/ec2/instancestatus.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class Details(dict):
+ """
+ A dict object that contains name/value pairs which provide
+ more detailed information about the status of the system
+ or the instance.
+ """
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'name':
+ self._name = value
+ elif name == 'status':
+ self[self._name] = value
+ else:
+ setattr(self, name, value)
+
+class Event(object):
+ """
+ A status event for an instance.
+
+ :ivar code: A string indicating the event type.
+ :ivar description: A string describing the reason for the event.
+ :ivar not_before: A datestring describing the earliest time for
+ the event.
+ :ivar not_after: A datestring describing the latest time for
+ the event.
+ """
+
+ def __init__(self, code=None, description=None,
+ not_before=None, not_after=None):
+ self.code = code
+ self.description = description
+ self.not_before = not_before
+ self.not_after = not_after
+
+ def __repr__(self):
+ return 'Event:%s' % self.code
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'code':
+ self.code = value
+ elif name == 'description':
+ self.description = value
+ elif name == 'notBefore':
+ self.not_before = value
+ elif name == 'notAfter':
+ self.not_after = value
+ else:
+ setattr(self, name, value)
+
+class Status(object):
+ """
+ A generic Status object used for system status and instance status.
+
+ :ivar status: A string indicating overall status.
+ :ivar details: A dict containing name-value pairs which provide
+ more details about the current status.
+ """
+
+ def __init__(self, status=None, details=None):
+ self.status = status
+ if not details:
+ details = Details()
+ self.details = details
+
+ def __repr__(self):
+ return 'Status:%s' % self.status
+
+ def startElement(self, name, attrs, connection):
+ if name == 'details':
+ return self.details
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'status':
+ self.status = value
+ else:
+ setattr(self, name, value)
+
+class EventSet(list):
+
+ def startElement(self, name, attrs, connection):
+ if name == 'item':
+ event = Event()
+ self.append(event)
+ return event
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+class InstanceStatus(object):
+ """
+    Represents an EC2 Instance status as reported by a
+    DescribeInstanceStatus request.
+
+ :ivar id: The instance identifier.
+ :ivar zone: The availability zone of the instance.
+ :ivar events: A list of events relevant to the instance.
+ :ivar state_code: An integer representing the current state
+ of the instance.
+ :ivar state_name: A string describing the current state
+ of the instance.
+ :ivar system_status: A Status object that reports impaired
+ functionality that stems from issues related to the systems
+        that support an instance, such as hardware failures
+ and network connectivity problems.
+ :ivar instance_status: A Status object that reports impaired
+ functionality that arises from problems internal to the instance.
+ """
+
+ def __init__(self, id=None, zone=None, events=None,
+ state_code=None, state_name=None):
+ self.id = id
+ self.zone = zone
+ self.events = events
+ self.state_code = state_code
+ self.state_name = state_name
+ self.system_status = Status()
+ self.instance_status = Status()
+
+ def __repr__(self):
+ return 'InstanceStatus:%s' % self.id
+
+ def startElement(self, name, attrs, connection):
+ if name == 'eventsSet':
+ self.events = EventSet()
+ return self.events
+ elif name == 'systemStatus':
+ return self.system_status
+ elif name == 'instanceStatus':
+ return self.instance_status
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'instanceId':
+ self.id = value
+ elif name == 'availabilityZone':
+ self.zone = value
+ elif name == 'code':
+ self.state_code = int(value)
+ elif name == 'name':
+ self.state_name = value
+ else:
+ setattr(self, name, value)
+
+class InstanceStatusSet(list):
+ """
+    A list object that contains the results of a
+    DescribeInstanceStatus request. Each element of the
+ list will be an InstanceStatus object.
+
+ :ivar next_token: If the response was truncated by
+ the EC2 service, the next_token attribute of the
+ object will contain the string that needs to be
+ passed in to the next request to retrieve the next
+ set of results.
+ """
+
+ def __init__(self, connection=None):
+ list.__init__(self)
+ self.connection = connection
+ self.next_token = None
+
+ def startElement(self, name, attrs, connection):
+ if name == 'item':
+ status = InstanceStatus()
+ self.append(status)
+ return status
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'NextToken':
+ self.next_token = value
+ setattr(self, name, value)
+
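A sketch of how the new InstanceStatusSet is meant to be consumed,
assuming the companion EC2Connection.get_all_instance_status call added
alongside this module and valid credentials:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    stats = conn.get_all_instance_status()
    while True:
        for status in stats:
            print status.id, status.state_name, status.system_status.status
            for event in status.events or []:
                print '  %s: %s' % (event.code, event.description)
        if not stats.next_token:
            break
        # next_token drives pagination when the response was truncated
        stats = conn.get_all_instance_status(next_token=stats.next_token)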
diff --git a/boto/ec2/launchspecification.py b/boto/ec2/launchspecification.py
index a574a38..037a8b0 100644
--- a/boto/ec2/launchspecification.py
+++ b/boto/ec2/launchspecification.py
@@ -1,4 +1,5 @@
-# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -26,7 +27,9 @@
from boto.ec2.ec2object import EC2Object
from boto.resultset import ResultSet
from boto.ec2.blockdevicemapping import BlockDeviceMapping
-from boto.ec2.instance import Group
+from boto.ec2.group import Group
+from boto.ec2.instance import SubParse
+
class GroupList(list):
@@ -36,9 +39,10 @@
def endElement(self, name, value, connection):
if name == 'groupId':
self.append(value)
-
+
+
class LaunchSpecification(EC2Object):
-
+
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
self.key_name = None
@@ -52,6 +56,8 @@
self.subnet_id = None
self._in_monitoring_element = False
self.block_device_mapping = None
+ self.instance_profile = None
+ self.ebs_optimized = False
def __repr__(self):
return 'LaunchSpecification(%s)' % self.image_id
@@ -65,6 +71,9 @@
elif name == 'blockDeviceMapping':
self.block_device_mapping = BlockDeviceMapping()
return self.block_device_mapping
+ elif name == 'iamInstanceProfile':
+ self.instance_profile = SubParse('iamInstanceProfile')
+ return self.instance_profile
else:
return None
@@ -90,7 +99,7 @@
if value == 'enabled':
self.monitored = True
self._in_monitoring_element = False
+ elif name == 'ebsOptimized':
+ self.ebs_optimized = (value == 'true')
else:
setattr(self, name, value)
-
-
diff --git a/boto/ec2/networkinterface.py b/boto/ec2/networkinterface.py
new file mode 100644
index 0000000..2658e3f
--- /dev/null
+++ b/boto/ec2/networkinterface.py
@@ -0,0 +1,163 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Elastic Network Interface
+"""
+from boto.ec2.ec2object import TaggedEC2Object
+from boto.resultset import ResultSet
+from boto.ec2.group import Group
+
+class Attachment(object):
+ """
+ :ivar id: The ID of the attachment.
+ :ivar instance_id: The ID of the instance.
+ :ivar device_index: The index of this device.
+ :ivar status: The status of the device.
+ :ivar attach_time: The time the device was attached.
+ :ivar delete_on_termination: Whether the device will be deleted
+ when the instance is terminated.
+ """
+
+ def __init__(self):
+ self.id = None
+ self.instance_id = None
+ self.instance_owner_id = None
+ self.device_index = 0
+ self.status = None
+ self.attach_time = None
+ self.delete_on_termination = False
+
+ def __repr__(self):
+ return 'Attachment:%s' % self.id
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'attachmentId':
+ self.id = value
+ elif name == 'instanceId':
+            self.instance_id = value
+        elif name == 'deviceIndex':
+            self.device_index = int(value)
+ elif name == 'instanceOwnerId':
+ self.instance_owner_id = value
+ elif name == 'status':
+ self.status = value
+ elif name == 'attachTime':
+ self.attach_time = value
+ elif name == 'deleteOnTermination':
+ if value.lower() == 'true':
+ self.delete_on_termination = True
+ else:
+ self.delete_on_termination = False
+ else:
+ setattr(self, name, value)
+
+class NetworkInterface(TaggedEC2Object):
+ """
+ An Elastic Network Interface.
+
+ :ivar id: The ID of the ENI.
+ :ivar subnet_id: The ID of the VPC subnet.
+ :ivar vpc_id: The ID of the VPC.
+ :ivar description: The description.
+ :ivar owner_id: The ID of the owner of the ENI.
+    :ivar requester_managed: A boolean indicating whether the interface
+        is managed by the requester (for example, an AWS service that
+        created it on your behalf).
+ :ivar status: The interface's status (available|in-use).
+ :ivar mac_address: The MAC address of the interface.
+ :ivar private_ip_address: The IP address of the interface within
+ the subnet.
+ :ivar source_dest_check: Flag to indicate whether to validate
+ network traffic to or from this network interface.
+ :ivar groups: List of security groups associated with the interface.
+ :ivar attachment: The attachment object.
+ """
+
+ def __init__(self, connection=None):
+ TaggedEC2Object.__init__(self, connection)
+ self.id = None
+ self.subnet_id = None
+ self.vpc_id = None
+ self.availability_zone = None
+ self.description = None
+ self.owner_id = None
+ self.requester_managed = False
+ self.status = None
+ self.mac_address = None
+ self.private_ip_address = None
+ self.source_dest_check = None
+ self.groups = []
+ self.attachment = None
+
+ def __repr__(self):
+ return 'NetworkInterface:%s' % self.id
+
+ def startElement(self, name, attrs, connection):
+ retval = TaggedEC2Object.startElement(self, name, attrs, connection)
+ if retval is not None:
+ return retval
+ if name == 'groupSet':
+ self.groups = ResultSet([('item', Group)])
+ return self.groups
+ elif name == 'attachment':
+ self.attachment = Attachment()
+ return self.attachment
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'networkInterfaceId':
+ self.id = value
+ elif name == 'subnetId':
+ self.subnet_id = value
+ elif name == 'vpcId':
+ self.vpc_id = value
+ elif name == 'availabilityZone':
+ self.availability_zone = value
+ elif name == 'description':
+ self.description = value
+ elif name == 'ownerId':
+ self.owner_id = value
+ elif name == 'requesterManaged':
+ if value.lower() == 'true':
+ self.requester_managed = True
+ else:
+ self.requester_managed = False
+ elif name == 'status':
+ self.status = value
+ elif name == 'macAddress':
+ self.mac_address = value
+ elif name == 'privateIpAddress':
+ self.private_ip_address = value
+ elif name == 'sourceDestCheck':
+ if value.lower() == 'true':
+ self.source_dest_check = True
+ else:
+ self.source_dest_check = False
+ else:
+ setattr(self, name, value)
+
+ def delete(self):
+ return self.connection.delete_network_interface(self.id)
+
+
+
+
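A short sketch exercising the new NetworkInterface class, assuming the
matching EC2Connection.get_all_network_interfaces call from this
revision and valid credentials:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    for eni in conn.get_all_network_interfaces():
        print eni.id, eni.status, eni.private_ip_address
        if eni.attachment:
            # Attachment carries the instance binding for in-use ENIs
            print '  attached to %s (delete_on_termination=%s)' % (
                eni.attachment.instance_id,
                eni.attachment.delete_on_termination)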
diff --git a/boto/ec2/reservedinstance.py b/boto/ec2/reservedinstance.py
index 1d35c1d..e71c1ad 100644
--- a/boto/ec2/reservedinstance.py
+++ b/boto/ec2/reservedinstance.py
@@ -14,18 +14,22 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+from boto.resultset import ResultSet
from boto.ec2.ec2object import EC2Object
+
class ReservedInstancesOffering(EC2Object):
-
+
def __init__(self, connection=None, id=None, instance_type=None,
availability_zone=None, duration=None, fixed_price=None,
- usage_price=None, description=None):
+ usage_price=None, description=None, instance_tenancy=None,
+ currency_code=None, offering_type=None,
+ recurring_charges=None, pricing_details=None):
EC2Object.__init__(self, connection)
self.id = id
self.instance_type = instance_type
@@ -34,11 +38,22 @@
self.fixed_price = fixed_price
self.usage_price = usage_price
self.description = description
+ self.instance_tenancy = instance_tenancy
+ self.currency_code = currency_code
+ self.offering_type = offering_type
+ self.recurring_charges = recurring_charges
+ self.pricing_details = pricing_details
def __repr__(self):
return 'ReservedInstanceOffering:%s' % self.id
def startElement(self, name, attrs, connection):
+ if name == 'recurringCharges':
+ self.recurring_charges = ResultSet([('item', RecurringCharge)])
+ return self.recurring_charges
+ elif name == 'pricingDetailsSet':
+ self.pricing_details = ResultSet([('item', PricingDetail)])
+ return self.pricing_details
return None
def endElement(self, name, value, connection):
@@ -49,15 +64,21 @@
elif name == 'availabilityZone':
self.availability_zone = value
elif name == 'duration':
- self.duration = value
+ self.duration = int(value)
elif name == 'fixedPrice':
self.fixed_price = value
elif name == 'usagePrice':
self.usage_price = value
elif name == 'productDescription':
self.description = value
- else:
- setattr(self, name, value)
+ elif name == 'instanceTenancy':
+ self.instance_tenancy = value
+ elif name == 'currencyCode':
+ self.currency_code = value
+ elif name == 'offeringType':
+ self.offering_type = value
+ elif name == 'marketplace':
+ self.marketplace = True if value == 'true' else False
def describe(self):
print 'ID=%s' % self.id
@@ -71,6 +92,31 @@
def purchase(self, instance_count=1):
return self.connection.purchase_reserved_instance_offering(self.id, instance_count)
+
+class RecurringCharge(object):
+ def __init__(self, connection=None, frequency=None, amount=None):
+ self.frequency = frequency
+ self.amount = amount
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+
+class PricingDetail(object):
+ def __init__(self, connection=None, price=None, count=None):
+ self.price = price
+ self.count = count
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+
class ReservedInstance(ReservedInstancesOffering):
def __init__(self, connection=None, id=None, instance_type=None,
@@ -95,3 +141,84 @@
self.state = value
else:
ReservedInstancesOffering.endElement(self, name, value, connection)
+
+
+class ReservedInstanceListing(EC2Object):
+ def __init__(self, connection=None, listing_id=None, id=None,
+ create_date=None, update_date=None,
+ status=None, status_message=None, client_token=None):
+ self.connection = connection
+ self.listing_id = listing_id
+ self.id = id
+ self.create_date = create_date
+ self.update_date = update_date
+ self.status = status
+ self.status_message = status_message
+ self.client_token = client_token
+
+ def startElement(self, name, attrs, connection):
+ if name == 'instanceCounts':
+ self.instance_counts = ResultSet([('item', InstanceCount)])
+ return self.instance_counts
+ elif name == 'priceSchedules':
+ self.price_schedules = ResultSet([('item', PriceSchedule)])
+ return self.price_schedules
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'reservedInstancesListingId':
+ self.listing_id = value
+ elif name == 'reservedInstancesId':
+ self.id = value
+ elif name == 'createDate':
+ self.create_date = value
+ elif name == 'updateDate':
+ self.update_date = value
+ elif name == 'status':
+ self.status = value
+ elif name == 'statusMessage':
+ self.status_message = value
+ else:
+ setattr(self, name, value)
+
+
+class InstanceCount(object):
+ def __init__(self, connection=None, state=None, instance_count=None):
+ self.state = state
+ self.instance_count = instance_count
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'state':
+ self.state = value
+ elif name == 'instanceCount':
+ self.instance_count = int(value)
+ else:
+ setattr(self, name, value)
+
+
+class PriceSchedule(object):
+ def __init__(self, connection=None, term=None, price=None,
+ currency_code=None, active=None):
+ self.connection = connection
+ self.term = term
+ self.price = price
+ self.currency_code = currency_code
+ self.active = active
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'term':
+ self.term = int(value)
+ elif name == 'price':
+ self.price = value
+ elif name == 'currencyCode':
+ self.currency_code = value
+ elif name == 'active':
+ self.active = True if value == 'true' else False
+ else:
+ setattr(self, name, value)
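A sketch showing how the new recurring-charge and pricing-detail fields
surface on offerings; get_all_reserved_instances_offerings is the
long-standing EC2Connection call, and valid credentials are assumed:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    for offering in conn.get_all_reserved_instances_offerings()[:5]:
        print offering.id, offering.instance_type, offering.offering_type
        for charge in offering.recurring_charges or []:
            print '  %s charge: %s' % (charge.frequency, charge.amount)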
diff --git a/boto/ec2/securitygroup.py b/boto/ec2/securitygroup.py
index af7811b..1b3c0ad 100644
--- a/boto/ec2/securitygroup.py
+++ b/boto/ec2/securitygroup.py
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -27,7 +27,7 @@
from boto.exception import BotoClientError
class SecurityGroup(TaggedEC2Object):
-
+
def __init__(self, connection=None, owner_id=None,
name=None, description=None, id=None):
TaggedEC2Object.__init__(self, connection)
@@ -74,7 +74,7 @@
else:
raise Exception(
'Unexpected value of status %s for group %s'%(
- value,
+ value,
self.name
)
)
@@ -82,10 +82,13 @@
setattr(self, name, value)
def delete(self):
- return self.connection.delete_security_group(self.name)
+ if self.vpc_id:
+ return self.connection.delete_security_group(group_id=self.id)
+ else:
+ return self.connection.delete_security_group(self.name)
def add_rule(self, ip_protocol, from_port, to_port,
- src_group_name, src_group_owner_id, cidr_ip):
+ src_group_name, src_group_owner_id, cidr_ip, src_group_group_id):
"""
Add a rule to the SecurityGroup object. Note that this method
only changes the local version of the object. No information
@@ -96,10 +99,10 @@
rule.from_port = from_port
rule.to_port = to_port
self.rules.append(rule)
- rule.add_grant(src_group_name, src_group_owner_id, cidr_ip)
+ rule.add_grant(src_group_name, src_group_owner_id, cidr_ip, src_group_group_id)
def remove_rule(self, ip_protocol, from_port, to_port,
- src_group_name, src_group_owner_id, cidr_ip):
+ src_group_name, src_group_owner_id, cidr_ip, src_group_group_id):
"""
Remove a rule to the SecurityGroup object. Note that this method
only changes the local version of the object. No information
@@ -113,7 +116,7 @@
target_rule = rule
target_grant = None
for grant in rule.grants:
- if grant.name == src_group_name:
+ if grant.name == src_group_name or grant.group_id == src_group_group_id:
if grant.owner_id == src_group_owner_id:
if grant.cidr_ip == cidr_ip:
target_grant = grant
@@ -130,7 +133,7 @@
OR ip_protocol, from_port, to_port,
and cidr_ip. In other words, either you are authorizing another
group or you are authorizing some ip-based rule.
-
+
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
@@ -140,55 +143,86 @@
:type to_port: int
:param to_port: The ending port number you are enabling
- :type cidr_ip: string
+ :type cidr_ip: string or list of strings
:param cidr_ip: The CIDR block you are providing access to.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or
:class:`boto.ec2.securitygroup.GroupOrCIDR`
:param src_group: The Security Group you are granting access to.
-
+
:rtype: bool
:return: True if successful.
"""
+ group_name = None
+ if not self.vpc_id:
+ group_name = self.name
+ group_id = None
+ if self.vpc_id:
+ group_id = self.id
+ src_group_name = None
+ src_group_owner_id = None
+ src_group_group_id = None
if src_group:
cidr_ip = None
- src_group_name = src_group.name
src_group_owner_id = src_group.owner_id
- else:
- src_group_name = None
- src_group_owner_id = None
- status = self.connection.authorize_security_group(self.name,
+ if not self.vpc_id:
+ src_group_name = src_group.name
+ else:
+ if hasattr(src_group, 'group_id'):
+ src_group_group_id = src_group.group_id
+ else:
+ src_group_group_id = src_group.id
+ status = self.connection.authorize_security_group(group_name,
src_group_name,
src_group_owner_id,
ip_protocol,
from_port,
to_port,
- cidr_ip)
+ cidr_ip,
+ group_id,
+ src_group_group_id)
if status:
- self.add_rule(ip_protocol, from_port, to_port, src_group_name,
- src_group_owner_id, cidr_ip)
+ if not isinstance(cidr_ip, list):
+ cidr_ip = [cidr_ip]
+ for single_cidr_ip in cidr_ip:
+ self.add_rule(ip_protocol, from_port, to_port, src_group_name,
+ src_group_owner_id, single_cidr_ip, src_group_group_id)
return status
def revoke(self, ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None, src_group=None):
+ group_name = None
+ if not self.vpc_id:
+ group_name = self.name
+ group_id = None
+ if self.vpc_id:
+ group_id = self.id
+ src_group_name = None
+ src_group_owner_id = None
+ src_group_group_id = None
if src_group:
- cidr_ip=None
- src_group_name = src_group.name
+ cidr_ip = None
src_group_owner_id = src_group.owner_id
- else:
- src_group_name = None
- src_group_owner_id = None
- status = self.connection.revoke_security_group(self.name,
+ if not self.vpc_id:
+ src_group_name = src_group.name
+ else:
+ if hasattr(src_group, 'group_id'):
+ src_group_group_id = src_group.group_id
+ else:
+ src_group_group_id = src_group.id
+ status = self.connection.revoke_security_group(group_name,
src_group_name,
src_group_owner_id,
ip_protocol,
from_port,
to_port,
- cidr_ip)
+ cidr_ip,
+ group_id,
+ src_group_group_id)
if status:
self.remove_rule(ip_protocol, from_port, to_port, src_group_name,
- src_group_owner_id, cidr_ip)
+ src_group_owner_id, cidr_ip, src_group_group_id)
return status
def copy_to_region(self, region, name=None):
@@ -204,7 +238,7 @@
:type name: string
:param name: The name of the copy. If not supplied, the copy
will have the same name as this security group.
-
+
:rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
:return: The new security group.
"""
@@ -215,11 +249,11 @@
sg = rconn.create_security_group(name or self.name, self.description)
source_groups = []
for rule in self.rules:
- grant = rule.grants[0]
for grant in rule.grants:
- if grant.name:
- if grant.name not in source_groups:
- source_groups.append(grant.name)
+ grant_nom = grant.name or grant.group_id
+ if grant_nom:
+ if grant_nom not in source_groups:
+ source_groups.append(grant_nom)
sg.authorize(None, None, None, None, grant)
else:
sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,
@@ -245,7 +279,7 @@
return instances
class IPPermissionsList(list):
-
+
def startElement(self, name, attrs, connection):
if name == 'item':
self.append(IPPermissions(self))
@@ -254,7 +288,7 @@
def endElement(self, name, value, connection):
pass
-
+
class IPPermissions(object):
def __init__(self, parent=None):
@@ -284,9 +318,10 @@
else:
setattr(self, name, value)
- def add_grant(self, name=None, owner_id=None, cidr_ip=None):
+ def add_grant(self, name=None, owner_id=None, cidr_ip=None, group_id=None):
grant = GroupOrCIDR(self)
grant.owner_id = owner_id
+ grant.group_id = group_id
grant.name = name
grant.cidr_ip = cidr_ip
self.grants.append(grant)
@@ -296,6 +331,7 @@
def __init__(self, parent=None):
self.owner_id = None
+ self.group_id = None
self.name = None
self.cidr_ip = None
@@ -303,7 +339,7 @@
if self.cidr_ip:
return '%s' % self.cidr_ip
else:
- return '%s-%s' % (self.name, self.owner_id)
+ return '%s-%s' % (self.name or self.group_id, self.owner_id)
def startElement(self, name, attrs, connection):
return None
@@ -311,10 +347,11 @@
def endElement(self, name, value, connection):
if name == 'userId':
self.owner_id = value
+ elif name == 'groupId':
+ self.group_id = value
elif name == 'groupName':
self.name = value
if name == 'cidrIp':
self.cidr_ip = value
else:
setattr(self, name, value)
-
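A sketch of the VPC-aware authorize path above: the group is resolved by
ID rather than name when vpc_id is set, and cidr_ip may now be a list.
The group ID and CIDR blocks are placeholders, the group_ids parameter
on get_all_security_groups is assumed, and credentials are assumed:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    group = conn.get_all_security_groups(group_ids=['sg-12345678'])[0]
    # For a VPC group, authorize() sends group_id instead of group_name
    group.authorize('tcp', 22, 22,
                    cidr_ip=['10.0.0.0/24', '10.0.1.0/24'])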
diff --git a/boto/ec2/snapshot.py b/boto/ec2/snapshot.py
index d52abe4..d2c4b2b 100644
--- a/boto/ec2/snapshot.py
+++ b/boto/ec2/snapshot.py
@@ -24,6 +24,7 @@
Represents an EC2 Elastic Block Store Snapshot
"""
from boto.ec2.ec2object import TaggedEC2Object
+from boto.ec2.zone import Zone
class Snapshot(TaggedEC2Object):
@@ -37,6 +38,7 @@
self.progress = None
self.start_time = None
self.owner_id = None
+ self.owner_alias = None
self.volume_size = None
self.description = None
@@ -54,6 +56,8 @@
self.start_time = value
elif name == 'ownerId':
self.owner_id = value
+ elif name == 'ownerAlias':
+ self.owner_alias = value
elif name == 'volumeSize':
try:
self.volume_size = int(value)
@@ -111,6 +115,30 @@
return self.connection.reset_snapshot_attribute(self.id,
self.AttrName)
+ def create_volume(self, zone, size=None, volume_type=None, iops=None):
+ """
+ Create a new EBS Volume from this Snapshot
+
+ :type zone: string or :class:`boto.ec2.zone.Zone`
+ :param zone: The availability zone in which the Volume will be created.
+
+ :type size: int
+        :param size: The size of the new volume, in GiB (optional).
+            Defaults to the size of the snapshot.
+
+        :type volume_type: string
+        :param volume_type: The type of the volume (optional). Valid
+            values are: standard | io1.
+
+        :type iops: int
+        :param iops: The provisioned IOPS you want to associate with
+            this volume (optional).
+ """
+ if isinstance(zone, Zone):
+ zone = zone.name
+ return self.connection.create_volume(size, zone, self.id, volume_type, iops)
+
+
class SnapshotAttribute:
def __init__(self, parent=None):
@@ -124,12 +152,12 @@
if name == 'createVolumePermission':
self.name = 'create_volume_permission'
elif name == 'group':
- if self.attrs.has_key('groups'):
+ if 'groups' in self.attrs:
self.attrs['groups'].append(value)
else:
self.attrs['groups'] = [value]
elif name == 'userId':
- if self.attrs.has_key('user_ids'):
+ if 'user_ids' in self.attrs:
self.attrs['user_ids'].append(value)
else:
self.attrs['user_ids'] = [value]
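A sketch of the new Snapshot.create_volume helper, assuming valid
credentials and a placeholder snapshot ID; zone may be a name or a Zone
object:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    snap = conn.get_all_snapshots(snapshot_ids=['snap-12345678'])[0]
    # size defaults to the snapshot size; iops only applies to io1 volumes
    volume = snap.create_volume('us-east-1a', volume_type='io1', iops=300)
    print volume.id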
diff --git a/boto/ec2/spotinstancerequest.py b/boto/ec2/spotinstancerequest.py
index 06acb0f..a3562ac 100644
--- a/boto/ec2/spotinstancerequest.py
+++ b/boto/ec2/spotinstancerequest.py
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -27,6 +27,7 @@
from boto.ec2.ec2object import TaggedEC2Object
from boto.ec2.launchspecification import LaunchSpecification
+
class SpotInstanceStateFault(object):
def __init__(self, code=None, message=None):
@@ -46,8 +47,9 @@
self.message = value
setattr(self, name, value)
+
class SpotInstanceRequest(TaggedEC2Object):
-
+
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
self.id = None
@@ -58,6 +60,7 @@
self.valid_from = None
self.valid_until = None
self.launch_group = None
+ self.launched_availability_zone = None
self.product_description = None
self.availability_zone_group = None
self.create_time = None
@@ -89,8 +92,6 @@
self.type = value
elif name == 'state':
self.state = value
- elif name == 'productDescription':
- self.product_description = value
elif name == 'validFrom':
self.valid_from = value
elif name == 'validUntil':
@@ -99,15 +100,16 @@
self.launch_group = value
elif name == 'availabilityZoneGroup':
self.availability_zone_group = value
- elif name == 'createTime':
- self.create_time = value
+ elif name == 'launchedAvailabilityZone':
+ self.launched_availability_zone = value
elif name == 'instanceId':
self.instance_id = value
+ elif name == 'createTime':
+ self.create_time = value
+ elif name == 'productDescription':
+ self.product_description = value
else:
setattr(self, name, value)
def cancel(self):
self.connection.cancel_spot_instance_requests([self.id])
-
-
-
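A sketch reading the newly parsed launched_availability_zone field on
spot requests, assuming valid credentials:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    for request in conn.get_all_spot_instance_requests():
        print request.id, request.state, request.launched_availability_zone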
diff --git a/boto/ec2/volume.py b/boto/ec2/volume.py
index 57f2cb1..bc5befc 100644
--- a/boto/ec2/volume.py
+++ b/boto/ec2/volume.py
@@ -1,5 +1,6 @@
-# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -15,7 +16,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -23,10 +24,28 @@
"""
Represents an EC2 Elastic Block Storage Volume
"""
+from boto.resultset import ResultSet
+from boto.ec2.tag import Tag
from boto.ec2.ec2object import TaggedEC2Object
+
class Volume(TaggedEC2Object):
-
+ """
+ Represents an EBS volume.
+
+ :ivar id: The unique ID of the volume.
+ :ivar create_time: The timestamp of when the volume was created.
+ :ivar status: The status of the volume.
+ :ivar size: The size (in GB) of the volume.
+ :ivar snapshot_id: The ID of the snapshot this volume was created
+ from, if applicable.
+ :ivar attach_data: An AttachmentSet object.
+ :ivar zone: The availability zone this volume is in.
+    :ivar type: The type of volume (standard or io1).
+    :ivar iops: If this volume is of type io1, this is
+        the number of IOPS provisioned (10-300).
+ """
+
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
self.id = None
@@ -36,6 +55,8 @@
self.snapshot_id = None
self.attach_data = None
self.zone = None
+ self.type = None
+ self.iops = None
def __repr__(self):
return 'Volume:%s' % self.id
@@ -48,7 +69,7 @@
self.attach_data = AttachmentSet()
return self.attach_data
elif name == 'tagSet':
- self.tags = boto.resultset.ResultSet([('item', Tag)])
+ self.tags = ResultSet([('item', Tag)])
return self.tags
else:
return None
@@ -67,6 +88,10 @@
self.snapshot_id = value
elif name == 'availabilityZone':
self.zone = value
+ elif name == 'volumeType':
+ self.type = value
+ elif name == 'iops':
+ self.iops = int(value)
else:
setattr(self, name, value)
@@ -84,7 +109,9 @@
raise a ValueError exception if no data is
returned from EC2.
"""
- rs = self.connection.get_all_volumes([self.id])
+ # Check the resultset since Eucalyptus ignores the volumeId param
+ unfiltered_rs = self.connection.get_all_volumes([self.id])
+ rs = [x for x in unfiltered_rs if x.id == self.id]
if len(rs) > 0:
self._update(rs[0])
elif validate:
@@ -122,13 +149,14 @@
Detach this EBS volume from an EC2 instance.
:type force: bool
- :param force: Forces detachment if the previous detachment attempt did
- not occur cleanly. This option can lead to data loss or
- a corrupted file system. Use this option only as a last
- resort to detach a volume from a failed instance. The
- instance will not have an opportunity to flush file system
- caches nor file system meta data. If you use this option,
- you must perform file system check and repair procedures.
+ :param force: Forces detachment if the previous detachment
+ attempt did not occur cleanly. This option can lead to
+ data loss or a corrupted file system. Use this option only
+ as a last resort to detach a volume from a failed
+ instance. The instance will not have an opportunity to
+ flush file system caches nor file system meta data. If you
+ use this option, you must perform file system check and
+ repair procedures.
:rtype: bool
:return: True if successful
@@ -139,17 +167,19 @@
device = None
if self.attach_data:
device = self.attach_data.device
- return self.connection.detach_volume(self.id, instance_id, device, force)
+ return self.connection.detach_volume(self.id, instance_id,
+ device, force)
def create_snapshot(self, description=None):
"""
Create a snapshot of this EBS Volume.
:type description: str
- :param description: A description of the snapshot. Limited to 256 characters.
-
- :rtype: bool
- :return: True if successful
+ :param description: A description of the snapshot.
+ Limited to 256 characters.
+
+ :rtype: :class:`boto.ec2.snapshot.Snapshot`
+ :return: The created Snapshot object
"""
return self.connection.create_snapshot(self.id, description)
@@ -176,17 +206,20 @@
those for this volume.
:type owner: str
- :param owner: If present, only the snapshots owned by the specified user
- will be returned. Valid values are:
- self | amazon | AWS Account ID
+ :param owner: If present, only the snapshots owned by the
+ specified user will be returned. Valid values are:
+
+ * self
+ * amazon
+ * AWS Account ID
:type restorable_by: str
- :param restorable_by: If present, only the snapshots that are restorable
- by the specified account id will be returned.
+ :param restorable_by: If present, only the snapshots that
+ are restorable by the specified account id will be returned.
:rtype: list of L{boto.ec2.snapshot.Snapshot}
:return: The requested Snapshot objects
-
+
"""
rs = self.connection.get_all_snapshots(owner=owner,
restorable_by=restorable_by)
@@ -196,8 +229,18 @@
mine.append(snap)
return mine
+
class AttachmentSet(object):
-
+ """
+    Represents an EBS attachment set.
+
+    :ivar id: The unique ID of the volume.
+    :ivar instance_id: The unique ID of the attached instance.
+    :ivar status: The status of the attachment.
+    :ivar attach_time: The time the volume was attached.
+    :ivar device: The device name to which the volume is mapped on
+        the instance.
+ """
+
def __init__(self):
self.id = None
self.instance_id = None
@@ -210,7 +253,7 @@
def startElement(self, name, attrs, connection):
pass
-
+
def endElement(self, name, value, connection):
if name == 'volumeId':
self.id = value
@@ -225,3 +268,26 @@
else:
setattr(self, name, value)
+
+class VolumeAttribute:
+
+ def __init__(self, parent=None):
+ self.id = None
+ self._key_name = None
+ self.attrs = {}
+
+ def startElement(self, name, attrs, connection):
+ if name == 'autoEnableIO':
+ self._key_name = name
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'value':
+ if value.lower() == 'true':
+ self.attrs[self._key_name] = True
+ else:
+ self.attrs[self._key_name] = False
+ elif name == 'volumeId':
+ self.id = value
+ else:
+ setattr(self, name, value)
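A sketch of the new volume type and IOPS fields, assuming valid
credentials:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    for volume in conn.get_all_volumes():
        # type/iops are populated from volumeType/iops in the response
        print volume.id, volume.zone, volume.type, volume.iops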
diff --git a/boto/ec2/volumestatus.py b/boto/ec2/volumestatus.py
new file mode 100644
index 0000000..7bbc173
--- /dev/null
+++ b/boto/ec2/volumestatus.py
@@ -0,0 +1,200 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.ec2.instancestatus import Status, Details
+
+class Event(object):
+ """
+    A status event for a volume.
+
+ :ivar type: The type of the event.
+ :ivar id: The ID of the event.
+ :ivar description: A string describing the reason for the event.
+ :ivar not_before: A datestring describing the earliest time for
+ the event.
+ :ivar not_after: A datestring describing the latest time for
+ the event.
+ """
+
+ def __init__(self, type=None, id=None, description=None,
+ not_before=None, not_after=None):
+ self.type = type
+ self.id = id
+ self.description = description
+ self.not_before = not_before
+ self.not_after = not_after
+
+ def __repr__(self):
+ return 'Event:%s' % self.type
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'eventType':
+ self.type = value
+ elif name == 'eventId':
+ self.id = value
+ elif name == 'description':
+ self.description = value
+ elif name == 'notBefore':
+ self.not_before = value
+ elif name == 'notAfter':
+ self.not_after = value
+ else:
+ setattr(self, name, value)
+
+class EventSet(list):
+
+ def startElement(self, name, attrs, connection):
+ if name == 'item':
+ event = Event()
+ self.append(event)
+ return event
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+class Action(object):
+ """
+    An action for a volume.
+
+ :ivar code: The code for the type of the action.
+ :ivar id: The ID of the event.
+ :ivar type: The type of the event.
+ :ivar description: A description of the action.
+ """
+
+ def __init__(self, code=None, id=None, description=None, type=None):
+ self.code = code
+ self.id = id
+ self.type = type
+ self.description = description
+
+ def __repr__(self):
+ return 'Action:%s' % self.code
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'eventType':
+ self.type = value
+ elif name == 'eventId':
+ self.id = value
+ elif name == 'description':
+ self.description = value
+ elif name == 'code':
+ self.code = value
+ else:
+ setattr(self, name, value)
+
+class ActionSet(list):
+
+ def startElement(self, name, attrs, connection):
+ if name == 'item':
+ action = Action()
+ self.append(action)
+ return action
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+class VolumeStatus(object):
+ """
+    Represents an EC2 Volume status as reported by a
+    DescribeVolumeStatus request.
+
+    :ivar id: The volume identifier.
+    :ivar zone: The availability zone of the volume.
+    :ivar volume_status: A Status object that reports impaired
+        functionality that arises from problems internal to the volume.
+    :ivar events: A list of events relevant to the volume.
+    :ivar actions: A list of actions relevant to the volume.
+ """
+
+ def __init__(self, id=None, zone=None):
+ self.id = id
+ self.zone = zone
+ self.volume_status = Status()
+ self.events = None
+ self.actions = None
+
+ def __repr__(self):
+ return 'VolumeStatus:%s' % self.id
+
+ def startElement(self, name, attrs, connection):
+ if name == 'eventsSet':
+ self.events = EventSet()
+ return self.events
+ elif name == 'actionsSet':
+ self.actions = ActionSet()
+ return self.actions
+ elif name == 'volumeStatus':
+ return self.volume_status
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'volumeId':
+ self.id = value
+ elif name == 'availabilityZone':
+ self.zone = value
+ else:
+ setattr(self, name, value)
+
+class VolumeStatusSet(list):
+ """
+    A list object that contains the results of a
+    DescribeVolumeStatus request. Each element of the
+    list will be a VolumeStatus object.
+
+ :ivar next_token: If the response was truncated by
+ the EC2 service, the next_token attribute of the
+ object will contain the string that needs to be
+ passed in to the next request to retrieve the next
+ set of results.
+ """
+
+ def __init__(self, connection=None):
+ list.__init__(self)
+ self.connection = connection
+ self.next_token = None
+
+ def startElement(self, name, attrs, connection):
+ if name == 'item':
+ status = VolumeStatus()
+ self.append(status)
+ return status
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'NextToken':
+ self.next_token = value
+ setattr(self, name, value)
+
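A sketch of consuming VolumeStatusSet, assuming the companion
EC2Connection.get_all_volume_status call added with this module and
valid credentials:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    stats = conn.get_all_volume_status()
    for status in stats:
        print status.id, status.zone, status.volume_status.status
        for action in status.actions or []:
            print '  action %s: %s' % (action.code, action.description)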
diff --git a/boto/ec2/zone.py b/boto/ec2/zone.py
index aec79b2..44068d4 100644
--- a/boto/ec2/zone.py
+++ b/boto/ec2/zone.py
@@ -24,21 +24,54 @@
"""
from boto.ec2.ec2object import EC2Object
+class MessageSet(list):
+ """
+ A list object that contains messages associated with
+ an availability zone.
+ """
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'message':
+ self.append(value)
+ else:
+ setattr(self, name, value)
+
class Zone(EC2Object):
+ """
+ Represents an Availability Zone.
+
+ :ivar name: The name of the zone.
+ :ivar state: The current state of the zone.
+ :ivar region_name: The name of the region the zone is associated with.
+ :ivar messages: A list of messages related to the zone.
+ """
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
self.name = None
self.state = None
+ self.region_name = None
+ self.messages = None
def __repr__(self):
return 'Zone:%s' % self.name
+ def startElement(self, name, attrs, connection):
+ if name == 'messageSet':
+ self.messages = MessageSet()
+ return self.messages
+ return None
+
def endElement(self, name, value, connection):
if name == 'zoneName':
self.name = value
elif name == 'zoneState':
self.state = value
+ elif name == 'regionName':
+ self.region_name = value
else:
setattr(self, name, value)
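A sketch of the extended Zone object, assuming valid credentials;
messages is None unless the response carried a messageSet:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    for zone in conn.get_all_zones():
        print zone.name, zone.state, zone.region_name
        for message in zone.messages or []:
            print '  %s' % message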
diff --git a/boto/emr/__init__.py b/boto/emr/__init__.py
index 3c33f9a..09ad2b4 100644
--- a/boto/emr/__init__.py
+++ b/boto/emr/__init__.py
@@ -1,4 +1,7 @@
# Copyright (c) 2010 Spotify AB
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -26,5 +29,42 @@
from connection import EmrConnection
from step import Step, StreamingStep, JarStep
from bootstrap_action import BootstrapAction
+from boto.regioninfo import RegionInfo
+def regions():
+ """
+ Get all available regions for the Amazon Elastic MapReduce service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ return [RegionInfo(name='us-east-1',
+ endpoint='elasticmapreduce.us-east-1.amazonaws.com',
+ connection_cls=EmrConnection),
+ RegionInfo(name='us-west-1',
+ endpoint='us-west-1.elasticmapreduce.amazonaws.com',
+ connection_cls=EmrConnection),
+ RegionInfo(name='us-west-2',
+ endpoint='us-west-2.elasticmapreduce.amazonaws.com',
+ connection_cls=EmrConnection),
+ RegionInfo(name='ap-northeast-1',
+ endpoint='ap-northeast-1.elasticmapreduce.amazonaws.com',
+ connection_cls=EmrConnection),
+ RegionInfo(name='ap-southeast-1',
+ endpoint='ap-southeast-1.elasticmapreduce.amazonaws.com',
+ connection_cls=EmrConnection),
+ RegionInfo(name='eu-west-1',
+ endpoint='eu-west-1.elasticmapreduce.amazonaws.com',
+ connection_cls=EmrConnection),
+ RegionInfo(name='sa-east-1',
+ endpoint='sa-east-1.elasticmapreduce.amazonaws.com',
+ connection_cls=EmrConnection),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
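A sketch of the new EMR region helpers defined above; connect_to_region
returns None for unknown region names:

    import boto.emr

    conn = boto.emr.connect_to_region('us-west-2')
    if conn is not None:
        print conn.region.name, conn.region.endpoint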
diff --git a/boto/emr/connection.py b/boto/emr/connection.py
index b1effcf..cae8ed1 100644
--- a/boto/emr/connection.py
+++ b/boto/emr/connection.py
@@ -29,17 +29,19 @@
import boto.utils
from boto.ec2.regioninfo import RegionInfo
from boto.emr.emrobject import JobFlow, RunJobFlowResponse
-from boto.emr.emrobject import AddInstanceGroupsResponse, ModifyInstanceGroupsResponse
+from boto.emr.emrobject import AddInstanceGroupsResponse
+from boto.emr.emrobject import ModifyInstanceGroupsResponse
from boto.emr.step import JarStep
from boto.connection import AWSQueryConnection
from boto.exception import EmrResponseError
+
class EmrConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
- 'elasticmapreduce.amazonaws.com')
+ 'elasticmapreduce.us-east-1.amazonaws.com')
ResponseError = EmrResponseError
# Constants for AWS Console debugging
@@ -49,16 +51,20 @@
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
- https_connection_factory=None, region=None, path='/'):
+ https_connection_factory=None, region=None, path='/',
+ security_token=None, validate_certs=True):
if not region:
- region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
- https_connection_factory, path)
+ https_connection_factory, path,
+ security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return ['emr']
@@ -134,7 +140,7 @@
:type steps: list(boto.emr.Step)
:param steps: A list of steps to add to the job
"""
- if type(steps) != types.ListType:
+ if not isinstance(steps, types.ListType):
steps = [steps]
params = {}
params['JobFlowId'] = jobflow_id
@@ -151,99 +157,145 @@
Adds instance groups to a running cluster.
:type jobflow_id: str
- :param jobflow_id: The id of the jobflow which will take the new instance groups
+ :param jobflow_id: The id of the jobflow which will take the
+ new instance groups
+
:type instance_groups: list(boto.emr.InstanceGroup)
:param instance_groups: A list of instance groups to add to the job
"""
- if type(instance_groups) != types.ListType:
+ if not isinstance(instance_groups, types.ListType):
instance_groups = [instance_groups]
params = {}
params['JobFlowId'] = jobflow_id
params.update(self._build_instance_group_list_args(instance_groups))
- return self.get_object('AddInstanceGroups', params, AddInstanceGroupsResponse, verb='POST')
+ return self.get_object('AddInstanceGroups', params,
+ AddInstanceGroupsResponse, verb='POST')
def modify_instance_groups(self, instance_group_ids, new_sizes):
"""
- Modify the number of nodes and configuration settings in an instance group.
+ Modify the number of nodes and configuration settings in an
+ instance group.
:type instance_group_ids: list(str)
- :param instance_group_ids: A list of the ID's of the instance groups to be modified
+ :param instance_group_ids: A list of the ID's of the instance
+ groups to be modified
+
:type new_sizes: list(int)
:param new_sizes: A list of the new sizes for each instance group
"""
- if type(instance_group_ids) != types.ListType:
+ if not isinstance(instance_group_ids, types.ListType):
instance_group_ids = [instance_group_ids]
- if type(new_sizes) != types.ListType:
+ if not isinstance(new_sizes, types.ListType):
new_sizes = [new_sizes]
instance_groups = zip(instance_group_ids, new_sizes)
params = {}
for k, ig in enumerate(instance_groups):
- #could be wrong - the example amazon gives uses InstanceRequestCount,
- #while the api documentation says InstanceCount
+ # could be wrong - the example amazon gives uses
+ # InstanceRequestCount, while the api documentation
+ # says InstanceCount
params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0]
params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1]
- return self.get_object('ModifyInstanceGroups', params, ModifyInstanceGroupsResponse, verb='POST')
+ return self.get_object('ModifyInstanceGroups', params,
+ ModifyInstanceGroupsResponse, verb='POST')
- def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None,
+ def run_jobflow(self, name, log_uri=None, ec2_keyname=None,
+ availability_zone=None,
master_instance_type='m1.small',
slave_instance_type='m1.small', num_instances=1,
action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
enable_debugging=False,
- hadoop_version='0.20',
+ hadoop_version=None,
steps=[],
bootstrap_actions=[],
instance_groups=None,
- additional_info=None):
+ additional_info=None,
+ ami_version=None,
+ api_params=None):
"""
Runs a job flow
-
:type name: str
:param name: Name of the job flow
+
:type log_uri: str
:param log_uri: URI of the S3 bucket to place logs
+
:type ec2_keyname: str
:param ec2_keyname: EC2 key used for the instances
+
:type availability_zone: str
:param availability_zone: EC2 availability zone of the cluster
+
:type master_instance_type: str
:param master_instance_type: EC2 instance type of the master
+
:type slave_instance_type: str
:param slave_instance_type: EC2 instance type of the slave nodes
+
:type num_instances: int
:param num_instances: Number of instances in the Hadoop cluster
+
:type action_on_failure: str
:param action_on_failure: Action to take if a step terminates
+
:type keep_alive: bool
- :param keep_alive: Denotes whether the cluster should stay alive upon completion
+ :param keep_alive: Denotes whether the cluster should stay
+ alive upon completion
+
:type enable_debugging: bool
- :param enable_debugging: Denotes whether AWS console debugging should be enabled.
+ :param enable_debugging: Denotes whether AWS console debugging
+ should be enabled.
+
+ :type hadoop_version: str
+ :param hadoop_version: Version of Hadoop to use. This no longer
+ defaults to '0.20' and now uses the AMI default.
+
:type steps: list(boto.emr.Step)
:param steps: List of steps to add with the job
+
:type bootstrap_actions: list(boto.emr.BootstrapAction)
- :param bootstrap_actions: List of bootstrap actions that run before Hadoop starts.
+ :param bootstrap_actions: List of bootstrap actions that run
+ before Hadoop starts.
+
:type instance_groups: list(boto.emr.InstanceGroup)
- :param instance_groups: Optional list of instance groups to use when creating
- this job. NB: When provided, this argument supersedes
- num_instances and master/slave_instance_type.
+ :param instance_groups: Optional list of instance groups to
+ use when creating this job.
+ NB: When provided, this argument supersedes num_instances
+ and master/slave_instance_type.
+
+ :type ami_version: str
+ :param ami_version: Amazon Machine Image (AMI) version to use
+ for instances. Values accepted by EMR are '1.0', '2.0', and
+ 'latest'; EMR currently defaults to '1.0' if you don't set
+ 'ami_version'.
+
:type additional_info: JSON str
:param additional_info: A JSON string for selecting additional features
+
+ :type api_params: dict
+ :param api_params: a dictionary of additional parameters to pass
+ directly to the EMR API (so you don't have to upgrade boto to
+ use new EMR features). You can also delete an API parameter
+ by setting it to None.
+
:rtype: str
:return: The jobflow id
"""
params = {}
if action_on_failure:
params['ActionOnFailure'] = action_on_failure
+ if log_uri:
+ params['LogUri'] = log_uri
params['Name'] = name
- params['LogUri'] = log_uri
# Common instance args
common_params = self._build_instance_common_args(ec2_keyname,
availability_zone,
- keep_alive, hadoop_version)
+ keep_alive,
+ hadoop_version)
params.update(common_params)
# NB: according to the AWS API's error message, we must
@@ -284,19 +336,31 @@
bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]
params.update(self._build_bootstrap_action_list(bootstrap_action_args))
+ if ami_version:
+ params['AmiVersion'] = ami_version
+
if additional_info is not None:
params['AdditionalInfo'] = additional_info
+ if api_params:
+ for key, value in api_params.iteritems():
+ if value is None:
+ params.pop(key, None)
+ else:
+ params[key] = value
+
response = self.get_object(
'RunJobFlow', params, RunJobFlowResponse, verb='POST')
return response.jobflowid
- def set_termination_protection(self, jobflow_id, termination_protection_status):
+ def set_termination_protection(self, jobflow_id,
+ termination_protection_status):
"""
Set termination protection on specified Elastic MapReduce job flows
:type jobflow_ids: list or str
:param jobflow_ids: A list of job flow IDs
+
:type termination_protection_status: bool
:param termination_protection_status: Termination protection status
"""
@@ -308,7 +372,6 @@
return self.get_status('SetTerminationProtection', params, verb='POST')
-
def _build_bootstrap_action_args(self, bootstrap_action):
bootstrap_action_params = {}
bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path
@@ -341,7 +404,7 @@
return step_params
def _build_bootstrap_action_list(self, bootstrap_actions):
- if type(bootstrap_actions) != types.ListType:
+ if not isinstance(bootstrap_actions, types.ListType):
bootstrap_actions = [bootstrap_actions]
params = {}
@@ -351,7 +414,7 @@
return params
def _build_step_list(self, steps):
- if type(steps) != types.ListType:
+ if not isinstance(steps, types.ListType):
steps = [steps]
params = {}
@@ -368,10 +431,11 @@
use in making a RunJobFlow request.
"""
params = {
- 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),
- 'Instances.HadoopVersion' : hadoop_version
+ 'Instances.KeepJobFlowAliveWhenNoSteps': str(keep_alive).lower(),
}
+ if hadoop_version:
+ params['Instances.HadoopVersion'] = hadoop_version
if ec2_keyname:
params['Instances.Ec2KeyName'] = ec2_keyname
if availability_zone:
@@ -386,11 +450,9 @@
(string), and a number of instances. Returns a comparable dict
for use in making a RunJobFlow request.
"""
- params = {
- 'Instances.MasterInstanceType' : master_instance_type,
- 'Instances.SlaveInstanceType' : slave_instance_type,
- 'Instances.InstanceCount' : num_instances,
- }
+ params = {'Instances.MasterInstanceType': master_instance_type,
+ 'Instances.SlaveInstanceType': slave_instance_type,
+ 'Instances.InstanceCount': num_instances}
return params
def _build_instance_group_args(self, instance_group):
@@ -399,13 +461,11 @@
properly prefixed, can be used for describing InstanceGroups in
RunJobFlow or AddInstanceGroups requests.
"""
- params = {
- 'InstanceCount' : instance_group.num_instances,
- 'InstanceRole' : instance_group.role,
- 'InstanceType' : instance_group.type,
- 'Name' : instance_group.name,
- 'Market' : instance_group.market
- }
+ params = {'InstanceCount': instance_group.num_instances,
+ 'InstanceRole': instance_group.role,
+ 'InstanceType': instance_group.type,
+ 'Name': instance_group.name,
+ 'Market': instance_group.market}
if instance_group.market == 'SPOT':
params['BidPrice'] = instance_group.bidprice
return params
@@ -416,7 +476,7 @@
a comparable dict for use in making a RunJobFlow or AddInstanceGroups
request.
"""
- if type(instance_groups) != types.ListType:
+ if not isinstance(instance_groups, types.ListType):
instance_groups = [instance_groups]
params = {}
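
Taken together, these connection changes make log_uri optional, add
ami_version, and let callers feed raw parameters to the API. A hedged
sketch of run_jobflow with the new arguments (the bucket names and the
Ec2SubnetId value are hypothetical):

    from boto.emr import connect_to_region
    from boto.emr.step import StreamingStep

    conn = connect_to_region('us-east-1')
    step = StreamingStep(name='Wordcount',
                         mapper='s3n://mybucket/wordSplitter.py',
                         reducer='aggregate',
                         input='s3n://mybucket/input',
                         output='s3n://mybucket/output')
    jobid = conn.run_jobflow(name='Example flow',
                             log_uri='s3://mybucket/logs',  # now optional
                             ami_version='2.0',
                             steps=[step],
                             # api_params passes fields boto does not model
                             # yet; setting a value to None deletes it.
                             api_params={'Instances.Ec2SubnetId':
                                         'subnet-12345678'})
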
diff --git a/boto/emr/emrobject.py b/boto/emr/emrobject.py
index 3430b98..c088812 100644
--- a/boto/emr/emrobject.py
+++ b/boto/emr/emrobject.py
@@ -128,6 +128,7 @@
class JobFlow(EmrObject):
Fields = set([
+ 'AmiVersion',
'AvailabilityZone',
'CreationDateTime',
'Ec2KeyName',
diff --git a/boto/emr/step.py b/boto/emr/step.py
index 15dfe88..a538903 100644
--- a/boto/emr/step.py
+++ b/boto/emr/step.py
@@ -191,3 +191,79 @@
self.name, self.mapper, self.reducer, self.action_on_failure,
self.cache_files, self.cache_archives, self.step_args,
self.input, self.output, self._jar)
+
+class ScriptRunnerStep(JarStep):
+
+ ScriptRunnerJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
+
+ def __init__(self, name, **kw):
+ JarStep.__init__(self, name, self.ScriptRunnerJar, **kw)
+
+class PigBase(ScriptRunnerStep):
+
+ BaseArgs = ['s3n://us-east-1.elasticmapreduce/libs/pig/pig-script',
+ '--base-path', 's3n://us-east-1.elasticmapreduce/libs/pig/']
+
+class InstallPigStep(PigBase):
+ """
+ Install Pig on EMR step
+ """
+
+ InstallPigName = 'Install Pig'
+
+ def __init__(self, pig_versions='latest'):
+ step_args = []
+ step_args.extend(self.BaseArgs)
+ step_args.extend(['--install-pig'])
+ step_args.extend(['--pig-versions', pig_versions])
+ ScriptRunnerStep.__init__(self, self.InstallPigName, step_args=step_args)
+
+class PigStep(PigBase):
+ """
+ Pig script step
+ """
+
+ def __init__(self, name, pig_file, pig_versions='latest', pig_args=[]):
+ step_args = []
+ step_args.extend(self.BaseArgs)
+ step_args.extend(['--pig-versions', pig_versions])
+ step_args.extend(['--run-pig-script', '--args', '-f', pig_file])
+ step_args.extend(pig_args)
+ ScriptRunnerStep.__init__(self, name, step_args=step_args)
+
+class HiveBase(ScriptRunnerStep):
+
+ BaseArgs = ['s3n://us-east-1.elasticmapreduce/libs/hive/hive-script',
+ '--base-path', 's3n://us-east-1.elasticmapreduce/libs/hive/']
+
+class InstallHiveStep(HiveBase):
+ """
+ Install Hive on EMR step
+ """
+ InstallHiveName = 'Install Hive'
+
+ def __init__(self, hive_versions='latest', hive_site=None):
+ step_args = []
+ step_args.extend(self.BaseArgs)
+ step_args.extend(['--install-hive'])
+ step_args.extend(['--hive-versions', hive_versions])
+ if hive_site is not None:
+ step_args.extend(['--hive-site=%s' % hive_site])
+ ScriptRunnerStep.__init__(self, self.InstallHiveName, step_args=step_args)
+
+
+class HiveStep(HiveBase):
+ """
+ Hive script step
+ """
+
+ def __init__(self, name, hive_file, hive_versions='latest',
+ hive_args=None):
+ step_args = []
+ step_args.extend(self.BaseArgs)
+ step_args.extend(['--hive-versions', hive_versions])
+ step_args.extend(['--hive-script', '--args', '-f', hive_file])
+ if hive_args is not None:
+ step_args.extend(hive_args)
+ ScriptRunnerStep.__init__(self, name, step_args=step_args)
+
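
The new step classes wrap EMR's script-runner JAR so Pig and Hive jobs
can be expressed like any other step. A sketch of installing Hive and
running a script (the S3 paths and arguments are hypothetical):

    from boto.emr import connect_to_region
    from boto.emr.step import InstallHiveStep, HiveStep

    conn = connect_to_region('us-east-1')
    steps = [InstallHiveStep(hive_versions='latest'),
             HiveStep('Nightly report',
                      hive_file='s3n://mybucket/scripts/report.q',
                      hive_args=['-d', 'DAY=2012-09-01'])]
    conn.run_jobflow(name='Hive flow', log_uri='s3://mybucket/logs',
                     steps=steps)
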
diff --git a/boto/exception.py b/boto/exception.py
index bfdb052..590f3d1 100644
--- a/boto/exception.py
+++ b/boto/exception.py
@@ -16,7 +16,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -85,7 +85,7 @@
try:
h = handler.XmlHandler(self, self)
xml.sax.parseString(self.body, h)
- except xml.sax.SAXParseException, pe:
+ except (TypeError, xml.sax.SAXParseException), pe:
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
@@ -298,6 +298,56 @@
for p in ('errors'):
setattr(self, p, None)
+class DynamoDBResponseError(BotoServerError):
+ """
+ This exception expects the fully parsed and decoded JSON response
+ body to be passed as the body parameter.
+
+ :ivar status: The HTTP status code.
+ :ivar reason: The HTTP reason message.
+ :ivar body: The Python dict that represents the decoded JSON
+ response body.
+ :ivar error_message: The full description of the AWS error encountered.
+ :ivar error_code: A short string that identifies the AWS error
+ (e.g. ConditionalCheckFailedException)
+ """
+
+ def __init__(self, status, reason, body=None, *args):
+ self.status = status
+ self.reason = reason
+ self.body = body
+ if self.body:
+ self.error_message = self.body.get('message', None)
+ self.error_code = self.body.get('__type', None)
+ if self.error_code:
+ self.error_code = self.error_code.split('#')[-1]
+
+
+class SWFResponseError(BotoServerError):
+ """
+ This exception expects the fully parsed and decoded JSON response
+ body to be passed as the body parameter.
+
+ :ivar status: The HTTP status code.
+ :ivar reason: The HTTP reason message.
+ :ivar body: The Python dict that represents the decoded JSON
+ response body.
+ :ivar error_message: The full description of the AWS error encountered.
+ :ivar error_code: A short string that identifies the AWS error
+ (e.g. ConditionalCheckFailedException)
+ """
+
+ def __init__(self, status, reason, body=None, *args):
+ self.status = status
+ self.reason = reason
+ self.body = body
+ if self.body:
+ self.error_message = self.body.get('message', None)
+ self.error_code = self.body.get('__type', None)
+ if self.error_code:
+ self.error_code = self.error_code.split('#')[-1]
+
+
class EmrResponseError(BotoServerError):
"""
Error in response from EMR
@@ -352,9 +402,6 @@
"""
pass
-class FPSResponseError(BotoServerError):
- pass
-
class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
@@ -369,6 +416,13 @@
Exception.__init__(self, message)
self.message = message
+class InvalidCorsError(Exception):
+ """Exception raised when CORS XML is invalid."""
+
+ def __init__(self, message):
+ Exception.__init__(self, message)
+ self.message = message
+
class NoAuthHandlerFound(Exception):
"""Is raised when no auth handlers were found ready to authenticate."""
pass
@@ -401,7 +455,7 @@
ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'
# ABORT means the resumable transfer failed in a way that it does not
- # make sense to continue in the current process, and further that the
+ # make sense to continue in the current process, and further that the
# current tracker ID should not be preserved (in a tracker file if one
# was specified at resumable upload start time). If the user tries again
# later (e.g., a separate run of gsutil) it will get a new resumable
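
Both JSON-style exceptions split AWS's '#'-qualified type name into a
bare error code. A self-contained sketch of that behaviour, using an
illustrative response body:

    from boto.exception import DynamoDBResponseError

    body = {'__type': 'com.amazonaws.dynamodb.v20111205'
                      '#ConditionalCheckFailedException',
            'message': 'The conditional request failed'}
    err = DynamoDBResponseError(400, 'Bad Request', body)
    assert err.error_code == 'ConditionalCheckFailedException'
    assert err.error_message == 'The conditional request failed'
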
diff --git a/boto/file/key.py b/boto/file/key.py
index 6f66eda..d39c8c6 100755
--- a/boto/file/key.py
+++ b/boto/file/key.py
@@ -74,7 +74,10 @@
key_file = self.fp
else:
key_file = open(self.full_path, 'rb')
- shutil.copyfileobj(key_file, fp)
+ try:
+ shutil.copyfileobj(key_file, fp)
+ finally:
+ key_file.close()
def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,
num_cb=10, policy=None, md5=None):
@@ -119,8 +122,10 @@
if not replace and os.path.exists(self.full_path):
return
key_file = open(self.full_path, 'wb')
- shutil.copyfileobj(fp, key_file)
- key_file.close()
+ try:
+ shutil.copyfileobj(fp, key_file)
+ finally:
+ key_file.close()
def get_contents_as_string(self, headers=None, cb=None, num_cb=10,
torrent=False):
@@ -152,3 +157,12 @@
def is_stream(self):
return (self.key_type & self.KEY_STREAM)
+
+ def close(self):
+ """
+ Closes fp associated with underlying file.
+ Caller should call this method when done with this class, to avoid
+ using up OS resources (e.g., when iterating over a large number
+ of files).
+ """
+ self.fp.close()
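
The close() method exists so callers iterating over many file:// keys
can release each OS file handle as they go. A hypothetical sketch
(assuming `bucket` is a boto.file bucket listing local files):

    for key in bucket.list():
        data = key.get_contents_as_string()
        key.close()   # release the underlying descriptor promptly
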
diff --git a/boto/fps/__init__.py b/boto/fps/__init__.py
index 2f44483..d69b7f0 100644
--- a/boto/fps/__init__.py
+++ b/boto/fps/__init__.py
@@ -14,10 +14,8 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-
-
diff --git a/boto/fps/connection.py b/boto/fps/connection.py
index 24b04d9..3b9057e 100644
--- a/boto/fps/connection.py
+++ b/boto/fps/connection.py
@@ -1,5 +1,6 @@
+# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/
+# Copyright (c) 2010 Jason R. Coombs http://www.jaraco.com/
# Copyright (c) 2008 Chris Moyer http://coredumped.org/
-# Copyringt (c) 2010 Jason R. Coombs http://www.jaraco.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -15,406 +16,354 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-import base64
-import hmac
-import hashlib
import urllib
-import xml.sax
import uuid
-import boto
-import boto.utils
-from boto import handler
from boto.connection import AWSQueryConnection
-from boto.resultset import ResultSet
-from boto.exception import FPSResponseError
+from boto.fps.exception import ResponseErrorFactory
+from boto.fps.response import ResponseFactory
+import boto.fps.response
+
+__all__ = ['FPSConnection']
+
+decorated_attrs = ('action', 'response')
+
+
+def add_attrs_from(func, to):
+ for attr in decorated_attrs:
+ setattr(to, attr, getattr(func, attr, None))
+ return to
+
+
+def complex_amounts(*fields):
+ def decorator(func):
+ def wrapper(self, *args, **kw):
+ for field in filter(kw.has_key, fields):
+ amount = kw.pop(field)
+ kw[field + '.Value'] = getattr(amount, 'Value', str(amount))
+ kw[field + '.CurrencyCode'] = getattr(amount, 'CurrencyCode',
+ self.currencycode)
+ return func(self, *args, **kw)
+ wrapper.__doc__ = "{0}\nComplex Amounts: {1}".format(func.__doc__,
+ ', '.join(fields))
+ return add_attrs_from(func, to=wrapper)
+ return decorator
+
+
+def requires(*groups):
+
+ def decorator(func):
+
+ def wrapper(*args, **kw):
+ hasgroup = lambda x: len(x) == len(filter(kw.has_key, x))
+ if 1 != len(filter(hasgroup, groups)):
+ message = ' OR '.join(['+'.join(g) for g in groups])
+ message = "{0} requires {1} argument(s)" \
+ "".format(getattr(func, 'action', 'Method'), message)
+ raise KeyError(message)
+ return func(*args, **kw)
+ message = ' OR '.join(['+'.join(g) for g in groups])
+ wrapper.__doc__ = "{0}\nRequired: {1}".format(func.__doc__,
+ message)
+ return add_attrs_from(func, to=wrapper)
+ return decorator
+
+
+def needs_caller_reference(func):
+
+ def wrapper(*args, **kw):
+ kw.setdefault('CallerReference', uuid.uuid4())
+ return func(*args, **kw)
+ wrapper.__doc__ = "{0}\nUses CallerReference, defaults " \
+ "to uuid.uuid4()".format(func.__doc__)
+ return add_attrs_from(func, to=wrapper)
+
+
+def api_action(*api):
+
+ def decorator(func):
+ action = ''.join(api or map(str.capitalize, func.func_name.split('_')))
+ response = ResponseFactory(action)
+ if hasattr(boto.fps.response, action + 'Response'):
+ response = getattr(boto.fps.response, action + 'Response')
+
+ def wrapper(self, *args, **kw):
+ return func(self, action, response, *args, **kw)
+ wrapper.action, wrapper.response = action, response
+ wrapper.__doc__ = "FPS {0} API call\n{1}".format(action,
+ func.__doc__)
+ return wrapper
+ return decorator
+
class FPSConnection(AWSQueryConnection):
- APIVersion = '2007-01-08'
+ APIVersion = '2010-08-28'
+ ResponseError = ResponseErrorFactory
+ currencycode = 'USD'
- def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
- is_secure=True, port=None, proxy=None, proxy_port=None,
- proxy_user=None, proxy_pass=None,
- host='fps.sandbox.amazonaws.com', debug=0,
- https_connection_factory=None, path="/"):
- AWSQueryConnection.__init__(self, aws_access_key_id,
- aws_secret_access_key,
- is_secure, port, proxy, proxy_port,
- proxy_user, proxy_pass, host, debug,
- https_connection_factory, path)
-
+ def __init__(self, *args, **kw):
+ self.currencycode = kw.pop('CurrencyCode', self.currencycode)
+ kw.setdefault('host', 'fps.sandbox.amazonaws.com')
+ AWSQueryConnection.__init__(self, *args, **kw)
+
def _required_auth_capability(self):
return ['fps']
- def install_payment_instruction(self, instruction,
- token_type="Unrestricted",
- transaction_id=None):
+ @needs_caller_reference
+ @complex_amounts('SettlementAmount')
+ @requires(['CreditInstrumentId', 'SettlementAmount.Value',
+ 'SenderTokenId', 'SettlementAmount.CurrencyCode'])
+ @api_action()
+ def settle_debt(self, action, response, **kw):
+ """Allows a caller to initiate a transaction that atomically
+ transfers money from a sender's payment instrument to the
+ recipient, while decreasing the corresponding debt balance.
"""
- InstallPaymentInstruction
- instruction: The PaymentInstruction to send, for example:
-
- MyRole=='Caller' orSay 'Roles do not match';
-
- token_type: Defaults to "Unrestricted"
- transaction_id: Defaults to a new ID
+ return self.get_object(action, kw, response)
+
+ @requires(['TransactionId'])
+ @api_action()
+ def get_transaction_status(self, action, response, **kw):
+ """Gets the latest status of a transaction.
"""
+ return self.get_object(action, kw, response)
- if(transaction_id == None):
- transaction_id = uuid.uuid4()
- params = {}
- params['PaymentInstruction'] = instruction
- params['TokenType'] = token_type
- params['CallerReference'] = transaction_id
- response = self.make_request("InstallPaymentInstruction", params)
- return response
-
- def install_caller_instruction(self, token_type="Unrestricted",
- transaction_id=None):
+ @requires(['StartDate'])
+ @api_action()
+ def get_account_activity(self, action, response, **kw):
+ """Returns transactions for a given date range.
"""
- Set us up as a caller
- This will install a new caller_token into the FPS section.
- This should really only be called to regenerate the caller token.
+ return self.get_object(action, kw, response)
+
+ @requires(['TransactionId'])
+ @api_action()
+ def get_transaction(self, action, response, **kw):
+ """Returns all details of a transaction.
"""
- response = self.install_payment_instruction("MyRole=='Caller';",
- token_type=token_type,
- transaction_id=transaction_id)
- body = response.read()
- if(response.status == 200):
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- caller_token = rs.TokenId
- try:
- boto.config.save_system_option("FPS", "caller_token",
- caller_token)
- except(IOError):
- boto.config.save_user_option("FPS", "caller_token",
- caller_token)
- return caller_token
- else:
- raise FPSResponseError(response.status, response.reason, body)
+ return self.get_object(action, kw, response)
- def install_recipient_instruction(self, token_type="Unrestricted",
- transaction_id=None):
+ @api_action()
+ def get_outstanding_debt_balance(self, action, response):
+ """Returns the total outstanding balance for all the credit
+ instruments for the given creditor account.
"""
- Set us up as a Recipient
- This will install a new caller_token into the FPS section.
- This should really only be called to regenerate the recipient token.
+ return self.get_object(action, {}, response)
+
+ @requires(['PrepaidInstrumentId'])
+ @api_action()
+ def get_prepaid_balance(self, action, response, **kw):
+ """Returns the balance available on the given prepaid instrument.
"""
- response = self.install_payment_instruction("MyRole=='Recipient';",
- token_type=token_type,
- transaction_id=transaction_id)
- body = response.read()
- if(response.status == 200):
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- recipient_token = rs.TokenId
- try:
- boto.config.save_system_option("FPS", "recipient_token",
- recipient_token)
- except(IOError):
- boto.config.save_user_option("FPS", "recipient_token",
- recipient_token)
+ return self.get_object(action, kw, response)
- return recipient_token
- else:
- raise FPSResponseError(response.status, response.reason, body)
-
- def make_marketplace_registration_url(self, returnURL, pipelineName,
- maxFixedFee=0.0, maxVariableFee=0.0,
- recipientPaysFee=True, **params):
+ @api_action()
+ def get_total_prepaid_liability(self, action, response):
+ """Returns the total liability held by the given account
+ corresponding to all the prepaid instruments owned by the
+ account.
"""
- Generate the URL with the signature required for signing up a recipient
+ return self.get_object(action, {}, response)
+
+ @api_action()
+ def get_account_balance(self, action, response):
+ """Returns the account balance for an account in real time.
"""
- # use the sandbox authorization endpoint if we're using the
- # sandbox for API calls.
- endpoint_host = 'authorize.payments.amazon.com'
- if 'sandbox' in self.host:
- endpoint_host = 'authorize.payments-sandbox.amazon.com'
- base = "/cobranded-ui/actions/start"
+ return self.get_object(action, {}, response)
- params['callerKey'] = str(self.aws_access_key_id)
- params['returnURL'] = str(returnURL)
- params['pipelineName'] = str(pipelineName)
- params['maxFixedFee'] = str(maxFixedFee)
- params['maxVariableFee'] = str(maxVariableFee)
- params['recipientPaysFee'] = str(recipientPaysFee)
- params["signatureMethod"] = 'HmacSHA256'
- params["signatureVersion"] = '2'
+ @needs_caller_reference
+ @requires(['PaymentInstruction', 'TokenType'])
+ @api_action()
+ def install_payment_instruction(self, action, response, **kw):
+ """Installs a payment instruction for caller.
+ """
+ return self.get_object(action, kw, response)
- if(not params.has_key('callerReference')):
- params['callerReference'] = str(uuid.uuid4())
+ @needs_caller_reference
+ @requires(['returnURL', 'pipelineName'])
+ def cbui_url(self, **kw):
+ """Generate a signed URL for the Co-Branded service API given
+ arguments as payload.
+ """
+ sandbox = 'sandbox' in self.host and 'payments-sandbox' or 'payments'
+ endpoint = 'authorize.{0}.amazon.com'.format(sandbox)
+ base = '/cobranded-ui/actions/start'
- parts = ''
- for k in sorted(params.keys()):
- parts += "&%s=%s" % (k, urllib.quote(params[k], '~'))
+ validpipelines = ('SingleUse', 'MultiUse', 'Recurring', 'Recipient',
+ 'SetupPrepaid', 'SetupPostpaid', 'EditToken')
+ assert kw['pipelineName'] in validpipelines, "Invalid pipelineName"
+ kw.update({
+ 'signatureMethod': 'HmacSHA256',
+ 'signatureVersion': '2',
+ })
+ kw.setdefault('callerKey', self.aws_access_key_id)
- canonical = '\n'.join(['GET',
- str(endpoint_host).lower(),
- base,
- parts[1:]])
+ safestr = lambda x: x is not None and str(x) or ''
+ safequote = lambda x: urllib.quote(safestr(x), safe='~')
+ payload = sorted([(k, safequote(v)) for k, v in kw.items()])
+ encoded = lambda p: '&'.join([k + '=' + v for k, v in p])
+ canonical = '\n'.join(['GET', endpoint, base, encoded(payload)])
signature = self._auth_handler.sign_string(canonical)
- params["signature"] = signature
+ payload += [('signature', safequote(signature))]
+ payload.sort()
- urlsuffix = ''
- for k in sorted(params.keys()):
- urlsuffix += "&%s=%s" % (k, urllib.quote(params[k], '~'))
- urlsuffix = urlsuffix[1:] # strip the first &
-
- fmt = "https://%(endpoint_host)s%(base)s?%(urlsuffix)s"
- final = fmt % vars()
- return final
+ return 'https://{0}{1}?{2}'.format(endpoint, base, encoded(payload))
+ @needs_caller_reference
+ @complex_amounts('TransactionAmount')
+ @requires(['SenderTokenId', 'TransactionAmount.Value',
+ 'TransactionAmount.CurrencyCode'])
+ @api_action()
+ def reserve(self, action, response, **kw):
+ """Reserve API is part of the Reserve and Settle API conjunction
+ that serve the purpose of a pay where the authorization and
+ settlement have a timing difference.
+ """
+ return self.get_object(action, kw, response)
- def make_url(self, returnURL, paymentReason, pipelineName,
- transactionAmount, **params):
+ @needs_caller_reference
+ @complex_amounts('TransactionAmount')
+ @requires(['SenderTokenId', 'TransactionAmount.Value',
+ 'TransactionAmount.CurrencyCode'])
+ @api_action()
+ def pay(self, action, response, **kw):
+ """Allows calling applications to move money from a sender to
+ a recipient.
"""
- Generate the URL with the signature required for a transaction
- """
- # use the sandbox authorization endpoint if we're using the
- # sandbox for API calls.
- endpoint_host = 'authorize.payments.amazon.com'
- if 'sandbox' in self.host:
- endpoint_host = 'authorize.payments-sandbox.amazon.com'
- base = "/cobranded-ui/actions/start"
+ return self.get_object(action, kw, response)
- params['callerKey'] = str(self.aws_access_key_id)
- params['returnURL'] = str(returnURL)
- params['paymentReason'] = str(paymentReason)
- params['pipelineName'] = pipelineName
- params['transactionAmount'] = transactionAmount
- params["signatureMethod"] = 'HmacSHA256'
- params["signatureVersion"] = '2'
-
- if(not params.has_key('callerReference')):
- params['callerReference'] = str(uuid.uuid4())
+ @requires(['TransactionId'])
+ @api_action()
+ def cancel(self, action, response, **kw):
+ """Cancels an ongoing transaction and puts it in cancelled state.
+ """
+ return self.get_object(action, kw, response)
- parts = ''
- for k in sorted(params.keys()):
- parts += "&%s=%s" % (k, urllib.quote(params[k], '~'))
+ @complex_amounts('TransactionAmount')
+ @requires(['ReserveTransactionId', 'TransactionAmount.Value',
+ 'TransactionAmount.CurrencyCode'])
+ @api_action()
+ def settle(self, action, response, **kw):
+ """The Settle API is used in conjunction with the Reserve API and
+ is used to settle a previously reserved transaction.
+ """
+ return self.get_object(action, kw, response)
- canonical = '\n'.join(['GET',
- str(endpoint_host).lower(),
- base,
- parts[1:]])
+ @complex_amounts('RefundAmount')
+ @requires(['TransactionId', 'RefundAmount.Value',
+ 'CallerReference', 'RefundAmount.CurrencyCode'])
+ @api_action()
+ def refund(self, action, response, **kw):
+ """Refunds a previously completed transaction.
+ """
+ return self.get_object(action, kw, response)
- signature = self._auth_handler.sign_string(canonical)
- params["signature"] = signature
+ @requires(['RecipientTokenId'])
+ @api_action()
+ def get_recipient_verification_status(self, action, response, **kw):
+ """Returns the recipient status.
+ """
+ return self.get_object(action, kw, response)
- urlsuffix = ''
- for k in sorted(params.keys()):
- urlsuffix += "&%s=%s" % (k, urllib.quote(params[k], '~'))
- urlsuffix = urlsuffix[1:] # strip the first &
-
- fmt = "https://%(endpoint_host)s%(base)s?%(urlsuffix)s"
- final = fmt % vars()
- return final
+ @requires(['CallerReference'], ['TokenId'])
+ @api_action()
+ def get_token_by_caller(self, action, response, **kw):
+ """Returns the details of a particular token installed by this
+ calling application using the subway co-branded UI.
+ """
+ return self.get_object(action, kw, response)
- def pay(self, transactionAmount, senderTokenId,
- recipientTokenId=None, callerTokenId=None,
- chargeFeeTo="Recipient",
- callerReference=None, senderReference=None, recipientReference=None,
- senderDescription=None, recipientDescription=None,
- callerDescription=None, metadata=None,
- transactionDate=None, reserve=False):
+ @requires(['UrlEndPoint', 'HttpParameters'])
+ @api_action()
+ def verify_signature(self, action, response, **kw):
+ """Verify the signature that FPS sent in IPN or callback urls.
"""
- Make a payment transaction. You must specify the amount.
- This can also perform a Reserve request if 'reserve' is set to True.
- """
- params = {}
- params['SenderTokenId'] = senderTokenId
- # this is for 2008-09-17 specification
- params['TransactionAmount.Amount'] = str(transactionAmount)
- params['TransactionAmount.CurrencyCode'] = "USD"
- #params['TransactionAmount'] = str(transactionAmount)
- params['ChargeFeeTo'] = chargeFeeTo
-
- params['RecipientTokenId'] = (
- recipientTokenId if recipientTokenId is not None
- else boto.config.get("FPS", "recipient_token")
- )
- params['CallerTokenId'] = (
- callerTokenId if callerTokenId is not None
- else boto.config.get("FPS", "caller_token")
- )
- if(transactionDate != None):
- params['TransactionDate'] = transactionDate
- if(senderReference != None):
- params['SenderReference'] = senderReference
- if(recipientReference != None):
- params['RecipientReference'] = recipientReference
- if(senderDescription != None):
- params['SenderDescription'] = senderDescription
- if(recipientDescription != None):
- params['RecipientDescription'] = recipientDescription
- if(callerDescription != None):
- params['CallerDescription'] = callerDescription
- if(metadata != None):
- params['MetaData'] = metadata
- if(callerReference == None):
- callerReference = uuid.uuid4()
- params['CallerReference'] = callerReference
-
- if reserve:
- response = self.make_request("Reserve", params)
- else:
- response = self.make_request("Pay", params)
- body = response.read()
- if(response.status == 200):
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- return rs
- else:
- raise FPSResponseError(response.status, response.reason, body)
-
- def get_transaction_status(self, transactionId):
- """
- Returns the status of a given transaction.
- """
- params = {}
- params['TransactionId'] = transactionId
-
- response = self.make_request("GetTransactionStatus", params)
- body = response.read()
- if(response.status == 200):
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- return rs
- else:
- raise FPSResponseError(response.status, response.reason, body)
-
- def cancel(self, transactionId, description=None):
- """
- Cancels a reserved or pending transaction.
- """
- params = {}
- params['transactionId'] = transactionId
- if(description != None):
- params['description'] = description
-
- response = self.make_request("Cancel", params)
- body = response.read()
- if(response.status == 200):
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- return rs
- else:
- raise FPSResponseError(response.status, response.reason, body)
-
- def settle(self, reserveTransactionId, transactionAmount=None):
- """
- Charges for a reserved payment.
- """
- params = {}
- params['ReserveTransactionId'] = reserveTransactionId
- if(transactionAmount != None):
- params['TransactionAmount'] = transactionAmount
-
- response = self.make_request("Settle", params)
- body = response.read()
- if(response.status == 200):
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- return rs
- else:
- raise FPSResponseError(response.status, response.reason, body)
-
- def refund(self, callerReference, transactionId, refundAmount=None,
- callerDescription=None):
- """
- Refund a transaction. This refunds the full amount by default
- unless 'refundAmount' is specified.
- """
- params = {}
- params['CallerReference'] = callerReference
- params['TransactionId'] = transactionId
- if(refundAmount != None):
- params['RefundAmount'] = refundAmount
- if(callerDescription != None):
- params['CallerDescription'] = callerDescription
-
- response = self.make_request("Refund", params)
- body = response.read()
- if(response.status == 200):
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- return rs
- else:
- raise FPSResponseError(response.status, response.reason, body)
-
- def get_recipient_verification_status(self, recipientTokenId):
- """
- Test that the intended recipient has a verified Amazon Payments account.
- """
- params ={}
- params['RecipientTokenId'] = recipientTokenId
-
- response = self.make_request("GetRecipientVerificationStatus", params)
- body = response.read()
- if(response.status == 200):
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- return rs
- else:
- raise FPSResponseError(response.status, response.reason, body)
-
- def get_token_by_caller_reference(self, callerReference):
- """
- Returns details about the token specified by 'CallerReference'.
- """
- params ={}
- params['CallerReference'] = callerReference
-
- response = self.make_request("GetTokenByCaller", params)
- body = response.read()
- if(response.status == 200):
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- return rs
- else:
- raise FPSResponseError(response.status, response.reason, body)
-
- def get_token_by_caller_token(self, tokenId):
- """
- Returns details about the token specified by 'TokenId'.
- """
- params ={}
- params['TokenId'] = tokenId
-
- response = self.make_request("GetTokenByCaller", params)
- body = response.read()
- if(response.status == 200):
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- return rs
- else:
- raise FPSResponseError(response.status, response.reason, body)
+ return self.get_object(action, kw, response)
- def verify_signature(self, end_point_url, http_parameters):
- params = dict(
- UrlEndPoint = end_point_url,
- HttpParameters = http_parameters,
- )
- response = self.make_request("VerifySignature", params)
- body = response.read()
- if(response.status != 200):
- raise FPSResponseError(response.status, response.reason, body)
- rs = ResultSet()
- h = handler.XmlHandler(rs, self)
- xml.sax.parseString(body, h)
- return rs
+ @api_action()
+ def get_tokens(self, action, response, **kw):
+ """Returns a list of tokens installed on the given account.
+ """
+ return self.get_object(action, kw, response)
+
+ @requires(['TokenId'])
+ @api_action()
+ def get_token_usage(self, action, response, **kw):
+ """Returns the usage of a token.
+ """
+ return self.get_object(action, kw, response)
+
+ @requires(['TokenId'])
+ @api_action()
+ def cancel_token(self, action, response, **kw):
+ """Cancels any token installed by the calling application on
+ its own account.
+ """
+ return self.get_object(action, kw, response)
+
+ @needs_caller_reference
+ @complex_amounts('FundingAmount')
+ @requires(['PrepaidInstrumentId', 'FundingAmount.Value',
+ 'SenderTokenId', 'FundingAmount.CurrencyCode'])
+ @api_action()
+ def fund_prepaid(self, action, response, **kw):
+ """Funds the prepaid balance on the given prepaid instrument.
+ """
+ return self.get_object(action, kw, response)
+
+ @requires(['CreditInstrumentId'])
+ @api_action()
+ def get_debt_balance(self, action, response, **kw):
+ """Returns the balance corresponding to the given credit instrument.
+ """
+ return self.get_object(action, kw, response)
+
+ @needs_caller_reference
+ @complex_amounts('AdjustmentAmount')
+ @requires(['CreditInstrumentId', 'AdjustmentAmount.Value',
+ 'AdjustmentAmount.CurrencyCode'])
+ @api_action()
+ def write_off_debt(self, action, response, **kw):
+ """Allows a creditor to write off the debt balance accumulated
+ partially or fully at any time.
+ """
+ return self.get_object(action, kw, response)
+
+ @requires(['SubscriptionId'])
+ @api_action()
+ def get_transactions_for_subscription(self, action, response, **kw):
+ """Returns the transactions for a given subscriptionID.
+ """
+ return self.get_object(action, kw, response)
+
+ @requires(['SubscriptionId'])
+ @api_action()
+ def get_subscription_details(self, action, response, **kw):
+ """Returns the details of Subscription for a given subscriptionID.
+ """
+ return self.get_object(action, kw, response)
+
+ @needs_caller_reference
+ @complex_amounts('RefundAmount')
+ @requires(['SubscriptionId'])
+ @api_action()
+ def cancel_subscription_and_refund(self, action, response, **kw):
+ """Cancels a subscription.
+ """
+ message = "If you specify a RefundAmount, " \
+ "you must specify CallerReference."
+ assert 'RefundAmount.Value' not in kw \
+ or 'CallerReference' in kw, message
+ return self.get_object(action, kw, response)
+
+ @requires(['TokenId'])
+ @api_action()
+ def get_payment_instruction(self, action, response, **kw):
+ """Gets the payment instruction of a token.
+ """
+ return self.get_object(action, kw, response)
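
With the decorator-driven rewrite, every call takes AWS-style keyword
arguments: complex_amounts expands a bare amount into Value/CurrencyCode
pairs, and needs_caller_reference supplies a UUID CallerReference. A
hedged sketch (the sender token id is hypothetical):

    from decimal import Decimal
    from boto.fps.connection import FPSConnection

    conn = FPSConnection()   # defaults to the FPS sandbox host
    url = conn.cbui_url(returnURL='http://example.com/fps-return',
                        pipelineName='SingleUse',
                        transactionAmount='1.23',
                        paymentReason='Example payment')
    # ...after the sender completes the Co-Branded UI flow...
    resp = conn.pay(SenderTokenId=sender_token_id,  # hypothetical token
                    TransactionAmount=Decimal('1.23'))
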
diff --git a/boto/fps/exception.py b/boto/fps/exception.py
new file mode 100644
index 0000000..bebb86b
--- /dev/null
+++ b/boto/fps/exception.py
@@ -0,0 +1,344 @@
+from boto.exception import BotoServerError
+
+
+class ResponseErrorFactory(BotoServerError):
+
+ def __new__(cls, *args, **kw):
+ error = BotoServerError(*args, **kw)
+ newclass = globals().get(error.error_code, ResponseError)
+ obj = newclass.__new__(newclass, *args, **kw)
+ obj.__dict__.update(error.__dict__)
+ return obj
+
+
+class ResponseError(BotoServerError):
+ """Undefined response error.
+ """
+ retry = False
+
+ def __repr__(self):
+ return '{0}({1}, {2},\n\t{3})'.format(self.__class__.__name__,
+ self.status, self.reason,
+ self.error_message)
+
+ def __str__(self):
+ return 'FPS Response Error: {0.status} {0.__class__.__name__} {1}\n' \
+ '{2}\n' \
+ '{0.error_message}'.format(self,
+ self.retry and '(Retriable)' or '',
+ self.__doc__.strip())
+
+
+class RetriableResponseError(ResponseError):
+ retry = True
+
+
+class AccessFailure(RetriableResponseError):
+ """Account cannot be accessed.
+ """
+
+
+class AccountClosed(RetriableResponseError):
+ """Account is not active.
+ """
+
+
+class AccountLimitsExceeded(RetriableResponseError):
+ """The spending or receiving limit on the account is exceeded.
+ """
+
+
+class AmountOutOfRange(ResponseError):
+ """The transaction amount is more than the allowed range.
+ """
+
+
+class AuthFailure(RetriableResponseError):
+ """AWS was not able to validate the provided access credentials.
+ """
+
+
+class ConcurrentModification(RetriableResponseError):
+ """A retriable error can happen when two processes try to modify the
+ same data at the same time.
+ """
+
+
+class DuplicateRequest(ResponseError):
+ """A different request associated with this caller reference already
+ exists.
+ """
+
+
+class InactiveInstrument(ResponseError):
+ """Payment instrument is inactive.
+ """
+
+
+class IncompatibleTokens(ResponseError):
+ """The transaction could not be completed because the tokens have
+ incompatible payment instructions.
+ """
+
+
+class InstrumentAccessDenied(ResponseError):
+ """The external calling application is not the recipient for this
+ postpaid or prepaid instrument.
+ """
+
+
+class InstrumentExpired(ResponseError):
+ """The prepaid or the postpaid instrument has expired.
+ """
+
+
+class InsufficientBalance(RetriableResponseError):
+ """The sender, caller, or recipient's account balance has
+ insufficient funds to complete the transaction.
+ """
+
+
+class InternalError(RetriableResponseError):
+ """A retriable error that happens due to some transient problem in
+ the system.
+ """
+
+
+class InvalidAccountState(RetriableResponseError):
+ """The account is either suspended or closed.
+ """
+
+
+class InvalidAccountState_Caller(RetriableResponseError):
+ """The developer account cannot participate in the transaction.
+ """
+
+
+class InvalidAccountState_Recipient(RetriableResponseError):
+ """Recipient account cannot participate in the transaction.
+ """
+
+
+class InvalidAccountState_Sender(RetriableResponseError):
+ """Sender account cannot participate in the transaction.
+ """
+
+
+class InvalidCallerReference(ResponseError):
+ """The Caller Reference does not have a token associated with it.
+ """
+
+
+class InvalidClientTokenId(ResponseError):
+ """The AWS Access Key Id you provided does not exist in our records.
+ """
+
+
+class InvalidDateRange(ResponseError):
+ """The end date specified is before the start date or the start date
+ is in the future.
+ """
+
+
+class InvalidParams(ResponseError):
+ """One or more parameters in the request is invalid.
+ """
+
+
+class InvalidPaymentInstrument(ResponseError):
+ """The payment method used in the transaction is invalid.
+ """
+
+
+class InvalidPaymentMethod(ResponseError):
+ """Specify correct payment method.
+ """
+
+
+class InvalidRecipientForCCTransaction(ResponseError):
+ """This account cannot receive credit card payments.
+ """
+
+
+class InvalidSenderRoleForAccountType(ResponseError):
+ """This token cannot be used for this operation.
+ """
+
+
+class InvalidTokenId(ResponseError):
+ """You did not install the token that you are trying to cancel.
+ """
+
+
+class InvalidTokenId_Recipient(ResponseError):
+ """The recipient token specified is either invalid or canceled.
+ """
+
+
+class InvalidTokenId_Sender(ResponseError):
+ """The sender token specified is either invalid or canceled or the
+ token is not active.
+ """
+
+
+class InvalidTokenType(ResponseError):
+ """An invalid operation was performed on the token, for example,
+ getting the token usage information on a single use token.
+ """
+
+
+class InvalidTransactionId(ResponseError):
+ """The specified transaction could not be found or the caller did not
+ execute the transaction or this is not a Pay or Reserve call.
+ """
+
+
+class InvalidTransactionState(ResponseError):
+ """The transaction is not complete, or it has temporarily failed.
+ """
+
+
+class NotMarketplaceApp(RetriableResponseError):
+ """This is not an marketplace application or the caller does not
+ match either the sender or the recipient.
+ """
+
+
+class OriginalTransactionFailed(ResponseError):
+ """The original transaction has failed.
+ """
+
+
+class OriginalTransactionIncomplete(RetriableResponseError):
+ """The original transaction is still in progress.
+ """
+
+
+class PaymentInstrumentNotCC(ResponseError):
+ """The payment method specified in the transaction is not a credit
+ card. You can only use a credit card for this transaction.
+ """
+
+
+class PaymentMethodNotDefined(ResponseError):
+ """Payment method is not defined in the transaction.
+ """
+
+
+class PrepaidFundingLimitExceeded(RetriableResponseError):
+ """An attempt has been made to fund the prepaid instrument
+ at a level greater than its recharge limit.
+ """
+
+
+class RefundAmountExceeded(ResponseError):
+ """The refund amount is more than the refundable amount.
+ """
+
+
+class SameSenderAndRecipient(ResponseError):
+ """The sender and receiver are identical, which is not allowed.
+ """
+
+
+class SameTokenIdUsedMultipleTimes(ResponseError):
+ """This token is already used in earlier transactions.
+ """
+
+
+class SenderNotOriginalRecipient(ResponseError):
+ """The sender in the refund transaction is not
+ the recipient of the original transaction.
+ """
+
+
+class SettleAmountGreaterThanDebt(ResponseError):
+ """The amount being settled or written off is
+ greater than the current debt.
+ """
+
+
+class SettleAmountGreaterThanReserveAmount(ResponseError):
+ """The amount being settled is greater than the reserved amount.
+ """
+
+
+class SignatureDoesNotMatch(ResponseError):
+ """The request signature calculated by Amazon does not match the
+ signature you provided.
+ """
+
+
+class TokenAccessDenied(ResponseError):
+ """Permission to cancel the token is denied.
+ """
+
+
+class TokenNotActive(ResponseError):
+ """The token is canceled.
+ """
+
+
+class TokenNotActive_Recipient(ResponseError):
+ """The recipient token is canceled.
+ """
+
+
+class TokenNotActive_Sender(ResponseError):
+ """The sender token is canceled.
+ """
+
+
+class TokenUsageError(ResponseError):
+ """The token usage limit is exceeded.
+ """
+
+
+class TransactionDenied(ResponseError):
+ """The transaction is not allowed.
+ """
+
+
+class TransactionFullyRefundedAlready(ResponseError):
+ """The transaction has already been completely refunded.
+ """
+
+
+class TransactionTypeNotRefundable(ResponseError):
+ """You cannot refund this transaction.
+ """
+
+
+class UnverifiedAccount_Recipient(ResponseError):
+ """The recipient's account must have a verified bank account or a
+ credit card before this transaction can be initiated.
+ """
+
+
+class UnverifiedAccount_Sender(ResponseError):
+ """The sender's account must have a verified U.S. credit card or
+ a verified U.S. bank account before this transaction can be
+ initiated.
+ """
+
+
+class UnverifiedBankAccount(ResponseError):
+ """A verified bank account should be used for this transaction.
+ """
+
+
+class UnverifiedEmailAddress_Caller(ResponseError):
+ """The caller account must have a verified email address.
+ """
+
+
+class UnverifiedEmailAddress_Recipient(ResponseError):
+ """The recipient account must have a verified
+ email address for receiving payments.
+ """
+
+
+class UnverifiedEmailAddress_Sender(ResponseError):
+ """The sender account must have a verified
+ email address for this payment.
+ """
diff --git a/boto/fps/response.py b/boto/fps/response.py
new file mode 100644
index 0000000..fa77b2d
--- /dev/null
+++ b/boto/fps/response.py
@@ -0,0 +1,175 @@
+from decimal import Decimal
+
+
+def ResponseFactory(action):
+ class FPSResponse(Response):
+ _action = action
+ _Result = globals().get(action + 'Result', ResponseElement)
+
+ # due to nodes receiving their closing tags
+ def endElement(self, name, value, connection):
+ if name != action + 'Response':
+ Response.endElement(self, name, value, connection)
+ return FPSResponse
+
+
+class ResponseElement(object):
+ def __init__(self, connection=None, name=None):
+ if connection is not None:
+ self._connection = connection
+ self._name = name or self.__class__.__name__
+
+ @property
+ def connection(self):
+ return self._connection
+
+ def __repr__(self):
+ render = lambda pair: '{!s}: {!r}'.format(*pair)
+ do_show = lambda pair: not pair[0].startswith('_')
+ attrs = filter(do_show, self.__dict__.items())
+ return '{0}({1})'.format(self.__class__.__name__,
+ ', '.join(map(render, attrs)))
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ # due to nodes receiving their closing tags
+ def endElement(self, name, value, connection):
+ if name != self._name:
+ setattr(self, name, value)
+
+
+class Response(ResponseElement):
+ _action = 'Undefined'
+
+ def startElement(self, name, attrs, connection):
+ if name == 'ResponseMetadata':
+ setattr(self, name, ResponseElement(name=name))
+ elif name == self._action + 'Result':
+ setattr(self, name, self._Result(name=name))
+ else:
+ return ResponseElement.startElement(self, name, attrs, connection)
+ return getattr(self, name)
+
+
+class ComplexAmount(ResponseElement):
+ def __repr__(self):
+ return '{0} {1}'.format(self.CurrencyCode, self.Value)
+
+ def __float__(self):
+ return float(self.Value)
+
+ def __str__(self):
+ return str(self.Value)
+
+ def startElement(self, name, attrs, connection):
+ if name not in ('CurrencyCode', 'Value'):
+ message = 'Unrecognized tag {0} in ComplexAmount'.format(name)
+ raise AssertionError(message)
+ return ResponseElement.startElement(self, name, attrs, connection)
+
+ def endElement(self, name, value, connection):
+ if name == 'Value':
+ value = Decimal(value)
+ ResponseElement.endElement(self, name, value, connection)
+
+
+class AmountCollection(ResponseElement):
+ def startElement(self, name, attrs, connection):
+ setattr(self, name, ComplexAmount(name=name))
+ return getattr(self, name)
+
+
+class AccountBalance(AmountCollection):
+ def startElement(self, name, attrs, connection):
+ if name == 'AvailableBalances':
+ setattr(self, name, AmountCollection(name=name))
+ return getattr(self, name)
+ return AmountCollection.startElement(self, name, attrs, connection)
+
+
+class GetAccountBalanceResult(ResponseElement):
+ def startElement(self, name, attrs, connection):
+ if name == 'AccountBalance':
+ setattr(self, name, AccountBalance(name=name))
+ return getattr(self, name)
+ return Response.startElement(self, name, attrs, connection)
+
+
+class GetTotalPrepaidLiabilityResult(ResponseElement):
+ def startElement(self, name, attrs, connection):
+ if name == 'OutstandingPrepaidLiability':
+ setattr(self, name, AmountCollection(name=name))
+ return getattr(self, name)
+ return Response.startElement(self, name, attrs, connection)
+
+
+class GetPrepaidBalanceResult(ResponseElement):
+ def startElement(self, name, attrs, connection):
+ if name == 'PrepaidBalance':
+ setattr(self, name, AmountCollection(name=name))
+ return getattr(self, name)
+ return Response.startElement(self, name, attrs, connection)
+
+
+class GetOutstandingDebtBalanceResult(ResponseElement):
+ def startElement(self, name, attrs, connection):
+ if name == 'OutstandingDebt':
+ setattr(self, name, AmountCollection(name=name))
+ return getattr(self, name)
+ return Response.startElement(self, name, attrs, connection)
+
+
+class TransactionPart(ResponseElement):
+ def startElement(self, name, attrs, connection):
+ if name == 'FeesPaid':
+ setattr(self, name, ComplexAmount(name=name))
+ return getattr(self, name)
+ return ResponseElement.startElement(self, name, attrs, connection)
+
+
+class Transaction(ResponseElement):
+ def __init__(self, *args, **kw):
+ self.TransactionPart = []
+ ResponseElement.__init__(self, *args, **kw)
+
+ def startElement(self, name, attrs, connection):
+ if name == 'TransactionPart':
+ getattr(self, name).append(TransactionPart(name=name))
+ return getattr(self, name)[-1]
+ if name in ('TransactionAmount', 'FPSFees', 'Balance'):
+ setattr(self, name, ComplexAmount(name=name))
+ return getattr(self, name)
+ return ResponseElement.startElement(self, name, attrs, connection)
+
+
+class GetAccountActivityResult(ResponseElement):
+ def __init__(self, *args, **kw):
+ self.Transaction = []
+ ResponseElement.__init__(self, *args, **kw)
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Transaction':
+ getattr(self, name).append(Transaction(name=name))
+ return getattr(self, name)[-1]
+ return ResponseElement.startElement(self, name, attrs, connection)
+
+
+class GetTransactionResult(ResponseElement):
+ def startElement(self, name, attrs, connection):
+ if name == 'Transaction':
+ setattr(self, name, Transaction(name=name))
+ return getattr(self, name)
+ return ResponseElement.startElement(self, name, attrs, connection)
+
+
+class GetTokensResult(ResponseElement):
+ def __init__(self, *args, **kw):
+ self.Token = []
+ ResponseElement.__init__(self, *args, **kw)
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Token':
+ getattr(self, name).append(ResponseElement(name=name))
+ return getattr(self, name)[-1]
+ return ResponseElement.startElement(self, name, attrs, connection)
diff --git a/boto/fps/test/test_install_caller_instruction.py b/boto/fps/test/test_install_caller_instruction.py
deleted file mode 100644
index 8095914..0000000
--- a/boto/fps/test/test_install_caller_instruction.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from boto.fps.connection import FPSConnection
-conn = FPSConnection()
-conn.install_caller_instruction()
-conn.install_recipient_instruction()
diff --git a/boto/fps/test/test_verify_signature.py b/boto/fps/test/test_verify_signature.py
deleted file mode 100644
index 10c6b61..0000000
--- a/boto/fps/test/test_verify_signature.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from boto.fps.connection import FPSConnection
-conn = FPSConnection()
-# example response from the docs
-params = 'expiry=08%2F2015&signature=ynDukZ9%2FG77uSJVb5YM0cadwHVwYKPMKOO3PNvgADbv6VtymgBxeOWEhED6KGHsGSvSJnMWDN%2FZl639AkRe9Ry%2F7zmn9CmiM%2FZkp1XtshERGTqi2YL10GwQpaH17MQqOX3u1cW4LlyFoLy4celUFBPq1WM2ZJnaNZRJIEY%2FvpeVnCVK8VIPdY3HMxPAkNi5zeF2BbqH%2BL2vAWef6vfHkNcJPlOuOl6jP4E%2B58F24ni%2B9ek%2FQH18O4kw%2FUJ7ZfKwjCCI13%2BcFybpofcKqddq8CuUJj5Ii7Pdw1fje7ktzHeeNhF0r9siWcYmd4JaxTP3NmLJdHFRq2T%2FgsF3vK9m3gw%3D%3D&signatureVersion=2&signatureMethod=RSA-SHA1&certificateUrl=https%3A%2F%2Ffps.sandbox.amazonaws.com%2Fcerts%2F090909%2FPKICert.pem&tokenID=A5BB3HUNAZFJ5CRXIPH72LIODZUNAUZIVP7UB74QNFQDSQ9MN4HPIKISQZWPLJXF&status=SC&callerReference=callerReferenceMultiUse1'
-endpoint = 'http://vamsik.desktop.amazon.com:8080/ipn.jsp'
-conn.verify_signature(endpoint, params)
diff --git a/boto/glacier/__init__.py b/boto/glacier/__init__.py
new file mode 100644
index 0000000..a65733b
--- /dev/null
+++ b/boto/glacier/__init__.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.ec2.regioninfo import RegionInfo
+
+
+def regions():
+ """
+ Get all available regions for the Amazon Glacier service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.glacier.layer2 import Layer2
+ return [RegionInfo(name='us-east-1',
+ endpoint='glacier.us-east-1.amazonaws.com',
+ connection_cls=Layer2),
+ RegionInfo(name='us-west-1',
+ endpoint='glacier.us-west-1.amazonaws.com',
+ connection_cls=Layer2),
+ RegionInfo(name='us-west-2',
+ endpoint='glacier.us-west-2.amazonaws.com',
+ connection_cls=Layer2),
+ RegionInfo(name='ap-northeast-1',
+ endpoint='glacier.ap-northeast-1.amazonaws.com',
+ connection_cls=Layer2),
+ RegionInfo(name='eu-west-1',
+ endpoint='glacier.eu-west-1.amazonaws.com',
+ connection_cls=Layer2),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+    """
+    Given a valid region name, return a
+    :class:`boto.glacier.layer2.Layer2` connection to that region,
+    or None if the name is not recognized.
+    """
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
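+
+
+# Example usage (a sketch; assumes AWS credentials are available in
+# the usual boto locations):
+#
+#     import boto.glacier
+#     layer2 = boto.glacier.connect_to_region('us-east-1')
+#     vault = layer2.create_vault('myvault')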
diff --git a/boto/glacier/concurrent.py b/boto/glacier/concurrent.py
new file mode 100644
index 0000000..b993c67
--- /dev/null
+++ b/boto/glacier/concurrent.py
@@ -0,0 +1,213 @@
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import os
+import math
+import threading
+import hashlib
+import time
+import logging
+from Queue import Queue, Empty
+
+from .writer import chunk_hashes, tree_hash, bytes_to_hex
+from .exceptions import UploadArchiveError
+
+
+DEFAULT_PART_SIZE = 4 * 1024 * 1024
+_END_SENTINEL = object()
+log = logging.getLogger('boto.glacier.concurrent')
+
+
+class ConcurrentUploader(object):
+ """Concurrently upload an archive to glacier.
+
+ This class uses a thread pool to concurrently upload an archive
+ to glacier using the multipart upload API.
+
+ The threadpool is completely managed by this class and is
+ transparent to the users of this class.
+
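+    Example (a sketch; assumes a configured
+    :class:`boto.glacier.layer1.Layer1` connection):
+
+        api = Layer1()
+        uploader = ConcurrentUploader(api, 'myvault')
+        archive_id = uploader.upload('/tmp/archive.tar.gz')
+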
+ """
+ def __init__(self, api, vault_name, part_size=DEFAULT_PART_SIZE,
+ num_threads=10):
+ """
+ :type api: :class:`boto.glacier.layer1.Layer1`
+ :param api: A layer1 glacier object.
+
+ :type vault_name: str
+ :param vault_name: The name of the vault.
+
+ :type part_size: int
+        :param part_size: The size, in bytes, of the parts to use
+            when uploading the archive. The part size must be a
+            megabyte (1024 KB) multiplied by a power of two.
+
+        :type num_threads: int
+        :param num_threads: The number of threads in the thread pool
+            used to upload parts concurrently.
+
+        """
+ self._api = api
+ self._vault_name = vault_name
+ self._part_size = part_size
+ self._num_threads = num_threads
+ self._threads = []
+
+ def upload(self, filename, description=None):
+ """Concurrently create an archive.
+
+        :type filename: str
+        :param filename: The filename to upload.
+
+ :type description: str
+ :param description: The description of the archive.
+
+ :rtype: str
+ :return: The archive id of the newly created archive.
+
+ """
+        total_size = os.stat(filename).st_size
+ total_parts = int(math.ceil(total_size / float(self._part_size)))
+ hash_chunks = [None] * total_parts
+ worker_queue = Queue()
+ result_queue = Queue()
+ response = self._api.initiate_multipart_upload(self._vault_name,
+ self._part_size,
+ description)
+ upload_id = response['UploadId']
+ # The basic idea is to add the chunks (the offsets not the actual
+        # contents) to a work queue, start up a thread pool, let the
+        # threads crank through the items in the work queue, and then
+        # place their results in a result queue which we use to
+        # complete the multipart upload.
+ self._add_work_items_to_queue(total_parts, worker_queue)
+ self._start_upload_threads(result_queue, upload_id,
+ worker_queue, filename)
+ try:
+ self._wait_for_upload_threads(hash_chunks, result_queue, total_parts)
+ except UploadArchiveError, e:
+ log.debug("An error occurred while uploading an archive, aborting "
+ "multipart upload.")
+ self._api.abort_multipart_upload(self._vault_name, upload_id)
+            raise
+ log.debug("Completing upload.")
+ response = self._api.complete_multipart_upload(
+ self._vault_name, upload_id, bytes_to_hex(tree_hash(hash_chunks)),
+ total_size)
+ log.debug("Upload finished.")
+ return response['ArchiveId']
+
+ def _wait_for_upload_threads(self, hash_chunks, result_queue, total_parts):
+ for _ in xrange(total_parts):
+ result = result_queue.get()
+ if isinstance(result, Exception):
+ log.debug("An error was found in the result queue, terminating "
+ "threads: %s", result)
+ self._shutdown_threads()
+ raise UploadArchiveError("An error occurred while uploading "
+ "an archive: %s" % result)
+ # Each unit of work returns the tree hash for the given part
+ # number, which we use at the end to compute the tree hash of
+ # the entire archive.
+ part_number, tree_sha256 = result
+ hash_chunks[part_number] = tree_sha256
+ self._shutdown_threads()
+
+ def _shutdown_threads(self):
+ log.debug("Shutting down threads.")
+ for thread in self._threads:
+ thread.should_continue = False
+ for thread in self._threads:
+ thread.join()
+ log.debug("Threads have exited.")
+
+ def _start_upload_threads(self, result_queue, upload_id, worker_queue, filename):
+ log.debug("Starting threads.")
+ for _ in xrange(self._num_threads):
+ thread = UploadWorkerThread(self._api, self._vault_name, filename,
+ upload_id, worker_queue, result_queue)
+ time.sleep(0.2)
+ thread.start()
+ self._threads.append(thread)
+
+ def _add_work_items_to_queue(self, total_parts, worker_queue):
+ log.debug("Adding work items to queue.")
+ for i in xrange(total_parts):
+ worker_queue.put((i, self._part_size))
+ for i in xrange(self._num_threads):
+ worker_queue.put(_END_SENTINEL)
+
+
+class UploadWorkerThread(threading.Thread):
+ def __init__(self, api, vault_name, filename, upload_id,
+ worker_queue, result_queue, num_retries=5,
+ time_between_retries=5,
+ retry_exceptions=Exception):
+ threading.Thread.__init__(self)
+ self._api = api
+ self._vault_name = vault_name
+ self._filename = filename
+ self._fileobj = open(filename, 'rb')
+ self._worker_queue = worker_queue
+ self._result_queue = result_queue
+ self._upload_id = upload_id
+ self._num_retries = num_retries
+ self._time_between_retries = time_between_retries
+ self._retry_exceptions = retry_exceptions
+ self.should_continue = True
+
+ def run(self):
+ while self.should_continue:
+ try:
+ work = self._worker_queue.get(timeout=1)
+ except Empty:
+ continue
+ if work is _END_SENTINEL:
+ return
+ result = self._process_chunk(work)
+ self._result_queue.put(result)
+
+ def _process_chunk(self, work):
+ result = None
+ for _ in xrange(self._num_retries):
+ try:
+ result = self._upload_chunk(work)
+ break
+ except self._retry_exceptions, e:
+ log.error("Exception caught uploading part number %s for "
+ "vault %s, filename: %s", work[0], self._vault_name,
+ self._filename)
+ time.sleep(self._time_between_retries)
+ result = e
+ return result
+
+ def _upload_chunk(self, work):
+ part_number, part_size = work
+ start_byte = part_number * part_size
+ self._fileobj.seek(start_byte)
+ contents = self._fileobj.read(part_size)
+ linear_hash = hashlib.sha256(contents).hexdigest()
+ tree_hash_bytes = tree_hash(chunk_hashes(contents))
+ byte_range = (start_byte, start_byte + len(contents) - 1)
+ log.debug("Uploading chunk %s of size %s", part_number, part_size)
+ response = self._api.upload_part(self._vault_name, self._upload_id,
+ linear_hash,
+ bytes_to_hex(tree_hash_bytes),
+ byte_range, contents)
+ # Reading the response allows the connection to be reused.
+ response.read()
+ return (part_number, tree_hash_bytes)
diff --git a/boto/glacier/exceptions.py b/boto/glacier/exceptions.py
new file mode 100644
index 0000000..e525880
--- /dev/null
+++ b/boto/glacier/exceptions.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import json
+
+
+class UnexpectedHTTPResponseError(Exception):
+ def __init__(self, expected_responses, response):
+ self.status = response.status
+ self.body = response.read()
+ self.code = None
+ try:
+ body = json.loads(self.body)
+ self.code = body["code"]
+ msg = 'Expected %s, got ' % expected_responses
+ msg += '(%d, code=%s, message=%s)' % (response.status,
+ self.code,
+ body["message"])
+ except Exception:
+ msg = 'Expected %s, got (%d, %s)' % (expected_responses,
+ response.status,
+ self.body)
+ super(UnexpectedHTTPResponseError, self).__init__(msg)
+
+
+class UploadArchiveError(Exception):
+ pass
+
+
+class DownloadArchiveError(Exception):
+ pass
+
+
+class TreeHashDoesNotMatchError(DownloadArchiveError):
+ pass
diff --git a/boto/glacier/job.py b/boto/glacier/job.py
new file mode 100644
index 0000000..62f0758
--- /dev/null
+++ b/boto/glacier/job.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import math
+import socket
+
+from .exceptions import TreeHashDoesNotMatchError, DownloadArchiveError
+from .writer import bytes_to_hex, chunk_hashes, tree_hash
+
+
+class Job(object):
+
+ DefaultPartSize = 4 * 1024 * 1024
+
+ ResponseDataElements = (('Action', 'action', None),
+ ('ArchiveId', 'archive_id', None),
+ ('ArchiveSizeInBytes', 'archive_size', 0),
+ ('Completed', 'completed', False),
+ ('CompletionDate', 'completion_date', None),
+ ('CreationDate', 'creation_date', None),
+ ('InventorySizeInBytes', 'inventory_size', 0),
+ ('JobDescription', 'description', None),
+ ('JobId', 'id', None),
+ ('SHA256TreeHash', 'sha256_treehash', None),
+ ('SNSTopic', 'sns_topic', None),
+ ('StatusCode', 'status_code', None),
+ ('StatusMessage', 'status_message', None),
+ ('VaultARN', 'arn', None))
+
+ def __init__(self, vault, response_data=None):
+ self.vault = vault
+ if response_data:
+ for response_name, attr_name, default in self.ResponseDataElements:
+ setattr(self, attr_name, response_data[response_name])
+ else:
+ for response_name, attr_name, default in self.ResponseDataElements:
+ setattr(self, attr_name, default)
+
+ def __repr__(self):
+ return 'Job(%s)' % self.arn
+
+ def get_output(self, byte_range=None):
+ """
+ This operation downloads the output of the job. Depending on
+ the job type you specified when you initiated the job, the
+ output will be either the content of an archive or a vault
+ inventory.
+
+ You can download all the job output or download a portion of
+ the output by specifying a byte range. In the case of an
+ archive retrieval job, depending on the byte range you
+ specify, Amazon Glacier returns the checksum for the portion
+ of the data. You can compute the checksum on the client and
+ verify that the values match to ensure the portion you
+ downloaded is the correct data.
+
+        :type byte_range: tuple
+        :param byte_range: A tuple of integers specifying the slice
+            (in bytes) of the archive you want to receive.
+ """
+ return self.vault.layer1.get_job_output(self.vault.name,
+ self.id,
+ byte_range)
+
+ def download_to_file(self, filename, chunk_size=DefaultPartSize,
+ verify_hashes=True, retry_exceptions=(socket.error,)):
+ """Download an archive to a file.
+
+ :type filename: str
+ :param filename: The name of the file where the archive
+ contents will be saved.
+
+ :type chunk_size: int
+ :param chunk_size: The chunk size to use when downloading
+ the archive.
+
+ :type verify_hashes: bool
+ :param verify_hashes: Indicates whether or not to verify
+ the tree hashes for each downloaded chunk.
+
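+        Example (a sketch; assumes the retrieval job has completed):
+
+            job = vault.get_job(job_id)
+            if job.completed:
+                job.download_to_file('/tmp/archive.out')
+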
+ """
+ num_chunks = int(math.ceil(self.archive_size / float(chunk_size)))
+ with open(filename, 'wb') as output_file:
+ self._download_to_fileob(output_file, num_chunks, chunk_size,
+ verify_hashes, retry_exceptions)
+
+ def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes,
+ retry_exceptions):
+ for i in xrange(num_chunks):
+ byte_range = ((i * chunk_size), ((i + 1) * chunk_size) - 1)
+ data, expected_tree_hash = self._download_byte_range(
+ byte_range, retry_exceptions)
+ if verify_hashes:
+ actual_tree_hash = bytes_to_hex(tree_hash(chunk_hashes(data)))
+ if expected_tree_hash != actual_tree_hash:
+ raise TreeHashDoesNotMatchError(
+ "The calculated tree hash %s does not match the "
+ "expected tree hash %s for the byte range %s" % (
+ actual_tree_hash, expected_tree_hash, byte_range))
+ fileobj.write(data)
+
+ def _download_byte_range(self, byte_range, retry_exceptions):
+ # You can occasionally get socket.errors when downloading
+ # chunks from Glacier, so each chunk can be retried up
+ # to 5 times.
+ for _ in xrange(5):
+ try:
+ response = self.get_output(byte_range)
+ data = response.read()
+ expected_tree_hash = response['TreeHash']
+ return data, expected_tree_hash
+ except retry_exceptions, e:
+ continue
+ else:
+            raise DownloadArchiveError("There was an error downloading "
+ "byte range %s: %s" % (byte_range,
+ e))
diff --git a/boto/glacier/layer1.py b/boto/glacier/layer1.py
new file mode 100644
index 0000000..1888a8e
--- /dev/null
+++ b/boto/glacier/layer1.py
@@ -0,0 +1,625 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import os
+import json
+import urllib
+
+import boto.glacier
+from boto.connection import AWSAuthConnection
+from .exceptions import UnexpectedHTTPResponseError
+from .response import GlacierResponse
+
+
+class Layer1(AWSAuthConnection):
+
+ Version = '2012-06-01'
+ """Glacier API version."""
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ account_id='-', is_secure=True, port=None,
+ proxy=None, proxy_port=None,
+ proxy_user=None, proxy_pass=None, debug=0,
+ https_connection_factory=None, path='/',
+ provider='aws', security_token=None,
+ suppress_consec_slashes=True,
+ region=None, region_name='us-east-1'):
+
+ if not region:
+ for reg in boto.glacier.regions():
+ if reg.name == region_name:
+ region = reg
+ break
+
+ self.region = region
+ self.account_id = account_id
+ AWSAuthConnection.__init__(self, region.endpoint,
+ aws_access_key_id, aws_secret_access_key,
+ True, port, proxy, proxy_port,
+ proxy_user, proxy_pass, debug,
+ https_connection_factory,
+ path, provider, security_token,
+ suppress_consec_slashes)
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def make_request(self, verb, resource, headers=None,
+ data='', ok_responses=(200,), params=None,
+ response_headers=None):
+ if headers is None:
+ headers = {}
+ headers['x-amz-glacier-version'] = self.Version
+ uri = '/%s/%s' % (self.account_id, resource)
+ response = AWSAuthConnection.make_request(self, verb, uri,
+ params=params,
+ headers=headers,
+ data=data)
+ if response.status in ok_responses:
+ return GlacierResponse(response, response_headers)
+ else:
+ # create glacier-specific exceptions
+ raise UnexpectedHTTPResponseError(ok_responses, response)
+
+ # Vaults
+
+ def list_vaults(self, limit=None, marker=None):
+ """
+ This operation lists all vaults owned by the calling user’s
+ account. The list returned in the response is ASCII-sorted by
+ vault name.
+
+ By default, this operation returns up to 1,000 items. If there
+ are more vaults to list, the marker field in the response body
+ contains the vault Amazon Resource Name (ARN) at which to
+ continue the list with a new List Vaults request; otherwise,
+ the marker field is null. In your next List Vaults request you
+ set the marker parameter to the value Amazon Glacier returned
+ in the responses to your previous List Vaults request. You can
+ also limit the number of vaults returned in the response by
+ specifying the limit parameter in the request.
+
+ :type limit: int
+ :param limit: The maximum number of items returned in the
+ response. If you don't specify a value, the List Vaults
+ operation returns up to 1,000 items.
+
+ :type marker: str
+ :param marker: A string used for pagination. marker specifies
+ the vault ARN after which the listing of vaults should
+ begin. (The vault specified by marker is not included in
+ the returned list.) Get the marker value from a previous
+ List Vaults response. You need to include the marker only
+ if you are continuing the pagination of results started in
+ a previous List Vaults request. Specifying an empty value
+ ("") for the marker returns a list of vaults starting
+ from the first vault.
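+
+        Example of paginating through all vaults (a sketch; assumes
+        the marker key in the response body is 'Marker'):
+
+            response = layer1.list_vaults(limit=10)
+            vaults = response['VaultList']
+            while response.get('Marker'):
+                response = layer1.list_vaults(limit=10,
+                                              marker=response['Marker'])
+                vaults.extend(response['VaultList'])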
+ """
+ params = {}
+ if limit:
+ params['limit'] = limit
+ if marker:
+ params['marker'] = marker
+ return self.make_request('GET', 'vaults', params=params)
+
+ def describe_vault(self, vault_name):
+ """
+ This operation returns information about a vault, including
+ the vault Amazon Resource Name (ARN), the date the vault was
+ created, the number of archives contained within the vault,
+ and the total size of all the archives in the vault. The
+ number of archives and their total size are as of the last
+ vault inventory Amazon Glacier generated. Amazon Glacier
+ generates vault inventories approximately daily. This means
+ that if you add or remove an archive from a vault, and then
+ immediately send a Describe Vault request, the response might
+ not reflect the changes.
+
+ :type vault_name: str
+        :param vault_name: The name of the vault.
+ """
+ uri = 'vaults/%s' % vault_name
+ return self.make_request('GET', uri)
+
+ def create_vault(self, vault_name):
+ """
+ This operation creates a new vault with the specified name.
+ The name of the vault must be unique within a region for an
+ AWS account. You can create up to 1,000 vaults per
+ account. For information on creating more vaults, go to the
+ Amazon Glacier product detail page.
+
+        You must follow these guidelines when naming a vault:
+
+ Names can be between 1 and 255 characters long.
+
+ Allowed characters are a–z, A–Z, 0–9, '_' (underscore),
+ '-' (hyphen), and '.' (period).
+
+        This operation is idempotent: you can send the same request
+        multiple times and it has no further effect after the first
+        time Amazon Glacier creates the specified vault.
+
+ :type vault_name: str
+ :param vault_name: The name of the new vault
+ """
+ uri = 'vaults/%s' % vault_name
+ return self.make_request('PUT', uri, ok_responses=(201,),
+ response_headers=[('Location', 'Location')])
+
+ def delete_vault(self, vault_name):
+ """
+ This operation deletes a vault. Amazon Glacier will delete a
+ vault only if there are no archives in the vault as per the
+ last inventory and there have been no writes to the vault
+ since the last inventory. If either of these conditions is not
+ satisfied, the vault deletion fails (that is, the vault is not
+ removed) and Amazon Glacier returns an error.
+
+        This operation is idempotent: you can send the same request
+        multiple times and it has no further effect after the first
+        time Amazon Glacier deletes the specified vault.
+
+ :type vault_name: str
+        :param vault_name: The name of the vault to delete.
+ """
+ uri = 'vaults/%s' % vault_name
+ return self.make_request('DELETE', uri, ok_responses=(204,))
+
+ def get_vault_notifications(self, vault_name):
+ """
+ This operation retrieves the notification-configuration
+ subresource set on the vault.
+
+ :type vault_name: str
+        :param vault_name: The name of the vault.
+ """
+ uri = 'vaults/%s/notification-configuration' % vault_name
+ return self.make_request('GET', uri)
+
+ def set_vault_notifications(self, vault_name, notification_config):
+ """
+        This operation sets the notification-configuration
+        subresource on the vault.
+
+ :type vault_name: str
+        :param vault_name: The name of the vault.
+
+ :type notification_config: dict
+ :param notification_config: A Python dictionary containing
+ an SNS Topic and events for which you want Amazon Glacier
+ to send notifications to the topic. Possible events are:
+
+ * ArchiveRetrievalCompleted - occurs when a job that was
+ initiated for an archive retrieval is completed.
+ * InventoryRetrievalCompleted - occurs when a job that was
+ initiated for an inventory retrieval is completed.
+
+ The format of the dictionary is:
+
+ {'SNSTopic': 'mytopic',
+ 'Events': [event1,...]}
+ """
+ uri = 'vaults/%s/notification-configuration' % vault_name
+ json_config = json.dumps(notification_config)
+ return self.make_request('PUT', uri, data=json_config,
+ ok_responses=(204,))
+
+ def delete_vault_notifications(self, vault_name):
+ """
+ This operation deletes the notification-configuration
+ subresource set on the vault.
+
+ :type vault_name: str
+        :param vault_name: The name of the vault.
+ """
+ uri = 'vaults/%s/notification-configuration' % vault_name
+ return self.make_request('DELETE', uri, ok_responses=(204,))
+
+ # Jobs
+
+ def list_jobs(self, vault_name, completed=None, status_code=None,
+ limit=None, marker=None):
+ """
+        This operation lists jobs for a vault, including jobs that
+        are in progress and jobs that have recently finished.
+
+ :type vault_name: str
+ :param vault_name: The name of the vault.
+
+ :type completed: boolean
+ :param completed: Specifies the state of the jobs to return.
+ If a value of True is passed, only completed jobs will
+ be returned. If a value of False is passed, only
+ uncompleted jobs will be returned. If no value is
+ passed, all jobs will be returned.
+
+ :type status_code: string
+ :param status_code: Specifies the type of job status to return.
+ Valid values are: InProgress|Succeeded|Failed. If not
+ specified, jobs with all status codes are returned.
+
+ :type limit: int
+ :param limit: The maximum number of items returned in the
+ response. If you don't specify a value, the List Jobs
+ operation returns up to 1,000 items.
+
+ :type marker: str
+ :param marker: An opaque string used for pagination. marker
+ specifies the job at which the listing of jobs should
+ begin. Get the marker value from a previous List Jobs
+ response. You need only include the marker if you are
+ continuing the pagination of results started in a previous
+ List Jobs request.
+
+ """
+ params = {}
+ if limit:
+ params['limit'] = limit
+ if marker:
+ params['marker'] = marker
+ if status_code:
+ params['statuscode'] = status_code
+ if completed is not None:
+ params['completed'] = 'true' if completed else 'false'
+ uri = 'vaults/%s/jobs' % vault_name
+ return self.make_request('GET', uri, params=params)
+
+ def describe_job(self, vault_name, job_id):
+ """
+ This operation returns information about a job you previously
+ initiated, including the job initiation date, the user who
+ initiated the job, the job status code/message and the Amazon
+ Simple Notification Service (Amazon SNS) topic to notify after
+ Amazon Glacier completes the job.
+
+ :type vault_name: str
+        :param vault_name: The name of the vault.
+
+ :type job_id: str
+ :param job_id: The ID of the job.
+ """
+ uri = 'vaults/%s/jobs/%s' % (vault_name, job_id)
+ return self.make_request('GET', uri, ok_responses=(200,))
+
+ def initiate_job(self, vault_name, job_data):
+ """
+ This operation initiates a job of the specified
+        type. Retrieving an archive or a vault inventory is an
+        asynchronous operation that requires you to initiate a job. It
+        is a two-step process:
+
+ * Initiate a retrieval job.
+ * After the job completes, download the bytes.
+
+ The retrieval is executed asynchronously. When you initiate
+ a retrieval job, Amazon Glacier creates a job and returns a
+ job ID in the response.
+
+ :type vault_name: str
+        :param vault_name: The name of the vault.
+
+ :type job_data: dict
+ :param job_data: A Python dictionary containing the
+ information about the requested job. The dictionary
+ can contain the following attributes:
+
+ * ArchiveId - The ID of the archive you want to retrieve.
+ This field is required only if the Type is set to
+ archive-retrieval.
+ * Description - The optional description for the job.
+ * Format - When initiating a job to retrieve a vault
+ inventory, you can optionally add this parameter to
+ specify the output format. Valid values are: CSV|JSON.
+ * SNSTopic - The Amazon SNS topic ARN where Amazon Glacier
+ sends a notification when the job is completed and the
+ output is ready for you to download.
+ * Type - The job type. Valid values are:
+ archive-retrieval|inventory-retrieval
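+
+            For example, a sketch of the job data for an archive
+            retrieval ('my-archive-id' is a placeholder):
+
+                {'Type': 'archive-retrieval',
+                 'ArchiveId': 'my-archive-id',
+                 'Description': 'Backup restore'}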
+ """
+ uri = 'vaults/%s/jobs' % vault_name
+ response_headers = [('x-amz-job-id', u'JobId'),
+ ('Location', u'Location')]
+ json_job_data = json.dumps(job_data)
+ return self.make_request('POST', uri, data=json_job_data,
+ ok_responses=(202,),
+ response_headers=response_headers)
+
+ def get_job_output(self, vault_name, job_id, byte_range=None):
+ """
+ This operation downloads the output of the job you initiated
+ using Initiate a Job. Depending on the job type
+ you specified when you initiated the job, the output will be
+ either the content of an archive or a vault inventory.
+
+ You can download all the job output or download a portion of
+ the output by specifying a byte range. In the case of an
+ archive retrieval job, depending on the byte range you
+ specify, Amazon Glacier returns the checksum for the portion
+ of the data. You can compute the checksum on the client and
+ verify that the values match to ensure the portion you
+ downloaded is the correct data.
+
+        :type vault_name: str
+        :param vault_name: The name of the vault.
+
+ :type job_id: str
+ :param job_id: The ID of the job.
+
+        :type byte_range: tuple
+        :param byte_range: A tuple of integers specifying the slice
+            (in bytes) of the archive you want to receive.
+ """
+ response_headers = [('x-amz-sha256-tree-hash', u'TreeHash'),
+ ('Content-Range', u'ContentRange'),
+ ('Content-Type', u'ContentType')]
+ headers = None
+ if byte_range:
+ headers = {'Range': 'bytes=%d-%d' % byte_range}
+ uri = 'vaults/%s/jobs/%s/output' % (vault_name, job_id)
+ response = self.make_request('GET', uri, headers=headers,
+ ok_responses=(200, 206),
+ response_headers=response_headers)
+ return response
+
+ # Archives
+
+ def upload_archive(self, vault_name, archive,
+ linear_hash, tree_hash, description=None):
+ """
+ This operation adds an archive to a vault. For a successful
+ upload, your data is durably persisted. In response, Amazon
+ Glacier returns the archive ID in the x-amz-archive-id header
+ of the response. You should save the archive ID returned so
+ that you can access the archive later.
+
+        :type vault_name: str
+        :param vault_name: The name of the vault.
+
+ :type archive: bytes
+ :param archive: The data to upload.
+
+ :type linear_hash: str
+ :param linear_hash: The SHA256 checksum (a linear hash) of the
+ payload.
+
+ :type tree_hash: str
+ :param tree_hash: The user-computed SHA256 tree hash of the
+ payload. For more information on computing the
+ tree hash, see http://goo.gl/u7chF.
+
+ :type description: str
+ :param description: An optional description of the archive.
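+
+        Example (a sketch; the hashes are computed with
+        :func:`boto.glacier.writer.compute_hashes_from_fileobj`):
+
+            with open('data.bin', 'rb') as f:
+                linear_hash, tree_hash = compute_hashes_from_fileobj(f)
+                f.seek(0)
+                layer1.upload_archive('myvault', f, linear_hash,
+                                      tree_hash)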
+ """
+ response_headers = [('x-amz-archive-id', u'ArchiveId'),
+ ('Location', u'Location'),
+ ('x-amz-sha256-tree-hash', u'TreeHash')]
+ uri = 'vaults/%s/archives' % vault_name
+ try:
+ content_length = str(len(archive))
+ except TypeError:
+ # If a file like object is provided, try to retrieve
+ # the file size via fstat.
+ content_length = str(os.fstat(archive.fileno()).st_size)
+ headers = {'x-amz-content-sha256': linear_hash,
+ 'x-amz-sha256-tree-hash': tree_hash,
+ 'Content-Length': content_length}
+ if description:
+ headers['x-amz-archive-description'] = description
+ return self.make_request('POST', uri, headers=headers,
+ data=archive, ok_responses=(201,),
+ response_headers=response_headers)
+
+ def delete_archive(self, vault_name, archive_id):
+ """
+ This operation deletes an archive from a vault.
+
+ :type vault_name: str
+        :param vault_name: The name of the vault.
+
+ :type archive_id: str
+ :param archive_id: The ID for the archive to be deleted.
+ """
+ uri = 'vaults/%s/archives/%s' % (vault_name, archive_id)
+ return self.make_request('DELETE', uri, ok_responses=(204,))
+
+ # Multipart
+
+ def initiate_multipart_upload(self, vault_name, part_size,
+ description=None):
+ """
+ Initiate a multipart upload. Amazon Glacier creates a
+        multipart upload resource and returns its ID. You use this
+ ID in subsequent multipart upload operations.
+
+ :type vault_name: str
+ :param vault_name: The name of the vault.
+
+ :type description: str
+ :param description: An optional description of the archive.
+
+ :type part_size: int
+        :param part_size: The size of each part except the last, in bytes.
+            The part size must be a megabyte (1024 KB) multiplied by
+            a power of 2. The minimum allowable part size is 1 MB and
+            the maximum is 4 GB.
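+
+            For example, 1048576 (1 MB), 2097152 (2 MB), and
+            4194304 (4 MB) are all valid part sizes, while
+            3145728 (3 MB) is not, since 3 is not a power of 2.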
+ """
+ response_headers = [('x-amz-multipart-upload-id', u'UploadId'),
+ ('Location', u'Location')]
+ headers = {'x-amz-part-size': str(part_size)}
+ if description:
+ headers['x-amz-archive-description'] = description
+ uri = 'vaults/%s/multipart-uploads' % vault_name
+ response = self.make_request('POST', uri, headers=headers,
+ ok_responses=(201,),
+ response_headers=response_headers)
+ return response
+
+ def complete_multipart_upload(self, vault_name, upload_id,
+ sha256_treehash, archive_size):
+ """
+ Call this to inform Amazon Glacier that all of the archive parts
+ have been uploaded and Amazon Glacier can now assemble the archive
+ from the uploaded parts.
+
+ :type vault_name: str
+ :param vault_name: The name of the vault.
+
+ :type upload_id: str
+ :param upload_id: The unique ID associated with this upload
+ operation.
+
+ :type sha256_treehash: str
+ :param sha256_treehash: The SHA256 tree hash of the entire
+            archive. It is the tree hash of the SHA256 tree hashes of the
+ individual parts. If the value you specify in the request
+ does not match the SHA256 tree hash of the final assembled
+ archive as computed by Amazon Glacier, Amazon Glacier
+ returns an error and the request fails.
+
+ :type archive_size: int
+ :param archive_size: The total size, in bytes, of the entire
+ archive. This value should be the sum of all the sizes of
+ the individual parts that you uploaded.
+ """
+ response_headers = [('x-amz-archive-id', u'ArchiveId'),
+ ('Location', u'Location')]
+ headers = {'x-amz-sha256-tree-hash': sha256_treehash,
+ 'x-amz-archive-size': str(archive_size)}
+ uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
+ response = self.make_request('POST', uri, headers=headers,
+ ok_responses=(201,),
+ response_headers=response_headers)
+ return response
+
+ def abort_multipart_upload(self, vault_name, upload_id):
+ """
+ Call this to abort a multipart upload identified by the upload ID.
+
+ :type vault_name: str
+ :param vault_name: The name of the vault.
+
+ :type upload_id: str
+ :param upload_id: The unique ID associated with this upload
+ operation.
+ """
+ uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
+ return self.make_request('DELETE', uri, ok_responses=(204,))
+
+ def list_multipart_uploads(self, vault_name, limit=None, marker=None):
+ """
+ Lists in-progress multipart uploads for the specified vault.
+
+ :type vault_name: str
+ :param vault_name: The name of the vault.
+
+ :type limit: int
+ :param limit: The maximum number of items returned in the
+ response. If you don't specify a value, the operation
+ returns up to 1,000 items.
+
+ :type marker: str
+ :param marker: An opaque string used for pagination. marker
+ specifies the item at which the listing should
+ begin. Get the marker value from a previous
+ response. You need only include the marker if you are
+ continuing the pagination of results started in a previous
+ request.
+ """
+ params = {}
+ if limit:
+ params['limit'] = limit
+ if marker:
+ params['marker'] = marker
+ uri = 'vaults/%s/multipart-uploads' % vault_name
+ return self.make_request('GET', uri, params=params)
+
+ def list_parts(self, vault_name, upload_id, limit=None, marker=None):
+ """
+        Lists the parts of an archive that have been uploaded in a
+        specific multipart upload.
+
+ :type vault_name: str
+ :param vault_name: The name of the vault.
+
+ :type upload_id: str
+ :param upload_id: The unique ID associated with this upload
+ operation.
+
+ :type limit: int
+ :param limit: The maximum number of items returned in the
+ response. If you don't specify a value, the operation
+ returns up to 1,000 items.
+
+ :type marker: str
+ :param marker: An opaque string used for pagination. marker
+ specifies the item at which the listing should
+ begin. Get the marker value from a previous
+ response. You need only include the marker if you are
+ continuing the pagination of results started in a previous
+ request.
+ """
+ params = {}
+ if limit:
+ params['limit'] = limit
+ if marker:
+ params['marker'] = marker
+ uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
+ return self.make_request('GET', uri, params=params)
+
+ def upload_part(self, vault_name, upload_id, linear_hash,
+ tree_hash, byte_range, part_data):
+ """
+        Uploads a part of an archive.
+
+ :type vault_name: str
+ :param vault_name: The name of the vault.
+
+ :type linear_hash: str
+ :param linear_hash: The SHA256 checksum (a linear hash) of the
+ payload.
+
+ :type tree_hash: str
+ :param tree_hash: The user-computed SHA256 tree hash of the
+ payload. For more information on computing the
+ tree hash, see http://goo.gl/u7chF.
+
+ :type upload_id: str
+ :param upload_id: The unique ID associated with this upload
+ operation.
+
+ :type byte_range: tuple of ints
+        :param byte_range: Identifies the range of bytes in the assembled
+ archive that will be uploaded in this part.
+
+ :type part_data: bytes
+ :param part_data: The data to be uploaded for the part
+ """
+ headers = {'x-amz-content-sha256': linear_hash,
+ 'x-amz-sha256-tree-hash': tree_hash,
+ 'Content-Range': 'bytes %d-%d/*' % byte_range}
+ response_headers = [('x-amz-sha256-tree-hash', u'TreeHash')]
+ uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
+ return self.make_request('PUT', uri, headers=headers,
+ data=part_data, ok_responses=(204,),
+ response_headers=response_headers)
diff --git a/boto/glacier/layer2.py b/boto/glacier/layer2.py
new file mode 100644
index 0000000..e519ca8
--- /dev/null
+++ b/boto/glacier/layer2.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from .layer1 import Layer1
+from .vault import Vault
+
+
+class Layer2(object):
+ """
+ Provides a more pythonic and friendly interface to Glacier based on Layer1
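+
+    Example (a sketch; credentials resolve exactly as they do for
+    :class:`boto.glacier.layer1.Layer1`):
+
+        glacier = Layer2(region_name='us-east-1')
+        vault = glacier.get_vault('myvault')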
+ """
+
+ def __init__(self, *args, **kwargs):
+ # Accept a passed in layer1, mainly to allow easier testing
+ if "layer1" in kwargs:
+ self.layer1 = kwargs["layer1"]
+ else:
+ self.layer1 = Layer1(*args, **kwargs)
+
+ def create_vault(self, name):
+ """Creates a vault.
+
+ :type name: str
+ :param name: The name of the vault
+
+ :rtype: :class:`boto.glacier.vault.Vault`
+ :return: A Vault object representing the vault.
+ """
+ self.layer1.create_vault(name)
+ return self.get_vault(name)
+
+ def delete_vault(self, name):
+ """Delete a vault.
+
+ This operation deletes a vault. Amazon Glacier will delete a
+ vault only if there are no archives in the vault as per the
+ last inventory and there have been no writes to the vault
+ since the last inventory. If either of these conditions is not
+ satisfied, the vault deletion fails (that is, the vault is not
+ removed) and Amazon Glacier returns an error.
+
+        This operation is idempotent: you can send the same request
+        multiple times and it has no further effect after the first
+        time Amazon Glacier deletes the specified vault.
+
+ :type vault_name: str
+ :param vault_name: The name of the vault to delete.
+ """
+ return self.layer1.delete_vault(name)
+
+ def get_vault(self, name):
+ """
+        Get an object representing a named vault from Glacier. The
+        vault's metadata is fetched with a Describe Vault request, so
+        the vault must already exist.
+
+ :type name: str
+ :param name: The name of the vault
+
+ :rtype: :class:`boto.glacier.vault.Vault`
+ :return: A Vault object representing the vault.
+ """
+ response_data = self.layer1.describe_vault(name)
+ return Vault(self.layer1, response_data)
+
+ def list_vaults(self):
+ """
+ Return a list of all vaults associated with the account ID.
+
+ :rtype: List of :class:`boto.glacier.vault.Vault`
+ :return: A list of Vault objects.
+ """
+ response_data = self.layer1.list_vaults()
+ return [Vault(self.layer1, rd) for rd in response_data['VaultList']]
diff --git a/boto/glacier/response.py b/boto/glacier/response.py
new file mode 100644
index 0000000..57bd4e4
--- /dev/null
+++ b/boto/glacier/response.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import json
+
+class GlacierResponse(dict):
+ """
+ Represents a response from Glacier layer1. It acts as a dictionary
+ containing the combined keys received via JSON in the body (if
+ supplied) and headers.
+ """
+ def __init__(self, http_response, response_headers):
+ self.http_response = http_response
+ self.status = http_response.status
+ self[u'RequestId'] = http_response.getheader('x-amzn-requestid')
+ if response_headers:
+ for header_name, item_name in response_headers:
+ self[item_name] = http_response.getheader(header_name)
+ if http_response.getheader('Content-Type') == 'application/json':
+ body = json.loads(http_response.read())
+ self.update(body)
+ size = http_response.getheader('Content-Length', None)
+ if size is not None:
+ self.size = size
+
+ def read(self, amt=None):
+ "Reads and returns the response body, or up to the next amt bytes."
+ return self.http_response.read(amt)
diff --git a/boto/glacier/vault.py b/boto/glacier/vault.py
new file mode 100644
index 0000000..4d0e072
--- /dev/null
+++ b/boto/glacier/vault.py
@@ -0,0 +1,271 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from .job import Job
+from .writer import Writer, compute_hashes_from_fileobj
+from .concurrent import ConcurrentUploader
+import os.path
+
+_MEGABYTE = 1024 * 1024
+
+
+class Vault(object):
+
+ DefaultPartSize = 4 * _MEGABYTE
+ SingleOperationThreshold = 100 * _MEGABYTE
+
+ ResponseDataElements = (('VaultName', 'name', None),
+ ('VaultARN', 'arn', None),
+ ('CreationDate', 'creation_date', None),
+ ('LastInventoryDate', 'last_inventory_date', None),
+ ('SizeInBytes', 'size', 0),
+ ('NumberOfArchives', 'number_of_archives', 0))
+
+ def __init__(self, layer1, response_data=None):
+ self.layer1 = layer1
+ if response_data:
+ for response_name, attr_name, default in self.ResponseDataElements:
+ value = response_data[response_name]
+ if isinstance(value, unicode):
+ value = value.encode('utf8')
+ setattr(self, attr_name, value)
+ else:
+ for response_name, attr_name, default in self.ResponseDataElements:
+ setattr(self, attr_name, default)
+
+ def __repr__(self):
+ return 'Vault("%s")' % self.arn
+
+ def delete(self):
+ """
+        Deletes this vault. WARNING!
+ """
+ self.layer1.delete_vault(self.name)
+
+ def upload_archive(self, filename):
+ """
+        Adds an archive to a vault. For archives greater than 100 MB
+        the multipart upload will be used.
+
+        :type filename: str
+        :param filename: A filename to upload
+
+ :rtype: str
+ :return: The archive id of the newly created archive
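+
+        Example (a sketch):
+
+            vault = glacier.get_vault('myvault')
+            archive_id = vault.upload_archive('/tmp/backup.tar.gz')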
+ """
+ if os.path.getsize(filename) > self.SingleOperationThreshold:
+ return self.create_archive_from_file(filename)
+ return self._upload_archive_single_operation(filename)
+
+ def _upload_archive_single_operation(self, filename):
+ """
+        Adds an archive to a vault in a single operation. It is
+        recommended for archives smaller than 100 MB.
+
+        :type filename: str
+        :param filename: A filename to upload
+
+ :rtype: str
+ :return: The archive id of the newly created archive
+ """
+ with open(filename, 'rb') as fileobj:
+ linear_hash, tree_hash = compute_hashes_from_fileobj(fileobj)
+ fileobj.seek(0)
+ response = self.layer1.upload_archive(self.name, fileobj,
+ linear_hash, tree_hash)
+ return response['ArchiveId']
+
+ def create_archive_writer(self, part_size=DefaultPartSize,
+ description=None):
+ """
+ Create a new archive and begin a multi-part upload to it.
+ Returns a file-like object to which the data for the archive
+        can be written. Once all the data is written the file-like
+        object should be closed; you can then call the get_archive_id
+ method on it to get the ID of the created archive.
+
+ :type part_size: int
+ :param part_size: The part size for the multipart upload.
+
+        :rtype: :class:`boto.glacier.writer.Writer`
+        :return: A Writer object to which the archive data
+            should be written.
+ """
+ response = self.layer1.initiate_multipart_upload(self.name,
+ part_size,
+ description)
+ return Writer(self, response['UploadId'], part_size=part_size)
+
+ def create_archive_from_file(self, filename=None, file_obj=None):
+ """
+ Create a new archive and upload the data from the given file
+ or file-like object.
+
+ :type filename: str
+ :param filename: A filename to upload
+
+ :type file_obj: file
+ :param file_obj: A file-like object to upload
+
+ :rtype: str
+ :return: The archive id of the newly created archive
+ """
+ if not file_obj:
+ file_obj = open(filename, "rb")
+
+ writer = self.create_archive_writer()
+ while True:
+ data = file_obj.read(self.DefaultPartSize)
+ if not data:
+ break
+ writer.write(data)
+ writer.close()
+ return writer.get_archive_id()
+
+ def concurrent_create_archive_from_file(self, filename):
+ """
+ Create a new archive from a file and upload the given
+ file.
+
+ This is a convenience method around the
+ :class:`boto.glacier.concurrent.ConcurrentUploader`
+ class. This method will perform a multipart upload
+ and upload the parts of the file concurrently.
+
+ :type filename: str
+ :param filename: A filename to upload
+
+        :raises: `boto.glacier.exceptions.UploadArchiveError` if an
+            error occurs during the upload process.
+
+ :rtype: str
+ :return: The archive id of the newly created archive
+
+ """
+ uploader = ConcurrentUploader(self.layer1, self.name)
+ archive_id = uploader.upload(filename)
+ return archive_id
+
+ def retrieve_archive(self, archive_id, sns_topic=None,
+ description=None):
+ """
+        Initiate an archive retrieval job to download the data from an
+        archive. You will need to wait for the notification from
+        Amazon (via SNS) before you can actually download the data;
+        this typically takes around four hours.
+
+ :type archive_id: str
+ :param archive_id: The id of the archive
+
+ :type description: str
+ :param description: An optional description for the job.
+
+ :type sns_topic: str
+ :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
+ sends notification when the job is completed and the output
+ is ready for you to download.
+
+ :rtype: :class:`boto.glacier.job.Job`
+ :return: A Job object representing the retrieval job.
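+
+        Example (a sketch; the data can only be downloaded once the
+        job completes, typically about four hours later):
+
+            job = vault.retrieve_archive(archive_id)
+            # ... after the SNS notification arrives:
+            job.download_to_file('/tmp/restored.bin')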
+ """
+ job_data = {'Type': 'archive-retrieval',
+ 'ArchiveId': archive_id}
+ if sns_topic is not None:
+ job_data['SNSTopic'] = sns_topic
+ if description is not None:
+ job_data['Description'] = description
+
+ response = self.layer1.initiate_job(self.name, job_data)
+ return self.get_job(response['JobId'])
+
+ def retrieve_inventory(self, sns_topic=None,
+ description=None):
+ """
+        Initiate an inventory retrieval job to list the items in the
+        vault. You will need to wait for the notification from
+        Amazon (via SNS) before you can actually download the data;
+        this typically takes around four hours.
+
+ :type description: str
+ :param description: An optional description for the job.
+
+ :type sns_topic: str
+ :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
+ sends notification when the job is completed and the output
+ is ready for you to download.
+
+        :rtype: str
+        :return: The ID of the newly created inventory retrieval job.
+ """
+ job_data = {'Type': 'inventory-retrieval'}
+ if sns_topic is not None:
+ job_data['SNSTopic'] = sns_topic
+ if description is not None:
+ job_data['Description'] = description
+
+ response = self.layer1.initiate_job(self.name, job_data)
+ return response['JobId']
+
+ def delete_archive(self, archive_id):
+ """
+ This operation deletes an archive from the vault.
+
+ :type archive_id: str
+ :param archive_id: The ID for the archive to be deleted.
+ """
+ return self.layer1.delete_archive(self.name, archive_id)
+
+ def get_job(self, job_id):
+ """
+ Get an object representing a job in progress.
+
+ :type job_id: str
+ :param job_id: The ID of the job
+
+        :rtype: :class:`boto.glacier.job.Job`
+ :return: A Job object representing the job.
+ """
+ response_data = self.layer1.describe_job(self.name, job_id)
+ return Job(self, response_data)
+
+ def list_jobs(self, completed=None, status_code=None):
+ """
+ Return a list of Job objects related to this vault.
+
+ :type completed: boolean
+ :param completed: Specifies the state of the jobs to return.
+ If a value of True is passed, only completed jobs will
+ be returned. If a value of False is passed, only
+ uncompleted jobs will be returned. If no value is
+ passed, all jobs will be returned.
+
+ :type status_code: string
+ :param status_code: Specifies the type of job status to return.
+ Valid values are: InProgress|Succeeded|Failed. If not
+ specified, jobs with all status codes are returned.
+
+        :rtype: list of :class:`boto.glacier.job.Job`
+ :return: A list of Job objects related to this vault.
+ """
+ response_data = self.layer1.list_jobs(self.name, completed,
+ status_code)
+ return [Job(self, jd) for jd in response_data['JobList']]
diff --git a/boto/glacier/writer.py b/boto/glacier/writer.py
new file mode 100644
index 0000000..42db994
--- /dev/null
+++ b/boto/glacier/writer.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+# Tree hash implementation from Aaron Brady bradya@gmail.com
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import hashlib
+import math
+
+
+_ONE_MEGABYTE = 1024 * 1024
+
+
+def chunk_hashes(bytestring, chunk_size=_ONE_MEGABYTE):
+ chunk_count = int(math.ceil(len(bytestring) / float(chunk_size)))
+ hashes = []
+ for i in xrange(chunk_count):
+ start = i * chunk_size
+ end = (i + 1) * chunk_size
+ hashes.append(hashlib.sha256(bytestring[start:end]).digest())
+ return hashes
+
+
+def tree_hash(fo):
+ """
+ Given the list of hashes of each 1MB chunk (from chunk_hashes),
+ this hashes together adjacent pairs until a single root hash
+ remains, forming a tree of hashes.
+ """
+ hashes = []
+ hashes.extend(fo)
+ while len(hashes) > 1:
+ new_hashes = []
+ while True:
+ if len(hashes) > 1:
+ first = hashes.pop(0)
+ second = hashes.pop(0)
+ new_hashes.append(hashlib.sha256(first + second).digest())
+ elif len(hashes) == 1:
+ only = hashes.pop(0)
+ new_hashes.append(only)
+ else:
+ break
+ hashes.extend(new_hashes)
+ return hashes[0]
+
+
+def compute_hashes_from_fileobj(fileobj, chunk_size=1024 * 1024):
+ """Compute the linear and tree hash from a fileobj.
+
+ This function will compute the linear/tree hash of a fileobj
+ in a single pass through the fileobj.
+
+ :param fileobj: A file like object.
+
+ :param chunk_size: The size of the chunks to use for the tree
+ hash. This is also the buffer size used to read from
+ `fileobj`.
+
+ :rtype: tuple
+ :return: A tuple of (linear_hash, tree_hash). Both hashes
+ are returned in hex.
+
+ """
+ linear_hash = hashlib.sha256()
+ chunks = []
+ chunk = fileobj.read(chunk_size)
+ while chunk:
+ linear_hash.update(chunk)
+ chunks.append(hashlib.sha256(chunk).digest())
+ chunk = fileobj.read(chunk_size)
+ return linear_hash.hexdigest(), bytes_to_hex(tree_hash(chunks))
+
+
+def bytes_to_hex(str):
+ return ''.join(["%02x" % ord(x) for x in str]).strip()
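A quick sketch exercising the hashing helpers above on an in-memory payload; chunk_hashes() digests each 1MB slice and tree_hash() pairs those digests until a single root remains:

    import StringIO
    data = 'x' * (2 * _ONE_MEGABYTE + 5)   # just over 2 MB
    print bytes_to_hex(tree_hash(chunk_hashes(data)))
    # The same result in a single pass over a file-like object:
    linear, tree = compute_hashes_from_fileobj(StringIO.StringIO(data))
    print linear, tree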
+
+
+class Writer(object):
+ """
+ Presents a file-like object for writing to an Amazon Glacier
+ archive. The data is written using the multi-part upload API.
+ """
+ def __init__(self, vault, upload_id, part_size):
+ self.vault = vault
+ self.upload_id = upload_id
+ self.part_size = part_size
+
+ self._buffer_size = 0
+ self._uploaded_size = 0
+ self._buffer = []
+ self._tree_hashes = []
+
+ self.archive_location = None
+ self.closed = False
+
+ def send_part(self):
+ buf = "".join(self._buffer)
+ # Put back any data remaining over the part size into the
+ # buffer
+ if len(buf) > self.part_size:
+ self._buffer = [buf[self.part_size:]]
+ self._buffer_size = len(self._buffer[0])
+ else:
+ self._buffer = []
+ self._buffer_size = 0
+ # The part we will send
+ part = buf[:self.part_size]
+ # Create a request and sign it
+ part_tree_hash = tree_hash(chunk_hashes(part))
+ self._tree_hashes.append(part_tree_hash)
+
+ hex_tree_hash = bytes_to_hex(part_tree_hash)
+ linear_hash = hashlib.sha256(part).hexdigest()
+ content_range = (self._uploaded_size,
+ (self._uploaded_size + len(part)) - 1)
+ response = self.vault.layer1.upload_part(self.vault.name,
+ self.upload_id,
+ linear_hash,
+ hex_tree_hash,
+ content_range, part)
+ self._uploaded_size += len(part)
+
+ def write(self, str):
+ if self.closed:
+ raise ValueError("I/O operation on closed file")
+ if str == "":
+ return
+ self._buffer.append(str)
+ self._buffer_size += len(str)
+ while self._buffer_size > self.part_size:
+ self.send_part()
+
+ def close(self):
+ if self.closed:
+ return
+ if self._buffer_size > 0:
+ self.send_part()
+ # Complete the multipart Glacier upload
+ hex_tree_hash = bytes_to_hex(tree_hash(self._tree_hashes))
+ response = self.vault.layer1.complete_multipart_upload(self.vault.name,
+ self.upload_id,
+ hex_tree_hash,
+ self._uploaded_size)
+ self.archive_id = response['ArchiveId']
+ self.closed = True
+
+ def get_archive_id(self):
+ self.close()
+ return self.archive_id
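A hedged usage sketch for the Writer, assuming it is obtained through Vault.create_archive_writer() as in this revision (file name and part size are illustrative):

    writer = vault.create_archive_writer(part_size=4 * _ONE_MEGABYTE)
    fp = open('backup.tar', 'rb')
    chunk = fp.read(_ONE_MEGABYTE)
    while chunk:
        writer.write(chunk)   # buffered; a part is sent per part_size bytes
        chunk = fp.read(_ONE_MEGABYTE)
    fp.close()
    writer.close()            # completes the multipart upload
    print writer.get_archive_id()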
diff --git a/boto/gs/acl.py b/boto/gs/acl.py
index a7517c6..7df3e58 100755
--- a/boto/gs/acl.py
+++ b/boto/gs/acl.py
@@ -25,6 +25,7 @@
ACCESS_CONTROL_LIST = 'AccessControlList'
ALL_AUTHENTICATED_USERS = 'AllAuthenticatedUsers'
ALL_USERS = 'AllUsers'
+DISPLAY_NAME = 'DisplayName'
DOMAIN = 'Domain'
EMAIL_ADDRESS = 'EmailAddress'
ENTRY = 'Entry'
@@ -93,19 +94,19 @@
self.entries.entry_list.append(entry)
def startElement(self, name, attrs, connection):
- if name == OWNER:
+ if name.lower() == OWNER.lower():
self.owner = User(self)
return self.owner
- elif name == ENTRIES:
+ elif name.lower() == ENTRIES.lower():
self.entries = Entries(self)
return self.entries
else:
return None
def endElement(self, name, value, connection):
- if name == OWNER:
+ if name.lower() == OWNER.lower():
pass
- elif name == ENTRIES:
+ elif name.lower() == ENTRIES.lower():
pass
else:
setattr(self, name, value)
@@ -137,7 +138,7 @@
return '<Entries: %s>' % ', '.join(entries_repr)
def startElement(self, name, attrs, connection):
- if name == ENTRY:
+ if name.lower() == ENTRY.lower():
entry = Entry(self)
self.entry_list.append(entry)
return entry
@@ -145,7 +146,7 @@
return None
def endElement(self, name, value, connection):
- if name == ENTRY:
+ if name.lower() == ENTRY.lower():
pass
else:
setattr(self, name, value)
@@ -172,21 +173,36 @@
return '<%s: %s>' % (self.scope.__repr__(), self.permission.__repr__())
def startElement(self, name, attrs, connection):
- if name == SCOPE:
- if not TYPE in attrs:
+ if name.lower() == SCOPE.lower():
+ # The following if statement used to look like this:
+ # if not TYPE in attrs:
+ # which caused problems because older versions of the
+ # AttributesImpl class in the xml.sax library neglected to include
+ # a __contains__() method (which Python calls to implement the
+ # 'in' operator). So when you use the in operator, like the if
+ # statement above, Python invokes the __getitem__() method with
+ # index 0, which raises an exception. More recent versions of
+ # xml.sax include the __contains__() method, rendering the in
+ # operator functional. The work-around here is to formulate the
+ # if statement as below, which is the legal way to query
+ # AttributesImpl for containment (and is also how the added
+ # __contains__() method works). At one time gsutil disallowed
+ # xmlplus-based parsers, until this more specific problem was
+ # determined.
+ if TYPE not in attrs:
raise InvalidAclError('Missing "%s" in "%s" part of ACL' %
(TYPE, SCOPE))
self.scope = Scope(self, attrs[TYPE])
return self.scope
- elif name == PERMISSION:
+ elif name.lower() == PERMISSION.lower():
pass
else:
return None
def endElement(self, name, value, connection):
- if name == SCOPE:
+ if name.lower() == SCOPE.lower():
pass
- elif name == PERMISSION:
+ elif name.lower() == PERMISSION.lower():
value = value.strip()
if not value in SupportedPermissions:
raise InvalidAclError('Invalid Permission "%s"' % value)
@@ -203,15 +219,17 @@
class Scope:
- # Map from Scope type to list of allowed sub-elems.
+ # Map from Scope type.lower() to lower-cased list of allowed sub-elems.
ALLOWED_SCOPE_TYPE_SUB_ELEMS = {
- ALL_AUTHENTICATED_USERS : [],
- ALL_USERS : [],
- GROUP_BY_DOMAIN : [DOMAIN],
- GROUP_BY_EMAIL : [EMAIL_ADDRESS, NAME],
- GROUP_BY_ID : [ID, NAME],
- USER_BY_EMAIL : [EMAIL_ADDRESS, NAME],
- USER_BY_ID : [ID, NAME]
+ ALL_AUTHENTICATED_USERS.lower() : [],
+ ALL_USERS.lower() : [],
+ GROUP_BY_DOMAIN.lower() : [DOMAIN.lower()],
+ GROUP_BY_EMAIL.lower() : [
+ DISPLAY_NAME.lower(), EMAIL_ADDRESS.lower(), NAME.lower()],
+ GROUP_BY_ID.lower() : [DISPLAY_NAME.lower(), ID.lower(), NAME.lower()],
+ USER_BY_EMAIL.lower() : [
+ DISPLAY_NAME.lower(), EMAIL_ADDRESS.lower(), NAME.lower()],
+ USER_BY_ID.lower() : [DISPLAY_NAME.lower(), ID.lower(), NAME.lower()]
}
def __init__(self, parent, type=None, id=None, name=None,
@@ -222,7 +240,7 @@
self.id = id
self.domain = domain
self.email_address = email_address
- if not self.ALLOWED_SCOPE_TYPE_SUB_ELEMS.has_key(self.type):
+ if self.type.lower() not in self.ALLOWED_SCOPE_TYPE_SUB_ELEMS:
raise InvalidAclError('Invalid %s %s "%s" ' %
(SCOPE, TYPE, self.type))
@@ -240,36 +258,40 @@
return '<%s>' % self.type
def startElement(self, name, attrs, connection):
- if not name in self.ALLOWED_SCOPE_TYPE_SUB_ELEMS[self.type]:
+ if (not name.lower() in
+ self.ALLOWED_SCOPE_TYPE_SUB_ELEMS[self.type.lower()]):
raise InvalidAclError('Element "%s" not allowed in %s %s "%s" ' %
(name, SCOPE, TYPE, self.type))
return None
def endElement(self, name, value, connection):
value = value.strip()
- if name == DOMAIN:
+ if name.lower() == DOMAIN.lower():
self.domain = value
- elif name == EMAIL_ADDRESS:
+ elif name.lower() == EMAIL_ADDRESS.lower():
self.email_address = value
- elif name == ID:
+ elif name.lower() == ID.lower():
self.id = value
- elif name == NAME:
+ elif name.lower() == NAME.lower():
self.name = value
else:
setattr(self, name, value)
def to_xml(self):
s = '<%s type="%s">' % (SCOPE, self.type)
- if self.type == ALL_AUTHENTICATED_USERS or self.type == ALL_USERS:
+ if (self.type.lower() == ALL_AUTHENTICATED_USERS.lower()
+ or self.type.lower() == ALL_USERS.lower()):
pass
- elif self.type == GROUP_BY_DOMAIN:
+ elif self.type.lower() == GROUP_BY_DOMAIN.lower():
s += '<%s>%s</%s>' % (DOMAIN, self.domain, DOMAIN)
- elif self.type == GROUP_BY_EMAIL or self.type == USER_BY_EMAIL:
+ elif (self.type.lower() == GROUP_BY_EMAIL.lower()
+ or self.type.lower() == USER_BY_EMAIL.lower()):
s += '<%s>%s</%s>' % (EMAIL_ADDRESS, self.email_address,
EMAIL_ADDRESS)
if self.name:
s += '<%s>%s</%s>' % (NAME, self.name, NAME)
- elif self.type == GROUP_BY_ID or self.type == USER_BY_ID:
+ elif (self.type.lower() == GROUP_BY_ID.lower()
+ or self.type.lower() == USER_BY_ID.lower()):
s += '<%s>%s</%s>' % (ID, self.id, ID)
if self.name:
s += '<%s>%s</%s>' % (NAME, self.name, NAME)
diff --git a/boto/gs/bucket.py b/boto/gs/bucket.py
index eae1be3..d86b89d 100644
--- a/boto/gs/bucket.py
+++ b/boto/gs/bucket.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -24,25 +24,32 @@
from boto.exception import InvalidAclError
from boto.gs.acl import ACL, CannedACLStrings
from boto.gs.acl import SupportedPermissions as GSPermissions
+from boto.gs.cors import Cors
from boto.gs.key import Key as GSKey
from boto.s3.acl import Policy
from boto.s3.bucket import Bucket as S3Bucket
import xml.sax
-# constants for default object ACL and standard acl in http query args
+# constants for http query args
DEF_OBJ_ACL = 'defaultObjectAcl'
STANDARD_ACL = 'acl'
+CORS_ARG = 'cors'
class Bucket(S3Bucket):
+ WebsiteBody = ('<?xml version="1.0" encoding="UTF-8"?>\n'
+ '<WebsiteConfiguration>%s%s</WebsiteConfiguration>')
+ WebsiteMainPageFragment = '<MainPageSuffix>%s</MainPageSuffix>'
+ WebsiteErrorFragment = '<NotFoundPage>%s</NotFoundPage>'
def __init__(self, connection=None, name=None, key_class=GSKey):
super(Bucket, self).__init__(connection, name, key_class)
def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
- """sets or changes a bucket's acl. We include a version_id argument
- to support a polymorphic interface for callers, however,
- version_id is not relevant for Google Cloud Storage buckets
- and is therefore ignored here."""
+ """sets or changes a bucket's or key's acl (depending on whether a
+ key_name was passed). We include a version_id argument to support a
+ polymorphic interface for callers, however, version_id is not relevant
+ for Google Cloud Storage buckets and is therefore ignored here."""
+ key_name = key_name or ''
if isinstance(acl_or_str, Policy):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
@@ -51,18 +58,19 @@
self.set_canned_acl(acl_or_str, key_name, headers=headers)
def set_def_acl(self, acl_or_str, key_name='', headers=None):
- """sets or changes a bucket's default object acl"""
+ """sets or changes a bucket's default object acl. The key_name argument
+ is ignored since keys have no default ACL property."""
if isinstance(acl_or_str, Policy):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
- self.set_def_xml_acl(acl_or_str.to_xml(), key_name, headers=headers)
+ self.set_def_xml_acl(acl_or_str.to_xml(), '', headers=headers)
else:
- self.set_def_canned_acl(acl_or_str, key_name, headers=headers)
+ self.set_def_canned_acl(acl_or_str, '', headers=headers)
def get_acl_helper(self, key_name, headers, query_args):
"""provides common functionality for get_acl() and get_def_acl()"""
response = self.connection.make_request('GET', self.name, key_name,
- query_args=query_args,
+ query_args=query_args,
headers=headers)
body = response.read()
if response.status == 200:
@@ -76,17 +84,18 @@
def get_acl(self, key_name='', headers=None, version_id=None):
"""returns a bucket's acl. We include a version_id argument
- to support a polymorphic interface for callers, however,
- version_id is not relevant for Google Cloud Storage buckets
- and is therefore ignored here."""
+ to support a polymorphic interface for callers, however,
+ version_id is not relevant for Google Cloud Storage buckets
+ and is therefore ignored here."""
return self.get_acl_helper(key_name, headers, STANDARD_ACL)
def get_def_acl(self, key_name='', headers=None):
- """returns a bucket's default object acl"""
- return self.get_acl_helper(key_name, headers, DEF_OBJ_ACL)
+ """returns a bucket's default object acl. The key_name argument is
+ ignored since keys have no default ACL property."""
+ return self.get_acl_helper('', headers, DEF_OBJ_ACL)
def set_canned_acl_helper(self, acl_str, key_name, headers, query_args):
- """provides common functionality for set_canned_acl() and
+ """provides common functionality for set_canned_acl() and
set_def_canned_acl()"""
assert acl_str in CannedACLStrings
@@ -102,26 +111,56 @@
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
- def set_canned_acl(self, acl_str, key_name='', headers=None,
+ def set_canned_acl(self, acl_str, key_name='', headers=None,
version_id=None):
- """sets or changes a bucket's acl to a predefined (canned) value.
- We include a version_id argument to support a polymorphic
- interface for callers, however, version_id is not relevant for
- Google Cloud Storage buckets and is therefore ignored here."""
- return self.set_canned_acl_helper(acl_str, key_name, headers,
+ """sets or changes a bucket's acl to a predefined (canned) value.
+ We include a version_id argument to support a polymorphic
+ interface for callers, however, version_id is not relevant for
+ Google Cloud Storage buckets and is therefore ignored here."""
+ return self.set_canned_acl_helper(acl_str, key_name, headers,
STANDARD_ACL)
def set_def_canned_acl(self, acl_str, key_name='', headers=None):
- """sets or changes a bucket's default object acl to a predefined
- (canned) value"""
- return self.set_canned_acl_helper(acl_str, key_name, headers,
+ """sets or changes a bucket's default object acl to a predefined
+ (canned) value. The key_name argument is ignored since keys have no
+ default ACL property."""
+ return self.set_canned_acl_helper(acl_str, '', headers,
query_args=DEF_OBJ_ACL)
def set_def_xml_acl(self, acl_str, key_name='', headers=None):
- """sets or changes a bucket's default object"""
- return self.set_xml_acl(acl_str, key_name, headers,
+ """sets or changes a bucket's default object ACL. The key_name argument
+ is ignored since keys have no default ACL property."""
+ return self.set_xml_acl(acl_str, '', headers,
query_args=DEF_OBJ_ACL)
+ def get_cors(self, headers=None):
+ """returns a bucket's CORS XML"""
+ response = self.connection.make_request('GET', self.name,
+ query_args=CORS_ARG,
+ headers=headers)
+ body = response.read()
+ if response.status == 200:
+ # Success - parse XML and return Cors object.
+ cors = Cors()
+ h = handler.XmlHandler(cors, self)
+ xml.sax.parseString(body, h)
+ return cors
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def set_cors(self, cors, headers=None):
+ """sets or changes a bucket's CORS XML."""
+ cors_xml = cors.encode('UTF-8')
+ response = self.connection.make_request('PUT', self.name,
+ data=cors_xml,
+ query_args=CORS_ARG,
+ headers=headers)
+ body = response.read()
+ if response.status != 200:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
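Illustrative use of the new accessors (bucket name invented; conn is a GSConnection). Note the asymmetry: get_cors() returns a parsed Cors object, while set_cors() expects the raw XML string:

    bucket = conn.get_bucket('my-bucket')
    cors = bucket.get_cors()
    bucket.set_cors(cors.to_xml())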
# Method with same signature as boto.s3.bucket.Bucket.add_email_grant(),
# to allow polymorphic treatment at application layer.
def add_email_grant(self, permission, email_address,
@@ -130,16 +169,16 @@
Convenience method that provides a quick way to add an email grant
to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
- and then PUT's the new ACL back to GS.
-
+ and then PUT's the new ACL back to GCS.
+
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, FULL_CONTROL).
-
+
:type email_address: string
:param email_address: The email address associated with the GS
account you are granting the permission to.
-
+
:type recursive: boolean
:param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
@@ -163,18 +202,19 @@
# to allow polymorphic treatment at application layer.
def add_user_grant(self, permission, user_id, recursive=False, headers=None):
"""
- Convenience method that provides a quick way to add a canonical user grant to a bucket.
- This method retrieves the current ACL, creates a new grant based on the parameters
- passed in, adds that grant to the ACL and then PUTs the new ACL back to GS.
-
+ Convenience method that provides a quick way to add a canonical user
+ grant to a bucket. This method retrieves the current ACL, creates a new
+ grant based on the parameters passed in, adds that grant to the ACL and
+ then PUTs the new ACL back to GCS.
+
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ|WRITE|FULL_CONTROL)
-
+
:type user_id: string
- :param user_id: The canonical user id associated with the GS account you are granting
- the permission to.
-
+ :param user_id: The canonical user id associated with the GS account
+ you are granting the permission to.
+
:type recursive: bool
:param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
@@ -200,7 +240,7 @@
Convenience method that provides a quick way to add an email group
grant to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
- then PUT's the new ACL back to GS.
+ then PUT's the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
@@ -243,8 +283,7 @@
xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging/>'
self.set_subresource('logging', xml_str, headers=headers)
- def enable_logging(self, target_bucket, target_prefix=None, headers=None,
- canned_acl=None):
+ def enable_logging(self, target_bucket, target_prefix=None, headers=None):
if isinstance(target_bucket, Bucket):
target_bucket = target_bucket.name
xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging>'
@@ -252,9 +291,98 @@
if target_prefix:
xml_str = (xml_str +
'<LogObjectPrefix>%s</LogObjectPrefix>' % target_prefix)
- if canned_acl:
- xml_str = (xml_str +
- '<PredefinedAcl>%s</PredefinedAcl>' % canned_acl)
xml_str = xml_str + '</Logging>'
self.set_subresource('logging', xml_str, headers=headers)
+
+ def configure_website(self, main_page_suffix=None, error_key=None,
+ headers=None):
+ """
+ Configure this bucket to act as a website
+
+ :type main_page_suffix: str
+ :param main_page_suffix: Suffix that is appended to a request for a
+ "directory" on the website endpoint (e.g. if the suffix
+ is index.html and you make a request to
+ samplebucket/images/ the data that is returned will
+ be for the object with the key name images/index.html).
+ The suffix must not be empty and must not include a
+ slash character. This parameter is optional and the
+ property is disabled if excluded.
+
+
+ :type error_key: str
+ :param error_key: The object key name to use when a 404
+ error occurs. This parameter is optional and the
+ property is disabled if excluded.
+
+ """
+ if main_page_suffix:
+ main_page_frag = self.WebsiteMainPageFragment % main_page_suffix
+ else:
+ main_page_frag = ''
+
+ if error_key:
+ error_frag = self.WebsiteErrorFragment % error_key
+ else:
+ error_frag = ''
+
+ body = self.WebsiteBody % (main_page_frag, error_frag)
+ response = self.connection.make_request('PUT', self.name, data=body,
+ query_args='websiteConfig',
+ headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def get_website_configuration(self, headers=None):
+ """
+ Returns the current status of website configuration on the bucket.
+
+ :rtype: dict
+ :returns: A dictionary containing a Python representation
+ of the XML response from GCS. The overall structure is:
+
+ * WebsiteConfiguration
+ * MainPageSuffix: suffix that is appended to a request that
+ is for a "directory" on the website endpoint
+ * NotFoundPage: name of an object to serve when site visitors
+ encounter a 404
+ """
+ return self.get_website_configuration_with_xml(headers)[0]
+
+ def get_website_configuration_with_xml(self, headers=None):
+ """
+ Returns the current status of website configuration on the bucket as
+ unparsed XML.
+
+ :rtype: 2-Tuple
+ :returns: 2-tuple containing:
+ 1) A dictionary containing a Python representation
+ of the XML response from GCS. The overall structure is:
+ * WebsiteConfiguration
+ * MainPageSuffix: suffix that is appended to a request that
+ is for a "directory" on the website endpoint
+ * NotFoundPage: name of an object to serve when site visitors
+ encounter a 404
+ 2) unparsed XML describing the bucket's website configuration.
+ """
+ response = self.connection.make_request('GET', self.name,
+ query_args='websiteConfig', headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+
+ if response.status != 200:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e, body
+
+ def delete_website_configuration(self, headers=None):
+ self.configure_website(headers=headers)
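A hedged round-trip sketch for the website-configuration methods above; connection setup and names are illustrative:

    from boto.gs.connection import GSConnection
    conn = GSConnection()
    bucket = conn.get_bucket('www-example')
    bucket.configure_website(main_page_suffix='index.html',
                             error_key='404.html')
    config = bucket.get_website_configuration()
    # config wraps MainPageSuffix/NotFoundPage as parsed from the XML.
    bucket.delete_website_configuration()  # PUTs an empty configuration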
diff --git a/boto/gs/connection.py b/boto/gs/connection.py
index cf79ed7..20b0220 100755
--- a/boto/gs/connection.py
+++ b/boto/gs/connection.py
@@ -37,11 +37,13 @@
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=DefaultHost, debug=0, https_connection_factory=None,
- calling_format=SubdomainCallingFormat(), path='/'):
+ calling_format=SubdomainCallingFormat(), path='/',
+ suppress_consec_slashes=True):
S3Connection.__init__(self, gs_access_key_id, gs_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory, calling_format, path,
- "google", Bucket)
+ "google", Bucket,
+ suppress_consec_slashes=suppress_consec_slashes)
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None):
diff --git a/boto/gs/cors.py b/boto/gs/cors.py
new file mode 100755
index 0000000..e5dd918
--- /dev/null
+++ b/boto/gs/cors.py
@@ -0,0 +1,169 @@
+# Copyright 2012 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import types
+from boto.exception import InvalidCorsError
+from xml.sax import handler
+
+# Relevant tags for the CORS XML document.
+CORS_CONFIG = 'CorsConfig'
+CORS = 'Cors'
+ORIGINS = 'Origins'
+ORIGIN = 'Origin'
+METHODS = 'Methods'
+METHOD = 'Method'
+HEADERS = 'ResponseHeaders'
+HEADER = 'ResponseHeader'
+MAXAGESEC = 'MaxAgeSec'
+
+class Cors(handler.ContentHandler):
+ """Encapsulates the CORS configuration XML document"""
+ def __init__(self):
+ # List of CORS elements found within a CorsConfig element.
+ self.cors = []
+ # List of collections (e.g. Methods, ResponseHeaders, Origins)
+ # found within a CORS element. We use a list of lists here
+ # instead of a dictionary because the collections need to be
+ # preserved in the order in which they appear in the input XML
+ # document (and Python dictionary keys are inherently unordered).
+ # The elements on this list are two element tuples of the form
+ # (collection name, [list of collection contents]).
+ self.collections = []
+ # Lists of elements within a collection. Again a list is needed to
+ # preserve ordering but also because the same element may appear
+ # multiple times within a collection.
+ self.elements = []
+ # Dictionary mapping supported collection names to element types
+ # which may be contained within each.
+ self.legal_collections = {
+ ORIGINS : [ORIGIN],
+ METHODS : [METHOD],
+ HEADERS : [HEADER],
+ MAXAGESEC: []
+ }
+ # List of supported element types within any collection, used for
+ # checking validity of a parsed element name.
+ self.legal_elements = [ORIGIN, METHOD, HEADER]
+
+ self.parse_level = 0
+ self.collection = None
+ self.element = None
+
+ def validateParseLevel(self, tag, level):
+ """Verify parse level for a given tag."""
+ if self.parse_level != level:
+ raise InvalidCorsError('Invalid tag %s at parse level %d: ' %
+ (tag, self.parse_level))
+
+ def startElement(self, name, attrs, connection):
+ """SAX XML logic for parsing new element found."""
+ if name == CORS_CONFIG:
+ self.validateParseLevel(name, 0)
+ self.parse_level += 1
+ elif name == CORS:
+ self.validateParseLevel(name, 1)
+ self.parse_level += 1
+ elif name in self.legal_collections:
+ self.validateParseLevel(name, 2)
+ self.parse_level += 1
+ self.collection = name
+ elif name in self.legal_elements:
+ self.validateParseLevel(name, 3)
+ # Make sure this tag is found inside a collection tag.
+ if self.collection is None:
+ raise InvalidCorsError('Tag %s found outside collection' % name)
+ # Make sure this tag is allowed for the current collection tag.
+ if name not in self.legal_collections[self.collection]:
+ raise InvalidCorsError('Tag %s not allowed in %s collection' %
+ (name, self.collection))
+ self.element = name
+ else:
+ raise InvalidCorsError('Unsupported tag ' + name)
+
+ def endElement(self, name, value, connection):
+ """SAX XML logic for parsing new element found."""
+ if name == CORS_CONFIG:
+ self.validateParseLevel(name, 1)
+ self.parse_level -= 1
+ elif name == CORS:
+ self.validateParseLevel(name, 2)
+ self.parse_level -= 1
+ # Terminating a CORS element, save any collections we found
+ # and re-initialize collections list.
+ self.cors.append(self.collections)
+ self.collections = []
+ elif name in self.legal_collections:
+ self.validateParseLevel(name, 3)
+ if name != self.collection:
+ raise InvalidCorsError('Mismatched start and end tags (%s/%s)' %
+ (self.collection, name))
+ self.parse_level -= 1
+ if not self.legal_collections[name]:
+ # If this collection doesn't contain any sub-elements, store
+ # a tuple of name and this tag's element value.
+ self.collections.append((name, value.strip()))
+ else:
+ # Otherwise, we're terminating a collection of sub-elements,
+ # so store a tuple of name and list of contained elements.
+ self.collections.append((name, self.elements))
+ self.elements = []
+ self.collection = None
+ elif name in self.legal_elements:
+ self.validateParseLevel(name, 3)
+ # Make sure this tag is found inside a collection tag.
+ if self.collection is None:
+ raise InvalidCorsError('Tag %s found outside collection' % name)
+ # Make sure this end tag is allowed for the current collection tag.
+ if name not in self.legal_collections[self.collection]:
+ raise InvalidCorsError('Tag %s not allowed in %s collection' %
+ (name, self.collection))
+ if name != self.element:
+ raise InvalidCorsError('Mismatched start and end tags (%s/%s)' %
+ (self.element, name))
+ # Terminating an element tag, add it to the list of elements
+ # for the current collection.
+ self.elements.append((name, value.strip()))
+ self.element = None
+ else:
+ raise InvalidCorsError('Unsupported end tag ' + name)
+
+ def to_xml(self):
+ """Convert CORS object into XML string representation."""
+ s = '<' + CORS_CONFIG + '>'
+ for collections in self.cors:
+ s += '<' + CORS + '>'
+ for (collection, elements_or_value) in collections:
+ assert collection is not None
+ s += '<' + collection + '>'
+ # If collection elements has type string, append atomic value,
+ # otherwise, append sequence of values in named tags.
+ if isinstance(elements_or_value, types.StringTypes):
+ s += elements_or_value
+ else:
+ for (name, value) in elements_or_value:
+ assert name is not None
+ assert value is not None
+ s += '<' + name + '>' + value + '</' + name + '>'
+ s += '</' + collection + '>'
+ s += '</' + CORS + '>'
+ s += '</' + CORS_CONFIG + '>'
+ return s
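A small round-trip sketch for the handler above; the sample document is invented but uses only tags the parser accepts:

    import xml.sax
    from boto import handler
    from boto.gs.cors import Cors

    doc = ('<CorsConfig><Cors>'
           '<Origins><Origin>http://example.com</Origin></Origins>'
           '<Methods><Method>GET</Method></Methods>'
           '<MaxAgeSec>1800</MaxAgeSec>'
           '</Cors></CorsConfig>')
    cors = Cors()
    xml.sax.parseString(doc, handler.XmlHandler(cors, None))
    print cors.to_xml()   # emits an equivalent CorsConfig document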
diff --git a/boto/gs/key.py b/boto/gs/key.py
index de6e6f4..3c76cc5 100644
--- a/boto/gs/key.py
+++ b/boto/gs/key.py
@@ -19,7 +19,9 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+import os
import StringIO
+from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
class Key(S3Key):
@@ -110,7 +112,7 @@
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
- res_upload_handler=None):
+ res_upload_handler=None, size=None, rewind=False):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
@@ -158,38 +160,90 @@
:param res_upload_handler: If provided, this handler will perform the
upload.
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read from
+ the file pointer (fp). This is useful when uploading
+ a file in multiple parts where you are splitting the
+ file up into different ranges to be uploaded. If not
+ specified, the default behaviour is to read all bytes
+ from the file pointer. Fewer bytes may be available.
+ Notes:
+
+ 1. The "size" parameter currently cannot be used when
+ a resumable upload handler is given but is still
+ useful for uploading part of a file as implemented
+ by the parent class.
+ 2. At present Google Cloud Storage does not support
+ multipart uploads.
+
+ :type rewind: bool
+ :param rewind: (optional) If True, the file pointer (fp) will be
+ rewound to the start before any bytes are read from
+ it. The default behaviour is False which reads from
+ the current position of the file pointer (fp).
+
+ :rtype: int
+ :return: The number of bytes written to the key.
+
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
"""
provider = self.bucket.connection.provider
+ if res_upload_handler and size:
+ # could use size instead of file_length if provided but...
+ raise BotoClientError('"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
+
+ if rewind:
+ # caller requests reading from beginning of fp.
+ fp.seek(0, os.SEEK_SET)
+ else:
+ spos = fp.tell()
+ fp.seek(0, os.SEEK_END)
+ if fp.tell() == spos:
+ fp.seek(0, os.SEEK_SET)
+ if fp.tell() != spos:
+ # Raise an exception as this is likely a programming error
+ # whereby there is data before the fp but nothing after it.
+ fp.seek(spos)
+ raise AttributeError(
+ 'fp is at EOF. Use rewind option or seek() to data start.')
+ # seek back to the correct position.
+ fp.seek(spos)
+
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket != None:
- if not md5:
- md5 = self.compute_md5(fp)
+ if size:
+ self.size = size
else:
- # Even if md5 is provided, still need to set size of content.
- fp.seek(0, 2)
- self.size = fp.tell()
- fp.seek(0)
- self.md5 = md5[0]
- self.base64md5 = md5[1]
+ # Even if md5 is provided, we still need the size, so
+ # calculate it based on bytes to end of content.
+ spos = fp.tell()
+ fp.seek(0, os.SEEK_END)
+ self.size = fp.tell() - spos
+ fp.seek(spos)
+ size = self.size
+
if self.name == None:
+ if md5 == None:
+ md5 = self.compute_md5(fp, size)
+ self.md5 = md5[0]
+ self.base64md5 = md5[1]
+
self.name = self.md5
if not replace:
- k = self.bucket.lookup(self.name)
- if k:
+ if self.bucket.lookup(self.name):
return
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
- self.send_file(fp, headers, cb, num_cb)
+ self.send_file(fp, headers, cb, num_cb, size=size)
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
@@ -241,6 +295,10 @@
:param res_upload_handler: If provided, this handler will perform the
upload.
"""
+ # Clear out any previously computed md5 hashes, since we are setting the content.
+ self.md5 = None
+ self.base64md5 = None
+
fp = open(filename, 'rb')
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler)
@@ -291,6 +349,11 @@
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
"""
+
+ # Clear out any previously computed md5 hashes, since we are setting the content.
+ self.md5 = None
+ self.base64md5 = None
+
if isinstance(s, unicode):
s = s.encode("utf-8")
fp = StringIO.StringIO(s)
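A sketch of the new size/rewind parameters in action (file and key names invented; bucket is a gs Bucket). The first call uploads only a 5 MB slice from the current position; the second rewinds and sends the whole file:

    key = bucket.new_key('parts/part-0')
    fp = open('big.bin', 'rb')
    key.set_contents_from_file(fp, size=5 * 1024 * 1024)
    key2 = bucket.new_key('whole')
    key2.set_contents_from_file(fp, rewind=True)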
diff --git a/boto/gs/resumable_upload_handler.py b/boto/gs/resumable_upload_handler.py
index a60d91d..decdb5c 100644
--- a/boto/gs/resumable_upload_handler.py
+++ b/boto/gs/resumable_upload_handler.py
@@ -34,6 +34,10 @@
from boto.exception import InvalidUriError
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
"""
Handler for Google Cloud Storage resumable uploads. See
@@ -144,15 +148,12 @@
"""
parse_result = urlparse.urlparse(uri)
if (parse_result.scheme.lower() not in ['http', 'https'] or
- not parse_result.netloc or not parse_result.query):
- raise InvalidUriError('Invalid tracker URI (%s)' % uri)
- qdict = cgi.parse_qs(parse_result.query)
- if not qdict or not 'upload_id' in qdict:
+ not parse_result.netloc):
raise InvalidUriError('Invalid tracker URI (%s)' % uri)
self.tracker_uri = uri
self.tracker_uri_host = parse_result.netloc
- self.tracker_uri_path = '%s/?%s' % (parse_result.netloc,
- parse_result.query)
+ self.tracker_uri_path = '%s?%s' % (
+ parse_result.path, parse_result.query)
self.server_has_bytes = 0
def get_tracker_uri(self):
@@ -204,7 +205,13 @@
"""
resp = self._query_server_state(conn, file_length)
if resp.status == 200:
- return (0, file_length) # Completed upload.
+ # To handle the boundary condition where the server has the complete
+ # file, we return (server_start, file_length-1). That way the
+ # calling code can always simply read up through server_end. (If we
+ # didn't handle this boundary condition here, the caller would have
+ # to check whether server_end == file_length and read one fewer byte
+ # in that case.)
+ return (0, file_length - 1) # Completed upload.
if resp.status != 308:
# This means the server didn't have any state for the given
# upload ID, which can happen (for example) if the caller saved
@@ -298,7 +305,7 @@
self._save_tracker_uri_to_file()
def _upload_file_bytes(self, conn, http_conn, fp, file_length,
- total_bytes_uploaded, cb, num_cb):
+ total_bytes_uploaded, cb, num_cb, md5sum):
"""
Makes one attempt to upload file bytes, using an existing resumable
upload connection.
@@ -309,6 +316,8 @@
"""
buf = fp.read(self.BUFFER_SIZE)
if cb:
+ # The cb_count represents the number of full buffers to send between
+ # cb executions.
if num_cb > 2:
cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)
elif num_cb < 0:
@@ -324,9 +333,13 @@
# 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
put_headers = {}
if file_length:
- range_header = self._build_content_range_header(
- '%d-%d' % (total_bytes_uploaded, file_length - 1),
- file_length)
+ if total_bytes_uploaded == file_length:
+ range_header = self._build_content_range_header(
+ '*', file_length)
+ else:
+ range_header = self._build_content_range_header(
+ '%d-%d' % (total_bytes_uploaded, file_length - 1),
+ file_length)
put_headers['Content-Range'] = range_header
# Set Content-Length to the total bytes we'll send with this PUT.
put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
@@ -343,6 +356,7 @@
http_conn.set_debuglevel(0)
while buf:
http_conn.send(buf)
+ md5sum.update(buf)
total_bytes_uploaded += len(buf)
if cb:
i += 1
@@ -378,7 +392,7 @@
(resp.status, resp.reason), disposition)
def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,
- num_cb):
+ num_cb, md5sum):
"""
Attempts a resumable upload.
@@ -394,7 +408,29 @@
(server_start, server_end) = (
self._query_server_pos(conn, file_length))
self.server_has_bytes = server_start
- key=key
+
+ if server_end:
+ # If the server already has some of the content, we need to
+ # update the md5 with the bytes that have already been
+ # uploaded to ensure we get a complete hash in the end.
+ if conn.debug >= 1:
+ print 'Catching up md5 for resumed upload'
+ fp.seek(0)
+ # Read local file's bytes through position server has. For
+ # example, if server has (0, 3) we want to read 3-0+1=4 bytes.
+ bytes_to_go = server_end + 1
+ while bytes_to_go:
+ chunk = fp.read(min(key.BufferSize, bytes_to_go))
+ if not chunk:
+ raise ResumableUploadException(
+ 'Hit end of file during resumable upload md5 '
+ 'catchup. This should not happen under\n'
+ 'normal circumstances, as it indicates the '
+ 'server has more bytes of this transfer\nthan'
+ ' the current file size. Restarting upload.',
+ ResumableTransferDisposition.START_OVER)
+ md5sum.update(chunk)
+ bytes_to_go -= len(chunk)
+
if conn.debug >= 1:
print 'Resuming transfer.'
except ResumableUploadException, e:
@@ -410,17 +446,7 @@
if self.upload_start_point is None:
self.upload_start_point = server_end
- if server_end == file_length:
- # Boundary condition: complete file was already uploaded (e.g.,
- # user interrupted a previous upload attempt after the upload
- # completed but before the gsutil tracker file was deleted). Set
- # total_bytes_uploaded to server_end so we'll attempt to upload
- # no more bytes but will still make final HTTP request and get
- # back the response (which contains the etag we need to compare
- # at the end).
- total_bytes_uploaded = server_end
- else:
- total_bytes_uploaded = server_end + 1
+ total_bytes_uploaded = server_end + 1
fp.seek(total_bytes_uploaded)
conn = key.bucket.connection
@@ -437,14 +463,16 @@
# and can report that progress on next attempt.
try:
return self._upload_file_bytes(conn, http_conn, fp, file_length,
- total_bytes_uploaded, cb, num_cb)
+ total_bytes_uploaded, cb, num_cb, md5sum)
except (ResumableUploadException, socket.error):
resp = self._query_server_state(conn, file_length)
if resp.status == 400:
raise ResumableUploadException('Got 400 response from server '
'state query after failed resumable upload attempt. This '
- 'can happen if the file size changed between upload '
- 'attempts', ResumableTransferDisposition.ABORT)
+ 'can happen for various reasons, including specifying an '
+ 'invalid request (e.g., an invalid canned ACL) or if the '
+ 'file size changed between upload attempts',
+ ResumableTransferDisposition.ABORT)
else:
raise
finally:
@@ -473,56 +501,116 @@
'(incorrect uploaded object deleted)',
ResumableTransferDisposition.ABORT)
+ def handle_resumable_upload_exception(self, e, debug):
+ if (e.disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS):
+ if debug >= 1:
+ print('Caught non-retryable ResumableUploadException (%s); '
+ 'aborting but retaining tracker file' % e.message)
+ raise
+ elif (e.disposition == ResumableTransferDisposition.ABORT):
+ if debug >= 1:
+ print('Caught non-retryable ResumableUploadException (%s); '
+ 'aborting and removing tracker file' % e.message)
+ self._remove_tracker_file()
+ raise
+ else:
+ if debug >= 1:
+ print('Caught ResumableUploadException (%s) - will retry' %
+ e.message)
+
+ def track_progress_less_iterations(self, server_had_bytes_before_attempt,
+ roll_back_md5=True, debug=0):
+ # At this point we had a retryable failure; see if we made progress.
+ if self.server_has_bytes > server_had_bytes_before_attempt:
+ self.progress_less_iterations = 0 # If progress, reset counter.
+ else:
+ self.progress_less_iterations += 1
+ if roll_back_md5:
+ # Rollback any potential md5sum updates, as we did not
+ # make any progress in this iteration.
+ self.md5sum = self.md5sum_before_attempt
+
+ if self.progress_less_iterations > self.num_retries:
+ # Don't retry any longer in the current process.
+ raise ResumableUploadException(
+ 'Too many resumable upload attempts failed without '
+ 'progress. You might try this upload again later',
+ ResumableTransferDisposition.ABORT_CUR_PROCESS)
+
+ # Use binary exponential backoff to desynchronize client requests
+ sleep_time_secs = random.random() * (2**self.progress_less_iterations)
+ if debug >= 1:
+ print ('Got retryable failure (%d progress-less in a row).\n'
+ 'Sleeping %3.1f seconds before re-trying' %
+ (self.progress_less_iterations, sleep_time_secs))
+ time.sleep(sleep_time_secs)
+
def send_file(self, key, fp, headers, cb=None, num_cb=10):
"""
Upload a file to a key into a bucket on GS, using GS resumable upload
protocol.
-
+
:type key: :class:`boto.s3.key.Key` or subclass
:param key: The Key object to which data is to be uploaded
-
+
:type fp: file-like object
:param fp: The file pointer to upload
-
+
:type headers: dict
:param headers: The headers to pass along with the PUT request
-
+
:type cb: function
:param cb: a callback function that will be called to report progress on
the upload. The callback should accept two integer parameters, the
first representing the number of bytes that have been successfully
transmitted to GS, and the second representing the total number of
bytes that need to be transmitted.
-
+
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer. Providing a negative integer will cause
your callback to be called with each buffer read.
-
+
Raises ResumableUploadException if a problem occurs during the transfer.
"""
if not headers:
headers = {}
+ # If Content-Type header is present and set to None, remove it.
+ # This is gsutil's way of asking boto to refrain from auto-generating
+ # that header.
+ CT = 'Content-Type'
+ if CT in headers and headers[CT] is None:
+ del headers[CT]
fp.seek(0, os.SEEK_END)
file_length = fp.tell()
fp.seek(0)
debug = key.bucket.connection.debug
+ # Compute the MD5 checksum on the fly.
+ self.md5sum = md5()
+
# Use num-retries from constructor if one was provided; else check
# for a value specified in the boto config file; else default to 5.
if self.num_retries is None:
self.num_retries = config.getint('Boto', 'num_retries', 5)
- progress_less_iterations = 0
+ self.progress_less_iterations = 0
while True: # Retry as long as we're making progress.
server_had_bytes_before_attempt = self.server_has_bytes
+ self.md5sum_before_attempt = self.md5sum.copy()
try:
etag = self._attempt_resumable_upload(key, fp, file_length,
- headers, cb, num_cb)
+ headers, cb, num_cb,
+ self.md5sum)
+
+ # Get the final md5 for the uploaded content.
+ hd = self.md5sum.hexdigest()
+ key.md5, key.base64md5 = key.get_md5_from_hexdigest(hd)
+
# Upload succeeded, so remove the tracker file (if we have one).
self._remove_tracker_file()
self._check_final_md5(key, etag)
@@ -540,43 +628,7 @@
# opened the next time an HTTP request is sent).
key.bucket.connection.connection.close()
except ResumableUploadException, e:
- if (e.disposition ==
- ResumableTransferDisposition.ABORT_CUR_PROCESS):
- if debug >= 1:
- print('Caught non-retryable ResumableUploadException '
- '(%s); aborting but retaining tracker file' %
- e.message)
- raise
- elif (e.disposition ==
- ResumableTransferDisposition.ABORT):
- if debug >= 1:
- print('Caught non-retryable ResumableUploadException '
- '(%s); aborting and removing tracker file' %
- e.message)
- self._remove_tracker_file()
- raise
- else:
- if debug >= 1:
- print('Caught ResumableUploadException (%s) - will '
- 'retry' % e.message)
+ self.handle_resumable_upload_exception(e, debug)
- # At this point we had a re-tryable failure; see if made progress.
- if self.server_has_bytes > server_had_bytes_before_attempt:
- progress_less_iterations = 0
- else:
- progress_less_iterations += 1
-
- if progress_less_iterations > self.num_retries:
- # Don't retry any longer in the current process.
- raise ResumableUploadException(
- 'Too many resumable upload attempts failed without '
- 'progress. You might try this upload again later',
- ResumableTransferDisposition.ABORT_CUR_PROCESS)
-
- # Use binary exponential backoff to desynchronize client requests
- sleep_time_secs = random.random() * (2**progress_less_iterations)
- if debug >= 1:
- print ('Got retryable failure (%d progress-less in a row).\n'
- 'Sleeping %3.1f seconds before re-trying' %
- (progress_less_iterations, sleep_time_secs))
- time.sleep(sleep_time_secs)
+ self.track_progress_less_iterations(server_had_bytes_before_attempt,
+ True, debug)
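A hedged sketch of driving the handler through an upload; the tracker file persists the upload URI so a later process can resume where this one left off (paths invented):

    from boto.gs.resumable_upload_handler import ResumableUploadHandler
    res_handler = ResumableUploadHandler(
        tracker_file_name='/tmp/big.tar.tracker', num_retries=6)
    key = bucket.new_key('backups/big.tar')
    fp = open('big.tar', 'rb')
    key.set_contents_from_file(fp, res_upload_handler=res_handler)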
diff --git a/boto/iam/__init__.py b/boto/iam/__init__.py
index 498d736..71cf717 100644
--- a/boto/iam/__init__.py
+++ b/boto/iam/__init__.py
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -23,5 +23,52 @@
# this is here for backward compatibility
# originally, the IAMConnection class was defined here
from connection import IAMConnection
-
-
+from boto.regioninfo import RegionInfo
+
+
+class IAMRegionInfo(RegionInfo):
+
+ def connect(self, **kw_params):
+ """
+ Connect to this Region's endpoint. Returns a connection
+ object pointing to the endpoint associated with this region.
+ You may pass any of the arguments accepted by the connection
+ class's constructor as keyword arguments and they will be
+ passed along to the connection object.
+
+ :rtype: Connection object
+ :return: The connection to this region's endpoint
+ """
+ if self.connection_cls:
+ return self.connection_cls(host=self.endpoint, **kw_params)
+
+
+def regions():
+ """
+ Get all available regions for the IAM service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo` instances
+ """
+ return [IAMRegionInfo(name='universal',
+ endpoint='iam.amazonaws.com',
+ connection_cls=IAMConnection)
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ """
+ Given a valid region name, return a
+ :class:`boto.iam.connection.IAMConnection`.
+
+ :type region_name: str
+ :param region_name: The name of the region to connect to.
+
+ :rtype: :class:`boto.iam.connection.IAMConnection` or ``None``
+ :return: A connection to the given region, or None if an invalid region
+ name is given
+ """
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
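Usage of the new helpers is straightforward; IAM is a global service, hence the single 'universal' pseudo-region (credentials come from the usual boto config):

    import boto.iam
    print [r.name for r in boto.iam.regions()]      # ['universal']
    conn = boto.iam.connect_to_region('universal')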
diff --git a/boto/iam/connection.py b/boto/iam/connection.py
index ae68f33..9827602 100644
--- a/boto/iam/connection.py
+++ b/boto/iam/connection.py
@@ -15,17 +15,27 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+try:
+ import json
+except ImportError:
+ import simplejson as json
import boto
import boto.jsonresponse
+from boto.resultset import ResultSet
from boto.iam.summarymap import SummaryMap
from boto.connection import AWSQueryConnection
-#boto.set_stream_logger('iam')
+
+ASSUME_ROLE_POLICY_DOCUMENT = json.dumps({
+ 'Statement': [{'Principal': {'Service': ['ec2.amazonaws.com']},
+ 'Effect': 'Allow',
+ 'Action': ['sts:AssumeRole']}]})
+
class IAMConnection(AWSQueryConnection):
@@ -35,19 +45,21 @@
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host='iam.amazonaws.com',
debug=0, https_connection_factory=None,
- path='/'):
+ path='/', security_token=None, validate_certs=True):
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory,
- path)
+ path, security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
- return ['iam']
+ #return ['iam']
+ return ['hmac-v4']
def get_response(self, action, params, path='/', parent=None,
- verb='GET', list_marker='Set'):
+ verb='POST', list_marker='Set'):
"""
Utility method to handle calls to IAM and parsing of responses.
"""
@@ -70,26 +82,24 @@
#
# Group methods
#
-
+
def get_all_groups(self, path_prefix='/', marker=None, max_items=None):
"""
List the groups that have the specified path prefix.
:type path_prefix: string
:param path_prefix: If provided, only groups whose paths match
- the provided prefix will be returned.
+ the provided prefix will be returned.
:type marker: string
- :param marker: Use this only when paginating results and only in
- follow-up request after you've received a response
- where the results are truncated. Set this to the
- value of the Marker element in the response you
- just received.
+ :param marker: Use this only when paginating results and only
+ in follow-up request after you've received a response
+ where the results are truncated. Set this to the value of
+ the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
- the maximum number of groups you want in the
- response.
+ the maximum number of groups you want in the response.
"""
params = {}
if path_prefix:
@@ -100,7 +110,7 @@
params['MaxItems'] = max_items
return self.get_response('ListGroups', params,
list_marker='Groups')
-
+
def get_group(self, group_name, marker=None, max_items=None):
"""
Return a list of users that are in the specified group.
@@ -109,24 +119,22 @@
:param group_name: The name of the group whose information should
be returned.
:type marker: string
- :param marker: Use this only when paginating results and only in
- follow-up request after you've received a response
- where the results are truncated. Set this to the
- value of the Marker element in the response you
- just received.
+ :param marker: Use this only when paginating results and only
+ in follow-up request after you've received a response
+ where the results are truncated. Set this to the value of
+ the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
- the maximum number of groups you want in the
- response.
+ the maximum number of groups you want in the response.
"""
- params = {'GroupName' : group_name}
+ params = {'GroupName': group_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('GetGroup', params, list_marker='Users')
-
+
def create_group(self, group_name, path='/'):
"""
Create a group.
@@ -138,8 +146,8 @@
:param path: The path to the group (Optional). Defaults to /.
"""
- params = {'GroupName' : group_name,
- 'Path' : path}
+ params = {'GroupName': group_name,
+ 'Path': path}
return self.get_response('CreateGroup', params)
def delete_group(self, group_name):
@@ -151,7 +159,7 @@
:param group_name: The name of the group to delete.
"""
- params = {'GroupName' : group_name}
+ params = {'GroupName': group_name}
return self.get_response('DeleteGroup', params)
def update_group(self, group_name, new_group_name=None, new_path=None):
@@ -163,14 +171,14 @@
:type new_group_name: string
:param new_group_name: If provided, the name of the group will be
- changed to this name.
+ changed to this name.
:type new_path: string
:param new_path: If provided, the path of the group will be
- changed to this path.
+ changed to this path.
"""
- params = {'GroupName' : group_name}
+ params = {'GroupName': group_name}
if new_group_name:
params['NewGroupName'] = new_group_name
if new_path:
@@ -188,8 +196,8 @@
:param user_name: The user to be added to the group.
"""
- params = {'GroupName' : group_name,
- 'UserName' : user_name}
+ params = {'GroupName': group_name,
+ 'UserName': user_name}
return self.get_response('AddUserToGroup', params)
def remove_user_from_group(self, group_name, user_name):
@@ -203,8 +211,8 @@
:param user_name: The user to remove from the group.
"""
- params = {'GroupName' : group_name,
- 'UserName' : user_name}
+ params = {'GroupName': group_name,
+ 'UserName': user_name}
return self.get_response('RemoveUserFromGroup', params)
def put_group_policy(self, group_name, policy_name, policy_json):
@@ -219,11 +227,11 @@
:type policy_json: string
:param policy_json: The policy document.
-
+
"""
- params = {'GroupName' : group_name,
- 'PolicyName' : policy_name,
- 'PolicyDocument' : policy_json}
+ params = {'GroupName': group_name,
+ 'PolicyName': policy_name,
+ 'PolicyDocument': policy_json}
return self.get_response('PutGroupPolicy', params, verb='POST')
def get_all_group_policies(self, group_name, marker=None, max_items=None):
@@ -234,18 +242,16 @@
:param group_name: The name of the group the policy is associated with.
:type marker: string
- :param marker: Use this only when paginating results and only in
- follow-up request after you've received a response
- where the results are truncated. Set this to the
- value of the Marker element in the response you
- just received.
+ :param marker: Use this only when paginating results and only
+ in follow-up request after you've received a response
+ where the results are truncated. Set this to the value of
+ the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
- the maximum number of groups you want in the
- response.
+ the maximum number of policy names you want in the response.
"""
- params = {'GroupName' : group_name}
+ params = {'GroupName': group_name}
if marker:
params['Marker'] = marker
if max_items:
@@ -262,10 +268,10 @@
:type policy_name: string
:param policy_name: The name of the policy document to get.
-
+
"""
- params = {'GroupName' : group_name,
- 'PolicyName' : policy_name}
+ params = {'GroupName': group_name,
+ 'PolicyName': policy_name}
return self.get_response('GetGroupPolicy', params, verb='POST')
def delete_group_policy(self, group_name, policy_name):
@@ -277,10 +283,10 @@
:type policy_name: string
:param policy_name: The name of the policy document to delete.
-
+
"""
- params = {'GroupName' : group_name,
- 'PolicyName' : policy_name}
+ params = {'GroupName': group_name,
+ 'PolicyName': policy_name}
return self.get_response('DeleteGroupPolicy', params, verb='POST')
def get_all_users(self, path_prefix='/', marker=None, max_items=None):
@@ -289,31 +295,29 @@
:type path_prefix: string
:param path_prefix: If provided, only users whose paths match
- the provided prefix will be returned.
+ the provided prefix will be returned.
:type marker: string
- :param marker: Use this only when paginating results and only in
- follow-up request after you've received a response
- where the results are truncated. Set this to the
- value of the Marker element in the response you
- just received.
+ :param marker: Use this only when paginating results and only
+ in a follow-up request after you've received a response
+ where the results are truncated. Set this to the value of
+ the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
- the maximum number of groups you want in the
- response.
+ the maximum number of users you want in the response.
"""
- params = {'PathPrefix' : path_prefix}
+ params = {'PathPrefix': path_prefix}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListUsers', params, list_marker='Users')
-
+
#
# User methods
#
-
+
def create_user(self, user_name, path='/'):
"""
Create a user.
@@ -323,11 +327,11 @@
:type path: string
:param path: The path in which the user will be created.
- Defaults to /.
+ Defaults to /.
"""
- params = {'UserName' : user_name,
- 'Path' : path}
+ params = {'UserName': user_name,
+ 'Path': path}
return self.get_response('CreateUser', params)
def delete_user(self, user_name):
@@ -341,7 +345,7 @@
:param user_name: The name of the user to delete.
"""
- params = {'UserName' : user_name}
+ params = {'UserName': user_name}
return self.get_response('DeleteUser', params)
def get_user(self, user_name=None):
@@ -353,9 +357,7 @@
:type user_name: string
:param user_name: The name of the user to fetch.
- If not specified, defaults to user making
- request.
-
+ If not specified, defaults to the user making the request.
"""
params = {}
if user_name:
@@ -371,20 +373,20 @@
:type new_user_name: string
:param new_user_name: If provided, the username of the user will be
- changed to this username.
+ changed to this username.
:type new_path: string
:param new_path: If provided, the path of the user will be
- changed to this path.
+ changed to this path.
"""
- params = {'UserName' : user_name}
+ params = {'UserName': user_name}
if new_user_name:
params['NewUserName'] = new_user_name
if new_path:
params['NewPath'] = new_path
return self.get_response('UpdateUser', params)
-
+
def get_all_user_policies(self, user_name, marker=None, max_items=None):
"""
List the names of the policies associated with the specified user.
@@ -393,18 +395,16 @@
:param user_name: The name of the user the policy is associated with.
:type marker: string
- :param marker: Use this only when paginating results and only in
- follow-up request after you've received a response
- where the results are truncated. Set this to the
- value of the Marker element in the response you
- just received.
+ :param marker: Use this only when paginating results and only
+ in a follow-up request after you've received a response
+ where the results are truncated. Set this to the value of
+ the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
- the maximum number of groups you want in the
- response.
+ the maximum number of policy names you want in the response.
"""
- params = {'UserName' : user_name}
+ params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
@@ -424,11 +424,11 @@
:type policy_json: string
:param policy_json: The policy document.
-
+
"""
- params = {'UserName' : user_name,
- 'PolicyName' : policy_name,
- 'PolicyDocument' : policy_json}
+ params = {'UserName': user_name,
+ 'PolicyName': policy_name,
+ 'PolicyDocument': policy_json}
return self.get_response('PutUserPolicy', params, verb='POST')
def get_user_policy(self, user_name, policy_name):
@@ -440,10 +440,10 @@
:type policy_name: string
:param policy_name: The name of the policy document to get.
-
+
"""
- params = {'UserName' : user_name,
- 'PolicyName' : policy_name}
+ params = {'UserName': user_name,
+ 'PolicyName': policy_name}
return self.get_response('GetUserPolicy', params, verb='POST')
def delete_user_policy(self, user_name, policy_name):
@@ -455,10 +455,10 @@
:type policy_name: string
:param policy_name: The name of the policy document to delete.
-
+
"""
- params = {'UserName' : user_name,
- 'PolicyName' : policy_name}
+ params = {'UserName': user_name,
+ 'PolicyName': policy_name}
return self.get_response('DeleteUserPolicy', params, verb='POST')
def get_groups_for_user(self, user_name, marker=None, max_items=None):
@@ -469,29 +469,27 @@
:param user_name: The name of the user to list groups for.
:type marker: string
- :param marker: Use this only when paginating results and only in
- follow-up request after you've received a response
- where the results are truncated. Set this to the
- value of the Marker element in the response you
- just received.
+ :param marker: Use this only when paginating results and only
+ in a follow-up request after you've received a response
+ where the results are truncated. Set this to the value of
+ the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
- the maximum number of groups you want in the
- response.
+ the maximum number of groups you want in the response.
"""
- params = {'UserName' : user_name}
+ params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListGroupsForUser', params,
list_marker='Groups')
-
+
#
# Access Keys
#
-
+
def get_all_access_keys(self, user_name, marker=None, max_items=None):
"""
Get all access keys associated with an account.
@@ -500,18 +498,16 @@
:param user_name: The username of the user
:type marker: string
- :param marker: Use this only when paginating results and only in
- follow-up request after you've received a response
- where the results are truncated. Set this to the
- value of the Marker element in the response you
- just received.
+ :param marker: Use this only when paginating results and only
+ in a follow-up request after you've received a response
+ where the results are truncated. Set this to the value of
+ the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
- the maximum number of groups you want in the
- response.
+ the maximum number of access keys you want in the response.
"""
- params = {'UserName' : user_name}
+ params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
@@ -531,7 +527,7 @@
:param user_name: The username of the user
"""
- params = {'UserName' : user_name}
+ params = {'UserName': user_name}
return self.get_response('CreateAccessKey', params)
def update_access_key(self, access_key_id, status, user_name=None):
@@ -553,8 +549,8 @@
:param user_name: The username of the user (optional).
"""
- params = {'AccessKeyId' : access_key_id,
- 'Status' : status}
+ params = {'AccessKeyId': access_key_id,
+ 'Status': status}
if user_name:
params['UserName'] = user_name
return self.get_response('UpdateAccessKey', params)
@@ -573,7 +569,7 @@
:param user_name: The username of the user
"""
- params = {'AccessKeyId' : access_key_id}
+ params = {'AccessKeyId': access_key_id}
if user_name:
params['UserName'] = user_name
return self.get_response('DeleteAccessKey', params)
@@ -581,7 +577,7 @@
#
# Signing Certificates
#
-
+
def get_all_signing_certs(self, marker=None, max_items=None,
user_name=None):
"""
@@ -591,17 +587,15 @@
on the AWS Access Key ID used to sign the request.
:type marker: string
- :param marker: Use this only when paginating results and only in
- follow-up request after you've received a response
- where the results are truncated. Set this to the
- value of the Marker element in the response you
- just received.
+ :param marker: Use this only when paginating results and only
+ in a follow-up request after you've received a response
+ where the results are truncated. Set this to the value of
+ the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
- the maximum number of groups you want in the
- response.
-
+ the maximum number of signing certificates you want in the response.
+
:type user_name: string
:param user_name: The username of the user
@@ -633,8 +627,8 @@
:type user_name: string
:param user_name: The username of the user
"""
- params = {'CertificateId' : cert_id,
- 'Status' : status}
+ params = {'CertificateId': cert_id,
+ 'Status': status}
if user_name:
params['UserName'] = user_name
return self.get_response('UpdateSigningCertificate', params)
@@ -654,7 +648,7 @@
:param user_name: The username of the user
"""
- params = {'CertificateBody' : cert_body}
+ params = {'CertificateBody': cert_body}
if user_name:
params['UserName'] = user_name
return self.get_response('UploadSigningCertificate', params,
@@ -674,7 +668,7 @@
:param cert_id: The ID of the certificate.
"""
- params = {'CertificateId' : cert_id}
+ params = {'CertificateId': cert_id}
if user_name:
params['UserName'] = user_name
return self.get_response('DeleteSigningCertificate', params)
@@ -682,8 +676,8 @@
#
# Server Certificates
#
-
- def get_all_server_certs(self, path_prefix='/',
+
+ def list_server_certs(self, path_prefix='/',
marker=None, max_items=None):
"""
Lists the server certificates that have the specified path prefix.
@@ -691,20 +685,18 @@
:type path_prefix: string
:param path_prefix: If provided, only certificates whose paths match
- the provided prefix will be returned.
+ the provided prefix will be returned.
:type marker: string
- :param marker: Use this only when paginating results and only in
- follow-up request after you've received a response
- where the results are truncated. Set this to the
- value of the Marker element in the response you
- just received.
+ :param marker: Use this only when paginating results and only
+ in a follow-up request after you've received a response
+ where the results are truncated. Set this to the value of
+ the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
- the maximum number of groups you want in the
- response.
-
+ the maximum number of server certificates you want in the response.
+
"""
params = {}
if path_prefix:
@@ -717,6 +709,10 @@
params,
list_marker='ServerCertificateMetadataList')
+ # Preserves backwards compatibility.
+ # TODO: Look into deprecating this eventually?
+ get_all_server_certs = list_server_certs
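+    # Call sites written against the old name keep working, e.g.
+    # cn.get_all_server_certs() now dispatches to list_server_certs().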
+
def update_server_cert(self, cert_name, new_cert_name=None,
new_path=None):
"""
@@ -724,18 +720,18 @@
:type cert_name: string
:param cert_name: The name of the server certificate that you want
- to update.
+ to update.
:type new_cert_name: string
:param new_cert_name: The new name for the server certificate.
- Include this only if you are updating the
- server certificate's name.
+ Include this only if you are updating the
+ server certificate's name.
:type new_path: string
:param new_path: If provided, the path of the certificate will be
changed to this path.
"""
- params = {'ServerCertificateName' : cert_name}
+ params = {'ServerCertificateName': cert_name}
if new_cert_name:
params['NewServerCertificateName'] = new_cert_name
if new_path:
@@ -752,28 +748,27 @@
:type cert_name: string
:param cert_name: The name for the server certificate. Do not
- include the path in this value.
+ include the path in this value.
:type cert_body: string
:param cert_body: The contents of the public key certificate
- in PEM-encoded format.
+ in PEM-encoded format.
:type private_key: string
:param private_key: The contents of the private key in
- PEM-encoded format.
+ PEM-encoded format.
:type cert_chain: string
:param cert_chain: The contents of the certificate chain. This
- is typically a concatenation of the PEM-encoded
- public key certificates of the chain.
+ is typically a concatenation of the PEM-encoded
+ public key certificates of the chain.
:type path: string
:param path: The path for the server certificate.
-
"""
- params = {'ServerCertificateName' : cert_name,
- 'CertificateBody' : cert_body,
- 'PrivateKey' : private_key}
+ params = {'ServerCertificateName': cert_name,
+ 'CertificateBody': cert_body,
+ 'PrivateKey': private_key}
if cert_chain:
params['CertificateChain'] = cert_chain
if path:
@@ -787,10 +782,10 @@
:type cert_name: string
:param cert_name: The name of the server certificate you want
- to retrieve information about.
-
+ to retrieve information about.
+
"""
- params = {'ServerCertificateName' : cert_name}
+ params = {'ServerCertificateName': cert_name}
return self.get_response('GetServerCertificate', params)
def delete_server_cert(self, cert_name):
@@ -799,16 +794,16 @@
:type cert_name: string
:param cert_name: The name of the server certificate you want
- to delete.
+ to delete.
"""
- params = {'ServerCertificateName' : cert_name}
+ params = {'ServerCertificateName': cert_name}
return self.get_response('DeleteServerCertificate', params)
#
# MFA Devices
#
-
+
def get_all_mfa_devices(self, user_name, marker=None, max_items=None):
"""
Get all MFA devices associated with an account.
@@ -817,19 +812,17 @@
:param user_name: The username of the user
:type marker: string
- :param marker: Use this only when paginating results and only in
- follow-up request after you've received a response
- where the results are truncated. Set this to the
- value of the Marker element in the response you
- just received.
+ :param marker: Use this only when paginating results and only
+ in a follow-up request after you've received a response
+ where the results are truncated. Set this to the value of
+ the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
- the maximum number of groups you want in the
- response.
-
+ the maximum number of MFA devices you want in the response.
+
"""
- params = {'UserName' : user_name}
+ params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
@@ -845,23 +838,23 @@
:type user_name: string
:param user_name: The username of the user
-
+
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
- the MFA device.
+ the MFA device.
:type auth_code_1: string
:param auth_code_1: An authentication code emitted by the device.
:type auth_code_2: string
:param auth_code_2: A subsequent authentication code emitted
- by the device.
+ by the device.
"""
- params = {'UserName' : user_name,
- 'SerialNumber' : serial_number,
- 'AuthenticationCode1' : auth_code_1,
- 'AuthenticationCode2' : auth_code_2}
+ params = {'UserName': user_name,
+ 'SerialNumber': serial_number,
+ 'AuthenticationCode1': auth_code_1,
+ 'AuthenticationCode2': auth_code_2}
return self.get_response('EnableMFADevice', params)
def deactivate_mfa_device(self, user_name, serial_number):
@@ -871,14 +864,14 @@
:type user_name: string
:param user_name: The username of the user
-
+
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
- the MFA device.
+ the MFA device.
"""
- params = {'UserName' : user_name,
- 'SerialNumber' : serial_number}
+ params = {'UserName': user_name,
+ 'SerialNumber': serial_number}
return self.get_response('DeactivateMFADevice', params)
def resync_mfa_device(self, user_name, serial_number,
@@ -888,23 +881,23 @@
:type user_name: string
:param user_name: The username of the user
-
+
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
- the MFA device.
+ the MFA device.
:type auth_code_1: string
:param auth_code_1: An authentication code emitted by the device.
:type auth_code_2: string
:param auth_code_2: A subsequent authentication code emitted
- by the device.
+ by the device.
"""
- params = {'UserName' : user_name,
- 'SerialNumber' : serial_number,
- 'AuthenticationCode1' : auth_code_1,
- 'AuthenticationCode2' : auth_code_2}
+ params = {'UserName': user_name,
+ 'SerialNumber': serial_number,
+ 'AuthenticationCode1': auth_code_1,
+ 'AuthenticationCode2': auth_code_2}
return self.get_response('ResyncMFADevice', params)
#
@@ -914,14 +907,14 @@
def get_login_profiles(self, user_name):
"""
Retrieves the login profile for the specified user.
-
+
:type user_name: string
:param user_name: The username of the user
-
+
"""
- params = {'UserName' : user_name}
+ params = {'UserName': user_name}
return self.get_response('GetLoginProfile', params)
-
+
def create_login_profile(self, user_name, password):
"""
Creates a login profile for the specified user, giving the user the
@@ -934,8 +927,8 @@
:param password: The new password for the user
"""
- params = {'UserName' : user_name,
- 'Password' : password}
+ params = {'UserName': user_name,
+ 'Password': password}
return self.get_response('CreateLoginProfile', params)
def delete_login_profile(self, user_name):
@@ -946,7 +939,7 @@
:param user_name: The name of the user whose login profile you want to delete.
"""
- params = {'UserName' : user_name}
+ params = {'UserName': user_name}
return self.get_response('DeleteLoginProfile', params)
def update_login_profile(self, user_name, password):
@@ -960,10 +953,10 @@
:param password: The new password for the user
"""
- params = {'UserName' : user_name,
- 'Password' : password}
+ params = {'UserName': user_name,
+ 'Password': password}
return self.get_response('UpdateLoginProfile', params)
-
+
def create_account_alias(self, alias):
"""
Creates a new alias for the AWS account.
@@ -972,11 +965,11 @@
http://goo.gl/ToB7G
:type alias: string
- :param alias: The alias to attach to the account.
+ :param alias: The alias to attach to the account.
"""
params = {'AccountAlias': alias}
return self.get_response('CreateAccountAlias', params)
-
+
def delete_account_alias(self, alias):
"""
Deletes an alias for the AWS account.
@@ -989,14 +982,14 @@
"""
params = {'AccountAlias': alias}
return self.get_response('DeleteAccountAlias', params)
-
+
def get_account_alias(self):
"""
Get the alias for the current account.
This is referred to in the docs as list_account_aliases,
but it seems you can only have one account alias currently.
-
+
For more information on account id aliases, please see
http://goo.gl/ToB7G
"""
@@ -1023,10 +1016,306 @@
This returns a summary of usage and quota information for the
IAM entities in the current account, as a SummaryMap.
-
+
For more information, please see the GetAccountSummary action
in the IAM API reference.
"""
return self.get_object('GetAccountSummary', {}, SummaryMap)
-
+ #
+ # IAM Roles
+ #
+
+ def add_role_to_instance_profile(self, instance_profile_name, role_name):
+ """
+ Adds the specified role to the specified instance profile.
+
+ :type instance_profile_name: string
+ :param instance_profile_name: Name of the instance profile to update.
+
+ :type role_name: string
+ :param role_name: Name of the role to add.
+ """
+ return self.get_response('AddRoleToInstanceProfile',
+ {'InstanceProfileName': instance_profile_name,
+ 'RoleName': role_name})
+
+ def create_instance_profile(self, instance_profile_name, path=None):
+ """
+ Creates a new instance profile.
+
+ :type instance_profile_name: string
+ :param instance_profile_name: Name of the instance profile to create.
+
+ :type path: string
+ :param path: The path to the instance profile.
+ """
+ params = {'InstanceProfileName': instance_profile_name}
+ if path is not None:
+ params['Path'] = path
+ return self.get_response('CreateInstanceProfile', params)
+
+ def create_role(self, role_name, assume_role_policy_document=None, path=None):
+ """
+ Creates a new role for your AWS account.
+
+ The policy grants permission to an EC2 instance to assume the role.
+ The policy is URL-encoded according to RFC 3986. Currently, only EC2
+ instances can assume roles.
+
+ :type role_name: string
+ :param role_name: Name of the role to create.
+
+ :type assume_role_policy_document: string
+ :param assume_role_policy_document: The policy that grants an entity
+ permission to assume the role.
+
+ :type path: string
+        :param path: The path to the role.
+ """
+ params = {'RoleName': role_name}
+ if assume_role_policy_document is None:
+ # This is the only valid assume_role_policy_document currently, so
+ # this is used as a default value if no assume_role_policy_document
+ # is provided.
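+            # For reference, that module-level default is the standard EC2
+            # trust policy, equivalent to a JSON document of the form (an
+            # assumption; verify against ASSUME_ROLE_POLICY_DOCUMENT):
+            #   {"Statement": [{"Effect": "Allow",
+            #                   "Principal": {"Service": ["ec2.amazonaws.com"]},
+            #                   "Action": ["sts:AssumeRole"]}]}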
+ params['AssumeRolePolicyDocument'] = ASSUME_ROLE_POLICY_DOCUMENT
+ else:
+ params['AssumeRolePolicyDocument'] = assume_role_policy_document
+ if path is not None:
+ params['Path'] = path
+ return self.get_response('CreateRole', params)
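+    # A minimal end-to-end sketch (role and profile names are
+    # illustrative; assumes an authenticated IAMConnection `cn`):
+    #
+    #     cn.create_role('webserver')
+    #     cn.create_instance_profile('webserver-profile')
+    #     cn.add_role_to_instance_profile('webserver-profile', 'webserver')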
+
+ def delete_instance_profile(self, instance_profile_name):
+ """
+ Deletes the specified instance profile. The instance profile must not
+ have an associated role.
+
+ :type instance_profile_name: string
+ :param instance_profile_name: Name of the instance profile to delete.
+ """
+ return self.get_response(
+ 'DeleteInstanceProfile',
+ {'InstanceProfileName': instance_profile_name})
+
+ def delete_role(self, role_name):
+ """
+ Deletes the specified role. The role must not have any policies
+ attached.
+
+ :type role_name: string
+ :param role_name: Name of the role to delete.
+ """
+ return self.get_response('DeleteRole', {'RoleName': role_name})
+
+ def delete_role_policy(self, role_name, policy_name):
+ """
+ Deletes the specified policy associated with the specified role.
+
+ :type role_name: string
+ :param role_name: Name of the role associated with the policy.
+
+ :type policy_name: string
+ :param policy_name: Name of the policy to delete.
+ """
+ return self.get_response(
+ 'DeleteRolePolicy',
+ {'RoleName': role_name, 'PolicyName': policy_name})
+
+ def get_instance_profile(self, instance_profile_name):
+ """
+ Retrieves information about the specified instance profile, including
+ the instance profile's path, GUID, ARN, and role.
+
+ :type instance_profile_name: string
+ :param instance_profile_name: Name of the instance profile to get
+ information about.
+ """
+        return self.get_response('GetInstanceProfile',
+                                 {'InstanceProfileName': instance_profile_name})
+
+ def get_role(self, role_name):
+ """
+ Retrieves information about the specified role, including the role's
+ path, GUID, ARN, and the policy granting permission to EC2 to assume
+ the role.
+
+ :type role_name: string
+ :param role_name: Name of the role associated with the policy.
+ """
+ return self.get_response('GetRole', {'RoleName': role_name})
+
+ def get_role_policy(self, role_name, policy_name):
+ """
+ Retrieves the specified policy document for the specified role.
+
+ :type role_name: string
+ :param role_name: Name of the role associated with the policy.
+
+ :type policy_name: string
+ :param policy_name: Name of the policy to get.
+ """
+ return self.get_response('GetRolePolicy',
+ {'RoleName': role_name,
+ 'PolicyName': policy_name})
+
+ def list_instance_profiles(self, path_prefix=None, marker=None,
+ max_items=None):
+ """
+ Lists the instance profiles that have the specified path prefix. If
+ there are none, the action returns an empty list.
+
+ :type path_prefix: string
+ :param path_prefix: The path prefix for filtering the results. For
+ example: /application_abc/component_xyz/, which would get all
+ instance profiles whose path starts with
+ /application_abc/component_xyz/.
+
+ :type marker: string
+ :param marker: Use this parameter only when paginating results, and
+ only in a subsequent request after you've received a response
+ where the results are truncated. Set it to the value of the
+ Marker element in the response you just received.
+
+ :type max_items: int
+ :param max_items: Use this parameter only when paginating results to
+            indicate the maximum number of instance profiles you want in the response.
+ """
+ params = {}
+ if path_prefix is not None:
+ params['PathPrefix'] = path_prefix
+ if marker is not None:
+ params['Marker'] = marker
+ if max_items is not None:
+ params['MaxItems'] = max_items
+
+ return self.get_response('ListInstanceProfiles', params,
+ list_marker='InstanceProfiles')
+
+ def list_instance_profiles_for_role(self, role_name, marker=None,
+ max_items=None):
+ """
+        Lists the instance profiles associated with the specified role. If
+ there are none, the action returns an empty list.
+
+ :type role_name: string
+ :param role_name: The name of the role to list instance profiles for.
+
+ :type marker: string
+ :param marker: Use this parameter only when paginating results, and
+ only in a subsequent request after you've received a response
+ where the results are truncated. Set it to the value of the
+ Marker element in the response you just received.
+
+ :type max_items: int
+ :param max_items: Use this parameter only when paginating results to
+            indicate the maximum number of instance profiles you want in the response.
+ """
+ params = {'RoleName': role_name}
+ if marker is not None:
+ params['Marker'] = marker
+ if max_items is not None:
+ params['MaxItems'] = max_items
+ return self.get_response('ListInstanceProfilesForRole', params,
+ list_marker='InstanceProfiles')
+
+ def list_role_policies(self, role_name, marker=None, max_items=None):
+ """
+ Lists the names of the policies associated with the specified role. If
+ there are none, the action returns an empty list.
+
+ :type role_name: string
+ :param role_name: The name of the role to list policies for.
+
+ :type marker: string
+ :param marker: Use this parameter only when paginating results, and
+ only in a subsequent request after you've received a response
+ where the results are truncated. Set it to the value of the
+            Marker element in the response you just received.
+
+ :type max_items: int
+ :param max_items: Use this parameter only when paginating results to
+            indicate the maximum number of policy names you want in the response.
+ """
+ params = {'RoleName': role_name}
+ if marker is not None:
+ params['Marker'] = marker
+ if max_items is not None:
+ params['MaxItems'] = max_items
+ return self.get_response('ListRolePolicies', params,
+ list_marker='PolicyNames')
+
+ def list_roles(self, path_prefix=None, marker=None, max_items=None):
+ """
+ Lists the roles that have the specified path prefix. If there are none,
+ the action returns an empty list.
+
+ :type path_prefix: string
+ :param path_prefix: The path prefix for filtering the results.
+
+ :type marker: string
+ :param marker: Use this parameter only when paginating results, and
+ only in a subsequent request after you've received a response
+ where the results are truncated. Set it to the value of the
+            Marker element in the response you just received.
+
+ :type max_items: int
+ :param max_items: Use this parameter only when paginating results to
+            indicate the maximum number of roles you want in the response.
+ """
+ params = {}
+ if path_prefix is not None:
+ params['PathPrefix'] = path_prefix
+ if marker is not None:
+ params['Marker'] = marker
+ if max_items is not None:
+ params['MaxItems'] = max_items
+ return self.get_response('ListRoles', params, list_marker='Roles')
+
+ def put_role_policy(self, role_name, policy_name, policy_document):
+ """
+ Adds (or updates) a policy document associated with the specified role.
+
+ :type role_name: string
+ :param role_name: Name of the role to associate the policy with.
+
+ :type policy_name: string
+ :param policy_name: Name of the policy document.
+
+ :type policy_document: string
+ :param policy_document: The policy document.
+ """
+ return self.get_response('PutRolePolicy',
+ {'RoleName': role_name,
+ 'PolicyName': policy_name,
+ 'PolicyDocument': policy_document})
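+    # Sketch of attaching an inline policy (the policy JSON is
+    # illustrative; assumes an authenticated IAMConnection `cn`):
+    #
+    #     policy = ('{"Statement": [{"Effect": "Allow",'
+    #               ' "Action": "s3:GetObject", "Resource": "*"}]}')
+    #     cn.put_role_policy('webserver', 's3-read', policy)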
+
+ def remove_role_from_instance_profile(self, instance_profile_name,
+ role_name):
+ """
+ Removes the specified role from the specified instance profile.
+
+ :type instance_profile_name: string
+ :param instance_profile_name: Name of the instance profile to update.
+
+ :type role_name: string
+ :param role_name: Name of the role to remove.
+ """
+ return self.get_response('RemoveRoleFromInstanceProfile',
+ {'InstanceProfileName': instance_profile_name,
+ 'RoleName': role_name})
+
+ def update_assume_role_policy(self, role_name, policy_document):
+ """
+ Updates the policy that grants an entity permission to assume a role.
+ Currently, only an Amazon EC2 instance can assume a role.
+
+ :type role_name: string
+ :param role_name: Name of the role to update.
+
+ :type policy_document: string
+ :param policy_document: The policy that grants an entity permission to
+ assume the role.
+ """
+ return self.get_response('UpdateAssumeRolePolicy',
+ {'RoleName': role_name,
+ 'PolicyDocument': policy_document})
diff --git a/boto/jsonresponse.py b/boto/jsonresponse.py
index 9433815..01e1f54 100644
--- a/boto/jsonresponse.py
+++ b/boto/jsonresponse.py
@@ -134,12 +134,15 @@
def startElement(self, name, attrs, connection):
for lm in self.list_marker:
if name.endswith(lm):
- l = ListElement(self.connection, name, self.item_marker,
- pythonize_name=self.pythonize_name)
+ l = ListElement(self.connection, name,
+ self.list_marker, self.item_marker,
+ self.pythonize_name)
setattr(self, self.get_name(name), l)
return l
if name in self.item_marker:
e = Element(self.connection, name, parent=self,
+ list_marker=self.list_marker,
+ item_marker=self.item_marker,
pythonize_name=self.pythonize_name)
self.append(e)
return e
diff --git a/boto/manage/cmdshell.py b/boto/manage/cmdshell.py
index 2275fa0..60f281d 100644
--- a/boto/manage/cmdshell.py
+++ b/boto/manage/cmdshell.py
@@ -54,7 +54,7 @@
username=self.uname,
pkey=self._pkey)
return
- except socket.error, (value,message):
+ except socket.error, (value, message):
if value == 61 or value == 111:
print 'SSH Connection refused, will retry in 5 seconds'
time.sleep(5)
@@ -143,7 +143,7 @@
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.exec_command(command)
- return channel.recv(1024)
+ return channel
def close(self):
transport = self._ssh_client.get_transport()
@@ -173,7 +173,7 @@
return os.path.exists(path)
def shell(self):
- raise NotImplementedError, 'shell not supported with LocalClient'
+ raise NotImplementedError('shell not supported with LocalClient')
def run(self):
boto.log.info('running:%s' % self.command)
diff --git a/boto/manage/server.py b/boto/manage/server.py
index 3c7a303..2a2b1f1 100644
--- a/boto/manage/server.py
+++ b/boto/manage/server.py
@@ -489,19 +489,19 @@
def delete(self):
if self.production:
- raise ValueError, "Can't delete a production server"
+ raise ValueError("Can't delete a production server")
#self.stop()
Model.delete(self)
def stop(self):
if self.production:
- raise ValueError, "Can't delete a production server"
+ raise ValueError("Can't delete a production server")
if self._instance:
self._instance.stop()
def terminate(self):
if self.production:
- raise ValueError, "Can't delete a production server"
+ raise ValueError("Can't delete a production server")
if self._instance:
self._instance.terminate()
diff --git a/boto/manage/task.py b/boto/manage/task.py
index 2f9d7d0..1c37c69 100644
--- a/boto/manage/task.py
+++ b/boto/manage/task.py
@@ -155,7 +155,7 @@
self.queue = self.sqs.lookup(queue_name)
def poll(self, wait=60, vtimeout=60):
- while 1:
+ while True:
m = self.queue.read(vtimeout)
if m:
task = Task.get_by_id(m.get_body())
diff --git a/boto/manage/volume.py b/boto/manage/volume.py
index 66a458f..49237d4 100644
--- a/boto/manage/volume.py
+++ b/boto/manage/volume.py
@@ -24,11 +24,11 @@
from boto.sdb.db.property import StringProperty, IntegerProperty, ListProperty, ReferenceProperty, CalculatedProperty
from boto.manage.server import Server
from boto.manage import propget
+import boto.utils
import boto.ec2
import time
import traceback
from contextlib import closing
-import dateutil.parser
import datetime
@@ -191,10 +191,10 @@
for snapshot in rs:
if snapshot.volume_id in all_vols:
if snapshot.progress == '100%':
- snapshot.date = dateutil.parser.parse(snapshot.start_time)
+ snapshot.date = boto.utils.parse_ts(snapshot.start_time)
snapshot.keep = True
snaps.append(snapshot)
- snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))
+ snaps.sort(cmp=lambda x, y: cmp(x.date, y.date))
return snaps
def attach(self, server=None):
@@ -219,7 +219,7 @@
def checkfs(self, use_cmd=None):
if self.server == None:
- raise ValueError, 'server attribute must be set to run this command'
+ raise ValueError('server attribute must be set to run this command')
# determine state of file system on volume, only works if attached
if use_cmd:
cmd = use_cmd
@@ -234,7 +234,7 @@
def wait(self):
if self.server == None:
- raise ValueError, 'server attribute must be set to run this command'
+ raise ValueError('server attribute must be set to run this command')
with closing(self.server.get_cmdshell()) as cmd:
# wait for the volume device to appear
cmd = self.server.get_cmdshell()
@@ -244,7 +244,7 @@
def format(self):
if self.server == None:
- raise ValueError, 'server attribute must be set to run this command'
+ raise ValueError('server attribute must be set to run this command')
status = None
with closing(self.server.get_cmdshell()) as cmd:
if not self.checkfs(cmd):
@@ -254,7 +254,7 @@
def mount(self):
if self.server == None:
- raise ValueError, 'server attribute must be set to run this command'
+ raise ValueError('server attribute must be set to run this command')
boto.log.info('handle_mount_point')
with closing(self.server.get_cmdshell()) as cmd:
cmd = self.server.get_cmdshell()
@@ -376,7 +376,7 @@
for snap in partial_week[1:]:
snap.keep = False
# Keep the first snapshot of each week for the previous 4 weeks
- for i in range(0,4):
+ for i in range(0, 4):
weeks_worth = self.get_snapshot_range(snaps, week_boundary-one_week, week_boundary)
if len(weeks_worth) > 1:
for snap in weeks_worth[1:]:
diff --git a/boto/mturk/connection.py b/boto/mturk/connection.py
index e59da15..7de938b 100644
--- a/boto/mturk/connection.py
+++ b/boto/mturk/connection.py
@@ -38,7 +38,7 @@
class MTurkConnection(AWSQueryConnection):
- APIVersion = '2008-08-02'
+ APIVersion = '2012-03-25'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
@@ -94,7 +94,8 @@
if qual_req is not None:
params.update(qual_req.get_as_params())
- return self._process_request('RegisterHITType', params)
+ return self._process_request('RegisterHITType', params, [('HITTypeId', HITTypeId)])
+
def set_email_notification(self, hit_type, email, event_types=None):
"""
@@ -115,7 +116,7 @@
Common SetHITTypeNotification operation to set notification for a
specified HIT type
"""
- assert type(hit_type) is str, "hit_type argument should be a string."
+ assert isinstance(hit_type, str), "hit_type argument should be a string."
params = {'HITTypeId': hit_type}
@@ -175,9 +176,9 @@
# Handle basic required arguments and set up params dict
params = {'Question': question_param.get_as_xml(),
- 'LifetimeInSeconds' :
+ 'LifetimeInSeconds':
self.duration_as_seconds(lifetime),
- 'MaxAssignments' : max_assignments,
+ 'MaxAssignments': max_assignments,
}
# if hit type specified then add it
@@ -279,7 +280,7 @@
page_size = 100
search_rs = self.search_hits(page_size=page_size)
total_records = int(search_rs.TotalNumResults)
- get_page_hits = lambda(page): self.search_hits(page_size=page_size, page_number=page)
+ get_page_hits = lambda page: self.search_hits(page_size=page_size, page_number=page)
page_nums = self._get_pages(page_size, total_records)
hit_sets = itertools.imap(get_page_hits, page_nums)
return itertools.chain.from_iterable(hit_sets)
@@ -350,7 +351,7 @@
def approve_assignment(self, assignment_id, feedback=None):
"""
"""
- params = {'AssignmentId' : assignment_id,}
+ params = {'AssignmentId': assignment_id,}
if feedback:
params['RequesterFeedback'] = feedback
return self._process_request('ApproveAssignment', params)
@@ -358,15 +359,23 @@
def reject_assignment(self, assignment_id, feedback=None):
"""
"""
- params = {'AssignmentId' : assignment_id,}
+ params = {'AssignmentId': assignment_id,}
if feedback:
params['RequesterFeedback'] = feedback
return self._process_request('RejectAssignment', params)
+ def approve_rejected_assignment(self, assignment_id, feedback=None):
+ """
+ """
+ params = {'AssignmentId' : assignment_id, }
+ if feedback:
+ params['RequesterFeedback'] = feedback
+ return self._process_request('ApproveRejectedAssignment', params)
+
def get_hit(self, hit_id, response_groups=None):
"""
"""
- params = {'HITId' : hit_id,}
+ params = {'HITId': hit_id,}
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
@@ -382,7 +391,7 @@
Reviewing. Similarly, only Reviewing HITs can be reverted back to a
status of Reviewable.
"""
- params = {'HITId' : hit_id,}
+ params = {'HITId': hit_id,}
if revert:
params['Revert'] = revert
return self._process_request('SetHITAsReviewing', params)
@@ -404,7 +413,7 @@
It is not possible to re-enable a HIT once it has been disabled.
To make the work from a disabled HIT available again, create a new HIT.
"""
- params = {'HITId' : hit_id,}
+ params = {'HITId': hit_id,}
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
@@ -421,7 +430,7 @@
reviewable, then call GetAssignmentsForHIT to retrieve the
assignments. Disposing of a HIT removes the HIT from the
results of a call to GetReviewableHITs. """
- params = {'HITId' : hit_id,}
+ params = {'HITId': hit_id,}
return self._process_request('DisposeHIT', params)
def expire_hit(self, hit_id):
@@ -438,7 +447,7 @@
submitted, the expired HIT becomes "reviewable", and will be
returned by a call to GetReviewableHITs.
"""
- params = {'HITId' : hit_id,}
+ params = {'HITId': hit_id,}
return self._process_request('ForceExpireHIT', params)
def extend_hit(self, hit_id, assignments_increment=None, expiration_increment=None):
@@ -460,7 +469,7 @@
(assignments_increment is not None and expiration_increment is not None):
raise ValueError("Must specify either assignments_increment or expiration_increment, but not both")
- params = {'HITId' : hit_id,}
+ params = {'HITId': hit_id,}
if assignments_increment:
params['MaxAssignmentsIncrement'] = assignments_increment
if expiration_increment:
@@ -567,9 +576,9 @@
"""
- params = {'Name' : name,
- 'Description' : description,
- 'QualificationTypeStatus' : status,
+ params = {'Name': name,
+ 'Description': description,
+ 'QualificationTypeStatus': status,
}
if retry_delay is not None:
params['RetryDelayInSeconds'] = retry_delay
@@ -590,7 +599,7 @@
# Eventually someone will write an AnswerKey class.
if auto_granted:
- assert(test is False)
+ assert(test is None)
params['AutoGranted'] = True
params['AutoGrantedValue'] = auto_granted_value
@@ -636,7 +645,7 @@
params['Test'] = test.get_as_xml()
if test_duration is not None:
- params['TestDuration'] = test_duration
+ params['TestDurationInSeconds'] = test_duration
if answer_key is not None:
if isinstance(answer_key, basestring):
@@ -751,11 +760,11 @@
Returns a comma+space-separated string of keywords from either
a list or a string
"""
- if type(keywords) is list:
+ if isinstance(keywords, list):
keywords = ', '.join(keywords)
- if type(keywords) is str:
+ if isinstance(keywords, str):
final_keywords = keywords
- elif type(keywords) is unicode:
+ elif isinstance(keywords, unicode):
final_keywords = keywords.encode('utf-8')
elif keywords is None:
final_keywords = ""
@@ -820,6 +829,13 @@
# are we there yet?
expired = property(_has_expired)
+class HITTypeId(BaseAutoResultElement):
+ """
+    Class to extract a HITTypeId structure from a response
+ """
+
+ pass
+
class Qualification(BaseAutoResultElement):
"""
Class to extract an Qualification structure from a response (used in
@@ -909,6 +925,4 @@
if name == 'QuestionIdentifier':
self.qid = value
elif name in ['FreeText', 'SelectionIdentifier', 'OtherSelectionText'] and self.qid:
- self.fields.append((self.qid,value))
- elif name == 'Answer':
- self.qid = None
+ self.fields.append(value)
diff --git a/boto/mturk/qualification.py b/boto/mturk/qualification.py
index 6b620ec..8272d6d 100644
--- a/boto/mturk/qualification.py
+++ b/boto/mturk/qualification.py
@@ -35,7 +35,7 @@
for n, req in enumerate(self.requirements):
reqparams = req.get_as_params()
for rp in reqparams:
- params['QualificationRequirement.%s.%s' % ((n+1),rp) ] = reqparams[rp]
+ params['QualificationRequirement.%s.%s' % ((n+1), rp) ] = reqparams[rp]
return params
diff --git a/boto/mturk/question.py b/boto/mturk/question.py
index bf16b3e..ab4f970 100644
--- a/boto/mturk/question.py
+++ b/boto/mturk/question.py
@@ -250,8 +250,8 @@
def get_attributes(self):
pairs = zip(self.attribute_names, self.attribute_values)
attrs = ' '.join(
- '%s="%d"' % (name,value)
- for (name,value) in pairs
+ '%s="%d"' % (name, value)
+ for (name, value) in pairs
if value is not None
)
return attrs
@@ -281,6 +281,15 @@
def __init__(self, pattern, error_text=None, flags=None):
self.attribute_values = pattern, error_text, flags
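+    # The base class's get_attributes formats attribute values with %d;
+    # pattern, error_text and flags are strings, so this override
+    # formats them with %s instead.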
+ def get_attributes(self):
+ pairs = zip(self.attribute_names, self.attribute_values)
+ attrs = ' '.join(
+ '%s="%s"' % (name, value)
+ for (name, value) in pairs
+ if value is not None
+ )
+ return attrs
+
class NumberOfLinesSuggestion(object):
template = '<NumberOfLinesSuggestion>%(num_lines)s</NumberOfLinesSuggestion>'
@@ -312,7 +321,7 @@
return self.template % vars()
class FileUploadAnswer(object):
- template = """<FileUploadAnswer><MinFileSizeInBytes>%(min_bytes)d</MinFileSizeInBytes><MaxFileSizeInBytes>%(max_bytes)d</MaxFileSizeInBytes></FileUploadAnswer>"""
+ template = """<FileUploadAnswer><MaxFileSizeInBytes>%(max_bytes)d</MaxFileSizeInBytes><MinFileSizeInBytes>%(min_bytes)d</MinFileSizeInBytes></FileUploadAnswer>"""
def __init__(self, min_bytes, max_bytes):
assert 0 <= min_bytes <= max_bytes <= 2*10**9
@@ -379,7 +388,7 @@
if self.other:
# add OtherSelection element as xml if available
if hasattr(self.other, 'get_as_xml'):
- assert type(self.other) == FreeTextAnswer, 'OtherSelection can only be a FreeTextAnswer'
+ assert isinstance(self.other, FreeTextAnswer), 'OtherSelection can only be a FreeTextAnswer'
selections_xml += self.other.get_as_xml().replace('FreeTextAnswer', 'OtherSelection')
else:
selections_xml += "<OtherSelection />"
diff --git a/boto/mturk/test/__init__.py b/boto/mturk/test/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/boto/mturk/test/__init__.py
+++ /dev/null
diff --git a/boto/mturk/test/_init_environment.py b/boto/mturk/test/_init_environment.py
deleted file mode 100644
index e709785..0000000
--- a/boto/mturk/test/_init_environment.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import os
-import functools
-
-live_connection = False
-mturk_host = 'mechanicalturk.sandbox.amazonaws.com'
-external_url = 'http://www.example.com/'
-
-try:
- local = os.path.join(os.path.dirname(__file__), 'local.py')
- execfile(local)
-except:
- pass
-
-if live_connection:
- #TODO: you must set the auth credentials to something valid
- from boto.mturk.connection import MTurkConnection
-else:
- # Here the credentials must be set, but it doesn't matter what
- # they're set to.
- os.environ.setdefault('AWS_ACCESS_KEY_ID', 'foo')
- os.environ.setdefault('AWS_SECRET_ACCESS_KEY', 'bar')
- from mocks import MTurkConnection
-
-SetHostMTurkConnection = functools.partial(MTurkConnection, host=mturk_host)
diff --git a/boto/mturk/test/common.py b/boto/mturk/test/common.py
deleted file mode 100644
index 40e2726..0000000
--- a/boto/mturk/test/common.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import unittest
-import uuid
-import datetime
-
-from boto.mturk.question import (
- Question, QuestionContent, AnswerSpecification, FreeTextAnswer,
-)
-from _init_environment import SetHostMTurkConnection
-
-class MTurkCommon(unittest.TestCase):
- def setUp(self):
- self.conn = SetHostMTurkConnection()
-
- @staticmethod
- def get_question():
- # create content for a question
- qn_content = QuestionContent()
- qn_content.append_field('Title', 'Boto no hit type question content')
- qn_content.append_field('Text', 'What is a boto no hit type?')
-
- # create the question specification
- qn = Question(identifier=str(uuid.uuid4()),
- content=qn_content,
- answer_spec=AnswerSpecification(FreeTextAnswer()))
- return qn
-
- @staticmethod
- def get_hit_params():
- return dict(
- lifetime=datetime.timedelta(minutes=65),
- max_assignments=2,
- title='Boto create_hit title',
- description='Boto create_hit description',
- keywords=['boto', 'test'],
- reward=0.23,
- duration=datetime.timedelta(minutes=6),
- approval_delay=60*60,
- annotation='An annotation from boto create_hit test',
- response_groups=['Minimal',
- 'HITDetail',
- 'HITQuestion',
- 'HITAssignmentSummary',],
- )
-
diff --git a/boto/mturk/test/create_hit_external.py b/boto/mturk/test/create_hit_external.py
deleted file mode 100644
index 9e955a6..0000000
--- a/boto/mturk/test/create_hit_external.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import unittest
-import uuid
-import datetime
-from boto.mturk.question import ExternalQuestion
-
-from _init_environment import SetHostMTurkConnection, external_url
-
-class Test(unittest.TestCase):
- def test_create_hit_external(self):
- q = ExternalQuestion(external_url=external_url, frame_height=800)
- conn = SetHostMTurkConnection()
- keywords=['boto', 'test', 'doctest']
- create_hit_rs = conn.create_hit(question=q, lifetime=60*65,max_assignments=2,title="Boto External Question Test", keywords=keywords,reward = 0.05, duration=60*6,approval_delay=60*60, annotation='An annotation from boto external question test', response_groups=['Minimal','HITDetail','HITQuestion','HITAssignmentSummary',])
- assert(create_hit_rs.status == True)
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/boto/mturk/test/support.py b/boto/mturk/test/support.py
deleted file mode 100644
index 16b86e6..0000000
--- a/boto/mturk/test/support.py
+++ /dev/null
@@ -1,8 +0,0 @@
-
-import sys
-
-# use unittest2 under Python 2.6 and earlier.
-if sys.version_info >= (2,7):
- import unittest
-else:
- import unittest2 as unittest
diff --git a/boto/mws/__init__.py b/boto/mws/__init__.py
new file mode 100644
index 0000000..d69b7f0
--- /dev/null
+++ b/boto/mws/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2008, Chris Moyer http://coredumped.org
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/boto/mws/connection.py b/boto/mws/connection.py
new file mode 100644
index 0000000..8141b3c
--- /dev/null
+++ b/boto/mws/connection.py
@@ -0,0 +1,777 @@
+# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import xml.sax
+import hashlib
+import base64
+from boto.connection import AWSQueryConnection
+from boto.mws.exception import ResponseErrorFactory
+from boto.mws.response import ResponseFactory, ResponseElement
+from boto.handler import XmlHandler
+import boto.mws.response
+
+__all__ = ['MWSConnection']
+
+api_version_path = {
+ 'Feeds': ('2009-01-01', 'Merchant', '/'),
+ 'Reports': ('2009-01-01', 'Merchant', '/'),
+ 'Orders': ('2011-01-01', 'SellerId', '/Orders/2011-01-01'),
+ 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'),
+ 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'),
+ 'Inbound': ('2010-10-01', 'SellerId',
+ '/FulfillmentInboundShipment/2010-10-01'),
+ 'Outbound': ('2010-10-01', 'SellerId',
+ '/FulfillmentOutboundShipment/2010-10-01'),
+ 'Inventory': ('2010-10-01', 'SellerId',
+ '/FulfillmentInventory/2010-10-01'),
+}
+content_md5 = lambda c: base64.encodestring(hashlib.md5(c).digest()).strip()
+decorated_attrs = ('action', 'response', 'section',
+ 'quota', 'restore', 'version')
+
+
+def add_attrs_from(func, to):
+ for attr in decorated_attrs:
+ setattr(to, attr, getattr(func, attr, None))
+ return to
+
+
+def structured_lists(*fields):
+
+ def decorator(func):
+
+ def wrapper(self, *args, **kw):
+ for key, acc in [f.split('.') for f in fields]:
+ if key in kw:
+ newkey = key + '.' + acc + (acc and '.' or '')
+ for i in range(len(kw[key])):
+ kw[newkey + str(i + 1)] = kw[key][i]
+ kw.pop(key)
+ return func(self, *args, **kw)
+ wrapper.__doc__ = "{0}\nLists: {1}".format(func.__doc__,
+ ', '.join(fields))
+ return add_attrs_from(func, to=wrapper)
+ return decorator
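+# Expansion sketch: with @structured_lists('MarketplaceIdList.Id'),
+# a call passing MarketplaceIdList=['A1', 'B2'] is rewritten to
+#   MarketplaceIdList.Id.1='A1', MarketplaceIdList.Id.2='B2'
+# before the wrapped method sees the keyword arguments.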
+
+
+def http_body(field):
+
+ def decorator(func):
+
+ def wrapper(*args, **kw):
+            if filter(lambda x: x not in kw, (field, 'content_type')):
+ message = "{0} requires {1} and content_type arguments for " \
+ "building HTTP body".format(func.action, field)
+ raise KeyError(message)
+ kw['body'] = kw.pop(field)
+ kw['headers'] = {
+ 'Content-Type': kw.pop('content_type'),
+ 'Content-MD5': content_md5(kw['body']),
+ }
+ return func(*args, **kw)
+ wrapper.__doc__ = "{0}\nRequired HTTP Body: " \
+ "{1}".format(func.__doc__, field)
+ return add_attrs_from(func, to=wrapper)
+ return decorator
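+# Calling convention sketch: a method decorated with
+# http_body('FeedContent') must be passed FeedContent and content_type
+# keyword arguments; the decorator moves the content into the request
+# body and derives the Content-Type and Content-MD5 headers from it.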
+
+
+def destructure_object(value, into=None, prefix=''):
+    # Guard against the shared mutable default-argument pitfall.
+    if into is None:
+        into = {}
+    if isinstance(value, ResponseElement):
+ for name, attr in value.__dict__.items():
+ if name.startswith('_'):
+ continue
+ destructure_object(attr, into=into, prefix=prefix + '.' + name)
+ elif filter(lambda x: isinstance(value, x), (list, set, tuple)):
+ for index, element in [(prefix + '.' + str(i + 1), value[i])
+ for i in range(len(value))]:
+ destructure_object(element, into=into, prefix=index)
+ elif isinstance(value, bool):
+ into[prefix] = str(value).lower()
+ else:
+ into[prefix] = value
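+# Flattening sketch (names are illustrative):
+# destructure_object(['a', 'b'], into=params, prefix='Ids') leaves
+# params == {'Ids.1': 'a', 'Ids.2': 'b'}; booleans become 'true'/'false',
+# and ResponseElement attributes recurse as prefix.AttrName entries.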
+
+
+def structured_objects(*fields):
+
+ def decorator(func):
+
+ def wrapper(*args, **kw):
+ for field in filter(kw.has_key, fields):
+ destructure_object(kw.pop(field), into=kw, prefix=field)
+ return func(*args, **kw)
+ wrapper.__doc__ = "{0}\nObjects: {1}".format(func.__doc__,
+ ', '.join(fields))
+ return add_attrs_from(func, to=wrapper)
+ return decorator
+
+
+def requires(*groups):
+
+ def decorator(func):
+
+ def wrapper(*args, **kw):
+ hasgroup = lambda x: len(x) == len(filter(kw.has_key, x))
+ if 1 != len(filter(hasgroup, groups)):
+ message = ' OR '.join(['+'.join(g) for g in groups])
+ message = "{0} requires {1} argument(s)" \
+ "".format(func.action, message)
+ raise KeyError(message)
+ return func(*args, **kw)
+ message = ' OR '.join(['+'.join(g) for g in groups])
+ wrapper.__doc__ = "{0}\nRequired: {1}".format(func.__doc__,
+ message)
+ return add_attrs_from(func, to=wrapper)
+ return decorator
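+# Validation sketch: @requires(['FeedType']) raises KeyError unless the
+# call supplies FeedType; with multiple groups, exactly one group must
+# be fully present, e.g. @requires(['A', 'B'], ['C']) accepts A+B or C
+# but not both together.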
+
+
+def exclusive(*groups):
+
+ def decorator(func):
+
+ def wrapper(*args, **kw):
+ hasgroup = lambda x: len(x) == len(filter(kw.has_key, x))
+ if len(filter(hasgroup, groups)) not in (0, 1):
+ message = ' OR '.join(['+'.join(g) for g in groups])
+ message = "{0} requires either {1}" \
+ "".format(func.action, message)
+ raise KeyError(message)
+ return func(*args, **kw)
+ message = ' OR '.join(['+'.join(g) for g in groups])
+ wrapper.__doc__ = "{0}\nEither: {1}".format(func.__doc__,
+ message)
+ return add_attrs_from(func, to=wrapper)
+ return decorator
+
+
+def dependent(field, *groups):
+
+ def decorator(func):
+
+ def wrapper(*args, **kw):
+ hasgroup = lambda x: len(x) == len(filter(kw.has_key, x))
+ if field in kw and 1 > len(filter(hasgroup, groups)):
+ message = ' OR '.join(['+'.join(g) for g in groups])
+ message = "{0} argument {1} requires {2}" \
+ "".format(func.action, field, message)
+ raise KeyError(message)
+ return func(*args, **kw)
+ message = ' OR '.join(['+'.join(g) for g in groups])
+ wrapper.__doc__ = "{0}\n{1} requires: {2}".format(func.__doc__,
+ field,
+ message)
+ return add_attrs_from(func, to=wrapper)
+ return decorator
+
+
+def requires_some_of(*fields):
+
+ def decorator(func):
+
+ def wrapper(*args, **kw):
+ if not filter(kw.has_key, fields):
+ message = "{0} requires at least one of {1} argument(s)" \
+ "".format(func.action, ', '.join(fields))
+ raise KeyError(message)
+ return func(*args, **kw)
+ wrapper.__doc__ = "{0}\nSome Required: {1}".format(func.__doc__,
+ ', '.join(fields))
+ return add_attrs_from(func, to=wrapper)
+ return decorator
+
+
+def boolean_arguments(*fields):
+
+ def decorator(func):
+
+ def wrapper(*args, **kw):
+ for field in filter(lambda x: isinstance(kw.get(x), bool), fields):
+ kw[field] = str(kw[field]).lower()
+ return func(*args, **kw)
+ wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__,
+ ', '.join(fields))
+ return add_attrs_from(func, to=wrapper)
+ return decorator
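+# Coercion sketch: @boolean_arguments('PurgeAndReplace') lowercases
+# Python booleans for the query string, so PurgeAndReplace=True is
+# transmitted as PurgeAndReplace='true'.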
+
+
+def api_action(section, quota, restore, *api):
+
+ def decorator(func, quota=int(quota), restore=float(restore)):
+ version, accesskey, path = api_version_path[section]
+ action = ''.join(api or map(str.capitalize, func.func_name.split('_')))
+ if hasattr(boto.mws.response, action + 'Response'):
+ response = getattr(boto.mws.response, action + 'Response')
+ else:
+ response = ResponseFactory(action)
+
+ def wrapper(self, *args, **kw):
+ kw.setdefault(accesskey, getattr(self, accesskey, None))
+ if kw[accesskey] is None:
+ message = "{0} requires {1} argument. Set the " \
+ "MWSConnection.{2} attribute?" \
+ "".format(action, accesskey, accesskey)
+ raise KeyError(message)
+ kw['Action'] = action
+ kw['Version'] = version
+ return func(self, path, response, *args, **kw)
+ for attr in decorated_attrs:
+ setattr(wrapper, attr, locals().get(attr))
+ wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \
+ "{4}".format(action, version, quota, restore,
+ func.__doc__)
+ return wrapper
+ return decorator
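+# Resolution sketch: for a method declared as
+#     @api_action('Feeds', 15, 120)
+#     def submit_feed(self, path, response, ...)
+# the wrapper derives Action='SubmitFeed' from the function name, takes
+# Version and the request path from api_version_path['Feeds'], and fills
+# in the Merchant access key from the connection when the caller omits it.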
+
+
+class MWSConnection(AWSQueryConnection):
+
+ ResponseError = ResponseErrorFactory
+
+ def __init__(self, *args, **kw):
+ kw.setdefault('host', 'mws.amazonservices.com')
+ self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId')
+ self.SellerId = kw.pop('SellerId', None) or self.Merchant
+ AWSQueryConnection.__init__(self, *args, **kw)
+
+ def _required_auth_capability(self):
+ return ['mws']
+
+ def post_request(self, path, params, cls, body='', headers={}, isXML=True):
+ """Make a POST request, optionally with a content body,
+ and return the response, optionally as raw text.
+        Modelled on the inherited get_object/make_request flow.
+ """
+ request = self.build_base_http_request('POST', path, None, data=body,
+ params=params, headers=headers, host=self.server_name())
+ response = self._mexe(request, override_num_retries=None)
+ body = response.read()
+ boto.log.debug(body)
+ if not body:
+ boto.log.error('Null body %s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+ if response.status != 200:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+ if not isXML:
+ digest = response.getheader('Content-MD5')
+ assert content_md5(body) == digest
+ return body
+ obj = cls(self)
+ h = XmlHandler(obj, self)
+ xml.sax.parseString(body, h)
+ return obj
+
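+    # Request-flow sketch (illustrative): each decorated method below
+    # receives (path, response) from @api_action and forwards its validated
+    # keyword arguments here as the POST parameters; the XML reply is
+    # parsed into an instance of the resolved response class, e.g.
+    #
+    #     conn.get_report_count()    # POSTs Action=GetReportCount to the
+    #                                # Reports path and returns the parsed
+    #                                # response object.
+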
+ @boolean_arguments('PurgeAndReplace')
+ @http_body('FeedContent')
+ @structured_lists('MarketplaceIdList.Id')
+ @requires(['FeedType'])
+ @api_action('Feeds', 15, 120)
+ def submit_feed(self, path, response, headers={}, body='', **kw):
+ """Uploads a feed for processing by Amazon MWS.
+ """
+ return self.post_request(path, kw, response, body=body,
+ headers=headers)
+
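+    # Hypothetical usage (sketch only; FeedContent handling comes from the
+    # @http_body decorator defined earlier, and the values shown are
+    # placeholders):
+    #
+    #     conn = MWSConnection(Merchant='SELLER_ID')
+    #     conn.submit_feed(FeedType='_POST_PRODUCT_DATA_',
+    #                      PurgeAndReplace=False,
+    #                      FeedContent=feed_xml)
+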
+ @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type',
+ 'FeedProcessingStatusList.Status')
+ @api_action('Feeds', 10, 45)
+ def get_feed_submission_list(self, path, response, **kw):
+ """Returns a list of all feed submissions submitted in the
+ previous 90 days.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['NextToken'])
+ @api_action('Feeds', 0, 0)
+ def get_feed_submission_list_by_next_token(self, path, response, **kw):
+ """Returns a list of feed submissions using the NextToken parameter.
+ """
+ return self.post_request(path, kw, response)
+
+ @structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status')
+ @api_action('Feeds', 10, 45)
+ def get_feed_submission_count(self, path, response, **kw):
+ """Returns a count of the feeds submitted in the previous 90 days.
+ """
+ return self.post_request(path, kw, response)
+
+ @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type')
+ @api_action('Feeds', 10, 45)
+ def cancel_feed_submissions(self, path, response, **kw):
+ """Cancels one or more feed submissions and returns a
+ count of the feed submissions that were canceled.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['FeedSubmissionId'])
+ @api_action('Feeds', 15, 60)
+ def get_feed_submission_result(self, path, response, **kw):
+ """Returns the feed processing report.
+ """
+ return self.post_request(path, kw, response, isXML=False)
+
+ def get_service_status(self, **kw):
+ """Instruct the user on how to get service status.
+ """
+ message = "Use {0}.get_(section)_service_status(), " \
+ "where (section) is one of the following: " \
+ "{1}".format(self.__class__.__name__,
+ ', '.join(map(str.lower, api_version_path.keys())))
+ raise AttributeError(message)
+
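+    # For example (illustrative): calling conn.get_service_status() raises
+    # AttributeError pointing at the per-section helpers defined later in
+    # this class, such as get_inbound_service_status() or
+    # get_orders_service_status().
+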
+ @structured_lists('MarketplaceIdList.Id')
+ @boolean_arguments('ReportOptions=ShowSalesChannel')
+ @requires(['ReportType'])
+ @api_action('Reports', 15, 60)
+ def request_report(self, path, response, **kw):
+ """Creates a report request and submits the request to Amazon MWS.
+ """
+ return self.post_request(path, kw, response)
+
+ @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type',
+ 'ReportProcessingStatusList.Status')
+ @api_action('Reports', 10, 45)
+ def get_report_request_list(self, path, response, **kw):
+ """Returns a list of report requests that you can use to get the
+ ReportRequestId for a report.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['NextToken'])
+ @api_action('Reports', 0, 0)
+ def get_report_request_list_by_next_token(self, path, response, **kw):
+ """Returns a list of report requests using the NextToken,
+ which was supplied by a previous request to either
+ GetReportRequestListByNextToken or GetReportRequestList, where
+ the value of HasNext was true in that previous request.
+ """
+ return self.post_request(path, kw, response)
+
+ @structured_lists('ReportTypeList.Type',
+ 'ReportProcessingStatusList.Status')
+ @api_action('Reports', 10, 45)
+ def get_report_request_count(self, path, response, **kw):
+ """Returns a count of report requests that have been submitted
+ to Amazon MWS for processing.
+ """
+ return self.post_request(path, kw, response)
+
+ @api_action('Reports', 10, 45)
+ def cancel_report_requests(self, path, response, **kw):
+ """Cancel one or more report requests, returning the count of the
+ canceled report requests and the report request information.
+ """
+ return self.post_request(path, kw, response)
+
+ @boolean_arguments('Acknowledged')
+ @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type')
+ @api_action('Reports', 10, 60)
+ def get_report_list(self, path, response, **kw):
+ """Returns a list of reports that were created in the previous
+ 90 days that match the query parameters.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['NextToken'])
+ @api_action('Reports', 0, 0)
+ def get_report_list_by_next_token(self, path, response, **kw):
+ """Returns a list of reports using the NextToken, which
+ was supplied by a previous request to either
+ GetReportListByNextToken or GetReportList, where the
+ value of HasNext was true in the previous call.
+ """
+ return self.post_request(path, kw, response)
+
+ @boolean_arguments('Acknowledged')
+ @structured_lists('ReportTypeList.Type')
+ @api_action('Reports', 10, 45)
+ def get_report_count(self, path, response, **kw):
+ """Returns a count of the reports, created in the previous 90 days,
+ with a status of _DONE_ and that are available for download.
+ """
+ return self.post_request(path, kw, response)
+
+    @requires(['ReportId'])
+ @api_action('Reports', 15, 60)
+ def get_report(self, path, response, **kw):
+ """Returns the contents of a report.
+ """
+ return self.post_request(path, kw, response, isXML=False)
+
+    @requires(['ReportType', 'Schedule'])
+ @api_action('Reports', 10, 45)
+ def manage_report_schedule(self, path, response, **kw):
+ """Creates, updates, or deletes a report request schedule for
+ a specified report type.
+ """
+ return self.post_request(path, kw, response)
+
+ @structured_lists('ReportTypeList.Type')
+ @api_action('Reports', 10, 45)
+ def get_report_schedule_list(self, path, response, **kw):
+ """Returns a list of order report requests that are scheduled
+ to be submitted to Amazon MWS for processing.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['NextToken'])
+ @api_action('Reports', 0, 0)
+ def get_report_schedule_list_by_next_token(self, path, response, **kw):
+ """Returns a list of report requests using the NextToken,
+ which was supplied by a previous request to either
+ GetReportScheduleListByNextToken or GetReportScheduleList,
+ where the value of HasNext was true in that previous request.
+ """
+ return self.post_request(path, kw, response)
+
+ @structured_lists('ReportTypeList.Type')
+ @api_action('Reports', 10, 45)
+ def get_report_schedule_count(self, path, response, **kw):
+ """Returns a count of order report requests that are scheduled
+ to be submitted to Amazon MWS.
+ """
+ return self.post_request(path, kw, response)
+
+ @boolean_arguments('Acknowledged')
+    @requires(['ReportIdList'])
+ @structured_lists('ReportIdList.Id')
+ @api_action('Reports', 10, 45)
+ def update_report_acknowledgements(self, path, response, **kw):
+ """Updates the acknowledged status of one or more reports.
+ """
+ return self.post_request(path, kw, response)
+
+    @requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems'])
+ @structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems')
+ @api_action('Inbound', 30, 0.5)
+ def create_inbound_shipment_plan(self, path, response, **kw):
+ """Returns the information required to create an inbound shipment.
+ """
+ return self.post_request(path, kw, response)
+
+    @requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems'])
+ @structured_objects('InboundShipmentHeader', 'InboundShipmentItems')
+ @api_action('Inbound', 30, 0.5)
+ def create_inbound_shipment(self, path, response, **kw):
+ """Creates an inbound shipment.
+ """
+ return self.post_request(path, kw, response)
+
+    @requires(['ShipmentId'])
+ @structured_objects('InboundShipmentHeader', 'InboundShipmentItems')
+ @api_action('Inbound', 30, 0.5)
+ def update_inbound_shipment(self, path, response, **kw):
+ """Updates an existing inbound shipment. Amazon documentation
+ is ambiguous as to whether the InboundShipmentHeader and
+ InboundShipmentItems arguments are required.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires_some_of('ShipmentIdList', 'ShipmentStatusList')
+ @structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status')
+ @api_action('Inbound', 30, 0.5)
+ def list_inbound_shipments(self, path, response, **kw):
+ """Returns a list of inbound shipments based on criteria that
+ you specify.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['NextToken'])
+ @api_action('Inbound', 30, 0.5)
+ def list_inbound_shipments_by_next_token(self, path, response, **kw):
+ """Returns the next page of inbound shipments using the NextToken
+ parameter.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore'])
+ @api_action('Inbound', 30, 0.5)
+ def list_inbound_shipment_items(self, path, response, **kw):
+ """Returns a list of items in a specified inbound shipment, or a
+ list of items that were updated within a specified time frame.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['NextToken'])
+ @api_action('Inbound', 30, 0.5)
+ def list_inbound_shipment_items_by_next_token(self, path, response, **kw):
+ """Returns the next page of inbound shipment items using the
+ NextToken parameter.
+ """
+ return self.post_request(path, kw, response)
+
+ @api_action('Inbound', 2, 300, 'GetServiceStatus')
+ def get_inbound_service_status(self, path, response, **kw):
+ """Returns the operational status of the Fulfillment Inbound
+ Shipment API section.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['SellerSkus'], ['QueryStartDateTime'])
+ @structured_lists('SellerSkus.member')
+ @api_action('Inventory', 30, 0.5)
+ def list_inventory_supply(self, path, response, **kw):
+ """Returns information about the availability of a seller's
+ inventory.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['NextToken'])
+ @api_action('Inventory', 30, 0.5)
+ def list_inventory_supply_by_next_token(self, path, response, **kw):
+ """Returns the next page of information about the availability
+ of a seller's inventory using the NextToken parameter.
+ """
+ return self.post_request(path, kw, response)
+
+ @api_action('Inventory', 2, 300, 'GetServiceStatus')
+ def get_inventory_service_status(self, path, response, **kw):
+ """Returns the operational status of the Fulfillment Inventory
+ API section.
+ """
+ return self.post_request(path, kw, response)
+
+ @structured_objects('Address', 'Items')
+    @requires(['Address', 'Items'])
+ @api_action('Outbound', 30, 0.5)
+ def get_fulfillment_preview(self, path, response, **kw):
+ """Returns a list of fulfillment order previews based on items
+ and shipping speed categories that you specify.
+ """
+ return self.post_request(path, kw, response)
+
+ @structured_objects('Address', 'Items')
+    @requires(['SellerFulfillmentOrderId', 'DisplayableOrderId',
+               'ShippingSpeedCategory', 'DisplayableOrderDateTime',
+               'DestinationAddress', 'DisplayableOrderComment'])
+ @api_action('Outbound', 30, 0.5)
+ def create_fulfillment_order(self, path, response, **kw):
+ """Requests that Amazon ship items from the seller's inventory
+ to a destination address.
+ """
+ return self.post_request(path, kw, response)
+
+    @requires(['SellerFulfillmentOrderId'])
+ @api_action('Outbound', 30, 0.5)
+ def get_fulfillment_order(self, path, response, **kw):
+ """Returns a fulfillment order based on a specified
+ SellerFulfillmentOrderId.
+ """
+ return self.post_request(path, kw, response)
+
+ @api_action('Outbound', 30, 0.5)
+ def list_all_fulfillment_orders(self, path, response, **kw):
+ """Returns a list of fulfillment orders fulfilled after (or
+ at) a specified date or by fulfillment method.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['NextToken'])
+ @api_action('Outbound', 30, 0.5)
+ def list_all_fulfillment_orders_by_next_token(self, path, response, **kw):
+        """Returns the next page of fulfillment orders using the
+        NextToken parameter.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['SellerFulfillmentOrderId'])
+ @api_action('Outbound', 30, 0.5)
+ def cancel_fulfillment_order(self, path, response, **kw):
+ """Requests that Amazon stop attempting to fulfill an existing
+ fulfillment order.
+ """
+ return self.post_request(path, kw, response)
+
+ @api_action('Outbound', 2, 300, 'GetServiceStatus')
+ def get_outbound_service_status(self, path, response, **kw):
+ """Returns the operational status of the Fulfillment Outbound
+ API section.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['CreatedAfter'], ['LastUpdatedAfter'])
+ @exclusive(['CreatedAfter'], ['LastUpdatedAfter'])
+ @dependent('CreatedBefore', ['CreatedAfter'])
+ @exclusive(['LastUpdatedAfter'], ['BuyerEmail'], ['SellerOrderId'])
+ @dependent('LastUpdatedBefore', ['LastUpdatedAfter'])
+ @exclusive(['CreatedAfter'], ['LastUpdatedBefore'])
+ @requires(['MarketplaceId'])
+ @structured_objects('OrderTotal', 'ShippingAddress',
+ 'PaymentExecutionDetail')
+ @structured_lists('MarketplaceId.Id', 'OrderStatus.Status',
+ 'FulfillmentChannel.Channel', 'PaymentMethod.')
+ @api_action('Orders', 6, 60)
+ def list_orders(self, path, response, **kw):
+ """Returns a list of orders created or updated during a time
+ frame that you specify.
+ """
+ toggle = set(('FulfillmentChannel.Channel.1',
+ 'OrderStatus.Status.1', 'PaymentMethod.1',
+ 'LastUpdatedAfter', 'LastUpdatedBefore'))
+ for do, dont in {
+ 'BuyerEmail': toggle.union(['SellerOrderId']),
+ 'SellerOrderId': toggle.union(['BuyerEmail']),
+ }.items():
+ if do in kw and filter(kw.has_key, dont):
+ message = "Don't include {0} when specifying " \
+ "{1}".format(' or '.join(dont), do)
+ raise AssertionError(message)
+ return self.post_request(path, kw, response)
+
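+    # Hypothetical call (sketch): the decorator stack above accepts, e.g.,
+    #
+    #     conn.list_orders(MarketplaceId=['ATVPDKIKX0DER'],
+    #                      CreatedAfter='2012-01-01T00:00:00Z')
+    #
+    # where the marketplace list is flattened by structured_lists and the
+    # required/exclusive/dependent constraints are checked up front.
+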
+ @requires(['NextToken'])
+ @api_action('Orders', 6, 60)
+ def list_orders_by_next_token(self, path, response, **kw):
+ """Returns the next page of orders using the NextToken value
+ that was returned by your previous request to either
+ ListOrders or ListOrdersByNextToken.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['AmazonOrderId'])
+ @structured_lists('AmazonOrderId.Id')
+ @api_action('Orders', 6, 60)
+ def get_order(self, path, response, **kw):
+ """Returns an order for each AmazonOrderId that you specify.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['AmazonOrderId'])
+ @api_action('Orders', 30, 2)
+ def list_order_items(self, path, response, **kw):
+ """Returns order item information for an AmazonOrderId that
+ you specify.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['NextToken'])
+ @api_action('Orders', 30, 2)
+ def list_order_items_by_next_token(self, path, response, **kw):
+ """Returns the next page of order items using the NextToken
+ value that was returned by your previous request to either
+ ListOrderItems or ListOrderItemsByNextToken.
+ """
+ return self.post_request(path, kw, response)
+
+ @api_action('Orders', 2, 300, 'GetServiceStatus')
+ def get_orders_service_status(self, path, response, **kw):
+ """Returns the operational status of the Orders API section.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['MarketplaceId', 'Query'])
+ @api_action('Products', 20, 20)
+ def list_matching_products(self, path, response, **kw):
+ """Returns a list of products and their attributes, ordered
+ by relevancy, based on a search query that you specify.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['MarketplaceId', 'ASINList'])
+ @structured_lists('ASINList.ASIN')
+ @api_action('Products', 20, 20)
+ def get_matching_product(self, path, response, **kw):
+ """Returns a list of products and their attributes, based on
+ a list of ASIN values that you specify.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['MarketplaceId', 'IdType', 'IdList'])
+ @structured_lists('IdList.Id')
+ @api_action('Products', 20, 20)
+ def get_matching_product_for_id(self, path, response, **kw):
+ """Returns a list of products and their attributes, based on
+ a list of Product IDs that you specify.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['MarketplaceId', 'SellerSKUList'])
+ @structured_lists('SellerSKUList.SellerSKU')
+ @api_action('Products', 20, 10, 'GetCompetitivePricingForSKU')
+ def get_competitive_pricing_for_sku(self, path, response, **kw):
+ """Returns the current competitive pricing of a product,
+ based on the SellerSKUs and MarketplaceId that you specify.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['MarketplaceId', 'ASINList'])
+ @structured_lists('ASINList.ASIN')
+ @api_action('Products', 20, 10, 'GetCompetitivePricingForASIN')
+ def get_competitive_pricing_for_asin(self, path, response, **kw):
+ """Returns the current competitive pricing of a product,
+ based on the ASINs and MarketplaceId that you specify.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['MarketplaceId', 'SellerSKUList'])
+ @structured_lists('SellerSKUList.SellerSKU')
+ @api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU')
+ def get_lowest_offer_listings_for_sku(self, path, response, **kw):
+ """Returns the lowest price offer listings for a specific
+ product by item condition and SellerSKUs.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['MarketplaceId', 'ASINList'])
+ @structured_lists('ASINList.ASIN')
+ @api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN')
+ def get_lowest_offer_listings_for_asin(self, path, response, **kw):
+ """Returns the lowest price offer listings for a specific
+ product by item condition and ASINs.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['MarketplaceId', 'SellerSKU'])
+ @api_action('Products', 20, 20, 'GetProductCategoriesForSKU')
+ def get_product_categories_for_sku(self, path, response, **kw):
+ """Returns the product categories that a SellerSKU belongs to.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['MarketplaceId', 'ASIN'])
+ @api_action('Products', 20, 20, 'GetProductCategoriesForASIN')
+ def get_product_categories_for_asin(self, path, response, **kw):
+ """Returns the product categories that an ASIN belongs to.
+ """
+ return self.post_request(path, kw, response)
+
+ @api_action('Products', 2, 300, 'GetServiceStatus')
+ def get_products_service_status(self, path, response, **kw):
+ """Returns the operational status of the Products API section.
+ """
+ return self.post_request(path, kw, response)
+
+ @api_action('Sellers', 15, 60)
+ def list_marketplace_participations(self, path, response, **kw):
+ """Returns a list of marketplaces that the seller submitting
+ the request can sell in, and a list of participations that
+ include seller-specific information in that marketplace.
+ """
+ return self.post_request(path, kw, response)
+
+ @requires(['NextToken'])
+ @api_action('Sellers', 15, 60)
+ def list_marketplace_participations_by_next_token(self, path, response,
+ **kw):
+ """Returns the next page of marketplaces and participations
+ using the NextToken value that was returned by your
+ previous request to either ListMarketplaceParticipations
+ or ListMarketplaceParticipationsByNextToken.
+ """
+ return self.post_request(path, kw, response)
diff --git a/boto/mws/exception.py b/boto/mws/exception.py
new file mode 100644
index 0000000..d84df4a
--- /dev/null
+++ b/boto/mws/exception.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+from boto.exception import BotoServerError
+
+
+class ResponseErrorFactory(BotoServerError):
+
+ def __new__(cls, *args, **kw):
+ error = BotoServerError(*args, **kw)
+ try:
+ newclass = globals()[error.error_code]
+ except KeyError:
+ newclass = ResponseError
+ obj = newclass.__new__(newclass, *args, **kw)
+ obj.__dict__.update(error.__dict__)
+ return obj
+
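+# Dispatch sketch (comments only): constructing the factory from a server
+# reply whose error code is, e.g., 'InvalidParameterValue' yields an
+# instance of the InvalidParameterValue class defined below; codes without
+# a matching class fall back to the generic ResponseError.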
+
+class ResponseError(BotoServerError):
+ """
+ Undefined response error.
+ """
+ retry = False
+
+ def __repr__(self):
+ return '{0}({1}, {2},\n\t{3})'.format(self.__class__.__name__,
+ self.status, self.reason,
+ self.error_message)
+
+ def __str__(self):
+ return 'MWS Response Error: {0.status} {0.__class__.__name__} {1}\n' \
+ '{2}\n' \
+ '{0.error_message}'.format(self,
+ self.retry and '(Retriable)' or '',
+ self.__doc__.strip())
+
+
+class RetriableResponseError(ResponseError):
+ retry = True
+
+
+class InvalidParameterValue(ResponseError):
+ """
+ One or more parameter values in the request is invalid.
+ """
+
+
+class InvalidParameter(ResponseError):
+ """
+ One or more parameters in the request is invalid.
+ """
+
+
+class InvalidAddress(ResponseError):
+ """
+ Invalid address.
+ """
diff --git a/boto/mws/response.py b/boto/mws/response.py
new file mode 100644
index 0000000..c95aadb
--- /dev/null
+++ b/boto/mws/response.py
@@ -0,0 +1,646 @@
+# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+from decimal import Decimal
+
+
+class ComplexType(dict):
+ _value = 'Value'
+
+ def __repr__(self):
+ return '{0}{1}'.format(getattr(self, self._value, None), self.copy())
+
+ def __str__(self):
+ return str(getattr(self, self._value, ''))
+
+
+class DeclarativeType(object):
+ def __init__(self, _hint=None, **kw):
+ if _hint is not None:
+ self._hint = _hint
+ else:
+ class JITResponse(ResponseElement):
+ pass
+ self._hint = JITResponse
+ for name, value in kw.items():
+ setattr(self._hint, name, value)
+ self._value = None
+
+ def setup(self, parent, name, *args, **kw):
+ self._parent = parent
+ self._name = name
+ self._clone = self.__class__(self._hint)
+ self._clone._parent = parent
+ self._clone._name = name
+ setattr(self._parent, self._name, self._clone)
+
+ def start(self, *args, **kw):
+        raise NotImplementedError
+
+ def end(self, *args, **kw):
+        raise NotImplementedError
+
+ def teardown(self, *args, **kw):
+ if self._value is None:
+ delattr(self._parent, self._name)
+ else:
+ setattr(self._parent, self._name, self._value)
+
+
+class Element(DeclarativeType):
+ def start(self, *args, **kw):
+ self._value = self._hint(parent=self._parent, **kw)
+ return self._value
+
+ def end(self, *args, **kw):
+ pass
+
+
+class SimpleList(DeclarativeType):
+ def __init__(self, *args, **kw):
+ DeclarativeType.__init__(self, *args, **kw)
+ self._value = []
+
+ def teardown(self, *args, **kw):
+ if self._value == []:
+ self._value = None
+ DeclarativeType.teardown(self, *args, **kw)
+
+ def start(self, *args, **kw):
+ return None
+
+ def end(self, name, value, *args, **kw):
+ self._value.append(value)
+
+
+class ElementList(SimpleList):
+ def start(self, *args, **kw):
+ value = self._hint(parent=self._parent, **kw)
+ self._value += [value]
+ return self._value[-1]
+
+ def end(self, *args, **kw):
+ pass
+
+
+class MemberList(ElementList):
+ def __init__(self, *args, **kw):
+ self._this = kw.get('this')
+ ElementList.__init__(self, *args, **kw)
+
+ def start(self, attrs={}, **kw):
+ Class = self._this or self._parent._type_for(self._name, attrs)
+ if issubclass(self._hint, ResponseElement):
+ ListClass = ElementList
+ else:
+ ListClass = SimpleList
+ setattr(Class, Class._member, ListClass(self._hint))
+ self._value = Class(attrs=attrs, parent=self._parent, **kw)
+ return self._value
+
+ def end(self, *args, **kw):
+ self._value = getattr(self._value, self._value._member)
+ ElementList.end(self, *args, **kw)
+
+
+def ResponseFactory(action):
+ result = globals().get(action + 'Result', ResponseElement)
+
+ class MWSResponse(Response):
+ _name = action + 'Response'
+
+ setattr(MWSResponse, action + 'Result', Element(result))
+ return MWSResponse
+
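+# Example of what the factory builds (illustrative): ResponseFactory(
+# 'GetReportList') returns a Response subclass named 'GetReportListResponse'
+# whose GetReportListResult attribute is wired to the GetReportListResult
+# element class defined later in this module.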
+
+def strip_namespace(func):
+ def wrapper(self, name, *args, **kw):
+ if self._namespace is not None:
+ if name.startswith(self._namespace + ':'):
+ name = name[len(self._namespace + ':'):]
+ return func(self, name, *args, **kw)
+ return wrapper
+
+
+class ResponseElement(dict):
+ _override = {}
+ _member = 'member'
+ _name = None
+ _namespace = None
+
+ def __init__(self, connection=None, name=None, parent=None, attrs={}):
+ if parent is not None and self._namespace is None:
+ self._namespace = parent._namespace
+ if connection is not None:
+ self._connection = connection
+ self._name = name or self._name or self.__class__.__name__
+ self._declared('setup', attrs=attrs)
+ dict.__init__(self, attrs.copy())
+
+ def _declared(self, op, **kw):
+ def inherit(obj):
+ result = {}
+ for cls in getattr(obj, '__bases__', ()):
+ result.update(inherit(cls))
+ result.update(obj.__dict__)
+ return result
+
+ scope = inherit(self.__class__)
+ scope.update(self.__dict__)
+ declared = lambda attr: isinstance(attr[1], DeclarativeType)
+ for name, node in filter(declared, scope.items()):
+ getattr(node, op)(self, name, parentname=self._name, **kw)
+
+ @property
+ def connection(self):
+ return self._connection
+
+ def __repr__(self):
+ render = lambda pair: '{0!s}: {1!r}'.format(*pair)
+ do_show = lambda pair: not pair[0].startswith('_')
+ attrs = filter(do_show, self.__dict__.items())
+ name = self.__class__.__name__
+ if name == 'JITResponse':
+ name = '^{0}^'.format(self._name or '')
+ return '{0}{1!r}({2})'.format(
+ name, self.copy(), ', '.join(map(render, attrs)))
+
+ def _type_for(self, name, attrs):
+ return self._override.get(name, globals().get(name, ResponseElement))
+
+ @strip_namespace
+ def startElement(self, name, attrs, connection):
+ attribute = getattr(self, name, None)
+ if isinstance(attribute, DeclarativeType):
+ return attribute.start(name=name, attrs=attrs,
+ connection=connection)
+ elif attrs.getLength():
+ setattr(self, name, ComplexType(attrs.copy()))
+ else:
+ return None
+
+ @strip_namespace
+ def endElement(self, name, value, connection):
+ attribute = getattr(self, name, None)
+ if name == self._name:
+ self._declared('teardown')
+ elif isinstance(attribute, DeclarativeType):
+ attribute.end(name=name, value=value, connection=connection)
+ elif isinstance(attribute, ComplexType):
+ setattr(attribute, attribute._value, value)
+ else:
+ setattr(self, name, value)
+
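+# Parsing sketch (illustrative; mirrors MWSConnection.post_request):
+#
+#     import xml.sax
+#     from boto.handler import XmlHandler
+#
+#     obj = SomeResponseClass(connection)          # hypothetical subclass
+#     xml.sax.parseString(xml_bytes, XmlHandler(obj, connection))
+#
+# startElement/endElement above route each tag either into a declared
+# DeclarativeType attribute or straight onto the instance.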
+
+class Response(ResponseElement):
+ ResponseMetadata = Element()
+
+ @property
+ def _result(self):
+ return getattr(self, self._action + 'Result', None)
+
+ @property
+ def _action(self):
+ return (self._name or self.__class__.__name__)[:-len('Response')]
+
+
+class ResponseResultList(Response):
+ _ResultClass = ResponseElement
+
+ def __init__(self, *args, **kw):
+ setattr(self, self._action + 'Result', ElementList(self._ResultClass))
+ Response.__init__(self, *args, **kw)
+
+
+class FeedSubmissionInfo(ResponseElement):
+ pass
+
+
+class SubmitFeedResult(ResponseElement):
+ FeedSubmissionInfo = Element(FeedSubmissionInfo)
+
+
+class GetFeedSubmissionListResult(ResponseElement):
+ FeedSubmissionInfo = ElementList(FeedSubmissionInfo)
+
+
+class GetFeedSubmissionListByNextTokenResult(GetFeedSubmissionListResult):
+ pass
+
+
+class GetFeedSubmissionCountResult(ResponseElement):
+ pass
+
+
+class CancelFeedSubmissionsResult(GetFeedSubmissionListResult):
+ pass
+
+
+class GetServiceStatusResult(ResponseElement):
+ Messages = Element(Messages=ElementList())
+
+
+class ReportRequestInfo(ResponseElement):
+ pass
+
+
+class RequestReportResult(ResponseElement):
+ ReportRequestInfo = Element()
+
+
+class GetReportRequestListResult(RequestReportResult):
+ ReportRequestInfo = Element()
+
+
+class GetReportRequestListByNextTokenResult(GetReportRequestListResult):
+ pass
+
+
+class CancelReportRequestsResult(RequestReportResult):
+ pass
+
+
+class GetReportListResult(ResponseElement):
+ ReportInfo = Element()
+
+
+class GetReportListByNextTokenResult(GetReportListResult):
+ pass
+
+
+class ManageReportScheduleResult(ResponseElement):
+ ReportSchedule = Element()
+
+
+class GetReportScheduleListResult(ManageReportScheduleResult):
+ pass
+
+
+class GetReportScheduleListByNextTokenResult(GetReportScheduleListResult):
+ pass
+
+
+class UpdateReportAcknowledgementsResult(GetReportListResult):
+ pass
+
+
+class CreateInboundShipmentPlanResult(ResponseElement):
+ InboundShipmentPlans = MemberList(ShipToAddress=Element(),
+ Items=MemberList())
+
+
+class ListInboundShipmentsResult(ResponseElement):
+ ShipmentData = MemberList(Element(ShipFromAddress=Element()))
+
+
+class ListInboundShipmentsByNextTokenResult(ListInboundShipmentsResult):
+ pass
+
+
+class ListInboundShipmentItemsResult(ResponseElement):
+ ItemData = MemberList()
+
+
+class ListInboundShipmentItemsByNextTokenResult(ListInboundShipmentItemsResult):
+ pass
+
+
+class ListInventorySupplyResult(ResponseElement):
+ InventorySupplyList = MemberList(
+ EarliestAvailability=Element(),
+        SupplyDetail=MemberList(
+            EarliestAvailableToPick=Element(),
+ LatestAvailableToPick=Element(),
+ )
+ )
+
+
+class ListInventorySupplyByNextTokenResult(ListInventorySupplyResult):
+ pass
+
+
+class ComplexAmount(ResponseElement):
+ _amount = 'Value'
+
+ def __repr__(self):
+ return '{0} {1}'.format(self.CurrencyCode, getattr(self, self._amount))
+
+ def __float__(self):
+ return float(getattr(self, self._amount))
+
+ def __str__(self):
+ return str(getattr(self, self._amount))
+
+ @strip_namespace
+ def startElement(self, name, attrs, connection):
+ if name not in ('CurrencyCode', self._amount):
+ message = 'Unrecognized tag {0} in ComplexAmount'.format(name)
+ raise AssertionError(message)
+ return ResponseElement.startElement(self, name, attrs, connection)
+
+ @strip_namespace
+ def endElement(self, name, value, connection):
+ if name == self._amount:
+ value = Decimal(value)
+ ResponseElement.endElement(self, name, value, connection)
+
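+# Illustrative: a parsed ComplexAmount renders as, e.g., 'USD 19.99' via
+# __repr__, while float() and str() expose just the Decimal amount.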
+
+class ComplexMoney(ComplexAmount):
+ _amount = 'Amount'
+
+
+class ComplexWeight(ResponseElement):
+ def __repr__(self):
+ return '{0} {1}'.format(self.Value, self.Unit)
+
+ def __float__(self):
+ return float(self.Value)
+
+ def __str__(self):
+ return str(self.Value)
+
+ @strip_namespace
+ def startElement(self, name, attrs, connection):
+ if name not in ('Unit', 'Value'):
+ message = 'Unrecognized tag {0} in ComplexWeight'.format(name)
+ raise AssertionError(message)
+ return ResponseElement.startElement(self, name, attrs, connection)
+
+ @strip_namespace
+ def endElement(self, name, value, connection):
+ if name == 'Value':
+ value = Decimal(value)
+ ResponseElement.endElement(self, name, value, connection)
+
+
+class Dimension(ComplexType):
+ _value = 'Value'
+
+
+class ComplexDimensions(ResponseElement):
+ _dimensions = ('Height', 'Length', 'Width', 'Weight')
+
+ def __repr__(self):
+ values = [getattr(self, key, None) for key in self._dimensions]
+ values = filter(None, values)
+ return 'x'.join(map('{0.Value:0.2f}{0[Units]}'.format, values))
+
+ @strip_namespace
+ def startElement(self, name, attrs, connection):
+ if name not in self._dimensions:
+ message = 'Unrecognized tag {0} in ComplexDimensions'.format(name)
+ raise AssertionError(message)
+ setattr(self, name, Dimension(attrs.copy()))
+
+ @strip_namespace
+ def endElement(self, name, value, connection):
+ if name in self._dimensions:
+ value = Decimal(value or '0')
+ ResponseElement.endElement(self, name, value, connection)
+
+
+class FulfillmentPreviewItem(ResponseElement):
+ EstimatedShippingWeight = Element(ComplexWeight)
+
+
+class FulfillmentPreview(ResponseElement):
+ EstimatedShippingWeight = Element(ComplexWeight)
+    EstimatedFees = MemberList(
+        Element(
+            Amount=Element(ComplexAmount),
+        ),
+    )
+    UnfulfillablePreviewItems = MemberList(FulfillmentPreviewItem)
+    FulfillmentPreviewShipments = MemberList(
+        FulfillmentPreviewItems=MemberList(FulfillmentPreviewItem),
+    )
+
+
+class GetFulfillmentPreviewResult(ResponseElement):
+ FulfillmentPreviews = MemberList(FulfillmentPreview)
+
+
+class FulfillmentOrder(ResponseElement):
+ DestinationAddress = Element()
+ NotificationEmailList = MemberList(str)
+
+
+class GetFulfillmentOrderResult(ResponseElement):
+ FulfillmentOrder = Element(FulfillmentOrder)
+    FulfillmentShipment = MemberList(Element(
+        FulfillmentShipmentItem=MemberList(),
+        FulfillmentShipmentPackage=MemberList(),
+    ))
+ FulfillmentOrderItem = MemberList()
+
+
+class ListAllFulfillmentOrdersResult(ResponseElement):
+ FulfillmentOrders = MemberList(FulfillmentOrder)
+
+
+class ListAllFulfillmentOrdersByNextTokenResult(ListAllFulfillmentOrdersResult):
+ pass
+
+
+class Image(ResponseElement):
+ pass
+
+
+class AttributeSet(ResponseElement):
+ ItemDimensions = Element(ComplexDimensions)
+ ListPrice = Element(ComplexMoney)
+ PackageDimensions = Element(ComplexDimensions)
+ SmallImage = Element(Image)
+
+
+class ItemAttributes(AttributeSet):
+ Languages = Element(Language=ElementList())
+
+ def __init__(self, *args, **kw):
+ names = ('Actor', 'Artist', 'Author', 'Creator', 'Director',
+ 'Feature', 'Format', 'GemType', 'MaterialType',
+ 'MediaType', 'OperatingSystem', 'Platform')
+ for name in names:
+ setattr(self, name, SimpleList())
+ AttributeSet.__init__(self, *args, **kw)
+
+
+class VariationRelationship(ResponseElement):
+ Identifiers = Element(MarketplaceASIN=Element(),
+ SKUIdentifier=Element())
+ GemType = SimpleList()
+ MaterialType = SimpleList()
+ OperatingSystem = SimpleList()
+
+
+class Price(ResponseElement):
+ LandedPrice = Element(ComplexMoney)
+ ListingPrice = Element(ComplexMoney)
+ Shipping = Element(ComplexMoney)
+
+
+class CompetitivePrice(ResponseElement):
+ Price = Element(Price)
+
+
+class CompetitivePriceList(ResponseElement):
+ CompetitivePrice = ElementList(CompetitivePrice)
+
+
+class CompetitivePricing(ResponseElement):
+ CompetitivePrices = Element(CompetitivePriceList)
+ NumberOfOfferListings = SimpleList()
+ TradeInValue = Element(ComplexMoney)
+
+
+class SalesRank(ResponseElement):
+ pass
+
+
+class LowestOfferListing(ResponseElement):
+ Qualifiers = Element(ShippingTime=Element())
+ Price = Element(Price)
+
+
+class Product(ResponseElement):
+ _namespace = 'ns2'
+ Identifiers = Element(MarketplaceASIN=Element(),
+ SKUIdentifier=Element())
+    AttributeSets = Element(
+        ItemAttributes=ElementList(ItemAttributes),
+    )
+    Relationships = Element(
+        VariationParent=ElementList(VariationRelationship),
+    )
+    CompetitivePricing = ElementList(CompetitivePricing)
+    SalesRankings = Element(
+        SalesRank=ElementList(SalesRank),
+    )
+    LowestOfferListings = Element(
+        LowestOfferListing=ElementList(LowestOfferListing),
+    )
+
+
+class ListMatchingProductsResult(ResponseElement):
+ Products = Element(Product=ElementList(Product))
+
+
+class ProductsBulkOperationResult(ResponseElement):
+ Product = Element(Product)
+ Error = Element()
+
+
+class ProductsBulkOperationResponse(ResponseResultList):
+ _ResultClass = ProductsBulkOperationResult
+
+
+class GetMatchingProductResponse(ProductsBulkOperationResponse):
+ pass
+
+
+class GetMatchingProductForIdResult(ListMatchingProductsResult):
+ pass
+
+
+class GetCompetitivePricingForSKUResponse(ProductsBulkOperationResponse):
+ pass
+
+
+class GetCompetitivePricingForASINResponse(ProductsBulkOperationResponse):
+ pass
+
+
+class GetLowestOfferListingsForSKUResponse(ProductsBulkOperationResponse):
+ pass
+
+
+class GetLowestOfferListingsForASINResponse(ProductsBulkOperationResponse):
+ pass
+
+
+class ProductCategory(ResponseElement):
+
+ def __init__(self, *args, **kw):
+ setattr(self, 'Parent', Element(ProductCategory))
+ ResponseElement.__init__(self, *args, **kw)
+
+
+class GetProductCategoriesResult(ResponseElement):
+ Self = Element(ProductCategory)
+
+
+class GetProductCategoriesForSKUResult(GetProductCategoriesResult):
+ pass
+
+
+class GetProductCategoriesForASINResult(GetProductCategoriesResult):
+ pass
+
+
+class Order(ResponseElement):
+ OrderTotal = Element(ComplexMoney)
+ ShippingAddress = Element()
+    PaymentExecutionDetail = Element(
+        PaymentExecutionDetailItem=ElementList(
+            PaymentExecutionDetailItem=Element(
+                Payment=Element(ComplexMoney)
+            )
+        )
+    )
+
+
+class ListOrdersResult(ResponseElement):
+ Orders = Element(Order=ElementList(Order))
+
+
+class ListOrdersByNextTokenResult(ListOrdersResult):
+ pass
+
+
+class GetOrderResult(ListOrdersResult):
+ pass
+
+
+class OrderItem(ResponseElement):
+ ItemPrice = Element(ComplexMoney)
+ ShippingPrice = Element(ComplexMoney)
+ GiftWrapPrice = Element(ComplexMoney)
+ ItemTax = Element(ComplexMoney)
+ ShippingTax = Element(ComplexMoney)
+ GiftWrapTax = Element(ComplexMoney)
+ ShippingDiscount = Element(ComplexMoney)
+ PromotionDiscount = Element(ComplexMoney)
+ PromotionIds = SimpleList()
+ CODFee = Element(ComplexMoney)
+ CODFeeDiscount = Element(ComplexMoney)
+
+
+class ListOrderItemsResult(ResponseElement):
+ OrderItems = Element(OrderItem=ElementList(OrderItem))
+
+
+class ListMarketplaceParticipationsResult(ResponseElement):
+ ListParticipations = Element(Participation=ElementList())
+ ListMarketplaces = Element(Marketplace=ElementList())
+
+
+class ListMarketplaceParticipationsByNextTokenResult(ListMarketplaceParticipationsResult):
+ pass
diff --git a/boto/provider.py b/boto/provider.py
index 7e9f640..dc1172c 100644
--- a/boto/provider.py
+++ b/boto/provider.py
@@ -27,6 +27,8 @@
"""
import os
+from datetime import datetime
+
import boto
from boto import config
from boto.gs.acl import ACL
@@ -34,6 +36,7 @@
from boto.s3.acl import CannedACLStrings as CannedS3ACLStrings
from boto.s3.acl import Policy
+
HEADER_PREFIX_KEY = 'header_prefix'
METADATA_PREFIX_KEY = 'metadata_prefix'
@@ -44,6 +47,7 @@
AUTH_HEADER_KEY = 'auth-header'
COPY_SOURCE_HEADER_KEY = 'copy-source-header'
COPY_SOURCE_VERSION_ID_HEADER_KEY = 'copy-source-version-id-header'
+COPY_SOURCE_RANGE_HEADER_KEY = 'copy-source-range-header'
DELETE_MARKER_HEADER_KEY = 'delete-marker-header'
DATE_HEADER_KEY = 'date-header'
METADATA_DIRECTIVE_HEADER_KEY = 'metadata-directive-header'
@@ -64,90 +68,98 @@
class Provider(object):
CredentialMap = {
- 'aws' : ('aws_access_key_id', 'aws_secret_access_key'),
- 'google' : ('gs_access_key_id', 'gs_secret_access_key'),
+ 'aws': ('aws_access_key_id', 'aws_secret_access_key'),
+ 'google': ('gs_access_key_id', 'gs_secret_access_key'),
}
AclClassMap = {
- 'aws' : Policy,
- 'google' : ACL
+ 'aws': Policy,
+ 'google': ACL
}
CannedAclsMap = {
- 'aws' : CannedS3ACLStrings,
- 'google' : CannedGSACLStrings
+ 'aws': CannedS3ACLStrings,
+ 'google': CannedGSACLStrings
}
HostKeyMap = {
- 'aws' : 's3',
- 'google' : 'gs'
+ 'aws': 's3',
+ 'google': 'gs'
}
ChunkedTransferSupport = {
- 'aws' : False,
- 'google' : True
+ 'aws': False,
+ 'google': True
+ }
+
+ MetadataServiceSupport = {
+ 'aws': True,
+ 'google': False
}
# If you update this map please make sure to put "None" for the
# right-hand-side for any headers that don't apply to a provider, rather
# than simply leaving that header out (which would cause KeyErrors).
HeaderInfoMap = {
- 'aws' : {
- HEADER_PREFIX_KEY : AWS_HEADER_PREFIX,
- METADATA_PREFIX_KEY : AWS_HEADER_PREFIX + 'meta-',
- ACL_HEADER_KEY : AWS_HEADER_PREFIX + 'acl',
- AUTH_HEADER_KEY : 'AWS',
- COPY_SOURCE_HEADER_KEY : AWS_HEADER_PREFIX + 'copy-source',
- COPY_SOURCE_VERSION_ID_HEADER_KEY : AWS_HEADER_PREFIX +
+ 'aws': {
+ HEADER_PREFIX_KEY: AWS_HEADER_PREFIX,
+ METADATA_PREFIX_KEY: AWS_HEADER_PREFIX + 'meta-',
+ ACL_HEADER_KEY: AWS_HEADER_PREFIX + 'acl',
+ AUTH_HEADER_KEY: 'AWS',
+ COPY_SOURCE_HEADER_KEY: AWS_HEADER_PREFIX + 'copy-source',
+ COPY_SOURCE_VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX +
'copy-source-version-id',
- DATE_HEADER_KEY : AWS_HEADER_PREFIX + 'date',
- DELETE_MARKER_HEADER_KEY : AWS_HEADER_PREFIX + 'delete-marker',
- METADATA_DIRECTIVE_HEADER_KEY : AWS_HEADER_PREFIX +
+ COPY_SOURCE_RANGE_HEADER_KEY: AWS_HEADER_PREFIX +
+ 'copy-source-range',
+ DATE_HEADER_KEY: AWS_HEADER_PREFIX + 'date',
+ DELETE_MARKER_HEADER_KEY: AWS_HEADER_PREFIX + 'delete-marker',
+ METADATA_DIRECTIVE_HEADER_KEY: AWS_HEADER_PREFIX +
'metadata-directive',
- RESUMABLE_UPLOAD_HEADER_KEY : None,
- SECURITY_TOKEN_HEADER_KEY : AWS_HEADER_PREFIX + 'security-token',
- SERVER_SIDE_ENCRYPTION_KEY : AWS_HEADER_PREFIX + 'server-side-encryption',
- VERSION_ID_HEADER_KEY : AWS_HEADER_PREFIX + 'version-id',
- STORAGE_CLASS_HEADER_KEY : AWS_HEADER_PREFIX + 'storage-class',
- MFA_HEADER_KEY : AWS_HEADER_PREFIX + 'mfa',
+ RESUMABLE_UPLOAD_HEADER_KEY: None,
+ SECURITY_TOKEN_HEADER_KEY: AWS_HEADER_PREFIX + 'security-token',
+ SERVER_SIDE_ENCRYPTION_KEY: AWS_HEADER_PREFIX + 'server-side-encryption',
+ VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id',
+ STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class',
+ MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa',
},
- 'google' : {
- HEADER_PREFIX_KEY : GOOG_HEADER_PREFIX,
- METADATA_PREFIX_KEY : GOOG_HEADER_PREFIX + 'meta-',
- ACL_HEADER_KEY : GOOG_HEADER_PREFIX + 'acl',
- AUTH_HEADER_KEY : 'GOOG1',
- COPY_SOURCE_HEADER_KEY : GOOG_HEADER_PREFIX + 'copy-source',
- COPY_SOURCE_VERSION_ID_HEADER_KEY : GOOG_HEADER_PREFIX +
+ 'google': {
+ HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX,
+ METADATA_PREFIX_KEY: GOOG_HEADER_PREFIX + 'meta-',
+ ACL_HEADER_KEY: GOOG_HEADER_PREFIX + 'acl',
+ AUTH_HEADER_KEY: 'GOOG1',
+ COPY_SOURCE_HEADER_KEY: GOOG_HEADER_PREFIX + 'copy-source',
+ COPY_SOURCE_VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX +
'copy-source-version-id',
- DATE_HEADER_KEY : GOOG_HEADER_PREFIX + 'date',
- DELETE_MARKER_HEADER_KEY : GOOG_HEADER_PREFIX + 'delete-marker',
- METADATA_DIRECTIVE_HEADER_KEY : GOOG_HEADER_PREFIX +
+ COPY_SOURCE_RANGE_HEADER_KEY: None,
+ DATE_HEADER_KEY: GOOG_HEADER_PREFIX + 'date',
+ DELETE_MARKER_HEADER_KEY: GOOG_HEADER_PREFIX + 'delete-marker',
+ METADATA_DIRECTIVE_HEADER_KEY: GOOG_HEADER_PREFIX +
'metadata-directive',
- RESUMABLE_UPLOAD_HEADER_KEY : GOOG_HEADER_PREFIX + 'resumable',
- SECURITY_TOKEN_HEADER_KEY : GOOG_HEADER_PREFIX + 'security-token',
- SERVER_SIDE_ENCRYPTION_KEY : None,
+ RESUMABLE_UPLOAD_HEADER_KEY: GOOG_HEADER_PREFIX + 'resumable',
+ SECURITY_TOKEN_HEADER_KEY: GOOG_HEADER_PREFIX + 'security-token',
+ SERVER_SIDE_ENCRYPTION_KEY: None,
# Note that this version header is not to be confused with
# the Google Cloud Storage 'x-goog-api-version' header.
- VERSION_ID_HEADER_KEY : GOOG_HEADER_PREFIX + 'version-id',
- STORAGE_CLASS_HEADER_KEY : None,
- MFA_HEADER_KEY : None,
+ VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id',
+ STORAGE_CLASS_HEADER_KEY: None,
+ MFA_HEADER_KEY: None,
}
}
ErrorMap = {
- 'aws' : {
- STORAGE_COPY_ERROR : boto.exception.S3CopyError,
- STORAGE_CREATE_ERROR : boto.exception.S3CreateError,
- STORAGE_DATA_ERROR : boto.exception.S3DataError,
- STORAGE_PERMISSIONS_ERROR : boto.exception.S3PermissionsError,
- STORAGE_RESPONSE_ERROR : boto.exception.S3ResponseError,
+ 'aws': {
+ STORAGE_COPY_ERROR: boto.exception.S3CopyError,
+ STORAGE_CREATE_ERROR: boto.exception.S3CreateError,
+ STORAGE_DATA_ERROR: boto.exception.S3DataError,
+ STORAGE_PERMISSIONS_ERROR: boto.exception.S3PermissionsError,
+ STORAGE_RESPONSE_ERROR: boto.exception.S3ResponseError,
},
- 'google' : {
- STORAGE_COPY_ERROR : boto.exception.GSCopyError,
- STORAGE_CREATE_ERROR : boto.exception.GSCreateError,
- STORAGE_DATA_ERROR : boto.exception.GSDataError,
- STORAGE_PERMISSIONS_ERROR : boto.exception.GSPermissionsError,
- STORAGE_RESPONSE_ERROR : boto.exception.GSResponseError,
+ 'google': {
+ STORAGE_COPY_ERROR: boto.exception.GSCopyError,
+ STORAGE_CREATE_ERROR: boto.exception.GSCreateError,
+ STORAGE_DATA_ERROR: boto.exception.GSDataError,
+ STORAGE_PERMISSIONS_ERROR: boto.exception.GSPermissionsError,
+ STORAGE_RESPONSE_ERROR: boto.exception.GSResponseError,
}
}
@@ -160,6 +172,7 @@
self.name = name
self.acl_class = self.AclClassMap[self.name]
self.canned_acls = self.CannedAclsMap[self.name]
+ self._credential_expiry_time = None
self.get_credentials(access_key, secret_key)
self.configure_headers()
self.configure_errors()
@@ -168,25 +181,102 @@
if config.has_option('Credentials', host_opt_name):
self.host = config.get('Credentials', host_opt_name)
+ def get_access_key(self):
+ if self._credentials_need_refresh():
+ self._populate_keys_from_metadata_server()
+ return self._access_key
+
+ def set_access_key(self, value):
+ self._access_key = value
+
+ access_key = property(get_access_key, set_access_key)
+
+ def get_secret_key(self):
+ if self._credentials_need_refresh():
+ self._populate_keys_from_metadata_server()
+ return self._secret_key
+
+ def set_secret_key(self, value):
+ self._secret_key = value
+
+ secret_key = property(get_secret_key, set_secret_key)
+
+ def get_security_token(self):
+ if self._credentials_need_refresh():
+ self._populate_keys_from_metadata_server()
+ return self._security_token
+
+ def set_security_token(self, value):
+ self._security_token = value
+
+ security_token = property(get_security_token, set_security_token)
+
+ def _credentials_need_refresh(self):
+ if self._credential_expiry_time is None:
+ return False
+ else:
+ # The credentials should be refreshed if they're going to expire
+ # in less than 5 minutes.
+ delta = self._credential_expiry_time - datetime.utcnow()
+ # python2.6 does not have timedelta.total_seconds() so we have
+ # to calculate this ourselves. This is straight from the
+ # datetime docs.
+ seconds_left = (
+ (delta.microseconds + (delta.seconds + delta.days * 24 * 3600)
+ * 10**6) / 10**6)
+ if seconds_left < (5 * 60):
+ boto.log.debug("Credentials need to be refreshed.")
+ return True
+ else:
+ return False
+
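+    # Worked example of the expiry math above (comment only): with
+    # delta = timedelta(minutes=4), seconds_left evaluates to 240, which is
+    # below the 5 * 60 = 300 second threshold, so a refresh is triggered.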
+
def get_credentials(self, access_key=None, secret_key=None):
access_key_name, secret_key_name = self.CredentialMap[self.name]
if access_key is not None:
self.access_key = access_key
- elif os.environ.has_key(access_key_name.upper()):
+ elif access_key_name.upper() in os.environ:
self.access_key = os.environ[access_key_name.upper()]
elif config.has_option('Credentials', access_key_name):
self.access_key = config.get('Credentials', access_key_name)
if secret_key is not None:
self.secret_key = secret_key
- elif os.environ.has_key(secret_key_name.upper()):
+ elif secret_key_name.upper() in os.environ:
self.secret_key = os.environ[secret_key_name.upper()]
elif config.has_option('Credentials', secret_key_name):
self.secret_key = config.get('Credentials', secret_key_name)
- if isinstance(self.secret_key, unicode):
+
+ if ((self._access_key is None or self._secret_key is None) and
+ self.MetadataServiceSupport[self.name]):
+ self._populate_keys_from_metadata_server()
+ self._secret_key = self._convert_key_to_str(self._secret_key)
+
+ def _populate_keys_from_metadata_server(self):
+ # get_instance_metadata is imported here because of a circular
+ # dependency.
+ boto.log.debug("Retrieving credentials from metadata server.")
+ from boto.utils import get_instance_metadata
+ timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
+ metadata = get_instance_metadata(timeout=timeout, num_retries=1)
+ # I'm assuming there's only one role on the instance profile.
+ if metadata and 'iam' in metadata:
+ security = metadata['iam']['security-credentials'].values()[0]
+ self._access_key = security['AccessKeyId']
+ self._secret_key = self._convert_key_to_str(security['SecretAccessKey'])
+ self._security_token = security['Token']
+ expires_at = security['Expiration']
+ self._credential_expiry_time = datetime.strptime(
+ expires_at, "%Y-%m-%dT%H:%M:%SZ")
+ boto.log.debug("Retrieved credentials will expire in %s at: %s",
+ self._credential_expiry_time - datetime.now(), expires_at)
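+        # For reference, the metadata consumed above is assumed to look
+        # like (shape only, values elided):
+        #   {'iam': {'security-credentials': {'<role-name>': {
+        #       'AccessKeyId': ..., 'SecretAccessKey': ...,
+        #       'Token': ..., 'Expiration': '2012-10-18T17:29:22Z'}}}}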
+
+ def _convert_key_to_str(self, key):
+ if isinstance(key, unicode):
# the secret key must be bytes and not unicode to work
# properly with hmac.new (see http://bugs.python.org/issue5285)
- self.secret_key = str(self.secret_key)
+ return str(key)
+ return key
def configure_headers(self):
header_info_map = self.HeaderInfoMap[self.name]
@@ -197,6 +287,8 @@
self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY]
self.copy_source_version_id = header_info_map[
COPY_SOURCE_VERSION_ID_HEADER_KEY]
+ self.copy_source_range_header = header_info_map[
+ COPY_SOURCE_RANGE_HEADER_KEY]
self.date_header = header_info_map[DATE_HEADER_KEY]
self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY]
self.metadata_directive_header = (
diff --git a/boto/rds/__init__.py b/boto/rds/__init__.py
index bf69b1c..8190eef 100644
--- a/boto/rds/__init__.py
+++ b/boto/rds/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -30,6 +30,7 @@
from boto.rds.event import Event
from boto.rds.regioninfo import RDSRegionInfo
+
def regions():
"""
Get all available regions for the RDS service.
@@ -38,30 +39,33 @@
:return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo`
"""
return [RDSRegionInfo(name='us-east-1',
- endpoint='rds.us-east-1.amazonaws.com'),
+ endpoint='rds.amazonaws.com'),
RDSRegionInfo(name='eu-west-1',
endpoint='rds.eu-west-1.amazonaws.com'),
RDSRegionInfo(name='us-west-1',
endpoint='rds.us-west-1.amazonaws.com'),
RDSRegionInfo(name='us-west-2',
endpoint='rds.us-west-2.amazonaws.com'),
+ RDSRegionInfo(name='sa-east-1',
+ endpoint='rds.sa-east-1.amazonaws.com'),
RDSRegionInfo(name='ap-northeast-1',
endpoint='rds.ap-northeast-1.amazonaws.com'),
RDSRegionInfo(name='ap-southeast-1',
endpoint='rds.ap-southeast-1.amazonaws.com')
]
+
def connect_to_region(region_name, **kw_params):
"""
- Given a valid region name, return a
- :class:`boto.ec2.connection.EC2Connection`.
+ Given a valid region name, return a
+ :class:`boto.rds.RDSConnection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object.
:type: str
:param region_name: The name of the region to connect to.
- :rtype: :class:`boto.ec2.connection.EC2Connection` or ``None``
+ :rtype: :class:`boto.rds.RDSConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
@@ -72,16 +76,18 @@
#boto.set_stream_logger('rds')
+
class RDSConnection(AWSQueryConnection):
DefaultRegionName = 'us-east-1'
- DefaultRegionEndpoint = 'rds.amazonaws.com'
+ DefaultRegionEndpoint = 'rds.us-east-1.amazonaws.com'
APIVersion = '2011-04-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
- https_connection_factory=None, region=None, path='/'):
+ https_connection_factory=None, region=None, path='/',
+ security_token=None, validate_certs=True):
if not region:
region = RDSRegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
@@ -91,7 +97,9 @@
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
- https_connection_factory, path)
+ https_connection_factory, path,
+ security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return ['rds']
@@ -131,16 +139,43 @@
return self.get_list('DescribeDBInstances', params,
[('DBInstance', DBInstance)])
- def create_dbinstance(self, id, allocated_storage, instance_class,
- master_username, master_password, port=3306,
- engine='MySQL5.1', db_name=None, param_group=None,
- security_groups=None, availability_zone=None,
+ def create_dbinstance(self,
+ id,
+ allocated_storage,
+ instance_class,
+ master_username,
+ master_password,
+ port=3306,
+ engine='MySQL5.1',
+ db_name=None,
+ param_group=None,
+ security_groups=None,
+ availability_zone=None,
preferred_maintenance_window=None,
backup_retention_period=None,
preferred_backup_window=None,
multi_az=False,
engine_version=None,
- auto_minor_version_upgrade=True):
+ auto_minor_version_upgrade=True,
+                          character_set_name=None,
+                          db_subnet_group_name=None,
+                          license_model=None,
+                          option_group_name=None,
+ ):
+        # API version: 2012-04-23
+        # Parameter notes:
+        # =================
+        # id should be db_instance_identifier according to the API docs,
+        # but has been left as id for backwards compatibility.
+        #
+        # security_groups should be db_security_groups according to the
+        # API docs, but has been left as security_groups for backwards
+        # compatibility.
+        #
+        # master_password should be master_user_password according to the
+        # API docs, but has been left as master_password for backwards
+        # compatibility.
+        #
+        # instance_class should be db_instance_class according to the API
+        # docs, but has been left as instance_class for backwards
+        # compatibility.
"""
Create a new DBInstance.
@@ -152,7 +187,16 @@
:type allocated_storage: int
:param allocated_storage: Initially allocated storage size, in GBs.
- Valid values are [5-1024]
+                                  Valid values depend on the engine value.
+
+ * MySQL = 5--1024
+ * oracle-se1 = 10--1024
+ * oracle-se = 10--1024
+ * oracle-ee = 10--1024
+ * sqlserver-ee = 200--1024
+ * sqlserver-se = 200--1024
+ * sqlserver-ex = 30--1024
+ * sqlserver-web = 30--1024
:type instance_class: str
:param instance_class: The compute and memory capacity of
@@ -166,24 +210,68 @@
* db.m2.4xlarge
:type engine: str
- :param engine: Name of database engine. Must be MySQL5.1 for now.
+        :param engine: Name of database engine. Defaults to MySQL but can be:
+
+ * MySQL
+ * oracle-se1
+ * oracle-se
+ * oracle-ee
+ * sqlserver-ee
+ * sqlserver-se
+ * sqlserver-ex
+ * sqlserver-web
:type master_username: str
:param master_username: Name of master user for the DBInstance.
- Must be 1-15 alphanumeric characters, first
- must be a letter.
+
+            * MySQL must be:
+ - 1--16 alphanumeric characters
+ - first character must be a letter
+ - cannot be a reserved MySQL word
+
+ * Oracle must be:
+ - 1--30 alphanumeric characters
+ - first character must be a letter
+ - cannot be a reserved Oracle word
+
+ * SQL Server must be:
+ - 1--128 alphanumeric characters
+ - first character must be a letter
+                - cannot be a reserved SQL Server word
:type master_password: str
:param master_password: Password of master user for the DBInstance.
- Must be 4-16 alphanumeric characters.
+
+ * MySQL must be 8--41 alphanumeric characters
+
+ * Oracle must be 8--30 alphanumeric characters
+
+ * SQL Server must be 8--128 alphanumeric characters.
:type port: int
:param port: Port number on which database accepts connections.
- Valid values [1115-65535]. Defaults to 3306.
+ Valid values [1115-65535].
+
+ * MySQL defaults to 3306
+
+ * Oracle defaults to 1521
+
+ * SQL Server defaults to 1433 and _cannot_ be 1434 or 3389
:type db_name: str
- :param db_name: Name of a database to create when the DBInstance
- is created. Default is to create no databases.
+ :param db_name: * MySQL:
+ Name of a database to create when the DBInstance
+ is created. Default is to create no databases.
+
+ Must contain 1--64 alphanumeric characters and cannot
+ be a reserved MySQL word.
+
+ * Oracle:
+ The Oracle System ID (SID) of the created DB instances.
+ Default is ORCL. Cannot be longer than 8 characters.
+
+ * SQL Server:
+ Not applicable and must be None.
:type param_group: str
:param param_group: Name of DBParameterGroup to associate with
@@ -191,8 +279,8 @@
no parameter groups will be used.
:type security_groups: list of str or list of DBSecurityGroup objects
- :param security_groups: List of names of DBSecurityGroup to authorize on
- this DBInstance.
+ :param security_groups: List of names of DBSecurityGroup to
+ authorize on this DBInstance.
:type availability_zone: str
:param availability_zone: Name of the availability zone to place
@@ -218,8 +306,18 @@
:param multi_az: If True, specifies the DB Instance will be
deployed in multiple availability zones.
+ For Microsoft SQL Server, must be set to false. You cannot set
+ the AvailabilityZone parameter if the MultiAZ parameter is
+ set to true.
+
:type engine_version: str
- :param engine_version: Version number of the database engine to use.
+ :param engine_version: The version number of the database engine to use.
+
+ * MySQL format example: 5.1.42
+
+ * Oracle format example: 11.2.0.2.v2
+
+ * SQL Server format example: 10.50.2789.0.v1
:type auto_minor_version_upgrade: bool
:param auto_minor_version_upgrade: Indicates that minor engine
@@ -227,22 +325,78 @@
automatically to the Read Replica
during the maintenance window.
Default is True.
+ :type character_set_name: str
+ :param character_set_name: For supported engines, indicates that the DB Instance
+ should be associated with the specified CharacterSet.
+
+ :type db_subnet_group_name: str
+ :param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance.
+ If there is no DB Subnet Group, then it is a non-VPC DB
+ instance.
+
+ :type license_model: str
+ :param license_model: License model information for this DB Instance.
+
+ Valid values are:
+ - license-included
+ - bring-your-own-license
+ - general-public-license
+
+ Not all license types are supported on all engines.
+
+ :type option_group_name: str
+ :param option_group_name: Indicates that the DB Instance should be associated
+ with the specified option group.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The new db instance.
"""
- params = {'DBInstanceIdentifier' : id,
- 'AllocatedStorage' : allocated_storage,
- 'DBInstanceClass' : instance_class,
- 'Engine' : engine,
- 'MasterUsername' : master_username,
- 'MasterUserPassword' : master_password}
- if port:
- params['Port'] = port
- if db_name:
- params['DBName'] = db_name
- if param_group:
- params['DBParameterGroupName'] = param_group
+ # boto argument alignment with AWS API parameter names:
+ # =====================================================
+ # arg => AWS parameter
+ # allocated_storage => AllocatedStorage
+ # auto_minor_version_upgrade => AutoMinorVersionUpgrade
+ # availability_zone => AvailabilityZone
+ # backup_retention_period => BackupRetentionPeriod
+ # character_set_name => CharacterSetName
+ # db_name => DBName
+ # db_subnet_group_name => DBSubnetGroupName
+ # engine => Engine
+ # engine_version => EngineVersion
+ # id => DBInstanceIdentifier
+ # instance_class => DBInstanceClass
+ # license_model => LicenseModel
+ # master_password => MasterUserPassword
+ # master_username => MasterUsername
+ # multi_az => MultiAZ
+ # option_group_name => OptionGroupName
+ # param_group => DBParameterGroupName
+ # port => Port
+ # preferred_backup_window => PreferredBackupWindow
+ # preferred_maintenance_window => PreferredMaintenanceWindow
+ # security_groups => DBSecurityGroups.member.N
+ params = {
+ 'AllocatedStorage': allocated_storage,
+ 'AutoMinorVersionUpgrade': str(auto_minor_version_upgrade).lower() if auto_minor_version_upgrade is not None else None,
+ 'AvailabilityZone': availability_zone,
+ 'BackupRetentionPeriod': backup_retention_period,
+ 'CharacterSetName': character_set_name,
+ 'DBInstanceClass': instance_class,
+ 'DBInstanceIdentifier': id,
+ 'DBName': db_name,
+ 'DBParameterGroupName': param_group,
+ 'DBSubnetGroupName': db_subnet_group_name,
+ 'Engine': engine,
+ 'EngineVersion': engine_version,
+ 'LicenseModel': license_model,
+ 'MasterUsername': master_username,
+ 'MasterUserPassword': master_password,
+ 'MultiAZ': str(multi_az).lower() if multi_az else None,
+ 'OptionGroupName': option_group_name,
+ 'Port': port,
+ 'PreferredBackupWindow': preferred_backup_window,
+ 'PreferredMaintenanceWindow': preferred_maintenance_window,
+ }
if security_groups:
l = []
for group in security_groups:
@@ -251,20 +405,10 @@
else:
l.append(group)
self.build_list_params(params, l, 'DBSecurityGroups.member')
- if availability_zone:
- params['AvailabilityZone'] = availability_zone
- if preferred_maintenance_window:
- params['PreferredMaintenanceWindow'] = preferred_maintenance_window
- if backup_retention_period is not None:
- params['BackupRetentionPeriod'] = backup_retention_period
- if preferred_backup_window:
- params['PreferredBackupWindow'] = preferred_backup_window
- if multi_az:
- params['MultiAZ'] = 'true'
- if engine_version:
- params['EngineVersion'] = engine_version
- if auto_minor_version_upgrade is False:
- params['AutoMinorVersionUpgrade'] = 'false'
+
+ # Remove any params still set to None (but keep falsy values
+ # such as a backup_retention_period of 0)
+ for k, v in params.items():
+ if v is None:
+ del params[k]
return self.get_object('CreateDBInstance', params, DBInstance)
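A minimal usage sketch of the reworked create_dbinstance (not part of the patch; assumes credentials are available through the usual boto mechanisms, and the instance identifier is hypothetical):

    import boto.rds

    conn = boto.rds.connect_to_region('us-east-1')
    # Oracle engines accept 10-1024 GB and default to port 1521
    db = conn.create_dbinstance(id='mydb',
                                allocated_storage=10,
                                instance_class='db.m1.small',
                                master_username='master',
                                master_password='mysecretpw1',
                                port=1521,
                                engine='oracle-se1',
                                license_model='license-included')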
@@ -320,8 +464,8 @@
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The new db instance.
"""
- params = {'DBInstanceIdentifier' : id,
- 'SourceDBInstanceIdentifier' : source_id}
+ params = {'DBInstanceIdentifier': id,
+ 'SourceDBInstanceIdentifier': source_id}
if instance_class:
params['DBInstanceClass'] = instance_class
if port:
@@ -407,7 +551,7 @@
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The modified db instance.
"""
- params = {'DBInstanceIdentifier' : id}
+ params = {'DBInstanceIdentifier': id}
if param_group:
params['DBParameterGroupName'] = param_group
if security_groups:
@@ -459,7 +603,7 @@
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The deleted db instance.
"""
- params = {'DBInstanceIdentifier' : id}
+ params = {'DBInstanceIdentifier': id}
if skip_final_snapshot:
params['SkipFinalSnapshot'] = 'true'
else:
@@ -467,7 +611,6 @@
params['FinalDBSnapshotIdentifier'] = final_snapshot_id
return self.get_object('DeleteDBInstance', params, DBInstance)
-
def reboot_dbinstance(self, id):
"""
Reboot DBInstance.
@@ -478,7 +621,7 @@
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The rebooting db instance.
"""
- params = {'DBInstanceIdentifier' : id}
+ params = {'DBInstanceIdentifier': id}
return self.get_object('RebootDBInstance', params, DBInstance)
# DBParameterGroup methods
@@ -539,7 +682,7 @@
:rtype: :class:`boto.ec2.parametergroup.ParameterGroup`
:return: The ParameterGroup
"""
- params = {'DBParameterGroupName' : groupname}
+ params = {'DBParameterGroupName': groupname}
if source:
params['Source'] = source
if max_records:
@@ -568,7 +711,7 @@
"""
params = {'DBParameterGroupName': name,
'DBParameterGroupFamily': engine,
- 'Description' : description}
+ 'Description': description}
return self.get_object('CreateDBParameterGroup', params, ParameterGroup)
def modify_parameter_group(self, name, parameters=None):
@@ -604,7 +747,7 @@
:param parameters: The parameters to reset. If not supplied,
all parameters will be reset.
"""
- params = {'DBParameterGroupName':name}
+ params = {'DBParameterGroupName': name}
if reset_all_params:
params['ResetAllParameters'] = 'true'
else:
@@ -621,7 +764,7 @@
:type key_name: string
:param key_name: The name of the DBSecurityGroup to delete
"""
- params = {'DBParameterGroupName':name}
+ params = {'DBParameterGroupName': name}
return self.get_status('DeleteDBParameterGroup', params)
# DBSecurityGroup methods
@@ -673,7 +816,7 @@
:rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
:return: The newly created DBSecurityGroup
"""
- params = {'DBSecurityGroupName':name}
+ params = {'DBSecurityGroupName': name}
if description:
params['DBSecurityGroupDescription'] = description
group = self.get_object('CreateDBSecurityGroup', params,
@@ -689,7 +832,7 @@
:type key_name: string
:param key_name: The name of the DBSecurityGroup to delete
"""
- params = {'DBSecurityGroupName':name}
+ params = {'DBSecurityGroupName': name}
return self.get_status('DeleteDBSecurityGroup', params)
def authorize_dbsecurity_group(self, group_name, cidr_ip=None,
@@ -720,7 +863,7 @@
:rtype: bool
:return: True if successful.
"""
- params = {'DBSecurityGroupName':group_name}
+ params = {'DBSecurityGroupName': group_name}
if ec2_security_group_name:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id:
@@ -757,7 +900,7 @@
:rtype: bool
:return: True if successful.
"""
- params = {'DBSecurityGroupName':group_name}
+ params = {'DBSecurityGroupName': group_name}
if ec2_security_group_name:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id:
@@ -826,8 +969,8 @@
:rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
:return: The newly created DBSnapshot
"""
- params = {'DBSnapshotIdentifier' : snapshot_id,
- 'DBInstanceIdentifier' : dbinstance_id}
+ params = {'DBSnapshotIdentifier': snapshot_id,
+ 'DBInstanceIdentifier': dbinstance_id}
return self.get_object('CreateDBSnapshot', params, DBSnapshot)
def delete_dbsnapshot(self, identifier):
@@ -837,13 +980,14 @@
:type identifier: string
:param identifier: The identifier of the DBSnapshot to delete
"""
- params = {'DBSnapshotIdentifier' : identifier}
+ params = {'DBSnapshotIdentifier': identifier}
return self.get_object('DeleteDBSnapshot', params, DBSnapshot)
def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id,
instance_class, port=None,
- availability_zone=None):
-
+ availability_zone=None,
+ multi_az=None,
+ auto_minor_version_upgrade=None):
"""
Create a new DBInstance from a DB snapshot.
@@ -868,16 +1012,32 @@
:param availability_zone: Name of the availability zone to place
DBInstance into.
+ :type multi_az: bool
+ :param multi_az: If True, specifies the DB Instance will be
+ deployed in multiple availability zones.
+ If omitted, the API default is used.
+
+ :type auto_minor_version_upgrade: bool
+ :param auto_minor_version_upgrade: Indicates that minor engine
+ upgrades will be applied automatically to the
+ DB Instance during the maintenance window.
+ If omitted, the API default is used.
+
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The newly created DBInstance
"""
- params = {'DBSnapshotIdentifier' : identifier,
- 'DBInstanceIdentifier' : instance_id,
- 'DBInstanceClass' : instance_class}
+ params = {'DBSnapshotIdentifier': identifier,
+ 'DBInstanceIdentifier': instance_id,
+ 'DBInstanceClass': instance_class}
if port:
params['Port'] = port
if availability_zone:
params['AvailabilityZone'] = availability_zone
+ if multi_az is not None:
+ params['MultiAZ'] = str(multi_az).lower()
+ if auto_minor_version_upgrade is not None:
+ params['AutoMinorVersionUpgrade'] = str(auto_minor_version_upgrade).lower()
return self.get_object('RestoreDBInstanceFromDBSnapshot',
params, DBInstance)
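A short sketch of the two new restore options (not part of the patch; the snapshot and instance identifiers are hypothetical):

    import boto.rds

    conn = boto.rds.connect_to_region('us-east-1')
    restored = conn.restore_dbinstance_from_dbsnapshot(
        identifier='mydb-final-snap',
        instance_id='mydb-restored',
        instance_class='db.m1.small',
        multi_az=True,                      # now sent as MultiAZ=true
        auto_minor_version_upgrade=False)   # now sent explicitly as false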
@@ -923,8 +1083,8 @@
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The newly created DBInstance
"""
- params = {'SourceDBInstanceIdentifier' : source_instance_id,
- 'TargetDBInstanceIdentifier' : target_instance_id}
+ params = {'SourceDBInstanceIdentifier': source_instance_id,
+ 'TargetDBInstanceIdentifier': target_instance_id}
if use_latest:
params['UseLatestRestorableTime'] = 'true'
elif restore_time:
@@ -996,5 +1156,3 @@
if marker:
params['Marker'] = marker
return self.get_list('DescribeEvents', params, [('Event', Event)])
-
-
diff --git a/boto/rds/dbinstance.py b/boto/rds/dbinstance.py
index 02f9af6..f6c2787 100644
--- a/boto/rds/dbinstance.py
+++ b/boto/rds/dbinstance.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -22,11 +22,50 @@
from boto.rds.dbsecuritygroup import DBSecurityGroup
from boto.rds.parametergroup import ParameterGroup
+
class DBInstance(object):
"""
- Represents a RDS DBInstance
+ Represents a RDS DBInstance
+
+ Properties reference available from the AWS documentation at
+ http://goo.gl/sC2Kn
+
+ :ivar connection: connection
+ :ivar id: The name and identifier of the DBInstance
+ :ivar create_time: The date and time of creation
+ :ivar engine: The database engine being used
+ :ivar status: The current status of the database, as a string (e.g. "available")
+ :ivar allocated_storage: The size of the disk in gigabytes (int).
+ :ivar endpoint: A tuple that describes the hostname and port of
+ the instance. This is only available when the database is
+ in status "available".
+ :ivar instance_class: Contains the name of the compute and memory
+ capacity class of the DB Instance.
+ :ivar master_username: The username that is set as master username
+ at creation time.
+ :ivar parameter_group: Provides the list of DB Parameter Groups
+ applied to this DB Instance.
+ :ivar security_group: Provides a list of DB Security Group elements
+ containing only DBSecurityGroup.Name and DBSecurityGroup.Status
+ subelements.
+ :ivar availability_zone: Specifies the name of the Availability Zone
+ the DB Instance is located in.
+ :ivar backup_retention_period: Specifies the number of days for
+ which automatic DB Snapshots are retained.
+ :ivar preferred_backup_window: Specifies the daily time range during
+ which automated backups are created if automated backups are
+ enabled, as determined by the backup_retention_period.
+ :ivar preferred_maintenance_window: Specifies the weekly time
+ range (in UTC) during which system maintenance can occur. (string)
+ :ivar latest_restorable_time: Specifies the latest time to which
+ a database can be restored with point-in-time restore. TODO: type?
+ :ivar multi_az: Boolean that specifies if the DB Instance is a
+ Multi-AZ deployment.
+ :ivar pending_modified_values: Specifies that changes to the
+ DB Instance are pending. This element is only included when changes
+ are pending. Specific changes are identified by subelements.
"""
-
+
def __init__(self, connection=None, id=None):
self.connection = connection
self.id = id
@@ -112,10 +151,10 @@
def snapshot(self, snapshot_id):
"""
Create a new DB snapshot of this DBInstance.
-
+
:type identifier: string
:param identifier: The identifier for the DBSnapshot
-
+
:rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
:return: The newly created DBSnapshot
"""
@@ -124,7 +163,7 @@
def reboot(self):
"""
Reboot this DBInstance
-
+
:rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
:return: The newly created DBSnapshot
"""
@@ -137,10 +176,9 @@
:type validate: bool
:param validate: By default, if EC2 returns no data about the
- instance the update method returns quietly. If
- the validate param is True, however, it will
- raise a ValueError exception if no data is
- returned from EC2.
+ instance the update method returns quietly. If the
+ validate param is True, however, it will raise a
+ ValueError exception if no data is returned from EC2.
"""
rs = self.connection.get_all_dbinstances(self.id)
if len(rs) > 0:
@@ -151,21 +189,19 @@
raise ValueError('%s is not a valid Instance ID' % self.id)
return self.status
-
def stop(self, skip_final_snapshot=False, final_snapshot_id=''):
"""
Delete this DBInstance.
:type skip_final_snapshot: bool
- :param skip_final_snapshot: This parameter determines whether a final
- db snapshot is created before the instance
- is deleted. If True, no snapshot is created.
- If False, a snapshot is created before
- deleting the instance.
+ :param skip_final_snapshot: This parameter determines whether
+ a final db snapshot is created before the instance is
+ deleted. If True, no snapshot is created. If False, a
+ snapshot is created before deleting the instance.
:type final_snapshot_id: str
:param final_snapshot_id: If a final snapshot is requested, this
- is the identifier used for that snapshot.
+ is the identifier used for that snapshot.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The deleted db instance.
@@ -186,57 +222,54 @@
Modify this DBInstance.
:type security_groups: list of str or list of DBSecurityGroup objects
- :param security_groups: List of names of DBSecurityGroup to authorize on
- this DBInstance.
+ :param security_groups: List of names of DBSecurityGroup to
+ authorize on this DBInstance.
:type preferred_maintenance_window: str
- :param preferred_maintenance_window: The weekly time range (in UTC)
- during which maintenance can
- occur.
- Default is Sun:05:00-Sun:09:00
+ :param preferred_maintenance_window: The weekly time range (in
+ UTC) during which maintenance can occur. Default is
+ Sun:05:00-Sun:09:00
:type master_password: str
:param master_password: Password of master user for the DBInstance.
- Must be 4-15 alphanumeric characters.
+ Must be 4-15 alphanumeric characters.
:type allocated_storage: int
:param allocated_storage: The new allocated storage size, in GBs.
- Valid values are [5-1024]
+ Valid values are [5-1024]
:type instance_class: str
:param instance_class: The compute and memory capacity of the
- DBInstance. Changes will be applied at
- next maintenance window unless
- apply_immediately is True.
+ DBInstance. Changes will be applied at next maintenance
+ window unless apply_immediately is True.
- Valid values are:
-
- * db.m1.small
- * db.m1.large
- * db.m1.xlarge
- * db.m2.xlarge
- * db.m2.2xlarge
- * db.m2.4xlarge
+ Valid values are:
+
+ * db.m1.small
+ * db.m1.large
+ * db.m1.xlarge
+ * db.m2.xlarge
+ * db.m2.2xlarge
+ * db.m2.4xlarge
:type apply_immediately: bool
- :param apply_immediately: If true, the modifications will be applied
- as soon as possible rather than waiting for
- the next preferred maintenance window.
+ :param apply_immediately: If true, the modifications will be
+ applied as soon as possible rather than waiting for the
+ next preferred maintenance window.
:type backup_retention_period: int
- :param backup_retention_period: The number of days for which automated
- backups are retained. Setting this to
- zero disables automated backups.
+ :param backup_retention_period: The number of days for which
+ automated backups are retained. Setting this to zero
+ disables automated backups.
:type preferred_backup_window: str
- :param preferred_backup_window: The daily time range during which
- automated backups are created (if
- enabled). Must be in h24:mi-hh24:mi
- format (UTC).
+ :param preferred_backup_window: The daily time range during
+ which automated backups are created (if enabled). Must be
+ in h24:mi-hh24:mi format (UTC).
:type multi_az: bool
:param multi_az: If True, specifies the DB Instance will be
- deployed in multiple availability zones.
+ deployed in multiple availability zones.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The modified db instance.
@@ -252,7 +285,8 @@
preferred_backup_window,
multi_az,
apply_immediately)
-
+
+
class PendingModifiedValues(dict):
def startElement(self, name, attrs, connection):
@@ -261,4 +295,3 @@
def endElement(self, name, value, connection):
if name != 'PendingModifiedValues':
self[name] = value
-
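The reflowed DBInstance docstrings above describe existing behaviour; a minimal sketch of the update/stop flow they document (not part of the patch; the instance id is hypothetical):

    import boto.rds

    conn = boto.rds.connect_to_region('us-east-1')
    db = conn.get_all_dbinstances('mydb')[0]
    db.update(validate=True)   # raises ValueError if the id is unknown
    # Deletes the instance, taking a final snapshot first
    db.stop(skip_final_snapshot=False, final_snapshot_id='mydb-final')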
diff --git a/boto/rds/dbsecuritygroup.py b/boto/rds/dbsecuritygroup.py
index 1555ca0..6a69ddb 100644
--- a/boto/rds/dbsecuritygroup.py
+++ b/boto/rds/dbsecuritygroup.py
@@ -25,7 +25,19 @@
from boto.ec2.securitygroup import SecurityGroup
class DBSecurityGroup(object):
+ """
+ Represents an RDS database security group
+ Properties reference available from the AWS documentation at
+ http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html
+
+ :ivar Status: The current status of the security group. Possible
+ values are [ active, ? ]; the reference documentation does not
+ enumerate the full set.
+ :ivar connection: boto.rds.RDSConnection associated with the current object
+ :ivar description: The description of the security group
+ :ivar ec2_groups: List of EC2SecurityGroup objects that this security group PERMITS
+ :ivar ip_ranges: List of IPRange objects (containing CIDR addresses) that this security group PERMITS
+ :ivar name: Name of the security group
+ :ivar owner_id: ID of the owner of the security group. Can be 'None'
+ """
def __init__(self, connection=None, owner_id=None,
name=None, description=None):
self.connection = connection
@@ -117,6 +129,9 @@
self.name, cidr_ip=cidr_ip)
class IPRange(object):
+ """
+ Describes a CIDR address range for use in a DBSecurityGroup
+ """
def __init__(self, parent=None):
self.parent = parent
@@ -138,6 +153,9 @@
setattr(self, name, value)
class EC2SecurityGroup(object):
+ """
+ Describes an EC2 security group for use in a DBSecurityGroup
+ """
def __init__(self, parent=None):
self.parent = parent
@@ -156,5 +174,4 @@
elif name == 'EC2SecurityGroupOwnerId':
self.owner_id = value
else:
- setattr(self, name, value)
-
+ setattr(self, name, value)
\ No newline at end of file
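A minimal sketch exercising the ivars documented above (not part of the patch; the group name and CIDR are hypothetical):

    import boto.rds

    conn = boto.rds.connect_to_region('us-east-1')
    group = conn.create_dbsecurity_group('webdb', 'Access for the web tier')
    group.authorize(cidr_ip='10.0.0.0/24')  # shows up in ip_ranges on re-fetch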
diff --git a/boto/rds/dbsnapshot.py b/boto/rds/dbsnapshot.py
index 78d0230..0ea7f94 100644
--- a/boto/rds/dbsnapshot.py
+++ b/boto/rds/dbsnapshot.py
@@ -22,6 +22,22 @@
class DBSnapshot(object):
"""
Represents a RDS DB Snapshot
+
+ Properties reference available from the AWS documentation at
+ http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DBSnapshot.html
+
+ :ivar EngineVersion: Specifies the version of the database engine
+ :ivar LicenseModel: License model information for the restored DB instance
+ :ivar allocated_storage: Specifies the allocated storage size in gigabytes (GB)
+ :ivar availability_zone: Specifies the name of the Availability Zone the DB Instance was located in at the time of the DB Snapshot
+ :ivar connection: boto.rds.RDSConnection associated with the current object
+ :ivar engine: Specifies the name of the database engine
+ :ivar id: Specifies the identifier for the DB Snapshot (DBSnapshotIdentifier)
+ :ivar instance_create_time: Specifies the time (UTC) when the snapshot was taken
+ :ivar instance_id: Specifies the DBInstanceIdentifier of the DB
+ Instance this DB Snapshot was created from
+ :ivar master_username: Provides the master username for the DB Instance
+ :ivar port: Specifies the port that the database engine was listening on at the time of the snapshot
+ :ivar snapshot_create_time: Provides the time (UTC) when the snapshot was taken
+ :ivar status: Specifies the status of this DB Snapshot. Possible
+ values are [ available, backing-up, creating, deleted, deleting,
+ failed, modifying, rebooting, resetting-master-credentials ]
"""
def __init__(self, connection=None, id=None):
@@ -68,7 +84,4 @@
elif name == 'SnapshotTime':
self.time = value
else:
- setattr(self, name, value)
-
-
-
+ setattr(self, name, value)
\ No newline at end of file
diff --git a/boto/rds/parametergroup.py b/boto/rds/parametergroup.py
index 44d00e2..e52890c 100644
--- a/boto/rds/parametergroup.py
+++ b/boto/rds/parametergroup.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -28,7 +28,7 @@
self.description = None
self.engine = None
self._current_param = None
-
+
def __repr__(self):
return 'ParameterGroup:%s' % self.name
@@ -60,7 +60,7 @@
def get_params(self):
pg = self.connection.get_all_dbparameters(self.name)
self.update(pg)
-
+
def add_param(self, name, value, apply_method):
param = Parameter()
param.name = name
@@ -79,12 +79,12 @@
ValidSources = ['user', 'system', 'engine-default']
ValidApplyTypes = ['static', 'dynamic']
ValidApplyMethods = ['immediate', 'pending-reboot']
-
+
def __init__(self, group=None, name=None):
self.group = group
self.name = name
self._value = None
- self.type = str
+ self.type = 'string'
self.source = None
self.is_modifiable = True
self.description = None
@@ -127,18 +127,18 @@
prefix = 'Parameters.member.%d.' % i
if self.name:
d[prefix+'ParameterName'] = self.name
- if self._value:
+ if self._value is not None:
d[prefix+'ParameterValue'] = self._value
if self.apply_type:
d[prefix+'ApplyMethod'] = self.apply_method
def _set_string_value(self, value):
if not isinstance(value, str) or isinstance(value, unicode):
- raise ValueError, 'value must be of type str'
+ raise ValueError('value must be of type str')
if self.allowed_values:
choices = self.allowed_values.split(',')
if value not in choices:
- raise ValueError, 'value must be in %s' % self.allowed_values
+ raise ValueError('value must be in %s' % self.allowed_values)
self._value = value
def _set_integer_value(self, value):
@@ -148,10 +148,10 @@
if self.allowed_values:
min, max = self.allowed_values.split('-')
if value < int(min) or value > int(max):
- raise ValueError, 'range is %s' % self.allowed_values
+ raise ValueError('range is %s' % self.allowed_values)
self._value = value
else:
- raise ValueError, 'value must be integer'
+ raise ValueError('value must be integer')
def _set_boolean_value(self, value):
if isinstance(value, bool):
@@ -162,8 +162,8 @@
else:
self._value = False
else:
- raise ValueError, 'value must be boolean'
-
+ raise ValueError('value must be boolean')
+
def set_value(self, value):
if self.type == 'string':
self._set_string_value(value)
@@ -172,7 +172,7 @@
elif self.type == 'boolean':
self._set_boolean_value(value)
else:
- raise TypeError, 'unknown type (%s)' % self.type
+ raise TypeError('unknown type (%s)' % self.type)
def get_value(self):
if self._value == None:
@@ -188,7 +188,7 @@
self._set_boolean_value(self._value)
return self._value
else:
- raise TypeError, 'unknown type (%s)' % self.type
+ raise TypeError('unknown type (%s)' % self.type)
value = property(get_value, set_value, 'The value of the parameter')
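With Parameter.type now initialised to 'string', set_value dispatches on the type reported by RDS rather than on the Python str class. A minimal sketch (not part of the patch; the group and parameter names are hypothetical and engine-specific):

    import boto.rds

    conn = boto.rds.connect_to_region('us-east-1')
    pg = conn.get_all_dbparameters('mygroup')
    param = pg['max_connections']     # ParameterGroup is a dict subclass
    param.value = 200                 # validated by _set_integer_value
    param.apply_method = 'pending-reboot'
    conn.modify_parameter_group('mygroup', [param])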
diff --git a/boto/regioninfo.py b/boto/regioninfo.py
index 907385f..6e936b3 100644
--- a/boto/regioninfo.py
+++ b/boto/regioninfo.py
@@ -16,16 +16,17 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+
class RegionInfo(object):
"""
Represents an AWS Region
"""
-
+
def __init__(self, connection=None, name=None, endpoint=None,
connection_cls=None):
self.connection = connection
@@ -54,11 +55,9 @@
You may pass any of the arguments accepted by the connection
class's constructor as keyword arguments and they will be
passed along to the connection object.
-
+
:rtype: Connection object
:return: The connection to this regions endpoint
"""
if self.connection_cls:
return self.connection_cls(region=self, **kw_params)
-
-
diff --git a/boto/resultset.py b/boto/resultset.py
index 075fc5e..080290e 100644
--- a/boto/resultset.py
+++ b/boto/resultset.py
@@ -19,6 +19,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+from boto.s3.user import User
+
class ResultSet(list):
"""
The ResultSet is used to pass results back from the Amazon services
@@ -48,7 +50,9 @@
self.markers = []
self.marker = None
self.key_marker = None
+ self.next_marker = None # avail when delimiter used
self.next_key_marker = None
+ self.next_upload_id_marker = None
self.next_version_id_marker = None
self.version_id_marker = None
self.is_truncated = False
@@ -61,6 +65,12 @@
obj = t[1](connection)
self.append(obj)
return obj
+ if name == 'Owner':
+ # Makes owner available for get_service and
+ # perhaps other lists where not handled by
+ # another element.
+ self.owner = User()
+ return self.owner
return None
def to_boolean(self, value, true_value='true'):
@@ -76,6 +86,8 @@
self.marker = value
elif name == 'KeyMarker':
self.key_marker = value
+ elif name == 'NextMarker':
+ self.next_marker = value
elif name == 'NextKeyMarker':
self.next_key_marker = value
elif name == 'VersionIdMarker':
@@ -90,6 +102,8 @@
self.bucket = value
elif name == 'MaxUploads':
self.max_uploads = int(value)
+ elif name == 'MaxItems':
+ self.max_items = int(value)
elif name == 'Prefix':
self.prefix = value
elif name == 'return':
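A minimal sketch of where the new next_marker surfaces (not part of the patch; the bucket name is hypothetical). As noted in the comment above, S3 only returns NextMarker when a delimiter was supplied and the listing is truncated:

    import boto

    conn = boto.connect_s3()
    bucket = conn.get_bucket('mybucket')
    rs = bucket.get_all_keys(max_keys=100, delimiter='/')
    if rs.is_truncated:
        rs = bucket.get_all_keys(max_keys=100, delimiter='/',
                                 marker=rs.next_marker)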
diff --git a/boto/roboto/awsqueryrequest.py b/boto/roboto/awsqueryrequest.py
index 9e05ac6..6d95071 100644
--- a/boto/roboto/awsqueryrequest.py
+++ b/boto/roboto/awsqueryrequest.py
@@ -223,14 +223,14 @@
filter_names = [f['name'] for f in self.Filters]
unknown_filters = [f for f in filters if f not in filter_names]
if unknown_filters:
- raise FilterError, 'Unknown filters: %s' % unknown_filters
+ raise FilterError('Unknown filters: %s' % unknown_filters)
for i, filter in enumerate(self.Filters):
name = filter['name']
if name in filters:
self.request_params['Filter.%d.Name' % (i+1)] = name
for j, value in enumerate(boto.utils.mklist(filters[name])):
Encoder.encode(filter, self.request_params, value,
- 'Filter.%d.Value.%d' % (i+1,j+1))
+ 'Filter.%d.Value.%d' % (i+1, j+1))
def process_args(self, **args):
"""
diff --git a/boto/route53/__init__.py b/boto/route53/__init__.py
index d404bc7..3546d25 100644
--- a/boto/route53/__init__.py
+++ b/boto/route53/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -24,3 +24,52 @@
# this is here for backward compatibility
# originally, the Route53Connection class was defined here
from connection import Route53Connection
+from boto.regioninfo import RegionInfo
+
+
+class Route53RegionInfo(RegionInfo):
+
+ def connect(self, **kw_params):
+ """
+ Connect to this Region's endpoint. Returns a connection
+ object pointing to the endpoint associated with this region.
+ You may pass any of the arguments accepted by the connection
+ class's constructor as keyword arguments and they will be
+ passed along to the connection object.
+
+ :rtype: Connection object
+ :return: The connection to this region's endpoint
+ """
+ if self.connection_cls:
+ return self.connection_cls(host=self.endpoint, **kw_params)
+
+
+def regions():
+ """
+ Get all available regions for the Route53 service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo` instances
+ """
+ return [Route53RegionInfo(name='universal',
+ endpoint='route53.amazonaws.com',
+ connection_cls=Route53Connection)
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ """
+ Given a valid region name, return a
+ :class:`boto.route53.connection.Route53Connection`.
+
+ :type region_name: str
+ :param region_name: The name of the region to connect to.
+
+ :rtype: :class:`boto.route53.connection.Route53Connection` or ``None``
+ :return: A connection to the given region, or None if an invalid region
+ name is given
+ """
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
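Route53 exposes a single 'universal' endpoint, so the new helper only recognises that one region name. A minimal sketch (not part of the patch):

    import boto.route53

    conn = boto.route53.connect_to_region('universal')
    zones = conn.get_all_hosted_zones()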
diff --git a/boto/route53/connection.py b/boto/route53/connection.py
index 7c3f1b8..9e6b38d 100644
--- a/boto/route53/connection.py
+++ b/boto/route53/connection.py
@@ -15,16 +15,16 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-
import xml.sax
import time
import uuid
import urllib
+
import boto
from boto.connection import AWSAuthConnection
from boto import handler
@@ -41,25 +41,29 @@
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
-
+
#boto.set_stream_logger('dns')
+
class Route53Connection(AWSAuthConnection):
DefaultHost = 'route53.amazonaws.com'
"""The default Route53 API endpoint to connect to."""
- Version = '2011-05-05'
+ Version = '2012-02-29'
"""Route53 API version."""
- XMLNameSpace = 'https://route53.amazonaws.com/doc/2011-05-05/'
+ XMLNameSpace = 'https://route53.amazonaws.com/doc/2012-02-29/'
"""XML schema for this Route53 API version."""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
- host=DefaultHost, debug=0):
+ host=DefaultHost, debug=0, security_token=None,
+ validate_certs=True):
AWSAuthConnection.__init__(self, host,
- aws_access_key_id, aws_secret_access_key,
- True, port, proxy, proxy_port, debug=debug)
+ aws_access_key_id, aws_secret_access_key,
+ True, port, proxy, proxy_port, debug=debug,
+ security_token=security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return ['route53']
@@ -68,10 +72,12 @@
if params:
pairs = []
for key, val in params.iteritems():
- if val is None: continue
+ if val is None:
+ continue
pairs.append(key + '=' + urllib.quote(str(val)))
path += '?' + '&'.join(pairs)
- return AWSAuthConnection.make_request(self, action, path, headers, data)
+ return AWSAuthConnection.make_request(self, action, path,
+ headers, data)
# Hosted Zones
@@ -101,7 +107,7 @@
h.parse(body)
if zone_list:
e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
- while e['ListHostedZonesResponse'].has_key('NextMarker'):
+ while 'NextMarker' in e['ListHostedZonesResponse']:
next_marker = e['ListHostedZonesResponse']['NextMarker']
zone_list = e['ListHostedZonesResponse']['HostedZones']
e = self.get_all_hosted_zones(next_marker, zone_list)
@@ -110,7 +116,7 @@
def get_hosted_zone(self, hosted_zone_id):
"""
Get detailed information about a particular Hosted Zone.
-
+
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
@@ -129,11 +135,28 @@
h.parse(body)
return e
+ def get_hosted_zone_by_name(self, hosted_zone_name):
+ """
+ Get detailed information about a particular Hosted Zone,
+ looked up by its fully qualified domain name.
+
+ :type hosted_zone_name: str
+ :param hosted_zone_name: The fully qualified domain name for the Hosted
+ Zone
+
+ """
+ if hosted_zone_name[-1] != '.':
+ hosted_zone_name += '.'
+ all_hosted_zones = self.get_all_hosted_zones()
+ for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
+ # zone names from Route53 are fully qualified, hence the FQDN compare
+ if zone['Name'] == hosted_zone_name:
+ return self.get_hosted_zone(zone['Id'].split('/')[-1])
+
def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
"""
Create a new Hosted Zone. Returns a Python data structure with
information about the newly created Hosted Zone.
-
+
:type domain_name: str
:param domain_name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
@@ -153,20 +176,20 @@
use that.
:type comment: str
- :param comment: Any comments you want to include about the hosted
+ :param comment: Any comments you want to include about the hosted
zone.
"""
if caller_ref is None:
caller_ref = str(uuid.uuid4())
- params = {'name' : domain_name,
- 'caller_ref' : caller_ref,
- 'comment' : comment,
- 'xmlns' : self.XMLNameSpace}
+ params = {'name': domain_name,
+ 'caller_ref': caller_ref,
+ 'comment': comment,
+ 'xmlns': self.XMLNameSpace}
xml = HZXML % params
uri = '/%s/hostedzone' % self.Version
response = self.make_request('POST', uri,
- {'Content-Type' : 'text/xml'}, xml)
+ {'Content-Type': 'text/xml'}, xml)
body = response.read()
boto.log.debug(body)
if response.status == 201:
@@ -179,7 +202,7 @@
raise exception.DNSServerError(response.status,
response.reason,
body)
-
+
def delete_hosted_zone(self, hosted_zone_id):
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('DELETE', uri)
@@ -201,7 +224,7 @@
"""
Retrieve the Resource Record Sets defined for this Hosted Zone.
Returns the raw XML data returned by the Route53 call.
-
+
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
@@ -281,7 +304,7 @@
"""
uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
response = self.make_request('POST', uri,
- {'Content-Type' : 'text/xml'},
+ {'Content-Type': 'text/xml'},
xml_body)
body = response.read()
boto.log.debug(body)
diff --git a/boto/route53/record.py b/boto/route53/record.py
index 6e91a83..f954645 100644
--- a/boto/route53/record.py
+++ b/boto/route53/record.py
@@ -1,4 +1,6 @@
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -15,7 +17,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -24,9 +26,16 @@
from boto.resultset import ResultSet
class ResourceRecordSets(ResultSet):
+ """
+ A list of resource records.
+
+ :ivar hosted_zone_id: The ID of the hosted zone.
+ :ivar comment: A comment that will be stored with the change.
+ :ivar changes: A list of changes.
+ """
ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
- <ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2011-05-05/">
+ <ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2012-02-29/">
<ChangeBatch>
<Comment>%(comment)s</Comment>
<Changes>%(changes)s</Changes>
@@ -38,7 +47,6 @@
%(record)s
</Change>"""
-
def __init__(self, connection=None, hosted_zone_id=None, comment=None):
self.connection = connection
self.hosted_zone_id = hosted_zone_id
@@ -51,9 +59,65 @@
def __repr__(self):
return '<ResourceRecordSets: %s>' % self.hosted_zone_id
- def add_change(self, action, name, type, ttl=600, alias_hosted_zone_id=None, alias_dns_name=None):
- """Add a change request"""
- change = Record(name, type, ttl, alias_hosted_zone_id=alias_hosted_zone_id, alias_dns_name=alias_dns_name)
+ def add_change(self, action, name, type, ttl=600,
+ alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
+ weight=None, region=None):
+ """
+ Add a change request to the set.
+
+ :type action: str
+ :param action: The action to perform ('CREATE'|'DELETE')
+
+ :type name: str
+ :param name: The name of the domain you want to perform the action on.
+
+ :type type: str
+ :param type: The DNS record type. Valid values are:
+
+ * A
+ * AAAA
+ * CNAME
+ * MX
+ * NS
+ * PTR
+ * SOA
+ * SPF
+ * SRV
+ * TXT
+
+ :type ttl: int
+ :param ttl: The resource record cache time to live (TTL), in seconds.
+
+ :type alias_hosted_zone_id: str
+ :param alias_hosted_zone_id: *Alias resource record sets only* The value
+ of the hosted zone ID, CanonicalHostedZoneNameId, for
+ the LoadBalancer.
+
+ :type alias_dns_name: str
+ :param alias_dns_name: *Alias resource record sets only*
+ Information about the domain to which you are redirecting traffic.
+
+ :type identifier: str
+ :param identifier: *Weighted and latency-based resource record sets
+ only* An identifier that differentiates among multiple resource
+ record sets that have the same combination of DNS name and type.
+
+ :type weight: int
+ :param weight: *Weighted resource record sets only* Among resource
+ record sets that have the same combination of DNS name and type,
+ a value that determines what portion of traffic for the current
+ resource record set is routed to the associated location
+
+ :type region: str
+ :param region: *Latency-based resource record sets only* Among resource
+ record sets that have the same combination of DNS name and type,
+ a value that determines which region this resource record set
+ should be associated with for latency-based routing
+ """
+ change = Record(name, type, ttl,
+ alias_hosted_zone_id=alias_hosted_zone_id,
+ alias_dns_name=alias_dns_name, identifier=identifier,
+ weight=weight, region=region)
self.changes.append([action, change])
return change
@@ -75,7 +139,7 @@
return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())
def endElement(self, name, value, connection):
- """Overwritten to also add the NextRecordName and
+ """Overwritten to also add the NextRecordName and
NextRecordType to the base object"""
if name == 'NextRecordName':
self.next_record_name = value
@@ -104,9 +168,20 @@
XMLBody = """<ResourceRecordSet>
<Name>%(name)s</Name>
<Type>%(type)s</Type>
+ %(weight)s
%(body)s
</ResourceRecordSet>"""
+ WRRBody = """
+ <SetIdentifier>%(identifier)s</SetIdentifier>
+ <Weight>%(weight)s</Weight>
+ """
+
+ RRRBody = """
+ <SetIdentifier>%(identifier)s</SetIdentifier>
+ <Region>%(region)s</Region>
+ """
+
ResourceRecordsBody = """
<TTL>%(ttl)s</TTL>
<ResourceRecords>
@@ -122,7 +197,11 @@
<DNSName>%s</DNSName>
</AliasTarget>"""
- def __init__(self, name=None, type=None, ttl=600, resource_records=None, alias_hosted_zone_id=None, alias_dns_name=None):
+
+
+ def __init__(self, name=None, type=None, ttl=600, resource_records=None,
+ alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
+ weight=None, region=None):
self.name = name
self.type = type
self.ttl = ttl
@@ -131,7 +210,10 @@
self.resource_records = resource_records
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
-
+ self.identifier = identifier
+ self.weight = weight
+ self.region = region
+
def add_value(self, value):
"""Add a resource record value"""
self.resource_records.append(value)
@@ -155,20 +237,35 @@
"ttl": self.ttl,
"records": records,
}
+ weight = ""
+ if self.identifier is not None and self.weight is not None:
+ weight = self.WRRBody % {"identifier": self.identifier,
+ "weight": self.weight}
+ elif self.identifier is not None and self.region is not None:
+ weight = self.RRRBody % {"identifier": self.identifier,
+ "region": self.region}
+
params = {
"name": self.name,
"type": self.type,
+ "weight": weight,
"body": body,
}
return self.XMLBody % params
def to_print(self):
+ rr = ""
if self.alias_hosted_zone_id != None and self.alias_dns_name != None:
# Show alias
- return 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
+ rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
else:
# Show resource record(s)
- return ",".join(self.resource_records)
+ rr = ",".join(self.resource_records)
+
+ if self.identifier is not None and self.weight is not None:
+ rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
+ elif self.identifier is not None and self.region is not None:
+ rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region)
+
+ return rr
def endElement(self, name, value, connection):
if name == 'Name':
@@ -183,6 +280,12 @@
self.alias_hosted_zone_id = value
elif name == 'DNSName':
self.alias_dns_name = value
+ elif name == 'SetIdentifier':
+ self.identifier = value
+ elif name == 'Weight':
+ self.weight = value
+ elif name == 'Region':
+ self.region = value
def startElement(self, name, attrs, connection):
return None
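A minimal sketch of the new weighted resource record set support (not part of the patch; the zone id and names are hypothetical):

    import boto.route53
    from boto.route53.record import ResourceRecordSets

    conn = boto.route53.connect_to_region('universal')
    rrs = ResourceRecordSets(conn, 'Z123EXAMPLE')
    change = rrs.add_change('CREATE', 'www.example.com.', 'A', ttl=60,
                            identifier='web-1', weight=10)
    change.add_value('192.0.2.10')
    rrs.commit()   # POSTs the ChangeResourceRecordSets XML above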
diff --git a/boto/s3/__init__.py b/boto/s3/__init__.py
index f3f4c1e..5db0d62 100644
--- a/boto/s3/__init__.py
+++ b/boto/s3/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
@@ -16,9 +16,63 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
+from boto.regioninfo import RegionInfo
+
+
+class S3RegionInfo(RegionInfo):
+
+ def connect(self, **kw_params):
+ """
+ Connect to this Region's endpoint. Returns a connection
+ object pointing to the endpoint associated with this region.
+ You may pass any of the arguments accepted by the connection
+ class's constructor as keyword arguments and they will be
+ passed along to the connection object.
+
+ :rtype: Connection object
+ :return: The connection to this region's endpoint
+ """
+ if self.connection_cls:
+ return self.connection_cls(host=self.endpoint, **kw_params)
+
+
+def regions():
+ """
+ Get all available regions for the Amazon S3 service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from .connection import S3Connection
+ return [S3RegionInfo(name='us-east-1',
+ endpoint='s3.amazonaws.com',
+ connection_cls=S3Connection),
+ S3RegionInfo(name='us-west-1',
+ endpoint='s3-us-west-1.amazonaws.com',
+ connection_cls=S3Connection),
+ S3RegionInfo(name='us-west-2',
+ endpoint='s3-us-west-2.amazonaws.com',
+ connection_cls=S3Connection),
+ S3RegionInfo(name='ap-northeast-1',
+ endpoint='s3-ap-northeast-1.amazonaws.com',
+ connection_cls=S3Connection),
+ S3RegionInfo(name='ap-southeast-1',
+ endpoint='s3-ap-southeast-1.amazonaws.com',
+ connection_cls=S3Connection),
+ S3RegionInfo(name='eu-west-1',
+ endpoint='s3-eu-west-1.amazonaws.com',
+ connection_cls=S3Connection),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
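A minimal sketch of the new region helper (not part of the patch; the bucket name is hypothetical):

    import boto.s3

    conn = boto.s3.connect_to_region('us-west-2')
    bucket = conn.lookup('mybucket')   # None if the bucket does not exist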
diff --git a/boto/s3/acl.py b/boto/s3/acl.py
index 6039683..a7bca8c 100644
--- a/boto/s3/acl.py
+++ b/boto/s3/acl.py
@@ -24,7 +24,8 @@
CannedACLStrings = ['private', 'public-read',
'public-read-write', 'authenticated-read',
- 'bucket-owner-read', 'bucket-owner-full-control']
+ 'bucket-owner-read', 'bucket-owner-full-control',
+ 'log-delivery-write']
class Policy:
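The newly allowed canned ACL is what S3 server access logging requires on a log target bucket. A minimal sketch (not part of the patch; the bucket name is hypothetical):

    import boto

    conn = boto.connect_s3()
    logs = conn.get_bucket('mybucket-logs')
    logs.set_acl('log-delivery-write')   # lets the LogDelivery group write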
diff --git a/boto/s3/bucket.py b/boto/s3/bucket.py
index 10200e3..078f056 100644
--- a/boto/s3/bucket.py
+++ b/boto/s3/bucket.py
@@ -16,7 +16,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -24,28 +24,42 @@
import boto
from boto import handler
from boto.resultset import ResultSet
+from boto.exception import BotoClientError
from boto.s3.acl import Policy, CannedACLStrings, Grant
from boto.s3.key import Key
from boto.s3.prefix import Prefix
from boto.s3.deletemarker import DeleteMarker
from boto.s3.multipart import MultiPartUpload
from boto.s3.multipart import CompleteMultiPartUpload
+from boto.s3.multidelete import MultiDeleteResult
+from boto.s3.multidelete import Error
from boto.s3.bucketlistresultset import BucketListResultSet
from boto.s3.bucketlistresultset import VersionedBucketListResultSet
from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
+from boto.s3.lifecycle import Lifecycle
+from boto.s3.tagging import Tags
+from boto.s3.cors import CORSConfiguration
+from boto.s3.bucketlogging import BucketLogging
import boto.jsonresponse
import boto.utils
import xml.sax
+import xml.sax.saxutils
+import StringIO
import urllib
import re
+import base64
from collections import defaultdict
# as per http://goo.gl/BDuud (02/19/2011)
-class S3WebsiteEndpointTranslate:
- trans_region = defaultdict(lambda :'s3-website-us-east-1')
- trans_region['EU'] = 's3-website-eu-west-1'
+
+class S3WebsiteEndpointTranslate:
+
+ trans_region = defaultdict(lambda: 's3-website-us-east-1')
+ trans_region['eu-west-1'] = 's3-website-eu-west-1'
trans_region['us-west-1'] = 's3-website-us-west-1'
+ trans_region['us-west-2'] = 's3-website-us-west-2'
+ trans_region['sa-east-1'] = 's3-website-sa-east-1'
trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'
@@ -55,19 +69,8 @@
S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
-class Bucket(object):
- BucketLoggingBody = """<?xml version="1.0" encoding="UTF-8"?>
- <BucketLoggingStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- <LoggingEnabled>
- <TargetBucket>%s</TargetBucket>
- <TargetPrefix>%s</TargetPrefix>
- </LoggingEnabled>
- </BucketLoggingStatus>"""
-
- EmptyBucketLoggingBody = """<?xml version="1.0" encoding="UTF-8"?>
- <BucketLoggingStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- </BucketLoggingStatus>"""
+class Bucket(object):
LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'
@@ -126,7 +129,7 @@
bucket so that when you call bucket.new_key() or when you get a listing
of keys in the bucket you will get an instances of your key class
rather than the default.
-
+
:type key_class: class
:param key_class: A subclass of Key that can be more specific
"""
@@ -135,44 +138,59 @@
def lookup(self, key_name, headers=None):
"""
Deprecated: Please use get_key method.
-
+
:type key_name: string
:param key_name: The name of the key to retrieve
-
+
:rtype: :class:`boto.s3.key.Key`
:returns: A Key object from this bucket.
"""
return self.get_key(key_name, headers=headers)
-
- def get_key(self, key_name, headers=None, version_id=None):
+
+ def get_key(self, key_name, headers=None, version_id=None,
+ response_headers=None):
"""
Check to see if a particular key exists within the bucket. This
method uses a HEAD request to check for the existance of the key.
Returns: An instance of a Key object or None
-
+
:type key_name: string
:param key_name: The name of the key to retrieve
-
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
+
:rtype: :class:`boto.s3.key.Key`
:returns: A Key object from this bucket.
"""
+ query_args = []
if version_id:
- query_args = 'versionId=%s' % version_id
+ query_args.append('versionId=%s' % version_id)
+ if response_headers:
+ for rk, rv in response_headers.iteritems():
+ query_args.append('%s=%s' % (rk, urllib.quote(rv)))
+ if query_args:
+ query_args = '&'.join(query_args)
else:
query_args = None
response = self.connection.make_request('HEAD', self.name, key_name,
headers=headers,
query_args=query_args)
+ response.read()
# Allow any success status (2xx) - for example this lets us
# support Range gets, which return status 206:
- if response.status/100 == 2:
- response.read()
+ if response.status / 100 == 2:
k = self.key_class(self)
provider = self.connection.provider
k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
k.etag = response.getheader('etag')
k.content_type = response.getheader('content-type')
k.content_encoding = response.getheader('content-encoding')
+ k.content_disposition = response.getheader('content-disposition')
+ k.content_language = response.getheader('content-language')
k.last_modified = response.getheader('last-modified')
# the following machinations are a workaround to the fact that
# apache/fastcgi omits the content-length header on HEAD
@@ -190,7 +208,6 @@
return k
else:
if response.status == 404:
- response.read()
return None
else:
raise self.connection.provider.storage_response_error(
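A minimal sketch of the new response_headers override on get_key (not part of the patch; the bucket and key names are hypothetical):

    import boto

    conn = boto.connect_s3()
    bucket = conn.get_bucket('mybucket')
    key = bucket.get_key(
        'report.bin',
        response_headers={'response-content-type':
                          'application/octet-stream'})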
@@ -202,7 +219,7 @@
BucketListResultSet that automatically handles all of the result
paging, etc. from S3. You just need to keep iterating until
there are no more results.
-
+
Called with no arguments, this will return an iterator object across
all keys within the bucket.
@@ -213,23 +230,23 @@
as Content-Type and user metadata are not available in the XML.
Therefore, if you want these additional metadata fields you will
have to do a HEAD request on the Key in the bucket.
-
+
:type prefix: string
:param prefix: allows you to limit the listing to a particular
- prefix. For example, if you call the method with
- prefix='/foo/' then the iterator will only cycle
- through the keys that begin with the string '/foo/'.
-
+ prefix. For example, if you call the method with
+ prefix='/foo/' then the iterator will only cycle through
+ the keys that begin with the string '/foo/'.
+
:type delimiter: string
:param delimiter: can be used in conjunction with the prefix
- to allow you to organize and browse your keys
- hierarchically. See:
- http://docs.amazonwebservices.com/AmazonS3/2006-03-01/
- for more details.
-
+ to allow you to organize and browse your keys
+ hierarchically. See:
+ http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ for
+ more details.
+
:type marker: string
:param marker: The "marker" of where you are in the result set
-
+
:rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc
"""
@@ -238,34 +255,35 @@
def list_versions(self, prefix='', delimiter='', key_marker='',
version_id_marker='', headers=None):
"""
- List version objects within a bucket. This returns an instance of an
- VersionedBucketListResultSet that automatically handles all of the result
- paging, etc. from S3. You just need to keep iterating until
- there are no more results.
- Called with no arguments, this will return an iterator object across
+ List version objects within a bucket. This returns an
+ instance of an VersionedBucketListResultSet that automatically
+ handles all of the result paging, etc. from S3. You just need
+ to keep iterating until there are no more results. Called
+ with no arguments, this will return an iterator object across
all keys within the bucket.
-
+
:type prefix: string
:param prefix: allows you to limit the listing to a particular
- prefix. For example, if you call the method with
- prefix='/foo/' then the iterator will only cycle
- through the keys that begin with the string '/foo/'.
-
+ prefix. For example, if you call the method with
+ prefix='/foo/' then the iterator will only cycle through
+ the keys that begin with the string '/foo/'.
+
:type delimiter: string
:param delimiter: can be used in conjunction with the prefix
- to allow you to organize and browse your keys
- hierarchically. See:
- http://docs.amazonwebservices.com/AmazonS3/2006-03-01/
- for more details.
-
+ to allow you to organize and browse your keys
+ hierarchically. See:
+ http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ for
+ more details.
+
:type marker: string
:param marker: The "marker" of where you are in the result set
-
+
:rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc
"""
- return VersionedBucketListResultSet(self, prefix, delimiter, key_marker,
- version_id_marker, headers)
+ return VersionedBucketListResultSet(self, prefix, delimiter,
+ key_marker, version_id_marker,
+ headers)
def list_multipart_uploads(self, key_marker='',
upload_id_marker='',
@@ -275,10 +293,10 @@
        instance of a MultiPartUploadListResultSet that automatically
handles all of the result paging, etc. from S3. You just need
to keep iterating until there are no more results.
-
+
:type marker: string
:param marker: The "marker" of where you are in the result set
-
+
:rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc
"""
@@ -317,32 +335,32 @@
def get_all_keys(self, headers=None, **params):
"""
- A lower-level method for listing contents of a bucket.
- This closely models the actual S3 API and requires you to manually
- handle the paging of results. For a higher-level method
- that handles the details of paging for you, you can use the list method.
-
+ A lower-level method for listing contents of a bucket. This
+ closely models the actual S3 API and requires you to manually
+ handle the paging of results. For a higher-level method that
+ handles the details of paging for you, you can use the list
+ method.
+
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
-
+
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
-
+
:type marker: string
:param marker: The "marker" of where you are in the result set
-
- :type delimiter: string
+
+ :type delimiter: string
:param delimiter: If this optional, Unicode string parameter
- is included with your request, then keys that
- contain the same string between the prefix and
- the first occurrence of the delimiter will be
- rolled up into a single result element in the
- CommonPrefixes collection. These rolled-up keys
- are not returned elsewhere in the response.
+ is included with your request, then keys that contain the
+ same string between the prefix and the first occurrence of
+ the delimiter will be rolled up into a single result
+ element in the CommonPrefixes collection. These rolled-up
+ keys are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested
-
+
"""
return self._get_all([('Contents', self.key_class),
('CommonPrefixes', Prefix)],
@@ -350,37 +368,36 @@
def get_all_versions(self, headers=None, **params):
"""
- A lower-level, version-aware method for listing contents of a bucket.
- This closely models the actual S3 API and requires you to manually
- handle the paging of results. For a higher-level method
- that handles the details of paging for you, you can use the list method.
-
+ A lower-level, version-aware method for listing contents of a
+ bucket. This closely models the actual S3 API and requires
+ you to manually handle the paging of results. For a
+ higher-level method that handles the details of paging for
+ you, you can use the list method.
+
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
-
+
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
-
+
:type key_marker: string
:param key_marker: The "marker" of where you are in the result set
- with respect to keys.
-
+ with respect to keys.
+
:type version_id_marker: string
:param version_id_marker: The "marker" of where you are in the result
- set with respect to version-id's.
-
- :type delimiter: string
+ set with respect to version-id's.
+
+ :type delimiter: string
:param delimiter: If this optional, Unicode string parameter
- is included with your request, then keys that
- contain the same string between the prefix and
- the first occurrence of the delimiter will be
- rolled up into a single result element in the
- CommonPrefixes collection. These rolled-up keys
- are not returned elsewhere in the response.
+ is included with your request, then keys that contain the
+ same string between the prefix and the first occurrence of
+ the delimiter will be rolled up into a single result
+ element in the CommonPrefixes collection. These rolled-up
+ keys are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested
-
"""
return self._get_all([('Version', self.key_class),
('CommonPrefixes', Prefix),
@@ -394,81 +411,182 @@
actual S3 API and requires you to manually handle the paging
of results. For a higher-level method that handles the
details of paging for you, you can use the list method.
-
+
:type max_uploads: int
:param max_uploads: The maximum number of uploads to retrieve.
- Default value is 1000.
-
- :type key_marker: string
- :param key_marker: Together with upload_id_marker, this parameter
- specifies the multipart upload after which listing
- should begin. If upload_id_marker is not specified,
- only the keys lexicographically greater than the
- specified key_marker will be included in the list.
+ Default value is 1000.
- If upload_id_marker is specified, any multipart
- uploads for a key equal to the key_marker might
- also be included, provided those multipart uploads
- have upload IDs lexicographically greater than the
- specified upload_id_marker.
-
+ :type key_marker: string
+ :param key_marker: Together with upload_id_marker, this
+ parameter specifies the multipart upload after which
+ listing should begin. If upload_id_marker is not
+ specified, only the keys lexicographically greater than
+ the specified key_marker will be included in the list.
+
+ If upload_id_marker is specified, any multipart uploads
+ for a key equal to the key_marker might also be included,
+ provided those multipart uploads have upload IDs
+ lexicographically greater than the specified
+ upload_id_marker.
+
:type upload_id_marker: string
:param upload_id_marker: Together with key-marker, specifies
- the multipart upload after which listing
- should begin. If key_marker is not specified,
- the upload_id_marker parameter is ignored.
- Otherwise, any multipart uploads for a key
- equal to the key_marker might be included
- in the list only if they have an upload ID
- lexicographically greater than the specified
- upload_id_marker.
+ the multipart upload after which listing should begin. If
+ key_marker is not specified, the upload_id_marker
+ parameter is ignored. Otherwise, any multipart uploads
+ for a key equal to the key_marker might be included in the
+ list only if they have an upload ID lexicographically
+ greater than the specified upload_id_marker.
-
:rtype: ResultSet
:return: The result from S3 listing the uploads requested
-
+
"""
- return self._get_all([('Upload', MultiPartUpload)],
+ return self._get_all([('Upload', MultiPartUpload),
+ ('CommonPrefixes', Prefix)],
'uploads', headers, **params)
def new_key(self, key_name=None):
"""
Creates a new key
-
+
:type key_name: string
:param key_name: The name of the key to create
-
+
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
+ if not key_name:
+ raise ValueError('Empty key names are not allowed')
return self.key_class(self, key_name)
def generate_url(self, expires_in, method='GET', headers=None,
- force_http=False, response_headers=None):
+ force_http=False, response_headers=None,
+ expires_in_absolute=False):
return self.connection.generate_url(expires_in, method, self.name,
headers=headers,
force_http=force_http,
- response_headers=response_headers)
+ response_headers=response_headers,
+ expires_in_absolute=expires_in_absolute)
+
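
The new expires_in_absolute flag switches expires_in from a relative offset in
seconds to an absolute POSIX timestamp. A minimal sketch against a hypothetical
bucket (credentials assumed to be configured):

    import time
    import boto

    conn = boto.connect_s3()
    bucket = conn.get_bucket('mybucket')

    # Relative expiry: a signed URL for the bucket, valid for one hour.
    url = bucket.generate_url(3600, method='GET')

    # Absolute expiry: valid until the given epoch timestamp.
    deadline = int(time.time()) + 86400
    url = bucket.generate_url(deadline, method='GET',
                              expires_in_absolute=True)
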
+ def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
+ """
+        Deletes a set of keys using S3's Multi-object delete API. If a
+        VersionID is specified for a key, then that version is removed.
+        Returns a MultiDeleteResult object, which contains Deleted
+        and Error elements for each key you ask to delete.
+
+ :type keys: list
+ :param keys: A list of either key_names or (key_name, versionid) pairs
+ or a list of Key instances.
+
+ :type quiet: boolean
+ :param quiet: In quiet mode the response includes only keys
+ where the delete operation encountered an error. For a
+ successful deletion, the operation does not return any
+ information about the delete in the response body.
+
+ :type mfa_token: tuple or list of strings
+ :param mfa_token: A tuple or list consisting of the serial
+ number from the MFA device and the current value of the
+ six-digit token associated with the device. This value is
+ required anytime you are deleting versioned objects from a
+ bucket that has the MFADelete option on the bucket.
+
+ :returns: An instance of MultiDeleteResult
+ """
+ ikeys = iter(keys)
+ result = MultiDeleteResult(self)
+ provider = self.connection.provider
+ query_args = 'delete'
+
+ def delete_keys2(hdrs):
+ hdrs = hdrs or {}
+ data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
+ data += u"<Delete>"
+ if quiet:
+ data += u"<Quiet>true</Quiet>"
+ count = 0
+ while count < 1000:
+ try:
+ key = ikeys.next()
+ except StopIteration:
+ break
+ if isinstance(key, basestring):
+ key_name = key
+ version_id = None
+ elif isinstance(key, tuple) and len(key) == 2:
+ key_name, version_id = key
+                elif isinstance(key, (Key, DeleteMarker)) and key.name:
+ key_name = key.name
+ version_id = key.version_id
+ else:
+ if isinstance(key, Prefix):
+ key_name = key.name
+ code = 'PrefixSkipped' # Don't delete Prefix
+ else:
+                        key_name = repr(key)  # try to get a string
+ code = 'InvalidArgument' # other unknown type
+ message = 'Invalid. No delete action taken for this object.'
+ error = Error(key_name, code=code, message=message)
+ result.errors.append(error)
+ continue
+ count += 1
+ data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
+ if version_id:
+ data += u"<VersionId>%s</VersionId>" % version_id
+ data += u"</Object>"
+ data += u"</Delete>"
+ if count <= 0:
+ return False # no more
+ data = data.encode('utf-8')
+ fp = StringIO.StringIO(data)
+ md5 = boto.utils.compute_md5(fp)
+ hdrs['Content-MD5'] = md5[1]
+ hdrs['Content-Type'] = 'text/xml'
+ if mfa_token:
+ hdrs[provider.mfa_header] = ' '.join(mfa_token)
+ response = self.connection.make_request('POST', self.name,
+ headers=hdrs,
+ query_args=query_args,
+ data=data)
+ body = response.read()
+ if response.status == 200:
+ h = handler.XmlHandler(result, self)
+ xml.sax.parseString(body, h)
+ return count >= 1000 # more?
+ else:
+ raise provider.storage_response_error(response.status,
+ response.reason,
+ body)
+ while delete_keys2(headers):
+ pass
+ return result
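
A usage sketch for the Multi-object Delete support added above; bucket and key
names are hypothetical. The helper batches requests in groups of 1000, so any
mix of key names, (key_name, version_id) pairs, or Key/DeleteMarker instances
may be passed:

    import boto

    conn = boto.connect_s3()
    bucket = conn.get_bucket('mybucket')

    result = bucket.delete_keys(['a.txt', 'b.txt',
                                 ('c.txt', 'some-version-id')])
    for deleted in result.deleted:
        print 'deleted:', deleted.key
    for error in result.errors:
        print 'failed:', error.key, error.code, error.message
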
def delete_key(self, key_name, headers=None,
version_id=None, mfa_token=None):
"""
Deletes a key from the bucket. If a version_id is provided,
only that version of the key will be deleted.
-
+
:type key_name: string
:param key_name: The key name to delete
:type version_id: string
:param version_id: The version ID (optional)
-
+
:type mfa_token: tuple or list of strings
- :param mfa_token: A tuple or list consisting of the serial number
- from the MFA device and the current value of
- the six-digit token associated with the device.
- This value is required anytime you are
- deleting versioned objects from a bucket
- that has the MFADelete option on the bucket.
+ :param mfa_token: A tuple or list consisting of the serial
+ number from the MFA device and the current value of the
+ six-digit token associated with the device. This value is
+ required anytime you are deleting versioned objects from a
+ bucket that has the MFADelete option on the bucket.
+
+ :rtype: :class:`boto.s3.key.Key` or subclass
+ :returns: A key object holding information on what was
+            deleted. The caller can see if a delete_marker was
+ created or removed and what version_id the delete created
+ or removed.
"""
provider = self.connection.provider
if version_id:
@@ -486,11 +604,17 @@
if response.status != 204:
raise provider.storage_response_error(response.status,
response.reason, body)
+ else:
+ # return a key object with information on what was deleted.
+ k = self.key_class(self)
+ k.name = key_name
+ k.handle_version_headers(response)
+ return k
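
Because delete_key() now returns a key object populated via
handle_version_headers(), callers can see immediately whether the DELETE wrote
a delete marker (sketch, hypothetical names):

    import boto

    bucket = boto.connect_s3().get_bucket('mybucket')
    k = bucket.delete_key('notes.txt')
    # On a versioned bucket a plain DELETE creates a delete marker.
    print k.delete_marker, k.version_id
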
def copy_key(self, new_key_name, src_bucket_name,
src_key_name, metadata=None, src_version_id=None,
storage_class='STANDARD', preserve_acl=False,
- encrypt_key=False):
+ encrypt_key=False, headers=None, query_args=None):
"""
Create a new key in the bucket by copying another existing key.
@@ -505,46 +629,46 @@
:type src_version_id: string
:param src_version_id: The version id for the key. This param
- is optional. If not specified, the newest
- version of the key will be copied.
+ is optional. If not specified, the newest version of the
+ key will be copied.
:type metadata: dict
- :param metadata: Metadata to be associated with new key.
- If metadata is supplied, it will replace the
- metadata of the source key being copied.
- If no metadata is supplied, the source key's
- metadata will be copied to the new key.
+ :param metadata: Metadata to be associated with new key. If
+ metadata is supplied, it will replace the metadata of the
+ source key being copied. If no metadata is supplied, the
+ source key's metadata will be copied to the new key.
:type storage_class: string
- :param storage_class: The storage class of the new key.
- By default, the new key will use the
- standard storage class. Possible values are:
- STANDARD | REDUCED_REDUNDANCY
+ :param storage_class: The storage class of the new key. By
+ default, the new key will use the standard storage class.
+ Possible values are: STANDARD | REDUCED_REDUNDANCY
:type preserve_acl: bool
- :param preserve_acl: If True, the ACL from the source key
- will be copied to the destination
- key. If False, the destination key
- will have the default ACL.
- Note that preserving the ACL in the
- new key object will require two
- additional API calls to S3, one to
- retrieve the current ACL and one to
- set that ACL on the new object. If
- you don't care about the ACL, a value
- of False will be significantly more
- efficient.
+ :param preserve_acl: If True, the ACL from the source key will
+ be copied to the destination key. If False, the
+ destination key will have the default ACL. Note that
+ preserving the ACL in the new key object will require two
+ additional API calls to S3, one to retrieve the current
+ ACL and one to set that ACL on the new object. If you
+ don't care about the ACL, a value of False will be
+ significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
- be encrypted on the server-side by S3 and
- will be stored in an encrypted form while
- at rest in S3.
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
+
+ :type headers: dict
+ :param headers: A dictionary of header name/value pairs.
+
+ :type query_args: string
+ :param query_args: A string of additional querystring arguments
+ to append to the request
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
- headers = {}
+ headers = headers or {}
provider = self.connection.provider
src_key_name = boto.utils.get_utf8_value(src_key_name)
if preserve_acl:
@@ -557,18 +681,19 @@
headers[provider.server_side_encryption_header] = 'AES256'
src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
if src_version_id:
- src += '?version_id=%s' % src_version_id
- headers = {provider.copy_source_header : str(src)}
+ src += '?versionId=%s' % src_version_id
+ headers[provider.copy_source_header] = str(src)
# make sure storage_class_header key exists before accessing it
- if provider.storage_class_header:
+ if provider.storage_class_header and storage_class:
headers[provider.storage_class_header] = storage_class
- if metadata:
+ if metadata is not None:
headers[provider.metadata_directive_header] = 'REPLACE'
headers = boto.utils.merge_meta(headers, metadata, provider)
- else:
+ elif not query_args: # Can't use this header with multi-part copy.
headers[provider.metadata_directive_header] = 'COPY'
response = self.connection.make_request('PUT', self.name, new_key_name,
- headers=headers)
+ headers=headers,
+ query_args=query_args)
body = response.read()
if response.status == 200:
key = self.new_key(new_key_name)
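
The reworked copy_key() keeps caller-supplied headers, distinguishes
metadata=None from an explicit (even empty) dict, and only emits the COPY
metadata directive when no query_args are given, since that header cannot be
used with multipart copy. A sketch with hypothetical names:

    import boto

    dst = boto.connect_s3().get_bucket('dst-bucket')

    # Default: source metadata is carried over (COPY directive).
    dst.copy_key('backup/report.csv', 'src-bucket', 'report.csv')

    # Supplying a metadata dict switches the directive to REPLACE.
    dst.copy_key('backup/report.csv', 'src-bucket', 'report.csv',
                 metadata={'owner': 'reports-team'}, preserve_acl=True)
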
@@ -591,7 +716,7 @@
if headers:
headers[self.connection.provider.acl_header] = acl_str
else:
- headers={self.connection.provider.acl_header: acl_str}
+ headers = {self.connection.provider.acl_header: acl_str}
query_args = 'acl'
if version_id:
@@ -621,7 +746,7 @@
if version_id:
query_args += '&versionId=%s' % version_id
response = self.connection.make_request('PUT', self.name, key_name,
- data=acl_str.encode('ISO-8859-1'),
+ data=acl_str.encode('UTF-8'),
query_args=query_args,
headers=headers)
body = response.read()
@@ -654,7 +779,7 @@
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
- def set_subresource(self, subresource, value, key_name = '', headers=None,
+ def set_subresource(self, subresource, value, key_name='', headers=None,
version_id=None):
"""
Set a subresource for a bucket or key.
@@ -667,15 +792,15 @@
:type key_name: string
:param key_name: The key to operate on, or None to operate on the
- bucket.
+ bucket.
:type headers: dict
:param headers: Additional HTTP headers to include in the request.
:type src_version_id: string
- :param src_version_id: Optional. The version id of the key to operate
- on. If not specified, operate on the newest
- version.
+ :param src_version_id: Optional. The version id of the key to
+ operate on. If not specified, operate on the newest
+ version.
"""
if not subresource:
raise TypeError('set_subresource called with subresource=None')
@@ -701,15 +826,15 @@
:type key_name: string
:param key_name: The key to operate on, or None to operate on the
- bucket.
+ bucket.
:type headers: dict
:param headers: Additional HTTP headers to include in the request.
:type src_version_id: string
- :param src_version_id: Optional. The version id of the key to operate
- on. If not specified, operate on the newest
- version.
+ :param src_version_id: Optional. The version id of the key to
+ operate on. If not specified, operate on the newest
+ version.
:rtype: string
:returns: The value of the subresource.
@@ -741,23 +866,22 @@
to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
-
+
:type permission: string
:param permission: The permission being granted. Should be one of:
- (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
-
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
:type email_address: string
:param email_address: The email address associated with the AWS
- account your are granting the permission to.
-
+            account you are granting the permission to.
+
:type recursive: boolean
- :param recursive: A boolean value to controls whether the command
- will apply the grant to all keys within the bucket
- or not. The default value is False. By passing a
- True value, the call will iterate through all keys
- in the bucket and apply the same grant to each key.
- CAUTION: If you have a lot of keys, this could take
- a long time!
+        :param recursive: A boolean value that controls whether the
+ command will apply the grant to all keys within the bucket
+ or not. The default value is False. By passing a True
+ value, the call will iterate through all keys in the
+ bucket and apply the same grant to each key. CAUTION: If
+ you have a lot of keys, this could take a long time!
"""
if permission not in S3Permissions:
raise self.connection.provider.storage_permissions_error(
@@ -776,27 +900,26 @@
user grant to a bucket. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
-
+
:type permission: string
:param permission: The permission being granted. Should be one of:
- (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
-
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
:type user_id: string
:param user_id: The canonical user id associated with the AWS
- account your are granting the permission to.
-
+            account you are granting the permission to.
+
:type recursive: boolean
- :param recursive: A boolean value to controls whether the command
- will apply the grant to all keys within the bucket
- or not. The default value is False. By passing a
- True value, the call will iterate through all keys
- in the bucket and apply the same grant to each key.
- CAUTION: If you have a lot of keys, this could take
- a long time!
-
+        :param recursive: A boolean value that controls whether the
+ command will apply the grant to all keys within the bucket
+ or not. The default value is False. By passing a True
+ value, the call will iterate through all keys in the
+ bucket and apply the same grant to each key. CAUTION: If
+ you have a lot of keys, this could take a long time!
+
:type display_name: string
        :param display_name: An optional string containing the user's
- Display Name. Only required on Walrus.
+ Display Name. Only required on Walrus.
"""
if permission not in S3Permissions:
raise self.connection.provider.storage_permissions_error(
@@ -820,8 +943,7 @@
:rtype: str
:return: The LocationConstraint for the bucket or the empty
- string if no constraint was specified when bucket
- was created.
+ string if no constraint was specified when bucket was created.
"""
response = self.connection.make_request('GET', self.name,
query_args='location')
@@ -835,21 +957,20 @@
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
- def enable_logging(self, target_bucket, target_prefix='', headers=None):
- if isinstance(target_bucket, Bucket):
- target_bucket = target_bucket.name
- body = self.BucketLoggingBody % (target_bucket, target_prefix)
- response = self.connection.make_request('PUT', self.name, data=body,
- query_args='logging', headers=headers)
- body = response.read()
- if response.status == 200:
- return True
- else:
- raise self.connection.provider.storage_response_error(
- response.status, response.reason, body)
-
- def disable_logging(self, headers=None):
- body = self.EmptyBucketLoggingBody
+ def set_xml_logging(self, logging_str, headers=None):
+ """
+ Set logging on a bucket directly to the given xml string.
+
+ :type logging_str: unicode string
+        :param logging_str: The XML for the BucketLoggingStatus which
+ will be set. The string will be converted to utf-8 before
+ it is sent. Usually, you will obtain this XML from the
+ BucketLogging object.
+
+ :rtype: bool
+ :return: True if ok or raises an exception.
+ """
+ body = logging_str.encode('utf-8')
response = self.connection.make_request('PUT', self.name, data=body,
query_args='logging', headers=headers)
body = response.read()
@@ -859,17 +980,65 @@
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
+ def enable_logging(self, target_bucket, target_prefix='',
+ grants=None, headers=None):
+ """
+ Enable logging on a bucket.
+
+ :type target_bucket: bucket or string
+ :param target_bucket: The bucket to log to.
+
+ :type target_prefix: string
+ :param target_prefix: The prefix which should be prepended to the
+ generated log files written to the target_bucket.
+
+ :type grants: list of Grant objects
+ :param grants: A list of extra permissions which will be granted on
+ the log files which are created.
+
+ :rtype: bool
+ :return: True if ok or raises an exception.
+ """
+ if isinstance(target_bucket, Bucket):
+ target_bucket = target_bucket.name
+ blogging = BucketLogging(target=target_bucket, prefix=target_prefix,
+ grants=grants)
+ return self.set_xml_logging(blogging.to_xml(), headers=headers)
+
+ def disable_logging(self, headers=None):
+ """
+ Disable logging on a bucket.
+
+ :rtype: bool
+ :return: True if ok or raises an exception.
+ """
+ blogging = BucketLogging()
+ return self.set_xml_logging(blogging.to_xml(), headers=headers)
+
def get_logging_status(self, headers=None):
+ """
+ Get the logging status for this bucket.
+
+ :rtype: :class:`boto.s3.bucketlogging.BucketLogging`
+ :return: A BucketLogging object for this bucket.
+ """
response = self.connection.make_request('GET', self.name,
query_args='logging', headers=headers)
body = response.read()
if response.status == 200:
- return body
+ blogging = BucketLogging()
+ h = handler.XmlHandler(blogging, self)
+ xml.sax.parseString(body, h)
+ return blogging
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
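
With logging now routed through BucketLogging and set_xml_logging(), a typical
enable/inspect/disable cycle looks like this sketch (hypothetical bucket names;
set_as_logging_target() is defined just below):

    import boto

    conn = boto.connect_s3()
    logs = conn.get_bucket('my-log-bucket')
    logs.set_as_logging_target()    # grant the LogDelivery group access

    bucket = conn.get_bucket('mybucket')
    bucket.enable_logging(logs, target_prefix='access-logs/')
    print bucket.get_logging_status()    # now a BucketLogging object
    bucket.disable_logging()
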
def set_as_logging_target(self, headers=None):
+ """
+        Set up the current bucket as a logging target by granting the necessary
+ permissions to the LogDelivery group to write log files to this bucket.
+ """
policy = self.get_acl(headers=headers)
g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
@@ -897,33 +1066,31 @@
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
-
+
def configure_versioning(self, versioning, mfa_delete=False,
mfa_token=None, headers=None):
"""
Configure versioning for this bucket.
-
+
        .. note:: This feature is currently in beta.
-
+
:type versioning: bool
:param versioning: A boolean indicating whether version is
- enabled (True) or disabled (False).
+ enabled (True) or disabled (False).
:type mfa_delete: bool
- :param mfa_delete: A boolean indicating whether the Multi-Factor
- Authentication Delete feature is enabled (True)
- or disabled (False). If mfa_delete is enabled
- then all Delete operations will require the
- token from your MFA device to be passed in
- the request.
+ :param mfa_delete: A boolean indicating whether the
+ Multi-Factor Authentication Delete feature is enabled
+ (True) or disabled (False). If mfa_delete is enabled then
+ all Delete operations will require the token from your MFA
+ device to be passed in the request.
:type mfa_token: tuple or list of strings
- :param mfa_token: A tuple or list consisting of the serial number
- from the MFA device and the current value of
- the six-digit token associated with the device.
- This value is required when you are changing
- the status of the MfaDelete property of
- the bucket.
+ :param mfa_token: A tuple or list consisting of the serial
+ number from the MFA device and the current value of the
+ six-digit token associated with the device. This value is
+ required when you are changing the status of the MfaDelete
+ property of the bucket.
"""
if versioning:
ver = 'Enabled'
@@ -947,18 +1114,18 @@
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
-
+
def get_versioning_status(self, headers=None):
"""
Returns the current status of versioning on the bucket.
:rtype: dict
:returns: A dictionary containing a key named 'Versioning'
- that can have a value of either Enabled, Disabled,
- or Suspended. Also, if MFADelete has ever been enabled
- on the bucket, the dictionary will contain a key
- named 'MFADelete' which will have a value of either
- Enabled or Suspended.
+ that can have a value of either Enabled, Disabled, or
+ Suspended. Also, if MFADelete has ever been enabled on the
+ bucket, the dictionary will contain a key named
+ 'MFADelete' which will have a value of either Enabled or
+ Suspended.
"""
response = self.connection.make_request('GET', self.name,
query_args='versioning', headers=headers)
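
A short sketch of the versioning calls (bucket name hypothetical; the MFA
variant needs a real device serial and token):

    import boto

    bucket = boto.connect_s3().get_bucket('mybucket')
    bucket.configure_versioning(True)
    print bucket.get_versioning_status()   # e.g. {'Versioning': 'Enabled'}

    # With MFA Delete enabled, pass (serial_number, token) both here and
    # to the delete calls:
    # bucket.configure_versioning(True, mfa_delete=True,
    #                             mfa_token=(serial_number, token))
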
@@ -977,22 +1144,82 @@
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
+ def configure_lifecycle(self, lifecycle_config, headers=None):
+ """
+ Configure lifecycle for this bucket.
+
+ :type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle`
+ :param lifecycle_config: The lifecycle configuration you want
+ to configure for this bucket.
+ """
+ fp = StringIO.StringIO(lifecycle_config.to_xml())
+ md5 = boto.utils.compute_md5(fp)
+ if headers is None:
+ headers = {}
+ headers['Content-MD5'] = md5[1]
+ headers['Content-Type'] = 'text/xml'
+ response = self.connection.make_request('PUT', self.name,
+ data=fp.getvalue(),
+ query_args='lifecycle',
+ headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def get_lifecycle_config(self, headers=None):
+ """
+ Returns the current lifecycle configuration on the bucket.
+
+ :rtype: :class:`boto.s3.lifecycle.Lifecycle`
+ :returns: A LifecycleConfig object that describes all current
+ lifecycle rules in effect for the bucket.
+ """
+ response = self.connection.make_request('GET', self.name,
+ query_args='lifecycle', headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ lifecycle = Lifecycle()
+ h = handler.XmlHandler(lifecycle, self)
+ xml.sax.parseString(body, h)
+ return lifecycle
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def delete_lifecycle_configuration(self, headers=None):
+ """
+ Removes all lifecycle configuration from the bucket.
+ """
+ response = self.connection.make_request('DELETE', self.name,
+ query_args='lifecycle',
+ headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 204:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
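
A sketch of the new lifecycle calls, assuming boto.s3.lifecycle.Lifecycle
exposes an add_rule(id, prefix, status, expiration) helper; bucket and rule
names are hypothetical:

    import boto
    from boto.s3.lifecycle import Lifecycle

    bucket = boto.connect_s3().get_bucket('mybucket')

    lifecycle = Lifecycle()
    # Expire objects under the 'logs/' prefix after 30 days.
    lifecycle.add_rule('expire-logs', 'logs/', 'Enabled', 30)
    bucket.configure_lifecycle(lifecycle)

    for rule in bucket.get_lifecycle_config():
        print rule.id, rule.prefix, rule.status
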
def configure_website(self, suffix, error_key='', headers=None):
"""
Configure this bucket to act as a website
:type suffix: str
:param suffix: Suffix that is appended to a request that is for a
- "directory" on the website endpoint (e.g. if the suffix
- is index.html and you make a request to
- samplebucket/images/ the data that is returned will
- be for the object with the key name images/index.html).
- The suffix must not be empty and must not include a
- slash character.
+ "directory" on the website endpoint (e.g. if the suffix is
+ index.html and you make a request to samplebucket/images/
+ the data that is returned will be for the object with the
+ key name images/index.html). The suffix must not be empty
+ and must not include a slash character.
:type error_key: str
:param error_key: The object key name to use when a 4XX class
- error occurs. This is optional.
+ error occurs. This is optional.
"""
if error_key:
@@ -1009,38 +1236,58 @@
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
-
+
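
A sketch of the website-configuration calls around this hunk (bucket name
hypothetical):

    import boto

    bucket = boto.connect_s3().get_bucket('mybucket')
    bucket.configure_website('index.html', error_key='error.html')
    print bucket.get_website_configuration()
    print bucket.get_website_endpoint()
    bucket.delete_website_configuration()
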
def get_website_configuration(self, headers=None):
"""
Returns the current status of website configuration on the bucket.
:rtype: dict
:returns: A dictionary containing a Python representation
- of the XML response from S3. The overall structure is:
+ of the XML response from S3. The overall structure is:
- * WebsiteConfiguration
-
- * IndexDocument
-
- * Suffix : suffix that is appended to request that
+ * WebsiteConfiguration
+
+ * IndexDocument
+
+ * Suffix : suffix that is appended to request that
+ is for a "directory" on the website endpoint
+ * ErrorDocument
+
+ * Key : name of object to serve when an error occurs
+ """
+        return self.get_website_configuration_with_xml(headers)[0]
+
+ def get_website_configuration_with_xml(self, headers=None):
+ """
+ Returns the current status of website configuration on the bucket as
+ unparsed XML.
+
+ :rtype: 2-Tuple
+ :returns: 2-tuple containing:
+ 1) A dictionary containing a Python representation
+             of the XML response from S3. The overall structure is:
+ * WebsiteConfiguration
+ * IndexDocument
+ * Suffix : suffix that is appended to request that
is for a "directory" on the website endpoint
- * ErrorDocument
-
- * Key : name of object to serve when an error occurs
+ * ErrorDocument
+ * Key : name of object to serve when an error occurs
+ 2) unparsed XML describing the bucket's website configuration.
"""
response = self.connection.make_request('GET', self.name,
query_args='website', headers=headers)
body = response.read()
boto.log.debug(body)
- if response.status == 200:
- e = boto.jsonresponse.Element()
- h = boto.jsonresponse.XmlHandler(e, None)
- h.parse(body)
- return e
- else:
+
+ if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e, body
+
def delete_website_configuration(self, headers=None):
"""
Removes all website configuration from the bucket.
@@ -1067,6 +1314,10 @@
return '.'.join(l)
def get_policy(self, headers=None):
+ """
+ Returns the JSON policy associated with the bucket. The policy
+ is returned as an uninterpreted JSON string.
+ """
response = self.connection.make_request('GET', self.name,
query_args='policy', headers=headers)
body = response.read()
@@ -1077,6 +1328,12 @@
response.status, response.reason, body)
def set_policy(self, policy, headers=None):
+ """
+ Add or replace the JSON policy associated with the bucket.
+
+ :type policy: str
+ :param policy: The JSON policy as a string.
+ """
response = self.connection.make_request('PUT', self.name,
data=policy,
query_args='policy',
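
Since the policy is passed through as an uninterpreted JSON string, a sketch
looks like this (bucket name and ARN are hypothetical):

    import boto

    bucket = boto.connect_s3().get_bucket('mybucket')
    policy = '''{
      "Version": "2008-10-17",
      "Statement": [{
        "Sid": "PublicRead",
        "Effect": "Allow",
        "Principal": {"AWS": "*"},
        "Action": "s3:GetObject",
        "Resource": "arn:aws:s3:::mybucket/*"
      }]
    }'''
    bucket.set_policy(policy)
    print bucket.get_policy()    # the same JSON, uninterpreted
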
@@ -1099,46 +1356,130 @@
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
-
+
+ def set_cors_xml(self, cors_xml, headers=None):
+ """
+ Set the CORS (Cross-Origin Resource Sharing) for a bucket.
+
+ :type cors_xml: str
+ :param cors_xml: The XML document describing your desired
+ CORS configuration. See the S3 documentation for details
+ of the exact syntax required.
+ """
+ fp = StringIO.StringIO(cors_xml)
+ md5 = boto.utils.compute_md5(fp)
+ if headers is None:
+ headers = {}
+ headers['Content-MD5'] = md5[1]
+ headers['Content-Type'] = 'text/xml'
+ response = self.connection.make_request('PUT', self.name,
+ data=fp.getvalue(),
+ query_args='cors',
+ headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def set_cors(self, cors_config, headers=None):
+ """
+ Set the CORS for this bucket given a boto CORSConfiguration
+ object.
+
+ :type cors_config: :class:`boto.s3.cors.CORSConfiguration`
+ :param cors_config: The CORS configuration you want
+ to configure for this bucket.
+ """
+ return self.set_cors_xml(cors_config.to_xml())
+
+ def get_cors_xml(self, headers=None):
+ """
+ Returns the current CORS configuration on the bucket as an
+ XML document.
+ """
+ response = self.connection.make_request('GET', self.name,
+ query_args='cors', headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ return body
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def get_cors(self, headers=None):
+ """
+ Returns the current CORS configuration on the bucket.
+
+ :rtype: :class:`boto.s3.cors.CORSConfiguration`
+ :returns: A CORSConfiguration object that describes all current
+ CORS rules in effect for the bucket.
+ """
+ body = self.get_cors_xml(headers)
+ cors = CORSConfiguration()
+ h = handler.XmlHandler(cors, self)
+ xml.sax.parseString(body, h)
+ return cors
+
+ def delete_cors(self, headers=None):
+ """
+ Removes all CORS configuration from the bucket.
+ """
+ response = self.connection.make_request('DELETE', self.name,
+ query_args='cors',
+ headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 204:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
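
A sketch of the new CORS round trip, assuming the CORSConfiguration container
defined further down in boto/s3/cors.py exposes an add_rule() helper mirroring
CORSRule's fields; names are hypothetical:

    import boto
    from boto.s3.cors import CORSConfiguration

    bucket = boto.connect_s3().get_bucket('mybucket')

    cors = CORSConfiguration()
    cors.add_rule(['GET', 'HEAD'], 'http://*.example.com',
                  allowed_header='*', max_age_seconds=3000)
    bucket.set_cors(cors)

    print bucket.get_cors_xml()
    bucket.delete_cors()
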
def initiate_multipart_upload(self, key_name, headers=None,
reduced_redundancy=False,
- metadata=None, encrypt_key=False):
+ metadata=None, encrypt_key=False,
+ policy=None):
"""
Start a multipart upload operation.
:type key_name: string
- :param key_name: The name of the key that will ultimately result from
- this multipart upload operation. This will be exactly
- as the key appears in the bucket after the upload
- process has been completed.
+ :param key_name: The name of the key that will ultimately
+ result from this multipart upload operation. This will be
+ exactly as the key appears in the bucket after the upload
+ process has been completed.
:type headers: dict
:param headers: Additional HTTP headers to send and store with the
- resulting key in S3.
+ resulting key in S3.
:type reduced_redundancy: boolean
- :param reduced_redundancy: In multipart uploads, the storage class is
- specified when initiating the upload,
- not when uploading individual parts. So
- if you want the resulting key to use the
- reduced redundancy storage class set this
- flag when you initiate the upload.
+ :param reduced_redundancy: In multipart uploads, the storage
+ class is specified when initiating the upload, not when
+ uploading individual parts. So if you want the resulting
+ key to use the reduced redundancy storage class set this
+ flag when you initiate the upload.
:type metadata: dict
:param metadata: Any metadata that you would like to set on the key
- that results from the multipart upload.
-
+ that results from the multipart upload.
+
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
- be encrypted on the server-side by S3 and
- will be stored in an encrypted form while
- at rest in S3.
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the
+ new key (once completed) in S3.
"""
query_args = 'uploads'
provider = self.connection.provider
- if headers is None:
- headers = {}
+ headers = headers or {}
+ if policy:
+ headers[provider.acl_header] = policy
if reduced_redundancy:
storage_class_header = provider.storage_class_header
if storage_class_header:
@@ -1165,7 +1506,7 @@
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
-
+
def complete_multipart_upload(self, key_name, upload_id,
xml_body, headers=None):
"""
@@ -1190,11 +1531,19 @@
resp = CompleteMultiPartUpload(self)
h = handler.XmlHandler(resp, self)
xml.sax.parseString(body, h)
+ # Use a dummy key to parse various response headers
+ # for versioning, encryption info and then explicitly
+ # set the completed MPU object values from key.
+ k = self.key_class(self)
+ k.handle_version_headers(response)
+ k.handle_encryption_headers(response)
+ resp.version_id = k.version_id
+ resp.encrypted = k.encrypted
return resp
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
-
+
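
The new policy argument on initiate_multipart_upload(), together with the
version/encryption parsing added to complete_multipart_upload() above, can be
exercised like this sketch (names hypothetical; non-final parts must still be
at least 5 MB):

    import boto

    bucket = boto.connect_s3().get_bucket('mybucket')
    mp = bucket.initiate_multipart_upload('big.bin', policy='public-read',
                                          reduced_redundancy=True)
    fp = open('/tmp/data.bin', 'rb')    # hypothetical local file
    try:
        mp.upload_part_from_file(fp, 1)
    finally:
        fp.close()
    completed = mp.complete_upload()
    print completed.version_id, completed.encrypted
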
def cancel_multipart_upload(self, key_name, upload_id, headers=None):
query_args = 'uploadId=%s' % upload_id
response = self.connection.make_request('DELETE', self.name, key_name,
@@ -1205,6 +1554,55 @@
if response.status != 204:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
-
+
def delete(self, headers=None):
return self.connection.delete_bucket(self.name, headers=headers)
+
+ def get_tags(self):
+ response = self.get_xml_tags()
+ tags = Tags()
+ h = handler.XmlHandler(tags, self)
+ xml.sax.parseString(response, h)
+ return tags
+
+ def get_xml_tags(self):
+ response = self.connection.make_request('GET', self.name,
+ query_args='tagging',
+ headers=None)
+ body = response.read()
+ if response.status == 200:
+ return body
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def set_xml_tags(self, tag_str, headers=None, query_args='tagging'):
+ if headers is None:
+ headers = {}
+ md5 = boto.utils.compute_md5(StringIO.StringIO(tag_str))
+ headers['Content-MD5'] = md5[1]
+ headers['Content-Type'] = 'text/xml'
+ response = self.connection.make_request('PUT', self.name,
+ data=tag_str.encode('utf-8'),
+ query_args=query_args,
+ headers=headers)
+ body = response.read()
+ if response.status != 204:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+ return True
+
+ def set_tags(self, tags, headers=None):
+ return self.set_xml_tags(tags.to_xml(), headers=headers)
+
+ def delete_tags(self, headers=None):
+ response = self.connection.make_request('DELETE', self.name,
+ query_args='tagging',
+ headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 204:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
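
A sketch of the tagging round trip, assuming the Tags and TagSet containers in
boto.s3.tagging are list-like with add_tag()/add_tag_set() helpers; names are
hypothetical:

    import boto
    from boto.s3.tagging import Tags, TagSet

    bucket = boto.connect_s3().get_bucket('mybucket')

    tag_set = TagSet()
    tag_set.add_tag('project', 'demo')
    tags = Tags()
    tags.add_tag_set(tag_set)
    bucket.set_tags(tags)

    for tag in bucket.get_tags()[0]:    # first (only) TagSet
        print tag.key, tag.value
    bucket.delete_tags()
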
diff --git a/boto/s3/bucketlistresultset.py b/boto/s3/bucketlistresultset.py
index 14b0f5d..73b60c9 100644
--- a/boto/s3/bucketlistresultset.py
+++ b/boto/s3/bucketlistresultset.py
@@ -31,7 +31,7 @@
for k in rs:
yield k
if k:
- marker = k.name
+ marker = rs.next_marker or k.name
        more_results = rs.is_truncated
class BucketListResultSet:
diff --git a/boto/s3/bucketlogging.py b/boto/s3/bucketlogging.py
new file mode 100644
index 0000000..9e3c050
--- /dev/null
+++ b/boto/s3/bucketlogging.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax.saxutils
+from acl import Grant
+
+class BucketLogging:
+
+ def __init__(self, target=None, prefix=None, grants=None):
+ self.target = target
+ self.prefix = prefix
+ if grants is None:
+ self.grants = []
+ else:
+ self.grants = grants
+
+ def __repr__(self):
+ if self.target is None:
+ return "<BucketLoggingStatus: Disabled>"
+ grants = []
+ for g in self.grants:
+ if g.type == 'CanonicalUser':
+ u = g.display_name
+ elif g.type == 'Group':
+ u = g.uri
+ else:
+ u = g.email_address
+ grants.append("%s = %s" % (u, g.permission))
+ return "<BucketLoggingStatus: %s/%s (%s)>" % (self.target, self.prefix, ", ".join(grants))
+
+ def add_grant(self, grant):
+ self.grants.append(grant)
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Grant':
+ self.grants.append(Grant())
+ return self.grants[-1]
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'TargetBucket':
+ self.target = value
+ elif name == 'TargetPrefix':
+ self.prefix = value
+ else:
+ setattr(self, name, value)
+
+ def to_xml(self):
+ # caller is responsible to encode to utf-8
+ s = u'<?xml version="1.0" encoding="UTF-8"?>'
+ s += u'<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">'
+ if self.target is not None:
+ s += u'<LoggingEnabled>'
+ s += u'<TargetBucket>%s</TargetBucket>' % self.target
+ prefix = self.prefix or ''
+ s += u'<TargetPrefix>%s</TargetPrefix>' % xml.sax.saxutils.escape(prefix)
+ if self.grants:
+            s += u'<TargetGrants>'
+            for grant in self.grants:
+                s += grant.to_xml()
+            s += u'</TargetGrants>'
+ s += u'</LoggingEnabled>'
+ s += u'</BucketLoggingStatus>'
+ return s
diff --git a/boto/s3/connection.py b/boto/s3/connection.py
index 80209b7..f17ab40 100644
--- a/boto/s3/connection.py
+++ b/boto/s3/connection.py
@@ -16,35 +16,38 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
-import urllib, base64
+import urllib
+import base64
import time
+
import boto.utils
from boto.connection import AWSAuthConnection
from boto import handler
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.resultset import ResultSet
-from boto.exception import BotoClientError
+from boto.exception import BotoClientError, S3ResponseError
+
def check_lowercase_bucketname(n):
"""
Bucket names must not contain uppercase characters. We check for
this by appending a lowercase character and testing with islower().
Note this also covers cases like numeric bucket names with dashes.
-
+
>>> check_lowercase_bucketname("Aaaa")
Traceback (most recent call last):
...
BotoClientError: S3Error: Bucket names cannot contain upper-case
characters when using either the sub-domain or virtual hosting calling
format.
-
+
>>> check_lowercase_bucketname("1234-5678-9123")
True
>>> check_lowercase_bucketname("abcdefg1234")
@@ -56,6 +59,7 @@
"hosting calling format.")
return True
+
def assert_case_insensitive(f):
def wrapper(*args, **kwargs):
if len(args) == 3 and check_lowercase_bucketname(args[2]):
@@ -63,6 +67,7 @@
return f(*args, **kwargs)
return wrapper
+
class _CallingFormat(object):
def get_bucket_server(self, server, bucket):
@@ -91,20 +96,23 @@
key = boto.utils.get_utf8_value(key)
return '/%s' % urllib.quote(key)
+
class SubdomainCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return '%s.%s' % (bucket, server)
+
class VHostCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return bucket
+
class OrdinaryCallingFormat(_CallingFormat):
-
+
def get_bucket_server(self, server, bucket):
return server
@@ -115,21 +123,27 @@
path_base += "%s/" % bucket
return path_base + urllib.quote(key)
+
class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat):
-
+
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '//'
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
+
class Location:
- DEFAULT = '' # US Classic Region
+
+ DEFAULT = '' # US Classic Region
EU = 'EU'
USWest = 'us-west-1'
+ USWest2 = 'us-west-2'
+ SAEast = 'sa-east-1'
APNortheast = 'ap-northeast-1'
APSoutheast = 'ap-southeast-1'
+
class S3Connection(AWSAuthConnection):
DefaultHost = 's3.amazonaws.com'
@@ -140,17 +154,25 @@
proxy_user=None, proxy_pass=None,
host=DefaultHost, debug=0, https_connection_factory=None,
calling_format=SubdomainCallingFormat(), path='/',
- provider='aws', bucket_class=Bucket, security_token=None):
+ provider='aws', bucket_class=Bucket, security_token=None,
+ suppress_consec_slashes=True, anon=False,
+ validate_certs=None):
self.calling_format = calling_format
self.bucket_class = bucket_class
+ self.anon = anon
AWSAuthConnection.__init__(self, host,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
debug=debug, https_connection_factory=https_connection_factory,
- path=path, provider=provider, security_token=security_token)
+ path=path, provider=provider, security_token=security_token,
+ suppress_consec_slashes=suppress_consec_slashes,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
- return ['s3']
+ if self.anon:
+ return ['anon']
+ else:
+ return ['s3']
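
The new anon flag swaps the signing handler for the anonymous one, so no
credentials are required (or sent); this only works against publicly readable
buckets. A sketch with a hypothetical bucket:

    from boto.s3.connection import S3Connection

    conn = S3Connection(anon=True)
    bucket = conn.get_bucket('a-public-bucket')
    print bucket.get_key('public-object.txt').get_contents_as_string()
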
def __iter__(self):
for bucket in self.get_all_buckets():
@@ -164,7 +186,7 @@
Set the Bucket class associated with this bucket. By default, this
would be the boto.s3.key.Bucket class but if you want to subclass that
for some reason this allows you to associate your new class.
-
+
:type bucket_class: class
:param bucket_class: A subclass of Bucket that can be more specific
"""
@@ -174,7 +196,7 @@
"""
Taken from the AWS book Python examples and modified for use with boto
"""
- assert type(expiration_time) == time.struct_time, \
+ assert isinstance(expiration_time, time.struct_time), \
'Policy document must include a valid expiration Time object'
# Convert conditions object mappings to condition statements
@@ -182,7 +204,6 @@
return '{"expiration": "%s",\n"conditions": [%s]}' % \
(time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
-
def build_post_form_args(self, bucket_name, key, expires_in = 6000,
acl = None, success_action_redirect = None,
max_content_length = None,
@@ -190,50 +211,53 @@
conditions=None):
"""
        Taken from the AWS book Python examples and modified for use with boto.
- This only returns the arguments required for the post form, not the actual form
- This does not return the file input field which also needs to be added
-
+ This only returns the arguments required for the post form, not the
+ actual form. This does not return the file input field which also
+ needs to be added
+
+ :type bucket_name: string
:param bucket_name: Bucket to submit to
- :type bucket_name: string
-
- :param key: Key name, optionally add ${filename} to the end to attach the submitted filename
+
:type key: string
-
- :param expires_in: Time (in seconds) before this expires, defaults to 6000
+ :param key: Key name, optionally add ${filename} to the end to
+ attach the submitted filename
+
:type expires_in: integer
-
- :param acl: ACL rule to use, if any
+ :param expires_in: Time (in seconds) before this expires, defaults
+ to 6000
+
:type acl: :class:`boto.s3.acl.ACL`
-
+ :param acl: ACL rule to use, if any
+
+ :type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
- :type success_action_redirect: string
-
+
+ :type max_content_length: integer
:param max_content_length: Maximum size for this file
- :type max_content_length: integer
-
+
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
-
-
+
:rtype: dict
- :return: A dictionary containing field names/values as well as a url to POST to
-
+ :return: A dictionary containing field names/values as well as
+ a url to POST to
+
.. code-block:: python
-
+
{
- "action": action_url_to_post_to,
- "fields": [
+ "action": action_url_to_post_to,
+ "fields": [
{
- "name": field_name,
+ "name": field_name,
"value": field_value
- },
+ },
{
- "name": field_name2,
+ "name": field_name2,
"value": field_value2
- }
- ]
+ }
+ ]
}
-
+
"""
if fields == None:
fields = []
@@ -264,7 +288,8 @@
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
- fields.append({"name": "AWSAccessKeyId", "value": self.aws_access_key_id})
+ fields.append({"name": "AWSAccessKeyId",
+ "value": self.aws_access_key_id})
        # Add signature for encoded policy document as the 'signature' field
signature = self._auth_handler.sign_string(policy_b64)
@@ -272,50 +297,63 @@
fields.append({"name": "key", "value": key})
# HTTPS protocol will be used if the secure HTTP option is enabled.
- url = '%s://%s/' % (http_method, self.calling_format.build_host(self.server_name(), bucket_name))
+ url = '%s://%s/' % (http_method,
+ self.calling_format.build_host(self.server_name(),
+ bucket_name))
return {"action": url, "fields": fields}
-
def generate_url(self, expires_in, method, bucket='', key='', headers=None,
- query_auth=True, force_http=False, response_headers=None):
- if not headers:
- headers = {}
- expires = int(time.time() + expires_in)
+ query_auth=True, force_http=False, response_headers=None,
+ expires_in_absolute=False, version_id=None):
+ headers = headers or {}
+ if expires_in_absolute:
+ expires = int(expires_in)
+ else:
+ expires = int(time.time() + expires_in)
auth_path = self.calling_format.build_auth_path(bucket, key)
auth_path = self.get_path(auth_path)
- # Arguments to override response headers become part of the canonical
- # string to be signed.
+ # optional version_id and response_headers need to be added to
+ # the query param list.
+ extra_qp = []
+ if version_id is not None:
+ extra_qp.append("versionId=%s" % version_id)
if response_headers:
- response_hdrs = ["%s=%s" % (k, v) for k, v in
- response_headers.items()]
+ for k, v in response_headers.items():
+ extra_qp.append("%s=%s" % (k, urllib.quote(v)))
+ if self.provider.security_token:
+ headers['x-amz-security-token'] = self.provider.security_token
+ if extra_qp:
delimiter = '?' if '?' not in auth_path else '&'
- auth_path = "%s%s%s" % (auth_path, delimiter, '&'.join(response_hdrs))
- else:
- response_headers = {}
+ auth_path += delimiter + '&'.join(extra_qp)
c_string = boto.utils.canonical_string(method, auth_path, headers,
expires, self.provider)
b64_hmac = self._auth_handler.sign_string(c_string)
- encoded_canonical = urllib.quote_plus(b64_hmac)
+ encoded_canonical = urllib.quote(b64_hmac, safe='')
self.calling_format.build_path_base(bucket, key)
if query_auth:
query_part = '?' + self.QueryString % (encoded_canonical, expires,
self.aws_access_key_id)
- # The response headers must also be GET parameters in the URL.
- headers.update(response_headers)
- hdrs = [ '%s=%s'%(name, urllib.quote(val)) for name,val in headers.items() ]
- q_str = '&'.join(hdrs)
- if q_str:
- query_part += '&' + q_str
else:
query_part = ''
+ if headers:
+ hdr_prefix = self.provider.header_prefix
+ for k, v in headers.items():
+ if k.startswith(hdr_prefix):
+ # headers used for sig generation must be
+ # included in the url also.
+ extra_qp.append("%s=%s" % (k, urllib.quote(v)))
+ if extra_qp:
+ delimiter = '?' if not query_part else '&'
+ query_part += delimiter + '&'.join(extra_qp)
if force_http:
protocol = 'http'
port = 80
else:
protocol = self.protocol
port = self.port
- return self.calling_format.build_url_base(self, protocol, self.server_name(port),
+ return self.calling_format.build_url_base(self, protocol,
+ self.server_name(port),
bucket, key) + query_part
def get_all_buckets(self, headers=None):
@@ -343,7 +381,7 @@
:return: A string containing the canonical user id.
"""
rs = self.get_all_buckets(headers=headers)
- return rs.ID
+ return rs.owner.id
def get_bucket(self, bucket_name, validate=True, headers=None):
bucket = self.bucket_class(self, bucket_name)
@@ -366,16 +404,17 @@
:type bucket_name: string
:param bucket_name: The name of the new bucket
-
+
:type headers: dict
:param headers: Additional headers to pass along with the request to AWS.
:type location: :class:`boto.s3.connection.Location`
:param location: The location of the new bucket
-
+
:type policy: :class:`boto.s3.acl.CannedACLStrings`
- :param policy: A canned ACL policy that will be applied to the new key in S3.
-
+ :param policy: A canned ACL policy that will be applied to the
+ new key in S3.
+
"""
check_lowercase_bucketname(bucket_name)
@@ -383,12 +422,12 @@
if headers:
headers[self.provider.acl_header] = policy
else:
- headers = {self.provider.acl_header : policy}
+ headers = {self.provider.acl_header: policy}
if location == Location.DEFAULT:
data = ''
else:
- data = '<CreateBucketConstraint><LocationConstraint>' + \
- location + '</LocationConstraint></CreateBucketConstraint>'
+ data = '<CreateBucketConfiguration><LocationConstraint>' + \
+ location + '</LocationConstraint></CreateBucketConfiguration>'
response = self.make_request('PUT', bucket_name, headers=headers,
data=data)
body = response.read()
@@ -427,4 +466,3 @@
return AWSAuthConnection.make_request(self, method, path, headers,
data, host, auth_path, sender,
override_num_retries=override_num_retries)
-
diff --git a/boto/s3/cors.py b/boto/s3/cors.py
new file mode 100644
index 0000000..d97ee89
--- /dev/null
+++ b/boto/s3/cors.py
@@ -0,0 +1,210 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
+class CORSRule(object):
+ """
+ CORS rule for a bucket.
+
+ :ivar id: A unique identifier for the rule. The ID value can be
+ up to 255 characters long. The IDs help you find a rule in
+ the configuration.
+
+ :ivar allowed_method: An HTTP method that you want to allow the
+ origin to execute. Each CORSRule must identify at least one
+ origin and one method. Valid values are:
+ GET|PUT|HEAD|POST|DELETE
+
+ :ivar allowed_origin: An origin that you want to allow cross-domain
+ requests from. This can contain at most one * wild character.
+ Each CORSRule must identify at least one origin and one method.
+ The origin value can include at most one '*' wild character.
+ For example, "http://*.example.com". You can also specify
+ only * as the origin value allowing all origins cross-domain access.
+
+ :ivar allowed_header: Specifies which headers are allowed in a
+ pre-flight OPTIONS request via the
+ Access-Control-Request-Headers header. Each header name
+ specified in the Access-Control-Request-Headers header must
+ have a corresponding entry in the rule. Amazon S3 will send
+ only the allowed headers in a response that were requested.
+ This can contain at most one * wild character.
+
+ :ivar max_age_seconds: The time in seconds that your browser is to
+ cache the preflight response for the specified resource.
+
+ :ivar expose_header: One or more headers in the response that you
+ want customers to be able to access from their applications
+ (for example, from a JavaScript XMLHttpRequest object). You
+ add one ExposeHeader element in the rule for each header.
+ """
+
+ def __init__(self, allowed_method=None, allowed_origin=None,
+ id=None, allowed_header=None, max_age_seconds=None,
+ expose_header=None):
+ if allowed_method is None:
+ allowed_method = []
+ self.allowed_method = allowed_method
+ if allowed_origin is None:
+ allowed_origin = []
+ self.allowed_origin = allowed_origin
+ self.id = id
+ if allowed_header is None:
+ allowed_header = []
+ self.allowed_header = allowed_header
+ self.max_age_seconds = max_age_seconds
+ if expose_header is None:
+ expose_header = []
+ self.expose_header = expose_header
+
+ def __repr__(self):
+ return '<CORSRule: %s>' % self.id
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'ID':
+ self.id = value
+ elif name == 'AllowedMethod':
+ self.allowed_method.append(value)
+ elif name == 'AllowedOrigin':
+ self.allowed_origin.append(value)
+ elif name == 'AllowedHeader':
+ self.allowed_header.append(value)
+ elif name == 'MaxAgeSeconds':
+ self.max_age_seconds = int(value)
+ elif name == 'ExposeHeader':
+ self.expose_header.append(value)
+ else:
+ setattr(self, name, value)
+
+ def to_xml(self):
+ s = '<CORSRule>'
+ for allowed_method in self.allowed_method:
+ s += '<AllowedMethod>%s</AllowedMethod>' % allowed_method
+ for allowed_origin in self.allowed_origin:
+ s += '<AllowedOrigin>%s</AllowedOrigin>' % allowed_origin
+ for allowed_header in self.allowed_header:
+ s += '<AllowedHeader>%s</AllowedHeader>' % allowed_header
+ for expose_header in self.expose_header:
+ s += '<ExposeHeader>%s</ExposeHeader>' % expose_header
+ if self.max_age_seconds:
+ s += '<MaxAgeSeconds>%d</MaxAgeSeconds>' % self.max_age_seconds
+ if self.id:
+ s += '<ID>%s</ID>' % self.id
+ s += '</CORSRule>'
+ return s
+
+
+class CORSConfiguration(list):
+ """
+ A container for the rules associated with a CORS configuration.
+ """
+
+ def startElement(self, name, attrs, connection):
+ if name == 'CORSRule':
+ rule = CORSRule()
+ self.append(rule)
+ return rule
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+ def to_xml(self):
+ """
+ Returns a string containing the XML version of the CORS
+ configuration as defined by S3.
+ """
+ s = '<CORSConfiguration>'
+ for rule in self:
+ s += rule.to_xml()
+ s += '</CORSConfiguration>'
+ return s
+
+ def add_rule(self, allowed_method, allowed_origin,
+ id=None, allowed_header=None, max_age_seconds=None,
+ expose_header=None):
+ """
+ Add a rule to this CORS configuration. This only adds
+ the rule to the local copy. To install the new rule(s) on
+ the bucket, you need to pass this CORS config object
+ to the set_cors method of the Bucket object.
+
+ :type allowed_method: list of str
+ :param allowed_method: An HTTP method that you want to allow the
+ origin to execute. Each CORSRule must identify at least one
+ origin and one method. Valid values are:
+ GET|PUT|HEAD|POST|DELETE
+
+ :type allowed_origin: list of str
+ :param allowed_origin: An origin that you want to allow cross-domain
+ requests from. This can contain at most one * wild character.
+ Each CORSRule must identify at least one origin and one method.
+ The origin value can include at most one '*' wild character.
+ For example, "http://*.example.com". You can also specify
+ only * as the origin value allowing all origins
+ cross-domain access.
+
+ :type id: str
+ :param id: A unique identifier for the rule. The ID value can be
+ up to 255 characters long. The IDs help you find a rule in
+ the configuration.
+
+ :type allowed_header: list of str
+ :param allowed_header: Specifies which headers are allowed in a
+ pre-flight OPTIONS request via the
+ Access-Control-Request-Headers header. Each header name
+ specified in the Access-Control-Request-Headers header must
+ have a corresponding entry in the rule. Amazon S3 will send
+ only the allowed headers in a response that were requested.
+ This can contain at most one * wild character.
+
+ :type max_age_seconds: int
+ :param max_age_seconds: The time in seconds that your browser is to
+ cache the preflight response for the specified resource.
+
+ :type expose_header: list of str
+ :param expose_header: One or more headers in the response that you
+ want customers to be able to access from their applications
+ (for example, from a JavaScript XMLHttpRequest object). You
+ add one ExposeHeader element in the rule for each header.
+ """
+ if not isinstance(allowed_method, (list, tuple)):
+ allowed_method = [allowed_method]
+ if not isinstance(allowed_origin, (list, tuple)):
+ allowed_origin = [allowed_origin]
+ if not isinstance(allowed_header, (list, tuple)):
+ if allowed_header is None:
+ allowed_header = []
+ else:
+ allowed_header = [allowed_header]
+ if not isinstance(expose_header, (list, tuple)):
+ if expose_header is None:
+ expose_header = []
+ else:
+ expose_header = [expose_header]
+ rule = CORSRule(allowed_method, allowed_origin, id, allowed_header,
+ max_age_seconds, expose_header)
+ self.append(rule)
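(A minimal sketch of the intended round trip for these classes, assuming the
Bucket.set_cors plumbing added in the same revision; the bucket name is
hypothetical.)

    import boto
    from boto.s3.cors import CORSConfiguration

    conn = boto.connect_s3()
    bucket = conn.get_bucket('my-example-bucket')

    cors = CORSConfiguration()
    # Allow browser GETs from any origin; preflight cached for an hour.
    cors.add_rule('GET', '*', allowed_header='*', max_age_seconds=3600)
    bucket.set_cors(cors)

    print cors.to_xml()  # '<CORSConfiguration><CORSRule>...</CORSConfiguration>'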
diff --git a/boto/s3/deletemarker.py b/boto/s3/deletemarker.py
index 3462d42..c2dac19 100644
--- a/boto/s3/deletemarker.py
+++ b/boto/s3/deletemarker.py
@@ -25,6 +25,7 @@
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
+ self.version_id = None
self.is_latest = False
self.last_modified = None
self.owner = None
@@ -38,10 +39,10 @@
def endElement(self, name, value, connection):
if name == 'Key':
- self.name = value.encode('utf-8')
+ self.name = value
elif name == 'IsLatest':
if value == 'true':
- self.is_lastest = True
+ self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
diff --git a/boto/s3/key.py b/boto/s3/key.py
index 18829c2..c8ec4ef 100644
--- a/boto/s3/key.py
+++ b/boto/s3/key.py
@@ -1,5 +1,6 @@
-# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -26,11 +27,14 @@
import rfc822
import StringIO
import base64
+import math
+import urllib
import boto.utils
from boto.exception import BotoClientError
from boto.provider import Provider
from boto.s3.user import User
from boto import UserAgent
+from boto.utils import compute_md5
try:
from hashlib import md5
except ImportError:
@@ -50,8 +54,11 @@
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
+ self.content_disposition = None
+ self.content_language = None
self.filename = None
self.etag = None
+ self.is_latest = False
self.last_modified = None
self.owner = None
self.storage_class = 'STANDARD'
@@ -130,7 +137,7 @@
else:
self.delete_marker = False
- def open_read(self, headers=None, query_args=None,
+ def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
@@ -139,17 +146,18 @@
:param headers: Headers to pass in the web request
:type query_args: string
- :param query_args: Arguments to pass in the query string (ie, 'torrent')
+ :param query_args: Arguments to pass in the query string
+ (ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
- num_retries parameter for underlying GET.
+ num_retries parameter for underlying GET.
:type response_headers: dict
- :param response_headers: A dictionary containing HTTP headers/values
- that will override any headers associated with
- the stored object in the response.
- See http://goo.gl/EWOPb for details.
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
"""
if self.resp == None:
self.mode = 'r'
@@ -166,7 +174,7 @@
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
- for name,value in response_headers.items():
+ for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
@@ -182,10 +190,14 @@
self.content_type = value
elif name.lower() == 'content-encoding':
self.content_encoding = value
+ elif name.lower() == 'content-language':
+ self.content_language = value
elif name.lower() == 'last-modified':
self.last_modified = value
elif name.lower() == 'cache-control':
self.cache_control = value
+ elif name.lower() == 'content-disposition':
+ self.content_disposition = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
@@ -199,7 +211,7 @@
:type override_num_retries: int
:param override_num_retries: If not None will override configured
- num_retries parameter for underlying PUT.
+ num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
@@ -217,6 +229,7 @@
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
+
def close(self):
if self.resp:
self.resp.read()
@@ -251,7 +264,8 @@
self.close()
return data
- def change_storage_class(self, new_storage_class, dst_bucket=None):
+ def change_storage_class(self, new_storage_class, dst_bucket=None,
+ validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
@@ -262,29 +276,33 @@
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
- Possible values are:
- * STANDARD
- * REDUCED_REDUNDANCY
+ Possible values are:
+ * STANDARD
+ * REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
- provided the current bucket of the key
- will be used.
+ provided the current bucket of the key will be used.
+ :type validate_dst_bucket: bool
+ :param validate_dst_bucket: If True, will validate the dst_bucket
+ by using an extra list request.
"""
if new_storage_class == 'STANDARD':
return self.copy(self.bucket.name, self.name,
- reduced_redundancy=False, preserve_acl=True)
+ reduced_redundancy=False, preserve_acl=True,
+ validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(self.bucket.name, self.name,
- reduced_redundancy=True, preserve_acl=True)
+ reduced_redundancy=True, preserve_acl=True,
+ validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
- encrypt_key=False):
+ encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
@@ -295,45 +313,42 @@
:param dst_key: The name of the destination key
:type metadata: dict
- :param metadata: Metadata to be associated with new key.
- If metadata is supplied, it will replace the
- metadata of the source key being copied.
- If no metadata is supplied, the source key's
- metadata will be copied to the new key.
+ :param metadata: Metadata to be associated with new key. If
+ metadata is supplied, it will replace the metadata of the
+ source key being copied. If no metadata is supplied, the
+ source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
- :param reduced_redundancy: If True, this will force the storage
- class of the new Key to be
- REDUCED_REDUNDANCY regardless of the
- storage class of the key being copied.
- The Reduced Redundancy Storage (RRS)
- feature of S3, provides lower
- redundancy at lower storage cost.
+ :param reduced_redundancy: If True, this will force the
+ storage class of the new Key to be REDUCED_REDUNDANCY
+ regardless of the storage class of the key being copied.
+ The Reduced Redundancy Storage (RRS) feature of S3
+ provides lower redundancy at lower storage cost.
:type preserve_acl: bool
- :param preserve_acl: If True, the ACL from the source key
- will be copied to the destination
- key. If False, the destination key
- will have the default ACL.
- Note that preserving the ACL in the
- new key object will require two
- additional API calls to S3, one to
- retrieve the current ACL and one to
- set that ACL on the new object. If
- you don't care about the ACL, a value
- of False will be significantly more
- efficient.
+ :param preserve_acl: If True, the ACL from the source key will
+ be copied to the destination key. If False, the
+ destination key will have the default ACL. Note that
+ preserving the ACL in the new key object will require two
+ additional API calls to S3, one to retrieve the current
+ ACL and one to set that ACL on the new object. If you
+ don't care about the ACL, a value of False will be
+ significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
- be encrypted on the server-side by S3 and
- will be stored in an encrypted form while
- at rest in S3.
-
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
+
+ :type validate_dst_bucket: bool
+ :param validate_dst_bucket: If True, will validate the dst_bucket
+ by using an extra list request.
+
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
- dst_bucket = self.bucket.connection.lookup(dst_bucket)
+ dst_bucket = self.bucket.connection.lookup(dst_bucket,
+ validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
@@ -353,9 +368,14 @@
def endElement(self, name, value, connection):
if name == 'Key':
- self.name = value.encode('utf-8')
+ self.name = value
elif name == 'ETag':
self.etag = value
+ elif name == 'IsLatest':
+ if value == 'true':
+ self.is_latest = True
+ else:
+ self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
@@ -417,7 +437,9 @@
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
- query_auth=True, force_http=False, response_headers=None):
+ query_auth=True, force_http=False, response_headers=None,
+ expires_in_absolute=False, version_id=None,
+ policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
@@ -426,7 +448,7 @@
:type method: string
:param method: The method to use for retrieving the file
- (default is GET)
+ (default is GET)
:type headers: dict
:param headers: Any headers to pass along in the request
@@ -434,56 +456,136 @@
:type query_auth: bool
:param query_auth:
+ :type force_http: bool
+ :param force_http: If True, http will be used instead of https.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
+
+ :type expires_in_absolute: bool
+ :param expires_in_absolute: If True, expires_in is interpreted
+ as an absolute expiration time (seconds since the epoch)
+ rather than an offset from the current time.
+
+ :type version_id: string
+ :param version_id: The version_id of the object to GET. If specified
+ this overrides any value in the key.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the
+ new key in S3.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type encrypt_key: bool
+ :param encrypt_key: If True, the new copy of the object will
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
+
:rtype: string
:return: The URL to access the key
"""
+ provider = self.bucket.connection.provider
+ version_id = version_id or self.version_id
+ if headers is None:
+ headers = {}
+ else:
+ headers = headers.copy()
+
+ # add headers accordingly (usually PUT case)
+ if policy:
+ headers[provider.acl_header] = policy
+ if reduced_redundancy:
+ self.storage_class = 'REDUCED_REDUNDANCY'
+ if provider.storage_class_header:
+ headers[provider.storage_class_header] = self.storage_class
+ if encrypt_key:
+ headers[provider.server_side_encryption_header] = 'AES256'
+ headers = boto.utils.merge_meta(headers, self.metadata, provider)
+
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
- response_headers)
+ response_headers,
+ expires_in_absolute,
+ version_id)
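(Since policy, reduced_redundancy and encrypt_key now fold extra x-amz-*
headers into the string to sign, a presigned PUT built here generally only
works if the client repeats those same headers. A hedged sketch; names are
illustrative.)

    import boto

    conn = boto.connect_s3()
    key = conn.get_bucket('my-example-bucket').new_key('uploads/data.bin')
    # The client performing the PUT must send x-amz-acl: private and
    # x-amz-server-side-encryption: AES256, or the signature won't match.
    put_url = key.generate_url(300, method='PUT', policy='private',
                               encrypt_key=True)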
def send_file(self, fp, headers=None, cb=None, num_cb=10,
- query_args=None, chunked_transfer=False):
+ query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
- :param fp: The file pointer to upload
+ :param fp: The file pointer to upload. The file pointer must
+ point at the offset from which you wish to upload,
+ i.e. if uploading the full file, it should point at the
+ start of the file. Normally when a file is opened for
+ reading, the fp will point at the first byte. See the
+ size parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type cb: function
:param cb: a callback function that will be called to report
- progress on the upload. The callback should accept
- two integer parameters, the first representing the
- number of bytes that have been successfully
- transmitted to S3 and the second representing the
- size of the to be transmitted object.
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the to be transmitted
+ object.
:type num_cb: int
- :param num_cb: (optional) If a callback is specified with the cb
- parameter this parameter determines the granularity
- of the callback by defining the maximum number of
- times the callback will be called during the file
- transfer. Providing a negative integer will cause
- your callback to be called with each buffer read.
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file
+ transfer. Providing a negative integer will cause your
+ callback to be called with each buffer read.
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read
+ from the file pointer (fp). This is useful when uploading
+ a file in multiple parts where you are splitting the file
+ up into different ranges to be uploaded. If not specified,
+ the default behaviour is to read all bytes from the file
+ pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
+ try:
+ spos = fp.tell()
+ except IOError:
+ spos = None
+ self.read_from_stream = False
def sender(http_conn, method, path, data, headers):
+ # This function is called repeatedly for temporary retries
+ # so we must be sure the file pointer is pointing at the
+ # start of the data.
+ if spos is not None and spos != fp.tell():
+ fp.seek(spos)
+ elif spos is None and self.read_from_stream:
+ # if seek is not supported, and we've read from this
+ # stream already, then we need to abort retries to
+ # avoid setting bad data.
+ raise provider.storage_data_error(
+ 'Cannot retry failed request. fp does not support seeking.')
+
http_conn.putrequest(method, path)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
- if chunked_transfer:
- # MD5 for the stream has to be calculated on the fly, as
- # we don't know the size of the stream before hand.
+
+ # Calculate all MD5 checksums on the fly, if not already computed
+ if not self.base64md5:
m = md5()
else:
- fp.seek(0)
+ m = None
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
@@ -493,48 +595,76 @@
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 3:
http_conn.set_debuglevel(0)
+
+ data_len = 0
if cb:
- if chunked_transfer:
+ if size:
+ cb_size = size
+ elif self.size:
+ cb_size = self.size
+ else:
+ cb_size = 0
+ if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
- # of data transferred.
- cb_count = (1024 * 1024)/self.BufferSize
- self.size = 0
- elif num_cb > 2:
- cb_count = self.size / self.BufferSize / (num_cb-2)
+ # of data transferred, except when we know size.
+ cb_count = (1024 * 1024) / self.BufferSize
+ elif num_cb > 1:
+ cb_count = int(math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
- i = total_bytes = 0
- cb(total_bytes, self.size)
- l = fp.read(self.BufferSize)
- while len(l) > 0:
+ i = 0
+ cb(data_len, cb_size)
+
+ bytes_togo = size
+ if bytes_togo and bytes_togo < self.BufferSize:
+ chunk = fp.read(bytes_togo)
+ else:
+ chunk = fp.read(self.BufferSize)
+ if spos is None:
+ # read at least something from a non-seekable fp.
+ self.read_from_stream = True
+ while chunk:
+ chunk_len = len(chunk)
+ data_len += chunk_len
if chunked_transfer:
- http_conn.send('%x;\r\n' % len(l))
- http_conn.send(l)
+ http_conn.send('%x;\r\n' % chunk_len)
+ http_conn.send(chunk)
http_conn.send('\r\n')
else:
- http_conn.send(l)
+ http_conn.send(chunk)
+ if m:
+ m.update(chunk)
+ if bytes_togo:
+ bytes_togo -= chunk_len
+ if bytes_togo <= 0:
+ break
if cb:
- total_bytes += len(l)
i += 1
if i == cb_count or cb_count == -1:
- cb(total_bytes, self.size)
+ cb(data_len, cb_size)
i = 0
- if chunked_transfer:
- m.update(l)
- l = fp.read(self.BufferSize)
+ if bytes_togo and bytes_togo < self.BufferSize:
+ chunk = fp.read(bytes_togo)
+ else:
+ chunk = fp.read(self.BufferSize)
+
+ self.size = data_len
+
+ if m:
+ # Use the chunked trailer for the digest
+ hd = m.hexdigest()
+ self.md5, self.base64md5 = self.get_md5_from_hexdigest(hd)
+
if chunked_transfer:
http_conn.send('0\r\n')
+ # http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
- if cb:
- self.size = total_bytes
- # Get the md5 which is calculated on the fly.
- self.md5 = m.hexdigest()
- else:
- fp.seek(0)
- if cb:
- cb(total_bytes, self.size)
+
+ if cb and (cb_count <= 1 or i > 0) and data_len > 0:
+ cb(data_len, cb_size)
+
response = http_conn.getresponse()
body = response.read()
http_conn.set_debuglevel(save_debug)
@@ -545,7 +675,7 @@
return response
elif response.status >= 200 and response.status <= 299:
self.etag = response.getheader('etag')
- if self.etag != '"%s"' % self.md5:
+ if self.etag != '"%s"' % self.md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5')
return response
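(The rewritten sender drives seekable and non-seekable inputs through one
loop; with chunked_transfer set, each buffer is framed using HTTP/1.1 chunked
encoding. A toy illustration of the exact framing the loop emits, including
the bare ';' before CRLF that the code sends:)

    def frame_chunk(chunk):
        # Mirrors sender(): hex length, ';', CRLF, payload, CRLF.
        return '%x;\r\n%s\r\n' % (len(chunk), chunk)

    assert frame_chunk('abc') == '3;\r\nabc\r\n'
    # After the last chunk the sender emits '0\r\n' followed by '\r\n'.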
@@ -558,14 +688,22 @@
else:
headers = headers.copy()
headers['User-Agent'] = UserAgent
- if self.base64md5:
- headers['Content-MD5'] = self.base64md5
if self.storage_class != 'STANDARD':
headers[provider.storage_class_header] = self.storage_class
- if headers.has_key('Content-Encoding'):
+ if 'Content-Encoding' in headers:
self.content_encoding = headers['Content-Encoding']
- if headers.has_key('Content-Type'):
- self.content_type = headers['Content-Type']
+ if 'Content-Language' in headers:
+ self.content_language = headers['Content-Language']
+ if 'Content-Type' in headers:
+ # Some use cases need to suppress sending of the Content-Type
+ # header and depend on the receiving server to set the content
+ # type. This can be achieved by setting headers['Content-Type']
+ # to None when calling this method.
+ if headers['Content-Type'] is None:
+ # Delete null Content-Type value to skip sending that header.
+ del headers['Content-Type']
+ else:
+ self.content_type = headers['Content-Type']
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type == None:
@@ -573,7 +711,13 @@
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
- if not chunked_transfer:
+ if self.base64md5:
+ headers['Content-MD5'] = self.base64md5
+ if chunked_transfer:
+ headers['Transfer-Encoding'] = 'chunked'
+ #if not self.base64md5:
+ # headers['Trailer'] = "Content-MD5"
+ else:
headers['Content-Length'] = str(self.size)
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
@@ -583,50 +727,57 @@
query_args=query_args)
self.handle_version_headers(resp, force=True)
- def compute_md5(self, fp):
+ def compute_md5(self, fp, size=None):
"""
:type fp: file
- :param fp: File pointer to the file to MD5 hash. The file pointer
- will be reset to the beginning of the file before the
- method returns.
+ :param fp: File pointer to the file to MD5 hash. The file
+ pointer will be reset to the same position before the
+ method returns.
+
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read
+ from the file pointer (fp). This is useful when uploading
+ a file in multiple parts where the file is being split
+ in place into different parts. Fewer bytes may be available.
:rtype: tuple
- :return: A tuple containing the hex digest version of the MD5 hash
- as the first element and the base64 encoded version of the
- plain digest as the second element.
+ :return: A tuple containing the hex digest version of the MD5
+ hash as the first element and the base64 encoded version
+ of the plain digest as the second element.
"""
- m = md5()
- fp.seek(0)
- s = fp.read(self.BufferSize)
- while s:
- m.update(s)
- s = fp.read(self.BufferSize)
- hex_md5 = m.hexdigest()
- base64md5 = base64.encodestring(m.digest())
- if base64md5[-1] == '\n':
- base64md5 = base64md5[0:-1]
- self.size = fp.tell()
- fp.seek(0)
- return (hex_md5, base64md5)
+ tup = compute_md5(fp, size=size)
+ # Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
+ # The internal implementation of compute_md5() needs to return the
+ # data size but we don't want to return that value to the external
+ # caller because it changes the class interface (i.e. it might
+ # break some code) so we consume the third tuple value here and
+ # return the remainder of the tuple to the caller, thereby preserving
+ # the existing interface.
+ self.size = tup[2]
+ return tup[0:2]
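(compute_md5 now delegates to boto.utils.compute_md5, which returns a
three-tuple; the size element is consumed here to preserve the public
two-tuple contract. A small sketch of calling the utility directly, e.g. to
pre-compute the md5 argument accepted by the set_contents_* methods:)

    from StringIO import StringIO
    from boto.utils import compute_md5

    fp = StringIO('hello world')
    # Hash only the first 5 bytes; fp is restored to its starting offset.
    hex_md5, b64_md5, data_size = compute_md5(fp, size=5)
    assert data_size == 5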
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
- reduced_redundancy=False, query_args=None):
+ reduced_redundancy=False, query_args=None,
+ size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
+
The stream object is not seekable and total size is not known.
- This has the implication that we can't specify the Content-Size and
- Content-MD5 in the header. So for huge uploads, the delay in calculating
- MD5 is avoided but with a penalty of inability to verify the integrity
- of the uploaded data.
+ This has the implication that we can't specify the
+ Content-Size and Content-MD5 in the header. So for huge
+ uploads, the delay in calculating MD5 is avoided but with a
+ penalty of inability to verify the integrity of the uploaded
+ data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
- :param headers: additional HTTP headers to be sent with the PUT request.
+ :param headers: additional HTTP headers to be sent with the
+ PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
@@ -642,10 +793,10 @@
total number of bytes that need to be transmitted.
:type num_cb: int
- :param num_cb: (optional) If a callback is specified with the cb
- parameter, this parameter determines the granularity of the callback
- by defining the maximum number of times the callback will be called
- during the file transfer.
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter, this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
@@ -653,10 +804,17 @@
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
- class of the new Key to be
- REDUCED_REDUNDANCY. The Reduced Redundancy
- Storage (RRS) feature of S3, provides lower
- redundancy at lower storage cost.
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read from
+ the file pointer (fp). This is useful when uploading a
+ file in multiple parts where you are splitting the file up
+ into different ranges to be uploaded. If not specified,
+ the default behaviour is to read all bytes from the file
+ pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
@@ -674,9 +832,6 @@
if policy:
headers[provider.acl_header] = policy
- # Set the Transfer Encoding for Streams.
- headers['Transfer-Encoding'] = 'chunked'
-
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
@@ -684,85 +839,114 @@
if self.bucket != None:
if not replace:
- k = self.bucket.lookup(self.name)
- if k:
+ if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
- chunked_transfer=True)
+ chunked_transfer=True, size=size)
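(With the Transfer-Encoding header now set inside send_file, this method still
requires a provider whose supports_chunked_transfer() is true, typically
Google Storage in this tree. A hedged sketch against a non-seekable source;
names are illustrative.)

    import boto

    conn = boto.connect_gs()  # chunked transfer needs provider support
    key = conn.get_bucket('my-example-bucket').new_key('logs/stream.log')
    fp = open('/var/log/app.log', 'rb')
    # Read at most 1 MiB from fp's current position; size is optional.
    key.set_contents_from_stream(fp, size=1024 * 1024)
    fp.close()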
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
- encrypt_key=False):
+ encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
- contents.
+ contents. The data is read from 'fp' from its current position until
+ 'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
- the PUT request.
+ the PUT request.
:type replace: bool
- :param replace: If this parameter is False, the method
- will first check to see if an object exists in the
- bucket with the same key. If it does, it won't
- overwrite it. The default value is True which will
- overwrite the object.
+ :param replace: If this parameter is False, the method will
+ first check to see if an object exists in the bucket with
+ the same key. If it does, it won't overwrite it. The
+ default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
- progress on the upload. The callback should accept
- two integer parameters, the first representing the
- number of bytes that have been successfully
- transmitted to S3 and the second representing the
- size of the to be transmitted object.
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the to be transmitted
+ object.
:type cb: int
- :param num_cb: (optional) If a callback is specified with the cb
- parameter this parameter determines the granularity
- of the callback by defining the maximum number of
- times the callback will be called during the
- file transfer.
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
- new key in S3.
+ new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
- checksum of the file as the first element and the
- Base64-encoded version of the plain checksum as the
- second element. This is the same format returned by
- the compute_md5 method.
- :param md5: If you need to compute the MD5 for any reason prior
- to upload, it's silly to have to do it twice so this
- param, if present, will be used as the MD5 values of
- the file. Otherwise, the checksum will be computed.
+ checksum of the file as the first element and the
+ Base64-encoded version of the plain checksum as the second
+ element. This is the same format returned by the
+ compute_md5 method.
+ :param md5: If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice so this
+ param, if present, will be used as the MD5 values of the
+ file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
- class of the new Key to be
- REDUCED_REDUNDANCY. The Reduced Redundancy
- Storage (RRS) feature of S3, provides lower
- redundancy at lower storage cost.
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
- be encrypted on the server-side by S3 and
- will be stored in an encrypted form while
- at rest in S3.
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
+
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read
+ from the file pointer (fp). This is useful when uploading
+ a file in multiple parts where you are splitting the file
+ up into different ranges to be uploaded. If not specified,
+ the default behaviour is to read all bytes from the file
+ pointer. Fewer bytes may be available.
+
+ :type rewind: bool
+ :param rewind: (optional) If True, the file pointer (fp) will
+ be rewound to the start before any bytes are read from
+ it. The default behaviour is False which reads from the
+ current position of the file pointer (fp).
+
+ :rtype: int
+ :return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
- if headers is None:
- headers = {}
+ headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
+ if rewind:
+ # caller requests reading from beginning of fp.
+ fp.seek(0, os.SEEK_SET)
+ else:
+ spos = fp.tell()
+ fp.seek(0, os.SEEK_END)
+ if fp.tell() == spos:
+ fp.seek(0, os.SEEK_SET)
+ if fp.tell() != spos:
+ # Raise an exception as this is likely a programming error
+ # whereby there is data before the fp but nothing after it.
+ fp.seek(spos)
+ raise AttributeError(
+ 'fp is at EOF. Use rewind option or seek() to data start.')
+ # seek back to the correct position.
+ fp.seek(spos)
+
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
@@ -771,23 +955,45 @@
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
+
if self.bucket != None:
- if not md5:
- md5 = self.compute_md5(fp)
+ if not md5 and provider.supports_chunked_transfer():
+ # defer the md5 calculation so it is computed on the fly;
+ # we don't know anything about the size yet.
+ chunked_transfer = True
+ self.size = None
else:
- # even if md5 is provided, still need to set size of content
- fp.seek(0, 2)
- self.size = fp.tell()
- fp.seek(0)
- self.md5 = md5[0]
- self.base64md5 = md5[1]
+ chunked_transfer = False
+ if not md5:
+ # compute_md5() also sets self.size to the actual
+ # number of bytes read while computing the md5.
+ md5 = self.compute_md5(fp, size)
+ # adjust size if required
+ size = self.size
+ elif size:
+ self.size = size
+ else:
+ # If md5 is provided, we still need the size, so
+ # calculate it from the bytes to the end of content.
+ spos = fp.tell()
+ fp.seek(0, os.SEEK_END)
+ self.size = fp.tell() - spos
+ fp.seek(spos)
+ size = self.size
+ self.md5 = md5[0]
+ self.base64md5 = md5[1]
+
if self.name == None:
self.name = self.md5
if not replace:
- k = self.bucket.lookup(self.name)
- if k:
+ if self.bucket.lookup(self.name):
return
- self.send_file(fp, headers, cb, num_cb, query_args)
+
+ self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
+ query_args=query_args,
+ chunked_transfer=chunked_transfer, size=size)
+ # return number of bytes written.
+ return self.size
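(The new size and rewind parameters make ranged uploads from one open file
practical, and the return value is the byte count actually written. A minimal
sketch that uploads a hypothetical large file in two halves:)

    import os
    import boto

    conn = boto.connect_s3()
    bucket = conn.get_bucket('my-example-bucket')

    fp = open('backup.tar', 'rb')
    half = os.fstat(fp.fileno()).st_size // 2
    sent = bucket.new_key('backup.part1').set_contents_from_file(
        fp, size=half, rewind=True)     # bytes 0 .. half-1
    # fp should now sit at offset half; the second call reads to EOF.
    sent += bucket.new_key('backup.part2').set_contents_from_file(fp)
    fp.close()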
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
@@ -804,52 +1010,48 @@
:type headers: dict
:param headers: Additional headers to pass along with the
- request to AWS.
+ request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
- if it already exists.
+ if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
- progress on the upload. The callback should accept
- two integer parameters, the first representing the
- number of bytes that have been successfully
- transmitted to S3 and the second representing the
- size of the to be transmitted object.
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the to be transmitted
+ object.
:type cb: int
- :param num_cb: (optional) If a callback is specified with
- the cb parameter this parameter determines the
- granularity of the callback by defining
- the maximum number of times the callback will
- be called during the file transfer.
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
- new key in S3.
+ new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
- checksum of the file as the first element and the
- Base64-encoded version of the plain checksum as the
- second element. This is the same format returned by
- the compute_md5 method.
- :param md5: If you need to compute the MD5 for any reason prior
- to upload, it's silly to have to do it twice so this
- param, if present, will be used as the MD5 values
- of the file. Otherwise, the checksum will be computed.
+ checksum of the file as the first element and the
+ Base64-encoded version of the plain checksum as the second
+ element. This is the same format returned by the
+ compute_md5 method.
+ :param md5: If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice so this
+ param, if present, will be used as the MD5 values of the
+ file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
- class of the new Key to be
- REDUCED_REDUNDANCY. The Reduced Redundancy
- Storage (RRS) feature of S3, provides lower
- redundancy at lower storage cost.
- :type encrypt_key: bool
- :param encrypt_key: If True, the new copy of the object will
- be encrypted on the server-side by S3 and
- will be stored in an encrypted form while
- at rest in S3.
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type encrypt_key: bool
+ :param encrypt_key: If True, the new copy of the object
+ will be encrypted on the server-side by S3 and will be
+ stored in an encrypted form while at rest in S3.
"""
fp = open(filename, 'rb')
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
@@ -869,52 +1071,50 @@
:type headers: dict
:param headers: Additional headers to pass along with the
- request to AWS.
+ request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
- it already exists.
+ it already exists.
:type cb: function
:param cb: a callback function that will be called to report
- progress on the upload. The callback should accept
- two integer parameters, the first representing the
- number of bytes that have been successfully
- transmitted to S3 and the second representing the
- size of the to be transmitted object.
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the to be transmitted
+ object.
:type cb: int
- :param num_cb: (optional) If a callback is specified with
- the cb parameter this parameter determines the
- granularity of the callback by defining
- the maximum number of times the callback will
- be called during the file transfer.
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
- new key in S3.
+ new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
- checksum of the file as the first element and the
- Base64-encoded version of the plain checksum as the
- second element. This is the same format returned by
- the compute_md5 method.
- :param md5: If you need to compute the MD5 for any reason prior
- to upload, it's silly to have to do it twice so this
- param, if present, will be used as the MD5 values
- of the file. Otherwise, the checksum will be computed.
+ checksum of the file as the first element and the
+ Base64-encoded version of the plain checksum as the second
+ element. This is the same format returned by the
+ compute_md5 method.
+ :param md5: If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice so this
+ param, if present, will be used as the MD5 values of the
+ file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
- class of the new Key to be
- REDUCED_REDUNDANCY. The Reduced Redundancy
- Storage (RRS) feature of S3, provides lower
- redundancy at lower storage cost.
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
- be encrypted on the server-side by S3 and
- will be stored in an encrypted form while
- at rest in S3.
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
"""
if isinstance(s, unicode):
s = s.encode("utf-8")
@@ -939,41 +1139,33 @@
:type cb: function
:param cb: a callback function that will be called to report
- progress on the upload. The callback should accept
- two integer parameters, the first representing the
- number of bytes that have been successfully
- transmitted to S3 and the second representing the
- size of the to be transmitted object.
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the to be transmitted
+ object.
:type cb: int
- :param num_cb: (optional) If a callback is specified with
- the cb parameter this parameter determines the
- granularity of the callback by defining
- the maximum number of times the callback will
- be called during the file transfer.
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
- num_retries parameter for underlying GET.
+ num_retries parameter for underlying GET.
:type response_headers: dict
- :param response_headers: A dictionary containing HTTP headers/values
- that will override any headers associated with
- the stored object in the response.
- See http://goo.gl/EWOPb for details.
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
"""
- if cb:
- if num_cb > 2:
- cb_count = self.size / self.BufferSize / (num_cb-2)
- elif num_cb < 0:
- cb_count = -1
- else:
- cb_count = 0
- i = total_bytes = 0
- cb(total_bytes, self.size)
+ if headers is None:
+ headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
@@ -981,6 +1173,9 @@
query_args = []
if torrent:
query_args.append('torrent')
+ m = None
+ else:
+ m = md5()
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
@@ -990,20 +1185,47 @@
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
- query_args.append('%s=%s' % (key, response_headers[key]))
+ query_args.append('%s=%s' % (key, urllib.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
+
+ data_len = 0
+ if cb:
+ if self.size is None:
+ cb_size = 0
+ else:
+ cb_size = self.size
+ if self.size is None and num_cb != -1:
+ # If size is not available due to chunked transfer for example,
+ # we'll call the cb for every 1MB of data transferred.
+ cb_count = (1024 * 1024) / self.BufferSize
+ elif num_cb > 1:
+ cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
+ elif num_cb < 0:
+ cb_count = -1
+ else:
+ cb_count = 0
+ i = 0
+ cb(data_len, cb_size)
for bytes in self:
fp.write(bytes)
+ data_len += len(bytes)
+ if m:
+ m.update(bytes)
if cb:
- total_bytes += len(bytes)
+ if cb_size > 0 and data_len >= cb_size:
+ break
i += 1
if i == cb_count or cb_count == -1:
- cb(total_bytes, self.size)
+ cb(data_len, cb_size)
i = 0
- if cb:
- cb(total_bytes, self.size)
+ if cb and (cb_count <= 1 or i > 0) and data_len > 0:
+ cb(data_len, cb_size)
+ if m:
+ self.md5 = m.hexdigest()
+ if self.size is None and not torrent and "Range" not in headers:
+ self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
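(get_file now tracks data_len itself, computes an on-the-fly md5 for plain
GETs, and back-fills self.size when the response was chunked. A sketch of a
download with a progress callback, which the new accounting invokes at most
num_cb times; names are illustrative.)

    import boto

    def progress(done, total):
        # total is 0 when the size is unknown (e.g. chunked responses).
        print '%d of %d bytes' % (done, total)

    conn = boto.connect_s3()
    key = conn.get_bucket('my-example-bucket').get_key('reports/2012.csv')
    fp = open('2012.csv', 'wb')
    key.get_contents_to_file(fp, cb=progress, num_cb=10)
    fp.close()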
@@ -1019,18 +1241,17 @@
:type cb: function
:param cb: a callback function that will be called to report
- progress on the upload. The callback should accept
- two integer parameters, the first representing the
- number of bytes that have been successfully
- transmitted to S3 and the second representing the
- size of the to be transmitted object.
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the to be transmitted
+ object.
:type cb: int
- :param num_cb: (optional) If a callback is specified with
- the cb parameter this parameter determines the
- granularity of the callback by defining
- the maximum number of times the callback will
- be called during the file transfer.
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
@@ -1051,36 +1272,35 @@
:type headers: dict
:param headers: additional HTTP headers that will be sent with
- the GET request.
+ the GET request.
:type cb: function
:param cb: a callback function that will be called to report
- progress on the upload. The callback should accept
- two integer parameters, the first representing the
- number of bytes that have been successfully
- transmitted to S3 and the second representing the
- size of the to be transmitted object.
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the to be transmitted
+ object.
:type cb: int
- :param num_cb: (optional) If a callback is specified with
- the cb parameter this parameter determines the
- granularity of the callback by defining
- the maximum number of times the callback will
- be called during the file transfer.
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
- file as a string.
+ file as a string.
:type res_upload_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
- perform the download.
+ perform the download.
:type response_headers: dict
- :param response_headers: A dictionary containing HTTP headers/values
- that will override any headers associated with
- the stored object in the response.
- See http://goo.gl/EWOPb for details.
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
"""
if self.bucket != None:
if res_download_handler:
@@ -1112,32 +1332,31 @@
:type cb: function
:param cb: a callback function that will be called to report
- progress on the upload. The callback should accept
- two integer parameters, the first representing the
- number of bytes that have been successfully
- transmitted to S3 and the second representing the
- size of the to be transmitted object.
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the to be transmitted
+ object.
:type cb: int
- :param num_cb: (optional) If a callback is specified with
- the cb parameter this parameter determines the
- granularity of the callback by defining
- the maximum number of times the callback will
- be called during the file transfer.
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
- as a string.
+ as a string.
:type res_upload_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
- perform the download.
+ perform the download.
:type response_headers: dict
- :param response_headers: A dictionary containing HTTP headers/values
- that will override any headers associated with
- the stored object in the response.
- See http://goo.gl/EWOPb for details.
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
"""
fp = open(filename, 'wb')
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
@@ -1151,7 +1370,8 @@
modified_tuple = rfc822.parsedate_tz(self.last_modified)
modified_stamp = int(rfc822.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
- except Exception: pass
+ except Exception:
+ pass
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
@@ -1169,28 +1389,27 @@
:type cb: function
:param cb: a callback function that will be called to report
- progress on the upload. The callback should accept
- two integer parameters, the first representing the
- number of bytes that have been successfully
- transmitted to S3 and the second representing the
- size of the to be transmitted object.
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the to be transmitted
+ object.
:type cb: int
- :param num_cb: (optional) If a callback is specified with
- the cb parameter this parameter determines the
- granularity of the callback by defining
- the maximum number of times the callback will
- be called during the file transfer.
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
- as a string.
+ as a string.
:type response_headers: dict
- :param response_headers: A dictionary containing HTTP headers/values
- that will override any headers associated with
- the stored object in the response.
- See http://goo.gl/EWOPb for details.
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
:rtype: string
:returns: The contents of the file as a string
@@ -1210,20 +1429,19 @@
:type permission: string
:param permission: The permission being granted. Should be one of:
- (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
- account your are granting the permission to.
+ account you are granting the permission to.
:type recursive: boolean
- :param recursive: A boolean value to controls whether the command
- will apply the grant to all keys within the bucket
- or not. The default value is False. By passing a
- True value, the call will iterate through all keys
- in the bucket and apply the same grant to each key.
- CAUTION: If you have a lot of keys, this could take
- a long time!
+ :param recursive: A boolean value that controls whether the
+ command will apply the grant to all keys within the bucket
+ or not. The default value is False. By passing a True
+ value, the call will iterate through all keys in the
+ bucket and apply the same grant to each key. CAUTION: If
+ you have a lot of keys, this could take a long time!
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
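A short sketch of the grant helpers documented here; the e-mail address and canonical user id are placeholders, and key is an existing Key object:

    # Grant an AWS account read access via its registered e-mail address.
    key.add_email_grant('READ', 'someone@example.com')
    # Or grant by canonical user id (display_name is only needed on Walrus).
    key.add_user_grant('FULL_CONTROL', '<canonical-user-id>')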
@@ -1239,15 +1457,15 @@
:type permission: string
:param permission: The permission being granted. Should be one of:
- (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
- :param user_id: The canonical user id associated with the AWS
- account your are granting the permission to.
+ :param user_id: The canonical user id associated with the AWS
+ account you are granting the permission to.
:type display_name: string
:param display_name: An optional string containing the user's
- Display Name. Only required on Walrus.
+ Display Name. Only required on Walrus.
"""
policy = self.get_acl()
policy.acl.add_user_grant(permission, user_id,
diff --git a/boto/s3/lifecycle.py b/boto/s3/lifecycle.py
new file mode 100644
index 0000000..fa5c5cf
--- /dev/null
+++ b/boto/s3/lifecycle.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class Rule(object):
+ """
+ A Lifecycle rule for an S3 bucket.
+
+ :ivar id: Unique identifier for the rule. The value cannot be longer
+ than 255 characters.
+
+ :ivar prefix: Prefix identifying one or more objects to which the
+ rule applies.
+
+ :ivar status: If Enabled, the rule is currently being applied.
+ If Disabled, the rule is not currently being applied.
+
+ :ivar expiration: Indicates the lifetime, in days, of the objects
+ that are subject to the rule. The value must be a non-zero
+ positive integer.
+ """
+ def __init__(self, id=None, prefix=None, status=None, expiration=None):
+ self.id = id
+ self.prefix = prefix
+ self.status = status
+ self.expiration = expiration
+
+ def __repr__(self):
+ return '<Rule: %s>' % self.id
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'ID':
+ self.id = value
+ elif name == 'Prefix':
+ self.prefix = value
+ elif name == 'Status':
+ self.status = value
+ elif name == 'Days':
+ self.expiration = int(value)
+ else:
+ setattr(self, name, value)
+
+ def to_xml(self):
+ s = '<Rule>'
+ s += '<ID>%s</ID>' % self.id
+ s += '<Prefix>%s</Prefix>' % self.prefix
+ s += '<Status>%s</Status>' % self.status
+ s += '<Expiration><Days>%d</Days></Expiration>' % self.expiration
+ s += '</Rule>'
+ return s
+
+
+class Lifecycle(list):
+ """
+ A container for the rules associated with a Lifecycle configuration.
+ """
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Rule':
+ rule = Rule()
+ self.append(rule)
+ return rule
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+ def to_xml(self):
+ """
+ Returns a string containing the XML version of the Lifecycle
+ configuration as defined by S3.
+ """
+ s = '<LifecycleConfiguration>'
+ for rule in self:
+ s += rule.to_xml()
+ s += '</LifecycleConfiguration>'
+ return s
+
+ def add_rule(self, id, prefix, status, expiration):
+ """
+ Add a rule to this Lifecycle configuration. This only adds
+ the rule to the local copy. To install the new rule(s) on
+ the bucket, you need to pass this Lifecycle config object
+ to the configure_lifecycle method of the Bucket object.
+
+ :type id: str
+ :param id: Unique identifier for the rule. The value cannot be longer
+ than 255 characters.
+
+ :type prefix: str
+ :param prefix: Prefix identifying one or more objects to which the
+ rule applies.
+
+ :type status: str
+ :param status: If 'Enabled', the rule is currently being applied.
+ If 'Disabled', the rule is not currently being applied.
+
+ :type expiration: int
+ :param expiration: Indicates the lifetime, in days, of the objects
+ that are subject to the rule. The value must be a non-zero
+ positive integer.
+ """
+ rule = Rule(id, prefix, status, expiration)
+ self.append(rule)
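A brief usage sketch for the new lifecycle module; configure_lifecycle is the Bucket method the add_rule docstring refers to, and the bucket name is a placeholder:

    import boto
    from boto.s3.lifecycle import Lifecycle

    lifecycle = Lifecycle()
    # Expire objects under logs/ 30 days after creation.
    lifecycle.add_rule('expire-logs', 'logs/', 'Enabled', 30)
    conn = boto.connect_s3()
    conn.get_bucket('mybucket').configure_lifecycle(lifecycle)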
diff --git a/boto/s3/multidelete.py b/boto/s3/multidelete.py
new file mode 100644
index 0000000..3e2d48e
--- /dev/null
+++ b/boto/s3/multidelete.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto import handler
+import xml.sax
+
+class Deleted(object):
+ """
+ A successfully deleted object in a multi-object delete request.
+
+ :ivar key: Key name of the object that was deleted.
+
+ :ivar version_id: Version id of the object that was deleted.
+
+ :ivar delete_marker: If True, indicates the object deleted
+ was a DeleteMarker.
+
+ :ivar delete_marker_version_id: Version ID of the delete marker
+ deleted.
+ """
+ def __init__(self, key=None, version_id=None,
+ delete_marker=False, delete_marker_version_id=None):
+ self.key = key
+ self.version_id = version_id
+ self.delete_marker = delete_marker
+ self.delete_marker_version_id = delete_marker_version_id
+
+ def __repr__(self):
+ if self.version_id:
+ return '<Deleted: %s.%s>' % (self.key, self.version_id)
+ else:
+ return '<Deleted: %s>' % self.key
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Key':
+ self.key = value
+ elif name == 'VersionId':
+ self.version_id = value
+ elif name == 'DeleteMarker':
+ if value.lower() == 'true':
+ self.delete_marker = True
+ elif name == 'DeleteMarkerVersionId':
+ self.delete_marker_version_id = value
+ else:
+ setattr(self, name, value)
+
+class Error(object):
+ """
+ An unsuccessfully deleted object in a multi-object delete request.
+
+ :ivar key: Key name of the object that was not deleted.
+
+ :ivar version_id: Version id of the object that was not deleted.
+
+ :ivar code: Status code of the failed delete operation.
+
+ :ivar message: Status message of the failed delete operation.
+ """
+ def __init__(self, key=None, version_id=None,
+ code=None, message=None):
+ self.key = key
+ self.version_id = version_id
+ self.code = code
+ self.message = message
+
+ def __repr__(self):
+ if self.version_id:
+ return '<Error: %s.%s(%s)>' % (self.key, self.version_id,
+ self.code)
+ else:
+ return '<Error: %s(%s)>' % (self.key, self.code)
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Key':
+ self.key = value
+ elif name == 'VersionId':
+ self.version_id = value
+ elif name == 'Code':
+ self.code = value
+ elif name == 'Message':
+ self.message = value
+ else:
+ setattr(self, name, value)
+
+class MultiDeleteResult(object):
+ """
+ The status returned from a MultiObject Delete request.
+
+ :ivar deleted: A list of successfully deleted objects. Note that if
+ the quiet flag was specified in the request, this list will
+ be empty because only error responses would be returned.
+
+ :ivar errors: A list of unsuccessfully deleted objects.
+ """
+
+ def __init__(self, bucket=None):
+ self.bucket = bucket
+ self.deleted = []
+ self.errors = []
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Deleted':
+ d = Deleted()
+ self.deleted.append(d)
+ return d
+ elif name == 'Error':
+ e = Error()
+ self.errors.append(e)
+ return e
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
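For context, a sketch of how these result classes are consumed; it assumes a bucket-level multi-object delete helper (delete_keys in boto 2.x) that returns a MultiDeleteResult:

    result = bucket.delete_keys(['a.txt', 'b.txt', 'missing.txt'])
    for d in result.deleted:
        print 'deleted %s' % d.key
    for e in result.errors:
        print 'failed %s: %s (%s)' % (e.key, e.code, e.message)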
diff --git a/boto/s3/multipart.py b/boto/s3/multipart.py
index 8befc5e..9b62430 100644
--- a/boto/s3/multipart.py
+++ b/boto/s3/multipart.py
@@ -35,14 +35,18 @@
is contained
* key_name - The name of the new, completed key
* etag - The MD5 hash of the completed, combined upload
+ * version_id - The version_id of the completed upload
+ * encrypted - The value of the encryption header
"""
def __init__(self, bucket=None):
- self.bucket = None
+ self.bucket = bucket
self.location = None
self.bucket_name = None
self.key_name = None
self.etag = None
+ self.version_id = None
+ self.encrypted = None
def __repr__(self):
return '<CompleteMultiPartUpload: %s.%s>' % (self.bucket_name,
@@ -142,7 +146,6 @@
return part_lister(self)
def to_xml(self):
- self.get_all_parts()
s = '<CompleteMultipartUpload>\n'
for part in self:
s += ' <Part>\n'
@@ -185,6 +188,8 @@
self.is_truncated = True
else:
self.is_truncated = False
+ elif name == 'Initiated':
+ self.initiated = value
else:
setattr(self, name, value)
@@ -199,7 +204,7 @@
self._parts = []
query_args = 'uploadId=%s' % self.id
if max_parts:
- query_args += '&max_parts=%d' % max_parts
+ query_args += '&max-parts=%d' % max_parts
if part_number_marker:
query_args += '&part-number-marker=%s' % part_number_marker
response = self.bucket.connection.make_request('GET', self.bucket.name,
@@ -212,7 +217,8 @@
return self._parts
def upload_part_from_file(self, fp, part_num, headers=None, replace=True,
- cb=None, num_cb=10, policy=None, md5=None):
+ cb=None, num_cb=10, policy=None, md5=None,
+ size=None):
"""
Upload another part of this MultiPart Upload.
@@ -231,7 +237,41 @@
key = self.bucket.new_key(self.key_name)
key.set_contents_from_file(fp, headers, replace, cb, num_cb, policy,
md5, reduced_redundancy=False,
- query_args=query_args)
+ query_args=query_args, size=size)
+
+ def copy_part_from_key(self, src_bucket_name, src_key_name, part_num,
+ start=None, end=None):
+ """
+ Copy another part of this MultiPart Upload.
+
+ :type src_bucket_name: string
+ :param src_bucket_name: Name of the bucket containing the source key
+
+ :type src_key_name: string
+ :param src_key_name: Name of the source key
+
+ :type part_num: int
+ :param part_num: The number of this part.
+
+ :type start: int
+ :param start: Zero-based byte offset to start copying from
+
+ :type end: int
+ :param end: Zero-based byte offset to copy to
+ """
+ if part_num < 1:
+ raise ValueError('Part numbers must be greater than zero')
+ query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num)
+ if start is not None and end is not None:
+ rng = 'bytes=%s-%s' % (start, end)
+ provider = self.bucket.connection.provider
+ headers = {provider.copy_source_range_header: rng}
+ else:
+ headers = None
+ return self.bucket.copy_key(self.key_name, src_bucket_name,
+ src_key_name, storage_class=None,
+ headers=headers,
+ query_args=query_args)
def complete_upload(self):
"""
diff --git a/boto/s3/resumable_download_handler.py b/boto/s3/resumable_download_handler.py
index d7d2aa0..ffa2095 100644
--- a/boto/s3/resumable_download_handler.py
+++ b/boto/s3/resumable_download_handler.py
@@ -212,27 +212,6 @@
override_num_retries=0)
fp.flush()
- def _check_final_md5(self, key, file_name):
- """
- Checks that etag from server agrees with md5 computed after the
- download completes. This is important, since the download could
- have spanned a number of hours and multiple processes (e.g.,
- gsutil runs), and the user could change some of the file and not
- realize they have inconsistent data.
- """
- fp = open(file_name, 'rb')
- if key.bucket.connection.debug >= 1:
- print 'Checking md5 against etag.'
- hex_md5 = key.compute_md5(fp)[0]
- if hex_md5 != key.etag.strip('"\''):
- file_name = fp.name
- fp.close()
- os.unlink(file_name)
- raise ResumableDownloadException(
- 'File changed during download: md5 signature doesn\'t match '
- 'etag (incorrect downloaded file deleted)',
- ResumableTransferDisposition.ABORT)
-
def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
version_id=None):
"""
@@ -287,7 +266,10 @@
torrent, version_id)
# Download succeeded, so remove the tracker file (if we have one).
self._remove_tracker_file()
- self._check_final_md5(key, fp.name)
+ # Previously, _check_final_md5() was called here to validate the
+ # downloaded file's checksum; however, to be consistent with
+ # non-resumable downloads, that call was removed. Checksum
+ # validation of file contents should be done by the caller.
if debug >= 1:
print 'Resumable download complete.'
return
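Callers that still want the old verification can reproduce the removed _check_final_md5 logic themselves, roughly:

    # Hedged sketch; an etag comparison is only meaningful for objects
    # that were not uploaded via multipart upload.
    fp = open(file_name, 'rb')
    hex_md5 = key.compute_md5(fp)[0]
    fp.close()
    if hex_md5 != key.etag.strip('"\''):
        raise ValueError('downloaded file does not match its etag')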
diff --git a/boto/s3/tagging.py b/boto/s3/tagging.py
new file mode 100644
index 0000000..3f0ce8b
--- /dev/null
+++ b/boto/s3/tagging.py
@@ -0,0 +1,68 @@
+from boto import handler
+import xml.sax
+
+
+class Tag(object):
+ def __init__(self, key=None, value=None):
+ self.key = key
+ self.value = value
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Key':
+ self.key = value
+ elif name == 'Value':
+ self.value = value
+
+ def to_xml(self):
+ return '<Tag><Key>%s</Key><Value>%s</Value></Tag>' % (
+ self.key, self.value)
+
+
+class TagSet(list):
+ def startElement(self, name, attrs, connection):
+ if name == 'Tag':
+ tag = Tag()
+ self.append(tag)
+ return tag
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+ def add_tag(self, key, value):
+ tag = Tag(key, value)
+ self.append(tag)
+
+ def to_xml(self):
+ xml = '<TagSet>'
+ for tag in self:
+ xml += tag.to_xml()
+ xml += '</TagSet>'
+ return xml
+
+
+class Tags(list):
+ """A container for the tags associated with a bucket."""
+
+ def startElement(self, name, attrs, connection):
+ if name == 'TagSet':
+ tag_set = TagSet()
+ self.append(tag_set)
+ return tag_set
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+ def to_xml(self):
+ xml = '<Tagging>'
+ for tag_set in self:
+ xml += tag_set.to_xml()
+ xml += '</Tagging>'
+ return xml
+
+ def add_tag_set(self, tag_set):
+ self.append(tag_set)
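A usage sketch for the new tagging module; set_tags is assumed to be the companion Bucket method, which is not shown in this hunk:

    from boto.s3.tagging import Tags, TagSet

    tag_set = TagSet()
    tag_set.add_tag('env', 'production')
    tag_set.add_tag('team', 'storage')
    tags = Tags()
    tags.add_tag_set(tag_set)
    bucket.set_tags(tags)  # assumed companion Bucket method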
diff --git a/boto/sdb/__init__.py b/boto/sdb/__init__.py
index 9c4fc0a..7f29b69 100644
--- a/boto/sdb/__init__.py
+++ b/boto/sdb/__init__.py
@@ -14,13 +14,14 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-from regioninfo import SDBRegionInfo
+from .regioninfo import SDBRegionInfo
+
def regions():
"""
@@ -35,6 +36,8 @@
endpoint='sdb.eu-west-1.amazonaws.com'),
SDBRegionInfo(name='us-west-1',
endpoint='sdb.us-west-1.amazonaws.com'),
+ SDBRegionInfo(name='sa-east-1',
+ endpoint='sdb.sa-east-1.amazonaws.com'),
SDBRegionInfo(name='us-west-2',
endpoint='sdb.us-west-2.amazonaws.com'),
SDBRegionInfo(name='ap-northeast-1',
@@ -43,14 +46,15 @@
endpoint='sdb.ap-southeast-1.amazonaws.com')
]
+
def connect_to_region(region_name, **kw_params):
"""
- Given a valid region name, return a
+ Given a valid region name, return a
:class:`boto.sdb.connection.SDBConnection`.
:type: str
:param region_name: The name of the region to connect to.
-
+
:rtype: :class:`boto.sdb.connection.SDBConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
@@ -59,20 +63,3 @@
if region.name == region_name:
return region.connect(**kw_params)
return None
-
-def get_region(region_name, **kw_params):
- """
- Find and return a :class:`boto.sdb.regioninfo.RegionInfo` object
- given a region name.
-
- :type: str
- :param: The name of the region.
-
- :rtype: :class:`boto.sdb.regioninfo.RegionInfo`
- :return: The RegionInfo object for the given region or None if
- an invalid region name is provided.
- """
- for region in regions(**kw_params):
- if region.name == region_name:
- return region
- return None
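Connecting to the newly added region is a one-liner; credentials come from the usual boto config or environment:

    import boto.sdb

    conn = boto.sdb.connect_to_region('sa-east-1')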
diff --git a/boto/sdb/connection.py b/boto/sdb/connection.py
index f043193..dc5e01d 100644
--- a/boto/sdb/connection.py
+++ b/boto/sdb/connection.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -31,17 +31,17 @@
class ItemThread(threading.Thread):
"""
- A threaded :class:`Item <boto.sdb.item.Item>` retriever utility class.
+ A threaded :class:`Item <boto.sdb.item.Item>` retriever utility class.
Retrieved :class:`Item <boto.sdb.item.Item>` objects are stored in the
``items`` instance variable after :py:meth:`run() <run>` is called.
-
+
.. tip:: The item retrieval will not start until
the :func:`run() <boto.sdb.connection.ItemThread.run>` method is called.
"""
def __init__(self, name, domain_name, item_names):
"""
:param str name: A thread name. Used for identification.
- :param str domain_name: The name of a SimpleDB
+ :param str domain_name: The name of a SimpleDB
:class:`Domain <boto.sdb.domain.Domain>`
:type item_names: string or list of strings
:param item_names: The name(s) of the items to retrieve from the specified
@@ -57,7 +57,7 @@
def run(self):
"""
- Start the threaded retrieval of items. Populates the
+ Start the threaded retrieval of items. Populates the
``items`` list with :class:`Item <boto.sdb.item.Item>` objects.
"""
for item_name in self.item_names:
@@ -69,16 +69,16 @@
class SDBConnection(AWSQueryConnection):
"""
This class serves as a gateway to your SimpleDB region (defaults to
- us-east-1). Methods within allow access to SimpleDB
+ us-east-1). Methods within allow access to SimpleDB
:class:`Domain <boto.sdb.domain.Domain>` objects and their associated
:class:`Item <boto.sdb.item.Item>` objects.
-
+
.. tip::
While you may instantiate this class directly, it may be easier to
go through :py:func:`boto.connect_sdb`.
"""
DefaultRegionName = 'us-east-1'
- DefaultRegionEndpoint = 'sdb.amazonaws.com'
+ DefaultRegionEndpoint = 'sdb.us-east-1.amazonaws.com'
APIVersion = '2009-04-15'
ResponseError = SDBResponseError
@@ -86,13 +86,13 @@
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
- converter=None, security_token=None):
+ converter=None, security_token=None, validate_certs=True):
"""
For any keywords that aren't documented, refer to the parent class,
:py:class:`boto.connection.AWSAuthConnection`. You can avoid having
to worry about these keyword arguments by instantiating these objects
via :py:func:`boto.connect_sdb`.
-
+
:type region: :class:`boto.sdb.regioninfo.SDBRegionInfo`
:keyword region: Explicitly specify a region. Defaults to ``us-east-1``
if not specified. You may also specify the region in your ``boto.cfg``:
@@ -117,7 +117,8 @@
proxy_port, proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
- security_token=security_token)
+ security_token=security_token,
+ validate_certs=validate_certs)
self.box_usage = 0.0
self.converter = converter
self.item_cls = Item
@@ -130,7 +131,7 @@
While the default item class is :py:class:`boto.sdb.item.Item`, this
default may be overridden. Use this method to change a connection's
item class.
-
+
:param object cls: The new class to set as this connection's item
class. See the default item class for inspiration as to what your
replacement should/could look like.
@@ -139,8 +140,7 @@
def _build_name_value_list(self, params, attributes, replace=False,
label='Attribute'):
- keys = attributes.keys()
- keys.sort()
+ keys = sorted(attributes.keys())
i = 1
for key in keys:
value = attributes[key]
@@ -210,7 +210,7 @@
def get_usage(self):
"""
- Returns the BoxUsage (in USD) accumulated on this specific SDBConnection
+ Returns the BoxUsage (in USD) accumulated on this specific SDBConnection
instance.
.. tip:: This can be out of date, and should only be treated as a
@@ -227,7 +227,7 @@
"""
Print the BoxUsage and approximate costs of all requests made on
this specific SDBConnection instance.
-
+
.. tip:: This can be out of date, and should only be treated as a
rough estimate. Also note that this estimate only applies to the
requests made on this specific connection instance. It is by
@@ -241,14 +241,14 @@
"""
Retrieves a :py:class:`boto.sdb.domain.Domain` object whose name
matches ``domain_name``.
-
+
:param str domain_name: The name of the domain to retrieve
:keyword bool validate: When ``True``, check to see if the domain
actually exists. If ``False``, blindly return a
- :py:class:`Domain <boto.sdb.domain.Domain>` object with the
+ :py:class:`Domain <boto.sdb.domain.Domain>` object with the
specified name set.
- :raises:
+ :raises:
:py:class:`boto.exception.SDBResponseError` if ``validate`` is
``True`` and no match could be found.
@@ -262,12 +262,12 @@
def lookup(self, domain_name, validate=True):
"""
- Lookup an existing SimpleDB domain. This differs from
+ Lookup an existing SimpleDB domain. This differs from
:py:meth:`get_domain` in that ``None`` is returned if ``validate`` is
``True`` and no match was found (instead of raising an exception).
:param str domain_name: The name of the domain to retrieve
-
+
:param bool validate: If ``True``, a ``None`` value will be returned
if the specified domain can't be found. If ``False``, a
:py:class:`Domain <boto.sdb.domain.Domain>` object will be dumbly
@@ -287,8 +287,8 @@
Returns a :py:class:`boto.resultset.ResultSet` containing
all :py:class:`boto.sdb.domain.Domain` objects associated with
this connection's Access Key ID.
-
- :keyword int max_domains: Limit the returned
+
+ :keyword int max_domains: Limit the returned
:py:class:`ResultSet <boto.resultset.ResultSet>` to the specified
number of members.
:keyword str next_token: A token string that was returned in an
@@ -325,18 +325,18 @@
"""
Given a ``str`` or :class:`boto.sdb.domain.Domain`, return a
``tuple`` with the following members (in order):
-
+
* An instance of :class:`boto.sdb.domain.Domain` for the requested
domain
* The domain's name as a ``str``
-
+
:type domain_or_name: ``str`` or :class:`boto.sdb.domain.Domain`
:param domain_or_name: The domain or domain name to get the domain
and name for.
-
+
:raises: :class:`boto.exception.SDBResponseError` when an invalid
domain name is specified.
-
+
:rtype: tuple
:return: A ``tuple`` with contents outlined as per above.
"""
@@ -356,7 +356,7 @@
:rtype: bool
:return: True if successful
-
+
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name}
@@ -382,39 +382,39 @@
replace=True, expected_value=None):
"""
Store attributes for a given item in a domain.
-
+
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
-
+
:type item_name: string
:param item_name: The name of the item whose attributes are being
stored.
-
+
:type attribute_names: dict or dict-like object
:param attribute_names: The name/value pairs to store as attributes
-
+
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
- of a single attribute name and expected value. The list can be
+ of a single attribute name and expected value. The list can be
of the form:
-
+
* ['name', 'value']
-
- In which case the call will first verify that the attribute "name"
+
+ In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the put
- will proceed, otherwise a ConditionalCheckFailed error will be
+ will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
-
+
* ['name', True|False]
-
- which will simply check for the existence (True) or
+
+ which will simply check for the existence (True) or
non-existence (False) of the attribute.
-
+
:type replace: bool
:param replace: Whether the attribute values passed in will replace
existing values or will be added as additional values.
Defaults to True.
-
+
:rtype: bool
:return: True if successful
"""
@@ -429,7 +429,7 @@
def batch_put_attributes(self, domain_or_name, items, replace=True):
"""
Store attributes for multiple items in a domain.
-
+
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
@@ -439,12 +439,12 @@
of attribute names/values, exactly the same as the
attribute_names parameter of the scalar put_attributes
call.
-
+
:type replace: bool
:param replace: Whether the attribute values passed in will replace
existing values or will be added as additional values.
Defaults to True.
-
+
:rtype: bool
:return: True if successful
"""
@@ -462,18 +462,18 @@
:param domain_or_name: Either the name of a domain or a Domain object
:type item_name: string
- :param item_name: The name of the item whose attributes are
+ :param item_name: The name of the item whose attributes are
being retrieved.
:type attribute_names: string or list of strings
:param attribute_names: An attribute name or list of attribute names.
- This parameter is optional. If not supplied, all attributes will
+ This parameter is optional. If not supplied, all attributes will
be retrieved for the item.
:type consistent_read: bool
:param consistent_read: When set to true, ensures that the most recent
data is returned.
-
+
:type item: :class:`boto.sdb.item.Item`
:keyword item: Instead of instantiating a new Item object, you may
specify one to update.
@@ -521,22 +521,22 @@
delete as the value. If no value is supplied,
all attribute name/values for the item will be
deleted.
-
+
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
- of a single attribute name and expected value. The list can be
+ of a single attribute name and expected value. The list can be
of the form:
* ['name', 'value']
- In which case the call will first verify that the attribute "name"
+ In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
- will proceed, otherwise a ConditionalCheckFailed error will be
+ will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
- which will simply check for the existence (True) or
+ which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
@@ -557,21 +557,21 @@
def batch_delete_attributes(self, domain_or_name, items):
"""
Delete multiple items in a domain.
-
+
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
:param domain_or_name: Either the name of a domain or a Domain object
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are either:
-
+
* dictionaries of attribute names/values, exactly the
same as the attribute_names parameter of the scalar
put_attributes call. The attribute name/value pairs
will only be deleted if they match the name/value
pairs passed in.
* None which means that all attributes associated
- with the item should be deleted.
+ with the item should be deleted.
:return: True if successful
"""
@@ -589,7 +589,7 @@
Even though the select request does not require a domain object,
a domain object must be passed into this method so the Item objects
returned can point to the appropriate domain.
-
+
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object
:param domain_or_name: Either the name of a domain or a Domain object
diff --git a/boto/sdb/db/__init__.py b/boto/sdb/db/__init__.py
index 86044ed..71f6b7b 100644
--- a/boto/sdb/db/__init__.py
+++ b/boto/sdb/db/__init__.py
@@ -14,8 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-
diff --git a/boto/sdb/db/key.py b/boto/sdb/db/key.py
index 42a9d8d..f630d39 100644
--- a/boto/sdb/db/key.py
+++ b/boto/sdb/db/key.py
@@ -23,7 +23,7 @@
@classmethod
def from_path(cls, *args, **kwds):
- raise NotImplementedError, "Paths are not currently supported"
+ raise NotImplementedError("Paths are not currently supported")
def __init__(self, encoded=None, obj=None):
self.name = None
@@ -35,7 +35,7 @@
self.kind = None
def app(self):
- raise NotImplementedError, "Applications are not currently supported"
+ raise NotImplementedError("Applications are not currently supported")
def kind(self):
return self.kind
@@ -44,7 +44,7 @@
return self.id
def name(self):
- raise NotImplementedError, "Key Names are not currently supported"
+ raise NotImplementedError("Key Names are not currently supported")
def id_or_name(self):
return self.id
@@ -53,7 +53,7 @@
return self.id != None
def parent(self):
- raise NotImplementedError, "Key parents are not currently supported"
+ raise NotImplementedError("Key parents are not currently supported")
def __str__(self):
return self.id_or_name()
diff --git a/boto/sdb/db/manager/__init__.py b/boto/sdb/db/manager/__init__.py
index 55b32a4..69fc16f 100644
--- a/boto/sdb/db/manager/__init__.py
+++ b/boto/sdb/db/manager/__init__.py
@@ -14,17 +14,18 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
+
def get_manager(cls):
"""
- Returns the appropriate Manager class for a given Model class. It does this by
- looking in the boto config for a section like this::
-
+ Returns the appropriate Manager class for a given Model class. It
+ does this by looking in the boto config for a section like this::
+
[DB]
db_type = SimpleDB
db_user = <aws access key id>
@@ -36,10 +37,11 @@
db_passwd = <another aws secret access key>
db_name = basic_domain
db_port = 1111
-
- The values in the DB section are "generic values" that will be used if nothing more
- specific is found. You can also create a section for a specific Model class that
- gives the db info for that class. In the example above, TestBasic is a Model subclass.
+
+ The values in the DB section are "generic values" that will be used
+ if nothing more specific is found. You can also create a section for
+ a specific Model class that gives the db info for that class.
+ In the example above, TestBasic is a Model subclass.
"""
db_user = boto.config.get('DB', 'db_user', None)
db_passwd = boto.config.get('DB', 'db_passwd', None)
@@ -51,7 +53,7 @@
enable_ssl = boto.config.getbool('DB', 'enable_ssl', True)
sql_dir = boto.config.get('DB', 'sql_dir', None)
debug = boto.config.getint('DB', 'debug', 0)
- # first see if there is a fully qualified section name in the Boto config file
+ # first see if there is a fully qualified section name in the Boto config
module_name = cls.__module__.replace('.', '_')
db_section = 'DB_' + module_name + '_' + cls.__name__
if not boto.config.has_section(db_section):
@@ -75,17 +77,9 @@
from sdbmanager import SDBManager
return SDBManager(cls, db_name, db_user, db_passwd,
db_host, db_port, db_table, sql_dir, enable_ssl)
- elif db_type == 'PostgreSQL':
- from pgmanager import PGManager
- if db_table:
- return PGManager(cls, db_name, db_user, db_passwd,
- db_host, db_port, db_table, sql_dir, enable_ssl)
- else:
- return None
elif db_type == 'XML':
from xmlmanager import XMLManager
return XMLManager(cls, db_name, db_user, db_passwd,
db_host, db_port, db_table, sql_dir, enable_ssl)
else:
- raise ValueError, 'Unknown db_type: %s' % db_type
-
+ raise ValueError('Unknown db_type: %s' % db_type)
diff --git a/boto/sdb/db/manager/pgmanager.py b/boto/sdb/db/manager/pgmanager.py
deleted file mode 100644
index 31f27ca..0000000
--- a/boto/sdb/db/manager/pgmanager.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish, dis-
-# tribute, sublicense, and/or sell copies of the Software, and to permit
-# persons to whom the Software is furnished to do so, subject to the fol-
-# lowing conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
-# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-from boto.sdb.db.key import Key
-from boto.sdb.db.model import Model
-import psycopg2
-import psycopg2.extensions
-import uuid
-import os
-import string
-from boto.exception import SDBPersistenceError
-
-psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
-
-class PGConverter:
-
- def __init__(self, manager):
- self.manager = manager
- self.type_map = {Key : (self.encode_reference, self.decode_reference),
- Model : (self.encode_reference, self.decode_reference)}
-
- def encode(self, type, value):
- if type in self.type_map:
- encode = self.type_map[type][0]
- return encode(value)
- return value
-
- def decode(self, type, value):
- if type in self.type_map:
- decode = self.type_map[type][1]
- return decode(value)
- return value
-
- def encode_prop(self, prop, value):
- if isinstance(value, list):
- if hasattr(prop, 'item_type'):
- s = "{"
- new_value = []
- for v in value:
- item_type = getattr(prop, 'item_type')
- if Model in item_type.mro():
- item_type = Model
- new_value.append('%s' % self.encode(item_type, v))
- s += ','.join(new_value)
- s += "}"
- return s
- else:
- return value
- return self.encode(prop.data_type, value)
-
- def decode_prop(self, prop, value):
- if prop.data_type == list:
- if value != None:
- if not isinstance(value, list):
- value = [value]
- if hasattr(prop, 'item_type'):
- item_type = getattr(prop, "item_type")
- if Model in item_type.mro():
- if item_type != self.manager.cls:
- return item_type._manager.decode_value(prop, value)
- else:
- item_type = Model
- return [self.decode(item_type, v) for v in value]
- return value
- elif hasattr(prop, 'reference_class'):
- ref_class = getattr(prop, 'reference_class')
- if ref_class != self.manager.cls:
- return ref_class._manager.decode_value(prop, value)
- else:
- return self.decode(prop.data_type, value)
- elif hasattr(prop, 'calculated_type'):
- calc_type = getattr(prop, 'calculated_type')
- return self.decode(calc_type, value)
- else:
- return self.decode(prop.data_type, value)
-
- def encode_reference(self, value):
- if isinstance(value, str) or isinstance(value, unicode):
- return value
- if value == None:
- return ''
- else:
- return value.id
-
- def decode_reference(self, value):
- if not value:
- return None
- try:
- return self.manager.get_object_from_id(value)
- except:
- raise ValueError, 'Unable to convert %s to Object' % value
-
-class PGManager(object):
-
- def __init__(self, cls, db_name, db_user, db_passwd,
- db_host, db_port, db_table, sql_dir, enable_ssl):
- self.cls = cls
- self.db_name = db_name
- self.db_user = db_user
- self.db_passwd = db_passwd
- self.db_host = db_host
- self.db_port = db_port
- self.db_table = db_table
- self.sql_dir = sql_dir
- self.in_transaction = False
- self.converter = PGConverter(self)
- self._connect()
-
- def _build_connect_string(self):
- cs = 'dbname=%s user=%s password=%s host=%s port=%d'
- return cs % (self.db_name, self.db_user, self.db_passwd,
- self.db_host, self.db_port)
-
- def _connect(self):
- self.connection = psycopg2.connect(self._build_connect_string())
- self.connection.set_client_encoding('UTF8')
- self.cursor = self.connection.cursor()
-
- def _object_lister(self, cursor):
- try:
- for row in cursor:
- yield self._object_from_row(row, cursor.description)
- except StopIteration:
- cursor.close()
- raise StopIteration
-
- def _dict_from_row(self, row, description):
- d = {}
- for i in range(0, len(row)):
- d[description[i][0]] = row[i]
- return d
-
- def _object_from_row(self, row, description=None):
- if not description:
- description = self.cursor.description
- d = self._dict_from_row(row, description)
- obj = self.cls(d['id'])
- obj._manager = self
- obj._auto_update = False
- for prop in obj.properties(hidden=False):
- if prop.data_type != Key:
- v = self.decode_value(prop, d[prop.name])
- v = prop.make_value_from_datastore(v)
- if hasattr(prop, 'calculated_type'):
- prop._set_direct(obj, v)
- elif not prop.empty(v):
- setattr(obj, prop.name, v)
- else:
- setattr(obj, prop.name, prop.default_value())
- return obj
-
- def _build_insert_qs(self, obj, calculated):
- fields = []
- values = []
- templs = []
- id_calculated = [p for p in calculated if p.name == 'id']
- for prop in obj.properties(hidden=False):
- if prop not in calculated:
- value = prop.get_value_for_datastore(obj)
- if value != prop.default_value() or prop.required:
- value = self.encode_value(prop, value)
- values.append(value)
- fields.append('"%s"' % prop.name)
- templs.append('%s')
- qs = 'INSERT INTO "%s" (' % self.db_table
- if len(id_calculated) == 0:
- qs += '"id",'
- qs += ','.join(fields)
- qs += ") VALUES ("
- if len(id_calculated) == 0:
- qs += "'%s'," % obj.id
- qs += ','.join(templs)
- qs += ')'
- if calculated:
- qs += ' RETURNING '
- calc_values = ['"%s"' % p.name for p in calculated]
- qs += ','.join(calc_values)
- qs += ';'
- return qs, values
-
- def _build_update_qs(self, obj, calculated):
- fields = []
- values = []
- for prop in obj.properties(hidden=False):
- if prop not in calculated:
- value = prop.get_value_for_datastore(obj)
- if value != prop.default_value() or prop.required:
- value = self.encode_value(prop, value)
- values.append(value)
- field = '"%s"=' % prop.name
- field += '%s'
- fields.append(field)
- qs = 'UPDATE "%s" SET ' % self.db_table
- qs += ','.join(fields)
- qs += """ WHERE "id" = '%s'""" % obj.id
- if calculated:
- qs += ' RETURNING '
- calc_values = ['"%s"' % p.name for p in calculated]
- qs += ','.join(calc_values)
- qs += ';'
- return qs, values
-
- def _get_sql(self, mapping=None):
- print '_get_sql'
- sql = None
- if self.sql_dir:
- path = os.path.join(self.sql_dir, self.cls.__name__ + '.sql')
- print path
- if os.path.isfile(path):
- fp = open(path)
- sql = fp.read()
- fp.close()
- t = string.Template(sql)
- sql = t.safe_substitute(mapping)
- return sql
-
- def start_transaction(self):
- print 'start_transaction'
- self.in_transaction = True
-
- def end_transaction(self):
- print 'end_transaction'
- self.in_transaction = False
- self.commit()
-
- def commit(self):
- if not self.in_transaction:
- print '!!commit on %s' % self.db_table
- try:
- self.connection.commit()
-
- except psycopg2.ProgrammingError, err:
- self.connection.rollback()
- raise err
-
- def rollback(self):
- print '!!rollback on %s' % self.db_table
- self.connection.rollback()
-
- def delete_table(self):
- self.cursor.execute('DROP TABLE "%s";' % self.db_table)
- self.commit()
-
- def create_table(self, mapping=None):
- self.cursor.execute(self._get_sql(mapping))
- self.commit()
-
- def encode_value(self, prop, value):
- return self.converter.encode_prop(prop, value)
-
- def decode_value(self, prop, value):
- return self.converter.decode_prop(prop, value)
-
- def execute_sql(self, query):
- self.cursor.execute(query, None)
- self.commit()
-
- def query_sql(self, query, vars=None):
- self.cursor.execute(query, vars)
- return self.cursor.fetchall()
-
- def lookup(self, cls, name, value):
- values = []
- qs = 'SELECT * FROM "%s" WHERE ' % self.db_table
- found = False
- for property in cls.properties(hidden=False):
- if property.name == name:
- found = True
- value = self.encode_value(property, value)
- values.append(value)
- qs += "%s=" % name
- qs += "%s"
- if not found:
- raise SDBPersistenceError('%s is not a valid field' % name)
- qs += ';'
- print qs
- self.cursor.execute(qs, values)
- if self.cursor.rowcount == 1:
- row = self.cursor.fetchone()
- return self._object_from_row(row, self.cursor.description)
- elif self.cursor.rowcount == 0:
- raise KeyError, 'Object not found'
- else:
- raise LookupError, 'Multiple Objects Found'
-
- def query(self, cls, filters, limit=None, order_by=None):
- parts = []
- qs = 'SELECT * FROM "%s"' % self.db_table
- if filters:
- qs += ' WHERE '
- properties = cls.properties(hidden=False)
- for filter, value in filters:
- name, op = filter.strip().split()
- found = False
- for property in properties:
- if property.name == name:
- found = True
- value = self.encode_value(property, value)
- parts.append(""""%s"%s'%s'""" % (name, op, value))
- if not found:
- raise SDBPersistenceError('%s is not a valid field' % name)
- qs += ','.join(parts)
- qs += ';'
- print qs
- cursor = self.connection.cursor()
- cursor.execute(qs)
- return self._object_lister(cursor)
-
- def get_property(self, prop, obj, name):
- qs = """SELECT "%s" FROM "%s" WHERE id='%s';""" % (name, self.db_table, obj.id)
- print qs
- self.cursor.execute(qs, None)
- if self.cursor.rowcount == 1:
- rs = self.cursor.fetchone()
- for prop in obj.properties(hidden=False):
- if prop.name == name:
- v = self.decode_value(prop, rs[0])
- return v
- raise AttributeError, '%s not found' % name
-
- def set_property(self, prop, obj, name, value):
- pass
- value = self.encode_value(prop, value)
- qs = 'UPDATE "%s" SET ' % self.db_table
- qs += "%s='%s'" % (name, self.encode_value(prop, value))
- qs += " WHERE id='%s'" % obj.id
- qs += ';'
- print qs
- self.cursor.execute(qs)
- self.commit()
-
- def get_object(self, cls, id):
- qs = """SELECT * FROM "%s" WHERE id='%s';""" % (self.db_table, id)
- self.cursor.execute(qs, None)
- if self.cursor.rowcount == 1:
- row = self.cursor.fetchone()
- return self._object_from_row(row, self.cursor.description)
- else:
- raise SDBPersistenceError('%s object with id=%s does not exist' % (cls.__name__, id))
-
- def get_object_from_id(self, id):
- return self.get_object(self.cls, id)
-
- def _find_calculated_props(self, obj):
- return [p for p in obj.properties() if hasattr(p, 'calculated_type')]
-
- def save_object(self, obj, expected_value=None):
- obj._auto_update = False
- calculated = self._find_calculated_props(obj)
- if not obj.id:
- obj.id = str(uuid.uuid4())
- qs, values = self._build_insert_qs(obj, calculated)
- else:
- qs, values = self._build_update_qs(obj, calculated)
- print qs
- self.cursor.execute(qs, values)
- if calculated:
- calc_values = self.cursor.fetchone()
- print calculated
- print calc_values
- for i in range(0, len(calculated)):
- prop = calculated[i]
- prop._set_direct(obj, calc_values[i])
- self.commit()
-
- def delete_object(self, obj):
- qs = """DELETE FROM "%s" WHERE id='%s';""" % (self.db_table, obj.id)
- print qs
- self.cursor.execute(qs)
- self.commit()
-
-
diff --git a/boto/sdb/db/manager/sdbmanager.py b/boto/sdb/db/manager/sdbmanager.py
index 8218f81..bce2e4e 100644
--- a/boto/sdb/db/manager/sdbmanager.py
+++ b/boto/sdb/db/manager/sdbmanager.py
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -32,34 +32,39 @@
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+
class TimeDecodeError(Exception):
pass
+
class SDBConverter(object):
"""
- Responsible for converting base Python types to format compatible with underlying
- database. For SimpleDB, that means everything needs to be converted to a string
- when stored in SimpleDB and from a string when retrieved.
+ Responsible for converting base Python types to a format compatible
+ with the underlying database. For SimpleDB, that means everything
+ needs to be converted to a string when stored in SimpleDB and from
+ a string when retrieved.
- To convert a value, pass it to the encode or decode method. The encode method
- will take a Python native value and convert to DB format. The decode method will
- take a DB format value and convert it to Python native format. To find the appropriate
- method to call, the generic encode/decode methods will look for the type-specific
- method by searching for a method called "encode_<type name>" or "decode_<type name>".
+ To convert a value, pass it to the encode or decode method. The
+ encode method will take a Python native value and convert it to DB
+ format. The decode method will take a DB format value and convert
+ it to Python native format. To find the appropriate method to
+ call, the generic encode/decode methods will look for the
+ type-specific method by searching for a method called
+ "encode_<type name>" or "decode_<type name>".
"""
def __init__(self, manager):
self.manager = manager
- self.type_map = { bool : (self.encode_bool, self.decode_bool),
- int : (self.encode_int, self.decode_int),
- long : (self.encode_long, self.decode_long),
- float : (self.encode_float, self.decode_float),
- Model : (self.encode_reference, self.decode_reference),
- Key : (self.encode_reference, self.decode_reference),
- datetime : (self.encode_datetime, self.decode_datetime),
- date : (self.encode_date, self.decode_date),
- time : (self.encode_time, self.decode_time),
- Blob: (self.encode_blob, self.decode_blob),
- str: (self.encode_string, self.decode_string),
+ self.type_map = {bool: (self.encode_bool, self.decode_bool),
+ int: (self.encode_int, self.decode_int),
+ long: (self.encode_long, self.decode_long),
+ float: (self.encode_float, self.decode_float),
+ Model: (self.encode_reference, self.decode_reference),
+ Key: (self.encode_reference, self.decode_reference),
+ datetime: (self.encode_datetime, self.decode_datetime),
+ date: (self.encode_date, self.decode_date),
+ time: (self.encode_time, self.decode_time),
+ Blob: (self.encode_blob, self.decode_blob),
+ str: (self.encode_string, self.decode_string),
}
def encode(self, item_type, value):
@@ -92,7 +97,7 @@
# We support lists up to 1,000 attributes, since
# SDB technically only supports 1024 attributes anyway
values = {}
- for k,v in enumerate(value):
+ for k, v in enumerate(value):
values["%03d" % k] = v
return self.encode_map(prop, values)
@@ -101,7 +106,7 @@
if value == None:
return None
if not isinstance(value, dict):
- raise ValueError, 'Expected a dict value, got %s' % type(value)
+ raise ValueError('Expected a dict value, got %s' % type(value))
new_value = []
for key in value:
item_type = getattr(prop, "item_type")
@@ -128,7 +133,7 @@
dec_val = {}
for val in value:
if val != None:
- k,v = self.decode_map_element(item_type, val)
+ k, v = self.decode_map_element(item_type, val)
try:
k = int(k)
except:
@@ -143,7 +148,7 @@
ret_value = {}
item_type = getattr(prop, "item_type")
for val in value:
- k,v = self.decode_map_element(item_type, val)
+ k, v = self.decode_map_element(item_type, val)
ret_value[k] = v
return ret_value
@@ -152,7 +157,7 @@
import urllib
key = value
if ":" in value:
- key, value = value.split(':',1)
+ key, value = value.split(':', 1)
key = urllib.unquote(key)
if Model in item_type.mro():
value = item_type(id=value)
@@ -257,12 +262,26 @@
def encode_datetime(self, value):
if isinstance(value, str) or isinstance(value, unicode):
return value
- return value.strftime(ISO8601)
+ if isinstance(value, datetime):
+ return value.strftime(ISO8601)
+ else:
+ return value.isoformat()
def decode_datetime(self, value):
+ """Handles both Dates and DateTime objects"""
+ if value is None:
+ return value
try:
- return datetime.strptime(value, ISO8601)
- except:
+ if "T" in value:
+ if "." in value:
+ # Handle true "isoformat()" dates, which may have a microsecond on at the end of them
+ return datetime.strptime(value.split(".")[0], "%Y-%m-%dT%H:%M:%S")
+ else:
+ return datetime.strptime(value, ISO8601)
+ else:
+ value = value.split("-")
+ return date(int(value[0]), int(value[1]), int(value[2]))
+ except Exception, e:
return None
def encode_date(self, value):
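For clarity, the new decode_datetime branches behave roughly as follows; converter stands in for an SDBConverter instance:

    converter.decode_datetime('2012-05-25T13:30:00Z')        # datetime(2012, 5, 25, 13, 30)
    converter.decode_datetime('2012-05-25T13:30:00.123456')  # same, microseconds dropped
    converter.decode_datetime('2012-05-25')                  # date(2012, 5, 25)
    converter.decode_datetime('not-a-date')                  # None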
@@ -332,7 +351,6 @@
key.set_contents_from_string(value.value)
return value.id
-
def decode_blob(self, value):
if not value:
return None
@@ -355,12 +373,14 @@
def encode_string(self, value):
"""Convert ASCII, Latin-1 or UTF-8 to pure Unicode"""
- if not isinstance(value, str): return value
+ if not isinstance(value, str):
+ return value
try:
return unicode(value, 'utf-8')
- except: # really, this should throw an exception.
- # in the interest of not breaking current
- # systems, however:
+ except:
+ # really, this should throw an exception.
+ # in the interest of not breaking current
+ # systems, however:
arr = []
for ch in value:
arr.append(unichr(ord(ch)))
@@ -371,10 +391,12 @@
return the value as-is"""
return value
+
class SDBManager(object):
-
+
def __init__(self, cls, db_name, db_user, db_passwd,
- db_host, db_port, db_table, ddl_dir, enable_ssl, consistent=None):
+ db_host, db_port, db_table, ddl_dir, enable_ssl,
+ consistent=None):
self.cls = cls
self.db_name = db_name
self.db_user = db_user
@@ -428,7 +450,7 @@
obj = self.get_object(cls, item.name, item)
if obj:
yield obj
-
+
def encode_value(self, prop, value):
if value == None:
return None
@@ -453,13 +475,13 @@
except:
self.bucket = s3.create_bucket(bucket_name)
return self.bucket
-
+
def load_object(self, obj):
if not obj._loaded:
- a = self.domain.get_attributes(obj.id,consistent_read=self.consistent)
- if a.has_key('__type__'):
+ a = self.domain.get_attributes(obj.id, consistent_read=self.consistent)
+ if '__type__' in a:
for prop in obj.properties(hidden=False):
- if a.has_key(prop.name):
+ if prop.name in a:
value = self.decode_value(prop, a[prop.name])
value = prop.make_value_from_datastore(value)
try:
@@ -467,18 +489,18 @@
except Exception, e:
boto.log.exception(e)
obj._loaded = True
-
+
def get_object(self, cls, id, a=None):
obj = None
if not a:
- a = self.domain.get_attributes(id,consistent_read=self.consistent)
- if a.has_key('__type__'):
+ a = self.domain.get_attributes(id, consistent_read=self.consistent)
+ if '__type__' in a:
if not cls or a['__type__'] != cls.__name__:
cls = find_class(a['__module__'], a['__type__'])
if cls:
params = {}
for prop in cls.properties(hidden=False):
- if a.has_key(prop.name):
+ if prop.name in a:
value = self.decode_value(prop, a[prop.name])
value = prop.make_value_from_datastore(value)
params[prop.name] = value
@@ -488,7 +510,7 @@
s = '(%s) class %s.%s not found' % (id, a['__module__'], a['__type__'])
boto.log.info('sdbmanager: %s' % s)
return obj
-
+
def get_object_from_id(self, id):
return self.get_object(None, id)
@@ -513,14 +535,13 @@
return count
return count
-
def _build_filter(self, property, name, op, val):
if name == "__id__":
name = 'itemName()'
if name != "itemName()":
name = '`%s`' % name
if val == None:
- if op in ('is','='):
+ if op in ('is', '='):
return "%(name)s is null" % {"name": name}
elif op in ('is not', '!='):
return "%s is not null" % name
@@ -546,10 +567,10 @@
if order_by:
if order_by[0] == "-":
- order_by_method = "DESC";
+ order_by_method = "DESC"
order_by = order_by[1:]
else:
- order_by_method = "ASC";
+ order_by_method = "ASC"
if select:
if order_by and order_by in select:
@@ -567,7 +588,7 @@
for filter in filters:
filter_parts = []
filter_props = filter[0]
- if type(filter_props) != list:
+ if not isinstance(filter_props, list):
filter_props = [filter_props]
for filter_prop in filter_props:
(name, op) = filter_prop.strip().split(" ", 1)
@@ -598,7 +619,7 @@
type_query = "(`__type__` = '%s'" % cls.__name__
for subclass in self._get_all_decendents(cls).keys():
type_query += " or `__type__` = '%s'" % subclass
- type_query +=")"
+ type_query += ")"
query_parts.append(type_query)
order_by_query = ""
@@ -626,15 +647,15 @@
return decendents
def query_gql(self, query_string, *args, **kwds):
- raise NotImplementedError, "GQL queries not supported in SimpleDB"
+ raise NotImplementedError("GQL queries not supported in SimpleDB")
def save_object(self, obj, expected_value=None):
if not obj.id:
obj.id = str(uuid.uuid4())
- attrs = {'__type__' : obj.__class__.__name__,
- '__module__' : obj.__class__.__module__,
- '__lineage__' : obj.get_lineage()}
+ attrs = {'__type__': obj.__class__.__name__,
+ '__module__': obj.__class__.__module__,
+ '__lineage__': obj.get_lineage()}
del_attrs = []
for property in obj.properties(hidden=False):
value = property.get_value_for_datastore(obj)
@@ -658,7 +679,7 @@
if expected_value:
prop = obj.find_property(expected_value[0])
v = expected_value[1]
- if v is not None and not type(v) == bool:
+ if v is not None and not isinstance(v, bool):
v = self.encode_value(prop, v)
expected_value[1] = v
self.domain.put_attributes(obj.id, attrs, replace=True, expected_value=expected_value)
@@ -681,10 +702,10 @@
raise SDBPersistenceError("Error: %s must be unique!" % prop.name)
except(StopIteration):
pass
- self.domain.put_attributes(obj.id, {name : value}, replace=True)
+ self.domain.put_attributes(obj.id, {name: value}, replace=True)
def get_property(self, prop, obj, name):
- a = self.domain.get_attributes(obj.id,consistent_read=self.consistent)
+ a = self.domain.get_attributes(obj.id, consistent_read=self.consistent)
# try to get the attribute value from SDB
if name in a:
@@ -692,21 +713,20 @@
value = prop.make_value_from_datastore(value)
setattr(obj, prop.name, value)
return value
- raise AttributeError, '%s not found' % name
+ raise AttributeError('%s not found' % name)
def set_key_value(self, obj, name, value):
- self.domain.put_attributes(obj.id, {name : value}, replace=True)
+ self.domain.put_attributes(obj.id, {name: value}, replace=True)
def delete_key_value(self, obj, name):
self.domain.delete_attributes(obj.id, name)
def get_key_value(self, obj, name):
- a = self.domain.get_attributes(obj.id, name,consistent_read=self.consistent)
- if a.has_key(name):
+ a = self.domain.get_attributes(obj.id, name, consistent_read=self.consistent)
+ if name in a:
return a[name]
else:
return None
-
+
def get_raw_item(self, obj):
return self.domain.get_item(obj.id)
-
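Note: the dict-membership edits in this file all follow one pattern; `has_key()` does not exist in Python 3, while the `in` operator works under both 2.x and 3.x. A minimal sketch of the before/after shape (names are illustrative):

    attrs = {'__type__': 'Note', 'name': 'example'}

    # old, Python 2 only:
    #     if attrs.has_key('name'): ...
    # new, portable across 2.x and 3.x:
    if 'name' in attrs:
        value = attrs['name']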
diff --git a/boto/sdb/db/manager/xmlmanager.py b/boto/sdb/db/manager/xmlmanager.py
index 3608b2c..04210db 100644
--- a/boto/sdb/db/manager/xmlmanager.py
+++ b/boto/sdb/db/manager/xmlmanager.py
@@ -395,7 +395,7 @@
return ' intersection '.join(parts)
def query_gql(self, query_string, *args, **kwds):
- raise NotImplementedError, "GQL queries not supported in XML"
+ raise NotImplementedError("GQL queries not supported in XML")
def save_list(self, doc, items, prop_node):
items_node = doc.createElement('items')
@@ -495,7 +495,7 @@
def get_key_value(self, obj, name):
a = self.domain.get_attributes(obj.id, name)
- if a.has_key(name):
+ if name in a:
return a[name]
else:
return None
diff --git a/boto/sdb/db/model.py b/boto/sdb/db/model.py
index eab8276..a625ad2 100644
--- a/boto/sdb/db/model.py
+++ b/boto/sdb/db/model.py
@@ -86,7 +86,7 @@
@classmethod
def get_by_key_name(cls, key_names, parent=None):
- raise NotImplementedError, "Key Names are not currently supported"
+ raise NotImplementedError("Key Names are not currently supported")
@classmethod
def find(cls, limit=None, next_token=None, **params):
@@ -101,7 +101,7 @@
@classmethod
def get_or_insert(key_name, **kw):
- raise NotImplementedError, "get_or_insert not currently supported"
+ raise NotImplementedError("get_or_insert not currently supported")
@classmethod
def properties(cls, hidden=True):
@@ -154,7 +154,7 @@
setattr(self, prop.name, prop.default_value())
except ValueError:
pass
- if kw.has_key('manager'):
+ if 'manager' in kw:
self._manager = kw['manager']
self.id = id
for key in kw:
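Likewise, the raise-statement edits in this file swap the Python 2-only comma form for the call form, which parses under both Python 2 and 3. A minimal sketch:

    # old, Python 2 only (a SyntaxError on Python 3):
    #     raise NotImplementedError, "Key Names are not currently supported"
    # new, portable across 2.x and 3.x:
    raise NotImplementedError("Key Names are not currently supported")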
diff --git a/boto/sdb/db/property.py b/boto/sdb/db/property.py
index 1929a02..b8610cf 100644
--- a/boto/sdb/db/property.py
+++ b/boto/sdb/db/property.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -28,6 +28,7 @@
import boto.s3.key
from boto.sdb.db.blob import Blob
+
class Property(object):
data_type = str
@@ -35,8 +36,8 @@
name = ''
verbose_name = ''
- def __init__(self, verbose_name=None, name=None, default=None, required=False,
- validator=None, choices=None, unique=False):
+ def __init__(self, verbose_name=None, name=None, default=None,
+ required=False, validator=None, choices=None, unique=False):
self.verbose_name = verbose_name
self.name = name
self.default = default
@@ -48,7 +49,7 @@
else:
self.slot_name = '_'
self.unique = unique
-
+
def __get__(self, obj, objtype):
if obj:
obj.load()
@@ -78,16 +79,16 @@
if isinstance(value, basestring) or value == self.default_value():
return
if not isinstance(value, self.data_type):
- raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
-
+ raise TypeError('Validation Error, %s.%s expecting %s, got %s' % (self.model_class.__name__, self.name, self.data_type, type(value)))
+
def default_value(self):
return self.default
def validate(self, value):
- if self.required and value==None:
- raise ValueError, '%s is a required property' % self.name
+ if self.required and value == None:
+ raise ValueError('%s is a required property' % self.name)
if self.choices and value and not value in self.choices:
- raise ValueError, '%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name)
+ raise ValueError('%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name))
if self.validator:
self.validator(value)
else:
@@ -108,38 +109,46 @@
return self.choices()
return self.choices
+
def validate_string(value):
if value == None:
return
elif isinstance(value, str) or isinstance(value, unicode):
if len(value) > 1024:
- raise ValueError, 'Length of value greater than maxlength'
+ raise ValueError('Length of value greater than maxlength')
else:
- raise TypeError, 'Expecting String, got %s' % type(value)
+ raise TypeError('Expecting String, got %s' % type(value))
+
class StringProperty(Property):
type_name = 'String'
-
- def __init__(self, verbose_name=None, name=None, default='', required=False,
- validator=validate_string, choices=None, unique=False):
- Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
+
+ def __init__(self, verbose_name=None, name=None, default='',
+ required=False, validator=validate_string,
+ choices=None, unique=False):
+ Property.__init__(self, verbose_name, name, default, required,
+ validator, choices, unique)
+
class TextProperty(Property):
-
+
type_name = 'Text'
-
- def __init__(self, verbose_name=None, name=None, default='', required=False,
- validator=None, choices=None, unique=False, max_length=None):
- Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
+
+ def __init__(self, verbose_name=None, name=None, default='',
+ required=False, validator=None, choices=None,
+ unique=False, max_length=None):
+ Property.__init__(self, verbose_name, name, default, required,
+ validator, choices, unique)
self.max_length = max_length
def validate(self, value):
value = super(TextProperty, self).validate(value)
if not isinstance(value, str) and not isinstance(value, unicode):
- raise TypeError, 'Expecting Text, got %s' % type(value)
+ raise TypeError('Expecting Text, got %s' % type(value))
if self.max_length and len(value) > self.max_length:
- raise ValueError, 'Length of value greater than maxlength %s' % self.max_length
+ raise ValueError('Length of value greater than maxlength %s' % self.max_length)
+
class PasswordProperty(StringProperty):
"""
@@ -153,7 +162,7 @@
The comparison
- obj.password == 'foo'
+ obj.password == 'foo'
generates a hash of 'foo' and compares it to the
stored hash.
@@ -164,7 +173,7 @@
where sha512 is not available )
It's unlikely you'll ever need to use a different hash
- function, but if you do, you can control the behavior
+ function, but if you do, you can control the behavior
in one of two ways:
1) Specifying hashfunc in PasswordProperty constructor
@@ -175,7 +184,7 @@
password = PasswordProperty(hashfunc=hashlib.sha224)
2) Subclassing Password and PasswordProperty
-
+
class SHA224Password(Password):
hashfunc=hashlib.sha224
@@ -198,9 +207,9 @@
The remaining parameters are passed through to StringProperty.__init__"""
-
- StringProperty.__init__(self, verbose_name, name, default, required, validator, choices, unique)
- self.hashfunc=hashfunc
+ StringProperty.__init__(self, verbose_name, name, default, required,
+ validator, choices, unique)
+ self.hashfunc = hashfunc
def make_value_from_datastore(self, value):
p = self.data_type(value, hashfunc=self.hashfunc)
@@ -227,9 +236,10 @@
value = Property.validate(self, value)
if isinstance(value, self.data_type):
if len(value) > 1024:
- raise ValueError, 'Length of value greater than maxlength'
+ raise ValueError('Length of value greater than maxlength')
else:
- raise TypeError, 'Expecting %s, got %s' % (type(self.data_type), type(value))
+ raise TypeError('Expecting %s, got %s' % (type(self.data_type), type(value)))
+
class BlobProperty(Property):
data_type = Blob
@@ -246,8 +256,9 @@
value = b
Property.__set__(self, obj, value)
+
class S3KeyProperty(Property):
-
+
data_type = boto.s3.key.Key
type_name = 'S3Key'
validate_regex = "^s3:\/\/([^\/]*)\/(.*)$"
@@ -266,7 +277,7 @@
match = re.match(self.validate_regex, value)
if match:
return
- raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
+ raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value)))
def __get__(self, obj, objtype):
value = Property.__get__(self, obj, objtype)
@@ -284,7 +295,7 @@
return k
else:
return value
-
+
def get_value_for_datastore(self, model_instance):
value = Property.get_value_for_datastore(self, model_instance)
if value:
@@ -292,6 +303,7 @@
else:
return None
+
class IntegerProperty(Property):
data_type = int
@@ -307,11 +319,11 @@
value = int(value)
value = Property.validate(self, value)
if value > self.max:
- raise ValueError, 'Maximum value is %d' % self.max
+ raise ValueError('Maximum value is %d' % self.max)
if value < self.min:
- raise ValueError, 'Minimum value is %d' % self.min
+ raise ValueError('Minimum value is %d' % self.min)
return value
-
+
def empty(self, value):
return value is None
@@ -321,7 +333,6 @@
return Property.__set__(self, obj, value)
-
class LongProperty(Property):
data_type = long
@@ -337,14 +348,15 @@
min = -9223372036854775808
max = 9223372036854775807
if value > max:
- raise ValueError, 'Maximum value is %d' % max
+ raise ValueError('Maximum value is %d' % max)
if value < min:
- raise ValueError, 'Minimum value is %d' % min
+ raise ValueError('Minimum value is %d' % min)
return value
-
+
def empty(self, value):
return value is None
+
class BooleanProperty(Property):
data_type = bool
@@ -356,7 +368,8 @@
def empty(self, value):
return value is None
-
+
+
class FloatProperty(Property):
data_type = float
@@ -370,11 +383,15 @@
value = float(value)
value = Property.validate(self, value)
return value
-
+
def empty(self, value):
return value is None
+
class DateTimeProperty(Property):
+ """This class handles both the datetime.datetime object
+ And the datetime.date objects. It can return either one,
+ depending on the value stored in the database"""
data_type = datetime.datetime
type_name = 'DateTime'
@@ -391,11 +408,11 @@
return Property.default_value(self)
def validate(self, value):
- value = super(DateTimeProperty, self).validate(value)
if value == None:
return
- if not isinstance(value, self.data_type):
- raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
+ if isinstance(value, datetime.date):
+ return value
+ return super(DateTimeProperty, self).validate(value)
def get_value_for_datastore(self, model_instance):
if self.auto_now:
@@ -405,6 +422,7 @@
def now(self):
return datetime.datetime.utcnow()
+
class DateProperty(Property):
data_type = datetime.date
@@ -426,7 +444,7 @@
if value == None:
return
if not isinstance(value, self.data_type):
- raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
+ raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value)))
def get_value_for_datastore(self, model_instance):
if self.auto_now:
@@ -453,7 +471,7 @@
if value is None:
return
if not isinstance(value, self.data_type):
- raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
+ raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value)))
class ReferenceProperty(Property):
@@ -466,7 +484,7 @@
Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
self.reference_class = reference_class
self.collection_name = collection_name
-
+
def __get__(self, obj, objtype):
if obj:
value = getattr(obj, self.slot_name)
@@ -484,15 +502,15 @@
"""Don't allow this object to be associated to itself
This causes bad things to happen"""
if value != None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)):
- raise ValueError, "Can not associate an object with itself!"
- return super(ReferenceProperty, self).__set__(obj,value)
+ raise ValueError("Can not associate an object with itself!")
+ return super(ReferenceProperty, self).__set__(obj, value)
def __property_config__(self, model_class, property_name):
Property.__property_config__(self, model_class, property_name)
if self.collection_name is None:
self.collection_name = '%s_%s_set' % (model_class.__name__.lower(), self.name)
if hasattr(self.reference_class, self.collection_name):
- raise ValueError, 'duplicate property: %s' % self.collection_name
+ raise ValueError('duplicate property: %s' % self.collection_name)
setattr(self.reference_class, self.collection_name,
_ReverseReferenceProperty(model_class, property_name, self.collection_name))
@@ -508,20 +526,21 @@
cls_lineage = self.reference_class.get_lineage()
if obj_lineage.startswith(cls_lineage):
return
- raise TypeError, '%s not instance of %s' % (obj_lineage, cls_lineage)
+ raise TypeError('%s not instance of %s' % (obj_lineage, cls_lineage))
except:
- raise ValueError, '%s is not a Model' % value
-
+ raise ValueError('%s is not a Model' % value)
+
def validate(self, value):
if self.validator:
self.validator(value)
- if self.required and value==None:
- raise ValueError, '%s is a required property' % self.name
+ if self.required and value == None:
+ raise ValueError('%s is a required property' % self.name)
if value == self.default_value():
return
if not isinstance(value, str) and not isinstance(value, unicode):
self.check_instance(value)
-
+
+
class _ReverseReferenceProperty(Property):
data_type = Query
type_name = 'query'
@@ -537,7 +556,7 @@
"""Fetches collection of model instances of this collection property."""
if model_instance is not None:
query = Query(self.__model)
- if type(self.__property) == list:
+ if isinstance(self.__property, list):
props = []
for prop in self.__property:
props.append("%s =" % prop)
@@ -549,9 +568,9 @@
def __set__(self, model_instance, value):
"""Not possible to set a new collection."""
- raise ValueError, 'Virtual property is read-only'
+ raise ValueError('Virtual property is read-only')
-
+
class CalculatedProperty(Property):
def __init__(self, verbose_name=None, name=None, default=None,
@@ -561,7 +580,7 @@
validator, choices, unique)
self.calculated_type = calculated_type
self.use_method = use_method
-
+
def __get__(self, obj, objtype):
value = self.default_value()
if obj:
@@ -588,8 +607,9 @@
else:
return None
+
class ListProperty(Property):
-
+
data_type = list
type_name = 'List'
@@ -616,7 +636,7 @@
for item in value:
if not isinstance(item, item_type):
if item_type == (int, long):
- raise ValueError, 'Items in the %s list must all be integers.' % self.name
+ raise ValueError('Items in the %s list must all be integers.' % self.name)
else:
raise ValueError('Items in the %s list must all be %s instances' %
(self.name, self.item_type.__name__))
@@ -638,13 +658,13 @@
item_type = self.item_type
if isinstance(value, item_type):
value = [value]
- elif value == None: # Override to allow them to set this to "None" to remove everything
+ elif value == None: # Override to allow them to set this to "None" to remove everything
value = []
- return super(ListProperty, self).__set__(obj,value)
+ return super(ListProperty, self).__set__(obj, value)
class MapProperty(Property):
-
+
data_type = dict
type_name = 'Map'
@@ -658,7 +678,7 @@
value = super(MapProperty, self).validate(value)
if value is not None:
if not isinstance(value, dict):
- raise ValueError, 'Value must of type dict'
+ raise ValueError('Value must be of type dict')
if self.item_type in (int, long):
item_type = (int, long)
@@ -670,7 +690,7 @@
for key in value:
if not isinstance(value[key], item_type):
if item_type == (int, long):
- raise ValueError, 'Values in the %s Map must all be integers.' % self.name
+ raise ValueError('Values in the %s Map must all be integers.' % self.name)
else:
raise ValueError('Values in the %s Map must all be %s instances' %
(self.name, self.item_type.__name__))
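For reference, a rough sketch of how the property validators above behave after this change (the model and field names are illustrative; the setters validate immediately, and SimpleDB is only contacted when the object is saved):

    import datetime

    from boto.sdb.db.model import Model
    from boto.sdb.db.property import StringProperty, DateTimeProperty

    class Note(Model):
        title = StringProperty(required=True)
        status = StringProperty(choices=['draft', 'final'])
        created = DateTimeProperty()

    n = Note()
    n.title = 'hello'                  # ok
    n.status = 'draft'                 # 'other' would raise ValueError
    n.created = datetime.date.today()  # dates now validate, not just datetimes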
diff --git a/boto/sdb/db/sequence.py b/boto/sdb/db/sequence.py
index 8d10b17..121512f 100644
--- a/boto/sdb/db/sequence.py
+++ b/boto/sdb/db/sequence.py
@@ -136,21 +136,25 @@
self.last_value = None
self.domain_name = domain_name
self.id = id
+ if init_val == None:
+ init_val = fnc(init_val)
+
if self.id == None:
import uuid
self.id = str(uuid.uuid4())
- if init_val == None:
- init_val = fnc(init_val)
- self.val = init_val
self.item_type = type(fnc(None))
self.timestamp = None
# Allow us to pass in a full name to a function
- if type(fnc) == str:
+ if isinstance(fnc, str):
from boto.utils import find_class
fnc = find_class(fnc)
self.fnc = fnc
+ # Bootstrap the value last
+ if not self.val:
+ self.val = init_val
+
def set(self, val):
"""Set the value"""
import time
@@ -167,7 +171,7 @@
self.timestamp = new_val['timestamp']
except SDBResponseError, e:
if e.status == 409:
- raise ValueError, "Sequence out of sync"
+ raise ValueError("Sequence out of sync")
else:
raise
@@ -175,12 +179,13 @@
def get(self):
"""Get the value"""
val = self.db.get_attributes(self.id, consistent_read=True)
- if val and val.has_key('timestamp'):
- self.timestamp = val['timestamp']
- if val and val.has_key('current_value'):
- self._value = self.item_type(val['current_value'])
- if val.has_key("last_value") and val['last_value'] != None:
- self.last_value = self.item_type(val['last_value'])
+ if val:
+ if 'timestamp' in val:
+ self.timestamp = val['timestamp']
+ if 'current_value' in val:
+ self._value = self.item_type(val['current_value'])
+ if "last_value" in val and val['last_value'] != None:
+ self.last_value = self.item_type(val['last_value'])
return self._value
val = property(get, set)
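The reordering above computes the initial value from fnc before an id is minted, and assigns self.val only once fnc and item_type are resolved, so a fresh sequence bootstraps correctly. A hedged usage sketch (the domain name is illustrative, live SimpleDB credentials are assumed, and next() is this module's existing increment helper):

    from boto.sdb.db.sequence import Sequence

    seq = Sequence(domain_name='my_sequences')  # mints a uuid4 id
    seq.next()         # advances the value via fnc and persists it
    current = seq.val  # reads back through the property above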
diff --git a/boto/sdb/db/test_db.py b/boto/sdb/db/test_db.py
index 0c345ab..b872f7f 100644
--- a/boto/sdb/db/test_db.py
+++ b/boto/sdb/db/test_db.py
@@ -153,7 +153,7 @@
t = TestList()
_objects['test_list_t'] = t
t.name = 'a list of ints'
- t.nums = [1,2,3,4,5]
+ t.nums = [1, 2, 3, 4, 5]
t.put()
tt = TestList.get_by_id(t.id)
_objects['test_list_tt'] = tt
diff --git a/boto/sdb/domain.py b/boto/sdb/domain.py
index f348c8a..d4faf04 100644
--- a/boto/sdb/domain.py
+++ b/boto/sdb/domain.py
@@ -343,7 +343,7 @@
if self.value and self.attribute:
value = self.value.strip()
attr_name = self.attribute.strip()
- if self.attrs.has_key(attr_name):
+ if attr_name in self.attrs:
self.attrs[attr_name].append(value)
else:
self.attrs[attr_name] = [value]
diff --git a/boto/sdb/item.py b/boto/sdb/item.py
index 86bc70c..999c7f0 100644
--- a/boto/sdb/item.py
+++ b/boto/sdb/item.py
@@ -75,7 +75,7 @@
else:
self.name = self.decode_value(value)
elif name == 'Value':
- if self.has_key(self.last_key):
+ if self.last_key in self:
if not isinstance(self[self.last_key], list):
self[self.last_key] = [self[self.last_key]]
value = self.decode_value(value)
diff --git a/boto/services/result.py b/boto/services/result.py
index 32a6d6a..4854976 100644
--- a/boto/services/result.py
+++ b/boto/services/result.py
@@ -57,8 +57,7 @@
self.latest_time = end_time
def log_message(self, msg, path):
- keys = msg.keys()
- keys.sort()
+ keys = sorted(msg.keys())
if not self.log_fp:
self.log_fp = open(os.path.join(path, self.LogFileName), 'a')
line = ','.join(keys)
@@ -76,7 +75,7 @@
self.log_message(record, path)
self.calculate_stats(record)
outputs = record['OutputKey'].split(',')
- if record.has_key('OutputBucket'):
+ if 'OutputBucket' in record:
bucket = boto.lookup('s3', record['OutputBucket'])
else:
bucket = boto.lookup('s3', record['Bucket'])
@@ -92,7 +91,7 @@
def get_results_from_queue(self, path, get_file=True, delete_msg=True):
m = self.queue.read()
while m:
- if m.has_key('Batch') and m['Batch'] == self.batch:
+ if 'Batch' in m and m['Batch'] == self.batch:
self.process_record(m, path, get_file)
if delete_msg:
self.queue.delete_message(m)
diff --git a/boto/services/service.py b/boto/services/service.py
index 8ee1a8b..e0e987c 100644
--- a/boto/services/service.py
+++ b/boto/services/service.py
@@ -92,7 +92,7 @@
def save_results(self, results, input_message, output_message):
output_keys = []
for file, type in results:
- if input_message.has_key('OutputBucket'):
+ if 'OutputBucket' in input_message:
output_bucket = input_message['OutputBucket']
else:
output_bucket = input_message['Bucket']
@@ -105,7 +105,7 @@
def write_message(self, message):
message['Service-Write'] = get_ts()
message['Server'] = self.name
- if os.environ.has_key('HOSTNAME'):
+ if 'HOSTNAME' in os.environ:
message['Host'] = os.environ['HOSTNAME']
else:
message['Host'] = 'unknown'
diff --git a/boto/ses/__init__.py b/boto/ses/__init__.py
index f893423..b3d03ae 100644
--- a/boto/ses/__init__.py
+++ b/boto/ses/__init__.py
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -23,6 +23,7 @@
from connection import SESConnection
from boto.regioninfo import RegionInfo
+
def regions():
"""
Get all available regions for the SES service.
@@ -34,15 +35,16 @@
endpoint='email.us-east-1.amazonaws.com',
connection_cls=SESConnection)]
+
def connect_to_region(region_name, **kw_params):
"""
- Given a valid region name, return a
- :class:`boto.sns.connection.SESConnection`.
+ Given a valid region name, return a
+ :class:`boto.ses.connection.SESConnection`.
:type: str
:param region_name: The name of the region to connect to.
-
- :rtype: :class:`boto.sns.connection.SESConnection` or ``None``
+
+ :rtype: :class:`boto.ses.connection.SESConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
@@ -50,20 +52,3 @@
if region.name == region_name:
return region.connect(**kw_params)
return None
-
-def get_region(region_name, **kw_params):
- """
- Find and return a :class:`boto.regioninfo.RegionInfo` object
- given a region name.
-
- :type: str
- :param: The name of the region.
-
- :rtype: :class:`boto.regioninfo.RegionInfo`
- :return: The RegionInfo object for the given region or None if
- an invalid region name is provided.
- """
- for region in regions(**kw_params):
- if region.name == region_name:
- return region
- return None
diff --git a/boto/ses/connection.py b/boto/ses/connection.py
index b6bd298..902e288 100644
--- a/boto/ses/connection.py
+++ b/boto/ses/connection.py
@@ -19,16 +19,17 @@
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+import re
+import urllib
+import base64
from boto.connection import AWSAuthConnection
from boto.exception import BotoServerError
from boto.regioninfo import RegionInfo
import boto
import boto.jsonresponse
-
-import urllib
-import base64
from boto.ses import exceptions as ses_exceptions
+from boto.exception import BotoServerError
class SESConnection(AWSAuthConnection):
@@ -41,7 +42,8 @@
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
- https_connection_factory=None, region=None, path='/'):
+ https_connection_factory=None, region=None, path='/',
+ security_token=None, validate_certs=True):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
@@ -50,7 +52,9 @@
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, debug,
- https_connection_factory, path)
+ https_connection_factory, path,
+ security_token=security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return ['ses']
@@ -139,6 +143,26 @@
# Your account has sent above its allowed requests a second rate.
ExceptionToRaise = ses_exceptions.SESMaxSendingRateExceededError
exc_reason = "Maximum sending rate exceeded."
+ elif "Domain ends with dot." in body:
+ # Recipient address ends with a dot/period. This is invalid.
+ ExceptionToRaise = ses_exceptions.SESDomainEndsWithDotError
+ exc_reason = "Domain ends with dot."
+ elif "Local address contains control or whitespace" in body:
+ # I think this pertains to the recipient address.
+ ExceptionToRaise = ses_exceptions.SESLocalAddressCharacterError
+ exc_reason = "Local address contains control or whitespace."
+ elif "Illegal address" in body:
+ # A clearly mal-formed address.
+ ExceptionToRaise = ses_exceptions.SESIllegalAddressError
+ exc_reason = "Illegal address"
+ # The re.search is to distinguish from the
+ # SESAddressNotVerifiedError error above.
+ elif re.search('Identity.*is not verified', body):
+ ExceptionToRaise = ses_exceptions.SESIdentityNotVerifiedError
+ exc_reason = "Identity is not verified."
+ elif "ownership not confirmed" in body:
+ ExceptionToRaise = ses_exceptions.SESDomainNotConfirmedError
+ exc_reason = "Domain ownership is not confirmed."
else:
# This is either a common AWS error, or one that we don't devote
# its own exception to.
@@ -147,8 +171,9 @@
raise ExceptionToRaise(response.status, exc_reason, body)
- def send_email(self, source, subject, body, to_addresses, cc_addresses=None,
- bcc_addresses=None, format='text', reply_addresses=None,
+ def send_email(self, source, subject, body, to_addresses,
+ cc_addresses=None, bcc_addresses=None,
+ format='text', reply_addresses=None,
return_path=None, text_body=None, html_body=None):
"""Composes an email message based on input data, and then immediately
queues the message for sending.
@@ -186,9 +211,9 @@
:param return_path: The email address to which bounce notifications are
to be forwarded. If the message cannot be delivered
to the recipient, then an error message will be
- returned from the recipient's ISP; this message will
- then be forwarded to the email address specified by
- the ReturnPath parameter.
+ returned from the recipient's ISP; this message
+ will then be forwarded to the email address
+ specified by the ReturnPath parameter.
:type text_body: string
:param text_body: The text body to send with this email.
@@ -201,11 +226,13 @@
if body is not None:
if format == "text":
if text_body is not None:
- raise Warning("You've passed in both a body and a text_body; please choose one or the other.")
+ raise Warning("You've passed in both a body and a "
+ "text_body; please choose one or the other.")
text_body = body
else:
if html_body is not None:
- raise Warning("You've passed in both a body and an html_body; please choose one or the other.")
+ raise Warning("You've passed in both a body and an "
+ "html_body; please choose one or the other.")
html_body = body
params = {
@@ -221,7 +248,7 @@
if text_body is not None:
params['Message.Body.Text.Data'] = text_body
- if(format not in ("text","html")):
+ if(format not in ("text", "html")):
raise ValueError("'format' argument must be 'text' or 'html'")
if(not (html_body or text_body)):
@@ -273,6 +300,10 @@
:param destinations: A list of destinations for the message.
"""
+
+ if isinstance(raw_message, unicode):
+ raw_message = raw_message.encode('utf-8')
+
params = {
'RawMessage.Data': base64.b64encode(raw_message),
}
@@ -347,3 +378,69 @@
return self._make_request('VerifyEmailAddress', {
'EmailAddress': email_address,
})
+
+ def verify_domain_dkim(self, domain):
+ """
+ Returns a set of DNS records, or tokens, that must be published in the
+ domain name's DNS to complete the DKIM verification process. These
+ tokens are DNS ``CNAME`` records that point to DKIM public keys hosted
+ by Amazon SES. To complete the DKIM verification process, these tokens
+ must be published in the domain's DNS. The tokens must remain
+ published in order for Easy DKIM signing to function correctly.
+
+ After the tokens are added to the domain's DNS, Amazon SES will be able
+ to DKIM-sign email originating from that domain. To enable or disable
+ Easy DKIM signing for a domain, use the ``SetIdentityDkimEnabled``
+ action. For more information about Easy DKIM, go to the `Amazon SES
+ Developer Guide
+ <http://docs.amazonwebservices.com/ses/latest/DeveloperGuide>`_.
+
+ :type domain: string
+ :param domain: The domain name.
+
+ """
+ return self._make_request('VerifyDomainDkim', {
+ 'Domain': domain,
+ })
+
+ def set_identity_dkim_enabled(self, identity, dkim_enabled):
+ """Enables or disables DKIM signing of email sent from an identity.
+
+ * If Easy DKIM signing is enabled for a domain name identity (e.g.,
+ ``example.com``), then Amazon SES will DKIM-sign all email sent by
+ addresses under that domain name (e.g., ``user@example.com``).
+ * If Easy DKIM signing is enabled for an email address, then Amazon SES
+ will DKIM-sign all email sent by that email address.
+
+ For email addresses (e.g., ``user@example.com``), you can only enable
+ Easy DKIM signing if the corresponding domain (e.g., ``example.com``)
+ has been set up for Easy DKIM using the AWS Console or the
+ ``VerifyDomainDkim`` action.
+
+ :type identity: string
+ :param identity: An email address or domain name.
+
+ :type dkim_enabled: bool
+ :param dkim_enabled: Specifies whether or not to enable DKIM signing.
+
+ """
+ return self._make_request('SetIdentityDkimEnabled', {
+ 'Identity': identity,
+ 'DkimEnabled': 'true' if dkim_enabled else 'false'
+ })
+
+ def get_identity_dkim_attributes(self, identities):
+ """Get attributes associated with a list of verified identities.
+
+ Given a list of verified identities (email addresses and/or domains),
+ returns a structure describing identity notification attributes.
+
+ :type identities: list
+ :param identities: A list of verified identities (email addresses
+ and/or domains).
+
+ """
+ params = {}
+ self._build_list_params(params, identities, 'Identities.member')
+ return self._make_request('GetIdentityDkimAttributes', params)
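A hedged sketch of the Easy DKIM flow these new methods support (the domain is illustrative, and every call issues a live SES request):

    import boto

    conn = boto.connect_ses()   # credentials from the environment/boto config

    # 1. Ask SES for the DKIM tokens to publish as CNAME records in DNS:
    conn.verify_domain_dkim('example.com')

    # 2. Once the CNAMEs are live, enable Easy DKIM signing for the domain:
    conn.set_identity_dkim_enabled('example.com', True)

    # 3. Inspect DKIM attributes for one or more verified identities:
    conn.get_identity_dkim_attributes(['example.com'])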
diff --git a/boto/ses/exceptions.py b/boto/ses/exceptions.py
index 58825ed..c3341ec 100644
--- a/boto/ses/exceptions.py
+++ b/boto/ses/exceptions.py
@@ -3,14 +3,35 @@
"""
from boto.exception import BotoServerError
-class SESAddressNotVerifiedError(BotoServerError):
+class SESError(BotoServerError):
+ """
+ Sub-class all SES-related errors from here. Don't raise this error
+ directly from anywhere. The only thing this gets us is the ability to
+ catch SESErrors separately from the more generic, top-level
+ BotoServerError exception.
+ """
+ pass
+
+
+
+class SESAddressNotVerifiedError(SESError):
"""
Raised when a "Reply-To" address has not been validated in SES yet.
"""
pass
+class SESIdentityNotVerifiedError(SESError):
+ """
+ Raised when an identity (domain or address) has not been verified in SES yet.
+ """
+ pass
-class SESAddressBlacklistedError(BotoServerError):
+class SESDomainNotConfirmedError(SESError):
+ """
+ """
+ pass
+
+class SESAddressBlacklistedError(SESError):
"""
After you attempt to send mail to an address, and delivery repeatedly
fails, said address is blacklisted for at least 24 hours. The blacklisting
@@ -20,7 +41,7 @@
pass
-class SESDailyQuotaExceededError(BotoServerError):
+class SESDailyQuotaExceededError(SESError):
"""
Your account's daily (rolling 24 hour total) allotment of outbound emails
has been exceeded.
@@ -28,8 +49,29 @@
pass
-class SESMaxSendingRateExceededError(BotoServerError):
+class SESMaxSendingRateExceededError(SESError):
"""
Your account's requests/second limit has been exceeded.
"""
- pass
\ No newline at end of file
+ pass
+
+
+class SESDomainEndsWithDotError(SESError):
+ """
+ Recipient's email address' domain ends with a period/dot.
+ """
+ pass
+
+
+class SESLocalAddressCharacterError(SESError):
+ """
+ An address contained a control or whitespace character.
+ """
+ pass
+
+
+class SESIllegalAddressError(SESError):
+ """
+ Raised when an illegal address is encountered.
+ """
+ pass
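As the SESError docstring says, the base class exists so callers can handle SES failures separately from generic AWS errors; a minimal sketch (addresses are illustrative):

    import boto
    from boto.exception import BotoServerError
    from boto.ses.exceptions import SESError

    conn = boto.connect_ses()
    try:
        conn.send_email('sender@example.com', 'subject', 'body text',
                        ['recipient@example.com'])
    except SESError:
        # Any of the SES-specific failures defined above: blacklisted or
        # illegal addresses, unverified identities, quota or rate limits.
        raise
    except BotoServerError:
        # A generic AWS-level error with no dedicated SES exception.
        raise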
diff --git a/boto/sns/__init__.py b/boto/sns/__init__.py
index 6405624..64d0295 100644
--- a/boto/sns/__init__.py
+++ b/boto/sns/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -25,6 +25,7 @@
from connection import SNSConnection
from boto.regioninfo import RegionInfo
+
def regions():
"""
Get all available regions for the SNS service.
@@ -41,6 +42,9 @@
RegionInfo(name='us-west-1',
endpoint='sns.us-west-1.amazonaws.com',
connection_cls=SNSConnection),
+ RegionInfo(name='sa-east-1',
+ endpoint='sns.sa-east-1.amazonaws.com',
+ connection_cls=SNSConnection),
RegionInfo(name='us-west-2',
endpoint='sns.us-west-2.amazonaws.com',
connection_cls=SNSConnection),
@@ -52,14 +56,15 @@
connection_cls=SNSConnection),
]
+
def connect_to_region(region_name, **kw_params):
"""
- Given a valid region name, return a
+ Given a valid region name, return a
:class:`boto.sns.connection.SNSConnection`.
:type: str
:param region_name: The name of the region to connect to.
-
+
:rtype: :class:`boto.sns.connection.SNSConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
@@ -68,20 +73,3 @@
if region.name == region_name:
return region.connect(**kw_params)
return None
-
-def get_region(region_name, **kw_params):
- """
- Find and return a :class:`boto.regioninfo.RegionInfo` object
- given a region name.
-
- :type: str
- :param: The name of the region.
-
- :rtype: :class:`boto.regioninfo.RegionInfo`
- :return: The RegionInfo object for the given region or None if
- an invalid region name is provided.
- """
- for region in regions(**kw_params):
- if region.name == region_name:
- return region
- return None
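With sa-east-1 added above, connecting by region name stays a one-liner; connect_to_region returns None for unknown names:

    import boto.sns

    conn = boto.sns.connect_to_region('sa-east-1')
    if conn is None:
        raise ValueError('invalid region name')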
diff --git a/boto/sns/connection.py b/boto/sns/connection.py
index 6ce4ff1..bf528a3 100644
--- a/boto/sns/connection.py
+++ b/boto/sns/connection.py
@@ -1,4 +1,5 @@
-# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -14,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -28,6 +29,7 @@
except ImportError:
import json
+
class SNSConnection(AWSQueryConnection):
DefaultRegionName = 'us-east-1'
@@ -38,7 +40,7 @@
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
- security_token=None):
+ security_token=None, validate_certs=True):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint,
@@ -50,7 +52,8 @@
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
- security_token=security_token)
+ security_token=security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return ['sns']
@@ -62,7 +65,7 @@
this method.
"""
- params = {'ContentType' : 'JSON'}
+ params = {'ContentType': 'JSON'}
if next_token:
params['NextToken'] = next_token
response = self.make_request('ListTopics', params, '/', 'GET')
@@ -73,7 +76,7 @@
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
+
def get_topic_attributes(self, topic):
"""
Get attributes of a Topic
@@ -82,8 +85,8 @@
:param topic: The ARN of the topic.
"""
- params = {'ContentType' : 'JSON',
- 'TopicArn' : topic}
+ params = {'ContentType': 'JSON',
+ 'TopicArn': topic}
response = self.make_request('GetTopicAttributes', params, '/', 'GET')
body = response.read()
if response.status == 200:
@@ -92,7 +95,7 @@
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
+
def set_topic_attributes(self, topic, attr_name, attr_value):
"""
Get attributes of a Topic
@@ -109,10 +112,10 @@
:param attr_value: The new value for the attribute.
"""
- params = {'ContentType' : 'JSON',
- 'TopicArn' : topic,
- 'AttributeName' : attr_name,
- 'AttributeValue' : attr_value}
+ params = {'ContentType': 'JSON',
+ 'TopicArn': topic,
+ 'AttributeName': attr_name,
+ 'AttributeValue': attr_value}
response = self.make_request('SetTopicAttributes', params, '/', 'GET')
body = response.read()
if response.status == 200:
@@ -121,7 +124,7 @@
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
+
def add_permission(self, topic, label, account_ids, actions):
"""
Adds a statement to a topic's access control policy, granting
@@ -142,11 +145,11 @@
specified principal(s).
"""
- params = {'ContentType' : 'JSON',
- 'TopicArn' : topic,
- 'Label' : label}
- self.build_list_params(params, account_ids, 'AWSAccountId')
- self.build_list_params(params, actions, 'ActionName')
+ params = {'ContentType': 'JSON',
+ 'TopicArn': topic,
+ 'Label': label}
+ self.build_list_params(params, account_ids, 'AWSAccountId.member')
+ self.build_list_params(params, actions, 'ActionName.member')
response = self.make_request('AddPermission', params, '/', 'GET')
body = response.read()
if response.status == 200:
@@ -155,7 +158,7 @@
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
+
def remove_permission(self, topic, label):
"""
Removes a statement from a topic's access control policy.
@@ -168,9 +171,9 @@
to be removed.
"""
- params = {'ContentType' : 'JSON',
- 'TopicArn' : topic,
- 'Label' : label}
+ params = {'ContentType': 'JSON',
+ 'TopicArn': topic,
+ 'Label': label}
response = self.make_request('RemovePermission', params, '/', 'GET')
body = response.read()
if response.status == 200:
@@ -179,7 +182,7 @@
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
+
def create_topic(self, topic):
"""
Create a new Topic.
@@ -188,8 +191,8 @@
:param topic: The name of the new topic.
"""
- params = {'ContentType' : 'JSON',
- 'Name' : topic}
+ params = {'ContentType': 'JSON',
+ 'Name': topic}
response = self.make_request('CreateTopic', params, '/', 'GET')
body = response.read()
if response.status == 200:
@@ -207,8 +210,8 @@
:param topic: The ARN of the topic
"""
- params = {'ContentType' : 'JSON',
- 'TopicArn' : topic}
+ params = {'ContentType': 'JSON',
+ 'TopicArn': topic}
response = self.make_request('DeleteTopic', params, '/', 'GET')
body = response.read()
if response.status == 200:
@@ -218,8 +221,6 @@
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
-
def publish(self, topic, message, subject=None):
"""
Get properties of a Topic
@@ -237,9 +238,9 @@
line of the email notifications.
"""
- params = {'ContentType' : 'JSON',
- 'TopicArn' : topic,
- 'Message' : message}
+ params = {'ContentType': 'JSON',
+ 'TopicArn': topic,
+ 'Message': message}
if subject:
params['Subject'] = subject
response = self.make_request('Publish', params, '/', 'GET')
@@ -250,7 +251,7 @@
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
+
def subscribe(self, topic, protocol, endpoint):
"""
Subscribe to a Topic.
@@ -271,12 +272,11 @@
* For http, this would be a URL beginning with http
* For https, this would be a URL beginning with https
* For sqs, this would be the ARN of an SQS Queue
-
"""
- params = {'ContentType' : 'JSON',
- 'TopicArn' : topic,
- 'Protocol' : protocol,
- 'Endpoint' : endpoint}
+ params = {'ContentType': 'JSON',
+ 'TopicArn': topic,
+ 'Protocol': protocol,
+ 'Endpoint': endpoint}
response = self.make_request('Subscribe', params, '/', 'GET')
body = response.read()
if response.status == 200:
@@ -291,9 +291,9 @@
Subscribe an SQS queue to a topic.
This is a convenience method that handles most of the complexity involved
- in using ans SQS queue as an endpoint for an SNS topic. To achieve this
+ in using an SQS queue as an endpoint for an SNS topic. To achieve this
the following operations are performed:
-
+
* The correct ARN is constructed for the SQS queue and that ARN is
then subscribed to the topic.
* A JSON policy document is constructed that grants permission to
@@ -302,7 +302,7 @@
the queue's set_attribute method. If the queue already has
a policy associated with it, this process will add a Statement to
that policy. If no policy exists, a new policy will be created.
-
+
:type topic: string
:param topic: The name of the new topic.
@@ -318,12 +318,12 @@
policy['Version'] = '2008-10-17'
if 'Statement' not in policy:
policy['Statement'] = []
- statement = {'Action' : 'SQS:SendMessage',
- 'Effect' : 'Allow',
- 'Principal' : {'AWS' : '*'},
- 'Resource' : q_arn,
- 'Sid' : str(uuid.uuid4()),
- 'Condition' : {'StringLike' : {'aws:SourceArn' : topic}}}
+ statement = {'Action': 'SQS:SendMessage',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': '*'},
+ 'Resource': q_arn,
+ 'Sid': str(uuid.uuid4()),
+ 'Condition': {'StringLike': {'aws:SourceArn': topic}}}
policy['Statement'].append(statement)
queue.set_attribute('Policy', json.dumps(policy))
return resp
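A hedged usage sketch of this convenience method (queue and topic names are illustrative; the nested response layout is assumed from the ContentType='JSON' requests this module makes):

    import boto.sns
    import boto.sqs

    sns = boto.sns.connect_to_region('us-east-1')
    sqs = boto.sqs.connect_to_region('us-east-1')

    queue = sqs.create_queue('my-work-queue')
    topic_arn = sns.create_topic('my-topic')['CreateTopicResponse']\
            ['CreateTopicResult']['TopicArn']

    # Subscribes the queue's ARN to the topic and splices the
    # SQS:SendMessage statement shown above into the queue policy:
    sns.subscribe_sqs_queue(topic_arn, queue)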
@@ -347,9 +347,9 @@
of the subscription.
"""
- params = {'ContentType' : 'JSON',
- 'TopicArn' : topic,
- 'Token' : token}
+ params = {'ContentType': 'JSON',
+ 'TopicArn': topic,
+ 'Token': token}
if authenticate_on_unsubscribe:
params['AuthenticateOnUnsubscribe'] = 'true'
response = self.make_request('ConfirmSubscription', params, '/', 'GET')
@@ -360,7 +360,7 @@
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
+
def unsubscribe(self, subscription):
"""
Allows endpoint owner to delete subscription.
@@ -370,8 +370,8 @@
:param subscription: The ARN of the subscription to be deleted.
"""
- params = {'ContentType' : 'JSON',
- 'SubscriptionArn' : subscription}
+ params = {'ContentType': 'JSON',
+ 'SubscriptionArn': subscription}
response = self.make_request('Unsubscribe', params, '/', 'GET')
body = response.read()
if response.status == 200:
@@ -380,7 +380,7 @@
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
+
def get_all_subscriptions(self, next_token=None):
"""
Get list of all subscriptions.
@@ -390,7 +390,7 @@
this method.
"""
- params = {'ContentType' : 'JSON'}
+ params = {'ContentType': 'JSON'}
if next_token:
params['NextToken'] = next_token
response = self.make_request('ListSubscriptions', params, '/', 'GET')
@@ -401,7 +401,7 @@
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
+
def get_all_subscriptions_by_topic(self, topic, next_token=None):
"""
Get list of all subscriptions to a specific topic.
@@ -415,8 +415,8 @@
this method.
"""
- params = {'ContentType' : 'JSON',
- 'TopicArn' : topic}
+ params = {'ContentType': 'JSON',
+ 'TopicArn': topic}
if next_token:
params['NextToken'] = next_token
response = self.make_request('ListSubscriptionsByTopic', params,
@@ -428,4 +428,3 @@
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
-
diff --git a/boto/sqs/__init__.py b/boto/sqs/__init__.py
index f254fbb..b05ea6d 100644
--- a/boto/sqs/__init__.py
+++ b/boto/sqs/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -22,12 +22,13 @@
from regioninfo import SQSRegionInfo
+
def regions():
"""
Get all available regions for the SQS service.
-
+
:rtype: list
- :return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
+ :return: A list of :class:`boto.sqs.regioninfo.RegionInfo`
"""
return [SQSRegionInfo(name='us-east-1',
endpoint='queue.amazonaws.com'),
@@ -37,12 +38,15 @@
endpoint='us-west-1.queue.amazonaws.com'),
SQSRegionInfo(name='us-west-2',
endpoint='us-west-2.queue.amazonaws.com'),
+ SQSRegionInfo(name='sa-east-1',
+ endpoint='sa-east-1.queue.amazonaws.com'),
SQSRegionInfo(name='ap-northeast-1',
endpoint='ap-northeast-1.queue.amazonaws.com'),
SQSRegionInfo(name='ap-southeast-1',
endpoint='ap-southeast-1.queue.amazonaws.com')
]
+
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
diff --git a/boto/sqs/batchresults.py b/boto/sqs/batchresults.py
new file mode 100644
index 0000000..aa5f86b
--- /dev/null
+++ b/boto/sqs/batchresults.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+A set of results returned by SendMessageBatch.
+"""
+
+class ResultEntry(dict):
+ """
+ The result (successful or unsuccessful) of a single
+ message within a send_message_batch request.
+
+ In the case of a successful result, this dict-like
+ object will contain the following items:
+
+ :ivar id: A string containing the user-supplied ID of the message.
+ :ivar message_id: A string containing the SQS ID of the new message.
+ :ivar message_md5: A string containing the MD5 hash of the message body.
+
+ In the case of an error, this object will contain the following
+ items:
+
+ :ivar id: A string containing the user-supplied ID of the message.
+ :ivar sender_fault: A boolean value.
+ :ivar error_code: A string containing a short description of the error.
+ :ivar error_message: A string containing a description of the error.
+ """
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Id':
+ self['id'] = value
+ elif name == 'MessageId':
+ self['message_id'] = value
+ elif name == 'MD5OfMessageBody':
+ self['message_md5'] = value
+ elif name == 'SenderFault':
+ self['sender_fault'] = value
+ elif name == 'Code':
+ self['error_code'] = value
+ elif name == 'Message':
+ self['error_message'] = value
+
+class BatchResults(object):
+ """
+ A container for the results of a send_message_batch request.
+
+ :ivar results: A list of successful results. Each item in the
+ list will be an instance of :class:`ResultEntry`.
+
+ :ivar errors: A list of unsuccessful results. Each item in the
+ list will be an instance of :class:`ResultEntry`.
+ """
+
+ def __init__(self, parent):
+ self.parent = parent
+ self.results = []
+ self.errors = []
+
+ def startElement(self, name, attrs, connection):
+ if name.endswith('MessageBatchResultEntry'):
+ entry = ResultEntry()
+ self.results.append(entry)
+ return entry
+ if name == 'BatchResultErrorEntry':
+ entry = ResultEntry()
+ self.errors.append(entry)
+ return entry
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+
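A minimal sketch of consuming a BatchResults object returned by the batch methods in boto/sqs/connection.py (conn, queue and messages are assumed to exist; the keys follow the ResultEntry docstring above):

    br = conn.send_message_batch(queue, messages)
    for entry in br.results:
        print entry['id'], entry['message_id'], entry['message_md5']
    for entry in br.errors:
        print entry['id'], entry['error_code'], entry['error_message']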
diff --git a/boto/sqs/connection.py b/boto/sqs/connection.py
index 1c59b7d..90ecf0f 100644
--- a/boto/sqs/connection.py
+++ b/boto/sqs/connection.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -24,7 +24,8 @@
from boto.sqs.queue import Queue
from boto.sqs.message import Message
from boto.sqs.attributes import Attributes
-from boto.exception import SQSError
+from boto.sqs.batchresults import BatchResults
+from boto.exception import SQSError, BotoServerError
class SQSConnection(AWSQueryConnection):
@@ -41,7 +42,7 @@
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
- security_token=None):
+ security_token=None, validate_certs=True):
if not region:
region = SQSRegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
@@ -53,7 +54,8 @@
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
- security_token=security_token)
+ security_token=security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return ['sqs']
@@ -63,21 +65,20 @@
Create an SQS Queue.
:type queue_name: str or unicode
- :param queue_name: The name of the new queue. Names are scoped to
- an account and need to be unique within that
- account. Calling this method on an existing
- queue name will not return an error from SQS
- unless the value for visibility_timeout is
- different than the value of the existing queue
- of that name. This is still an expensive operation,
- though, and not the preferred way to check for
- the existence of a queue. See the
- :func:`boto.sqs.connection.SQSConnection.lookup` method.
+ :param queue_name: The name of the new queue. Names are
+ scoped to an account and need to be unique within that
+ account. Calling this method on an existing queue name
+ will not return an error from SQS unless the value for
+ visibility_timeout is different than the value of the
+ existing queue of that name. This is still an expensive
+ operation, though, and not the preferred way to check for
+ the existence of a queue. See the
+ :func:`boto.sqs.connection.SQSConnection.lookup` method.
:type visibility_timeout: int
- :param visibility_timeout: The default visibility timeout for all
- messages written in the queue. This can
- be overridden on a per-message.
+ :param visibility_timeout: The default visibility timeout for
+ all messages written in the queue. This can be overridden
+ on a per-message basis.
:rtype: :class:`boto.sqs.queue.Queue`
:return: The newly created queue.
@@ -95,16 +96,15 @@
:type queue: A Queue object
:param queue: The SQS queue to be deleted
-
+
:type force_deletion: Boolean
- :param force_deletion: Normally, SQS will not delete a queue that
- contains messages. However, if the
- force_deletion argument is True, the
- queue will be deleted regardless of whether
- there are messages in the queue or not.
- USE WITH CAUTION. This will delete all
- messages in the queue as well.
-
+ :param force_deletion: Normally, SQS will not delete a queue
+ that contains messages. However, if the force_deletion
+ argument is True, the queue will be deleted regardless of
+ whether there are messages in the queue or not. USE WITH
+ CAUTION. This will delete all messages in the queue as
+ well.
+
:rtype: bool
:return: True if the command succeeded, False otherwise
"""
@@ -113,22 +113,22 @@
def get_queue_attributes(self, queue, attribute='All'):
"""
Gets one or all attributes of a Queue
-
+
:type queue: A Queue object
:param queue: The SQS queue to be deleted
:type attribute: str
- :type attribute: The specific attribute requested. If not supplied,
- the default is to return all attributes.
- Valid attributes are:
-
- ApproximateNumberOfMessages|
- ApproximateNumberOfMessagesNotVisible|
- VisibilityTimeout|
- CreatedTimestamp|
- LastModifiedTimestamp|
- Policy
-
+ :param attribute: The specific attribute requested. If not
+ supplied, the default is to return all attributes. Valid
+ attributes are:
+
+ * ApproximateNumberOfMessages
+ * ApproximateNumberOfMessagesNotVisible
+ * VisibilityTimeout
+ * CreatedTimestamp
+ * LastModifiedTimestamp
+ * Policy
+
:rtype: :class:`boto.sqs.attributes.Attributes`
:return: An Attributes object containing request value(s).
"""
@@ -147,26 +147,27 @@
:type queue: A Queue object
:param queue: The Queue from which messages are read.
-
+
:type number_messages: int
:param number_messages: The maximum number of messages to read
(default=1)
-
+
:type visibility_timeout: int
:param visibility_timeout: The number of seconds the message should
- remain invisible to other queue readers
- (default=None which uses the Queues default)
+ remain invisible to other queue readers
+ (default=None, which uses the Queue's default)
:type attributes: str
:param attributes: The name of additional attribute to return
- with response or All if you want all attributes.
- The default is to return no additional attributes.
- Valid values:
-
- All|SenderId|SentTimestamp|
- ApproximateReceiveCount|
- ApproximateFirstReceiveTimestamp
-
+ with response or All if you want all attributes. The
+ default is to return no additional attributes. Valid
+ values:
+ * All
+ * SenderId
+ * SentTimestamp
+ * ApproximateReceiveCount
+ * ApproximateFirstReceiveTimestamp
+
:rtype: list
:return: A list of :class:`boto.sqs.message.Message` objects.
"""
@@ -185,26 +186,46 @@
:type queue: A :class:`boto.sqs.queue.Queue` object
:param queue: The Queue from which messages are read.
-
+
:type message: A :class:`boto.sqs.message.Message` object
:param message: The Message to be deleted
-
+
:rtype: bool
:return: True if successful, False otherwise.
"""
params = {'ReceiptHandle' : message.receipt_handle}
return self.get_status('DeleteMessage', params, queue.id)
+ def delete_message_batch(self, queue, messages):
+ """
+ Deletes a list of messages from a queue in a single request.
+
+ :type queue: A :class:`boto.sqs.queue.Queue` object.
+ :param queue: The Queue from which the messages will be deleted.
+
+ :type messages: List of :class:`boto.sqs.message.Message` objects.
+ :param messages: A list of message objects.
+ """
+ params = {}
+ for i, msg in enumerate(messages):
+ prefix = 'DeleteMessageBatchRequestEntry'
+ p_name = '%s.%i.Id' % (prefix, (i+1))
+ params[p_name] = msg.id
+ p_name = '%s.%i.ReceiptHandle' % (prefix, (i+1))
+ params[p_name] = msg.receipt_handle
+ return self.get_object('DeleteMessageBatch', params, BatchResults,
+ queue.id, verb='POST')
+
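A hedged sketch of pairing the new batch delete with a read (conn and queue are assumed; get_messages is the existing Queue read helper):

    msgs = queue.get_messages(num_messages=10)
    if msgs:
        br = conn.delete_message_batch(queue, msgs)
        for entry in br.errors:
            print 'not deleted:', entry['id'], entry['error_code']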
def delete_message_from_handle(self, queue, receipt_handle):
"""
Delete a message from a queue, given a receipt handle.
:type queue: A :class:`boto.sqs.queue.Queue` object
:param queue: The Queue from which messages are read.
-
+
:type receipt_handle: str
:param receipt_handle: The receipt handle for the message
-
+
:rtype: bool
:return: True if successful, False otherwise.
"""
@@ -218,6 +239,34 @@
return self.get_object('SendMessage', params, Message,
queue.id, verb='POST')
+ def send_message_batch(self, queue, messages):
+ """
+ Delivers up to 10 messages to a queue in a single request.
+
+ :type queue: A :class:`boto.sqs.queue.Queue` object.
+ :param queue: The Queue to which the messages will be written.
+
+ :type messages: List of lists.
+ :param messages: A list of lists or tuples. Each inner
+ tuple represents a single message to be written
+            and consists of an ID (string) that must be unique
+ within the list of messages, the message body itself
+ which can be a maximum of 64K in length, and an
+ integer which represents the delay time (in seconds)
+ for the message (0-900) before the message will
+ be delivered to the queue.
+ """
+ params = {}
+ for i, msg in enumerate(messages):
+ p_name = 'SendMessageBatchRequestEntry.%i.Id' % (i+1)
+ params[p_name] = msg[0]
+ p_name = 'SendMessageBatchRequestEntry.%i.MessageBody' % (i+1)
+ params[p_name] = msg[1]
+ p_name = 'SendMessageBatchRequestEntry.%i.DelaySeconds' % (i+1)
+ params[p_name] = msg[2]
+ return self.get_object('SendMessageBatch', params, BatchResults,
+ queue.id, verb='POST')
+
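# Sketch of the (id, body, delay_seconds) tuples send_message_batch expects;
# `conn` and `q` are assumed, and the ids only need to be unique per request.
batch = [('msg-1', 'first body', 0),    # delivered immediately
         ('msg-2', 'second body', 45)]  # held back for 45 seconds
conn.send_message_batch(q, batch)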
def change_message_visibility(self, queue, receipt_handle,
visibility_timeout):
"""
@@ -226,11 +275,11 @@
:type queue: A :class:`boto.sqs.queue.Queue` object
:param queue: The Queue from which messages are read.
-
+
:type receipt_handle: str
         :param receipt_handle: The receipt handle associated with the message whose
visibility timeout will be changed.
-
+
:type visibility_timeout: int
:param visibility_timeout: The new value of the message's visibility
timeout in seconds.
@@ -239,18 +288,60 @@
'VisibilityTimeout' : visibility_timeout}
return self.get_status('ChangeMessageVisibility', params, queue.id)
+ def change_message_visibility_batch(self, queue, messages):
+ """
+ A batch version of change_message_visibility that can act
+ on up to 10 messages at a time.
+
+ :type queue: A :class:`boto.sqs.queue.Queue` object.
+        :param queue: The Queue from which the messages were read.
+
+ :type messages: List of tuples.
+ :param messages: A list of tuples where each tuple consists
+ of a :class:`boto.sqs.message.Message` object and an integer
+ that represents the new visibility timeout for that message.
+ """
+ params = {}
+ for i, t in enumerate(messages):
+ prefix = 'ChangeMessageVisibilityBatchRequestEntry'
+ p_name = '%s.%i.Id' % (prefix, (i+1))
+ params[p_name] = t[0].id
+ p_name = '%s.%i.ReceiptHandle' % (prefix, (i+1))
+ params[p_name] = t[0].receipt_handle
+ p_name = '%s.%i.VisibilityTimeout' % (prefix, (i+1))
+ params[p_name] = t[1]
+ return self.get_object('ChangeMessageVisibilityBatch',
+ params, BatchResults,
+ queue.id, verb='POST')
+
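# Sketch: extend the visibility timeout of a just-read batch by 300 seconds,
# assuming `conn` and `q` as above; each entry pairs a Message with its new
# timeout.
msgs = conn.receive_message(q, number_messages=10)
if msgs:
    conn.change_message_visibility_batch(q, [(m, 300) for m in msgs])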
def get_all_queues(self, prefix=''):
+ """
+ Retrieves all queues.
+
+ :keyword str prefix: Optionally, only return queues that start with
+ this value.
+ :rtype: list
+ :returns: A list of :py:class:`boto.sqs.queue.Queue` instances.
+ """
params = {}
if prefix:
params['QueueNamePrefix'] = prefix
return self.get_list('ListQueues', params, [('QueueUrl', Queue)])
def get_queue(self, queue_name):
- rs = self.get_all_queues(queue_name)
- for q in rs:
- if q.url.endswith(queue_name):
- return q
- return None
+ """
+ Retrieves the queue with the given name, or ``None`` if no match
+ was found.
+
+ :param str queue_name: The name of the queue to retrieve.
+ :rtype: :py:class:`boto.sqs.queue.Queue` or ``None``
+ :returns: The requested queue, or ``None`` if no match was found.
+ """
+ params = {'QueueName': queue_name}
+ try:
+ return self.get_object('GetQueueUrl', params, Queue)
+ except SQSError:
+ return None
lookup = get_queue
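# Sketch: the reworked lookup uses GetQueueUrl and returns None on a miss,
# so get-or-create becomes (queue name hypothetical):
q = conn.get_queue('work-items')
if q is None:
    q = conn.create_queue('work-items')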
@@ -267,20 +358,24 @@
:type label: str or unicode
         :param label: A unique identifier for the permission you are setting.
- Maximum of 80 characters ``[0-9a-zA-Z_-]``
- Example, AliceSendMessage
+ Maximum of 80 characters ``[0-9a-zA-Z_-]``
+ Example, AliceSendMessage
:type aws_account_id: str or unicode
- :param principal_id: The AWS account number of the principal who will
- be given permission. The principal must have
- an AWS account, but does not need to be signed
- up for Amazon SQS. For information
- about locating the AWS account identification.
+        :param aws_account_id: The AWS account number of the principal
+            who will be given permission. The principal must have an
+            AWS account, but does not need to be signed up for Amazon
+            SQS. For information about locating the AWS account
+            identification, see the AWS documentation.
:type action_name: str or unicode
:param action_name: The action. Valid choices are:
- \*|SendMessage|ReceiveMessage|DeleteMessage|
- ChangeMessageVisibility|GetQueueAttributes
+            * ``*`` (all actions)
+ * SendMessage
+ * ReceiveMessage
+ * DeleteMessage
+ * ChangeMessageVisibility
+ * GetQueueAttributes
:rtype: bool
:return: True if successful, False otherwise.
@@ -307,8 +402,3 @@
"""
params = {'Label': label}
return self.get_status('RemovePermission', params, queue.id)
-
-
-
-
-
diff --git a/boto/sqs/message.py b/boto/sqs/message.py
index 8fabd47..c731990 100644
--- a/boto/sqs/message.py
+++ b/boto/sqs/message.py
@@ -67,6 +67,7 @@
import StringIO
from boto.sqs.attributes import Attributes
from boto.exception import SQSDecodeError
+import boto
class RawMessage:
"""
@@ -156,7 +157,8 @@
try:
value = base64.b64decode(value)
except:
- raise SQSDecodeError('Unable to decode message', self)
+ boto.log.warning('Unable to decode message')
+ return value
return value
class MHMessage(Message):
@@ -198,7 +200,7 @@
return s
def __getitem__(self, key):
- if self._body.has_key(key):
+ if key in self._body:
return self._body[key]
else:
raise KeyError(key)
@@ -217,7 +219,7 @@
return self._body.items()
def has_key(self, key):
- return self._body.has_key(key)
+ return key in self._body
def update(self, d):
self._body.update(d)
diff --git a/boto/sqs/queue.py b/boto/sqs/queue.py
index afb100a..ca5593c 100644
--- a/boto/sqs/queue.py
+++ b/boto/sqs/queue.py
@@ -35,6 +35,9 @@
self.message_class = message_class
self.visibility_timeout = None
+ def __repr__(self):
+ return 'Queue(%s)' % self.url
+
def _id(self):
if self.url:
val = urlparse.urlparse(self.url)[2]
@@ -64,9 +67,10 @@
def set_message_class(self, message_class):
"""
- Set the message class that should be used when instantiating messages read
- from the queue. By default, the class boto.sqs.message.Message is used but
- this can be overriden with any class that behaves like a message.
+ Set the message class that should be used when instantiating
+ messages read from the queue. By default, the class
+        :class:`boto.sqs.message.Message` is used but this can be overridden
+ with any class that behaves like a message.
:type message_class: Message-like class
:param message_class: The new Message class
@@ -101,8 +105,8 @@
only valid value at this time is: VisibilityTimeout
:type value: int
:param value: The new value for the attribute.
- For VisibilityTimeout the value must be an
- integer number of seconds from 0 to 86400.
+ For VisibilityTimeout the value must be an
+ integer number of seconds from 0 to 86400.
:rtype: bool
:return: True if successful, otherwise False.
@@ -137,32 +141,34 @@
:type label: str or unicode
         :param label: A unique identifier for the permission you are setting.
- Maximum of 80 characters ``[0-9a-zA-Z_-]``
- Example, AliceSendMessage
+ Maximum of 80 characters ``[0-9a-zA-Z_-]``
+ Example, AliceSendMessage
:type aws_account_id: str or unicode
- :param principal_id: The AWS account number of the principal who will be given
- permission. The principal must have an AWS account, but
- does not need to be signed up for Amazon SQS. For information
- about locating the AWS account identification.
+        :param aws_account_id: The AWS account number of the principal who
+            will be given permission. The principal must have an AWS account,
+            but does not need to be signed up for Amazon SQS. For information
+            about locating the AWS account identification, see the AWS
+            documentation.
:type action_name: str or unicode
:param action_name: The action. Valid choices are:
- \*|SendMessage|ReceiveMessage|DeleteMessage|
- ChangeMessageVisibility|GetQueueAttributes
+ *|SendMessage|ReceiveMessage|DeleteMessage|
+ ChangeMessageVisibility|GetQueueAttributes
:rtype: bool
:return: True if successful, False otherwise.
"""
- return self.connection.add_permission(self, label, aws_account_id, action_name)
+ return self.connection.add_permission(self, label, aws_account_id,
+ action_name)
def remove_permission(self, label):
"""
Remove a permission from a queue.
:type label: str or unicode
- :param label: The unique label associated with the permission being removed.
+ :param label: The unique label associated with the permission
+ being removed.
:rtype: bool
:return: True if successful, False otherwise.
@@ -195,11 +201,29 @@
:rtype: :class:`boto.sqs.message.Message`
:return: The :class:`boto.sqs.message.Message` object that was written.
"""
- new_msg = self.connection.send_message(self, message.get_body_encoded(), delay_seconds)
+ new_msg = self.connection.send_message(self,
+ message.get_body_encoded(),
+ delay_seconds)
message.id = new_msg.id
message.md5 = new_msg.md5
return message
+ def write_batch(self, messages):
+ """
+ Delivers up to 10 messages in a single request.
+
+ :type messages: List of lists.
+ :param messages: A list of lists or tuples. Each inner
+ tuple represents a single message to be written
+            and consists of an ID (string) that must be unique
+ within the list of messages, the message body itself
+ which can be a maximum of 64K in length, and an
+ integer which represents the delay time (in seconds)
+ for the message (0-900) before the message will
+ be delivered to the queue.
+ """
+ return self.connection.send_message_batch(self, messages)
+
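# Sketch: the Queue-level wrapper takes the same (id, body, delay) tuples
# as SQSConnection.send_message_batch; `q` is an assumed Queue.
q.write_batch([('id-1', 'payload-1', 0), ('id-2', 'payload-2', 10)])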
def new_message(self, body=''):
"""
Create new message of appropriate class.
@@ -221,20 +245,18 @@
Get a variable number of messages.
:type num_messages: int
- :param num_messages: The maximum number of messages to read from the queue.
+ :param num_messages: The maximum number of messages to read from
+ the queue.
:type visibility_timeout: int
:param visibility_timeout: The VisibilityTimeout for the messages read.
:type attributes: str
- :param attributes: The name of additional attribute to return with response
- or All if you want all attributes. The default is to
- return no additional attributes. Valid values:
- All
- SenderId
- SentTimestamp
- ApproximateReceiveCount
- ApproximateFirstReceiveTimestamp
+        :param attributes: The name of an additional attribute to return
+            with the response, or All if you want all attributes. The
+            default is to return no additional attributes. Valid
+            values: All, SenderId, SentTimestamp, ApproximateReceiveCount,
+            ApproximateFirstReceiveTimestamp
:rtype: list
:return: A list of :class:`boto.sqs.message.Message` objects.
@@ -255,6 +277,27 @@
"""
return self.connection.delete_message(self, message)
+ def delete_message_batch(self, messages):
+ """
+ Deletes a list of messages in a single request.
+
+ :type messages: List of :class:`boto.sqs.message.Message` objects.
+ :param messages: A list of message objects.
+ """
+ return self.connection.delete_message_batch(self, messages)
+
+ def change_message_visibility_batch(self, messages):
+ """
+ A batch version of change_message_visibility that can act
+ on up to 10 messages at a time.
+
+ :type messages: List of tuples.
+ :param messages: A list of tuples where each tuple consists
+ of a :class:`boto.sqs.message.Message` object and an integer
+ that represents the new visibility timeout for that message.
+ """
+ return self.connection.change_message_visibility_batch(self, messages)
+
def delete(self):
"""
Delete the queue.
diff --git a/boto/storage_uri.py b/boto/storage_uri.py
index e0a6bef..ca0d7cb 100755
--- a/boto/storage_uri.py
+++ b/boto/storage_uri.py
@@ -22,6 +22,7 @@
import boto
import os
+import sys
from boto.exception import BotoClientError
from boto.exception import InvalidUriError
@@ -41,6 +42,13 @@
# https_connection_factory).
connection_args = None
+ # Map of provider scheme ('s3' or 'gs') to AWSAuthConnection object. We
+ # maintain a pool here in addition to the connection pool implemented
+ # in AWSAuthConnection because the latter re-creates its connection pool
+ # every time that class is instantiated (so the current pool is used to
+ # avoid re-instantiating AWSAuthConnection).
+ provider_pool = {}
+
def __init__(self):
"""Uncallable constructor on abstract base StorageUri class.
"""
@@ -57,10 +65,30 @@
def check_response(self, resp, level, uri):
if resp is None:
- raise InvalidUriError('Attempt to get %s for "%s" failed. This '
- 'probably indicates the URI is invalid' %
+ raise InvalidUriError('Attempt to get %s for "%s" failed.\nThis '
+ 'can happen if the URI refers to a non-'
+ 'existent object or if you meant to\noperate '
+ 'on a directory (e.g., leaving off -R option '
+ 'on gsutil cp, mv, or ls of a\nbucket)' %
(level, uri))
+ def _check_bucket_uri(self, function_name):
+ if issubclass(type(self), BucketStorageUri) and not self.bucket_name:
+ raise InvalidUriError(
+ '%s on bucket-less URI (%s)' % (function_name, self.uri))
+
+ def _check_object_uri(self, function_name):
+ if issubclass(type(self), BucketStorageUri) and not self.object_name:
+ raise InvalidUriError('%s on object-less URI (%s)' %
+ (function_name, self.uri))
+
+ def _warn_about_args(self, function_name, **args):
+ for arg in args:
+ if args[arg]:
+ sys.stderr.write(
+ 'Warning: %s ignores argument: %s=%s\n' %
+ (function_name, arg, str(args[arg])))
+
def connect(self, access_key_id=None, secret_access_key=None, **kwargs):
"""
Opens a connection to appropriate provider, depending on provider
@@ -71,7 +99,6 @@
@rtype: L{AWSAuthConnection<boto.gs.connection.AWSAuthConnection>}
@return: A connection to storage service provider of the given URI.
"""
-
connection_args = dict(self.connection_args or ())
# Use OrdinaryCallingFormat instead of boto-default
# SubdomainCallingFormat because the latter changes the hostname
@@ -82,18 +109,26 @@
# the resumable upload/download tests.
from boto.s3.connection import OrdinaryCallingFormat
connection_args['calling_format'] = OrdinaryCallingFormat()
+ if (hasattr(self, 'suppress_consec_slashes') and
+ 'suppress_consec_slashes' not in connection_args):
+ connection_args['suppress_consec_slashes'] = (
+ self.suppress_consec_slashes)
connection_args.update(kwargs)
if not self.connection:
- if self.scheme == 's3':
+ if self.scheme in self.provider_pool:
+ self.connection = self.provider_pool[self.scheme]
+ elif self.scheme == 's3':
from boto.s3.connection import S3Connection
self.connection = S3Connection(access_key_id,
secret_access_key,
**connection_args)
+ self.provider_pool[self.scheme] = self.connection
elif self.scheme == 'gs':
from boto.gs.connection import GSConnection
self.connection = GSConnection(access_key_id,
secret_access_key,
**connection_args)
+ self.provider_pool[self.scheme] = self.connection
elif self.scheme == 'file':
from boto.file.connection import FileConnection
self.connection = FileConnection(self)
@@ -103,65 +138,73 @@
self.connection.debug = self.debug
return self.connection
- def delete_key(self, validate=True, headers=None, version_id=None,
+ def delete_key(self, validate=False, headers=None, version_id=None,
mfa_token=None):
- if not self.object_name:
- raise InvalidUriError('delete_key on object-less URI (%s)' %
- self.uri)
+ self._check_object_uri('delete_key')
bucket = self.get_bucket(validate, headers)
return bucket.delete_key(self.object_name, headers, version_id,
mfa_token)
- def get_all_keys(self, validate=True, headers=None):
+ def list_bucket(self, prefix='', delimiter='', headers=None):
+ self._check_bucket_uri('list_bucket')
+ return self.get_bucket(headers=headers).list(prefix=prefix,
+ delimiter=delimiter,
+ headers=headers)
+
+ def get_all_keys(self, validate=False, headers=None, prefix=None):
bucket = self.get_bucket(validate, headers)
return bucket.get_all_keys(headers)
- def get_bucket(self, validate=True, headers=None):
- if self.bucket_name is None:
- raise InvalidUriError('get_bucket on bucket-less URI (%s)' %
- self.uri)
+ def get_bucket(self, validate=False, headers=None):
+ self._check_bucket_uri('get_bucket')
conn = self.connect()
bucket = conn.get_bucket(self.bucket_name, validate, headers)
self.check_response(bucket, 'bucket', self.uri)
return bucket
- def get_key(self, validate=True, headers=None, version_id=None):
- if not self.object_name:
- raise InvalidUriError('get_key on object-less URI (%s)' % self.uri)
+ def get_key(self, validate=False, headers=None, version_id=None):
+ self._check_object_uri('get_key')
bucket = self.get_bucket(validate, headers)
key = bucket.get_key(self.object_name, headers, version_id)
self.check_response(key, 'key', self.uri)
return key
- def new_key(self, validate=True, headers=None):
- if not self.object_name:
- raise InvalidUriError('new_key on object-less URI (%s)' % self.uri)
+ def new_key(self, validate=False, headers=None):
+ self._check_object_uri('new_key')
bucket = self.get_bucket(validate, headers)
return bucket.new_key(self.object_name)
- def get_contents_as_string(self, validate=True, headers=None, cb=None,
+ def get_contents_to_stream(self, fp, headers=None, version_id=None):
+        self._check_object_uri('get_contents_to_stream')
+        self._warn_about_args('get_contents_to_stream', validate=False)
+ key = self.get_key(None, headers)
+ self.check_response(key, 'key', self.uri)
+ return key.get_contents_to_file(fp, headers, version_id=version_id)
+
+ def get_contents_to_file(self, fp, headers=None, cb=None, num_cb=10,
+ torrent=False, version_id=None,
+ res_download_handler=None, response_headers=None):
+ self._check_object_uri('get_contents_to_file')
+ key = self.get_key(None, headers)
+ self.check_response(key, 'key', self.uri)
+ key.get_contents_to_file(fp, headers, cb, num_cb, torrent, version_id,
+ res_download_handler, response_headers)
+
+ def get_contents_as_string(self, validate=False, headers=None, cb=None,
num_cb=10, torrent=False, version_id=None):
- if not self.object_name:
- raise InvalidUriError('get_contents_as_string on object-less URI '
- '(%s)' % self.uri)
+ self._check_object_uri('get_contents_as_string')
key = self.get_key(validate, headers)
self.check_response(key, 'key', self.uri)
return key.get_contents_as_string(headers, cb, num_cb, torrent,
version_id)
def acl_class(self):
- if self.bucket_name is None:
- raise InvalidUriError('acl_class on bucket-less URI (%s)' %
- self.uri)
conn = self.connect()
acl_class = conn.provider.acl_class
self.check_response(acl_class, 'acl_class', self.uri)
return acl_class
def canned_acls(self):
- if self.bucket_name is None:
- raise InvalidUriError('canned_acls on bucket-less URI (%s)' %
- self.uri)
conn = self.connect()
canned_acls = conn.provider.canned_acls
self.check_response(canned_acls, 'canned_acls', self.uri)
@@ -174,8 +217,11 @@
Callers should instantiate this class by calling boto.storage_uri().
"""
+ delim = '/'
+ capabilities = set([]) # A set of additional capabilities.
+
def __init__(self, scheme, bucket_name=None, object_name=None,
- debug=0, connection_args=None):
+ debug=0, connection_args=None, suppress_consec_slashes=True):
"""Instantiate a BucketStorageUri from scheme,bucket,object tuple.
@type scheme: string
@@ -190,6 +236,8 @@
@param connection_args: optional map containing args to be
passed to {S3,GS}Connection constructor (e.g., to override
https_connection_factory).
+ @param suppress_consec_slashes: If provided, controls whether
+ consecutive slashes will be suppressed in key paths.
After instantiation the components are available in the following
fields: uri, scheme, bucket_name, object_name.
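# Sketch: the new keyword is normally reached through boto.storage_uri(),
# e.g. to preserve the empty path segment in 'a//b' (URI is hypothetical;
# no network I/O happens at construction time).
import boto
uri = boto.storage_uri('gs://my-bucket/a//b', suppress_consec_slashes=False)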
@@ -200,6 +248,7 @@
self.object_name = object_name
if connection_args:
self.connection_args = connection_args
+ self.suppress_consec_slashes = suppress_consec_slashes
if self.bucket_name and self.object_name:
self.uri = ('%s://%s/%s' % (self.scheme, self.bucket_name,
self.object_name))
@@ -216,52 +265,62 @@
@type new_name: string
@param new_name: new object name
"""
- if not self.bucket_name:
- raise InvalidUriError('clone_replace_name() on bucket-less URI %s' %
- self.uri)
- return BucketStorageUri(self.scheme, self.bucket_name, new_name,
- self.debug)
+ self._check_bucket_uri('clone_replace_name')
+ return BucketStorageUri(
+ self.scheme, bucket_name=self.bucket_name, object_name=new_name,
+ debug=self.debug,
+ suppress_consec_slashes=self.suppress_consec_slashes)
- def get_acl(self, validate=True, headers=None, version_id=None):
+ def get_acl(self, validate=False, headers=None, version_id=None):
"""returns a bucket's acl"""
- if not self.bucket_name:
- raise InvalidUriError('get_acl on bucket-less URI (%s)' % self.uri)
+ self._check_bucket_uri('get_acl')
bucket = self.get_bucket(validate, headers)
# This works for both bucket- and object- level ACLs (former passes
# key_name=None):
- acl = bucket.get_acl(self.object_name, headers, version_id)
+ key_name = self.object_name or ''
+ acl = bucket.get_acl(key_name, headers, version_id)
self.check_response(acl, 'acl', self.uri)
return acl
- def get_def_acl(self, validate=True, headers=None):
+ def get_def_acl(self, validate=False, headers=None):
"""returns a bucket's default object acl"""
- if not self.bucket_name:
- raise InvalidUriError('get_acl on bucket-less URI (%s)' % self.uri)
+ self._check_bucket_uri('get_def_acl')
bucket = self.get_bucket(validate, headers)
# This works for both bucket- and object- level ACLs (former passes
# key_name=None):
- acl = bucket.get_def_acl(self.object_name, headers)
+ acl = bucket.get_def_acl('', headers)
self.check_response(acl, 'acl', self.uri)
return acl
- def get_location(self, validate=True, headers=None):
- if not self.bucket_name:
- raise InvalidUriError('get_location on bucket-less URI (%s)' %
- self.uri)
+ def get_cors(self, validate=False, headers=None):
+ """returns a bucket's CORS XML"""
+ self._check_bucket_uri('get_cors')
+ bucket = self.get_bucket(validate, headers)
+ cors = bucket.get_cors(headers)
+ self.check_response(cors, 'cors', self.uri)
+ return cors
+
+ def set_cors(self, cors, validate=False, headers=None):
+ """sets or updates a bucket's CORS XML"""
+        self._check_bucket_uri('set_cors')
+ bucket = self.get_bucket(validate, headers)
+ bucket.set_cors(cors.to_xml(), headers)
+
+ def get_location(self, validate=False, headers=None):
+ self._check_bucket_uri('get_location')
bucket = self.get_bucket(validate, headers)
return bucket.get_location()
- def get_subresource(self, subresource, validate=True, headers=None,
+ def get_subresource(self, subresource, validate=False, headers=None,
version_id=None):
- if not self.bucket_name:
- raise InvalidUriError(
- 'get_subresource on bucket-less URI (%s)' % self.uri)
+ self._check_bucket_uri('get_subresource')
bucket = self.get_bucket(validate, headers)
return bucket.get_subresource(subresource, self.object_name, headers,
version_id)
def add_group_email_grant(self, permission, email_address, recursive=False,
- validate=True, headers=None):
+ validate=False, headers=None):
+ self._check_bucket_uri('add_group_email_grant')
if self.scheme != 'gs':
raise ValueError('add_group_email_grant() not supported for %s '
'URIs.' % self.scheme)
@@ -277,14 +336,12 @@
bucket.add_group_email_grant(permission, email_address, recursive,
headers)
else:
- raise InvalidUriError('add_group_email_grant() on bucket-less URI %s' %
- self.uri)
+ raise InvalidUriError('add_group_email_grant() on bucket-less URI '
+ '%s' % self.uri)
def add_email_grant(self, permission, email_address, recursive=False,
- validate=True, headers=None):
- if not self.bucket_name:
- raise InvalidUriError('add_email_grant on bucket-less URI (%s)' %
- self.uri)
+ validate=False, headers=None):
+ self._check_bucket_uri('add_email_grant')
if not self.object_name:
bucket = self.get_bucket(validate, headers)
bucket.add_email_grant(permission, email_address, recursive,
@@ -295,10 +352,8 @@
key.add_email_grant(permission, email_address)
def add_user_grant(self, permission, user_id, recursive=False,
- validate=True, headers=None):
- if not self.bucket_name:
- raise InvalidUriError('add_user_grant on bucket-less URI (%s)' %
- self.uri)
+ validate=False, headers=None):
+ self._check_bucket_uri('add_user_grant')
if not self.object_name:
bucket = self.get_bucket(validate, headers)
bucket.add_user_grant(permission, user_id, recursive, headers)
@@ -308,39 +363,61 @@
key.add_user_grant(permission, user_id)
def list_grants(self, headers=None):
- if not self.bucket_name:
- raise InvalidUriError('list_grants on bucket-less URI (%s)' %
- self.uri)
+        self._check_bucket_uri('list_grants')
         bucket = self.get_bucket(headers=headers)
return bucket.list_grants(headers)
- def names_container(self):
- """Returns True if this URI names a bucket (vs. an object).
- """
- return not self.object_name
-
- def names_singleton(self):
- """Returns True if this URI names an object (vs. a bucket).
- """
- return self.object_name
-
def is_file_uri(self):
+ """Returns True if this URI names a file or directory."""
return False
def is_cloud_uri(self):
+ """Returns True if this URI names a bucket or object."""
return True
+ def names_container(self):
+ """
+ Returns True if this URI names a directory or bucket. Will return
+ False for bucket subdirs; providing bucket subdir semantics needs to
+ be done by the caller (like gsutil does).
+ """
+ return bool(not self.object_name)
+
+ def names_singleton(self):
+ """Returns True if this URI names a file or object."""
+ return bool(self.object_name)
+
+ def names_directory(self):
+ """Returns True if this URI names a directory."""
+ return False
+
+ def names_provider(self):
+ """Returns True if this URI names a provider."""
+ return bool(not self.bucket_name)
+
+ def names_bucket(self):
+ """Returns True if this URI names a bucket."""
+ return self.names_container()
+
+ def names_file(self):
+ """Returns True if this URI names a file."""
+ return False
+
+ def names_object(self):
+ """Returns True if this URI names an object."""
+ return self.names_singleton()
+
+ def is_stream(self):
+ """Returns True if this URI represents input/output stream."""
+ return False
+
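# Sketch of the finer-grained predicates on a cloud URI (bucket and object
# names are hypothetical):
import boto
u = boto.storage_uri('gs://my-bucket/obj.txt')
assert u.names_object() and u.names_singleton()
assert not (u.names_bucket() or u.names_provider() or u.is_stream())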
def create_bucket(self, headers=None, location='', policy=None):
- if self.bucket_name is None:
- raise InvalidUriError('create_bucket on bucket-less URI (%s)' %
- self.uri)
+        self._check_bucket_uri('create_bucket')
conn = self.connect()
return conn.create_bucket(self.bucket_name, headers, location, policy)
def delete_bucket(self, headers=None):
- if self.bucket_name is None:
- raise InvalidUriError('delete_bucket on bucket-less URI (%s)' %
- self.uri)
+ self._check_bucket_uri('delete_bucket')
conn = self.connect()
return conn.delete_bucket(self.bucket_name, headers)
@@ -354,50 +431,41 @@
self.check_response(provider, 'provider', self.uri)
return provider
- def set_acl(self, acl_or_str, key_name='', validate=True, headers=None,
+ def set_acl(self, acl_or_str, key_name='', validate=False, headers=None,
version_id=None):
"""sets or updates a bucket's acl"""
- if not self.bucket_name:
- raise InvalidUriError('set_acl on bucket-less URI (%s)' %
- self.uri)
+ self._check_bucket_uri('set_acl')
+ key_name = key_name or self.object_name or ''
self.get_bucket(validate, headers).set_acl(acl_or_str, key_name,
headers, version_id)
- def set_def_acl(self, acl_or_str, key_name='', validate=True, headers=None,
- version_id=None):
+ def set_def_acl(self, acl_or_str, key_name='', validate=False,
+ headers=None, version_id=None):
"""sets or updates a bucket's default object acl"""
- if not self.bucket_name:
- raise InvalidUriError('set_acl on bucket-less URI (%s)' %
- self.uri)
- self.get_bucket(validate, headers).set_def_acl(acl_or_str, key_name,
- headers)
+ self._check_bucket_uri('set_def_acl')
+ self.get_bucket(validate, headers).set_def_acl(acl_or_str, '', headers)
- def set_canned_acl(self, acl_str, validate=True, headers=None,
+ def set_canned_acl(self, acl_str, validate=False, headers=None,
version_id=None):
"""sets or updates a bucket's acl to a predefined (canned) value"""
- if not self.object_name:
- raise InvalidUriError('set_canned_acl on object-less URI (%s)' %
- self.uri)
+ self._check_object_uri('set_canned_acl')
+ self._warn_about_args('set_canned_acl', version_id=version_id)
key = self.get_key(validate, headers)
self.check_response(key, 'key', self.uri)
- key.set_canned_acl(acl_str, headers, version_id)
+ key.set_canned_acl(acl_str, headers)
- def set_def_canned_acl(self, acl_str, validate=True, headers=None,
- version_id=None):
- """sets or updates a bucket's default object acl to a predefined
+ def set_def_canned_acl(self, acl_str, validate=False, headers=None,
+ version_id=None):
+ """sets or updates a bucket's default object acl to a predefined
(canned) value"""
- if not self.object_name:
- raise InvalidUriError('set_canned_acl on object-less URI (%s)' %
- self.uri)
+        self._check_bucket_uri('set_def_canned_acl')
key = self.get_key(validate, headers)
self.check_response(key, 'key', self.uri)
key.set_def_canned_acl(acl_str, headers, version_id)
- def set_subresource(self, subresource, value, validate=True, headers=None,
+ def set_subresource(self, subresource, value, validate=False, headers=None,
version_id=None):
- if not self.bucket_name:
- raise InvalidUriError(
- 'set_subresource on bucket-less URI (%s)' % self.uri)
+ self._check_bucket_uri('set_subresource')
bucket = self.get_bucket(validate, headers)
bucket.set_subresource(subresource, value, self.object_name, headers,
version_id)
@@ -405,27 +473,79 @@
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False):
+ self._check_object_uri('set_contents_from_string')
key = self.new_key(headers=headers)
- key.set_contents_from_string(s, headers, replace, cb, num_cb, policy,
- md5, reduced_redundancy)
+ if self.scheme == 'gs':
+ if reduced_redundancy:
+ sys.stderr.write('Warning: GCS does not support '
+ 'reduced_redundancy; argument ignored by '
+                                 'set_contents_from_string\n')
+ key.set_contents_from_string(s, headers, replace, cb, num_cb,
+ policy, md5)
+ else:
+ key.set_contents_from_string(s, headers, replace, cb, num_cb,
+ policy, md5, reduced_redundancy)
- def enable_logging(self, target_bucket, target_prefix=None,
- canned_acl=None, validate=True, headers=None,
- version_id=None):
- if not self.bucket_name:
- raise InvalidUriError(
- 'disable_logging on bucket-less URI (%s)' % self.uri)
+ def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,
+ num_cb=10, policy=None, md5=None, size=None,
+ rewind=False, res_upload_handler=None):
+ self._check_object_uri('set_contents_from_file')
+ key = self.new_key(headers=headers)
+ if self.scheme == 'gs':
+ return key.set_contents_from_file(
+ fp, headers, replace, cb, num_cb, policy, md5, size=size,
+ rewind=rewind, res_upload_handler=res_upload_handler)
+ else:
+ self._warn_about_args('set_contents_from_file',
+ res_upload_handler=res_upload_handler)
+ return key.set_contents_from_file(fp, headers, replace, cb, num_cb,
+ policy, md5, size=size,
+ rewind=rewind)
+
+ def set_contents_from_stream(self, fp, headers=None, replace=True, cb=None,
+ policy=None, reduced_redundancy=False):
+ self._check_object_uri('set_contents_from_stream')
+ dst_key = self.new_key(False, headers)
+ dst_key.set_contents_from_stream(fp, headers, replace, cb,
+ policy=policy,
+ reduced_redundancy=reduced_redundancy)
+
+ def copy_key(self, src_bucket_name, src_key_name, metadata=None,
+ src_version_id=None, storage_class='STANDARD',
+ preserve_acl=False, encrypt_key=False, headers=None,
+ query_args=None):
+ self._check_object_uri('copy_key')
+ dst_bucket = self.get_bucket(validate=False, headers=headers)
+ dst_bucket.copy_key(new_key_name=self.object_name,
+ src_bucket_name=src_bucket_name,
+ src_key_name=src_key_name, metadata=metadata,
+ src_version_id=src_version_id,
+ storage_class=storage_class,
+ preserve_acl=preserve_acl, encrypt_key=encrypt_key,
+ headers=headers, query_args=query_args)
+
+ def enable_logging(self, target_bucket, target_prefix=None, validate=False,
+ headers=None, version_id=None):
+ self._check_bucket_uri('enable_logging')
bucket = self.get_bucket(validate, headers)
- bucket.enable_logging(target_bucket, target_prefix, headers=headers,
- canned_acl=canned_acl)
+ bucket.enable_logging(target_bucket, target_prefix, headers=headers)
- def disable_logging(self, validate=True, headers=None, version_id=None):
- if not self.bucket_name:
- raise InvalidUriError(
- 'disable_logging on bucket-less URI (%s)' % self.uri)
+ def disable_logging(self, validate=False, headers=None, version_id=None):
+ self._check_bucket_uri('disable_logging')
bucket = self.get_bucket(validate, headers)
bucket.disable_logging(headers=headers)
+ def set_website_config(self, main_page_suffix=None, error_key=None,
+ validate=False, headers=None):
+ bucket = self.get_bucket(validate, headers)
+ if not (main_page_suffix or error_key):
+ bucket.delete_website_configuration(headers)
+ else:
+ bucket.configure_website(main_page_suffix, error_key, headers)
+
+ def get_website_config(self, validate=False, headers=None):
+ bucket = self.get_bucket(validate, headers)
+ return bucket.get_website_configuration_with_xml(headers)
class FileStorageUri(StorageUri):
@@ -436,6 +556,8 @@
See file/README about how we map StorageUri operations onto a file system.
"""
+ delim = os.sep
+
def __init__(self, object_name, debug, is_stream=False):
"""Instantiate a FileStorageUri from a path name.
@@ -465,31 +587,50 @@
"""
return FileStorageUri(new_name, self.debug, self.stream)
- def names_container(self):
- """Returns True if this URI is not representing input/output stream
- and names a directory.
- """
- if not self.stream:
- return os.path.isdir(self.object_name)
- else:
- return False
-
- def names_singleton(self):
- """Returns True if this URI names a file or
- if URI represents input/output stream.
- """
- if self.stream:
- return True
- else:
- return os.path.isfile(self.object_name)
-
def is_file_uri(self):
+ """Returns True if this URI names a file or directory."""
return True
def is_cloud_uri(self):
+ """Returns True if this URI names a bucket or object."""
+ return False
+
+ def names_container(self):
+ """Returns True if this URI names a directory or bucket."""
+ return self.names_directory()
+
+ def names_singleton(self):
+ """Returns True if this URI names a file (or stream) or object."""
+ return not self.names_container()
+
+ def names_directory(self):
+ """Returns True if this URI names a directory."""
+ if self.stream:
+ return False
+ return os.path.isdir(self.object_name)
+
+ def names_provider(self):
+ """Returns True if this URI names a provider."""
+ return False
+
+ def names_bucket(self):
+ """Returns True if this URI names a bucket."""
+ return False
+
+ def names_file(self):
+ """Returns True if this URI names a file."""
+ return self.names_singleton()
+
+ def names_object(self):
+ """Returns True if this URI names an object."""
return False
def is_stream(self):
- """Retruns True if this URI represents input/output stream.
+ """Returns True if this URI represents input/output stream.
"""
- return self.stream
+ return bool(self.stream)
+
+ def close(self):
+ """Closes the underlying file.
+ """
+ self.get_key().close()
diff --git a/boto/sts/__init__.py b/boto/sts/__init__.py
index 7ee10b4..05fd74e 100644
--- a/boto/sts/__init__.py
+++ b/boto/sts/__init__.py
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -23,6 +23,7 @@
from connection import STSConnection
from boto.regioninfo import RegionInfo
+
def regions():
"""
Get all available regions for the STS service.
@@ -35,14 +36,15 @@
connection_cls=STSConnection)
]
+
def connect_to_region(region_name, **kw_params):
"""
- Given a valid region name, return a
+ Given a valid region name, return a
:class:`boto.sts.connection.STSConnection`.
     :type region_name: str
:param region_name: The name of the region to connect to.
-
+
:rtype: :class:`boto.sts.connection.STSConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
@@ -51,20 +53,3 @@
if region.name == region_name:
return region.connect(**kw_params)
return None
-
-def get_region(region_name, **kw_params):
- """
- Find and return a :class:`boto.regioninfo.RegionInfo` object
- given a region name.
-
- :type: str
- :param: The name of the region.
-
- :rtype: :class:`boto.regioninfo.RegionInfo`
- :return: The RegionInfo object for the given region or None if
- an invalid region name is provided.
- """
- for region in regions(**kw_params):
- if region.name == region_name:
- return region
- return None
diff --git a/boto/sts/connection.py b/boto/sts/connection.py
index 6761327..42835b0 100644
--- a/boto/sts/connection.py
+++ b/boto/sts/connection.py
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -24,6 +24,12 @@
from boto.regioninfo import RegionInfo
from credentials import Credentials, FederationToken
import boto
+import boto.utils
+import datetime
+import threading
+
+_session_token_cache = {}
+
class STSConnection(AWSQueryConnection):
@@ -35,42 +41,102 @@
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
- converter=None):
+ converter=None, validate_certs=True):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint,
connection_cls=STSConnection)
self.region = region
+ self._mutex = threading.Semaphore()
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
- https_connection_factory, path)
+ https_connection_factory, path,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return ['sign-v2']
- def get_session_token(self, duration=None):
- """
- :type duration: int
- :param duration: The number of seconds the credentials should
- remain valid.
+ def _check_token_cache(self, token_key, duration=None, window_seconds=60):
+ token = _session_token_cache.get(token_key, None)
+ if token:
+ now = datetime.datetime.utcnow()
+ expires = boto.utils.parse_ts(token.expiration)
+ delta = expires - now
+ if delta < datetime.timedelta(seconds=window_seconds):
+ msg = 'Cached session token %s is expired' % token_key
+ boto.log.debug(msg)
+ token = None
+ return token
- """
+ def _get_session_token(self, duration=None,
+ mfa_serial_number=None, mfa_token=None):
params = {}
if duration:
- params['Duration'] = duration
+ params['DurationSeconds'] = duration
+ if mfa_serial_number:
+ params['SerialNumber'] = mfa_serial_number
+ if mfa_token:
+ params['TokenCode'] = mfa_token
return self.get_object('GetSessionToken', params,
Credentials, verb='POST')
-
-
+
+ def get_session_token(self, duration=None, force_new=False,
+ mfa_serial_number=None, mfa_token=None):
+ """
+ Return a valid session token. Because retrieving new tokens
+ from the Secure Token Service is a fairly heavyweight operation
+ this module caches previously retrieved tokens and returns
+ them when appropriate. Each token is cached with a key
+ consisting of the region name of the STS endpoint
+ concatenated with the requesting user's access id. If there
+        is a token in the cache matching this key, the session
+ expiration is checked to make sure it is still valid and if
+ so, the cached token is returned. Otherwise, a new session
+ token is requested from STS and it is placed into the cache
+ and returned.
+
+ :type duration: int
+ :param duration: The number of seconds the credentials should
+ remain valid.
+
+ :type force_new: bool
+ :param force_new: If this parameter is True, a new session token
+ will be retrieved from the Secure Token Service regardless
+ of whether there is a valid cached token or not.
+
+ :type mfa_serial_number: str
+ :param mfa_serial_number: The serial number of an MFA device.
+            If this is provided and the mfa_token provided is
+            valid, the temporary session token will be authorized
+            to perform operations requiring MFA device authentication.
+
+ :type mfa_token: str
+ :param mfa_token: The 6 digit token associated with the
+ MFA device.
+ """
+ token_key = '%s:%s' % (self.region.name, self.provider.access_key)
+ token = self._check_token_cache(token_key, duration)
+ if force_new or not token:
+ boto.log.debug('fetching a new token for %s' % token_key)
+ try:
+ self._mutex.acquire()
+ token = self._get_session_token(duration,
+ mfa_serial_number,
+ mfa_token)
+ _session_token_cache[token_key] = token
+ finally:
+ self._mutex.release()
+ return token
+
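# Sketch of the caching behavior described above; the region name is
# assumed and credentials come from the usual boto config/environment.
import boto.sts
sts = boto.sts.connect_to_region('us-east-1')
tok = sts.get_session_token(duration=3600)   # hits STS, populates the cache
tok = sts.get_session_token()                # served from the cache
tok = sts.get_session_token(force_new=True)  # bypasses the cache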
def get_federation_token(self, name, duration=None, policy=None):
"""
:type name: str
:param name: The name of the Federated user associated with
the credentials.
-
+
:type duration: int
:param duration: The number of seconds the credentials should
remain valid.
@@ -79,12 +145,10 @@
:param policy: A JSON policy to associate with these credentials.
"""
- params = {'Name' : name}
+ params = {'Name': name}
if duration:
- params['Duration'] = duration
+ params['DurationSeconds'] = duration
if policy:
params['Policy'] = policy
return self.get_object('GetFederationToken', params,
FederationToken, verb='POST')
-
-
diff --git a/boto/sts/credentials.py b/boto/sts/credentials.py
index daf4c78..f6d5174 100644
--- a/boto/sts/credentials.py
+++ b/boto/sts/credentials.py
@@ -20,6 +20,14 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+import boto.utils
+import os
+import datetime
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
class Credentials(object):
"""
:ivar access_key: The AccessKeyID.
@@ -36,6 +44,36 @@
self.session_token = None
self.expiration = None
+ @classmethod
+ def from_json(cls, json_doc):
+ """
+ Create and return a new Session Token based on the contents
+ of a JSON document.
+
+ :type json_doc: str
+ :param json_doc: A string containing a JSON document with a
+ previously saved Credentials object.
+ """
+ d = json.loads(json_doc)
+ token = cls()
+ token.__dict__.update(d)
+ return token
+
+ @classmethod
+ def load(cls, file_path):
+ """
+ Create and return a new Session Token based on the contents
+ of a previously saved JSON-format file.
+
+ :type file_path: str
+ :param file_path: The fully qualified path to the JSON-format
+ file containing the previously saved Session Token information.
+ """
+ fp = open(file_path)
+ json_doc = fp.read()
+ fp.close()
+ return cls.from_json(json_doc)
+
def startElement(self, name, attrs, connection):
return None
@@ -52,6 +90,54 @@
self.request_id = value
else:
pass
+
+ def to_dict(self):
+ """
+ Return a Python dict containing the important information
+ about this Session Token.
+ """
+ return {'access_key': self.access_key,
+ 'secret_key': self.secret_key,
+ 'session_token': self.session_token,
+ 'expiration': self.expiration,
+ 'request_id': self.request_id}
+
+ def save(self, file_path):
+ """
+ Persist a Session Token to a file in JSON format.
+
+        :type file_path: str
+        :param file_path: The fully qualified path to the file where
+            the Session Token data should be written. Any previous
+ data in the file will be overwritten. To help protect
+ the credentials contained in the file, the permissions
+ of the file will be set to readable/writable by owner only.
+ """
+ fp = open(file_path, 'wb')
+ json.dump(self.to_dict(), fp)
+ fp.close()
+ os.chmod(file_path, 0600)
+
+ def is_expired(self, time_offset_seconds=0):
+ """
+ Checks to see if the Session Token is expired or not. By default
+ it will check to see if the Session Token is expired as of the
+ moment the method is called. However, you can supply an
+ optional parameter which is the number of seconds of offset
+ into the future for the check. For example, if you supply
+        a value of 5, this method will return True if the Session
+ Token will be expired 5 seconds from this moment.
+
+ :type time_offset_seconds: int
+ :param time_offset_seconds: The number of seconds into the future
+ to test the Session Token for expiration.
+ """
+ now = datetime.datetime.utcnow()
+ if time_offset_seconds:
+ now = now + datetime.timedelta(seconds=time_offset_seconds)
+ ts = boto.utils.parse_ts(self.expiration)
+ delta = ts - now
+ return delta.total_seconds() <= 0
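# Sketch: persisting a session token across processes with the new helpers;
# `sts` and `tok` continue the sketch above, and the path is hypothetical.
from boto.sts.credentials import Credentials
tok.save('/tmp/session_token.json')          # written owner-only (0600)
tok = Credentials.load('/tmp/session_token.json')
if tok.is_expired(time_offset_seconds=300):  # expires within 5 minutes?
    tok = sts.get_session_token(force_new=True)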
class FederationToken(object):
"""
diff --git a/boto/swf/__init__.py b/boto/swf/__init__.py
new file mode 100644
index 0000000..34abc1d
--- /dev/null
+++ b/boto/swf/__init__.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.ec2.regioninfo import RegionInfo
+import boto.swf.layer1
+
+
+def regions():
+ """
+ Get all available regions for the Amazon Simple Workflow service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ return [RegionInfo(name='us-east-1',
+ endpoint='swf.us-east-1.amazonaws.com',
+ connection_cls=boto.swf.layer1.Layer1),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
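# Sketch: obtaining the new low-level SWF client (only us-east-1 is listed):
import boto.swf
swf = boto.swf.connect_to_region('us-east-1')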
diff --git a/boto/swf/exceptions.py b/boto/swf/exceptions.py
new file mode 100644
index 0000000..f3ac6ae
--- /dev/null
+++ b/boto/swf/exceptions.py
@@ -0,0 +1,44 @@
+"""
+Exceptions that are specific to the swf module.
+
+This module subclasses the base SWF response exception,
+boto.exceptions.SWFResponseError, for some of the SWF specific faults.
+"""
+from boto.exception import SWFResponseError
+
+
+class SWFDomainAlreadyExistsError(SWFResponseError):
+ """
+    Raised when the domain already exists.
+ """
+ pass
+
+
+class SWFLimitExceededError(SWFResponseError):
+ """
+    Raised when a system-imposed limitation has been reached.
+ """
+ pass
+
+
+class SWFOperationNotPermittedError(SWFResponseError):
+ """
+    Raised when the operation is not permitted (reserved for future use).
+ """
+
+
+class SWFTypeAlreadyExistsError(SWFResponseError):
+ """
+    Raised when the workflow type or activity type already exists.
+ """
+ pass
+
+
+class SWFWorkflowExecutionAlreadyStartedError(SWFResponseError):
+ """
+ Raised when an open execution with the same workflow_id is already running
+ in the specified domain.
+ """
diff --git a/boto/swf/layer1.py b/boto/swf/layer1.py
new file mode 100644
index 0000000..f11963b
--- /dev/null
+++ b/boto/swf/layer1.py
@@ -0,0 +1,1553 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.connection import AWSAuthConnection
+from boto.provider import Provider
+from boto.exception import SWFResponseError
+from boto.swf import exceptions as swf_exceptions
+
+import time
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+#
+# To get full debug output, uncomment the following line and set the
+# value of Debug to be 2
+#
+#boto.set_stream_logger('swf')
+Debug = 0
+
+
+class Layer1(AWSAuthConnection):
+ """
+ Low-level interface to Simple WorkFlow Service.
+ """
+
+ DefaultRegionName = 'us-east-1'
+ """The default region name for Simple Workflow."""
+
+ ServiceName = 'com.amazonaws.swf.service.model.SimpleWorkflowService'
+ """The name of the Service"""
+
+ # In some cases, the fault response __type value is mapped to
+ # an exception class more specific than SWFResponseError.
+ _fault_excp = {
+ 'com.amazonaws.swf.base.model#DomainAlreadyExistsFault':
+ swf_exceptions.SWFDomainAlreadyExistsError,
+ 'com.amazonaws.swf.base.model#LimitExceededFault':
+ swf_exceptions.SWFLimitExceededError,
+ 'com.amazonaws.swf.base.model#OperationNotPermittedFault':
+ swf_exceptions.SWFOperationNotPermittedError,
+ 'com.amazonaws.swf.base.model#TypeAlreadyExistsFault':
+ swf_exceptions.SWFTypeAlreadyExistsError,
+ 'com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault':
+ swf_exceptions.SWFWorkflowExecutionAlreadyStartedError,
+ }
+
+ ResponseError = SWFResponseError
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None, proxy=None, proxy_port=None,
+ debug=0, session_token=None, region=None):
+ if not region:
+ region_name = boto.config.get('SWF', 'region',
+ self.DefaultRegionName)
+ for reg in boto.swf.regions():
+ if reg.name == region_name:
+ region = reg
+ break
+
+ self.region = region
+ AWSAuthConnection.__init__(self, self.region.endpoint,
+ aws_access_key_id, aws_secret_access_key,
+ is_secure, port, proxy, proxy_port,
+ debug, session_token)
+
+ def _required_auth_capability(self):
+ return ['hmac-v3-http']
+
+ def make_request(self, action, body='', object_hook=None):
+ """
+ :raises: ``SWFResponseError`` if response status is not 200.
+ """
+ headers = {'X-Amz-Target': '%s.%s' % (self.ServiceName, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/json; charset=UTF-8',
+ 'Content-Encoding': 'amz-1.0',
+ 'Content-Length': str(len(body))}
+ http_request = self.build_base_http_request('POST', '/', '/',
+ {}, headers, body, None)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read()
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body, object_hook=object_hook)
+ else:
+ return None
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ # Certain faults get mapped to more specific exception classes.
+ excp_cls = self._fault_excp.get(fault_name, self.ResponseError)
+ raise excp_cls(response.status, response.reason, body=json_body)
+
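# Sketch: the fault mapping above surfaces as typed exceptions, e.g. when
# registering a domain twice. register_domain and its arguments are assumed
# from later in this file; `swf` is the connection from the earlier sketch.
from boto.swf import exceptions as swf_exceptions
try:
    swf.register_domain('my-domain', '7')
except swf_exceptions.SWFDomainAlreadyExistsError:
    pass  # already registered; safe to continue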
+ # Actions related to Activities
+
+ def poll_for_activity_task(self, domain, task_list, identity=None):
+ """
+ Used by workers to get an ActivityTask from the specified
+ activity taskList. This initiates a long poll, where the
+ service holds the HTTP connection open and responds as soon as
+ a task becomes available. The maximum time the service holds
+ on to the request before responding is 60 seconds. If no task
+ is available within 60 seconds, the poll will return an empty
+ result. An empty result, in this context, means that an
+ ActivityTask is returned, but that the value of taskToken is
+ an empty string. If a task is returned, the worker should use
+ its type to identify and process it correctly.
+
+ :type domain: string
+ :param domain: The name of the domain that contains the task
+ lists being polled.
+
+ :type task_list: string
+ :param task_list: Specifies the task list to poll for activity tasks.
+
+ :type identity: string
+ :param identity: Identity of the worker making the request, which
+ is recorded in the ActivityTaskStarted event in the workflow
+ history. This enables diagnostic tracing when problems arise.
+ The form of this identity is user defined.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain, 'taskList': {'name': task_list}}
+ if identity:
+ data['identity'] = identity
+ json_input = json.dumps(data)
+ return self.make_request('PollForActivityTask', json_input)
+
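# Sketch of a worker built on the long-poll semantics described above;
# `swf` is the Layer1 connection from the earlier sketch, names assumed.
task = swf.poll_for_activity_task('my-domain', 'my-task-list',
                                  identity='worker-1')
if task.get('taskToken'):  # empty token means the 60s poll returned no work
    swf.respond_activity_task_completed(task['taskToken'], result='done')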
+ def respond_activity_task_completed(self, task_token, result=None):
+ """
+ Used by workers to tell the service that the ActivityTask
+ identified by the taskToken completed successfully with a
+ result (if provided).
+
+ :type task_token: string
+ :param task_token: The taskToken of the ActivityTask.
+
+ :type result: string
+ :param result: The result of the activity task. It is a free
+ form string that is implementation specific.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'taskToken': task_token}
+ if result:
+ data['result'] = result
+ json_input = json.dumps(data)
+ return self.make_request('RespondActivityTaskCompleted', json_input)
+
+ def respond_activity_task_failed(self, task_token,
+ details=None, reason=None):
+ """
+ Used by workers to tell the service that the ActivityTask
+ identified by the taskToken has failed with reason (if
+ specified).
+
+ :type task_token: string
+ :param task_token: The taskToken of the ActivityTask.
+
+ :type details: string
+ :param details: Optional detailed information about the failure.
+
+ :type reason: string
+ :param reason: Description of the error that may assist in diagnostics.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'taskToken': task_token}
+ if details:
+ data['details'] = details
+ if reason:
+ data['reason'] = reason
+ json_input = json.dumps(data)
+ return self.make_request('RespondActivityTaskFailed', json_input)
+
+ def respond_activity_task_canceled(self, task_token, details=None):
+ """
+ Used by workers to tell the service that the ActivityTask
+ identified by the taskToken was successfully
+ canceled. Additional details can be optionally provided using
+ the details argument.
+
+ :type task_token: string
+ :param task_token: The taskToken of the ActivityTask.
+
+ :type details: string
+ :param details: Optional detailed information about the failure.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'taskToken': task_token}
+ if details:
+ data['details'] = details
+ json_input = json.dumps(data)
+ return self.make_request('RespondActivityTaskCanceled', json_input)
+
+ def record_activity_task_heartbeat(self, task_token, details=None):
+ """
+ Used by activity workers to report to the service that the
+ ActivityTask represented by the specified taskToken is still
+ making progress. The worker can also (optionally) specify
+ details of the progress, for example percent complete, using
+ the details parameter. This action can also be used by the
+ worker as a mechanism to check if cancellation is being
+ requested for the activity task. If a cancellation is being
+ attempted for the specified task, then the boolean
+ cancelRequested flag returned by the service is set to true.
+
+ :type task_token: string
+ :param task_token: The taskToken of the ActivityTask.
+
+ :type details: string
+ :param details: If specified, contains details about the
+ progress of the task.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'taskToken': task_token}
+ if details:
+ data['details'] = details
+ json_input = json.dumps(data)
+ return self.make_request('RecordActivityTaskHeartbeat', json_input)
+
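+ # Sketch of the worker-side protocol (placeholder helpers): report
+ # progress while working, honor a cancellation request, and finish
+ # with exactly one of the respond_* calls above.
+ #
+ #   token = task['taskToken']
+ #   hb = conn.record_activity_task_heartbeat(token, details='50%')
+ #   if hb.get('cancelRequested'):
+ #       conn.respond_activity_task_canceled(token, details='cancelled')
+ #   else:
+ #       conn.respond_activity_task_completed(token, result='done')
+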
+ # Actions related to Deciders
+
+ def poll_for_decision_task(self, domain, task_list, identity=None,
+ maximum_page_size=None,
+ next_page_token=None,
+ reverse_order=None):
+ """
+ Used by deciders to get a DecisionTask from the specified
+ decision taskList. A decision task may be returned for any
+ open workflow execution that is using the specified task
+ list. The task includes a paginated view of the history of the
+ workflow execution. The decider should use the workflow type
+ and the history to determine how to properly handle the task.
+
+ :type domain: string
+ :param domain: The name of the domain containing the task
+ lists to poll.
+
+ :type task_list: string
+ :param task_list: Specifies the task list to poll for decision tasks.
+
+ :type identity: string
+ :param identity: Identity of the decider making the request,
+ which is recorded in the DecisionTaskStarted event in the
+ workflow history. This enables diagnostic tracing when
+ problems arise. The form of this identity is user defined.
+
+ :type maximum_page_size: integer
+ :param maximum_page_size: The maximum number of history events
+ returned in each page. The default is 100, but the caller can
+ override this value to a page size smaller than the default.
+ You cannot specify a page size greater than 100.
+
+ :type next_page_token: string
+ :param next_page_token: If on a previous call to this method a
+ NextPageToken was returned, the results are being paginated.
+ To get the next page of results, repeat the call with the
+ returned token and all other arguments unchanged.
+
+ :type reverse_order: boolean
+ :param reverse_order: When set to true, returns the events in
+ reverse order. By default the results are returned in
+ ascending order of the eventTimestamp of the events.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain, 'taskList': {'name': task_list}}
+ if identity:
+ data['identity'] = identity
+ if maximum_page_size:
+ data['maximumPageSize'] = maximum_page_size
+ if next_page_token:
+ data['nextPageToken'] = next_page_token
+ if reverse_order:
+ data['reverseOrder'] = 'true'
+ json_input = json.dumps(data)
+ return self.make_request('PollForDecisionTask', json_input)
+
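+ # Sketch (placeholder names): collect the full paginated history
+ # before deciding. The taskToken of interest is on the first page.
+ #
+ #   page = conn.poll_for_decision_task('my-domain', 'my-task-list')
+ #   token = page.get('taskToken')
+ #   events = page.get('events', [])
+ #   while token and page.get('nextPageToken'):
+ #       page = conn.poll_for_decision_task(
+ #           'my-domain', 'my-task-list',
+ #           next_page_token=page['nextPageToken'])
+ #       events.extend(page.get('events', []))
+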
+ def respond_decision_task_completed(self, task_token,
+ decisions=None,
+ execution_context=None):
+ """
+ Used by deciders to tell the service that the DecisionTask
+ identified by the taskToken has successfully completed.
+ The decisions argument specifies the list of decisions
+ made while processing the task.
+
+ :type task_token: string
+ :param task_token: The taskToken of the DecisionTask.
+
+ :type decisions: list
+ :param decisions: The list of decisions (possibly empty) made by
+ the decider while processing this decision task. See the docs
+ for the Decision structure for details.
+
+ :type execution_context: string
+ :param execution_context: User defined context to add to
+ workflow execution.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'taskToken': task_token}
+ if decisions:
+ data['decisions'] = decisions
+ if execution_context:
+ data['executionContext'] = execution_context
+ json_input = json.dumps(data)
+ return self.make_request('RespondDecisionTaskCompleted', json_input)
+
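+ # Sketch: decisions are plain dicts in the shape the service
+ # expects; the Layer1Decisions helper added in layer1_decisions.py
+ # below builds them. Names here are placeholders.
+ #
+ #   d = Layer1Decisions()
+ #   d.complete_workflow_execution(result='all done')
+ #   conn.respond_decision_task_completed(token, decisions=d._data)
+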
+ def request_cancel_workflow_execution(self, domain, workflow_id,
+ run_id=None):
+ """
+ Records a WorkflowExecutionCancelRequested event in the
+ currently running workflow execution identified by the given
+ domain, workflowId, and runId. This logically requests the
+ cancellation of the workflow execution as a whole. It is up to
+ the decider to take appropriate actions when it receives an
+ execution history with this event.
+
+ :type domain: string
+ :param domain: The name of the domain containing the workflow
+ execution to cancel.
+
+ :type run_id: string
+ :param run_id: The runId of the workflow execution to cancel.
+
+ :type workflow_id: string
+ :param workflow_id: The workflowId of the workflow execution
+ to cancel.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain, 'workflowId': workflow_id}
+ if run_id:
+ data['runId'] = run_id
+ json_input = json.dumps(data)
+ return self.make_request('RequestCancelWorkflowExecution', json_input)
+
+ def start_workflow_execution(self, domain, workflow_id,
+ workflow_name, workflow_version,
+ task_list=None, child_policy=None,
+ execution_start_to_close_timeout=None,
+ input=None, tag_list=None,
+ task_start_to_close_timeout=None):
+ """
+ Starts an execution of the workflow type in the specified
+ domain using the provided workflowId and input data.
+
+ :type domain: string
+ :param domain: The name of the domain in which the workflow
+ execution is created.
+
+ :type workflow_id: string
+ :param workflow_id: The user defined identifier associated with
+ the workflow execution. You can use this to associate a
+ custom identifier with the workflow execution. You may
+ specify the same identifier if a workflow execution is
+ logically a restart of a previous execution. You cannot
+ have two open workflow executions with the same workflowId
+ at the same time.
+
+ :type workflow_name: string
+ :param workflow_name: The name of the workflow type.
+
+ :type workflow_version: string
+ :param workflow_version: The version of the workflow type.
+
+ :type task_list: string
+ :param task_list: The task list to use for the decision tasks
+ generated for this workflow execution. This overrides the
+ defaultTaskList specified when registering the workflow type.
+
+ :type child_policy: string
+ :param child_policy: If set, specifies the policy to use for the
+ child workflow executions of this workflow execution if it
+ is terminated, by calling the TerminateWorkflowExecution
+ action explicitly or due to an expired timeout. This policy
+ overrides the default child policy specified when registering
+ the workflow type using RegisterWorkflowType. The supported
+ child policies are:
+
+ * TERMINATE: the child executions will be terminated.
+ * REQUEST_CANCEL: a request to cancel will be attempted
+ for each child execution by recording a
+ WorkflowExecutionCancelRequested event in its history.
+ It is up to the decider to take appropriate actions
+ when it receives an execution history with this event.
+ * ABANDON: no action will be taken. The child executions
+ will continue to run.
+
+ :type execution_start_to_close_timeout: string
+ :param execution_start_to_close_timeout: The total duration for
+ this workflow execution. This overrides the
+ defaultExecutionStartToCloseTimeout specified when
+ registering the workflow type.
+
+ :type input: string
+ :param input: The input for the workflow
+ execution. This is a free form string which should be
+ meaningful to the workflow you are starting. This input is
+ made available to the new workflow execution in the
+ WorkflowExecutionStarted history event.
+
+ :type tag_list: list
+ :param tag_list: The list of tags to associate with the
+ workflow execution. You can specify a maximum of 5 tags. You
+ can list workflow executions with a specific tag by calling
+ list_open_workflow_executions or list_closed_workflow_executions
+ and specifying a TagFilter.
+
+ :type task_start_to_close_timeout: string
+ :param task_start_to_close_timeout: Specifies the maximum duration
+ of decision tasks for this workflow execution. This parameter
+ overrides the defaultTaskStartToCloseTimeout specified when
+ registering the workflow type using register_workflow_type.
+
+ :raises: UnknownResourceFault, TypeDeprecatedFault,
+ SWFWorkflowExecutionAlreadyStartedError, SWFLimitExceededError,
+ SWFOperationNotPermittedError, DefaultUndefinedFault
+ """
+ data = {'domain': domain, 'workflowId': workflow_id}
+ data['workflowType'] = {'name': workflow_name,
+ 'version': workflow_version}
+ if task_list:
+ data['taskList'] = {'name': task_list}
+ if child_policy:
+ data['childPolicy'] = child_policy
+ if execution_start_to_close_timeout:
+ data['executionStartToCloseTimeout'] = execution_start_to_close_timeout
+ if input:
+ data['input'] = input
+ if tag_list:
+ data['tagList'] = tag_list
+ if task_start_to_close_timeout:
+ data['taskStartToCloseTimeout'] = task_start_to_close_timeout
+ json_input = json.dumps(data)
+ return self.make_request('StartWorkflowExecution', json_input)
+
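+ # Sketch (placeholder names/values): note that the timeouts are
+ # passed as strings of seconds, per the SWF API.
+ #
+ #   conn.start_workflow_execution(
+ #       'my-domain', 'order-1234', 'ProcessOrder', '1.0',
+ #       task_list='my-task-list',
+ #       execution_start_to_close_timeout='3600',
+ #       input='{"order_id": 1234}')
+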
+ def signal_workflow_execution(self, domain, signal_name, workflow_id,
+ input=None, run_id=None):
+ """
+ Records a WorkflowExecutionSignaled event in the workflow
+ execution history and creates a decision task for the workflow
+ execution identified by the given domain, workflowId and
+ runId. The event is recorded with the specified user defined
+ signalName and input (if provided).
+
+ :type domain: string
+ :param domain: The name of the domain containing the workflow
+ execution to signal.
+
+ :type signal_name: string
+ :param signal_name: The name of the signal. This name must be
+ meaningful to the target workflow.
+
+ :type workflow_id: string
+ :param workflow_id: The workflowId of the workflow execution
+ to signal.
+
+ :type input: string
+ :param input: Data to attach to the WorkflowExecutionSignaled
+ event in the target workflow execution's history.
+
+ :type run_id: string
+ :param run_id: The runId of the workflow execution to signal.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain, 'signalName': signal_name,
+ 'workflowId': workflow_id}
+ if input:
+ data['input'] = input
+ if run_id:
+ data['runId'] = run_id
+ json_input = json.dumps(data)
+ return self.make_request('SignalWorkflowExecution', json_input)
+
+ def terminate_workflow_execution(self, domain, workflow_id,
+ child_policy=None, details=None,
+ reason=None, run_id=None):
+ """
+ Records a WorkflowExecutionTerminated event and forces closure
+ of the workflow execution identified by the given domain,
+ runId, and workflowId. The child policy, registered with the
+ workflow type or specified when starting this execution, is
+ applied to any open child workflow executions of this workflow
+ execution.
+
+ :type domain: string
+ :param domain: The domain of the workflow execution to terminate.
+
+ :type workflow_id: string
+ :param workflow_id: The workflowId of the workflow execution
+ to terminate.
+
+ :type child_policy: string
+ :param child_policy: If set, specifies the policy to use for
+ the child workflow executions of the workflow execution being
+ terminated. This policy overrides the child policy specified
+ for the workflow execution at registration time or when
+ starting the execution. The supported child policies are:
+
+ * TERMINATE: the child executions will be terminated.
+
+ * REQUEST_CANCEL: a request to cancel will be attempted
+ for each child execution by recording a
+ WorkflowExecutionCancelRequested event in its
+ history. It is up to the decider to take appropriate
+ actions when it receives an execution history with this
+ event.
+
+ * ABANDON: no action will be taken. The child executions
+ will continue to run.
+
+ :type details: string
+ :param details: Optional details for terminating the
+ workflow execution.
+
+ :type reason: string
+ :param reason: An optional descriptive reason for terminating
+ the workflow execution.
+
+ :type run_id: string
+ :param run_id: The runId of the workflow execution to terminate.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain, 'workflowId': workflow_id}
+ if child_policy:
+ data['childPolicy'] = child_policy
+ if details:
+ data['details'] = details
+ if reason:
+ data['reason'] = reason
+ if run_id:
+ data['runId'] = run_id
+ json_input = json.dumps(data)
+ return self.make_request('TerminateWorkflowExecution', json_input)
+
+# Actions related to Administration
+
+## Activity Management
+
+ def register_activity_type(self, domain, name, version, task_list=None,
+ default_task_heartbeat_timeout=None,
+ default_task_schedule_to_close_timeout=None,
+ default_task_schedule_to_start_timeout=None,
+ default_task_start_to_close_timeout=None,
+ description=None):
+ """
+ Registers a new activity type along with its configuration
+ settings in the specified domain.
+
+ :type domain: string
+ :param domain: The name of the domain in which this activity is
+ to be registered.
+
+ :type name: string
+ :param name: The name of the activity type within the domain.
+
+ :type version: string
+ :param version: The version of the activity type.
+
+ :type task_list: string
+ :param task_list: If set, specifies the default task list to
+ use for scheduling tasks of this activity type. This default
+ task list is used if a task list is not provided when a task
+ is scheduled through the schedule_activity_task Decision.
+
+ :type default_task_heartbeat_timeout: string
+ :param default_task_heartbeat_timeout: If set, specifies the
+ default maximum time before which a worker processing a task
+ of this type must report progress by calling
+ RecordActivityTaskHeartbeat. If the timeout is exceeded, the
+ activity task is automatically timed out. This default can be
+ overridden when scheduling an activity task using the
+ ScheduleActivityTask Decision. If the activity worker
+ subsequently attempts to record a heartbeat or returns a
+ result, the activity worker receives an UnknownResource
+ fault. In this case, Amazon SWF no longer considers the
+ activity task to be valid; the activity worker should clean up
+ the activity task.
+
+ :type default_task_schedule_to_close_timeout: string
+ :param default_task_schedule_to_close_timeout: If set,
+ specifies the default maximum duration for a task of this
+ activity type. This default can be overridden when scheduling
+ an activity task using the ScheduleActivityTask Decision.
+
+ :type default_task_schedule_to_start_timeout: string
+ :param default_task_schedule_to_start_timeout: If set,
+ specifies the default maximum duration that a task of this
+ activity type can wait before being assigned to a worker. This
+ default can be overridden when scheduling an activity task
+ using the ScheduleActivityTask Decision.
+
+ :type default_task_start_to_close_timeout: string
+ :param default_task_start_to_close_timeout: If set, specifies
+ the default maximum duration that a worker can take to process
+ tasks of this activity type. This default can be overridden
+ when scheduling an activity task using the
+ ScheduleActivityTask Decision.
+
+ :type description: string
+ :param description: A textual description of the activity type.
+
+ :raises: SWFTypeAlreadyExistsError, SWFLimitExceededError,
+ UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain,
+ 'name': name,
+ 'version': version}
+ if task_list:
+ data['defaultTaskList'] = {'name': task_list}
+ if default_task_heartbeat_timeout:
+ data['defaultTaskHeartbeatTimeout'] = default_task_heartbeat_timeout
+ if default_task_schedule_to_close_timeout:
+ data['defaultTaskScheduleToCloseTimeout'] = default_task_schedule_to_close_timeout
+ if default_task_schedule_to_start_timeout:
+ data['defaultTaskScheduleToStartTimeout'] = default_task_schedule_to_start_timeout
+ if default_task_start_to_close_timeout:
+ data['defaultTaskStartToCloseTimeout'] = default_task_start_to_close_timeout
+ if description:
+ data['description'] = description
+ json_input = json.dumps(data)
+ return self.make_request('RegisterActivityType', json_input)
+
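+ # Sketch (placeholder values): the default timeouts are strings of
+ # seconds, or 'NONE' for no limit.
+ #
+ #   conn.register_activity_type(
+ #       'my-domain', 'ResizeImage', '1.0',
+ #       task_list='my-task-list',
+ #       default_task_heartbeat_timeout='120',
+ #       default_task_start_to_close_timeout='300',
+ #       description='resizes a single image')
+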
+ def deprecate_activity_type(self, domain, activity_name, activity_version):
+ """
+ Deprecates the specified activity type. After an activity
+ type has been deprecated, you cannot create new tasks of
+ that type. Tasks of this type that were scheduled before
+ the type was deprecated will continue to run.
+
+ :type domain: string
+ :param domain: The name of the domain in which the activity
+ type is registered.
+
+ :type activity_name: string
+ :param activity_name: The name of this activity.
+
+ :type activity_version: string
+ :param activity_version: The version of this activity.
+
+ :raises: UnknownResourceFault, TypeDeprecatedFault,
+ SWFOperationNotPermittedError
+ """
+ data = {'domain': domain}
+ data['activityType'] = {'name': activity_name,
+ 'version': activity_version}
+ json_input = json.dumps(data)
+ return self.make_request('DeprecateActivityType', json_input)
+
+## Workflow Management
+
+ def register_workflow_type(self, domain, name, version,
+ task_list=None,
+ default_child_policy=None,
+ default_execution_start_to_close_timeout=None,
+ default_task_start_to_close_timeout=None,
+ description=None):
+ """
+ Registers a new workflow type and its configuration settings
+ in the specified domain.
+
+ :type domain: string
+ :param domain: The name of the domain in which to register
+ the workflow type.
+
+ :type name: string
+ :param name: The name of the workflow type.
+
+ :type version: string
+ :param version: The version of the workflow type.
+
+ :type task_list: string
+ :param task_list: If set, specifies the default task list to use
+ for scheduling decision tasks for executions of this workflow
+ type. This default is used only if a task list is not provided
+ when starting the execution through the StartWorkflowExecution
+ Action or StartChildWorkflowExecution Decision.
+
+ :type default_child_policy: string
+ :param default_child_policy: If set, specifies the default
+ policy to use for the child workflow executions when a
+ workflow execution of this type is terminated, by calling the
+ TerminateWorkflowExecution action explicitly or due to an
+ expired timeout. This default can be overridden when starting
+ a workflow execution using the StartWorkflowExecution action
+ or the StartChildWorkflowExecution Decision. The supported
+ child policies are:
+
+ * TERMINATE: the child executions will be terminated.
+
+ * REQUEST_CANCEL: a request to cancel will be attempted
+ for each child execution by recording a
+ WorkflowExecutionCancelRequested event in its
+ history. It is up to the decider to take appropriate
+ actions when it receives an execution history with this
+ event.
+
+ * ABANDON: no action will be taken. The child executions
+ will continue to run.
+
+ :type default_execution_start_to_close_timeout: string
+ :param default_execution_start_to_close_timeout: If set,
+ specifies the default maximum duration for executions of this
+ workflow type. You can override this default when starting an
+ execution through the StartWorkflowExecution Action or
+ StartChildWorkflowExecution Decision.
+
+ :type default_task_start_to_close_timeout: string
+ :param default_task_start_to_close_timeout: If set, specifies
+ the default maximum duration of decision tasks for this
+ workflow type. This default can be overridden when starting a
+ workflow execution using the StartWorkflowExecution action or
+ the StartChildWorkflowExecution Decision.
+
+ :type description: string
+ :param description: Textual description of the workflow type.
+
+ :raises: SWFTypeAlreadyExistsError, SWFLimitExceededError,
+ UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain, 'name': name, 'version': version}
+ if task_list:
+ data['defaultTaskList'] = {'name': task_list}
+ if default_child_policy:
+ data['defaultChildPolicy'] = default_child_policy
+ if default_execution_start_to_close_timeout:
+ data['defaultExecutionStartToCloseTimeout'] = default_execution_start_to_close_timeout
+ if default_task_start_to_close_timeout:
+ data['defaultTaskStartToCloseTimeout'] = default_task_start_to_close_timeout
+ if description:
+ data['description'] = description
+ json_input = json.dumps(data)
+ return self.make_request('RegisterWorkflowType', json_input)
+
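+ # Sketch (placeholder values), mirroring the activity registration
+ # above:
+ #
+ #   conn.register_workflow_type(
+ #       'my-domain', 'ProcessOrder', '1.0',
+ #       task_list='my-task-list',
+ #       default_child_policy='TERMINATE',
+ #       default_execution_start_to_close_timeout='3600',
+ #       default_task_start_to_close_timeout='60')
+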
+ def deprecate_workflow_type(self, domain, workflow_name, workflow_version):
+ """
+ Deprecates the specified workflow type. After a workflow type
+ has been deprecated, you cannot create new executions of that
+ type. Executions that were started before the type was
+ deprecated will continue to run. A deprecated workflow type
+ may still be used when calling visibility actions.
+
+ :type domain: string
+ :param domain: The name of the domain in which the workflow
+ type is registered.
+
+ :type workflow_name: string
+ :param workflow_name: The name of the workflow type.
+
+ :type workflow_version: string
+ :param workflow_version: The version of the workflow type.
+
+ :raises: UnknownResourceFault, TypeDeprecatedFault,
+ SWFOperationNotPermittedError
+ """
+ data = {'domain': domain}
+ data['workflowType'] = {'name': workflow_name,
+ 'version': workflow_version}
+ json_input = json.dumps(data)
+ return self.make_request('DeprecateWorkflowType', json_input)
+
+## Domain Management
+
+ def register_domain(self, name,
+ workflow_execution_retention_period_in_days,
+ description=None):
+ """
+ Registers a new domain.
+
+ :type name: string
+ :param name: Name of the domain to register. The name must be unique.
+
+ :type workflow_execution_retention_period_in_days: string
+ :param workflow_execution_retention_period_in_days: Specifies
+ the duration *in days* for which the record (including the
+ history) of workflow executions in this domain should be kept
+ by the service. After the retention period, the workflow
+ execution will not be available in the results of visibility
+ calls. If a duration of NONE is specified, the records for
+ workflow executions in this domain are not retained at all.
+
+ :type description: string
+ :param description: Textual description of the domain.
+
+ :raises: SWFDomainAlreadyExistsError, SWFLimitExceededError,
+ SWFOperationNotPermittedError
+ """
+ data = {'name': name,
+ 'workflowExecutionRetentionPeriodInDays': workflow_execution_retention_period_in_days}
+ if description:
+ data['description'] = description
+ json_input = json.dumps(data)
+ return self.make_request('RegisterDomain', json_input)
+
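+ # Sketch: the retention period is a string number of days, or
+ # 'NONE' to keep no record of closed executions.
+ #
+ #   conn.register_domain('my-domain', '7',
+ #                        description='example domain')
+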
+ def deprecate_domain(self, name):
+ """
+ Deprecates the specified domain. After a domain has been
+ deprecated it cannot be used to create new workflow executions
+ or register new types. However, you can still use visibility
+ actions on this domain. Deprecating a domain also deprecates
+ all activity and workflow types registered in the
+ domain. Executions that were started before the domain was
+ deprecated will continue to run.
+
+ :type name: string
+ :param name: The name of the domain to deprecate.
+
+ :raises: UnknownResourceFault, DomainDeprecatedFault,
+ SWFOperationNotPermittedError
+ """
+ data = {'name': name}
+ json_input = json.dumps(data)
+ return self.make_request('DeprecateDomain', json_input)
+
+# Visibility Actions
+
+## Activity Visibility
+
+ def list_activity_types(self, domain, registration_status,
+ name=None,
+ maximum_page_size=None,
+ next_page_token=None, reverse_order=None):
+ """
+ Returns information about all activities registered in the
+ specified domain that match the specified name and
+ registration status. The result includes information like
+ creation date, current status of the activity, etc. The
+ results may be split into multiple pages. To retrieve
+ subsequent pages, make the call again using the nextPageToken
+ returned by the initial call.
+
+ :type domain: string
+ :param domain: The name of the domain in which the activity
+ types have been registered.
+
+ :type registration_status: string
+ :param registration_status: Specifies the registration status
+ of the activity types to list. Valid values are:
+
+ * REGISTERED
+ * DEPRECATED
+
+ :type name: string
+ :param name: If specified, only lists the activity types that
+ have this name.
+
+ :type maximum_page_size: integer
+ :param maximum_page_size: The maximum number of results
+ returned in each page. The default is 100, but the caller can
+ override this value to a page size smaller than the
+ default. You cannot specify a page size greater than 100.
+
+ :type next_page_token: string
+ :param next_page_token: If on a previous call to this method a
+ NextPageToken was returned, the results have more than one
+ page. To get the next page of results, repeat the call with
+ the nextPageToken and keep all other arguments unchanged.
+
+ :type reverse_order: boolean
+ :param reverse_order: When set to true, returns the results in
+ reverse order. By default the results are returned in
+ ascending alphabetical order of the name of the activity
+ types.
+
+ :raises: SWFOperationNotPermittedError, UnknownResourceFault
+ """
+ data = {'domain': domain, 'registrationStatus': registration_status}
+ if name:
+ data['name'] = name
+ if maximum_page_size:
+ data['maximumPageSize'] = maximum_page_size
+ if next_page_token:
+ data['nextPageToken'] = next_page_token
+ if reverse_order:
+ data['reverseOrder'] = 'true'
+ json_input = json.dumps(data)
+ return self.make_request('ListActivityTypes', json_input)
+
+ def describe_activity_type(self, domain, activity_name, activity_version):
+ """
+ Returns information about the specified activity type. This
+ includes configuration settings provided at registration time
+ as well as other general information about the type.
+
+ :type domain: string
+ :param domain: The name of the domain in which the activity
+ type is registered.
+
+ :type activity_name: string
+ :param activity_name: The name of this activity.
+
+ :type activity_version: string
+ :param activity_version: The version of this activity.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain}
+ data['activityType'] = {'name': activity_name,
+ 'version': activity_version}
+ json_input = json.dumps(data)
+ return self.make_request('DescribeActivityType', json_input)
+
+## Workflow Visibility
+
+ def list_workflow_types(self, domain, registration_status,
+ maximum_page_size=None, name=None,
+ next_page_token=None, reverse_order=None):
+ """
+ Returns information about workflow types in the specified
+ domain. The results may be split into multiple pages that can
+ be retrieved by making the call repeatedly.
+
+ :type domain: string
+ :param domain: The name of the domain in which the workflow
+ types have been registered.
+
+ :type registration_status: string
+ :param registration_status: Specifies the registration status
+ of the workflow types to list. Valid values are:
+
+ * REGISTERED
+ * DEPRECATED
+
+ :type name: string
+ :param name: If specified, lists the workflow type with this name.
+
+ :type maximum_page_size: integer
+ :param maximum_page_size: The maximum number of results
+ returned in each page. The default is 100, but the caller can
+ override this value to a page size smaller than the
+ default. You cannot specify a page size greater than 100.
+
+ :type next_page_token: string
+ :param next_page_token: If on a previous call to this method a
+ NextPageToken was returned, the results are being
+ paginated. To get the next page of results, repeat the call
+ with the returned token and all other arguments unchanged.
+
+ :type reverse_order: boolean
+ :param reverse_order: When set to true, returns the results in
+ reverse order. By default the results are returned in
+ ascending alphabetical order of the name of the workflow
+ types.
+
+ :raises: SWFOperationNotPermittedError, UnknownResourceFault
+ """
+ data = {'domain': domain, 'registrationStatus': registration_status}
+ if maximum_page_size:
+ data['maximumPageSize'] = maximum_page_size
+ if name:
+ data['name'] = name
+ if next_page_token:
+ data['nextPageToken'] = next_page_token
+ if reverse_order:
+ data['reverseOrder'] = 'true'
+ json_input = json.dumps(data)
+ return self.make_request('ListWorkflowTypes', json_input)
+
+ def describe_workflow_type(self, domain, workflow_name, workflow_version):
+ """
+ Returns information about the specified workflow type. This
+ includes configuration settings specified when the type was
+ registered and other information such as creation date,
+ current status, etc.
+
+ :type domain: string
+ :param domain: The name of the domain in which this workflow
+ type is registered.
+
+ :type workflow_name: string
+ :param workflow_name: The name of the workflow type.
+
+ :type workflow_version: string
+ :param workflow_version: The version of the workflow type.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain}
+ data['workflowType'] = {'name': workflow_name,
+ 'version': workflow_version}
+ json_input = json.dumps(data)
+ return self.make_request('DescribeWorkflowType', json_input)
+
+## Workflow Execution Visibility
+
+ def describe_workflow_execution(self, domain, run_id, workflow_id):
+ """
+ Returns information about the specified workflow execution
+ including its type and some statistics.
+
+ :type domain: string
+ :param domain: The name of the domain containing the
+ workflow execution.
+
+ :type run_id: string
+ :param run_id: A system generated unique identifier for the
+ workflow execution.
+
+ :type workflow_id: string
+ :param workflow_id: The user defined identifier associated
+ with the workflow execution.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain}
+ data['execution'] = {'runId': run_id, 'workflowId': workflow_id}
+ json_input = json.dumps(data)
+ return self.make_request('DescribeWorkflowExecution', json_input)
+
+ def get_workflow_execution_history(self, domain, run_id, workflow_id,
+ maximum_page_size=None,
+ next_page_token=None,
+ reverse_order=None):
+ """
+ Returns the history of the specified workflow execution. The
+ results may be split into multiple pages. To retrieve
+ subsequent pages, make the call again using the nextPageToken
+ returned by the initial call.
+
+ :type domain: string
+ :param domain: The name of the domain containing the
+ workflow execution.
+
+ :type run_id: string
+ :param run_id: A system generated unique identifier for the
+ workflow execution.
+
+ :type workflow_id: string
+ :param workflow_id: The user defined identifier associated
+ with the workflow execution.
+
+ :type maximum_page_size: integer
+ :param maximum_page_size: Specifies the maximum number of
+ history events returned in one page. The next page in the
+ result is identified by the NextPageToken returned. By default
+ 100 history events are returned in a page but the caller can
+ override this value to a page size smaller than the
+ default. You cannot specify a page size larger than 100.
+
+ :type next_page_token: string
+ :param next_page_token: If a NextPageToken is returned, the
+ result has more than one page. To get the next page, repeat
+ the call and specify the nextPageToken with all other
+ arguments unchanged.
+
+ :type reverse_order: boolean
+ :param reverse_order: When set to true, returns the events in
+ reverse order. By default the results are returned in
+ ascending order of the eventTimestamp of the events.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain}
+ data['execution'] = {'runId': run_id, 'workflowId': workflow_id}
+ if maximum_page_size:
+ data['maximumPageSize'] = maximum_page_size
+ if next_page_token:
+ data['nextPageToken'] = next_page_token
+ if reverse_order:
+ data['reverseOrder'] = 'true'
+ json_input = json.dumps(data)
+ return self.make_request('GetWorkflowExecutionHistory', json_input)
+
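+ # Sketch (placeholder ids): drain the paginated history.
+ #
+ #   token = None
+ #   while True:
+ #       page = conn.get_workflow_execution_history(
+ #           'my-domain', run_id, workflow_id, next_page_token=token)
+ #       process(page.get('events', []))
+ #       token = page.get('nextPageToken')
+ #       if not token:
+ #           break
+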
+ def count_open_workflow_executions(self, domain, latest_date, oldest_date,
+ tag=None,
+ workflow_id=None,
+ workflow_name=None,
+ workflow_version=None):
+ """
+ Returns the number of open workflow executions within the
+ given domain that meet the specified filtering criteria.
+
+ .. note::
+ workflow_id, workflow_name/workflow_version and tag are mutually
+ exclusive. You can specify at most one of these in a request.
+
+ :type domain: string
+ :param domain: The name of the domain containing the
+ workflow executions to count.
+
+ :type latest_date: timestamp
+ :param latest_date: Specifies the latest start or close date
+ and time to return.
+
+ :type oldest_date: timestamp
+ :param oldest_date: Specifies the oldest start or close date
+ and time to return.
+
+ :type workflow_name: string
+ :param workflow_name: Name of the workflow type to filter on.
+
+ :type workflow_version: string
+ :param workflow_version: Version of the workflow type to filter on.
+
+ :type tag: string
+ :param tag: If specified, only executions that have a tag
+ that matches the filter are counted.
+
+ :type workflow_id: string
+ :param workflow_id: If specified, only workflow executions
+ matching the workflow_id are counted.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain}
+ data['startTimeFilter'] = {'oldestDate': oldest_date,
+ 'latestDate': latest_date}
+ if workflow_name and workflow_version:
+ data['typeFilter'] = {'name': workflow_name,
+ 'version': workflow_version}
+ if workflow_id:
+ data['executionFilter'] = {'workflowId': workflow_id}
+ if tag:
+ data['tagFilter'] = {'tag': tag}
+ json_input = json.dumps(data)
+ return self.make_request('CountOpenWorkflowExecutions', json_input)
+
+ def list_open_workflow_executions(self, domain,
+ latest_date=None,
+ oldest_date=None,
+ tag=None,
+ workflow_id=None,
+ workflow_name=None,
+ workflow_version=None,
+ maximum_page_size=None,
+ next_page_token=None,
+ reverse_order=None):
+ """
+ Returns the list of open workflow executions within the
+ given domain that meet the specified filtering criteria.
+
+ .. note::
+ workflow_id, workflow_name/workflow_version
+ and tag are mutually exclusive. You can specify at most
+ one of these in a request.
+
+ :type domain: string
+ :param domain: The name of the domain containing the
+ workflow executions to list.
+
+ :type latest_date: timestamp
+ :param latest_date: Specifies the latest start or close date
+ and time to return.
+
+ :type oldest_date: timestamp
+ :param oldest_date: Specifies the oldest start or close date
+ and time to return.
+
+ :type tag: string
+ :param tag: If specified, only executions that have a tag
+ that matches the filter are listed.
+
+ :type workflow_id: string
+ :param workflow_id: If specified, only workflow executions
+ matching the workflow_id are listed.
+
+ :type workflow_name: string
+ :param workflow_name: Name of the workflow type to filter on.
+
+ :type workflow_version: string
+ :param workflow_version: Version of the workflow type to filter on.
+
+ :type maximum_page_size: integer
+ :param maximum_page_size: The maximum number of results
+ returned in each page. The default is 100, but the caller can
+ override this value to a page size smaller than the
+ default. You cannot specify a page size greater than 100.
+
+ :type next_page_token: string
+ :param next_page_token: If on a previous call to this method a
+ NextPageToken was returned, the results are being
+ paginated. To get the next page of results, repeat the call
+ with the returned token and all other arguments unchanged.
+
+ :type reverse_order: boolean
+ :param reverse_order: When set to true, returns the results in
+ reverse order. By default the results are returned in
+ descending order of the start or the close time of the
+ executions.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain}
+ data['startTimeFilter'] = {'oldestDate': oldest_date,
+ 'latestDate': latest_date}
+ if tag:
+ data['tagFilter'] = {'tag': tag}
+ if workflow_name and workflow_version:
+ data['typeFilter'] = {'name': workflow_name,
+ 'version': workflow_version}
+ if workflow_id:
+ data['executionFilter'] = {'workflowId': workflow_id}
+
+ if maximum_page_size:
+ data['maximumPageSize'] = maximum_page_size
+ if next_page_token:
+ data['nextPageToken'] = next_page_token
+ if reverse_order:
+ data['reverseOrder'] = 'true'
+ json_input = json.dumps(data)
+ return self.make_request('ListOpenWorkflowExecutions', json_input)
+
+ def count_closed_workflow_executions(self, domain,
+ start_latest_date=None,
+ start_oldest_date=None,
+ close_latest_date=None,
+ close_oldest_date=None,
+ close_status=None,
+ tag=None,
+ workflow_id=None,
+ workflow_name=None,
+ workflow_version=None):
+ """
+ Returns the number of closed workflow executions within the
+ given domain that meet the specified filtering criteria.
+
+ .. note::
+ close_status, workflow_id, workflow_name/workflow_version
+ and tag are mutually exclusive. You can specify at most
+ one of these in a request.
+
+ .. note::
+ start_latest_date/start_oldest_date and
+ close_latest_date/close_oldest_date are mutually
+ exclusive. You can specify at most one of these in a request.
+
+ :type domain: string
+ :param domain: The name of the domain containing the
+ workflow executions to count.
+
+ :type start_latest_date: timestamp
+ :param start_latest_date: If specified, only workflow executions
+ that meet the start time criteria of the filter are counted.
+
+ :type start_oldest_date: timestamp
+ :param start_oldest_date: If specified, only workflow executions
+ that meet the start time criteria of the filter are counted.
+
+ :type close_latest_date: timestamp
+ :param close_latest_date: If specified, only workflow executions
+ that meet the close time criteria of the filter are counted.
+
+ :type close_oldest_date: timestamp
+ :param close_oldest_date: If specified, only workflow executions
+ that meet the close time criteria of the filter are counted.
+
+ :type close_status: string
+ :param close_status: The close status that must match the close status
+ of an execution for it to meet the criteria of this filter.
+ Valid values are:
+
+ * COMPLETED
+ * FAILED
+ * CANCELED
+ * TERMINATED
+ * CONTINUED_AS_NEW
+ * TIMED_OUT
+
+ :type tag: string
+ :param tag: If specified, only executions that have a tag
+ that matches the filter are counted.
+
+ :type workflow_id: string
+ :param workflow_id: If specified, only workflow executions
+ matching the workflow_id are counted.
+
+ :type workflow_name: string
+ :param workflow_name: Name of the workflow type to filter on.
+
+ :type workflow_version: string
+ :param workflow_version: Version of the workflow type to filter on.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain}
+ if start_latest_date and start_oldest_date:
+ data['startTimeFilter'] = {'oldestDate': start_oldest_date,
+ 'latestDate': start_latest_date}
+ if close_latest_date and close_oldest_date:
+ data['closeTimeFilter'] = {'oldestDate': close_oldest_date,
+ 'latestDate': close_latest_date}
+ if close_status:
+ data['closeStatusFilter'] = {'status': close_status}
+ if tag:
+ data['tagFilter'] = {'tag': tag}
+ if workflow_name and workflow_version:
+ data['typeFilter'] = {'name': workflow_name,
+ 'version': workflow_version}
+ if workflow_id:
+ data['executionFilter'] = {'workflowId': workflow_id}
+
+ json_input = json.dumps(data)
+ return self.make_request('CountClosedWorkflowExecutions', json_input)
+
+ def list_closed_workflow_executions(self, domain,
+ start_latest_date=None,
+ start_oldest_date=None,
+ close_latest_date=None,
+ close_oldest_date=None,
+ close_status=None,
+ tag=None,
+ workflow_id=None,
+ workflow_name=None,
+ workflow_version=None,
+ maximum_page_size=None,
+ next_page_token=None,
+ reverse_order=None):
+ """
+ Returns the list of closed workflow executions within the
+ given domain that meet the specified filtering criteria.
+
+ .. note::
+ close_status, workflow_id, workflow_name/workflow_version
+ and tag are mutually exclusive. You can specify at most
+ one of these in a request.
+
+ .. note::
+ start_latest_date/start_oldest_date and
+ close_latest_date/close_oldest_date are mutually
+ exclusive. You can specify at most one of these in a request.
+
+ :type domain: string
+ :param domain: The name of the domain containing the
+ workflow executions to list.
+
+ :type start_latest_date: timestamp
+ :param start_latest_date: If specified, only workflow executions
+ that meet the start time criteria of the filter are listed.
+
+ :type start_oldest_date: timestamp
+ :param start_oldest_date: If specified, only workflow executions
+ that meet the start time criteria of the filter are listed.
+
+ :type close_latest_date: timestamp
+ :param close_latest_date: If specified, only workflow executions
+ that meet the close time criteria of the filter are listed.
+
+ :type close_oldest_date: timestamp
+ :param close_oldest_date: If specified, only workflow executions
+ that meet the close time criteria of the filter are listed.
+
+ :type close_status: string
+ :param close_status: The close status that must match the close status
+ of an execution for it to meet the criteria of this filter.
+ Valid values are:
+
+ * COMPLETED
+ * FAILED
+ * CANCELED
+ * TERMINATED
+ * CONTINUED_AS_NEW
+ * TIMED_OUT
+
+ :type tag: string
+ :param tag: If specified, only executions that have a tag
+ that matches the filter are listed.
+
+ :type workflow_id: string
+ :param workflow_id: If specified, only workflow executions
+ matching the workflow_id are listed.
+
+ :type workflow_name: string
+ :param workflow_name: Name of the workflow type to filter on.
+
+ :type workflow_version: string
+ :param workflow_version: Version of the workflow type to filter on.
+
+ :type maximum_page_size: integer
+ :param maximum_page_size: The maximum number of results
+ returned in each page. The default is 100, but the caller can
+ override this value to a page size smaller than the
+ default. You cannot specify a page size greater than 100.
+
+ :type next_page_token: string
+ :param next_page_token: If on a previous call to this method a
+ NextPageToken was returned, the results are being
+ paginated. To get the next page of results, repeat the call
+ with the returned token and all other arguments unchanged.
+
+ :type reverse_order: boolean
+ :param reverse_order: When set to true, returns the results in
+ reverse order. By default the results are returned in
+ descending order of the start or the close time of the
+ executions.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain}
+ if start_latest_date and start_oldest_date:
+ data['startTimeFilter'] = {'oldestDate': start_oldest_date,
+ 'latestDate': start_latest_date}
+ if close_latest_date and close_oldest_date:
+ data['closeTimeFilter'] = {'oldestDate': close_oldest_date,
+ 'latestDate': close_latest_date}
+
+ if workflow_id:
+ data['executionFilter'] = {'workflowId': workflow_id}
+
+ if close_status:
+ data['closeStatusFilter'] = {'status': close_status}
+ if tag:
+ data['tagFilter'] = {'tag': tag}
+ if workflow_name and workflow_version:
+ data['typeFilter'] = {'name': workflow_name,
+ 'version': workflow_version}
+ if maximum_page_size:
+ data['maximumPageSize'] = maximum_page_size
+ if next_page_token:
+ data['nextPageToken'] = next_page_token
+ if reverse_order:
+ data['reverseOrder'] = 'true'
+ json_input = json.dumps(data)
+ return self.make_request('ListClosedWorkflowExecutions', json_input)
+
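+ # Sketch (placeholder values): the date filters take epoch
+ # timestamps, and only one of the mutually exclusive filters may
+ # be supplied per call.
+ #
+ #   import time
+ #   now = time.time()
+ #   failed = conn.list_closed_workflow_executions(
+ #       'my-domain',
+ #       start_oldest_date=now - 86400, start_latest_date=now,
+ #       close_status='FAILED')
+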
+## Domain Visibility
+
+ def list_domains(self, registration_status,
+ maximum_page_size=None,
+ next_page_token=None, reverse_order=None):
+ """
+ Returns the list of domains registered in the account. The
+ results may be split into multiple pages. To retrieve
+ subsequent pages, make the call again using the nextPageToken
+ returned by the initial call.
+
+ :type registration_status: string
+ :param registration_status: Specifies the registration status
+ of the domains to list. Valid Values:
+
+ * REGISTERED
+ * DEPRECATED
+
+ :type maximum_page_size: integer
+ :param maximum_page_size: The maximum number of results
+ returned in each page. The default is 100, but the caller can
+ override this value to a page size smaller than the
+ default. You cannot specify a page size greater than 100.
+
+ :type next_page_token: string
+ :param next_page_token: If on a previous call to this method a
+ NextPageToken was returned, the result has more than one
+ page. To get the next page of results, repeat the call with
+ the returned token and all other arguments unchanged.
+
+ :type reverse_order: boolean
+ :param reverse_order: When set to true, returns the results in
+ reverse order. By default the results are returned in
+ ascending alphabetical order of the name of the domains.
+
+ :raises: SWFOperationNotPermittedError
+ """
+ data = {'registrationStatus': registration_status}
+ if maximum_page_size:
+ data['maximumPageSize'] = maximum_page_size
+ if next_page_token:
+ data['nextPageToken'] = next_page_token
+ if reverse_order:
+ data['reverseOrder'] = 'true'
+ json_input = json.dumps(data)
+ return self.make_request('ListDomains', json_input)
+
+ def describe_domain(self, name):
+ """
+ Returns information about the specified domain including
+ description and status.
+
+ :type name: string
+ :param name: The name of the domain to describe.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'name': name}
+ json_input = json.dumps(data)
+ return self.make_request('DescribeDomain', json_input)
+
+## Task List Visibility
+
+ def count_pending_decision_tasks(self, domain, task_list):
+ """
+ Returns the estimated number of decision tasks in the
+ specified task list. The count returned is an approximation
+ and is not guaranteed to be exact. If you specify a task list
+ in which no decision task was ever scheduled, then 0 will be
+ returned.
+
+ :type domain: string
+ :param domain: The name of the domain that contains the task list.
+
+ :type task_list: string
+ :param task_list: The name of the task list.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain, 'taskList': {'name': task_list}}
+ json_input = json.dumps(data)
+ return self.make_request('CountPendingDecisionTasks', json_input)
+
+ def count_pending_activity_tasks(self, domain, task_list):
+ """
+ Returns the estimated number of activity tasks in the
+ specified task list. The count returned is an approximation
+ and is not guaranteed to be exact. If you specify a task list
+ in which no activity task was ever scheduled, then 0 will be
+ returned.
+
+ :type domain: string
+ :param domain: The name of the domain that contains the task list.
+
+ :type task_list: string
+ :param task_list: The name of the task list.
+
+ :raises: UnknownResourceFault, SWFOperationNotPermittedError
+ """
+ data = {'domain': domain, 'taskList': {'name': task_list}}
+ json_input = json.dumps(data)
+ return self.make_request('CountPendingActivityTasks', json_input)
diff --git a/boto/swf/layer1_decisions.py b/boto/swf/layer1_decisions.py
new file mode 100644
index 0000000..6c273aa
--- /dev/null
+++ b/boto/swf/layer1_decisions.py
@@ -0,0 +1,287 @@
+"""
+Helper class for creating decision responses.
+"""
+
+
+class Layer1Decisions:
+ """
+ Use this object to build a list of decisions for a decision response.
+ Each method call appends a new decision. Retrieve the list
+ of decisions from the _data attribute.
+
+ """
+ def __init__(self):
+ self._data = []
+
+ def schedule_activity_task(self,
+ activity_id,
+ activity_type_name,
+ activity_type_version,
+ task_list=None,
+ control=None,
+ heartbeat_timeout=None,
+ schedule_to_close_timeout=None,
+ schedule_to_start_timeout=None,
+ start_to_close_timeout=None,
+ input=None):
+ """
+ Schedules an activity task.
+
+ :type activity_id: string
+ :param activity_id: The activityId of the activity
+ being scheduled.
+
+ :type activity_type_name: string
+ :param activity_type_name: The name of the type of the activity
+ being scheduled.
+
+ :type activity_type_version: string
+ :param activity_type_version: The version of the type of the
+ activity being scheduled.
+
+ :type task_list: string
+ :param task_list: If set, specifies the name of the task list in
+ which to schedule the activity task. If not specified, the
+ defaultTaskList registered with the activity type will be used.
+ Note: a task list for this activity task must be specified
+ either as a default for the activity type or through this
+ field. If neither this field is set nor a default task list
+ was specified at registration time, a fault will be returned.
+ """
+ o = {}
+ o['decisionType'] = 'ScheduleActivityTask'
+ attrs = o['scheduleActivityTaskDecisionAttributes'] = {}
+ attrs['activityId'] = activity_id
+ attrs['activityType'] = {
+ 'name': activity_type_name,
+ 'version': activity_type_version,
+ }
+ if task_list is not None:
+ attrs['taskList'] = {'name': task_list}
+ if control is not None:
+ attrs['control'] = control
+ if heartbeat_timeout is not None:
+ attrs['heartbeatTimeout'] = heartbeat_timeout
+ if schedule_to_close_timeout is not None:
+ attrs['scheduleToCloseTimeout'] = schedule_to_close_timeout
+ if schedule_to_start_timeout is not None:
+ attrs['scheduleToStartTimeout'] = schedule_to_start_timeout
+ if start_to_close_timeout is not None:
+ attrs['startToCloseTimeout'] = start_to_close_timeout
+ if input is not None:
+ attrs['input'] = input
+ self._data.append(o)
+
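+ # Sketch of decider-side usage (placeholder names): accumulate one
+ # or more decisions, then hand the _data list to
+ # Layer1.respond_decision_task_completed.
+ #
+ #   d = Layer1Decisions()
+ #   d.schedule_activity_task('act-1', 'ResizeImage', '1.0',
+ #                            task_list='my-task-list',
+ #                            input='{"key": "photo.jpg"}')
+ #   conn.respond_decision_task_completed(token, decisions=d._data)
+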
+ def request_cancel_activity_task(self, activity_id):
+ """
+ Attempts to cancel a previously scheduled activity task. If
+ the activity task was scheduled but has not been assigned to a
+ worker, then it will be canceled. If the activity task was
+ already assigned to a worker, then the worker will be informed
+ that cancellation has been requested in the response to
+ RecordActivityTaskHeartbeat.
+ """
+ o = {}
+ o['decisionType'] = 'RequestCancelActivityTask'
+ attrs = o['requestCancelActivityTaskDecisionAttributes'] = {}
+ attrs['activityId'] = activity_id
+ self._data.append(o)
+
+ def record_marker(self, marker_name, details=None):
+ """
+ Records a MarkerRecorded event in the history. Markers can be
+ used to add custom information to the history, for instance
+ to let deciders know that they do not need to look at the
+ history beyond the marker event.
+ """
+ o = {}
+ o['decisionType'] = 'RecordMarker'
+ attrs = o['recordMarkerDecisionAttributes'] = {}
+ attrs['markerName'] = marker_name
+ if details is not None:
+ attrs['details'] = details
+ self._data.append(o)
+
+ def complete_workflow_execution(self, result=None):
+ """
+ Closes the workflow execution and records a WorkflowExecutionCompleted
+ event in the history.
+ """
+ o = {}
+ o['decisionType'] = 'CompleteWorkflowExecution'
+ attrs = o['completeWorkflowExecutionDecisionAttributes'] = {}
+ if result is not None:
+ attrs['result'] = result
+ self._data.append(o)
+
+ def fail_workflow_execution(self, reason=None, details=None):
+ """
+ Closes the workflow execution and records a
+ WorkflowExecutionFailed event in the history.
+ """
+ o = {}
+ o['decisionType'] = 'FailWorkflowExecution'
+ attrs = o['failWorkflowExecutionDecisionAttributes'] = {}
+ if reason is not None:
+ attrs['reason'] = reason
+ if details is not None:
+ attrs['details'] = details
+ self._data.append(o)
+
+ def cancel_workflow_executions(self, details=None):
+ """
+ Closes the workflow execution and records a WorkflowExecutionCanceled
+ event in the history.
+ """
+ o = {}
+ o['decisionType'] = 'CancelWorkflowExecution'
+ attrs = o['cancelWorkflowExecutionDecisionAttributes'] = {}
+ if details is not None:
+ attrs['details'] = details
+ self._data.append(o)
+
+ def continue_as_new_workflow_execution(self,
+ child_policy=None,
+ execution_start_to_close_timeout=None,
+ input=None,
+ tag_list=None,
+ task_list=None,
+ start_to_close_timeout=None,
+ workflow_type_version=None):
+ """
+ Closes the workflow execution and starts a new workflow execution
+ of the same type using the same workflowId and a unique runId. A
+ WorkflowExecutionContinuedAsNew event is recorded in the history.
+ """
+ o = {}
+ o['decisionType'] = 'ContinueAsNewWorkflowExecution'
+ attrs = o['continueAsNewWorkflowExecutionDecisionAttributes'] = {}
+ if child_policy is not None:
+ attrs['childPolicy'] = child_policy
+ if execution_start_to_close_timeout is not None:
+ attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
+ if input is not None:
+ attrs['input'] = input
+ if tag_list is not None:
+ attrs['tagList'] = tag_list
+ if task_list is not None:
+ attrs['taskList'] = {'name': task_list}
+ if start_to_close_timeout is not None:
+ attrs['taskStartToCloseTimeout'] = start_to_close_timeout
+ if workflow_type_version is not None:
+ attrs['workflowTypeVersion'] = workflow_type_version
+ self._data.append(o)
+
+ def start_timer(self,
+ start_to_fire_timeout,
+ timer_id,
+ control=None):
+ """
+ Starts a timer for this workflow execution and records a TimerStarted
+ event in the history. This timer will fire after the specified delay
+ and record a TimerFired event.
+ """
+ o = {}
+ o['decisionType'] = 'StartTimer'
+ attrs = o['startTimerDecisionAttributes'] = {}
+ attrs['startToFireTimeout'] = start_to_fire_timeout
+ attrs['timerId'] = timer_id
+ if control is not None:
+ attrs['control'] = control
+ self._data.append(o)
+
+ def cancel_timer(self, timer_id):
+ """
+ Cancels a previously started timer and records a TimerCanceled
+ event in the history.
+ """
+ o = {}
+ o['decisionType'] = 'CancelTimer'
+ attrs = o['cancelTimerDecisionAttributes'] = {}
+ attrs['timerId'] = timer_id
+ self._data.append(o)
+
+ def signal_external_workflow_execution(self,
+ workflow_id,
+ signal_name,
+ run_id=None,
+ control=None,
+ input=None):
+ """
+ Requests a signal to be delivered to the specified external workflow
+ execution and records a SignalExternalWorkflowExecutionInitiated
+ event in the history.
+ """
+ o = {}
+ o['decisionType'] = 'SignalExternalWorkflowExecution'
+ attrs = o['signalExternalWorkflowExecutionDecisionAttributes'] = {}
+ attrs['workflowId'] = workflow_id
+ attrs['signalName'] = signal_name
+ if run_id is not None:
+ attrs['runId'] = run_id
+ if control is not None:
+ attrs['control'] = control
+ if input is not None:
+ attrs['input'] = input
+ self._data.append(o)
+
+ def request_cancel_external_workflow_execution(self,
+ workflow_id,
+ control=None,
+ run_id=None):
+ """
+ Requests that a request be made to cancel the specified
+ external workflow execution and records a
+ RequestCancelExternalWorkflowExecutionInitiated event in the
+ history.
+ """
+ o = {}
+ o['decisionType'] = 'RequestCancelExternalWorkflowExecution'
+ attrs = o['requestCancelExternalWorkflowExecutionDecisionAttributes'] = {}
+ attrs['workflowId'] = workflow_id
+ if control is not None:
+ attrs['control'] = control
+ if run_id is not None:
+ attrs['runId'] = run_id
+ self._data.append(o)
+
+ def start_child_workflow_execution(self,
+ workflow_type_name,
+ workflow_type_version,
+ workflow_id,
+ child_policy=None,
+ control=None,
+ execution_start_to_close_timeout=None,
+ input=None,
+ tag_list=None,
+ task_list=None,
+ task_start_to_close_timeout=None):
+ """
+ Requests that a child workflow execution be started and
+ records a StartChildWorkflowExecutionInitiated event in the
+ history. The child workflow execution is a separate workflow
+ execution with its own history.
+ """
+ o = {}
+ o['decisionType'] = 'StartChildWorkflowExecution'
+ attrs = o['startChildWorkflowExecutionDecisionAttributes'] = {}
+ attrs['workflowType'] = {
+ 'name': workflow_type_name,
+ 'version': workflow_type_version,
+ }
+ attrs['workflowId'] = workflow_id
+ if child_policy is not None:
+ attrs['childPolicy'] = child_policy
+ if control is not None:
+ attrs['control'] = control
+ if execution_start_to_close_timeout is not None:
+ attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
+ if input is not None:
+ attrs['input'] = input
+ if tag_list is not None:
+ attrs['tagList'] = tag_list
+ if task_list is not None:
+ attrs['taskList'] = {'name': task_list}
+ if task_start_to_close_timeout is not None:
+ attrs['taskStartToCloseTimeout'] = task_start_to_close_timeout
+ self._data.append(o)
diff --git a/boto/utils.py b/boto/utils.py
index 5ee5ff4..0945364 100644
--- a/boto/utils.py
+++ b/boto/utils.py
@@ -1,5 +1,6 @@
-# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -16,7 +17,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -38,6 +39,7 @@
Some handy utility functions used by several classes.
"""
+import socket
import urllib
import urllib2
import imp
@@ -46,15 +48,22 @@
import time
import logging.handlers
import boto
+import boto.provider
import tempfile
import smtplib
import datetime
+import re
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import formatdate
from email import Encoders
import gzip
+import base64
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
try:
@@ -64,14 +73,32 @@
import md5
_hashfn = md5.md5
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
# List of Query String Arguments of Interest
-qsa_of_interest = ['acl', 'defaultObjectAcl', 'location', 'logging',
- 'partNumber', 'policy', 'requestPayment', 'torrent',
- 'versioning', 'versionId', 'versions', 'website',
- 'uploads', 'uploadId', 'response-content-type',
- 'response-content-language', 'response-expires',
- 'reponse-cache-control', 'response-content-disposition',
- 'response-content-encoding']
+qsa_of_interest = ['acl', 'cors', 'defaultObjectAcl', 'location', 'logging',
+ 'partNumber', 'policy', 'requestPayment', 'torrent',
+ 'versioning', 'versionId', 'versions', 'website',
+ 'uploads', 'uploadId', 'response-content-type',
+ 'response-content-language', 'response-expires',
+ 'response-cache-control', 'response-content-disposition',
+ 'response-content-encoding', 'delete', 'lifecycle',
+ 'tagging']
+
+
+_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
+_number_cap_regex = re.compile('([a-z])([0-9]+)')
+_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
+
+
+def unquote_v(nv):
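+    # nv is a (name,) or (name, value) pair from a split query string;
+    # only the value half gets unquoted.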
+ if len(nv) == 1:
+ return nv
+ else:
+ return (nv[0], urllib.unquote(nv[1]))
# generates the aws canonical string for the given parameters
def canonical_string(method, path, headers, expires=None,
@@ -86,13 +113,13 @@
interesting_headers[lk] = headers[key].strip()
# these keys get empty strings if they don't exist
- if not interesting_headers.has_key('content-type'):
+ if 'content-type' not in interesting_headers:
interesting_headers['content-type'] = ''
- if not interesting_headers.has_key('content-md5'):
+ if 'content-md5' not in interesting_headers:
interesting_headers['content-md5'] = ''
# just in case someone used this. it's not necessary in this lib.
- if interesting_headers.has_key(provider.date_header):
+ if provider.date_header in interesting_headers:
interesting_headers['date'] = ''
# if you're using expires for query string auth, then it trumps date
@@ -100,8 +127,7 @@
if expires:
interesting_headers['date'] = str(expires)
- sorted_header_keys = interesting_headers.keys()
- sorted_header_keys.sort()
+ sorted_header_keys = sorted(interesting_headers.keys())
buf = "%s\n" % method
for key in sorted_header_keys:
@@ -113,16 +139,16 @@
# don't include anything after the first ? in the resource...
# unless it is one of the QSA of interest, defined above
- t = path.split('?')
+ t = path.split('?')
buf += t[0]
if len(t) > 1:
qsa = t[1].split('&')
- qsa = [ a.split('=') for a in qsa]
- qsa = [ a for a in qsa if a[0] in qsa_of_interest ]
+ qsa = [a.split('=', 1) for a in qsa]
+ qsa = [unquote_v(a) for a in qsa if a[0] in qsa_of_interest]
if len(qsa) > 0:
- qsa.sort(cmp=lambda x,y:cmp(x[0], y[0]))
- qsa = [ '='.join(a) for a in qsa ]
+ qsa.sort(cmp=lambda x, y:cmp(x[0], y[0]))
+ qsa = ['='.join(a) for a in qsa]
buf += '?'
buf += '&'.join(qsa)
@@ -172,48 +198,149 @@
code = e.code
if code == 404 and not retry_on_404:
return ''
- except:
+ except urllib2.URLError, e:
+ raise e
+ except Exception, e:
pass
boto.log.exception('Caught exception reading instance data')
time.sleep(2**i)
boto.log.error('Unable to read instance data, giving up')
return ''
-def _get_instance_metadata(url):
- d = {}
- data = retry_url(url)
- if data:
- fields = data.split('\n')
- for field in fields:
- if field.endswith('/'):
- d[field[0:-1]] = _get_instance_metadata(url + field)
- else:
- p = field.find('=')
- if p > 0:
- key = field[p+1:]
- resource = field[0:p] + '/openssh-key'
+def _get_instance_metadata(url, num_retries):
+ return LazyLoadMetadata(url, num_retries)
+
+class LazyLoadMetadata(dict):
+ def __init__(self, url, num_retries):
+ self._url = url
+ self._num_retries = num_retries
+ self._leaves = {}
+ self._dicts = []
+ data = boto.utils.retry_url(self._url, num_retries=self._num_retries)
+ if data:
+ fields = data.split('\n')
+ for field in fields:
+ if field.endswith('/'):
+ key = field[0:-1]
+ self._dicts.append(key)
else:
- key = resource = field
- val = retry_url(url + resource)
+ p = field.find('=')
+ if p > 0:
+ key = field[p + 1:]
+ resource = field[0:p] + '/openssh-key'
+ else:
+ key = resource = field
+ self._leaves[key] = resource
+ self[key] = None
+
+ def _materialize(self):
+ for key in self:
+ self[key]
+
+ def __getitem__(self, key):
+ if key not in self:
+ # allow dict to throw the KeyError
+ return super(LazyLoadMetadata, self).__getitem__(key)
+
+ # already loaded
+ val = super(LazyLoadMetadata, self).__getitem__(key)
+ if val is not None:
+ return val
+
+ if key in self._leaves:
+ resource = self._leaves[key]
+ val = boto.utils.retry_url(self._url + urllib.quote(resource,
+ safe="/:"),
+ num_retries=self._num_retries)
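+            # values that look like JSON documents are decoded into dicts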
+ if val and val[0] == '{':
+ val = json.loads(val)
+ else:
p = val.find('\n')
if p > 0:
val = val.split('\n')
- d[key] = val
- return d
+ self[key] = val
+ elif key in self._dicts:
+ self[key] = LazyLoadMetadata(self._url + key + '/',
+ self._num_retries)
-def get_instance_metadata(version='latest', url='http://169.254.169.254'):
+ return super(LazyLoadMetadata, self).__getitem__(key)
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def values(self):
+ self._materialize()
+ return super(LazyLoadMetadata, self).values()
+
+ def items(self):
+ self._materialize()
+ return super(LazyLoadMetadata, self).items()
+
+ def __str__(self):
+ self._materialize()
+ return super(LazyLoadMetadata, self).__str__()
+
+ def __repr__(self):
+ self._materialize()
+ return super(LazyLoadMetadata, self).__repr__()
+
+def get_instance_metadata(version='latest', url='http://169.254.169.254',
+ timeout=None, num_retries=5):
"""
Returns the instance metadata as a nested Python dictionary.
Simple values (e.g. local_hostname, hostname, etc.) will be
stored as string values. Values such as ancestor-ami-ids will
be stored in the dict as a list of string values. More complex
fields such as public-keys and will be stored as nested dicts.
+
+ If the timeout is specified, the connection to the specified url
+ will time out after the specified number of seconds.
+
"""
- return _get_instance_metadata('%s/%s/meta-data/' % (url, version))
+ if timeout is not None:
+ original = socket.getdefaulttimeout()
+ socket.setdefaulttimeout(timeout)
+ try:
+ return _get_instance_metadata('%s/%s/meta-data/' % (url, version),
+ num_retries=num_retries)
+ except urllib2.URLError, e:
+ return None
+ finally:
+ if timeout is not None:
+ socket.setdefaulttimeout(original)
+
+def get_instance_identity(version='latest', url='http://169.254.169.254',
+ timeout=None, num_retries=5):
+ """
+ Returns the instance identity as a nested Python dictionary.
+ """
+ iid = {}
+    base_url = '%s/%s/dynamic/instance-identity' % (url, version)
+ if timeout is not None:
+ original = socket.getdefaulttimeout()
+ socket.setdefaulttimeout(timeout)
+ try:
+ data = retry_url(base_url, num_retries=num_retries)
+ fields = data.split('\n')
+        for field in fields:
+            if not field:
+                continue
+            val = retry_url(base_url + '/' + field + '/')
+            if val and val[0] == '{':
+                val = json.loads(val)
+            iid[field] = val
+ return iid
+ except urllib2.URLError, e:
+ return None
+ finally:
+ if timeout is not None:
+ socket.setdefaulttimeout(original)
def get_instance_userdata(version='latest', sep=None,
url='http://169.254.169.254'):
- ud_url = '%s/%s/user-data' % (url,version)
+ ud_url = '%s/%s/user-data' % (url, version)
user_data = retry_url(ud_url, retry_on_404=False)
if user_data:
if sep:
@@ -226,13 +353,14 @@
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
-
+
def get_ts(ts=None):
if not ts:
ts = time.gmtime()
return time.strftime(ISO8601, ts)
def parse_ts(ts):
+ ts = ts.strip()
try:
dt = datetime.datetime.strptime(ts, ISO8601)
return dt
@@ -255,7 +383,7 @@
return c
except:
return None
-
+
def update_dme(username, password, dme_id, ip_address):
"""
Update your Dynamic DNS record with DNSMadeEasy.com
@@ -268,7 +396,7 @@
def fetch_file(uri, file=None, username=None, password=None):
"""
Fetch a file based on the URI provided. If you do not pass in a file pointer
- a tempfile.NamedTemporaryFile, or None if the file could not be
+ a tempfile.NamedTemporaryFile, or None if the file could not be
retrieved is returned.
The URI can be either an HTTP url, or "s3://bucket_name/key_name"
"""
@@ -346,7 +474,7 @@
to accept a username and password on the constructor and to then use those
credentials to authenticate with the SMTP server. To use this, you could
add something like this in your boto config file:
-
+
[handler_hand07]
class=boto.utils.AuthSMTPHandler
level=WARN
@@ -364,7 +492,7 @@
logging.handlers.SMTPHandler.__init__(self, mailhost, fromaddr, toaddrs, subject)
self.username = username
self.password = password
-
+
def emit(self, record):
"""
Emit a record.
@@ -395,29 +523,29 @@
class LRUCache(dict):
"""A dictionary-like object that stores only a certain number of items, and
discards its least recently used item when full.
-
+
>>> cache = LRUCache(3)
>>> cache['A'] = 0
>>> cache['B'] = 1
>>> cache['C'] = 2
>>> len(cache)
3
-
+
>>> cache['A']
0
-
+
Adding new items to the cache does not increase its size. Instead, the least
recently used item is dropped:
-
+
>>> cache['D'] = 3
>>> len(cache)
3
>>> 'B' in cache
False
-
+
Iterating over the cache returns the keys, starting with the most recently
used:
-
+
>>> for key in cache:
... print key
D
@@ -527,10 +655,10 @@
def set(self, value):
self.str = self.hashfunc(value).hexdigest()
-
+
def __str__(self):
return str(self.str)
-
+
def __eq__(self, other):
if other == None:
return False
@@ -557,7 +685,7 @@
msg['To'] = to_string
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
-
+
if body:
msg.attach(MIMEText(body))
@@ -608,16 +736,23 @@
value = [value]
return value
-def pythonize_name(name, sep='_'):
- s = ''
- if name[0].isupper:
- s = name[0].lower()
- for c in name[1:]:
- if c.isupper():
- s += sep + c.lower()
- else:
- s += c
- return s
+def pythonize_name(name):
+ """Convert camel case to a "pythonic" name.
+
+ Examples::
+
+ pythonize_name('CamelCase') -> 'camel_case'
+ pythonize_name('already_pythonized') -> 'already_pythonized'
+ pythonize_name('HTTPRequest') -> 'http_request'
+ pythonize_name('HTTPStatus200Ok') -> 'http_status_200_ok'
+ pythonize_name('UPPER') -> 'upper'
+ pythonize_name('') -> ''
+
+ """
+ s1 = _first_cap_regex.sub(r'\1_\2', name)
+ s2 = _number_cap_regex.sub(r'\1_\2', s1)
+ return _end_cap_regex.sub(r'\1_\2', s2).lower()
+
def write_mime_multipart(content, compress=False, deftype='text/plain', delimiter=':'):
"""Description:
@@ -638,7 +773,7 @@
:rtype: str:
"""
wrapper = MIMEMultipart()
- for name,con in content:
+ for name, con in content:
definite_type = guess_mime_type(con, deftype)
maintype, subtype = definite_type.split('/', 1)
if maintype == 'text':
@@ -684,8 +819,62 @@
'#cloud-boothook' : 'text/cloud-boothook'
}
rtype = deftype
- for possible_type,mimetype in starts_with_mappings.items():
+ for possible_type, mimetype in starts_with_mappings.items():
if content.startswith(possible_type):
rtype = mimetype
break
return(rtype)
+
+def compute_md5(fp, buf_size=8192, size=None):
+ """
+ Compute MD5 hash on passed file and return results in a tuple of values.
+
+ :type fp: file
+ :param fp: File pointer to the file to MD5 hash. The file pointer
+ will be reset to its current location before the
+ method returns.
+
+ :type buf_size: integer
+ :param buf_size: Number of bytes per read request.
+
+ :type size: int
+    :param size: (optional) The maximum number of bytes to read from
+                 the file pointer (fp). This is useful when uploading
+                 a file in multiple parts where the file is being
+                 split in place into different parts. Fewer bytes may
+                 be available.
+
+ :rtype: tuple
+ :return: A tuple containing the hex digest version of the MD5 hash
+ as the first element, the base64 encoded version of the
+ plain digest as the second element and the data size as
+ the third element.
+ """
+ return compute_hash(fp, buf_size, size, hash_algorithm=md5)
+
+
+def compute_hash(fp, buf_size=8192, size=None, hash_algorithm=md5):
+ hash_obj = hash_algorithm()
+ spos = fp.tell()
+ if size and size < buf_size:
+ s = fp.read(size)
+ else:
+ s = fp.read(buf_size)
+ while s:
+ hash_obj.update(s)
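+        # when a size cap was given, track the bytes remaining and stop
+        # once we've read that many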
+ if size:
+ size -= len(s)
+ if size <= 0:
+ break
+ if size and size < buf_size:
+ s = fp.read(size)
+ else:
+ s = fp.read(buf_size)
+ hex_digest = hash_obj.hexdigest()
+ base64_digest = base64.encodestring(hash_obj.digest())
+ if base64_digest[-1] == '\n':
+ base64_digest = base64_digest[0:-1]
+ # data_size based on bytes read.
+ data_size = fp.tell() - spos
+ fp.seek(spos)
+ return (hex_digest, base64_digest, data_size)
diff --git a/boto/vpc/__init__.py b/boto/vpc/__init__.py
index ae55a26..e5c0eef 100644
--- a/boto/vpc/__init__.py
+++ b/boto/vpc/__init__.py
@@ -50,12 +50,12 @@
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
- consists of a filter key and a filter value.
- Possible filter keys are:
+ consists of a filter key and a filter value.
+ Possible filter keys are:
- - *state*, the state of the VPC (pending or available)
- - *cidrBlock*, CIDR block of the VPC
- - *dhcpOptionsId*, the ID of a set of DHCP options
+ * *state* - a list of states of the VPC (pending or available)
+                         * *cidrBlock* - a list of CIDR blocks of the VPC
+ * *dhcpOptionsId* - a list of IDs of a set of DHCP options
:rtype: list
:return: A list of :class:`boto.vpc.vpc.VPC`
@@ -64,11 +64,7 @@
if vpc_ids:
self.build_list_params(params, vpc_ids, 'VpcId')
if filters:
- i = 1
- for filter in filters:
- params[('Filter.%d.Name' % i)] = filter[0]
- params[('Filter.%d.Value.1' % i)] = filter[1]
- i += 1
+ self.build_filter_params(params, dict(filters))
return self.get_list('DescribeVpcs', params, [('item', VPC)])
def create_vpc(self, cidr_block):
@@ -261,7 +257,6 @@
self.build_list_params(params, internet_gateway_ids, 'InternetGatewayId')
if filters:
self.build_filter_params(params, dict(filters))
-
return self.get_list('DescribeInternetGateways', params, [('item', InternetGateway)])
def create_internet_gateway(self):
@@ -311,7 +306,7 @@
Detach an internet gateway from a specific VPC.
:type internet_gateway_id: str
- :param internet_gateway_id: The ID of the internet gateway to delete.
+ :param internet_gateway_id: The ID of the internet gateway to detach.
:type vpc_id: str
:param vpc_id: The ID of the VPC to attach to.
@@ -356,11 +351,8 @@
if customer_gateway_ids:
self.build_list_params(params, customer_gateway_ids, 'CustomerGatewayId')
if filters:
- i = 1
- for filter in filters:
- params[('Filter.%d.Name' % i)] = filter[0]
- params[('Filter.%d.Value.1')] = filter[1]
- i += 1
+ self.build_filter_params(params, dict(filters))
+
return self.get_list('DescribeCustomerGateways', params, [('item', CustomerGateway)])
def create_customer_gateway(self, type, ip_address, bgp_asn):
@@ -416,10 +408,10 @@
consists of a filter key and a filter value.
Possible filter keys are:
- - *state*, the state of the VpnGateway
+ - *state*, a list of states of the VpnGateway
(pending,available,deleting,deleted)
- - *type*, the type of customer gateway (ipsec.1)
- - *availabilityZone*, the Availability zone the
+                         - *type*, a list of types of customer gateway (ipsec.1)
+ - *availabilityZone*, a list of Availability zones the
VPN gateway is in.
:rtype: list
@@ -429,11 +421,7 @@
if vpn_gateway_ids:
self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId')
if filters:
- i = 1
- for filter in filters:
- params[('Filter.%d.Name' % i)] = filter[0]
- params[('Filter.%d.Value.1')] = filter[1]
- i += 1
+ self.build_filter_params(params, dict(filters))
return self.get_list('DescribeVpnGateways', params, [('item', VpnGateway)])
def create_vpn_gateway(self, type, availability_zone=None):
@@ -501,11 +489,11 @@
consists of a filter key and a filter value.
Possible filter keys are:
- - *state*, the state of the Subnet
+ - *state*, a list of states of the Subnet
(pending,available)
- - *vpdId*, the ID of teh VPC the subnet is in.
- - *cidrBlock*, CIDR block of the subnet
- - *availabilityZone*, the Availability Zone
+                         - *vpcId*, a list of IDs of the VPC the subnet is in.
+                         - *cidrBlock*, a list of CIDR blocks of the subnet
+                         - *availabilityZone*, a list of the Availability Zones
the subnet is in.
@@ -516,11 +504,7 @@
if subnet_ids:
self.build_list_params(params, subnet_ids, 'SubnetId')
if filters:
- i = 1
- for filter in filters:
- params[('Filter.%d.Name' % i)] = filter[0]
- params[('Filter.%d.Value.1' % i)] = filter[1]
- i += 1
+ self.build_filter_params(params, dict(filters))
return self.get_list('DescribeSubnets', params, [('item', Subnet)])
def create_subnet(self, vpc_id, cidr_block, availability_zone=None):
@@ -645,12 +629,12 @@
consists of a filter key and a filter value.
Possible filter keys are:
- - *state*, the state of the VPN_CONNECTION
+ - *state*, a list of states of the VPN_CONNECTION
pending,available,deleting,deleted
- - *type*, the type of connection, currently 'ipsec.1'
- - *customerGatewayId*, the ID of the customer gateway
+ - *type*, a list of types of connection, currently 'ipsec.1'
+ - *customerGatewayId*, a list of IDs of the customer gateway
associated with the VPN
- - *vpnGatewayId*, the ID of the VPN gateway associated
+ - *vpnGatewayId*, a list of IDs of the VPN gateway associated
with the VPN connection
:rtype: list
@@ -660,11 +644,7 @@
if vpn_connection_ids:
self.build_list_params(params, vpn_connection_ids, 'Vpn_ConnectionId')
if filters:
- i = 1
- for filter in filters:
- params[('Filter.%d.Name' % i)] = filter[0]
- params[('Filter.%d.Value.1')] = filter[1]
- i += 1
+ self.build_filter_params(params, dict(filters))
return self.get_list('DescribeVpnConnections', params, [('item', VpnConnection)])
def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id):
@@ -701,5 +681,3 @@
"""
params = {'VpnConnectionId': vpn_connection_id}
return self.get_status('DeleteVpnConnection', params)
-
-
diff --git a/boto/vpc/dhcpoptions.py b/boto/vpc/dhcpoptions.py
index 810d9cf..7484683 100644
--- a/boto/vpc/dhcpoptions.py
+++ b/boto/vpc/dhcpoptions.py
@@ -38,7 +38,7 @@
def startElement(self, name, attrs, connection):
if name == 'valueSet':
- if not self.has_key(self._name):
+ if self._name not in self:
self[self._name] = DhcpValueSet()
return self[self._name]
diff --git a/boto/vpc/vpnconnection.py b/boto/vpc/vpnconnection.py
index 2e089e7..3979238 100644
--- a/boto/vpc/vpnconnection.py
+++ b/boto/vpc/vpnconnection.py
@@ -44,7 +44,7 @@
self.id = value
elif name == 'state':
self.state = value
- elif name == 'CustomerGatewayConfiguration':
+ elif name == 'customerGatewayConfiguration':
self.customer_gateway_configuration = value
elif name == 'type':
self.type = value
diff --git a/docs/BotoCheatSheet.pdf b/docs/BotoCheatSheet.pdf
new file mode 100644
index 0000000..474dd65
--- /dev/null
+++ b/docs/BotoCheatSheet.pdf
Binary files differ
diff --git a/docs/source/autoscale_tut.rst b/docs/source/autoscale_tut.rst
index a99bc3e..879d522 100644
--- a/docs/source/autoscale_tut.rst
+++ b/docs/source/autoscale_tut.rst
@@ -94,7 +94,7 @@
>>> ag = AutoScalingGroup(group_name='my_group', load_balancers=['my-lb'],
availability_zones=['us-east-1a', 'us-east-1b'],
- launch_config=lc, min_size=4, max_size=4)
+ launch_config=lc, min_size=4, max_size=8)
>>> conn.create_auto_scaling_group(ag)
We now have a new autoscaling group defined! At this point instances should be
@@ -116,25 +116,76 @@
Scaling a Group Up or Down
^^^^^^^^^^^^^^^^^^^^^^^^^^
-It might be more useful to also define means to scale a group up or down
-depending on certain criteria. For example, if the average CPU utilization of
-all your instances goes above 60%, you may want to scale up a number of
-instances to deal with demand - likewise you might want to scale down if usage
-drops. These criteria are defined in *triggers*.
+It can also be useful to scale a group up or down depending on certain criteria.
+For example, if the average CPU utilization of the group goes above 70%, you may
+want to scale up the number of instances to deal with demand. Likewise, you
+might want to scale down if usage drops again.
+These rules for **how** to scale are defined by *Scaling Policies*, and the rules for
+**when** to scale are defined by CloudWatch *Metric Alarms*.
-For example, let's modify our above group to have a maxsize of 8 and define means
-of scaling up based on CPU utilization. We'll say we should scale up if the average
-CPU usage goes above 80% and scale down if it goes below 40%.
+For example, let's configure scaling for the above group based on CPU utilization.
+We'll say it should scale up if the average CPU usage goes above 70% and scale
+down if it goes below 40%.
->>> from boto.ec2.autoscale import Trigger
->>> tr = Trigger(name='my_trigger', autoscale_group=ag,
- measure_name='CPUUtilization', statistic='Average',
- unit='Percent',
- dimensions=[('AutoScalingGroupName', ag.name)],
- period=60, lower_threshold=40,
- lower_breach_scale_increment='-5',
- upper_threshold=80,
- upper_breach_scale_increment='10',
- breach_duration=360)
->> conn.create_trigger(tr)
+Firstly, define some Scaling Policies. These tell Auto Scaling how to scale
+the group (but not when to do it; we'll specify that later).
+We need one policy for scaling up and one for scaling down.
+
+>>> from boto.ec2.autoscale import ScalingPolicy
+>>> scale_up_policy = ScalingPolicy(
+ name='scale_up', adjustment_type='ChangeInCapacity',
+ as_name='my_group', scaling_adjustment=1, cooldown=180)
+>>> scale_down_policy = ScalingPolicy(
+ name='scale_down', adjustment_type='ChangeInCapacity',
+ as_name='my_group', scaling_adjustment=-1, cooldown=180)
+
+The policy objects are now defined locally.
+Let's submit them to AWS.
+
+>>> conn.create_scaling_policy(scale_up_policy)
+>>> conn.create_scaling_policy(scale_down_policy)
+
+Now that the policies have been digested by AWS, they have extra properties
+that we aren't aware of locally. We need to refresh them by requesting them
+back again.
+
+>>> scale_up_policy = conn.get_all_policies(
+    as_group='my_group', policy_names=['scale_up'])[0]
+>>> scale_down_policy = conn.get_all_policies(
+ as_group='my_group', policy_names=['scale_down'])[0]
+
+Specifically, we'll need the Amazon Resource Name (ARN) of each policy, which
+will now be a property of our ScalingPolicy objects.
+
+Next we'll create CloudWatch alarms that will define when to run the
+Auto Scaling Policies.
+
+>>> cloudwatch = boto.connect_cloudwatch()
+
+It makes sense to measure the average CPU usage across the whole Auto Scaling
+Group, rather than individual instances. We express that as CloudWatch
+*Dimensions*.
+
+>>> alarm_dimensions = {"AutoScalingGroupName": 'my_group'}
+
+Create an alarm for when to scale up, and one for when to scale down.
+
+>>> from boto.ec2.cloudwatch import MetricAlarm
+>>> scale_up_alarm = MetricAlarm(
+ name='scale_up_on_cpu', namespace='AWS/EC2',
+ metric='CPUUtilization', statistic='Average',
+ comparison='>', threshold='70',
+ period='60', evaluation_periods=2,
+ alarm_actions=[scale_up_policy.policy_arn],
+ dimensions=alarm_dimensions)
+>>> cloudwatch.create_alarm(scale_up_alarm)
+
+>>> scale_down_alarm = MetricAlarm(
+ name='scale_down_on_cpu', namespace='AWS/EC2',
+ metric='CPUUtilization', statistic='Average',
+ comparison='<', threshold='40',
+ period='60', evaluation_periods=2,
+ alarm_actions=[scale_down_policy.policy_arn],
+ dimensions=alarm_dimensions)
+>>> cloudwatch.create_alarm(scale_down_alarm)
+
+Auto Scaling will now create a new instance if the existing cluster averages
+more than 70% CPU for two minutes. Similarly, it will terminate an instance
+when CPU usage sits below 40%. Auto Scaling will not add or remove instances
+beyond the limits of the Scaling Group's 'max_size' and 'min_size' properties.
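+
+When you are done, the group object can shut down its instances and, once
+they have terminated, delete itself (a minimal sketch; ``ag`` is the group
+from above, re-fetched if necessary via ``conn.get_all_groups()``):
+
+>>> ag.shutdown_instances()
+>>> ag.delete()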
diff --git a/docs/source/boto_config_tut.rst b/docs/source/boto_config_tut.rst
new file mode 100644
index 0000000..76b27b6
--- /dev/null
+++ b/docs/source/boto_config_tut.rst
@@ -0,0 +1,110 @@
+.. _ref-boto_config:
+
+===========
+Boto Config
+===========
+
+Introduction
+------------
+
+There is a growing list of configuration options for the boto library. Many of
+these options can be passed into the constructors for top-level objects such as
+connections. Some options, such as credentials, can also be read from
+environment variables (e.g. ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY``).
+But there is no central place to manage these options. So, the development
+version of boto has now introduced the notion of boto config files.
+
+Details
+-------
+
+A boto config file is simply a .ini format configuration file that specifies
+values for options that control the behavior of the boto library. Upon startup,
+the boto library looks for configuration files in the following locations
+and in the following order:
+
+* /etc/boto.cfg - for site-wide settings that all users on this machine will use
+* ~/.boto - for user-specific settings
+
+The options are merged into a single, in-memory configuration that is
+available as :py:mod:`boto.config`. The :py:class:`boto.pyami.config.Config`
+class is a subclass of the standard Python
+:py:class:`ConfigParser.SafeConfigParser` object and inherits all of the
+methods of that object. In addition, the boto
+:py:class:`Config <boto.pyami.config.Config>` class defines additional
+methods that are described on the PyamiConfigMethods page.
+
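+As a quick sketch of what that merged configuration looks like from code
+(the section and option names here are just those documented below)::
+
+    >>> import boto
+    >>> boto.config.get('Credentials', 'aws_access_key_id')
+    >>> boto.config.get('Boto', 'proxy')            # None if unset
+    >>> boto.config.getint('Boto', 'num_retries', 5)
+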
+Sections
+--------
+
+The following sections and options are currently recognized within the
+boto config file.
+
+Credentials
+^^^^^^^^^^^
+
+The Credentials section is used to specify the AWS credentials used for all
+boto requests. The order of precedence for authentication credentials is:
+
+* Credentials passed into the Connection class constructor.
+* Credentials specified by environment variables.
+* Credentials specified as options in the config file.
+
+This section defines the following options: ``aws_access_key_id`` and
+``aws_secret_access_key``. The former is your AWS access key ID and the
+latter is your AWS secret access key.
+
+For example::
+
+ [Credentials]
+ aws_access_key_id = <your access key>
+ aws_secret_access_key = <your secret key>
+
+Note that quote characters are not used on either side of the '='
+sign, even though both your AWS access key ID and secret key are strings.
+
+Boto
+^^^^
+
+The Boto section is used to specify options that control the operation of
+boto itself. This section defines the following options:
+
+:debug: Controls the level of debug messages that will be printed by the boto library.
+ The following values are defined::
+
+ 0 - no debug messages are printed
+ 1 - basic debug messages from boto are printed
+ 2 - all boto debugging messages plus request/response messages from httplib
+
+:proxy: The name of the proxy host to use for connecting to AWS.
+:proxy_port: The port number to use to connect to the proxy host.
+:proxy_user: The user name to use when authenticating with proxy host.
+:proxy_pass: The password to use when authenticating with proxy host.
+:num_retries: The number of times to retry failed requests to an AWS server.
+ If boto receives an error from AWS, it will attempt to recover and retry the
+ request. The default number of retries is 5 but you can change the default
+ with this option.
+
+As an example::
+
+ [Boto]
+ debug = 0
+ num_retries = 10
+
+ proxy = myproxy.com
+ proxy_port = 8080
+ proxy_user = foo
+ proxy_pass = bar
+
+Precedence
+----------
+
+Even if you have your boto config set up, you can also have credentials and
+options stored in environment variables, or you can explicitly pass them to
+method calls, e.g.::
+
+    >>> boto.connect_ec2('<KEY_ID>', '<SECRET_KEY>')
+
+When an option can be found in more than one place, boto will first use any
+explicitly supplied arguments; if none are found, it will then look for
+environment variables, and if that fails it will fall back to the values in
+the boto config file.
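+
+For example, with no explicit arguments and no config file entries, the
+standard environment variables mentioned above are used (a sketch)::
+
+    $ export AWS_ACCESS_KEY_ID=<your access key>
+    $ export AWS_SECRET_ACCESS_KEY=<your secret key>
+
+and then, from Python::
+
+    >>> import boto
+    >>> conn = boto.connect_ec2()  # credentials come from the environment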
diff --git a/docs/source/cloudfront_tut.rst b/docs/source/cloudfront_tut.rst
new file mode 100644
index 0000000..7f41ff1
--- /dev/null
+++ b/docs/source/cloudfront_tut.rst
@@ -0,0 +1,196 @@
+.. _cloudfront_tut:
+
+==========
+CloudFront
+==========
+
+This new boto module provides an interface to Amazon's content delivery
+service, CloudFront.
+
+.. warning::
+
+ This module is not well tested. Paging of distributions is not yet
+ supported. CNAME support is completely untested. Use with caution.
+ Feedback and bug reports are greatly appreciated.
+
+Creating a CloudFront connection
+--------------------------------
+If you've placed your credentials in your ``$HOME/.boto`` config file then you
+can simply create a CloudFront connection using::
+
+ >>> import boto
+ >>> c = boto.connect_cloudfront()
+
+If you do not have this file you will need to specify your AWS access key and
+secret access key::
+
+ >>> import boto
+ >>> c = boto.connect_cloudfront('your-aws-access-key-id', 'your-aws-secret-access-key')
+
+Working with CloudFront Distributions
+-------------------------------------
+Create a new :class:`boto.cloudfront.distribution.Distribution`::
+
+    >>> distro = c.create_distribution(origin='mybucket.s3.amazonaws.com', enabled=False, comment='My new distribution')
+    >>> distro.domain_name
+    u'd2oxf3980lnb8l.cloudfront.net'
+    >>> distro.id
+    u'ECH69MOIW7613'
+    >>> distro.status
+    u'InProgress'
+    >>> distro.config.comment
+    u'My new distribution'
+    >>> distro.config.origin
+    <S3Origin: mybucket.s3.amazonaws.com>
+    >>> distro.config.caller_reference
+    u'31b8d9cf-a623-4a28-b062-a91856fac6d0'
+    >>> distro.config.enabled
+ False
+
+Note that a new caller reference is created automatically, using
+uuid.uuid4(). The :class:`boto.cloudfront.distribution.Distribution`,
+:class:`boto.cloudfront.distribution.DistributionConfig` and
+:class:`boto.cloudfront.distribution.DistributionSummary` objects are defined
+in the :mod:`boto.cloudfront.distribution` module.
+
+To get a listing of all current distributions::
+
+ >>> rs = c.get_all_distributions()
+ >>> rs
+ [<boto.cloudfront.distribution.DistributionSummary instance at 0xe8d4e0>,
+ <boto.cloudfront.distribution.DistributionSummary instance at 0xe8d788>]
+
+This returns a list of :class:`boto.cloudfront.distribution.DistributionSummary`
+objects. Note that paging is not yet supported! To get a
+:class:`boto.cloudfront.distribution.Distribution` from a
+:class:`boto.cloudfront.distribution.DistributionSummary` object::
+
+ >>> ds = rs[1]
+ >>> distro = ds.get_distribution()
+ >>> distro.domain_name
+ u'd2oxf3980lnb8l.cloudfront.net'
+
+To change a property of a distribution object::
+
+ >>> distro.comment
+ u'My new distribution'
+ >>> distro.update(comment='This is a much better comment')
+ >>> distro.comment
+ 'This is a much better comment'
+
+You can also enable/disable a distribution using the following
+convenience methods::
+
+ >>> distro.enable() # just calls distro.update(enabled=True)
+
+or::
+
+ >>> distro.disable() # just calls distro.update(enabled=False)
+
+The only attributes that can be updated for a Distribution are
+comment, enabled and cnames.
+
+To delete a :class:`boto.cloudfront.distribution.Distribution`::
+
+ >>> distro.delete()
+
+Invalidating CloudFront Distribution Paths
+------------------------------------------
+Invalidate a list of paths in a CloudFront distribution::
+
+ >>> paths = ['/path/to/file1.html', '/path/to/file2.html', ...]
+ >>> inval_req = c.create_invalidation_request(u'ECH69MOIW7613', paths)
+ >>> print inval_req
+ <InvalidationBatch: IFCT7K03VUETK>
+ >>> print inval_req.id
+ u'IFCT7K03VUETK'
+ >>> print inval_req.paths
+    [u'/path/to/file1.html', u'/path/to/file2.html', ...]
+
+.. warning::
+
+ Each CloudFront invalidation request can only specify up to 1000 paths. If
+ you need to invalidate more than 1000 paths you will need to split up the
+ paths into groups of 1000 or less and create multiple invalidation requests.
+
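+One way to stay under that limit is to batch the paths before submitting
+them (a minimal sketch, reusing the connection and distribution id from the
+example above)::
+
+    >>> for i in range(0, len(paths), 1000):
+    >>>     c.create_invalidation_request(u'ECH69MOIW7613', paths[i:i + 1000])
+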
+This will return a :class:`boto.cloudfront.invalidation.InvalidationBatch`
+object representing the invalidation request. You can also fetch a single
+invalidation request for a given distribution using
+``invalidation_request_status``::
+
+ >>> inval_req = c.invalidation_request_status(u'ECH69MOIW7613', u'IFCT7K03VUETK')
+ >>> print inval_req
+ <InvalidationBatch: IFCT7K03VUETK>
+
+The first parameter is the CloudFront distribution id the request belongs to
+and the second parameter is the invalidation request id.
+
+It's also possible to get *all* invalidations for a given CloudFront
+distribution::
+
+ >>> invals = c.get_invalidation_requests(u'ECH69MOIW7613')
+ >>> print invals
+ <boto.cloudfront.invalidation.InvalidationListResultSet instance at 0x15d28d0>
+
+This will return an instance of
+:class:`boto.cloudfront.invalidation.InvalidationListResultSet` which is an
+iterable object that contains a list of
+:class:`boto.cloudfront.invalidation.InvalidationSummary` objects that describe
+each invalidation request and its status::
+
+ >>> for inval in invals:
+ >>> print 'Object: %s, ID: %s, Status: %s' % (inval, inval.id, inval.status)
+ Object: <InvalidationSummary: ICXT2K02SUETK>, ID: ICXT2K02SUETK, Status: Completed
+ Object: <InvalidationSummary: ITV9SV0PDNY1Y>, ID: ITV9SV0PDNY1Y, Status: Completed
+ Object: <InvalidationSummary: I1X3F6N0PLGJN5>, ID: I1X3F6N0PLGJN5, Status: Completed
+ Object: <InvalidationSummary: I1F3G9N0ZLGKN2>, ID: I1F3G9N0ZLGKN2, Status: Completed
+ ...
+
+Simply iterating over the
+:class:`boto.cloudfront.invalidation.InvalidationListResultSet` object will
+automatically paginate the results on-the-fly as needed by repeatedly
+requesting more results from CloudFront until there are none left.
+
+If you wish to paginate the results manually you can do so by specifying the
+``max_items`` option when calling ``get_invalidation_requests``::
+
+ >>> invals = c.get_invalidation_requests(u'ECH69MOIW7613', max_items=2)
+ >>> print len(list(invals))
+ 2
+ >>> for inval in invals:
+ >>> print 'Object: %s, ID: %s, Status: %s' % (inval, inval.id, inval.status)
+ Object: <InvalidationSummary: ICXT2K02SUETK>, ID: ICXT2K02SUETK, Status: Completed
+ Object: <InvalidationSummary: ITV9SV0PDNY1Y>, ID: ITV9SV0PDNY1Y, Status: Completed
+
+In this case, iterating over the
+:class:`boto.cloudfront.invalidation.InvalidationListResultSet` object will
+*only* make a single request to CloudFront and *only* ``max_items``
+invalidation requests are returned by the iterator. To get the next "page" of
+results pass the ``next_marker`` attribute of the previous
+:class:`boto.cloudfront.invalidation.InvalidationListResultSet` object as the
+``marker`` option to the next call to ``get_invalidation_requests``::
+
+ >>> invals = c.get_invalidation_requests(u'ECH69MOIW7613', max_items=10, marker=invals.next_marker)
+ >>> print len(list(invals))
+ 2
+ >>> for inval in invals:
+ >>> print 'Object: %s, ID: %s, Status: %s' % (inval, inval.id, inval.status)
+ Object: <InvalidationSummary: I1X3F6N0PLGJN5>, ID: I1X3F6N0PLGJN5, Status: Completed
+ Object: <InvalidationSummary: I1F3G9N0ZLGKN2>, ID: I1F3G9N0ZLGKN2, Status: Completed
+
+You can get the :class:`boto.cloudfront.invalidation.InvalidationBatch` object
+representing the invalidation request pointed to by a
+:class:`boto.cloudfront.invalidation.InvalidationSummary` object using::
+
+ >>> inval_req = inval.get_invalidation_request()
+ >>> print inval_req
+ <InvalidationBatch: IFCT7K03VUETK>
+
+Similarly, you can get the parent
+:class:`boto.cloudfront.distribution.Distribution` object for the invalidation
+request from a :class:`boto.cloudfront.invalidation.InvalidationSummary` object
+using::
+
+ >>> dist = inval.get_distribution()
+ >>> print dist
+ <boto.cloudfront.distribution.Distribution instance at 0x304a7e8>
diff --git a/docs/source/cloudsearch_tut.rst b/docs/source/cloudsearch_tut.rst
new file mode 100644
index 0000000..6916eac
--- /dev/null
+++ b/docs/source/cloudsearch_tut.rst
@@ -0,0 +1,264 @@
.. _cloudsearch_tut:
+
+===============================================
+An Introduction to boto's Cloudsearch interface
+===============================================
+
+This tutorial focuses on the boto interface to AWS' Cloudsearch_. This tutorial
+assumes that you have boto already downloaded and installed.
+
+.. _Cloudsearch: http://aws.amazon.com/cloudsearch/
+
+Creating a Domain
+-----------------
+
+ >>> import boto
+
+ >>> our_ip = '192.168.1.0'
+
+ >>> conn = boto.connect_cloudsearch()
+ >>> domain = conn.create_domain('demo')
+
+ >>> # Allow our IP address to access the document and search services
+ >>> policy = domain.get_access_policies()
+ >>> policy.allow_search_ip(our_ip)
+ >>> policy.allow_doc_ip(our_ip)
+
+    >>> # Create a 'text' index field called 'username'
+ >>> uname_field = domain.create_index_field('username', 'text')
+
+ >>> # But it would be neat to drill down into different countries
+ >>> loc_field = domain.create_index_field('location', 'text', facet=True)
+
+ >>> # Epoch time of when the user last did something
+ >>> time_field = domain.create_index_field('last_activity', 'uint', default=0)
+
+ >>> follower_field = domain.create_index_field('follower_count', 'uint', default=0)
+
+    >>> # We'll want to be able to just show the most recently active users
+    >>> domain.create_rank_expression('recently_active', 'last_activity')
+
+    >>> # Let's get trickier and combine text relevance with a really
+    >>> # dynamic expression
+    >>> domain.create_rank_expression('activish',
+            'text_relevance + ((follower_count/(time() - last_activity))*1000)')
+
+Viewing and Adjusting Stemming for a Domain
+--------------------------------------------
+
+A stemming dictionary maps related words to a common stem. A stem is
+typically the root or base word from which variants are derived. For
+example, run is the stem of running and ran. During indexing, Amazon
+CloudSearch uses the stemming dictionary when it performs
+text-processing on text fields. At search time, the stemming
+dictionary is used to perform text-processing on the search
+request. This enables matching on variants of a word. For example, if
+you map the term running to the stem run and then search for running,
+the request matches documents that contain run as well as running.
+
+To get the current stemming dictionary defined for a domain, use the
+``get_stemming`` method of the Domain object.
+
+ >>> stems = domain.get_stemming()
+ >>> stems
+ {u'stems': {}}
+ >>>
+
+This returns a dictionary object that can be manipulated directly to
+add additional stems for your search domain by adding pairs of term:stem
+to the stems dictionary.
+
+ >>> stems['stems']['running'] = 'run'
+ >>> stems['stems']['ran'] = 'run'
+ >>> stems
+ {u'stems': {u'ran': u'run', u'running': u'run'}}
+ >>>
+
+This has changed the value locally. To update the information in
+Amazon CloudSearch, you need to save the data.
+
+ >>> stems.save()
+
+You can also access certain CloudSearch-specific attributes related to
+the stemming dictionary defined for your domain.
+
+ >>> stems.status
+ u'RequiresIndexDocuments'
+ >>> stems.creation_date
+ u'2012-05-01T12:12:32Z'
+ >>> stems.update_date
+ u'2012-05-01T12:12:32Z'
+ >>> stems.update_version
+ 19
+ >>>
+
+The status indicates that, because you have changed the stems associated
+with the domain, you will need to re-index the documents in the domain
+before the new stems are used.
+
+Viewing and Adjusting Stopwords for a Domain
+--------------------------------------------
+
+Stopwords are words that should typically be ignored both during
+indexing and at search time because they are either insignificant or
+so common that including them would result in a massive number of
+matches.
+
+To view the stopwords currently defined for your domain, use the
+``get_stopwords`` method of the Domain object.
+
+ >>> stopwords = domain.get_stopwords()
+ >>> stopwords
+ {u'stopwords': [u'a',
+ u'an',
+ u'and',
+ u'are',
+ u'as',
+ u'at',
+ u'be',
+ u'but',
+ u'by',
+ u'for',
+ u'in',
+ u'is',
+ u'it',
+ u'of',
+ u'on',
+ u'or',
+ u'the',
+ u'to',
+ u'was']}
+ >>>
+
+You can add additional stopwords by simply appending the values to the
+list.
+
+ >>> stopwords['stopwords'].append('foo')
+ >>> stopwords['stopwords'].append('bar')
+ >>> stopwords
+
+Similarly, you could remove currently defined stopwords from the list.
+To save the changes, use the ``save`` method.
+
+ >>> stopwords.save()
+
+The stopwords object has the same attributes described above for stemming,
+providing additional information about the stopwords in your domain.
+
+
+Viewing and Adjusting Synonyms for a Domain
+--------------------------------------------
+
+You can configure synonyms for terms that appear in the data you are
+searching. That way, if a user searches for the synonym rather than
+the indexed term, the results will include documents that contain the
+indexed term.
+
+If you want two terms to match the same documents, you must define
+them as synonyms of each other. For example::
+
+ cat, feline
+ feline, cat
+
+To view the synonyms currently defined for your domain, use the
+``get_synonyms`` method of the Domain object.
+
+    >>> synonyms = domain.get_synonyms()
+ >>> synonyms
+ {u'synonyms': {}}
+ >>>
+
+You can define new synonyms by adding new term:synonyms entries to the
+synonyms dictionary object.
+
+ >>> synonyms['synonyms']['cat'] = ['feline', 'kitten']
+ >>> synonyms['synonyms']['dog'] = ['canine', 'puppy']
+
+To save the changes, use the ``save`` method.
+
+ >>> synonyms.save()
+
+The synonyms object has the same attributes described above for stemming,
+providing additional information about the synonyms in your domain.
+
+Adding Documents to the Index
+-----------------------------
+
+Now, we can add some documents to our new search domain.
+
+ >>> doc_service = domain.get_document_service()
+
+ >>> # Presumably get some users from your db of choice.
+ >>> users = [
+ {
+ 'id': 1,
+ 'username': 'dan',
+ 'last_activity': 1334252740,
+ 'follower_count': 20,
+ 'location': 'USA'
+ },
+ {
+ 'id': 2,
+ 'username': 'dankosaur',
+ 'last_activity': 1334252904,
+ 'follower_count': 1,
+ 'location': 'UK'
+ },
+ {
+ 'id': 3,
+ 'username': 'danielle',
+ 'last_activity': 1334252969,
+ 'follower_count': 100,
+ 'location': 'DE'
+ },
+ {
+ 'id': 4,
+ 'username': 'daniella',
+ 'last_activity': 1334253279,
+ 'follower_count': 7,
+ 'location': 'USA'
+ }
+ ]
+
+ >>> for user in users:
+ >>> doc_service.add(user['id'], user['last_activity'], user)
+
+ >>> result = doc_service.commit() # Actually post the SDF to the document service
+
+The result is an instance of `cloudsearch.CommitResponse`, which wraps
+the plain dictionary response in a nicer object (i.e. ``result.adds``,
+``result.deletes``) and raises an exception for us if any of our
+documents weren't actually committed.
+
+
+Searching Documents
+-------------------
+
+Now, let's try performing a search.
+
+ >>> # Get an instance of cloudsearch.SearchServiceConnection
+ >>> search_service = domain.get_search_service()
+
+    >>> # Hooray, wildcard search
+ >>> query = "username:'dan*'"
+
+
+ >>> results = search_service.search(bq=query, rank=['-recently_active'], start=0, size=10)
+
+ >>> # Results will give us back a nice cloudsearch.SearchResults object that looks as
+ >>> # close as possible to pysolr.Results
+
+ >>> print "Got %s results back." % results.hits
+ >>> print "User ids are:"
+ >>> for result in results:
+ >>> print result['id']
+
+
+Deleting Documents
+------------------
+
+ >>> import time
+ >>> from datetime import datetime
+
+ >>> doc_service = domain.get_document_service()
+
+ >>> # Again we'll cheat and use the current epoch time as our version number
+
+ >>> doc_service.delete(4, int(time.mktime(datetime.utcnow().timetuple())))
+    >>> doc_service.commit()
diff --git a/docs/source/cloudwatch_tut.rst b/docs/source/cloudwatch_tut.rst
new file mode 100644
index 0000000..5639c04
--- /dev/null
+++ b/docs/source/cloudwatch_tut.rst
@@ -0,0 +1,116 @@
.. _cloudwatch_tut:
+
+==========
+CloudWatch
+==========
+
+First, make sure you have something to monitor. You can either create a
+LoadBalancer or enable monitoring on an existing EC2 instance. To enable
+monitoring, you can either call the monitor_instance method on the
+EC2Connection object or call the monitor method on the Instance object.
+
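+For example (a sketch; ``i-e573e68c`` is simply the instance id used later
+in this tutorial)::
+
+    >>> import boto
+    >>> ec2 = boto.connect_ec2()
+    >>> ec2.monitor_instance('i-e573e68c')
+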
+It takes a while for the monitoring data to start accumulating but once
+it does, you can do this::
+
+ >>> import boto
+ >>> c = boto.connect_cloudwatch()
+ >>> metrics = c.list_metrics()
+ >>> metrics
+ [Metric:NetworkIn,
+ Metric:NetworkOut,
+ Metric:NetworkOut(InstanceType,m1.small),
+ Metric:NetworkIn(InstanceId,i-e573e68c),
+ Metric:CPUUtilization(InstanceId,i-e573e68c),
+ Metric:DiskWriteBytes(InstanceType,m1.small),
+ Metric:DiskWriteBytes(ImageId,ami-a1ffb63),
+ Metric:NetworkOut(ImageId,ami-a1ffb63),
+ Metric:DiskWriteOps(InstanceType,m1.small),
+ Metric:DiskReadBytes(InstanceType,m1.small),
+ Metric:DiskReadOps(ImageId,ami-a1ffb63),
+ Metric:CPUUtilization(InstanceType,m1.small),
+ Metric:NetworkIn(ImageId,ami-a1ffb63),
+ Metric:DiskReadOps(InstanceType,m1.small),
+ Metric:DiskReadBytes,
+ Metric:CPUUtilization,
+ Metric:DiskWriteBytes(InstanceId,i-e573e68c),
+ Metric:DiskWriteOps(InstanceId,i-e573e68c),
+ Metric:DiskWriteOps,
+ Metric:DiskReadOps,
+ Metric:CPUUtilization(ImageId,ami-a1ffb63),
+ Metric:DiskReadOps(InstanceId,i-e573e68c),
+ Metric:NetworkOut(InstanceId,i-e573e68c),
+ Metric:DiskReadBytes(ImageId,ami-a1ffb63),
+ Metric:DiskReadBytes(InstanceId,i-e573e68c),
+ Metric:DiskWriteBytes,
+ Metric:NetworkIn(InstanceType,m1.small),
+ Metric:DiskWriteOps(ImageId,ami-a1ffb63)]
+
+The list_metrics call will return a list of all of the available metrics
+that you can query against. Each entry in the list is a Metric object.
+As you can see from the list above, some of the metrics are generic metrics
+and some have Dimensions associated with them (e.g. InstanceType=m1.small).
+The Dimension can be used to refine your query. For example, I could query
+the metric Metric:CPUUtilization, which would create the desired statistic
+by aggregating CPU utilization data across all available sources. Or, I
+could refine that by querying the metric
+Metric:CPUUtilization(InstanceId,i-e573e68c), which would use only the data
+associated with the instance identified by the instance ID i-e573e68c.
+
+Because, for this example, I'm only monitoring a single instance, the set
+of metrics available to me is fairly limited. If I were monitoring many
+instances, using many different instance types and AMIs, and also several
+load balancers, the list of available metrics would grow considerably.
+
+Once you have the list of available metrics, you can actually
+query the CloudWatch system for that metric. Let's choose the CPU utilization
+metric for our instance::
+
+ >>> m = metrics[5]
+ >>> m
+ Metric:CPUUtilization(InstanceId,i-e573e68c)
+
+The Metric object has a query method that lets us actually perform
+the query against the collected data in CloudWatch. To call that,
+we need a start time and end time to control the time span of data
+that we are interested in. For this example, let's say we want the
+data for the previous hour::
+
+ >>> import datetime
+ >>> end = datetime.datetime.now()
+ >>> start = end - datetime.timedelta(hours=1)
+
+We also need to supply the Statistic that we want reported and
+the Units to use for the results. The Statistic can be one of these
+values::
+
+ ['Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount']
+
+And Units must be one of the following::
+
+ ['Seconds', 'Percent', 'Bytes', 'Bits', 'Count',
+ 'Bytes/Second', 'Bits/Second', 'Count/Second']
+
+The query method also takes an optional parameter, period. This
+parameter controls the granularity (in seconds) of the data returned.
+The smallest period is 60 seconds and the value must be a multiple
+of 60 seconds. So, let's ask for the average as a percent::
+
+ >>> datapoints = m.query(start, end, 'Average', 'Percent')
+ >>> len(datapoints)
+ 60
+
+Our period was 60 seconds and our duration was one hour, so we should get
+60 data points back, and we can see that we did.
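+
+For example, to get five-minute averages instead, the same call can be made
+with the optional ``period`` argument (a sketch)::
+
+    >>> datapoints = m.query(start, end, 'Average', 'Percent', period=300)
+    >>> len(datapoints)
+    12
+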
+Each element in the datapoints list is a DataPoint object
+which is a simple subclass of a Python dict object. Each
+DataPoint object contains all of the information available
+about that particular data point::
+
+ >>> d = datapoints[0]
+ >>> d
+ {u'Average': 0.0,
+ u'SampleCount': 1.0,
+ u'Timestamp': u'2009-05-21T19:55:00Z',
+ u'Unit': u'Percent'}
+
+My server obviously isn't very busy right now!
\ No newline at end of file
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 459c44f..fa1d0c2 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
-import sys, os
+import os
+import boto
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo']
autoclass_content="both"
@@ -9,7 +10,7 @@
master_doc = 'index'
project = u'boto'
copyright = u'2009,2010, Mitch Garnaat'
-version = '2.0'
+version = boto.__version__
exclude_trees = []
pygments_style = 'sphinx'
html_theme = 'boto_theme'
@@ -28,4 +29,4 @@
except Exception, e:
print e
-html_title = "boto v%s (r%s)" % (version, release)
+html_title = "boto v%s" % version
diff --git a/docs/source/contributing.rst b/docs/source/contributing.rst
new file mode 100644
index 0000000..ac9305a
--- /dev/null
+++ b/docs/source/contributing.rst
@@ -0,0 +1,204 @@
+====================
+Contributing to Boto
+====================
+
+
+Setting Up a Development Environment
+====================================
+
+While not strictly required, it is highly recommended to do development
+in a virtualenv. You can install virtualenv using pip::
+
+ $ pip install virtualenv
+
+Once the package is installed, you'll have a ``virtualenv`` command you can
+use to create a virtual environment::
+
+ $ virtualenv venv
+
+You can then activate the virtualenv::
+
+ $ . venv/bin/activate
+
+.. note::
+
+ You may also want to check out virtualenvwrapper_, which is a set of
+ extensions to virtualenv that makes it easy to manage multiple virtual
+ environments.
+
+A requirements.txt is included with boto which contains all the additional
+packages needed for boto development. You can install these packages by
+running::
+
+ $ pip install -r requirements.txt
+
+
+Running the Tests
+=================
+
+All of the tests for boto are under the ``tests/`` directory. The tests for
+boto have been split into two main categories, unit and integration tests:
+
+* **unit** - These are tests that do not talk to any AWS services. Anyone
+  should be able to run these tests without having any credentials
+ configured. These are the types of tests that could be run in something
+ like a public CI server. These tests tend to be fast.
+
+* **integration** - These are tests that will talk to AWS services, and
+ will typically require a boto config file with valid credentials.
+ Due to the nature of these tests, they tend to take a while to run.
+  Also keep in mind that anyone who runs these tests will incur any usage
+ fees associated with the various AWS services.
+
+To run all the unit tests, cd to the ``tests/`` directory and run::
+
+ $ python test.py unit
+
+You should see output like this::
+
+ $ python test.py unit
+ ................................
+ ----------------------------------------------------------------------
+ Ran 32 tests in 0.075s
+
+ OK
+
+To run the integration tests, run::
+
+ $ python test.py integration
+
+Note that running the integration tests may take a while.
+
+Various integration tests have been tagged with service names to allow
+you to easily run tests by service type. For example, to run the ec2
+integration tests you can run::
+
+ $ python test.py -t ec2
+
+You can specify the ``-t`` argument multiple times. For example, to
+run the s3 and ec2 tests you can run::
+
+ $ python test.py -t ec2 -t s3
+
+.. warning::
+
+ In the examples above no top level directory was specified. By default,
+ nose will assume the current working directory, so the above command is
+ equivalent to::
+
+ $ python test.py -t ec2 -t s3 .
+
+ Be sure that you are in the ``tests/`` directory when running the tests,
+  or explicitly specify the top level directory. For example, if you are
+  in the root directory of the boto repo, you could run the ec2 and s3
+  tests by running::
+
+ $ python tests/test.py -t ec2 -t s3 tests/
+
+
+You can use nose's collect plugin to see what tests are associated with each
+service tag::
+
+    $ python test.py -t s3 -t ec2 --with-id --collect -v
+
+
+Testing Details
+---------------
+
+The ``tests/test.py`` script is a lightweight wrapper around nose_. In
+general, you should be able to run ``nosetests`` directly instead of
+``tests/test.py``. The ``tests/unit`` and ``tests/integration`` args
+in the commands above were referring to directories. The command line
+arguments are forwarded to nose when you use ``tests/test.py``. For example,
+you can run::
+
+ $ python tests/test.py -x -vv tests/unit/cloudformation
+
+And the ``-x -vv tests/unit/cloudformation`` are forwarded to nose. See
+the nose_ docs for the supported command line options, or run
+``nosetests --help``.
+
+The only thing that ``tests/test.py`` does before invoking nose is to
+inject an argument that specifies that any testcase tagged with "notdefault"
+should not be run. A testcase may be tagged with "notdefault" if the test
+author does not want everyone to run the tests. In general, there shouldn't be
+many of these tests, but some reasons a test may be tagged "notdefault"
+include:
+
+* An integration test that requires specific credentials.
+* An interactive test (the S3 MFA tests require you to type in the S/N and
+ code).
+
+Tagging is done using nose's tagging_ plugin. To summarize, you can tag a
+specific testcase by setting an attribute on the object. Nose provides
+an ``attr`` decorator for convenience::
+
+ from nose.plugins.attrib import attr
+
+ @attr('notdefault')
+    def test_s3_mfa():
+ pass
+
+You can then run these tests by specifying::
+
+ nosetests -a 'notdefault'
+
+Or you can exclude any tests tagged with 'notdefault' by running::
+
+ nosetests -a '!notdefault'
+
+Conceptually, ``tests/test.py`` is injecting the "-a !notdefault" arg
+into nosetests.
+
+
+Testing Supported Python Versions
+==================================
+
+Boto supports Python 2.6 and 2.7. An easy way to verify functionality
+across multiple python versions is to use tox_. A tox.ini file is included
+with boto. You can run tox with no args and it will automatically test
+all supported python versions::
+
+ $ tox
+ GLOB sdist-make: boto/setup.py
+ py26 sdist-reinst: boto/.tox/dist/boto-2.4.1.zip
+ py26 runtests: commands[0]
+ ................................
+ ----------------------------------------------------------------------
+ Ran 32 tests in 0.089s
+
+ OK
+ py27 sdist-reinst: boto/.tox/dist/boto-2.4.1.zip
+ py27 runtests: commands[0]
+ ................................
+ ----------------------------------------------------------------------
+ Ran 32 tests in 0.087s
+
+ OK
+ ____ summary ____
+ py26: commands succeeded
+ py27: commands succeeded
+ congratulations :)
+
+
+Writing Documentation
+=====================
+
+The boto docs use sphinx_ to generate documentation. All of the docs are
+located in the ``docs/`` directory. To generate the html documentation, cd
+into the docs directory and run ``make html``::
+
+ $ cd docs
+ $ make html
+
+The generated documentation will be in the ``docs/build/html`` directory.
+The source for the documentation is located in the ``docs/source`` directory,
+and uses `restructured text`_ as the markup language.
+
+
+.. _nose: http://readthedocs.org/docs/nose/en/latest/
+.. _tagging: http://nose.readthedocs.org/en/latest/plugins/attrib.html
+.. _tox: http://tox.testrun.org/latest/
+.. _virtualenvwrapper: http://www.doughellmann.com/projects/virtualenvwrapper/
+.. _sphinx: http://sphinx.pocoo.org/
+.. _restructured text: http://sphinx.pocoo.org/rest.html
diff --git a/docs/source/dynamodb_tut.rst b/docs/source/dynamodb_tut.rst
new file mode 100644
index 0000000..3e64122
--- /dev/null
+++ b/docs/source/dynamodb_tut.rst
@@ -0,0 +1,240 @@
+.. dynamodb_tut:
+
+============================================
+An Introduction to boto's DynamoDB interface
+============================================
+
+This tutorial focuses on the boto interface to AWS' DynamoDB_. This tutorial
+assumes that you have boto already downloaded and installed.
+
+.. _DynamoDB: http://aws.amazon.com/dynamodb/
+
+Creating a Connection
+---------------------
+
+The first step in accessing DynamoDB is to create a connection to the service.
+The most straightforward way to do so is the following::
+
+ >>> import boto
+ >>> conn = boto.connect_dynamodb(
+ aws_access_key_id='<YOUR_AWS_KEY_ID>',
+ aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
+ >>> conn
+ <boto.dynamodb.layer2.Layer2 object at 0x3fb3090>
+
+Bear in mind that if you have your credentials in the boto config file in
+your home directory, the two keyword arguments in the call above are not
+needed. More
+details on configuration can be found in :doc:`boto_config_tut`.
+
+.. note:: At this time, Amazon DynamoDB is available only in the US-EAST-1
+   region. The ``connect_dynamodb`` method automatically connects to that
+   region.
+
+The :py:func:`boto.connect_dynamodb` function returns a
+:py:class:`boto.dynamodb.layer2.Layer2` instance, which is a high-level API
+for working with DynamoDB. Layer2 is a set of abstractions that sit atop
+the lower level :py:class:`boto.dynamodb.layer1.Layer1` API, which closely
+mirrors the Amazon DynamoDB API. For the purpose of this tutorial, we'll
+just be covering Layer2.
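+
+If you ever need the lower-level calls, the ``Layer2`` object keeps a
+reference to its underlying ``Layer1`` instance (a brief illustration; the
+output shown here is made up)::
+
+    >>> conn.layer1
+    <boto.dynamodb.layer1.Layer1 object at 0x3fb30d0>
+    >>> conn.layer1.list_tables()
+    {u'TableNames': [u'test-table', u'another-table']}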
+
+Listing Tables
+--------------
+
+Now that we have a DynamoDB connection object, we can then query for a list of
+existing tables in that region::
+
+ >>> conn.list_tables()
+ ['test-table', 'another-table']
+
+Creating Tables
+---------------
+
+DynamoDB tables are created with the
+:py:meth:`Layer2.create_table <boto.dynamodb.layer2.Layer2.create_table>`
+method. While DynamoDB's items (a rough equivalent to a relational DB's row)
+don't have a fixed schema, you do need to create a schema for the table's
+hash key element, and the optional range key element. This is explained in
+greater detail in DynamoDB's `Data Model`_ documentation.
+
+We'll start by defining a schema that has a hash key and a range key, both
+of which are strings::
+
+ >>> message_table_schema = conn.create_schema(
+ hash_key_name='forum_name',
+ hash_key_proto_value='S',
+ range_key_name='subject',
+ range_key_proto_value='S'
+ )
+
+The next few things to determine are the table name and the read/write
+throughput. We'll defer explaining throughput to DynamoDB's
+`Provisioned Throughput`_ docs.
+
+We're now ready to create the table::
+
+ >>> table = conn.create_table(
+ name='messages',
+ schema=message_table_schema,
+ read_units=10,
+ write_units=10
+ )
+ >>> table
+ Table(messages)
+
+This returns a :py:class:`boto.dynamodb.table.Table` instance, which provides
+simple ways to create (put), update, and delete items.
+
+.. _Data Model: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/DataModel.html
+.. _Provisioned Throughput: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html
+
+Getting a Table
+---------------
+
+To retrieve an existing table, use
+:py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`::
+
+ >>> conn.list_tables()
+ ['test-table', 'another-table', 'messages']
+ >>> table = conn.get_table('messages')
+ >>> table
+ Table(messages)
+
+:py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`, like
+:py:meth:`Layer2.create_table <boto.dynamodb.layer2.Layer2.create_table>`,
+returns a :py:class:`boto.dynamodb.table.Table` instance.
+
+Describing Tables
+-----------------
+
+To get a complete description of a table, use
+:py:meth:`Layer2.describe_table <boto.dynamodb.layer2.Layer2.describe_table>`::
+
+ >>> conn.list_tables()
+ ['test-table', 'another-table', 'messages']
+ >>> conn.describe_table('messages')
+ {
+ 'Table': {
+ 'CreationDateTime': 1327117581.624,
+ 'ItemCount': 0,
+ 'KeySchema': {
+ 'HashKeyElement': {
+ 'AttributeName': 'forum_name',
+ 'AttributeType': 'S'
+ },
+ 'RangeKeyElement': {
+ 'AttributeName': 'subject',
+ 'AttributeType': 'S'
+ }
+ },
+ 'ProvisionedThroughput': {
+ 'ReadCapacityUnits': 10,
+ 'WriteCapacityUnits': 10
+ },
+ 'TableName': 'messages',
+ 'TableSizeBytes': 0,
+ 'TableStatus': 'ACTIVE'
+ }
+ }
+
+Adding Items
+------------
+
+Continuing with our previously created ``messages`` table, we can add an item::
+
+ >>> table = conn.get_table('messages')
+ >>> item_data = {
+ 'Body': 'http://url_to_lolcat.gif',
+ 'SentBy': 'User A',
+ 'ReceivedTime': '12/9/2011 11:36:03 PM',
+ }
+ >>> item = table.new_item(
+ # Our hash key is 'forum'
+ hash_key='LOLCat Forum',
+ # Our range key is 'subject'
+ range_key='Check this out!',
+        # These are the remaining item attributes
+ attrs=item_data
+ )
+
+The
+:py:meth:`Table.new_item <boto.dynamodb.table.Table.new_item>` method creates
+a new :py:class:`boto.dynamodb.item.Item` instance with your specified
+hash key, range key, and attributes already set.
+:py:class:`Item <boto.dynamodb.item.Item>` is a :py:class:`dict` sub-class,
+meaning you can edit your data as such::
+
+ item['a_new_key'] = 'testing'
+ del item['a_new_key']
+
+After you are happy with the contents of the item, use
+:py:meth:`Item.put <boto.dynamodb.item.Item.put>` to commit it to DynamoDB::
+
+ >>> item.put()
+
+Retrieving Items
+----------------
+
+Now, let's check that the item was added correctly. Since DynamoDB reads are
+only eventually consistent by default, we explicitly request a consistent
+read, as follows::
+
+ >>> table = conn.get_table('messages')
+    >>> item = table.get_item(
+        # Your hash key was 'forum_name'
+        hash_key='LOLCat Forum',
+        # Your range key was 'subject'
+        range_key='Check this out!',
+        # Ask DynamoDB for a strongly consistent read
+        consistent_read=True
+    )
+ >>> item
+ {
+ # Note that this was your hash key attribute (forum_name)
+ 'forum_name': 'LOLCat Forum',
+ # This is your range key attribute (subject)
+        'subject': 'Check this out!',
+ 'Body': 'http://url_to_lolcat.gif',
+ 'ReceivedTime': '12/9/2011 11:36:03 PM',
+ 'SentBy': 'User A',
+ }
+
+Updating Items
+--------------
+
+To update an item's attributes, simply retrieve it, modify the value, then
+:py:meth:`Item.put <boto.dynamodb.item.Item.put>` it again::
+
+ >>> table = conn.get_table('messages')
+ >>> item = table.get_item(
+ hash_key='LOLCat Forum',
+ range_key='Check this out!'
+ )
+ >>> item['SentBy'] = 'User B'
+ >>> item.put()
+
+Deleting Items
+--------------
+
+To delete items, use the
+:py:meth:`Item.delete <boto.dynamodb.item.Item.delete>` method::
+
+ >>> table = conn.get_table('messages')
+ >>> item = table.get_item(
+ hash_key='LOLCat Forum',
+ range_key='Check this out!'
+ )
+ >>> item.delete()
+
+Deleting Tables
+---------------
+
+.. WARNING::
+ Deleting a table will also **permanently** delete all of its contents without prompt. Use carefully.
+
+There are two easy ways to delete a table. Through your top-level
+:py:class:`Layer2 <boto.dynamodb.layer2.Layer2>` object::
+
+ >>> conn.delete_table(table)
+
+Or by getting the table, then using
+:py:meth:`Table.delete <boto.dynamodb.table.Table.delete>`::
+
+ >>> table = conn.get_table('messages')
+ >>> table.delete()
diff --git a/docs/source/ec2_tut.rst b/docs/source/ec2_tut.rst
index 6326243..f8614db 100644
--- a/docs/source/ec2_tut.rst
+++ b/docs/source/ec2_tut.rst
@@ -10,411 +10,81 @@
Creating a Connection
---------------------
-The first step in accessing EC2 is to create a connection to the service.
-There are two ways to do this in boto. The first is:
->>> from boto.ec2.connection import EC2Connection
->>> conn = EC2Connection('<aws access key>', '<aws secret key>')
+The first step in accessing EC2 is to create a connection to the service.
+There are two ways to do this in boto. The first is::
+
+ >>> from boto.ec2.connection import EC2Connection
+ >>> conn = EC2Connection('<AWS_ACCESS_KEY_ID>', '<AWS_SECRET_ACCESS_KEY>')
At this point the variable conn will point to an EC2Connection object. In
this example, the AWS access key and AWS secret key are passed in to the
-method explicitely. Alternatively, you can set the environment variables:
+method explicitly. Alternatively, you can set the boto config environment variables
+and then call the constructor without any arguments, like this::
-AWS_ACCESS_KEY_ID - Your AWS Access Key ID
-AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key
-
-and then call the constructor without any arguments, like this:
-
->>> conn = EC2Connection()
+ >>> conn = EC2Connection()
There is also a shortcut function in the boto package, called connect_ec2
-that may provide a slightly easier means of creating a connection:
+that may provide a slightly easier means of creating a connection::
->>> import boto
->>> conn = boto.connect_ec2()
+ >>> import boto
+ >>> conn = boto.connect_ec2()
In either case, conn will point to an EC2Connection object which we will
use throughout the remainder of this tutorial.
-A Note About Regions
---------------------
-The 2008-12-01 version of the EC2 API introduced the idea of Regions.
-A Region is geographically distinct and is completely isolated from
-other EC2 Regions. At the time of the launch of the 2008-12-01 API
-there were two available regions, us-east-1 and eu-west-1. Each
-Region has it's own service endpoint and therefore would require
-it's own EC2Connection object in boto.
+Launching Instances
+-------------------
-The default behavior in boto, as shown above, is to connect you with
-the us-east-1 region which is exactly the same as the behavior prior
-to the introduction of Regions.
+Possibly the most important and common task you'll use EC2 for is to launch,
+stop, and terminate instances. In its most primitive form, you can launch an
+instance as follows::
-However, if you would like to connect to a region other than us-east-1,
-there are a couple of ways to accomplish that. The first way, is to
-as EC2 to provide a list of currently supported regions. You can do
-that using the regions function in the boto.ec2 module:
+ >>> conn.run_instances('<ami-image-id>')
+
+This will launch an instance in the connection's region with the default parameters.
+You will not be able to SSH into this machine, as it doesn't have a security
+group set. See :doc:`security_groups` for details on creating one.
->>> import boto.ec2
->>> regions = boto.ec2.regions()
->>> regions
-[RegionInfo:eu-west-1, RegionInfo:us-east-1]
->>>
+Now, let's say that you already have a key pair, want a specific type of
+instance, and you have your :doc:`security group <security_groups>` all setup.
+In this case we can use the keyword arguments to accomplish that::
-As you can see, a list of available regions is returned. Each region
-is represented by a RegionInfo object. A RegionInfo object has two
-attributes; a name and an endpoint.
+ >>> conn.run_instances(
+ '<ami-image-id>',
+ key_name='myKey',
+ instance_type='c1.xlarge',
+ security_groups=['your-security-group-here'])
->>> eu = regions[0]
->>> eu.name
-u'eu-west-1'
->>> eu.endpoint
-u'eu-west-1.ec2.amazonaws.com'
->>>
+The main caveat with the above call is that it is possible to request an
+instance type that is not compatible with the provided AMI (for example, the
+AMI targets a 64-bit platform and you choose an m1.small instance_type).
+For more details on the plethora of possible keyword parameters, be sure to
+check out boto's :doc:`EC2 API reference <ref/ec2>`.
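+
+``run_instances`` returns a reservation object. A common follow-up, sketched
+below with a placeholder AMI id, is to grab the instance from the reservation
+and poll until it leaves the pending state::
+
+    >>> import time
+    >>> reservation = conn.run_instances('<ami-image-id>')
+    >>> instance = reservation.instances[0]
+    >>> while instance.state == u'pending':
+    ...     time.sleep(5)
+    ...     instance.update()
+    >>> instance.state
+    u'running'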
-You can easily create a connection to a region by using the connect
-method of the RegionInfo object:
-
->>> conn_eu = eu.connect()
->>> conn_eu
-<boto.ec2.connection.EC2Connection instance at 0xccaaa8>
->>>
-
-The variable conn_eu is now bound to an EC2Connection object connected
-to the endpoint of the eu-west-1 region and all operations performed via
-that connection and all objects created by that connection will be scoped
-to the eu-west-1 region. You can always tell which region a connection
-is associated with by accessing it's region attribute:
-
->>> conn_eu.region
-RegionInfo:eu-west-1
->>>
-
-Supporting EC2 objects such as SecurityGroups, KeyPairs, Addresses,
-Volumes, Images and SnapShots are local to a particular region. So
-don't expect to find the security groups you created in the us-east-1
-region to be available in the eu-west-1 region.
-
-Some objects in boto, such as SecurityGroup, have a new method called
-copy_to_region which will attempt to create a copy of the object in
-another region. For example:
-
->>> regions
-[RegionInfo:eu-west-1, RegionInfo:us-east-1]
->>> conn_us = regions[1].connect()
->>> groups = conn_us.get_all_security_groups()
->>> groups
-[SecurityGroup:alfresco, SecurityGroup:apache, SecurityGroup:vnc,
-SecurityGroup:appserver2, SecurityGroup:FTP, SecurityGroup:webserver,
-SecurityGroup:default, SecurityGroup:test-1228851996]
->>> us_group = groups[0]
->>> us_group
-SecurityGroup:alfresco
->>> us_group.rules
-[IPPermissions:tcp(22-22), IPPermissions:tcp(80-80), IPPermissions:tcp(1445-1445)]
->>> eu_group = us_group.copy_to_region(eu)
->>> eu_group.rules
-[IPPermissions:tcp(22-22), IPPermissions:tcp(80-80), IPPermissions:tcp(1445-1445)]
-
-In the above example, we chose one of the security groups available
-in the us-east-1 region (the group alfresco) and copied that security
-group to the eu-west-1 region. All of the rules associated with the
-original security group will be copied as well.
-
-If you would like your default region to be something other than
-us-east-1, you can override that default in your boto config file
-(either ~/.boto for personal settings or /etc/boto.cfg for system-wide
-settings). For example:
-
-[Boto]
-ec2_region_name = eu-west-1
-ec2_region_endpoint = eu-west-1.ec2.amazonaws.com
-
-The above lines added to either boto config file would set the default
-region to be eu-west-1.
-
-Images & Instances
+Stopping Instances
------------------
+Once you have your instances up and running, you might wish to shut them down
+if they're not in use. Please note that this will only de-allocate virtual
+hardware resources (as well as instance store drives), but won't destroy your
+EBS volumes -- this means you'll pay nominal provisioned EBS storage fees
+even if your instance is stopped. You can do so as follows::
-An Image object represents an Amazon Machine Image (AMI) which is an
-encrypted machine image stored in Amazon S3. It contains all of the
-information necessary to boot instances of your software in EC2.
+ >>> conn.stop_instances(instance_ids=['instance-id-1','instance-id-2', ...])
-To get a listing of all available Images:
+This will request a 'graceful' stop of each of the specified instances. If you
+wish to request the equivalent of unplugging your instance(s), simply add the
+``force=True`` keyword argument to the call above. Please note that stopping
+is not allowed for Spot instances.
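+
+For example, to force-stop the first of the placeholder instances above::
+
+    >>> conn.stop_instances(instance_ids=['instance-id-1'], force=True)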
->>> images = conn.get_all_images()
->>> images
-[Image:ami-20b65349, Image:ami-22b6534b, Image:ami-23b6534a, Image:ami-25b6534c, Image:ami-26b6534f, Image:ami-2bb65342, Image:ami-78b15411, Image:ami-a4aa4fcd, Image:ami-c3b550aa, Image:ami-e4b6538d, Image:ami-f1b05598]
->>> for image in images:
-... print image.location
-ec2-public-images/fedora-core4-base.manifest.xml
-ec2-public-images/fedora-core4-mysql.manifest.xml
-ec2-public-images/fedora-core4-apache.manifest.xml
-ec2-public-images/fedora-core4-apache-mysql.manifest.xml
-ec2-public-images/developer-image.manifest.xml
-ec2-public-images/getting-started.manifest.xml
-marcins_cool_public_images/fedora-core-6.manifest.xml
-khaz_fc6_win2003/image.manifest
-aes-images/django.manifest
-marcins_cool_public_images/ubuntu-6.10.manifest.xml
-ckk_public_ec2_images/centos-base-4.4.manifest.xml
+Terminating Instances
+---------------------
+Once you are completely done with your instance and wish to surrender the
+virtual hardware, root EBS volume, and all other underlying components, you
+can request instance termination. To do so, use the call below::
-The most useful thing you can do with an Image is to actually run it, so let's
-run a new instance of the base Fedora image:
+ >>> conn.terminate_instances(instance_ids=['instance-id-1','instance-id-2', ...])
->>> image = images[0]
->>> image.location
-ec2-public-images/fedora-core4-base.manifest.xml
->>> reservation = image.run()
-
-This will begin the boot process for a new EC2 instance. The run method
-returns a Reservation object which represents a collection of instances
-that are all started at the same time. In this case, we only started one
-but you can check the instances attribute of the Reservation object to see
-all of the instances associated with this reservation:
-
->>> reservation.instances
-[Instance:i-6761850e]
->>> instance = reservation.instances[0]
->>> instance.state
-u'pending'
->>>
-
-So, we have an instance booting up that is still in the pending state. We
-can call the update method on the instance to get a refreshed view of it's
-state:
-
->>> instance.update()
->>> instance.state
-u'pending'
->>> # wait a few minutes
->>> instance.update()
->>> instance.state
-u'running'
-
-So, now our instance is running. The time it takes to boot a new instance
-varies based on a number of different factors but usually it takes less than
-five minutes.
-
-Now the instance is up and running you can find out its DNS name like this:
-
->>> instance.dns_name
-u'ec2-72-44-40-153.z-2.compute-1.amazonaws.com'
-
-This provides the public DNS name for your instance. Since the 2007--3-22
-release of the EC2 service, the default addressing scheme for instances
-uses NAT-addresses which means your instance has both a public IP address and a
-non-routable private IP address. You can access each of these addresses
-like this:
-
->>> instance.public_dns_name
-u'ec2-72-44-40-153.z-2.compute-1.amazonaws.com'
->>> instance.private_dns_name
-u'domU-12-31-35-00-42-33.z-2.compute-1.internal'
-
-Even though your instance has a public DNS name, you won't be able to
-access it yet because you need to set up some security rules which are
-described later in this tutorial.
-
-Since you are now being charged for that instance we just created, you will
-probably want to know how to terminate the instance, as well. The simplest
-way is to use the stop method of the Instance object:
-
->>> instance.stop()
->>> instance.update()
->>> instance.state
-u'shutting-down'
->>> # wait a minute
->>> instance.update()
->>> instance.state
-u'terminated'
->>>
-
-When we created our new instance, we didn't pass any args to the run method
-so we got all of the default values. The full set of possible parameters
-to the run method are:
-
-min_count - The minimum number of instances to launch.
-max_count - The maximum number of instances to launch.
-keypair - Keypair to launch instances with (either a KeyPair object or a string with the name of the desired keypair.
-security_groups - A list of security groups to associate with the instance. This can either be a list of SecurityGroup objects or a list of strings with the names of the desired security groups.
-user_data - Data to be made available to the launched instances. This should be base64 encoded according to the EC2 documentation.
-
-So, if I wanted to create two instances of the base image and launch them
-with my keypair, called gsg-keypair, I would to this:
-
->>> reservation.image.run(2,2,'gsg-keypair')
->>> reservation.instances
-[Instance:i-5f618536, Instance:i-5e618537]
->>> for i in reservation.instances:
-... print i.status
-u'pending'
-u'pending'
->>>
-
-Later, when you are finished with the instances you can either stop each
-individually or you can call the stop_all method on the Reservation object:
-
->>> reservation.stop_all()
->>>
-
-If you just want to get a list of all of your running instances, use
-the get_all_instances method of the connection object. Note that the
-list returned is actually a list of Reservation objects (which contain
-the Instances) and that the list may include recently terminated instances
-for a small period of time subsequent to their termination.
-
->>> instances = conn.get_all_instances()
->>> instances
-[Reservation:r-a76085ce, Reservation:r-a66085cf, Reservation:r-8c6085e5]
->>> r = instances[0]
->>> for inst in r.instances:
-... print inst.state
-u'terminated'
->>>
-
-A recent addition to the EC2 api's is to allow other EC2 users to launch
-your images. There are a couple of ways of accessing this capability in
-boto but I'll show you the simplest way here. First of all, you need to
-know the Amazon ID for the user in question. The Amazon Id is a twelve
-digit number that appears on your Account Activity page at AWS. It looks
-like this:
-
-1234-5678-9012
-
-To use this number in API calls, you need to remove the dashes so in our
-example the user ID would be 12345678912. To allow the user associated
-with this ID to launch one of your images, let's assume that the variable
-image represents the Image you want to share. So:
-
->>> image.get_launch_permissions()
-{}
->>>
-
-The get_launch_permissions method returns a dictionary object two possible
-entries; user_ids or groups. In our case we haven't yet given anyone
-permission to launch our image so the dictionary is empty. To add our
-EC2 user:
-
->>> image.set_launch_permissions(['123456789012'])
-True
->>> image.get_launch_permissions()
-{'user_ids': [u'123456789012']}
->>>
-
-We have now added the desired user to the launch permissions for the Image
-so that user will now be able to access and launch our Image. You can add
-multiple users at one time by adding them all to the list you pass in as
-a parameter to the method. To revoke the user's launch permissions:
-
->>> image.remove_launch_permissions(['123456789012'])
-True
->>> image.get_launch_permissions()
-{}
->>>
-
-It is possible to pass a list of group names to the set_launch_permissions
-method, as well. The only group available at the moment is the group "all"
-which would allow any valid EC2 user to launch your image.
-
-Finally, you can completely reset the launch permissions for an Image with:
-
->>> image.reset_launch_permissions()
-True
->>>
-
-This will remove all users and groups from the launch permission list and
-makes the Image private, again.
-
-Security Groups
-----------------
-
-Amazon defines a security group as:
-
-"A security group is a named collection of access rules. These access rules
- specify which ingress, i.e. incoming, network traffic should be delivered
- to your instance."
-
-To get a listing of all currently defined security groups:
-
->>> rs = conn.get_all_security_groups()
->>> print rs
-[SecurityGroup:appserver, SecurityGroup:default, SecurityGroup:vnc, SecurityGroup:webserver]
->>>
-
-Each security group can have an arbitrary number of rules which represent
-different network ports which are being enabled. To find the rules for a
-particular security group, use the rules attribute:
-
->>> sg = rs[1]
->>> sg.name
-u'default'
->>> sg.rules
-[IPPermissions:tcp(0-65535),
- IPPermissions:udp(0-65535),
- IPPermissions:icmp(-1--1),
- IPPermissions:tcp(22-22),
- IPPermissions:tcp(80-80)]
->>>
-
-In addition to listing the available security groups you can also create
-a new security group. I'll follow through the "Three Tier Web Service"
-example included in the EC2 Developer's Guide for an example of how to
-create security groups and add rules to them.
-
-First, let's create a group for our Apache web servers that allows HTTP
-access to the world:
-
->>> web = conn.create_security_group('apache', 'Our Apache Group')
->>> web
-SecurityGroup:apache
->>> web.authorize('tcp', 80, 80, '0.0.0.0/0')
-True
->>>
-
-The first argument is the ip protocol which can be one of; tcp, udp or icmp.
-The second argument is the FromPort or the beginning port in the range, the
-third argument is the ToPort or the ending port in the range and the last
-argument is the CIDR IP range to authorize access to.
-
-Next we create another group for the app servers:
-
->>> app = conn.create_security_group('appserver', 'The application tier')
->>>
-
-We then want to grant access between the web server group and the app
-server group. So, rather than specifying an IP address as we did in the
-last example, this time we will specify another SecurityGroup object.
-
->>> app.authorize(src_group=web)
-True
->>>
-
-Now, to verify that the web group now has access to the app servers, we want to
-temporarily allow SSH access to the web servers from our computer. Let's
-say that our IP address is 192.168.1.130 as it is in the EC2 Developer
-Guide. To enable that access:
-
->>> web.authorize(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip='192.168.1.130/32')
-True
->>>
-
-Now that this access is authorized, we could ssh into an instance running in
-the web group and then try to telnet to specific ports on servers in the
-appserver group, as shown in the EC2 Developer's Guide. When this testing is
-complete, we would want to revoke SSH access to the web server group, like this:
-
->>> web.rules
-[IPPermissions:tcp(80-80),
- IPPermissions:tcp(22-22)]
->>> web.revoke('tcp', 22, 22, cidr_ip='192.168.1.130/32')
-True
->>> web.rules
-[IPPermissions:tcp(80-80)]
->>>
-
-
-
-
-
-
+Please use with care since once you request termination for an instance there
+is no turning back.
diff --git a/docs/source/elb_tut.rst b/docs/source/elb_tut.rst
index 7440b08..d560b2c 100644
--- a/docs/source/elb_tut.rst
+++ b/docs/source/elb_tut.rst
@@ -4,61 +4,69 @@
An Introduction to boto's Elastic Load Balancing interface
==========================================================
-This tutorial focuses on the boto interface for Elastic Load Balancing
-from Amazon Web Services. This tutorial assumes that you have already
+This tutorial focuses on the boto interface for `Elastic Load Balancing`_
+from Amazon Web Services. This tutorial assumes that you have already
downloaded and installed boto, and are familiar with the boto ec2 interface.
+.. _Elastic Load Balancing: http://aws.amazon.com/elasticloadbalancing/
+
Elastic Load Balancing Concepts
-------------------------------
-Elastic Load Balancing (ELB) is intimately connected with Amazon's Elastic
-Compute Cloud (EC2) service. Using the ELB service allows you to create a load
+`Elastic Load Balancing`_ (ELB) is intimately connected with Amazon's `Elastic
+Compute Cloud`_ (EC2) service. Using the ELB service allows you to create a load
balancer - a DNS endpoint and set of ports that distributes incoming requests
-to a set of ec2 instances. The advantages of using a load balancer is that it
+to a set of EC2 instances. The advantage of using a load balancer is that it
allows you to truly scale up or down a set of backend instances without
-disrupting service. Before the ELB service you had to do this manually by
+disrupting service. Before the ELB service, you had to do this manually by
launching an EC2 instance and installing load balancer software on it (nginx,
haproxy, perlbal, etc.) to distribute traffic to other EC2 instances.
-Recall that the ec2 service is split into Regions and Availability Zones (AZ).
-At the time of writing, there are two Regions - US and Europe, and each region
-is divided into a number of AZs (for example, us-east-1a, us-east-1b, etc.).
-You can think of AZs as data centers - each runs off a different set of ISP
-backbones and power providers. ELB load balancers can span multiple AZs but
-cannot span multiple regions. That means that if you'd like to create a set of
-instances spanning both the US and Europe Regions you'd have to create two load
-balancers and have some sort of other means of distributing requests between
-the two loadbalancers. An example of this could be using GeoIP techniques to
-choose the correct load balancer, or perhaps DNS round robin. Keep in mind also
-that traffic is distributed equally over all AZs the ELB balancer spans. This
-means you should have an equal number of instances in each AZ if you want to
-equally distribute load amongst all your instances.
+Recall that the EC2 service is split into Regions, which are further
+divided into Availability Zones (AZ).
+For example, the US-East region is divided into us-east-1a, us-east-1b,
+us-east-1c, us-east-1d, and us-east-1e. You can think of AZs as data centers -
+each runs off a different set of ISP backbones and power providers.
+ELB load balancers can span multiple AZs but cannot span multiple regions.
+That means that if you'd like to create a set of instances spanning both the
+US and Europe Regions you'd have to create two load balancers and have some
+sort of other means of distributing requests between the two load balancers.
+An example of this could be using GeoIP techniques to choose the correct load
+balancer, or perhaps DNS round robin. Keep in mind also that traffic is
+distributed equally over all AZs the ELB balancer spans. This means you should
+have an equal number of instances in each AZ if you want to equally distribute
+load amongst all your instances.
+
+.. _Elastic Compute Cloud: http://aws.amazon.com/ec2/
Creating a Connection
---------------------
+
The first step in accessing ELB is to create a connection to the service.
-There are two ways to do this in boto. The first is:
-
->>> from boto.ec2.elb import ELBConnection
->>> conn = ELBConnection('<aws access key>', '<aws secret key>')
-
-There is also a shortcut function in the boto package, called connect_elb
-that may provide a slightly easier means of creating a connection:
>>> import boto
->>> conn = boto.connect_elb()
+>>> conn = boto.connect_elb(
+ aws_access_key_id='YOUR-KEY-ID-HERE',
+ aws_secret_access_key='YOUR-SECRET-HERE'
+ )
-In either case, conn will point to an ELBConnection object which we will
-use throughout the remainder of this tutorial.
A Note About Regions and Endpoints
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Like EC2 the ELB service has a different endpoint for each region. By default
-the US endpoint is used. To choose a specific region, instantiate the
+
+Like EC2, the ELB service has a different endpoint for each region. By default
+the US East endpoint is used. To choose a specific region, instantiate the
ELBConnection object with that region's information.
>>> from boto.regioninfo import RegionInfo
->>> reg = RegionInfo(name='eu-west-1', endpoint='elasticloadbalancing.eu-west-1.amazonaws.com')
->>> elb = boto.connect_elb(region=reg)
+>>> reg = RegionInfo(
+ name='eu-west-1',
+ endpoint='elasticloadbalancing.eu-west-1.amazonaws.com'
+ )
+>>> conn = boto.connect_elb(
+ aws_access_key_id='YOUR-KEY-ID-HERE',
+ aws_secret_access_key='YOUR-SECRET-HERE',
+ region=reg
+ )
Another way to connect to an alternative region is like this:
@@ -90,8 +98,22 @@
To retrieve any existing load balancers:
>>> conn.get_all_load_balancers()
+[LoadBalancer:load-balancer-prod, LoadBalancer:load-balancer-staging]
-You will get back a list of LoadBalancer objects.
+You can also filter by name:
+
+>>> conn.get_all_load_balancers(load_balancer_names=['load-balancer-prod'])
+[LoadBalancer:load-balancer-prod]
+
+:py:meth:`get_all_load_balancers <boto.ec2.elb.ELBConnection.get_all_load_balancers>`
+returns a :py:class:`boto.resultset.ResultSet` that contains instances
+of :class:`boto.ec2.elb.loadbalancer.LoadBalancer`, each of which abstracts
+access to a load balancer. :py:class:`ResultSet <boto.resultset.ResultSet>`
+works very much like a list.
+
+>>> balancers = conn.get_all_load_balancers()
+>>> balancers[0]
+LoadBalancer:load-balancer-prod
Creating a Load Balancer
------------------------
@@ -125,13 +147,14 @@
however specifying HTTP allows you to let ELB handle some work for you -
for example HTTP header parsing.
+.. _elb-configuring-a-health-check:
Configuring a Health Check
^^^^^^^^^^^^^^^^^^^^^^^^^^
A health check allows ELB to determine which instances are alive and able to
respond to requests. A health check is essentially a tuple consisting of:
- * *target*: What to check on an instance. For a TCP check this is comprised of::
+ * *Target*: What to check on an instance. For a TCP check this is comprised of::
TCP:PORT_TO_CHECK
@@ -143,47 +166,58 @@
This means that the health check will connect to the resource /RESOURCE on
PORT_TO_CHECK. If an HTTP 200 status is returned the instance is deemed healthy.
- * *interval*: How often the check is made. This is given in seconds and defaults to 30.
- The valid range of intervals goes from 5 seconds to 600 seconds.
- * *timeout*: The number of seconds the load balancer will wait for a check to return a
- result.
- * *UnhealthyThreshold*: The number of consecutive failed checks to deem the instance
- as being dead. The default is 5, and the range of valid values lies from 2 to 10.
+ * *Interval*: How often the check is made. This is given in seconds and defaults
+ to 30. The valid range of intervals goes from 5 seconds to 600 seconds.
+ * *Timeout*: The number of seconds the load balancer will wait for a check to
+ return a result.
+ * *Unhealthy threshold*: The number of consecutive failed checks to deem the
+ instance as being dead. The default is 5, and the range of valid values lies
+ from 2 to 10.
-The following example creates a health check called *instance_health* that simply checks
-instances every 20 seconds on port 80 over HTTP at the resource /health for 200 successes.
+The following example creates a health check called *instance_health* that
+simply checks instances every 20 seconds on port 80 over HTTP at the
+resource /health for 200 successes.
->>> import boto
>>> from boto.ec2.elb import HealthCheck
->>> conn = boto.connect_elb()
->>> hc = HealthCheck('instance_health', interval=20, target='HTTP:8080/health')
+>>> hc = HealthCheck(
+ interval=20,
+ healthy_threshold=3,
+ unhealthy_threshold=5,
+ target='HTTP:8080/health'
+ )
Putting It All Together
^^^^^^^^^^^^^^^^^^^^^^^
-Finally, let's create a load balancer in the US region that listens on ports 80 and 443
-and distributes requests to instances on 8080 and 8443 over HTTP and TCP. We want the
-load balancer to span the availability zones *us-east-1a* and *us-east-1b*:
+Finally, let's create a load balancer in the US region that listens on ports
+80 and 443 and distributes requests to instances on 8080 and 8443 over HTTP
+and TCP. We want the load balancer to span the availability zones
+*us-east-1a* and *us-east-1b*:
->>> lb = conn.create_load_balancer('my_lb', ['us-east-1a', 'us-east-1b'],
- [(80, 8080, 'http'), (443, 8443, 'tcp')])
+>>> zones = ['us-east-1a', 'us-east-1b']
+>>> ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+>>> lb = conn.create_load_balancer('my-lb', zones, ports)
+>>> # This is from the previous section.
>>> lb.configure_health_check(hc)
-The load balancer has been created. To see where you can actually connect to it, do:
+The load balancer has been created. To see where you can actually connect to
+it, do:
>>> print lb.dns_name
my_elb-123456789.us-east-1.elb.amazonaws.com
-You can then CNAME map a better name, i.e. www.MYWEBSITE.com to the above address.
+You can then CNAME map a better name, e.g. www.MYWEBSITE.com, to the
+above address.
Adding Instances To a Load Balancer
-----------------------------------
-Now that the load balancer has been created, there are two ways to add instances to it:
+Now that the load balancer has been created, there are two ways to add
+instances to it:
#. Manually, adding each instance in turn.
- #. Mapping an autoscale group to the load balancer. Please see the Autoscale
- tutorial for information on how to do this.
+ #. Mapping an autoscale group to the load balancer. Please see the
+ :doc:`Autoscale tutorial <autoscale_tut>` for information on how to do this.
Manually Adding and Removing Instances
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/source/index.rst b/docs/source/index.rst
index bb76ff4..1a7e930 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -5,84 +5,125 @@
===============================================
An integrated interface to current and future infrastructural services
-offered by Amazon Web Services.
+offered by `Amazon Web Services`_.
-Currently, this includes:
+.. _Amazon Web Services: http://aws.amazon.com/
-* Compute
+Currently Supported Services
+----------------------------
- * Elastic Compute Cloud (EC2)
- * Elastic MapReduce (EMR)
- * Auto Scaling
+* **Compute**
-* Content Delivery
+ * :doc:`Elastic Compute Cloud (EC2) <ec2_tut>` -- (:doc:`API Reference <ref/ec2>`)
+ * :doc:`Elastic MapReduce (EMR) <emr_tut>` -- (:doc:`API Reference <ref/emr>`)
+ * :doc:`Auto Scaling <autoscale_tut>` -- (:doc:`API Reference <ref/autoscale>`)
- * CloudFront
+* **Content Delivery**
-* Database
+ * :doc:`CloudFront <cloudfront_tut>` -- (:doc:`API Reference <ref/cloudfront>`)
- * SimpleDB
- * Relational Data Services (RDS)
+* **Database**
-* Deployment and Management
+ * :doc:`SimpleDB <simpledb_tut>` -- (:doc:`API Reference <ref/sdb>`)
+ * :doc:`DynamoDB <dynamodb_tut>` -- (:doc:`API Reference <ref/dynamodb>`)
+ * Relational Data Services (RDS) -- (:doc:`API Reference <ref/rds>`)
- * CloudFormation
+* **Deployment and Management**
-* Identity & Access
+ * CloudFormation -- (:doc:`API Reference <ref/cloudformation>`)
- * Identity and Access Management (IAM)
+* **Identity & Access**
-* Messaging
+ * Identity and Access Management (IAM) -- (:doc:`API Reference <ref/iam>`)
- * Simple Queue Service (SQS)
- * Simple Notificaiton Service (SNS)
- * Simple Email Service (SES)
+* **Application Services**
-* Monitoring
+ * Simple Workflow Service (SWF) -- (:doc:`API Reference <ref/swf>`)
+ * :doc:`Simple Queue Service (SQS) <sqs_tut>` -- (:doc:`API Reference <ref/sqs>`)
+ * Simple Notification Service (SNS) -- (:doc:`API Reference <ref/sns>`)
+ * :doc:`Simple Email Service (SES) <ses_tut>` -- (:doc:`API Reference <ref/ses>`)
+ * :doc:`Cloudsearch <cloudsearch_tut>` -- (:doc:`API Reference <ref/cloudsearch>`)
- * CloudWatch
+* **Monitoring**
-* Networking
+ * :doc:`CloudWatch <cloudwatch_tut>` -- (:doc:`API Reference <ref/cloudwatch>`)
- * Route 53
- * Virtual Private Cloud (VPC)
- * Elastic Load Balancing (ELB)
+* **Networking**
-* Payments & Billing
+ * Route 53 -- (:doc:`API Reference <ref/route53>`)
+ * :doc:`Virtual Private Cloud (VPC) <vpc_tut>` -- (:doc:`API Reference <ref/vpc>`)
+ * :doc:`Elastic Load Balancing (ELB) <elb_tut>` -- (:doc:`API Reference <ref/elb>`)
- * Flexible Payments Service (FPS)
+* **Payments & Billing**
-* Storage
+ * Flexible Payments Service (FPS) -- (:doc:`API Reference <ref/fps>`)
- * Simple Storage Service (S3)
+* **Storage**
-* Workforce
+ * :doc:`Simple Storage Service (S3) <s3_tut>` -- (:doc:`API Reference <ref/s3>`)
+ * Amazon Glacier -- (:doc:`API Reference <ref/glacier>`)
+ * Google Cloud Storage -- (:doc:`API Reference <ref/gs>`)
- * Mechanical Turk
+* **Workforce**
-The boto source repository is at http://github.com/boto
+ * Mechanical Turk -- (:doc:`API Reference <ref/mturk>`)
-Follow project updates on Twitter (http://twitter.com/pythonboto).
+Additional Resources
+--------------------
-Follow Mitch on Twitter (http://twitter.com/garnaat).
+* :doc:`Boto Config Tutorial <boto_config_tut>`
+* :doc:`Contributing to Boto <contributing>`
+* `Boto Source Repository`_
+* `Boto Issue Tracker`_
+* `Boto Twitter`_
+* `Follow Mitch on Twitter`_
+* Join our `IRC channel`_ (#boto on FreeNode).
-Join our `IRC channel`_ (#boto on FreeNode).
-
+.. _Boto Issue Tracker: https://github.com/boto/boto/issues
+.. _Boto Source Repository: https://github.com/boto/boto
+.. _Boto Twitter: http://twitter.com/pythonboto
.. _IRC channel: http://webchat.freenode.net/?channels=boto
-
-Documentation Contents
-----------------------
+.. _Follow Mitch on Twitter: http://twitter.com/garnaat
.. toctree::
- :maxdepth: 2
+ :hidden:
- sqs_tut
- s3_tut
ec2_tut
- elb_tut
- autoscale_tut
- vpc_tut
+ security_groups
+ ref/ec2
emr_tut
+ ref/emr
+ autoscale_tut
+ ref/autoscale
+ cloudfront_tut
+ ref/cloudfront
+ simpledb_tut
+ ref/sdb
+ ref/sdb_db
+ dynamodb_tut
+ ref/dynamodb
+ ref/rds
+ ref/cloudformation
+ ref/iam
+ sqs_tut
+ ref/sqs
+ ref/sns
+ ses_tut
+ ref/ses
+ cloudsearch_tut
+ ref/cloudsearch
+ cloudwatch_tut
+ ref/cloudwatch
+ ref/route53
+ vpc_tut
+ ref/vpc
+ elb_tut
+ ref/elb
+ ref/fps
+ s3_tut
+ ref/s3
+ ref/mturk
+ boto_config_tut
ref/index
documentation
diff --git a/docs/source/ref/autoscale.rst b/docs/source/ref/autoscale.rst
new file mode 100644
index 0000000..f372806
--- /dev/null
+++ b/docs/source/ref/autoscale.rst
@@ -0,0 +1,62 @@
+.. ref-autoscale
+
+======================
+Auto Scaling Reference
+======================
+
+boto.ec2.autoscale
+------------------
+
+.. automodule:: boto.ec2.autoscale
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.activity
+---------------------------
+
+.. automodule:: boto.ec2.autoscale.activity
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.group
+------------------------
+
+.. automodule:: boto.ec2.autoscale.group
+ :members:
+ :undoc-members:
+
+
+boto.ec2.autoscale.instance
+---------------------------
+
+.. automodule:: boto.ec2.autoscale.instance
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.launchconfig
+-------------------------------
+
+.. automodule:: boto.ec2.autoscale.launchconfig
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.policy
+--------------------------
+
+.. automodule:: boto.ec2.autoscale.policy
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.request
+--------------------------
+
+.. automodule:: boto.ec2.autoscale.request
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.scheduled
+----------------------------
+
+.. automodule:: boto.ec2.autoscale.scheduled
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/cloudformation.rst b/docs/source/ref/cloudformation.rst
index 447f487..3e0ab41 100644
--- a/docs/source/ref/cloudformation.rst
+++ b/docs/source/ref/cloudformation.rst
@@ -5,14 +5,21 @@
==============
boto.cloudformation
----------------
+-------------------
.. automodule:: boto.cloudformation
:members:
:undoc-members:
+boto.cloudformation.connection
+------------------------------
+
+.. automodule:: boto.cloudformation.connection
+ :members:
+ :undoc-members:
+
boto.cloudformation.stack
-----------------------------
+-------------------------
.. automodule:: boto.cloudformation.stack
:members:
diff --git a/docs/source/ref/cloudfront.rst b/docs/source/ref/cloudfront.rst
index 51f9455..5b8df14 100644
--- a/docs/source/ref/cloudfront.rst
+++ b/docs/source/ref/cloudfront.rst
@@ -1,96 +1,14 @@
.. ref-cloudfront
==========
-cloudfront
+CloudFront
==========
-A Crash Course in CloudFront in Boto
-------------------------------------
-
-This new boto module provides an interface to Amazon's new Content Service, CloudFront.
-
-.. warning::
-
- This module is not well tested. Paging of distributions is not yet
- supported. CNAME support is completely untested. Use with caution.
- Feedback and bug reports are greatly appreciated.
-
-The following shows the main features of the cloudfront module from an interactive shell:
-
-Create an cloudfront connection:
-
->>> from boto.cloudfront import CloudFrontConnection
->>> c = CloudFrontConnection()
-
-Create a new :class:`boto.cloudfront.distribution.Distribution`:
-
->>> distro = c.create_distribution(origin='mybucket.s3.amazonaws.com', enabled=False, comment='My new Distribution')
->>> d.domain_name
-u'd2oxf3980lnb8l.cloudfront.net'
->>> d.id
-u'ECH69MOIW7613'
->>> d.status
-u'InProgress'
->>> d.config.comment
-u'My new distribution'
->>> d.config.origin
-<S3Origin: mybucket.s3.amazonaws.com>
->>> d.config.caller_reference
-u'31b8d9cf-a623-4a28-b062-a91856fac6d0'
->>> d.config.enabled
-False
-
-Note that a new caller reference is created automatically, using
-uuid.uuid4(). The :class:`boto.cloudfront.distribution.Distribution`, :class:`boto.cloudfront.distribution.DistributionConfig` and
-:class:`boto.cloudfront.distribution.DistributionSummary` objects are defined in the :mod:`boto.cloudfront.distribution`
-module.
-
-To get a listing of all current distributions:
-
->>> rs = c.get_all_distributions()
->>> rs
-[<boto.cloudfront.distribution.DistributionSummary instance at 0xe8d4e0>,
- <boto.cloudfront.distribution.DistributionSummary instance at 0xe8d788>]
-
-This returns a list of :class:`boto.cloudfront.distribution.DistributionSummary` objects. Note that paging
-is not yet supported! To get a :class:`boto.cloudfront.distribution.DistributionObject` from a
-:class:`boto.cloudfront.distribution.DistributionSummary` object:
-
->>> ds = rs[1]
->>> distro = ds.get_distribution()
->>> distro.domain_name
-u'd2oxf3980lnb8l.cloudfront.net'
-
-To change a property of a distribution object:
-
->>> distro.comment
-u'My new distribution'
->>> distro.update(comment='This is a much better comment')
->>> distro.comment
-'This is a much better comment'
-
-You can also enable/disable a distribution using the following
-convenience methods:
-
->>> distro.enable() # just calls distro.update(enabled=True)
-
-or
-
->>> distro.disable() # just calls distro.update(enabled=False)
-
-The only attributes that can be updated for a Distribution are
-comment, enabled and cnames.
-
-To delete a :class:`boto.cloudfront.distribution.Distribution`:
-
->>> distro.delete()
-
-
boto.cloudfront
---------------
.. automodule:: boto.cloudfront
- :members:
+ :members:
:undoc-members:
boto.cloudfront.distribution
@@ -101,15 +19,50 @@
:undoc-members:
boto.cloudfront.origin
-----------------------------
+----------------------
.. automodule:: boto.cloudfront.origin
:members:
:undoc-members:
+boto.cloudfront.identity
+------------------------
+
+.. automodule:: boto.cloudfront.identity
+ :members:
+ :undoc-members:
+
+boto.cloudfront.signers
+-----------------------
+
+.. automodule:: boto.cloudfront.signers
+ :members:
+ :undoc-members:
+
+boto.cloudfront.invalidation
+----------------------------
+
+.. automodule:: boto.cloudfront.invalidation
+ :members:
+ :undoc-members:
+
+boto.cloudfront.object
+----------------------
+
+.. automodule:: boto.cloudfront.object
+ :members:
+ :undoc-members:
+
+boto.cloudfront.logging
+-----------------------
+
+.. automodule:: boto.cloudfront.logging
+ :members:
+ :undoc-members:
+
boto.cloudfront.exception
-------------------------
.. automodule:: boto.cloudfront.exception
- :members:
- :undoc-members:
\ No newline at end of file
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/cloudsearch.rst b/docs/source/ref/cloudsearch.rst
new file mode 100644
index 0000000..14671ee
--- /dev/null
+++ b/docs/source/ref/cloudsearch.rst
@@ -0,0 +1,59 @@
+.. ref-cloudsearch
+
+===========
+Cloudsearch
+===========
+
+boto.cloudsearch
+----------------
+
+.. automodule:: boto.cloudsearch
+ :members:
+ :undoc-members:
+
+boto.cloudsearch.domain
+-----------------------
+
+.. automodule:: boto.cloudsearch.domain
+ :members:
+ :undoc-members:
+
+boto.cloudsearch.layer1
+-----------------------
+
+.. automodule:: boto.cloudsearch.layer1
+ :members:
+ :undoc-members:
+
+boto.cloudsearch.layer2
+-----------------------
+
+.. automodule:: boto.cloudsearch.layer2
+ :members:
+ :undoc-members:
+
+boto.cloudsearch.optionstatus
+-----------------------------
+
+.. automodule:: boto.cloudsearch.optionstatus
+ :members:
+ :undoc-members:
+
+boto.cloudsearch.search
+-----------------------
+
+.. automodule:: boto.cloudsearch.search
+ :members:
+ :undoc-members:
+
+boto.cloudsearch.document
+-------------------------
+
+.. automodule:: boto.cloudsearch.document
+ :members:
+ :undoc-members:
+
+
+
+
+
diff --git a/docs/source/ref/cloudwatch.rst b/docs/source/ref/cloudwatch.rst
new file mode 100644
index 0000000..96c650e
--- /dev/null
+++ b/docs/source/ref/cloudwatch.rst
@@ -0,0 +1,27 @@
+.. ref-cloudwatch
+
+====================
+CloudWatch Reference
+====================
+
+boto.ec2.cloudwatch
+-------------------
+
+.. automodule:: boto.ec2.cloudwatch
+ :members:
+ :undoc-members:
+
+boto.ec2.cloudwatch.datapoint
+-----------------------------
+
+.. automodule:: boto.ec2.cloudwatch.datapoint
+ :members:
+ :undoc-members:
+
+boto.ec2.cloudwatch.metric
+--------------------------
+
+.. automodule:: boto.ec2.cloudwatch.metric
+ :members:
+ :undoc-members:
+
diff --git a/docs/source/ref/dynamodb.rst b/docs/source/ref/dynamodb.rst
new file mode 100644
index 0000000..560556e
--- /dev/null
+++ b/docs/source/ref/dynamodb.rst
@@ -0,0 +1,56 @@
+.. ref-dynamodb
+
+========
+DynamoDB
+========
+
+boto.dynamodb
+-------------
+
+.. automodule:: boto.dynamodb
+ :members:
+ :undoc-members:
+
+boto.dynamodb.layer1
+--------------------
+
+.. automodule:: boto.dynamodb.layer1
+ :members:
+ :undoc-members:
+
+boto.dynamodb.layer2
+--------------------
+
+.. automodule:: boto.dynamodb.layer2
+ :members:
+ :undoc-members:
+
+boto.dynamodb.table
+-------------------
+
+.. automodule:: boto.dynamodb.table
+ :members:
+ :undoc-members:
+
+boto.dynamodb.schema
+--------------------
+
+.. automodule:: boto.dynamodb.schema
+ :members:
+ :undoc-members:
+
+boto.dynamodb.item
+------------------
+
+.. automodule:: boto.dynamodb.item
+ :members:
+ :undoc-members:
+
+boto.dynamodb.batch
+-------------------
+
+.. automodule:: boto.dynamodb.batch
+ :members:
+ :undoc-members:
+
+
diff --git a/docs/source/ref/ec2.rst b/docs/source/ref/ec2.rst
index edc3bc2..0d5ac0e 100644
--- a/docs/source/ref/ec2.rst
+++ b/docs/source/ref/ec2.rst
@@ -19,61 +19,9 @@
:undoc-members:
boto.ec2.autoscale
-------------------
+-------------------
-.. automodule:: boto.ec2.autoscale
- :members:
- :undoc-members:
-
-boto.ec2.autoscale.activity
----------------------------
-
-.. automodule:: boto.ec2.autoscale.activity
- :members:
- :undoc-members:
-
-boto.ec2.autoscale.group
-------------------------
-
-.. automodule:: boto.ec2.autoscale.group
- :members:
- :undoc-members:
-
-
-boto.ec2.autoscale.instance
----------------------------
-
-.. automodule:: boto.ec2.autoscale.instance
- :members:
- :undoc-members:
-
-boto.ec2.autoscale.launchconfig
--------------------------------
-
-.. automodule:: boto.ec2.autoscale.launchconfig
- :members:
- :undoc-members:
-
-boto.ec2.autoscale.policy
---------------------------
-
-.. automodule:: boto.ec2.autoscale.policy
- :members:
- :undoc-members:
-
-boto.ec2.autoscale.request
---------------------------
-
-.. automodule:: boto.ec2.autoscale.request
- :members:
- :undoc-members:
-
-boto.ec2.autoscale.scheduled
-----------------------------
-
-.. automodule:: boto.ec2.autoscale.scheduled
- :members:
- :undoc-members:
+See the :doc:`Auto Scaling Reference <autoscale>`.
boto.ec2.buyreservation
-----------------------
@@ -85,23 +33,7 @@
boto.ec2.cloudwatch
-------------------
-.. automodule:: boto.ec2.cloudwatch
- :members:
- :undoc-members:
-
-boto.ec2.cloudwatch.datapoint
------------------------------
-
-.. automodule:: boto.ec2.cloudwatch.datapoint
- :members:
- :undoc-members:
-
-boto.ec2.cloudwatch.metric
---------------------------
-
-.. automodule:: boto.ec2.cloudwatch.metric
- :members:
- :undoc-members:
+See the :doc:`CloudWatch Reference <cloudwatch>`.
boto.ec2.connection
-------------------
@@ -118,46 +50,9 @@
:undoc-members:
boto.ec2.elb
-------------
+-------------------
-.. automodule:: boto.ec2.elb
- :members:
- :undoc-members:
-
-boto.ec2.elb.healthcheck
-------------------------
-
-.. automodule:: boto.ec2.elb.healthcheck
- :members:
- :undoc-members:
-
-boto.ec2.elb.instancestate
---------------------------
-
-.. automodule:: boto.ec2.elb.instancestate
- :members:
- :undoc-members:
-
-boto.ec2.elb.listelement
-------------------------
-
-.. automodule:: boto.ec2.elb.listelement
- :members:
- :undoc-members:
-
-boto.ec2.elb.listener
----------------------
-
-.. automodule:: boto.ec2.elb.listener
- :members:
- :undoc-members:
-
-boto.ec2.elb.loadbalancer
--------------------------
-
-.. automodule:: boto.ec2.elb.loadbalancer
- :members:
- :undoc-members:
+See the :doc:`ELB Reference <elb>`.
boto.ec2.image
--------------
@@ -180,6 +75,13 @@
:members:
:undoc-members:
+boto.ec2.instancestatus
+--------------------------
+
+.. automodule:: boto.ec2.instancestatus
+ :members:
+ :undoc-members:
+
boto.ec2.keypair
----------------
@@ -227,4 +129,5 @@
.. automodule:: boto.ec2.zone
:members:
- :undoc-members:
\ No newline at end of file
+ :undoc-members:
+
diff --git a/docs/source/ref/elb.rst b/docs/source/ref/elb.rst
new file mode 100644
index 0000000..74e77f3
--- /dev/null
+++ b/docs/source/ref/elb.rst
@@ -0,0 +1,47 @@
+.. ref-elb
+
+=============
+ELB Reference
+=============
+
+boto.ec2.elb
+------------
+
+.. automodule:: boto.ec2.elb
+ :members:
+ :undoc-members:
+
+boto.ec2.elb.healthcheck
+------------------------
+
+.. automodule:: boto.ec2.elb.healthcheck
+ :members:
+ :undoc-members:
+
+boto.ec2.elb.instancestate
+--------------------------
+
+.. automodule:: boto.ec2.elb.instancestate
+ :members:
+ :undoc-members:
+
+boto.ec2.elb.listelement
+------------------------
+
+.. automodule:: boto.ec2.elb.listelement
+ :members:
+ :undoc-members:
+
+boto.ec2.elb.listener
+---------------------
+
+.. automodule:: boto.ec2.elb.listener
+ :members:
+ :undoc-members:
+
+boto.ec2.elb.loadbalancer
+-------------------------
+
+.. automodule:: boto.ec2.elb.loadbalancer
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/glacier.rst b/docs/source/ref/glacier.rst
new file mode 100644
index 0000000..6f5ccbb
--- /dev/null
+++ b/docs/source/ref/glacier.rst
@@ -0,0 +1,56 @@
+.. ref-glacier
+
+=======
+Glacier
+=======
+
+boto.glacier
+------------
+
+.. automodule:: boto.glacier
+ :members:
+ :undoc-members:
+
+boto.glacier.layer1
+-------------------
+
+.. automodule:: boto.glacier.layer1
+ :members:
+ :undoc-members:
+
+boto.glacier.layer2
+-------------------
+
+.. automodule:: boto.glacier.layer2
+ :members:
+ :undoc-members:
+
+boto.glacier.vault
+------------------
+
+.. automodule:: boto.glacier.vault
+ :members:
+ :undoc-members:
+
+boto.glacier.job
+----------------
+
+.. automodule:: boto.glacier.job
+ :members:
+ :undoc-members:
+
+boto.glacier.writer
+-------------------
+
+.. automodule:: boto.glacier.writer
+ :members:
+ :undoc-members:
+
+boto.glacier.exceptions
+-----------------------
+
+.. automodule:: boto.glacier.exceptions
+ :members:
+ :undoc-members:
+
+
diff --git a/docs/source/ref/iam.rst b/docs/source/ref/iam.rst
index 73e825e..81f7b67 100644
--- a/docs/source/ref/iam.rst
+++ b/docs/source/ref/iam.rst
@@ -18,10 +18,10 @@
:members:
:undoc-members:
-boto.iam.response
------------------
+boto.iam.summarymap
+-------------------
-.. automodule:: boto.iam.response
+.. automodule:: boto.iam.summarymap
:members:
:undoc-members:
diff --git a/docs/source/ref/index.rst b/docs/source/ref/index.rst
index e6bee79..b13fc06 100644
--- a/docs/source/ref/index.rst
+++ b/docs/source/ref/index.rst
@@ -10,20 +10,22 @@
boto
cloudformation
cloudfront
+ cloudsearch
contrib
+ dynamodb
ec2
ecs
emr
file
fps
+ glacier
gs
iam
manage
- mashups
mturk
pyami
rds
- route53
+ route53
s3
sdb
services
@@ -31,5 +33,6 @@
sns
sqs
sts
+ swf
vpc
-
+
diff --git a/docs/source/ref/mashups.rst b/docs/source/ref/mashups.rst
deleted file mode 100644
index 5eca846..0000000
--- a/docs/source/ref/mashups.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-.. ref-mashups
-
-=======
-mashups
-=======
-
-boto.mashups
-------------
-
-.. automodule:: boto.mashups
- :members:
- :undoc-members:
-
-boto.mashups.interactive
-------------------------
-
-.. automodule:: boto.mashups.interactive
- :members:
- :undoc-members:
-
-boto.mashups.iobject
---------------------
-
-.. automodule:: boto.mashups.iobject
- :members:
- :undoc-members:
-
-boto.mashups.order
-------------------
-
-.. automodule:: boto.mashups.order
- :members:
- :undoc-members:
-
-boto.mashups.server
--------------------
-
-.. automodule:: boto.mashups.server
- :members:
- :undoc-members:
diff --git a/docs/source/ref/route53.rst b/docs/source/ref/route53.rst
index e267d9b..d1e4762 100644
--- a/docs/source/ref/route53.rst
+++ b/docs/source/ref/route53.rst
@@ -19,6 +19,13 @@
:members:
:undoc-members:
+boto.route53.record
+-------------------
+
+.. automodule:: boto.route53.record
+ :members:
+ :undoc-members:
+
boto.route53.exception
----------------------
diff --git a/docs/source/ref/s3.rst b/docs/source/ref/s3.rst
index 86b411a..1082c08 100644
--- a/docs/source/ref/s3.rst
+++ b/docs/source/ref/s3.rst
@@ -8,21 +8,21 @@
-----------
.. automodule:: boto.s3.acl
- :members:
+ :members:
:undoc-members:
boto.s3.bucket
--------------
.. automodule:: boto.s3.bucket
- :members:
+ :members:
:undoc-members:
boto.s3.bucketlistresultset
---------------------------
.. automodule:: boto.s3.bucketlistresultset
- :members:
+ :members:
:undoc-members:
boto.s3.connection
@@ -32,45 +32,80 @@
:members:
:undoc-members:
-boto.s3.key
------------
-
-.. automodule:: boto.s3.key
- :members:
- :undoc-members:
-
-boto.s3.prefix
+boto.s3.cors
--------------
-.. automodule:: boto.s3.prefix
- :members:
- :undoc-members:
-
-boto.s3.user
-------------
-
-.. automodule:: boto.s3.user
- :members:
- :undoc-members:
-
-boto.s3.multipart
------------------
-
-.. automodule:: boto.s3.multipart
- :members:
- :undoc-members:
-
-boto.s3.resumable_download_handler
-----------------------------------
-
-.. automodule:: boto.s3.resumable_download_handler
- :members:
+.. automodule:: boto.s3.cors
+ :members:
:undoc-members:
boto.s3.deletemarker
--------------------
.. automodule:: boto.s3.deletemarker
- :members:
+ :members:
:undoc-members:
-
+
+boto.s3.key
+-----------
+
+.. automodule:: boto.s3.key
+ :members:
+ :undoc-members:
+
+boto.s3.prefix
+--------------
+
+.. automodule:: boto.s3.prefix
+ :members:
+ :undoc-members:
+
+boto.s3.multipart
+-----------------
+
+.. automodule:: boto.s3.multipart
+ :members:
+ :undoc-members:
+
+boto.s3.multidelete
+-------------------
+
+.. automodule:: boto.s3.multidelete
+ :members:
+ :undoc-members:
+
+boto.s3.resumable_download_handler
+----------------------------------
+
+.. automodule:: boto.s3.resumable_download_handler
+ :members:
+ :undoc-members:
+
+boto.s3.lifecycle
+-----------------
+
+.. automodule:: boto.s3.lifecycle
+ :members:
+ :undoc-members:
+
+boto.s3.tagging
+---------------
+
+.. automodule:: boto.s3.tagging
+ :members:
+ :undoc-members:
+
+boto.s3.user
+------------
+
+.. automodule:: boto.s3.user
+ :members:
+ :undoc-members:
+
diff --git a/docs/source/ref/sdb.rst b/docs/source/ref/sdb.rst
index 8b96d00..28946f8 100644
--- a/docs/source/ref/sdb.rst
+++ b/docs/source/ref/sdb.rst
@@ -1,8 +1,13 @@
.. ref-sdb
-===
-sdb
-===
+=============
+SDB Reference
+=============
+
+In addition to what is seen below, boto includes an abstraction
+layer for SimpleDB that may be used:
+
+* :doc:`SimpleDB DB <sdb_db>` (Maintained, but little documentation)
boto.sdb
--------
@@ -18,82 +23,6 @@
:members:
:undoc-members:
-boto.sdb.db
------------
-
-.. automodule:: boto.sdb.db
- :members:
- :undoc-members:
-
-boto.sdb.db.blob
-----------------
-
-.. automodule:: boto.sdb.db.blob
- :members:
- :undoc-members:
-
-boto.sdb.db.key
----------------
-
-.. automodule:: boto.sdb.db.key
- :members:
- :undoc-members:
-
-boto.sdb.db.manager
--------------------
-
-.. automodule:: boto.sdb.db.manager
- :members:
- :undoc-members:
-
-boto.sdb.db.manager.pgmanager
------------------------------
-
-.. note::
-
- This module requires psycopg2__ to be installed in the Python path.
-
- __ http://initd.org/
-
-.. automodule:: boto.sdb.db.manager.pgmanager
- :members:
- :undoc-members:
-
-boto.sdb.db.manager.sdbmanager
-------------------------------
-
-.. automodule:: boto.sdb.db.manager.sdbmanager
- :members:
- :undoc-members:
-
-boto.sdb.db.manager.xmlmanager
-------------------------------
-
-.. automodule:: boto.sdb.db.manager.xmlmanager
- :members:
- :undoc-members:
-
-boto.sdb.db.model
------------------
-
-.. automodule:: boto.sdb.db.model
- :members:
- :undoc-members:
-
-boto.sdb.db.property
---------------------
-
-.. automodule:: boto.sdb.db.property
- :members:
- :undoc-members:
-
-boto.sdb.db.query
------------------
-
-.. automodule:: boto.sdb.db.query
- :members:
- :undoc-members:
-
boto.sdb.domain
---------------
@@ -108,34 +37,6 @@
:members:
:undoc-members:
-boto.sdb.persist
-----------------
-
-.. automodule:: boto.sdb.persist
- :members:
- :undoc-members:
-
-boto.sdb.persist.checker
-------------------------
-
-.. automodule:: boto.sdb.persist.checker
- :members:
- :undoc-members:
-
-boto.sdb.persist.object
------------------------
-
-.. automodule:: boto.sdb.persist.object
- :members:
- :undoc-members:
-
-boto.sdb.persist.property
--------------------------
-
-.. automodule:: boto.sdb.persist.property
- :members:
- :undoc-members:
-
boto.sdb.queryresultset
-----------------------
diff --git a/docs/source/ref/sdb_db.rst b/docs/source/ref/sdb_db.rst
new file mode 100644
index 0000000..5b77e2b
--- /dev/null
+++ b/docs/source/ref/sdb_db.rst
@@ -0,0 +1,83 @@
+.. ref-sdbdb
+
+================
+SDB DB Reference
+================
+
+This module offers an ORM-like layer on top of SimpleDB.
+
+boto.sdb.db
+-----------
+
+.. automodule:: boto.sdb.db
+ :members:
+ :undoc-members:
+
+boto.sdb.db.blob
+----------------
+
+.. automodule:: boto.sdb.db.blob
+ :members:
+ :undoc-members:
+
+boto.sdb.db.key
+---------------
+
+.. automodule:: boto.sdb.db.key
+ :members:
+ :undoc-members:
+
+boto.sdb.db.manager
+-------------------
+
+.. automodule:: boto.sdb.db.manager
+ :members:
+ :undoc-members:
+
+boto.sdb.db.manager.pgmanager
+-----------------------------
+
+.. note::
+
+ This module requires psycopg2__ to be installed in the Python path.
+
+ __ http://initd.org/
+
+.. automodule:: boto.sdb.db.manager.pgmanager
+ :members:
+ :undoc-members:
+
+boto.sdb.db.manager.sdbmanager
+------------------------------
+
+.. automodule:: boto.sdb.db.manager.sdbmanager
+ :members:
+ :undoc-members:
+
+boto.sdb.db.manager.xmlmanager
+------------------------------
+
+.. automodule:: boto.sdb.db.manager.xmlmanager
+ :members:
+ :undoc-members:
+
+boto.sdb.db.model
+-----------------
+
+.. automodule:: boto.sdb.db.model
+ :members:
+ :undoc-members:
+
+boto.sdb.db.property
+--------------------
+
+.. automodule:: boto.sdb.db.property
+ :members:
+ :undoc-members:
+
+boto.sdb.db.query
+-----------------
+
+.. automodule:: boto.sdb.db.query
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/sqs.rst b/docs/source/ref/sqs.rst
index 86aa2b4..88f1c16 100644
--- a/docs/source/ref/sqs.rst
+++ b/docs/source/ref/sqs.rst
@@ -52,3 +52,10 @@
.. automodule:: boto.sqs.regioninfo
:members:
:undoc-members:
+
+boto.sqs.batchresults
+---------------------
+
+.. automodule:: boto.sqs.batchresults
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/swf.rst b/docs/source/ref/swf.rst
new file mode 100644
index 0000000..d4b0ca3
--- /dev/null
+++ b/docs/source/ref/swf.rst
@@ -0,0 +1,22 @@
+.. ref-swf
+
+===
+SWF
+===
+
+boto.swf
+--------
+
+.. automodule:: boto.swf
+ :members:
+ :undoc-members:
+
+boto.swf.layer1
+---------------
+
+.. automodule:: boto.swf.layer1
+ :members:
+ :undoc-members:
+
diff --git a/docs/source/s3_tut.rst b/docs/source/s3_tut.rst
index d1bdbae..81b97e4 100644
--- a/docs/source/s3_tut.rst
+++ b/docs/source/s3_tut.rst
@@ -41,7 +41,7 @@
Once you have a connection established with S3, you will probably want to
create a bucket. A bucket is a container used to store key/value pairs
-in S3. A bucket can hold un unlimited about of data so you could potentially
+in S3. A bucket can hold an unlimited amount of data so you could potentially
have just one bucket in S3 for all of your information. Or, you could create
separate buckets for different types of data. You can figure all of that out
later, first let's just create a bucket. That can be accomplished like this:
@@ -203,7 +203,7 @@
[<boto.acl.Grant instance at 0x2e6a08>]
>>> for grant in acp.acl.grants:
... print grant.permission, grant.display_name, grant.email_address, grant.id
-...
+...
FULL_CONTROL <boto.user.User instance at 0x2e6a30>
The Python objects representing the ACL can be found in the acl.py module
@@ -243,3 +243,43 @@
>>> k.get_metadata('meta2')
'This is the second metadata value'
>>>
+
+Setting/Getting/Deleting CORS Configuration on a Bucket
+-------------------------------------------------------
+
+Cross-origin resource sharing (CORS) defines a way for client web
+applications that are loaded in one domain to interact with resources
+in a different domain. With CORS support in Amazon S3, you can build
+rich client-side web applications with Amazon S3 and selectively allow
+cross-origin access to your Amazon S3 resources.
+
+To create a CORS configuration and associate it with a bucket:
+
+>>> from boto.s3.cors import CORSConfiguration
+>>> cors_cfg = CORSConfiguration()
+>>> cors_cfg.add_rule(['PUT', 'POST', 'DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000, expose_header='x-amz-server-side-encryption')
+>>> cors_cfg.add_rule('GET', '*')
+
+The above code creates a CORS configuration object with two rules.
+
+* The first rule allows cross-origin PUT, POST, and DELETE requests from
+  the https://www.example.com/ origin. The rule also allows all headers
+  in a preflight OPTIONS request through the Access-Control-Request-Headers
+  header. In response to any preflight OPTIONS request, Amazon S3 will
+  return any requested headers.
+* The second rule allows cross-origin GET requests from all origins.
+
+To associate this configuration with a bucket:
+
+>>> import boto
+>>> c = boto.connect_s3()
+>>> bucket = c.lookup('mybucket')
+>>> bucket.set_cors(cors_cfg)
+
+To retrieve the CORS configuration associated with a bucket:
+
+>>> cors_cfg = bucket.get_cors()
+
+And, finally, to delete all CORS configurations from a bucket:
+
+>>> bucket.delete_cors()
diff --git a/docs/source/security_groups.rst b/docs/source/security_groups.rst
new file mode 100644
index 0000000..0b959c4
--- /dev/null
+++ b/docs/source/security_groups.rst
@@ -0,0 +1,82 @@
+.. _security_groups:
+
+===================
+EC2 Security Groups
+===================
+
+Amazon defines a security group as:
+
+"A security group is a named collection of access rules. These access rules
+ specify which ingress, i.e. incoming, network traffic should be delivered
+ to your instance."
+
+To get a listing of all currently defined security groups::
+
+ >>> rs = conn.get_all_security_groups()
+ >>> print rs
+ [SecurityGroup:appserver, SecurityGroup:default, SecurityGroup:vnc, SecurityGroup:webserver]
+
+Each security group can have an arbitrary number of rules, each representing
+a network port (or range of ports) that has been enabled. To find the rules for a
+particular security group, use the rules attribute::
+
+ >>> sg = rs[1]
+ >>> sg.name
+ u'default'
+ >>> sg.rules
+ [IPPermissions:tcp(0-65535),
+ IPPermissions:udp(0-65535),
+ IPPermissions:icmp(-1--1),
+ IPPermissions:tcp(22-22),
+ IPPermissions:tcp(80-80)]
+
+In addition to listing the available security groups, you can also create
+a new security group. We'll follow the "Three Tier Web Service"
+example included in the EC2 Developer's Guide to show how to
+create security groups and add rules to them.
+
+First, let's create a group for our Apache web servers that allows HTTP
+access to the world::
+
+ >>> web = conn.create_security_group('apache', 'Our Apache Group')
+ >>> web
+ SecurityGroup:apache
+ >>> web.authorize('tcp', 80, 80, '0.0.0.0/0')
+ True
+
+The first argument is the IP protocol, which can be one of tcp, udp, or icmp.
+The second argument is the FromPort, or the beginning port in the range; the
+third argument is the ToPort, or the ending port in the range; and the last
+argument is the CIDR IP range to authorize access to.
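+
+The same rule can also be written with keyword arguments (a sketch,
+equivalent to the positional call above)::
+
+    >>> web.authorize(ip_protocol='tcp', from_port=80, to_port=80, cidr_ip='0.0.0.0/0')
+    True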
+
+Next we create another group for the app servers::
+
+ >>> app = conn.create_security_group('appserver', 'The application tier')
+
+We then want to grant access between the web server group and the app
+server group. So, rather than specifying an IP address as we did in the
+last example, this time we will specify another SecurityGroup object::
+
+ >>> app.authorize(src_group=web)
+ True
+
+Next, to verify that the web group has access to the app servers, we want to
+temporarily allow SSH access to the web servers from our computer. Let's
+say that our IP address is 192.168.1.130 as it is in the EC2 Developer
+Guide. To enable that access::
+
+ >>> web.authorize(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip='192.168.1.130/32')
+ True
+
+Now that this access is authorized, we could ssh into an instance running in
+the web group and then try to telnet to specific ports on servers in the
+appserver group, as shown in the EC2 Developer's Guide. When this testing is
+complete, we would want to revoke SSH access to the web server group, like this::
+
+ >>> web.rules
+ [IPPermissions:tcp(80-80),
+ IPPermissions:tcp(22-22)]
+ >>> web.revoke('tcp', 22, 22, cidr_ip='192.168.1.130/32')
+ True
+ >>> web.rules
+ [IPPermissions:tcp(80-80)]
\ No newline at end of file
diff --git a/docs/source/ses_tut.rst b/docs/source/ses_tut.rst
new file mode 100644
index 0000000..c71e886
--- /dev/null
+++ b/docs/source/ses_tut.rst
@@ -0,0 +1,171 @@
+.. _ses_tut:
+
+=============================
+Simple Email Service Tutorial
+=============================
+
+This tutorial focuses on the boto interface to AWS' Simple Email Service (SES_).
+This tutorial assumes that you have boto already downloaded and installed.
+
+.. _SES: http://aws.amazon.com/ses/
+
+Creating a Connection
+---------------------
+
+The first step in accessing SES is to create a connection to the service.
+To do so, the most straightforward way is the following::
+
+ >>> import boto
+ >>> conn = boto.connect_ses(
+ aws_access_key_id='<YOUR_AWS_KEY_ID>',
+ aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
+ >>> conn
+ SESConnection:email.us-east-1.amazonaws.com
+
+Bear in mind that if you have your credentials in boto config in your home
+directory, the two keyword arguments in the call above are not needed. More
+details on configuration can be found in :doc:`boto_config_tut`.
+
+The :py:func:`boto.connect_ses` function returns a
+:py:class:`boto.ses.connection.SESConnection` instance, which is the boto API
+for working with SES.
+
+Notes on Sending
+----------------
+
+It is important to keep in mind that while emails appear to come "from" the
+address that you specify via Reply-To, the sending is done through Amazon.
+Some clients do pick up on this disparity and add a note to such emails.
+
+Verifying a Sender Email Address
+--------------------------------
+
+Before you can send email "from" an address, you must prove that you have
+access to the account. When you send a validation request, an email is sent
+to the address with a link in it. Clicking on the link validates the address
+and adds it to your SES account. Here's how to send the validation email::
+
+ >>> conn.verify_email_address('some@address.com')
+ {
+ 'VerifyEmailAddressResponse': {
+ 'ResponseMetadata': {
+ 'RequestId': '4a974fd5-56c2-11e1-ad4c-c1f08c91d554'
+ }
+ }
+ }
+
+After a short amount of time, you'll find an email with the validation
+link inside. Click it, and this address may be used to send emails.
+
+Listing Verified Addresses
+--------------------------
+
+If you'd like to list the addresses that are currently verified on your
+SES account, use
+:py:meth:`list_verified_email_addresses <boto.ses.connection.SESConnection.list_verified_email_addresses>`::
+
+ >>> conn.list_verified_email_addresses()
+ {
+ 'ListVerifiedEmailAddressesResponse': {
+ 'ListVerifiedEmailAddressesResult': {
+ 'VerifiedEmailAddresses': [
+ 'some@address.com',
+ 'another@address.com'
+ ]
+ },
+ 'ResponseMetadata': {
+ 'RequestId': '2ab45c18-56c3-11e1-be66-ffd2a4549d70'
+ }
+ }
+ }
+
+Deleting a Verified Address
+---------------------------
+
+In the event that you'd like to remove an email address from your account,
+use
+:py:meth:`delete_verified_email_address <boto.ses.connection.SESConnection.delete_verified_email_address>`::
+
+ >>> conn.delete_verified_email_address('another@address.com')
+
+Sending an Email
+----------------
+
+Sending an email is done via
+:py:meth:`send_email <boto.ses.connection.SESConnection.send_email>`::
+
+ >>> conn.send_email(
+ 'some@address.com',
+ 'Your subject',
+ 'Body here',
+ ['recipient-address-1@gmail.com'])
+ {
+ 'SendEmailResponse': {
+ 'ResponseMetadata': {
+ 'RequestId': '4743c2b7-56c3-11e1-bccd-c99bd68002fd'
+ },
+ 'SendEmailResult': {
+ 'MessageId': '000001357a177192-7b894025-147a-4705-8455-7c880b0c8270-000000'
+ }
+ }
+ }
+
+If you want to send a multipart MIME email, see the reference for
+:py:meth:`send_raw_email <boto.ses.connection.SESConnection.send_raw_email>`,
+which is a bit more of a low-level alternative.
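+
+As a minimal sketch (reusing the verified address from above and building
+the MIME message with the standard library's email package)::
+
+    >>> from email.mime.multipart import MIMEMultipart
+    >>> from email.mime.text import MIMEText
+    >>> msg = MIMEMultipart('alternative')
+    >>> msg['Subject'] = 'Your subject'
+    >>> msg['From'] = 'some@address.com'
+    >>> msg['To'] = 'recipient-address-1@gmail.com'
+    >>> msg.attach(MIMEText('Body here', 'plain'))
+    >>> conn.send_raw_email(msg.as_string(), source=msg['From'],
+    ...                     destinations=[msg['To']])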
+
+Checking your Send Quota
+------------------------
+
+Staying within your quota is critical, since the upper limit is a hard cap.
+Once you have hit your quota, no further email may be sent until enough
+time has elapsed that your rolling 24-hour send count falls back below
+the limit. Use
+:py:meth:`get_send_quota <boto.ses.connection.SESConnection.get_send_quota>`::
+
+ >>> conn.get_send_quota()
+ {
+ 'GetSendQuotaResponse': {
+ 'GetSendQuotaResult': {
+ 'Max24HourSend': '100000.0',
+ 'SentLast24Hours': '181.0',
+ 'MaxSendRate': '28.0'
+ },
+ 'ResponseMetadata': {
+ 'RequestId': u'8a629245-56c4-11e1-9c53-9d5f4d2cc8d3'
+ }
+ }
+ }
+
+Checking your Send Statistics
+-----------------------------
+
+In order to fight spammers and ensure quality mail is being sent from SES,
+Amazon tracks bounces, rejections, and complaints. This is done via
+:py:meth:`get_send_statistics <boto.ses.connection.SESConnection.get_send_statistics>`.
+Please be warned that the output is extremely verbose, to the point
+where we'll just show a short excerpt here::
+
+ >>> conn.get_send_statistics()
+ {
+ 'GetSendStatisticsResponse': {
+ 'GetSendStatisticsResult': {
+ 'SendDataPoints': [
+ {
+ 'Complaints': '0',
+ 'Timestamp': '2012-02-13T05:02:00Z',
+ 'DeliveryAttempts': '8',
+ 'Bounces': '0',
+ 'Rejects': '0'
+ },
+ {
+ 'Complaints': '0',
+ 'Timestamp': '2012-02-13T05:17:00Z',
+ 'DeliveryAttempts': '12',
+ 'Bounces': '0',
+ 'Rejects': '0'
+ }
+ ]
+ }
+ }
+ }
\ No newline at end of file
diff --git a/docs/source/simpledb_tut.rst b/docs/source/simpledb_tut.rst
new file mode 100644
index 0000000..3960726
--- /dev/null
+++ b/docs/source/simpledb_tut.rst
@@ -0,0 +1,188 @@
+.. _simpledb_tut:
+
+============================================
+An Introduction to boto's SimpleDB interface
+============================================
+This tutorial focuses on the boto interface to AWS' SimpleDB_. This tutorial
+assumes that you have boto already downloaded and installed.
+
+.. _SimpleDB: http://aws.amazon.com/simpledb/
+
+Creating a Connection
+---------------------
+The first step in accessing SimpleDB is to create a connection to the service.
+To do so, the most straightforward way is the following::
+
+ >>> import boto
+    >>> conn = boto.connect_sdb(aws_access_key_id='<YOUR_AWS_KEY_ID>', aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
+ >>> conn
+ SDBConnection:sdb.amazonaws.com
+ >>>
+
+Bear in mind that if you have your credentials in boto config in your home
+directory, the two keyword arguments in the call above are not needed. Also
+important to note is that, just as with any other AWS service, SimpleDB is
+region-specific, so you may want to specify which region to connect
+to; by default, it'll connect to the US-EAST-1 region.
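+
+For example, to connect to a different region explicitly (a sketch, assuming
+your credentials are available via boto config or the environment)::
+
+    >>> import boto.sdb
+    >>> conn = boto.sdb.connect_to_region('eu-west-1')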
+
+Creating Domains
+----------------
+Naturally, once you have your connection established, you'll want to create one or more domains.
+Creating new domains is a fairly straightforward operation. To do so, you can proceed as follows::
+
+ >>> conn.create_domain('test-domain')
+ Domain:test-domain
+ >>>
+ >>> conn.create_domain('test-domain-2')
+    Domain:test-domain-2
+ >>>
+
+Please note that SimpleDB, unlike its newer sibling DynamoDB, is truly and completely schema-less.
+Thus, there's no need to specify domain keys or ranges.
+
+Listing All Domains
+-------------------
+Unlike DynamoDB or other database systems, SimpleDB uses the concept of 'domains' instead of tables.
+So, to list all your domains for your account in a region, you can simply do as follows::
+
+ >>> domains = conn.get_all_domains()
+ >>> domains
+ [Domain:test-domain, Domain:test-domain-2]
+ >>>
+
+The get_all_domains() method returns a :py:class:`boto.resultset.ResultSet` containing
+all :py:class:`boto.sdb.domain.Domain` objects associated with
+this connection's Access Key ID for that region.
+
+Retrieving a Domain (by name)
+-----------------------------
+If you wish to retrieve a specific domain whose name is known, you can do so as follows::
+
+ >>> dom = conn.get_domain('test-domain')
+ >>> dom
+ Domain:test-domain
+ >>>
+
+The get_domain call has an optional validate parameter, which defaults to True. This makes sure an exception is raised
+if the domain you are looking for doesn't exist. If you set it to False, a
+:py:class:`Domain <boto.sdb.domain.Domain>` object is returned regardless of whether the domain actually exists.
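+
+For example (a sketch; no exception is raised even though this domain
+doesn't exist)::
+
+    >>> dom = conn.get_domain('no-such-domain', validate=False)
+    >>> dom
+    Domain:no-such-domain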
+
+Getting Domain Metadata
+------------------------
+There are times when you might want to know your domain's machine usage, approximate item count, and other such data.
+To this end, boto offers a simple and convenient way to do so, as shown below::
+
+ >>> domain_meta = conn.domain_metadata(dom)
+ >>> domain_meta
+ <boto.sdb.domain.DomainMetaData instance at 0x23cd440>
+ >>> dir(domain_meta)
+ ['BoxUsage', 'DomainMetadataResponse', 'DomainMetadataResult', 'RequestId', 'ResponseMetadata',
+ '__doc__', '__init__', '__module__', 'attr_name_count', 'attr_names_size', 'attr_value_count', 'attr_values_size',
+ 'domain', 'endElement', 'item_count', 'item_names_size', 'startElement', 'timestamp']
+ >>> domain_meta.item_count
+ 0
+ >>>
+
+Please bear in mind that while in the example above we used a previously retrieved domain object as the parameter, you
+can also retrieve the domain metadata by passing the domain's name as a string.
+
+Adding Items (and attributes)
+-----------------------------
+Once you have your domain set up, presumably you'll want to start adding items to it.
+In its most straightforward form, you need to provide a name for the item -- think of it
+as a record ID -- and a collection of the attributes you want to store in the item (often a dictionary-like object).
+So, adding an item to a domain looks as follows::
+
+ >>> item_name = 'ABC_123'
+    >>> item_attrs = {'Artist': 'The Jackson 5', 'Genre': 'Pop'}
+ >>> dom.put_attributes(item_name, item_attrs)
+ True
+ >>>
+
+Now let's check if it worked::
+
+ >>> domain_meta = conn.domain_metadata(dom)
+ >>> domain_meta.item_count
+ 1
+ >>>
+
+
+Batch Adding Items (and attributes)
+-----------------------------------
+You can also add a number of items at the same time in a similar fashion. All you have to provide to the batch_put_attributes() method
+is a dictionary-like object with your items and their respective attributes, as follows::
+
+    >>> items = {'item1': {'attr1': 'val1'}, 'item2': {'attr2': 'val2'}}
+ >>> dom.batch_put_attributes(items)
+ True
+ >>>
+
+Now, let's check the item count once again::
+
+ >>> domain_meta = conn.domain_metadata(dom)
+ >>> domain_meta.item_count
+ 3
+ >>>
+
+A few words of warning: both batch_put_attributes() and put_attributes(), by default, will overwrite the values of the attributes if both
+the item and attribute already exist. If the item exists but the attribute does not, the new attributes are appended to the
+attribute list of that item. If you do not wish these methods to behave in that manner, simply supply them with a 'replace=False'
+parameter.
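+
+For example, to add another value to an existing attribute rather than
+overwrite it (a sketch, reusing the item created earlier)::
+
+    >>> dom.put_attributes('item1', {'attr1': 'val1-extra'}, replace=False)
+    True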
+
+
+Retrieving Items
+-----------------
+Retrieving an item along with its attributes is a fairly straightforward operation, and can be accomplished as follows::
+
+ >>> dom.get_item('item1')
+ {u'attr1': u'val1'}
+ >>>
+
+Since SimpleDB works in an "eventual consistency" manner, we can also request a consistent read (though this may
+adversely affect read performance). The way to accomplish that is shown below::
+
+ >>> dom.get_item('item1', consistent_read=True)
+ {u'attr1': u'val1'}
+ >>>
+
+Retrieving One or More Items
+----------------------------
+Another way to retrieve items is through boto's select() method. At a bare minimum, this method requires a SQL-like select query string,
+and you would do something along the lines of::
+
+ >>> query = 'select * from `test-domain` where attr1="val1"'
+ >>> rs = dom.select(query)
+ >>> for j in rs:
+ ... print 'o hai'
+ ...
+ o hai
+ >>>
+
+This method returns a ResultSet collection you can iterate over.
+
+Updating Item Attributes
+------------------------
+The easiest way to modify an item's attributes is to retrieve the item, change its attributes, and then save those changes. For example::
+
+ >>> item = dom.get_item('item1')
+ >>> item['attr1'] = 'val_changed'
+ >>> item.save()
+
+
+Deleting Items (and their attributes)
+-------------------------------------
+Deleting an item is a very simple operation. All you are required to provide to the
+delete_item() method is the item you wish to delete; boto will take care of the rest::
+
+    >>> dom.delete_item(item)
+    >>>
+
+
+
+Deleting Domains
+----------------
+To delete a domain and all of the items in it (so be very careful), you can do so as follows::
+
+ >>> conn.delete_domain('test-domain')
+ True
+ >>>
diff --git a/docs/source/sqs_tut.rst b/docs/source/sqs_tut.rst
index 8c3edc5..742800f 100644
--- a/docs/source/sqs_tut.rst
+++ b/docs/source/sqs_tut.rst
@@ -5,96 +5,106 @@
=======================================
This tutorial focuses on the boto interface to the Simple Queue Service
-from Amazon Web Services. This tutorial assumes that you have already
-downloaded and installed boto.
+from Amazon Web Services. This tutorial assumes that you have boto already
+downloaded and installed.
Creating a Connection
---------------------
The first step in accessing SQS is to create a connection to the service.
-There are two ways to do this in boto. The first is:
+There are two ways to do this in boto. The first is::
->>> from boto.sqs.connection import SQSConnection
->>> conn = SQSConnection('<aws access key>', '<aws secret key>')
+ >>> from boto.sqs.connection import SQSConnection
+ >>> conn = SQSConnection('<aws access key>', '<aws secret key>')
-At this point the variable conn will point to an SQSConnection object. In
+At this point the variable conn will point to an SQSConnection object. Bear in mind that,
+just as with any other AWS service, SQS is region-specific. Also important to note is that, by default,
+if no region is provided, it'll connect to the US-EAST-1 region. In
this example, the AWS access key and AWS secret key are passed in to the
method explicitly. Alternatively, you can set the environment variables:
AWS_ACCESS_KEY_ID - Your AWS Access Key ID
AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key
-and then call the constructor without any arguments, like this:
+and then call the constructor without any arguments, like this::
->>> conn = SQSConnection()
+ >>> conn = SQSConnection()
There is also a shortcut function in the boto package, called connect_sqs
-that may provide a slightly easier means of creating a connection:
+that may provide a slightly easier means of creating a connection::
->>> import boto
->>> conn = boto.connect_sqs()
+ >>> import boto
+ >>> conn = boto.connect_sqs()
In either case, conn will point to an SQSConnection object which we will
use throughout the remainder of this tutorial.
Creating a Queue
----------------
-
Once you have a connection established with SQS, you will probably want to
-create a queue. That can be accomplished like this:
+create a queue. In its simplest form, that can be accomplished as follows::
->>> q = conn.create_queue('myqueue')
+ >>> q = conn.create_queue('myqueue')
-The create_queue method will create the requested queue if it does not
-exist or will return the existing queue if it does exist. There is an
+The create_queue method will create (and return) the requested queue if it does not
+exist or will return the existing queue if it does. There is an
optional parameter to create_queue called visibility_timeout. This basically
controls how long a message will remain invisible to other queue readers
once it has been read (see SQS documentation for more detailed explanation).
If this is not explicitly specified the queue will be created with whatever
default value SQS provides (currently 30 seconds). If you would like to
-specify another value, you could do so like this:
+specify another value, you could do so like this::
->>> q = conn.create_queue('myqueue', 120)
+ >>> q = conn.create_queue('myqueue', 120)
This would establish a default visibility timeout for this queue of 120
seconds. As you will see later on, this default value for the queue can
also be overridden each time a message is read from the queue. If you want
-to check what the default visibility timeout is for a queue:
+to check what the default visibility timeout is for a queue::
->>> q.get_timeout()
-30
->>>
+ >>> q.get_timeout()
+ 30
+
+Listing all Queues
+------------------
+
+To retrieve a list of the queues for your account in the current region::
+
+ >>> conn.get_all_queues()
+ [
+ Queue(https://queue.amazonaws.com/411358162645/myqueue),
+ Queue(https://queue.amazonaws.com/411358162645/another_queue),
+ Queue(https://queue.amazonaws.com/411358162645/another_queue2)
+ ]
+
+This will leave you with a list of all of your :py:class:`boto.sqs.queue.Queue`
+instances. Alternatively, if you wanted to only list the queues that started
+with ``'another'``::
+
+ >>> conn.get_all_queues(prefix='another')
+ [
+ Queue(https://queue.amazonaws.com/411358162645/another_queue),
+ Queue(https://queue.amazonaws.com/411358162645/another_queue2)
+ ]
+
+Getting a Queue (by name)
+-------------------------
+If you wish to explicitly retrieve an existing queue and the name of the queue is known,
+you can retrieve the queue as follows::
+
+    >>> my_queue = conn.get_queue('myqueue')
+    >>> my_queue
+    Queue(https://queue.amazonaws.com/411358162645/myqueue)
+
+This leaves you with a single :py:class:`boto.sqs.queue.Queue`, which abstracts
+the SQS Queue named 'myqueue'.
Writing Messages
----------------
-
-Once you have a queue, presumably you will want to write some messages
+Once you have a queue setup, presumably you will want to write some messages
to it. SQS doesn't care what kind of information you store in your messages
or what format you use to store it. As long as the amount of data per
-message is less than or equal to 256Kb, it's happy.
+message is less than or equal to 256KB, SQS won't complain.
-However, you may have a lot of specific requirements around the format of
-that data. For example, you may want to store one big string or you might
-want to store something that looks more like RFC822 messages or you might want
-to store a binary payload such as pickled Python objects.
-
-The way boto deals with this is to define a simple Message object that
-treats the message data as one big string which you can set and get. If that
-Message object meets your needs, you're good to go. However, if you need to
-incorporate different behavior in your message or handle different types of
-data you can create your own Message class. You just need to register that
-class with the queue so that it knows that when you read a message from the
-queue that it should create one of your message objects rather than the
-default boto Message object. To register your message class, you would:
-
->>> q.set_message_class(MyMessage)
-
-where MyMessage is the class definition for your message class. Your
-message class should subclass the boto Message because there is a small
-bit of Python magic happening in the __setattr__ method of the boto Message
-class.
-
-For this tutorial, let's just assume that we are using the boto Message
-class. So, first we need to create a Message object:
+So, first we need to create a Message object::
>>> from boto.sqs.message import Message
>>> m = Message()
@@ -106,6 +116,34 @@
not to write the message for some reason) or an exception if there was
some sort of problem with the request.
+Writing Messages (Custom Format)
+--------------------------------
+The technique above will work only if you use boto's default Message payload format;
+however, you may have a lot of specific requirements around the format of
+the message data. For example, you may want to store one big string or you might
+want to store something that looks more like RFC822 messages or you might want
+to store a binary payload such as pickled Python objects.
+
+The way boto deals with this issue is to define a simple Message object that
+treats the message data as one big string which you can set and get. If that
+Message object meets your needs, you're good to go. However, if you need to
+incorporate different behavior in your message or handle different types of
+data you can create your own Message class. You just need to register that
+class with the boto queue object so that it knows that, when you read a message from the
+queue, it should create one of your message objects rather than the
+default boto Message object. To register your message class, you would::
+
+    >>> q.set_message_class(MyMessage)
+    >>> m = MyMessage()
+    >>> m.set_body('This is my first message.')
+    >>> status = q.write(m)
+
+where MyMessage is the class definition for your message class. Your
+message class should subclass the boto Message because there is a small
+bit of Python magic happening in the __setattr__ method of the boto Message
+class.
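+
+A minimal sketch of such a subclass (hypothetical; in practice you would
+transform the payload to and from your own format here)::
+
+    >>> from boto.sqs.message import Message
+    >>> class MyMessage(Message):
+    ...     def get_body(self):
+    ...         # post-process the decoded body however your format requires
+    ...         return Message.get_body(self)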
+
Reading Messages
----------------
@@ -129,9 +167,9 @@
At this point, we have read the message from the queue and SQS will make
sure that this message remains invisible to other readers of the queue
-until the visibility timeout period for the queue expires. If I delete
-the message before the timeout period expires then no one will ever see
-the message again. However, if I don't delete it (maybe because I crashed
+until the visibility timeout period for the queue expires. If you delete
+the message before the timeout period expires then no one else will ever see
+the message again. However, if you don't delete it (maybe because your reader crashed
or failed in some way, for example) it will magically reappear in the queue
for someone else to read. If you aren't happy with the default visibility
timeout defined for the queue, you can override it when you read a message:
@@ -171,10 +209,8 @@
Deleting Messages and Queues
----------------------------
-
-Note that the first message we put in the queue is still there, even though
-we have read it a number of times. That's because we never deleted it. To
-remove a message from a queue:
+As stated above, messages are never deleted by the queue unless you explicitly delete them.
+To remove a message from a queue:
>>> q.delete_message(m)
[]
@@ -183,30 +219,11 @@
>>> conn.delete_queue(q)
-However, this won't succeed unless the queue is empty.
+However, and this is a good safeguard, this won't succeed unless the queue is empty.
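+
+If the queue still contains messages, one option is to drain it first (a
+sketch; clear() removes every message and returns the number deleted, so
+your count will vary)::
+
+    >>> q.clear()
+    1
+    >>> conn.delete_queue(q)
+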
-Listing All Available Queues
-----------------------------
-In addition to accessing specific queues via the create_queue method
-you can also get a list of all available queues that you have created.
-
->>> rs = conn.get_all_queues()
-
-This returns a ResultSet object, as described above. The ResultSet
-can be used as a sequence or list type object to retrieve Queue objects.
-
->>> len(rs)
-11
->>> for q in rs:
-... print q.id
-...
-<listing of available queues>
->>> q = rs[0]
-
-Other Stuff
------------
-
-That covers the basic operations of creating queues, writing messages,
+Additional Information
+----------------------
+The above tutorial covers the basic operations of creating queues, writing messages,
reading messages, deleting messages, and deleting queues. There are a
few utility methods in boto that might be useful as well. For example,
to count the number of messages in a queue:
diff --git a/docs/source/vpc_tut.rst b/docs/source/vpc_tut.rst
index 0040866..ce26ead 100644
--- a/docs/source/vpc_tut.rst
+++ b/docs/source/vpc_tut.rst
@@ -86,3 +86,15 @@
>>> vg.attach(vpc.id)
>>>
+
+Associating an Elastic IP with a VPC Instance
+---------------------------------------------
+
+>>> ec2.connection.associate_address('i-71b2f60b', None, 'eipalloc-35cf685d')
+>>>
+
+Releasing an Elastic IP Attached to a VPC Instance
+--------------------------------------------------
+
+>>> ec2.connection.release_address(None, 'eipalloc-35cf685d')
+>>>
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..b2776cb
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,9 @@
+mock==0.8.0
+nose==1.1.2
+M2Crypto==0.21.1
+requests==0.13.1
+tox==1.4
+Sphinx==1.1.3
+simplejson==2.5.2
+argparse==1.2.1
+unittest2==0.5.1
diff --git a/setup.py b/setup.py
index c30f12c..662c5e1 100644
--- a/setup.py
+++ b/setup.py
@@ -42,24 +42,29 @@
setup(name = "boto",
version = __version__,
description = "Amazon Web Services Library",
- long_description = "Python interface to Amazon's Web Services.",
+ long_description = open("README.rst").read(),
author = "Mitch Garnaat",
author_email = "mitch@garnaat.com",
scripts = ["bin/sdbadmin", "bin/elbadmin", "bin/cfadmin",
"bin/s3put", "bin/fetch_file", "bin/launch_instance",
"bin/list_instances", "bin/taskadmin", "bin/kill_instance",
"bin/bundle_image", "bin/pyami_sendmail", "bin/lss3",
- "bin/cq", "bin/route53", "bin/s3multiput", "bin/cwutil"],
- url = "http://code.google.com/p/boto/",
+ "bin/cq", "bin/route53", "bin/s3multiput", "bin/cwutil",
+ "bin/instance_events", "bin/asadmin", "bin/glacier"],
+ url = "https://github.com/boto/boto/",
packages = ["boto", "boto.sqs", "boto.s3", "boto.gs", "boto.file",
"boto.ec2", "boto.ec2.cloudwatch", "boto.ec2.autoscale",
"boto.ec2.elb", "boto.sdb", "boto.cacerts",
- "boto.sdb.db", "boto.sdb.db.manager", "boto.mturk",
- "boto.pyami", "boto.mashups", "boto.contrib", "boto.manage",
- "boto.services", "boto.cloudfront", "boto.roboto",
- "boto.rds", "boto.vpc", "boto.fps", "boto.emr", "boto.sns",
+ "boto.sdb.db", "boto.sdb.db.manager",
+ "boto.mturk", "boto.pyami",
+ "boto.pyami.installers", "boto.pyami.installers.ubuntu",
+ "boto.mashups", "boto.contrib", "boto.manage",
+ "boto.services", "boto.cloudfront",
+ "boto.roboto", "boto.rds", "boto.vpc", "boto.fps",
+                   "boto.emr", "boto.sns",
"boto.ecs", "boto.iam", "boto.route53", "boto.ses",
- "boto.cloudformation", "boto.sts"],
+ "boto.cloudformation", "boto.sts", "boto.dynamodb",
+ "boto.swf", "boto.mws", "boto.cloudsearch", "boto.glacier"],
package_data = {"boto.cacerts": ["cacerts.txt"]},
license = "MIT",
platforms = "Posix; MacOS X; Windows",
@@ -67,6 +72,10 @@
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
- "Topic :: Internet"],
+ "Topic :: Internet",
+ "Programming Language :: Python :: 2",
+ "Programming Language :: Python :: 2.5",
+ "Programming Language :: Python :: 2.6",
+ "Programming Language :: Python :: 2.7"],
**extra
)
diff --git a/tests/db/test_lists.py b/tests/db/test_lists.py
index d9c7639..4861272 100644
--- a/tests/db/test_lists.py
+++ b/tests/db/test_lists.py
@@ -46,13 +46,13 @@
def test_list_order(self):
"""Testing the order of lists"""
t = SimpleListModel()
- t.nums = [5,4,1,3,2]
+ t.nums = [5, 4, 1, 3, 2]
t.strs = ["B", "C", "A", "D", "Foo"]
t.put()
self.objs.append(t)
time.sleep(3)
t = SimpleListModel.get_by_id(t.id)
- assert(t.nums == [5,4,1,3,2])
+ assert(t.nums == [5, 4, 1, 3, 2])
assert(t.strs == ["B", "C", "A", "D", "Foo"])
def test_old_compat(self):
@@ -66,8 +66,7 @@
item.save()
time.sleep(3)
t = SimpleListModel.get_by_id(t.id)
- i1 = item['strs']
- i1.sort()
+ i1 = sorted(item['strs'])
i2 = t.strs
i2.sort()
assert(i1 == i2)
@@ -82,7 +81,7 @@
time.sleep(3)
assert(SimpleListModel.find(strs="Bizzle").count() == 1)
assert(SimpleListModel.find(strs="Bar").count() == 1)
- assert(SimpleListModel.find(strs=["Bar","Bizzle"]).count() == 1)
+ assert(SimpleListModel.find(strs=["Bar", "Bizzle"]).count() == 1)
def test_query_not_equals(self):
"""Test a not equal filter"""
diff --git a/tests/db/test_password.py b/tests/db/test_password.py
index a0c1424..74c3409 100644
--- a/tests/db/test_password.py
+++ b/tests/db/test_password.py
@@ -81,7 +81,7 @@
id= obj.id
time.sleep(5)
obj = MyModel.get_by_id(id)
- self.assertEquals(obj.password,'bar')
+ self.assertEquals(obj.password, 'bar')
self.assertEquals(str(obj.password), expected)
#hmac.new('mysecret','bar').hexdigest())
@@ -98,7 +98,7 @@
def test_password_constructor_hashfunc(self):
import hmac
- myhashfunc=lambda msg: hmac.new('mysecret',msg)
+ myhashfunc=lambda msg: hmac.new('mysecret', msg)
cls = self.test_model(hashfunc=myhashfunc)
obj = cls()
obj.password='hello'
diff --git a/tests/db/test_sequence.py b/tests/db/test_sequence.py
index 35f4b35..b950ee6 100644
--- a/tests/db/test_sequence.py
+++ b/tests/db/test_sequence.py
@@ -69,7 +69,7 @@
assert(s2.val == 3)
def test_sequence_simple_string(self):
- from boto.sdb.db.sequence import Sequence,increment_string
+ from boto.sdb.db.sequence import Sequence, increment_string
s = Sequence(fnc=increment_string)
self.sequences.append(s)
assert(s.val == "A")
@@ -80,26 +80,26 @@
from boto.sdb.db.sequence import fib
# Just check the first few numbers in the sequence
lv = 0
- for v in [1,2,3,5,8,13,21,34,55,89,144]:
- assert(fib(v,lv) == lv+v)
- lv = fib(v,lv)
+ for v in [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]:
+ assert(fib(v, lv) == lv+v)
+ lv = fib(v, lv)
def test_sequence_fib(self):
"""Test the fibonacci sequence"""
- from boto.sdb.db.sequence import Sequence,fib
+ from boto.sdb.db.sequence import Sequence, fib
s = Sequence(fnc=fib)
s2 = Sequence(s.id)
self.sequences.append(s)
assert(s.val == 1)
# Just check the first few numbers in the sequence
- for v in [1,2,3,5,8,13,21,34,55,89,144]:
+ for v in [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]:
assert(s.next() == v)
assert(s.val == v)
            assert(s2.val == v) # it shouldn't matter which reference we use since it's guaranteed to be consistent
def test_sequence_string(self):
"""Test the String incrementation sequence"""
- from boto.sdb.db.sequence import Sequence,increment_string
+ from boto.sdb.db.sequence import Sequence, increment_string
s = Sequence(fnc=increment_string)
self.sequences.append(s)
assert(s.val == "A")
diff --git a/tests/devpay/test_s3.py b/tests/devpay/test_s3.py
index bb91125..8666570 100644
--- a/tests/devpay/test_s3.py
+++ b/tests/devpay/test_s3.py
@@ -16,7 +16,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -37,141 +37,145 @@
AMAZON_USER_TOKEN = '{UserToken}...your token here...'
DEVPAY_HEADERS = { 'x-amz-security-token': AMAZON_USER_TOKEN }
-print '--- running S3Connection tests (DevPay) ---'
-c = S3Connection()
-# create a new, empty bucket
-bucket_name = 'test-%d' % int(time.time())
-bucket = c.create_bucket(bucket_name, headers=DEVPAY_HEADERS)
-# now try a get_bucket call and see if it's really there
-bucket = c.get_bucket(bucket_name, headers=DEVPAY_HEADERS)
-# test logging
-logging_bucket = c.create_bucket(bucket_name + '-log', headers=DEVPAY_HEADERS)
-logging_bucket.set_as_logging_target(headers=DEVPAY_HEADERS)
-bucket.enable_logging(target_bucket=logging_bucket, target_prefix=bucket.name, headers=DEVPAY_HEADERS)
-bucket.disable_logging(headers=DEVPAY_HEADERS)
-c.delete_bucket(logging_bucket, headers=DEVPAY_HEADERS)
-# create a new key and store it's content from a string
-k = bucket.new_key()
-k.name = 'foobar'
-s1 = 'This is a test of file upload and download'
-s2 = 'This is a second string to test file upload and download'
-k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
-fp = open('foobar', 'wb')
-# now get the contents from s3 to a local file
-k.get_contents_to_file(fp, headers=DEVPAY_HEADERS)
-fp.close()
-fp = open('foobar')
-# check to make sure content read from s3 is identical to original
-assert s1 == fp.read(), 'corrupted file'
-fp.close()
-# test generated URLs
-url = k.generate_url(3600, headers=DEVPAY_HEADERS)
-file = urllib.urlopen(url)
-assert s1 == file.read(), 'invalid URL %s' % url
-url = k.generate_url(3600, force_http=True, headers=DEVPAY_HEADERS)
-file = urllib.urlopen(url)
-assert s1 == file.read(), 'invalid URL %s' % url
-bucket.delete_key(k, headers=DEVPAY_HEADERS)
-# test a few variations on get_all_keys - first load some data
-# for the first one, let's override the content type
-phony_mimetype = 'application/x-boto-test'
-headers = {'Content-Type': phony_mimetype}
-headers.update(DEVPAY_HEADERS)
-k.name = 'foo/bar'
-k.set_contents_from_string(s1, headers)
-k.name = 'foo/bas'
-k.set_contents_from_filename('foobar', headers=DEVPAY_HEADERS)
-k.name = 'foo/bat'
-k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
-k.name = 'fie/bar'
-k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
-k.name = 'fie/bas'
-k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
-k.name = 'fie/bat'
-k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
-# try resetting the contents to another value
-md5 = k.md5
-k.set_contents_from_string(s2, headers=DEVPAY_HEADERS)
-assert k.md5 != md5
-os.unlink('foobar')
-all = bucket.get_all_keys(headers=DEVPAY_HEADERS)
-assert len(all) == 6
-rs = bucket.get_all_keys(prefix='foo', headers=DEVPAY_HEADERS)
-assert len(rs) == 3
-rs = bucket.get_all_keys(prefix='', delimiter='/', headers=DEVPAY_HEADERS)
-assert len(rs) == 2
-rs = bucket.get_all_keys(maxkeys=5, headers=DEVPAY_HEADERS)
-assert len(rs) == 5
-# test the lookup method
-k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS)
-assert isinstance(k, bucket.key_class)
-assert k.content_type == phony_mimetype
-k = bucket.lookup('notthere', headers=DEVPAY_HEADERS)
-assert k == None
-# try some metadata stuff
-k = bucket.new_key()
-k.name = 'has_metadata'
-mdkey1 = 'meta1'
-mdval1 = 'This is the first metadata value'
-k.set_metadata(mdkey1, mdval1)
-mdkey2 = 'meta2'
-mdval2 = 'This is the second metadata value'
-k.set_metadata(mdkey2, mdval2)
-k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
-k = bucket.lookup('has_metadata', headers=DEVPAY_HEADERS)
-assert k.get_metadata(mdkey1) == mdval1
-assert k.get_metadata(mdkey2) == mdval2
-k = bucket.new_key()
-k.name = 'has_metadata'
-k.get_contents_as_string(headers=DEVPAY_HEADERS)
-assert k.get_metadata(mdkey1) == mdval1
-assert k.get_metadata(mdkey2) == mdval2
-bucket.delete_key(k, headers=DEVPAY_HEADERS)
-# test list and iterator
-rs1 = bucket.list(headers=DEVPAY_HEADERS)
-num_iter = 0
-for r in rs1:
- num_iter = num_iter + 1
-rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
-num_keys = len(rs)
-assert num_iter == num_keys
-# try a key with a funny character
-k = bucket.new_key()
-k.name = 'testnewline\n'
-k.set_contents_from_string('This is a test', headers=DEVPAY_HEADERS)
-rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
-assert len(rs) == num_keys + 1
-bucket.delete_key(k, headers=DEVPAY_HEADERS)
-rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
-assert len(rs) == num_keys
-# try some acl stuff
-bucket.set_acl('public-read', headers=DEVPAY_HEADERS)
-policy = bucket.get_acl(headers=DEVPAY_HEADERS)
-assert len(policy.acl.grants) == 2
-bucket.set_acl('private', headers=DEVPAY_HEADERS)
-policy = bucket.get_acl(headers=DEVPAY_HEADERS)
-assert len(policy.acl.grants) == 1
-k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS)
-k.set_acl('public-read', headers=DEVPAY_HEADERS)
-policy = k.get_acl(headers=DEVPAY_HEADERS)
-assert len(policy.acl.grants) == 2
-k.set_acl('private', headers=DEVPAY_HEADERS)
-policy = k.get_acl(headers=DEVPAY_HEADERS)
-assert len(policy.acl.grants) == 1
-# try the convenience methods for grants
-# this doesn't work with devpay
-#bucket.add_user_grant('FULL_CONTROL',
-# 'c1e724fbfa0979a4448393c59a8c055011f739b6d102fb37a65f26414653cd67',
-# headers=DEVPAY_HEADERS)
-try:
- bucket.add_email_grant('foobar', 'foo@bar.com', headers=DEVPAY_HEADERS)
-except S3PermissionsError:
- pass
-# now delete all keys in bucket
-for k in all:
+def test():
+ print '--- running S3Connection tests (DevPay) ---'
+ c = S3Connection()
+ # create a new, empty bucket
+ bucket_name = 'test-%d' % int(time.time())
+ bucket = c.create_bucket(bucket_name, headers=DEVPAY_HEADERS)
+ # now try a get_bucket call and see if it's really there
+ bucket = c.get_bucket(bucket_name, headers=DEVPAY_HEADERS)
+ # test logging
+ logging_bucket = c.create_bucket(bucket_name + '-log', headers=DEVPAY_HEADERS)
+ logging_bucket.set_as_logging_target(headers=DEVPAY_HEADERS)
+ bucket.enable_logging(target_bucket=logging_bucket, target_prefix=bucket.name, headers=DEVPAY_HEADERS)
+ bucket.disable_logging(headers=DEVPAY_HEADERS)
+ c.delete_bucket(logging_bucket, headers=DEVPAY_HEADERS)
+ # create a new key and store it's content from a string
+ k = bucket.new_key()
+ k.name = 'foobar'
+ s1 = 'This is a test of file upload and download'
+ s2 = 'This is a second string to test file upload and download'
+ k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+ fp = open('foobar', 'wb')
+ # now get the contents from s3 to a local file
+ k.get_contents_to_file(fp, headers=DEVPAY_HEADERS)
+ fp.close()
+ fp = open('foobar')
+ # check to make sure content read from s3 is identical to original
+ assert s1 == fp.read(), 'corrupted file'
+ fp.close()
+ # test generated URLs
+ url = k.generate_url(3600, headers=DEVPAY_HEADERS)
+ file = urllib.urlopen(url)
+ assert s1 == file.read(), 'invalid URL %s' % url
+ url = k.generate_url(3600, force_http=True, headers=DEVPAY_HEADERS)
+ file = urllib.urlopen(url)
+ assert s1 == file.read(), 'invalid URL %s' % url
bucket.delete_key(k, headers=DEVPAY_HEADERS)
-# now delete bucket
+ # test a few variations on get_all_keys - first load some data
+ # for the first one, let's override the content type
+ phony_mimetype = 'application/x-boto-test'
+ headers = {'Content-Type': phony_mimetype}
+ headers.update(DEVPAY_HEADERS)
+ k.name = 'foo/bar'
+ k.set_contents_from_string(s1, headers)
+ k.name = 'foo/bas'
+ k.set_contents_from_filename('foobar', headers=DEVPAY_HEADERS)
+ k.name = 'foo/bat'
+ k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+ k.name = 'fie/bar'
+ k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+ k.name = 'fie/bas'
+ k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+ k.name = 'fie/bat'
+ k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+ # try resetting the contents to another value
+ md5 = k.md5
+ k.set_contents_from_string(s2, headers=DEVPAY_HEADERS)
+ assert k.md5 != md5
+ os.unlink('foobar')
+ all = bucket.get_all_keys(headers=DEVPAY_HEADERS)
+ assert len(all) == 6
+ rs = bucket.get_all_keys(prefix='foo', headers=DEVPAY_HEADERS)
+ assert len(rs) == 3
+ rs = bucket.get_all_keys(prefix='', delimiter='/', headers=DEVPAY_HEADERS)
+ assert len(rs) == 2
+ rs = bucket.get_all_keys(maxkeys=5, headers=DEVPAY_HEADERS)
+ assert len(rs) == 5
+ # test the lookup method
+ k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS)
+ assert isinstance(k, bucket.key_class)
+ assert k.content_type == phony_mimetype
+ k = bucket.lookup('notthere', headers=DEVPAY_HEADERS)
+ assert k == None
+ # try some metadata stuff
+ k = bucket.new_key()
+ k.name = 'has_metadata'
+ mdkey1 = 'meta1'
+ mdval1 = 'This is the first metadata value'
+ k.set_metadata(mdkey1, mdval1)
+ mdkey2 = 'meta2'
+ mdval2 = 'This is the second metadata value'
+ k.set_metadata(mdkey2, mdval2)
+ k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+ k = bucket.lookup('has_metadata', headers=DEVPAY_HEADERS)
+ assert k.get_metadata(mdkey1) == mdval1
+ assert k.get_metadata(mdkey2) == mdval2
+ k = bucket.new_key()
+ k.name = 'has_metadata'
+ k.get_contents_as_string(headers=DEVPAY_HEADERS)
+ assert k.get_metadata(mdkey1) == mdval1
+ assert k.get_metadata(mdkey2) == mdval2
+ bucket.delete_key(k, headers=DEVPAY_HEADERS)
+ # test list and iterator
+ rs1 = bucket.list(headers=DEVPAY_HEADERS)
+ num_iter = 0
+ for r in rs1:
+ num_iter = num_iter + 1
+ rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
+ num_keys = len(rs)
+ assert num_iter == num_keys
+ # try a key with a funny character
+ k = bucket.new_key()
+ k.name = 'testnewline\n'
+ k.set_contents_from_string('This is a test', headers=DEVPAY_HEADERS)
+ rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
+ assert len(rs) == num_keys + 1
+ bucket.delete_key(k, headers=DEVPAY_HEADERS)
+ rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
+ assert len(rs) == num_keys
+ # try some acl stuff
+ bucket.set_acl('public-read', headers=DEVPAY_HEADERS)
+ policy = bucket.get_acl(headers=DEVPAY_HEADERS)
+ assert len(policy.acl.grants) == 2
+ bucket.set_acl('private', headers=DEVPAY_HEADERS)
+ policy = bucket.get_acl(headers=DEVPAY_HEADERS)
+ assert len(policy.acl.grants) == 1
+ k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS)
+ k.set_acl('public-read', headers=DEVPAY_HEADERS)
+ policy = k.get_acl(headers=DEVPAY_HEADERS)
+ assert len(policy.acl.grants) == 2
+ k.set_acl('private', headers=DEVPAY_HEADERS)
+ policy = k.get_acl(headers=DEVPAY_HEADERS)
+ assert len(policy.acl.grants) == 1
+ # try the convenience methods for grants
+ # this doesn't work with devpay
+ #bucket.add_user_grant('FULL_CONTROL',
+ # 'c1e724fbfa0979a4448393c59a8c055011f739b6d102fb37a65f26414653cd67',
+ # headers=DEVPAY_HEADERS)
+ try:
+ bucket.add_email_grant('foobar', 'foo@bar.com', headers=DEVPAY_HEADERS)
+ except S3PermissionsError:
+ pass
+ # now delete all keys in bucket
+ for k in all:
+ bucket.delete_key(k, headers=DEVPAY_HEADERS)
+ # now delete bucket
-c.delete_bucket(bucket, headers=DEVPAY_HEADERS)
+ c.delete_bucket(bucket, headers=DEVPAY_HEADERS)
-print '--- tests completed ---'
+ print '--- tests completed ---'
+
+if __name__ == '__main__':
+ test()
diff --git a/tests/ec2/cloudwatch/test_connection.py b/tests/ec2/cloudwatch/test_connection.py
deleted file mode 100644
index c549c1d..0000000
--- a/tests/ec2/cloudwatch/test_connection.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright (c) 2010 Hunter Blanks http://artifex.org/~hblanks/
-# All rights reserved.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish, dis-
-# tribute, sublicense, and/or sell copies of the Software, and to permit
-# persons to whom the Software is furnished to do so, subject to the fol-
-# lowing conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
-# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-"""
-Initial, and very limited, unit tests for CloudWatchConnection.
-"""
-
-import datetime
-import time
-import unittest
-
-from boto.ec2.cloudwatch import CloudWatchConnection
-from boto.ec2.cloudwatch.metric import Metric
-
-class CloudWatchConnectionTest(unittest.TestCase):
-
- def test_build_list_params(self):
- c = CloudWatchConnection()
- params = {}
- c.build_list_params(
- params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
- expected_params = {
- 'ThingName1': 'thing1',
- 'ThingName2': 'thing2',
- 'ThingName3': 'thing3'
- }
- self.assertEqual(params, expected_params)
-
- def test_build_put_params_one(self):
- c = CloudWatchConnection()
- params = {}
- c.build_put_params(params, name="N", value=1, dimensions={"D": "V"})
- expected_params = {
- 'MetricData.member.1.MetricName': 'N',
- 'MetricData.member.1.Value': 1,
- 'MetricData.member.1.Dimensions.member.1.Name': 'D',
- 'MetricData.member.1.Dimensions.member.1.Value': 'V',
- }
- self.assertEqual(params, expected_params)
-
- def test_build_put_params_multiple_metrics(self):
- c = CloudWatchConnection()
- params = {}
- c.build_put_params(params, name=["N", "M"], value=[1, 2], dimensions={"D": "V"})
- expected_params = {
- 'MetricData.member.1.MetricName': 'N',
- 'MetricData.member.1.Value': 1,
- 'MetricData.member.1.Dimensions.member.1.Name': 'D',
- 'MetricData.member.1.Dimensions.member.1.Value': 'V',
- 'MetricData.member.2.MetricName': 'M',
- 'MetricData.member.2.Value': 2,
- 'MetricData.member.2.Dimensions.member.1.Name': 'D',
- 'MetricData.member.2.Dimensions.member.1.Value': 'V',
- }
- self.assertEqual(params, expected_params)
-
- def test_build_put_params_multiple_dimensions(self):
- c = CloudWatchConnection()
- params = {}
- c.build_put_params(params, name="N", value=[1, 2], dimensions=[{"D": "V"}, {"D": "W"}])
- expected_params = {
- 'MetricData.member.1.MetricName': 'N',
- 'MetricData.member.1.Value': 1,
- 'MetricData.member.1.Dimensions.member.1.Name': 'D',
- 'MetricData.member.1.Dimensions.member.1.Value': 'V',
- 'MetricData.member.2.MetricName': 'N',
- 'MetricData.member.2.Value': 2,
- 'MetricData.member.2.Dimensions.member.1.Name': 'D',
- 'MetricData.member.2.Dimensions.member.1.Value': 'W',
- }
- self.assertEqual(params, expected_params)
-
- def test_build_put_params_invalid(self):
- c = CloudWatchConnection()
- params = {}
- try:
- c.build_put_params(params, name=["N", "M"], value=[1, 2, 3])
- except:
- pass
- else:
- self.fail("Should not accept lists of different lengths.")
-
- def test_get_metric_statistics(self):
- c = CloudWatchConnection()
- m = c.list_metrics()[0]
- end = datetime.datetime.now()
- start = end - datetime.timedelta(hours=24*14)
- c.get_metric_statistics(
- 3600*24, start, end, m.name, m.namespace, ['Average', 'Sum'])
-
- def test_put_metric_data(self):
- c = CloudWatchConnection()
- now = datetime.datetime.now()
- name, namespace = 'unit-test-metric', 'boto-unit-test'
- c.put_metric_data(namespace, name, 5, now, 'Bytes')
-
- # Uncomment the following lines for a slower but more thorough
- # test. (Hurrah for eventual consistency...)
- #
- # metric = Metric(connection=c)
- # metric.name = name
- # metric.namespace = namespace
- # time.sleep(60)
- # l = metric.query(
- # now - datetime.timedelta(seconds=60),
- # datetime.datetime.now(),
- # 'Average')
- # assert l
- # for row in l:
- # self.assertEqual(row['Unit'], 'Bytes')
- # self.assertEqual(row['Average'], 5.0)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/tests/cloudfront/__init__.py b/tests/fps/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/fps/__init__.py
diff --git a/tests/fps/test.py b/tests/fps/test.py
new file mode 100755
index 0000000..d5efb4b
--- /dev/null
+++ b/tests/fps/test.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+
+from tests.unit import unittest
+import sys
+import os
+import os.path
+
+simple = True
+advanced = False
+if __name__ == "__main__":
+ devpath = os.path.relpath(os.path.join('..', '..'),
+ start=os.path.dirname(__file__))
+ sys.path = [devpath] + sys.path
+ print '>>> advanced FPS tests; using local boto sources'
+ advanced = True
+
+from boto.fps.connection import FPSConnection
+from boto.fps.response import ComplexAmount
+
+
+class FPSTestCase(unittest.TestCase):
+
+ def setUp(self):
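+        # run against the FPS sandbox endpoint so the tests never hit production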
+ self.fps = FPSConnection(host='fps.sandbox.amazonaws.com')
+ if advanced:
+            self.activity = self.fps.get_account_activity(
+ StartDate='2012-01-01')
+ result = self.activity.GetAccountActivityResult
+ self.transactions = result.Transaction
+
+ @unittest.skipUnless(simple, "skipping simple test")
+ def test_get_account_balance(self):
+ response = self.fps.get_account_balance()
+ self.assertTrue(hasattr(response, 'GetAccountBalanceResult'))
+ self.assertTrue(hasattr(response.GetAccountBalanceResult,
+ 'AccountBalance'))
+ accountbalance = response.GetAccountBalanceResult.AccountBalance
+ self.assertTrue(hasattr(accountbalance, 'TotalBalance'))
+ self.assertIsInstance(accountbalance.TotalBalance, ComplexAmount)
+ self.assertTrue(hasattr(accountbalance, 'AvailableBalances'))
+ availablebalances = accountbalance.AvailableBalances
+ self.assertTrue(hasattr(availablebalances, 'RefundBalance'))
+
+ @unittest.skipUnless(simple, "skipping simple test")
+ def test_complex_amount(self):
+ response = self.fps.get_account_balance()
+ accountbalance = response.GetAccountBalanceResult.AccountBalance
+ asfloat = float(accountbalance.TotalBalance.Value)
+ self.assertIn('.', str(asfloat))
+
+ @unittest.skipUnless(simple, "skipping simple test")
+ def test_required_arguments(self):
+ with self.assertRaises(KeyError):
+ self.fps.write_off_debt(AdjustmentAmount=123.45)
+
+ @unittest.skipUnless(simple, "skipping simple test")
+ def test_cbui_url(self):
+ inputs = {
+ 'transactionAmount': 123.45,
+ 'pipelineName': 'SingleUse',
+ 'returnURL': 'https://localhost/',
+ 'paymentReason': 'a reason for payment',
+ 'callerReference': 'foo',
+ }
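+        # cbui_url assembles a Co-Branded UI (CBUI) pipeline URL from these inputs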
+ result = self.fps.cbui_url(**inputs)
+ print "cbui_url() yields {0}".format(result)
+
+ @unittest.skipUnless(simple, "skipping simple test")
+ def test_get_account_activity(self):
+ response = self.fps.get_account_activity(StartDate='2012-01-01')
+ self.assertTrue(hasattr(response, 'GetAccountActivityResult'))
+ result = response.GetAccountActivityResult
+ self.assertTrue(hasattr(result, 'BatchSize'))
+        try:
+            int(result.BatchSize)
+        except (TypeError, ValueError):
+            self.fail('BatchSize should be an integer')
+
+ @unittest.skipUnless(advanced, "skipping advanced test")
+ def test_get_transaction(self):
+ assert len(self.transactions)
+ transactionid = self.transactions[0].TransactionId
+ result = self.fps.get_transaction(TransactionId=transactionid)
+ self.assertTrue(hasattr(result.GetTransactionResult, 'Transaction'))
+
+ @unittest.skip('cosmetic')
+ def test_bad_request(self):
+ try:
+ self.fps.write_off_debt(CreditInstrumentId='foo',
+ AdjustmentAmount=123.45)
+ except Exception, e:
+ print e
+
+ @unittest.skip('cosmetic')
+ def test_repr(self):
+ print self.fps.get_account_balance()
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/fps/test_verify_signature.py b/tests/fps/test_verify_signature.py
new file mode 100644
index 0000000..efc037f
--- /dev/null
+++ b/tests/fps/test_verify_signature.py
@@ -0,0 +1,12 @@
+from boto.fps.connection import FPSConnection
+
+def test():
+ conn = FPSConnection()
+ # example response from the docs
+ params = 'expiry=08%2F2015&signature=ynDukZ9%2FG77uSJVb5YM0cadwHVwYKPMKOO3PNvgADbv6VtymgBxeOWEhED6KGHsGSvSJnMWDN%2FZl639AkRe9Ry%2F7zmn9CmiM%2FZkp1XtshERGTqi2YL10GwQpaH17MQqOX3u1cW4LlyFoLy4celUFBPq1WM2ZJnaNZRJIEY%2FvpeVnCVK8VIPdY3HMxPAkNi5zeF2BbqH%2BL2vAWef6vfHkNcJPlOuOl6jP4E%2B58F24ni%2B9ek%2FQH18O4kw%2FUJ7ZfKwjCCI13%2BcFybpofcKqddq8CuUJj5Ii7Pdw1fje7ktzHeeNhF0r9siWcYmd4JaxTP3NmLJdHFRq2T%2FgsF3vK9m3gw%3D%3D&signatureVersion=2&signatureMethod=RSA-SHA1&certificateUrl=https%3A%2F%2Ffps.sandbox.amazonaws.com%2Fcerts%2F090909%2FPKICert.pem&tokenID=A5BB3HUNAZFJ5CRXIPH72LIODZUNAUZIVP7UB74QNFQDSQ9MN4HPIKISQZWPLJXF&status=SC&callerReference=callerReferenceMultiUse1'
+ endpoint = 'http://vamsik.desktop.amazon.com:8080/ipn.jsp'
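+    # verify_signature fetches the certificate named by certificateUrl and
+    # validates the RSA-SHA1 signature (signatureMethod) over the parameters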
+ conn.verify_signature(endpoint, params)
+
+
+if __name__ == '__main__':
+ test()
diff --git a/tests/cloudfront/__init__.py b/tests/integration/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/integration/__init__.py
diff --git a/tests/integration/beanstalk/test_wrapper.py b/tests/integration/beanstalk/test_wrapper.py
new file mode 100644
index 0000000..e3183e5
--- /dev/null
+++ b/tests/integration/beanstalk/test_wrapper.py
@@ -0,0 +1,209 @@
+import unittest
+import random
+import time
+from functools import partial
+
+from boto.beanstalk.wrapper import Layer1Wrapper
+import boto.beanstalk.response as response
+
+
+class BasicSuite(unittest.TestCase):
+ def setUp(self):
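+        # a random suffix keeps resource names unique across test runs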
+ self.random_id = str(random.randint(1, 1000000))
+ self.app_name = 'app-' + self.random_id
+ self.app_version = 'version-' + self.random_id
+ self.template = 'template-' + self.random_id
+ self.environment = 'environment-' + self.random_id
+ self.beanstalk = Layer1Wrapper()
+
+
+class MiscSuite(BasicSuite):
+ def test_check_dns_availability(self):
+ result = self.beanstalk.check_dns_availability('amazon')
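+        # 'amazon' is already taken, so it should be reported as unavailable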
+ self.assertIsInstance(result, response.CheckDNSAvailabilityResponse,
+ 'incorrect response object returned')
+ self.assertFalse(result.available)
+
+
+class TestApplicationObjects(BasicSuite):
+ def create_application(self):
+ # This method is used for any API calls that require an application
+ # object. This also adds a cleanup step to automatically delete the
+ # app when the test is finished. No assertions are performed
+ # here. If you want to validate create_application, don't use this
+ # method.
+ self.beanstalk.create_application(application_name=self.app_name)
+ self.addCleanup(partial(self.beanstalk.delete_application,
+ application_name=self.app_name))
+
+ def test_create_delete_application_version(self):
+ # This will create an app, create an app version, delete the app
+ # version, and delete the app. For each API call we check that the
+ # return type is what we expect and that a few attributes have the
+ # correct values.
+ app_result = self.beanstalk.create_application(application_name=self.app_name)
+ self.assertIsInstance(app_result, response.CreateApplicationResponse)
+ self.assertEqual(app_result.application.application_name, self.app_name)
+
+ version_result = self.beanstalk.create_application_version(
+ application_name=self.app_name, version_label=self.app_version)
+ self.assertIsInstance(version_result, response.CreateApplicationVersionResponse)
+ self.assertEqual(version_result.application_version.version_label,
+ self.app_version)
+ result = self.beanstalk.delete_application_version(
+ application_name=self.app_name, version_label=self.app_version)
+ self.assertIsInstance(result, response.DeleteApplicationVersionResponse)
+ result = self.beanstalk.delete_application(
+ application_name=self.app_name
+ )
+ self.assertIsInstance(result, response.DeleteApplicationResponse)
+
+ def test_create_configuration_template(self):
+ self.create_application()
+ result = self.beanstalk.create_configuration_template(
+ application_name=self.app_name, template_name=self.template,
+ solution_stack_name='32bit Amazon Linux running Tomcat 6')
+ self.assertIsInstance(
+ result, response.CreateConfigurationTemplateResponse)
+ self.assertEqual(result.solution_stack_name,
+ '32bit Amazon Linux running Tomcat 6')
+
+ def test_create_storage_location(self):
+ result = self.beanstalk.create_storage_location()
+ self.assertIsInstance(result, response.CreateStorageLocationResponse)
+
+ def test_update_application(self):
+ self.create_application()
+ result = self.beanstalk.update_application(application_name=self.app_name)
+ self.assertIsInstance(result, response.UpdateApplicationResponse)
+
+ def test_update_application_version(self):
+ self.create_application()
+ self.beanstalk.create_application_version(
+ application_name=self.app_name, version_label=self.app_version)
+ result = self.beanstalk.update_application_version(
+ application_name=self.app_name, version_label=self.app_version)
+ self.assertIsInstance(
+ result, response.UpdateApplicationVersionResponse)
+
+
+class GetSuite(BasicSuite):
+ def test_describe_applications(self):
+ result = self.beanstalk.describe_applications()
+ self.assertIsInstance(result, response.DescribeApplicationsResponse)
+
+ def test_describe_application_versions(self):
+ result = self.beanstalk.describe_application_versions()
+ self.assertIsInstance(result,
+ response.DescribeApplicationVersionsResponse)
+
+ def test_describe_configuration_options(self):
+ result = self.beanstalk.describe_configuration_options()
+ self.assertIsInstance(result,
+ response.DescribeConfigurationOptionsResponse)
+
+ def test_12_describe_environments(self):
+ result = self.beanstalk.describe_environments()
+ self.assertIsInstance(
+ result, response.DescribeEnvironmentsResponse)
+
+ def test_14_describe_events(self):
+ result = self.beanstalk.describe_events()
+ self.assertIsInstance(result, response.DescribeEventsResponse)
+
+ def test_15_list_available_solution_stacks(self):
+ result = self.beanstalk.list_available_solution_stacks()
+ self.assertIsInstance(
+ result, response.ListAvailableSolutionStacksResponse)
+ self.assertIn('32bit Amazon Linux running Tomcat 6',
+ result.solution_stacks)
+
+
+class TestsWithEnvironment(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.random_id = str(random.randint(1, 1000000))
+ cls.app_name = 'app-' + cls.random_id
+ cls.environment = 'environment-' + cls.random_id
+ cls.template = 'template-' + cls.random_id
+
+ cls.beanstalk = Layer1Wrapper()
+ cls.beanstalk.create_application(application_name=cls.app_name)
+ cls.beanstalk.create_configuration_template(
+ application_name=cls.app_name, template_name=cls.template,
+ solution_stack_name='32bit Amazon Linux running Tomcat 6')
+ cls.app_version = 'version-' + cls.random_id
+ cls.beanstalk.create_application_version(
+ application_name=cls.app_name, version_label=cls.app_version)
+ cls.beanstalk.create_environment(cls.app_name, cls.environment,
+ template_name=cls.template)
+ cls.wait_for_env(cls.environment)
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.beanstalk.delete_application(application_name=cls.app_name,
+ terminate_env_by_force=True)
+ cls.wait_for_env(cls.environment, 'Terminated')
+
+ @classmethod
+ def wait_for_env(cls, env_name, status='Ready'):
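+        # poll every 15 seconds until the environment reaches the desired status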
+ while not cls.env_ready(env_name, status):
+ time.sleep(15)
+
+ @classmethod
+ def env_ready(cls, env_name, desired_status):
+ result = cls.beanstalk.describe_environments(
+ application_name=cls.app_name, environment_names=env_name)
+ status = result.environments[0].status
+ return status == desired_status
+
+ def test_describe_environment_resources(self):
+ result = self.beanstalk.describe_environment_resources(
+ environment_name=self.environment)
+ self.assertIsInstance(
+ result, response.DescribeEnvironmentResourcesResponse)
+
+ def test_describe_configuration_settings(self):
+ result = self.beanstalk.describe_configuration_settings(
+ application_name=self.app_name, environment_name=self.environment)
+ self.assertIsInstance(
+ result, response.DescribeConfigurationSettingsResponse)
+
+ def test_request_environment_info(self):
+ result = self.beanstalk.request_environment_info(
+ environment_name=self.environment, info_type='tail')
+ self.assertIsInstance(result, response.RequestEnvironmentInfoResponse)
+ self.wait_for_env(self.environment)
+ result = self.beanstalk.retrieve_environment_info(
+ environment_name=self.environment, info_type='tail')
+ self.assertIsInstance(result, response.RetrieveEnvironmentInfoResponse)
+
+ def test_rebuild_environment(self):
+ result = self.beanstalk.rebuild_environment(
+ environment_name=self.environment)
+ self.assertIsInstance(result, response.RebuildEnvironmentResponse)
+ self.wait_for_env(self.environment)
+
+ def test_restart_app_server(self):
+ result = self.beanstalk.restart_app_server(
+ environment_name=self.environment)
+ self.assertIsInstance(result, response.RestartAppServerResponse)
+ self.wait_for_env(self.environment)
+
+ def test_update_configuration_template(self):
+ result = self.beanstalk.update_configuration_template(
+ application_name=self.app_name, template_name=self.template)
+ self.assertIsInstance(
+ result, response.UpdateConfigurationTemplateResponse)
+
+ def test_update_environment(self):
+ result = self.beanstalk.update_environment(
+ environment_name=self.environment)
+ self.assertIsInstance(result, response.UpdateEnvironmentResponse)
+ self.wait_for_env(self.environment)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/integration/cloudformation/__init__.py b/tests/integration/cloudformation/__init__.py
new file mode 100644
index 0000000..b7fe4c2
--- /dev/null
+++ b/tests/integration/cloudformation/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/tests/integration/cloudformation/test_cert_verification.py b/tests/integration/cloudformation/test_cert_verification.py
new file mode 100644
index 0000000..8a576e9
--- /dev/null
+++ b/tests/integration/cloudformation/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.cloudformation
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ cloudformation = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.cloudformation.regions():
+ c = region.connect()
+ c.describe_stacks()
diff --git a/tests/integration/cloudsearch/__init__.py b/tests/integration/cloudsearch/__init__.py
new file mode 100644
index 0000000..b7fe4c2
--- /dev/null
+++ b/tests/integration/cloudsearch/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/tests/integration/cloudsearch/test_cert_verification.py b/tests/integration/cloudsearch/test_cert_verification.py
new file mode 100644
index 0000000..9947702
--- /dev/null
+++ b/tests/integration/cloudsearch/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.cloudsearch
+
+
+class CertVerificationTest(unittest.TestCase):
+
+    cloudsearch = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.cloudsearch.regions():
+ c = region.connect()
+ c.describe_domains()
diff --git a/tests/integration/dynamodb/__init__.py b/tests/integration/dynamodb/__init__.py
new file mode 100644
index 0000000..354aa06
--- /dev/null
+++ b/tests/integration/dynamodb/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/tests/integration/dynamodb/test_cert_verification.py b/tests/integration/dynamodb/test_cert_verification.py
new file mode 100644
index 0000000..d0e4ef4
--- /dev/null
+++ b/tests/integration/dynamodb/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.dynamodb
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ dynamodb = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.dynamodb.regions():
+ c = region.connect()
+ c.layer1.list_tables()
diff --git a/tests/integration/dynamodb/test_layer1.py b/tests/integration/dynamodb/test_layer1.py
new file mode 100644
index 0000000..74f84ad
--- /dev/null
+++ b/tests/integration/dynamodb/test_layer1.py
@@ -0,0 +1,266 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Tests for Layer1 of DynamoDB
+"""
+import time
+import base64
+
+from tests.unit import unittest
+from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError
+from boto.dynamodb.exceptions import DynamoDBConditionalCheckFailedError
+from boto.dynamodb.exceptions import DynamoDBValidationError
+from boto.dynamodb.layer1 import Layer1
+
+
+class DynamoDBLayer1Test(unittest.TestCase):
+ dynamodb = True
+
+ def setUp(self):
+ self.dynamodb = Layer1()
+ self.table_name = 'test-%d' % int(time.time())
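+        # a time-based table name avoids collisions between test runs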
+ self.hash_key_name = 'forum_name'
+ self.hash_key_type = 'S'
+ self.range_key_name = 'subject'
+ self.range_key_type = 'S'
+ self.read_units = 5
+ self.write_units = 5
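+        # composite primary key: a string hash key plus a string range key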
+ self.schema = {'HashKeyElement': {'AttributeName': self.hash_key_name,
+ 'AttributeType': self.hash_key_type},
+ 'RangeKeyElement': {'AttributeName': self.range_key_name,
+ 'AttributeType': self.range_key_type}}
+ self.provisioned_throughput = {'ReadCapacityUnits': self.read_units,
+ 'WriteCapacityUnits': self.write_units}
+
+ def tearDown(self):
+ pass
+
+ def create_table(self, table_name, schema, provisioned_throughput):
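+        # create the table and register a cleanup so it is deleted even if
+        # the test fails partway through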
+ result = self.dynamodb.create_table(table_name, schema, provisioned_throughput)
+ self.addCleanup(self.dynamodb.delete_table, table_name)
+ return result
+
+ def test_layer1_basic(self):
+ print '--- running DynamoDB Layer1 tests ---'
+
+ c = self.dynamodb
+
+ # First create a table
+ table_name = self.table_name
+ hash_key_name = self.hash_key_name
+ hash_key_type = self.hash_key_type
+ range_key_name = self.range_key_name
+ range_key_type = self.range_key_type
+ read_units = self.read_units
+ write_units = self.write_units
+ schema = self.schema
+ provisioned_throughput = self.provisioned_throughput
+
+ result = self.create_table(table_name, schema, provisioned_throughput)
+ assert result['TableDescription']['TableName'] == table_name
+ result_schema = result['TableDescription']['KeySchema']
+ assert result_schema['HashKeyElement']['AttributeName'] == hash_key_name
+ assert result_schema['HashKeyElement']['AttributeType'] == hash_key_type
+ assert result_schema['RangeKeyElement']['AttributeName'] == range_key_name
+ assert result_schema['RangeKeyElement']['AttributeType'] == range_key_type
+ result_thruput = result['TableDescription']['ProvisionedThroughput']
+ assert result_thruput['ReadCapacityUnits'] == read_units
+ assert result_thruput['WriteCapacityUnits'] == write_units
+
+ # Wait for table to become active
+ result = c.describe_table(table_name)
+ while result['Table']['TableStatus'] != 'ACTIVE':
+ time.sleep(5)
+ result = c.describe_table(table_name)
+
+ # List tables and make sure new one is there
+ result = c.list_tables()
+ assert table_name in result['TableNames']
+
+        # Update the table's ProvisionedThroughput
+ new_read_units = 10
+ new_write_units = 5
+ new_provisioned_throughput = {'ReadCapacityUnits': new_read_units,
+ 'WriteCapacityUnits': new_write_units}
+ result = c.update_table(table_name, new_provisioned_throughput)
+
+ # Wait for table to be updated
+ result = c.describe_table(table_name)
+ while result['Table']['TableStatus'] == 'UPDATING':
+ time.sleep(5)
+ result = c.describe_table(table_name)
+
+ result_thruput = result['Table']['ProvisionedThroughput']
+ assert result_thruput['ReadCapacityUnits'] == new_read_units
+ assert result_thruput['WriteCapacityUnits'] == new_write_units
+
+ # Put an item
+ item1_key = 'Amazon DynamoDB'
+ item1_range = 'DynamoDB Thread 1'
+ item1_data = {
+ hash_key_name: {hash_key_type: item1_key},
+ range_key_name: {range_key_type: item1_range},
+ 'Message': {'S': 'DynamoDB thread 1 message text'},
+ 'LastPostedBy': {'S': 'User A'},
+ 'Views': {'N': '0'},
+ 'Replies': {'N': '0'},
+ 'Answered': {'N': '0'},
+ 'Tags': {'SS': ["index", "primarykey", "table"]},
+ 'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'}
+ }
+ result = c.put_item(table_name, item1_data)
+
+ # Now do a consistent read and check results
+ key1 = {'HashKeyElement': {hash_key_type: item1_key},
+ 'RangeKeyElement': {range_key_type: item1_range}}
+ result = c.get_item(table_name, key=key1, consistent_read=True)
+ for name in item1_data:
+ assert name in result['Item']
+
+ # Try to get an item that does not exist.
+ invalid_key = {'HashKeyElement': {hash_key_type: 'bogus_key'},
+ 'RangeKeyElement': {range_key_type: item1_range}}
+ self.assertRaises(DynamoDBKeyNotFoundError,
+ c.get_item, table_name, key=invalid_key)
+
+ # Try retrieving only select attributes
+ attributes = ['Message', 'Views']
+ result = c.get_item(table_name, key=key1, consistent_read=True,
+ attributes_to_get=attributes)
+ for name in result['Item']:
+ assert name in attributes
+
+ # Try to delete the item with the wrong Expected value
+ expected = {'Views': {'Value': {'N': '1'}}}
+ self.assertRaises(DynamoDBConditionalCheckFailedError,
+ c.delete_item, table_name, key=key1,
+ expected=expected)
+
+ # Now update the existing object
+ attribute_updates = {'Views': {'Value': {'N': '5'},
+ 'Action': 'PUT'},
+ 'Tags': {'Value': {'SS': ['foobar']},
+ 'Action': 'ADD'}}
+ result = c.update_item(table_name, key=key1,
+ attribute_updates=attribute_updates)
+
+        # Try to update an item in a fashion that makes it too large.
+ # The new message text is the item size limit minus 32 bytes and
+ # the current object is larger than 32 bytes.
+ item_size_overflow_text = 'Text to be padded'.zfill(64*1024-32)
+ attribute_updates = {'Message': {'Value': {'S': item_size_overflow_text},
+ 'Action': 'PUT'}}
+ self.assertRaises(DynamoDBValidationError,
+ c.update_item, table_name, key=key1,
+ attribute_updates=attribute_updates)
+
+ # Put a few more items into the table
+ item2_key = 'Amazon DynamoDB'
+ item2_range = 'DynamoDB Thread 2'
+ item2_data = {
+ hash_key_name: {hash_key_type: item2_key},
+ range_key_name: {range_key_type: item2_range},
+ 'Message': {'S': 'DynamoDB thread 2 message text'},
+ 'LastPostedBy': {'S': 'User A'},
+ 'Views': {'N': '0'},
+ 'Replies': {'N': '0'},
+ 'Answered': {'N': '0'},
+ 'Tags': {'SS': ["index", "primarykey", "table"]},
+ 'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'}
+ }
+ result = c.put_item(table_name, item2_data)
+ key2 = {'HashKeyElement': {hash_key_type: item2_key},
+ 'RangeKeyElement': {range_key_type: item2_range}}
+
+ item3_key = 'Amazon S3'
+ item3_range = 'S3 Thread 1'
+ item3_data = {
+ hash_key_name: {hash_key_type: item3_key},
+ range_key_name: {range_key_type: item3_range},
+ 'Message': {'S': 'S3 Thread 1 message text'},
+ 'LastPostedBy': {'S': 'User A'},
+ 'Views': {'N': '0'},
+ 'Replies': {'N': '0'},
+ 'Answered': {'N': '0'},
+ 'Tags': {'SS': ['largeobject', 'multipart upload']},
+ 'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'}
+ }
+ result = c.put_item(table_name, item3_data)
+ key3 = {'HashKeyElement': {hash_key_type: item3_key},
+ 'RangeKeyElement': {range_key_type: item3_range}}
+
+ # Try a few queries
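+        # (a hash key value plus a BEGINS_WITH condition on the range key)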
+ result = c.query(table_name, {'S': 'Amazon DynamoDB'},
+ {'AttributeValueList': [{'S': 'DynamoDB'}],
+ 'ComparisonOperator': 'BEGINS_WITH'})
+ assert 'Count' in result
+ assert result['Count'] == 2
+
+ # Try a few scans
+ result = c.scan(table_name,
+ {'Tags': {'AttributeValueList':[{'S': 'table'}],
+ 'ComparisonOperator': 'CONTAINS'}})
+ assert 'Count' in result
+ assert result['Count'] == 2
+
+ # Now delete the items
+ result = c.delete_item(table_name, key=key1)
+ result = c.delete_item(table_name, key=key2)
+ result = c.delete_item(table_name, key=key3)
+
+ print '--- tests completed ---'
+
+ def test_binary_attributes(self):
+ c = self.dynamodb
+ result = self.create_table(self.table_name, self.schema,
+ self.provisioned_throughput)
+ # Wait for table to become active
+ result = c.describe_table(self.table_name)
+ while result['Table']['TableStatus'] != 'ACTIVE':
+ time.sleep(5)
+ result = c.describe_table(self.table_name)
+
+ # Put an item
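+        # (the low-level 'B' type carries base64-encoded binary data)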
+ item1_key = 'Amazon DynamoDB'
+ item1_range = 'DynamoDB Thread 1'
+ item1_data = {
+ self.hash_key_name: {self.hash_key_type: item1_key},
+ self.range_key_name: {self.range_key_type: item1_range},
+ 'Message': {'S': 'DynamoDB thread 1 message text'},
+ 'LastPostedBy': {'S': 'User A'},
+ 'Views': {'N': '0'},
+ 'Replies': {'N': '0'},
+ 'BinaryData': {'B': base64.b64encode(bytes('\x01\x02\x03\x04'))},
+ 'Answered': {'N': '0'},
+ 'Tags': {'SS': ["index", "primarykey", "table"]},
+ 'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'}
+ }
+ result = c.put_item(self.table_name, item1_data)
+
+ # Now do a consistent read and check results
+ key1 = {'HashKeyElement': {self.hash_key_type: item1_key},
+ 'RangeKeyElement': {self.range_key_type: item1_range}}
+ result = c.get_item(self.table_name, key=key1, consistent_read=True)
+ self.assertEqual(result['Item']['BinaryData'],
+ {'B': base64.b64encode(bytes('\x01\x02\x03\x04'))})
diff --git a/tests/integration/dynamodb/test_layer2.py b/tests/integration/dynamodb/test_layer2.py
new file mode 100644
index 0000000..a87ade2
--- /dev/null
+++ b/tests/integration/dynamodb/test_layer2.py
@@ -0,0 +1,430 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Tests for Layer2 of Amazon DynamoDB
+"""
+
+import unittest
+import time
+import uuid
+from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError
+from boto.dynamodb.exceptions import DynamoDBConditionalCheckFailedError
+from boto.dynamodb.layer2 import Layer2
+from boto.dynamodb.types import get_dynamodb_type, Binary
+from boto.dynamodb.condition import BEGINS_WITH, CONTAINS, GT
+
+
+class DynamoDBLayer2Test(unittest.TestCase):
+ dynamodb = True
+
+ def setUp(self):
+ self.dynamodb = Layer2()
+ self.hash_key_name = 'forum_name'
+ self.hash_key_proto_value = ''
+ self.range_key_name = 'subject'
+ self.range_key_proto_value = ''
+
+ def create_table(self, table_name, schema, read_units, write_units):
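+        # register a cleanup so the table is deleted even when a test fails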
+ result = self.dynamodb.create_table(table_name, schema, read_units, write_units)
+ self.addCleanup(self.dynamodb.delete_table, result)
+ return result
+
+ def test_layer2_basic(self):
+ print '--- running Amazon DynamoDB Layer2 tests ---'
+ c = self.dynamodb
+
+ # First create a schema for the table
+ schema = c.create_schema(self.hash_key_name, self.hash_key_proto_value,
+ self.range_key_name,
+ self.range_key_proto_value)
+
+ # Create another schema without a range key
+ schema2 = c.create_schema('post_id', '')
+
+ # Now create a table
+ index = int(time.time())
+ table_name = 'test-%d' % index
+ read_units = 5
+ write_units = 5
+ table = self.create_table(table_name, schema, read_units, write_units)
+ assert table.name == table_name
+ assert table.schema.hash_key_name == self.hash_key_name
+ assert table.schema.hash_key_type == get_dynamodb_type(self.hash_key_proto_value)
+ assert table.schema.range_key_name == self.range_key_name
+ assert table.schema.range_key_type == get_dynamodb_type(self.range_key_proto_value)
+ assert table.read_units == read_units
+ assert table.write_units == write_units
+ assert table.item_count == 0
+ assert table.size_bytes == 0
+
+ # Create the second table
+ table2_name = 'test-%d' % (index + 1)
+ table2 = self.create_table(table2_name, schema2, read_units, write_units)
+
+ # Wait for table to become active
+ table.refresh(wait_for_active=True)
+ table2.refresh(wait_for_active=True)
+
+ # List tables and make sure new one is there
+ table_names = c.list_tables()
+ assert table_name in table_names
+ assert table2_name in table_names
+
+        # Update the table's ProvisionedThroughput
+ new_read_units = 10
+ new_write_units = 5
+ table.update_throughput(new_read_units, new_write_units)
+
+ # Wait for table to be updated
+ table.refresh(wait_for_active=True)
+ assert table.read_units == new_read_units
+ assert table.write_units == new_write_units
+
+ # Put an item
+ item1_key = 'Amazon DynamoDB'
+ item1_range = 'DynamoDB Thread 1'
+ item1_attrs = {
+ 'Message': 'DynamoDB thread 1 message text',
+ 'LastPostedBy': 'User A',
+ 'Views': 0,
+ 'Replies': 0,
+ 'Answered': 0,
+ 'Public': True,
+ 'Tags': set(['index', 'primarykey', 'table']),
+ 'LastPostDateTime': '12/9/2011 11:36:03 PM'}
+
+ # Test a few corner cases with new_item
+
+ # Try supplying a hash_key as an arg and as an item in attrs
+ item1_attrs[self.hash_key_name] = 'foo'
+ foobar_item = table.new_item(item1_key, item1_range, item1_attrs)
+ assert foobar_item.hash_key == item1_key
+
+ # Try supplying a range_key as an arg and as an item in attrs
+ item1_attrs[self.range_key_name] = 'bar'
+ foobar_item = table.new_item(item1_key, item1_range, item1_attrs)
+ assert foobar_item.range_key == item1_range
+
+ # Try supplying hash and range key in attrs dict
+ foobar_item = table.new_item(attrs=item1_attrs)
+ assert foobar_item.hash_key == 'foo'
+ assert foobar_item.range_key == 'bar'
+
+ del item1_attrs[self.hash_key_name]
+ del item1_attrs[self.range_key_name]
+
+ item1 = table.new_item(item1_key, item1_range, item1_attrs)
+ # make sure the put() succeeds
+ try:
+ item1.put()
+ except c.layer1.ResponseError, e:
+ raise Exception("Item put failed: %s" % e)
+
+ # Try to get an item that does not exist.
+ self.assertRaises(DynamoDBKeyNotFoundError,
+ table.get_item, 'bogus_key', item1_range)
+
+ # Now do a consistent read and check results
+ item1_copy = table.get_item(item1_key, item1_range,
+ consistent_read=True)
+ assert item1_copy.hash_key == item1.hash_key
+ assert item1_copy.range_key == item1.range_key
+ for attr_name in item1_copy:
+ val = item1_copy[attr_name]
+ if isinstance(val, (int, long, float, basestring)):
+ assert val == item1[attr_name]
+
+ # Try retrieving only select attributes
+ attributes = ['Message', 'Views']
+ item1_small = table.get_item(item1_key, item1_range,
+ attributes_to_get=attributes,
+ consistent_read=True)
+ for attr_name in item1_small:
+ # The item will include the attributes we asked for as
+ # well as the hashkey and rangekey, so filter those out.
+ if attr_name not in (item1_small.hash_key_name,
+ item1_small.range_key_name):
+ assert attr_name in attributes
+
+ self.assertTrue(table.has_item(item1_key, range_key=item1_range,
+ consistent_read=True))
+
+ # Try to delete the item with the wrong Expected value
+ expected = {'Views': 1}
+ self.assertRaises(DynamoDBConditionalCheckFailedError,
+ item1.delete, expected_value=expected)
+
+        # Try to delete a value while expecting a non-existent attribute
+ expected = {'FooBar': True}
+ try:
+ item1.delete(expected_value=expected)
+        except c.layer1.ResponseError:
+ pass
+
+ # Now update the existing object
+ item1.add_attribute('Replies', 2)
+
+ removed_attr = 'Public'
+ item1.delete_attribute(removed_attr)
+
+ removed_tag = item1_attrs['Tags'].copy().pop()
+ item1.delete_attribute('Tags', set([removed_tag]))
+
+ replies_by_set = set(['Adam', 'Arnie'])
+ item1.put_attribute('RepliesBy', replies_by_set)
+ retvals = item1.save(return_values='ALL_OLD')
+ # Need more tests here for variations on return_values
+ assert 'Attributes' in retvals
+
+ # Check for correct updates
+ item1_updated = table.get_item(item1_key, item1_range,
+ consistent_read=True)
+ assert item1_updated['Replies'] == item1_attrs['Replies'] + 2
+ self.assertFalse(removed_attr in item1_updated)
+ self.assertTrue(removed_tag not in item1_updated['Tags'])
+ self.assertTrue('RepliesBy' in item1_updated)
+ self.assertTrue(item1_updated['RepliesBy'] == replies_by_set)
+
+ # Put a few more items into the table
+ item2_key = 'Amazon DynamoDB'
+ item2_range = 'DynamoDB Thread 2'
+ item2_attrs = {
+ 'Message': 'DynamoDB thread 2 message text',
+ 'LastPostedBy': 'User A',
+ 'Views': 0,
+ 'Replies': 0,
+ 'Answered': 0,
+ 'Tags': set(["index", "primarykey", "table"]),
+ 'LastPost2DateTime': '12/9/2011 11:36:03 PM'}
+ item2 = table.new_item(item2_key, item2_range, item2_attrs)
+ item2.put()
+
+ item3_key = 'Amazon S3'
+ item3_range = 'S3 Thread 1'
+ item3_attrs = {
+ 'Message': 'S3 Thread 1 message text',
+ 'LastPostedBy': 'User A',
+ 'Views': 0,
+ 'Replies': 0,
+ 'Answered': 0,
+ 'Tags': set(['largeobject', 'multipart upload']),
+ 'LastPostDateTime': '12/9/2011 11:36:03 PM'
+ }
+ item3 = table.new_item(item3_key, item3_range, item3_attrs)
+ item3.put()
+
+ # Put an item into the second table
+ table2_item1_key = uuid.uuid4().hex
+ table2_item1_attrs = {
+ 'DateTimePosted': '25/1/2011 12:34:56 PM',
+ 'Text': 'I think boto rocks and so does DynamoDB'
+ }
+ table2_item1 = table2.new_item(table2_item1_key,
+ attrs=table2_item1_attrs)
+ table2_item1.put()
+
+ # Try a few queries
+ items = table.query('Amazon DynamoDB', BEGINS_WITH('DynamoDB'))
+ n = 0
+ for item in items:
+ n += 1
+ assert n == 2
+ assert items.consumed_units > 0
+
+ items = table.query('Amazon DynamoDB', BEGINS_WITH('DynamoDB'),
+ request_limit=1, max_results=1)
+ n = 0
+ for item in items:
+ n += 1
+ assert n == 1
+ assert items.consumed_units > 0
+
+ # Try a few scans
+ items = table.scan()
+ n = 0
+ for item in items:
+ n += 1
+ assert n == 3
+ assert items.consumed_units > 0
+
+ items = table.scan({'Replies': GT(0)})
+ n = 0
+ for item in items:
+ n += 1
+ assert n == 1
+ assert items.consumed_units > 0
+
+ # Test some integer and float attributes
+ integer_value = 42
+ float_value = 345.678
+ item3['IntAttr'] = integer_value
+ item3['FloatAttr'] = float_value
+
+ # Test booleans
+ item3['TrueBoolean'] = True
+ item3['FalseBoolean'] = False
+
+ # Test some set values
+ integer_set = set([1, 2, 3, 4, 5])
+ float_set = set([1.1, 2.2, 3.3, 4.4, 5.5])
+ mixed_set = set([1, 2, 3.3, 4, 5.555])
+ str_set = set(['foo', 'bar', 'fie', 'baz'])
+ item3['IntSetAttr'] = integer_set
+ item3['FloatSetAttr'] = float_set
+ item3['MixedSetAttr'] = mixed_set
+ item3['StrSetAttr'] = str_set
+ item3.put()
+
+ # Now do a consistent read
+ item4 = table.get_item(item3_key, item3_range, consistent_read=True)
+ assert item4['IntAttr'] == integer_value
+ assert item4['FloatAttr'] == float_value
+        assert item4['TrueBoolean'] is True
+        assert item4['FalseBoolean'] is False
+ # The values will not necessarily be in the same order as when
+ # we wrote them to the DB.
+ for i in item4['IntSetAttr']:
+ assert i in integer_set
+ for i in item4['FloatSetAttr']:
+ assert i in float_set
+ for i in item4['MixedSetAttr']:
+ assert i in mixed_set
+ for i in item4['StrSetAttr']:
+ assert i in str_set
+
+ # Try a batch get
+ batch_list = c.new_batch_list()
+ batch_list.add_batch(table, [(item2_key, item2_range),
+ (item3_key, item3_range)])
+ response = batch_list.submit()
+ assert len(response['Responses'][table.name]['Items']) == 2
+
+ # Try an empty batch get
+ batch_list = c.new_batch_list()
+ batch_list.add_batch(table, [])
+ response = batch_list.submit()
+ assert response == {}
+
+ # Try a few batch write operations
+ item4_key = 'Amazon S3'
+ item4_range = 'S3 Thread 2'
+ item4_attrs = {
+ 'Message': 'S3 Thread 2 message text',
+ 'LastPostedBy': 'User A',
+ 'Views': 0,
+ 'Replies': 0,
+ 'Answered': 0,
+ 'Tags': set(['largeobject', 'multipart upload']),
+ 'LastPostDateTime': '12/9/2011 11:36:03 PM'
+ }
+ item5_key = 'Amazon S3'
+ item5_range = 'S3 Thread 3'
+ item5_attrs = {
+ 'Message': 'S3 Thread 3 message text',
+ 'LastPostedBy': 'User A',
+ 'Views': 0,
+ 'Replies': 0,
+ 'Answered': 0,
+ 'Tags': set(['largeobject', 'multipart upload']),
+ 'LastPostDateTime': '12/9/2011 11:36:03 PM'
+ }
+ item4 = table.new_item(item4_key, item4_range, item4_attrs)
+ item5 = table.new_item(item5_key, item5_range, item5_attrs)
+ batch_list = c.new_batch_write_list()
+ batch_list.add_batch(table, puts=[item4, item5])
+ response = batch_list.submit()
+        # should really check the response for UnprocessedItems here
+
+ batch_list = c.new_batch_write_list()
+ batch_list.add_batch(table, deletes=[(item4_key, item4_range),
+ (item5_key, item5_range)])
+ response = batch_list.submit()
+
+ # Try queries
+ results = table.query('Amazon DynamoDB', BEGINS_WITH('DynamoDB'))
+ n = 0
+ for item in results:
+ n += 1
+ assert n == 2
+
+ # Try scans
+ results = table.scan({'Tags': CONTAINS('table')})
+ n = 0
+ for item in results:
+ n += 1
+ assert n == 2
+
+ # Try to delete the item with the right Expected value
+ expected = {'Views': 0}
+ item1.delete(expected_value=expected)
+
+ self.assertFalse(table.has_item(item1_key, range_key=item1_range,
+ consistent_read=True))
+ # Now delete the remaining items
+ ret_vals = item2.delete(return_values='ALL_OLD')
+ # some additional checks here would be useful
+ assert ret_vals['Attributes'][self.hash_key_name] == item2_key
+ assert ret_vals['Attributes'][self.range_key_name] == item2_range
+
+ item3.delete()
+ table2_item1.delete()
+ print '--- tests completed ---'
+
+ def test_binary_attrs(self):
+ c = self.dynamodb
+ schema = c.create_schema(self.hash_key_name, self.hash_key_proto_value,
+ self.range_key_name,
+ self.range_key_proto_value)
+ index = int(time.time())
+ table_name = 'test-%d' % index
+ read_units = 5
+ write_units = 5
+ table = self.create_table(table_name, schema, read_units, write_units)
+ table.refresh(wait_for_active=True)
+ item1_key = 'Amazon S3'
+ item1_range = 'S3 Thread 1'
+ item1_attrs = {
+ 'Message': 'S3 Thread 1 message text',
+ 'LastPostedBy': 'User A',
+ 'Views': 0,
+ 'Replies': 0,
+ 'Answered': 0,
+ 'BinaryData': Binary('\x01\x02\x03\x04'),
+ 'BinarySequence': set([Binary('\x01\x02'), Binary('\x03\x04')]),
+ 'Tags': set(['largeobject', 'multipart upload']),
+ 'LastPostDateTime': '12/9/2011 11:36:03 PM'
+ }
+ item1 = table.new_item(item1_key, item1_range, item1_attrs)
+ item1.put()
+
+ retrieved = table.get_item(item1_key, item1_range, consistent_read=True)
+ self.assertEqual(retrieved['Message'], 'S3 Thread 1 message text')
+ self.assertEqual(retrieved['Views'], 0)
+ self.assertEqual(retrieved['Tags'],
+ set(['largeobject', 'multipart upload']))
+ self.assertEqual(retrieved['BinaryData'], Binary('\x01\x02\x03\x04'))
+ # Also comparable directly to bytes:
+ self.assertEqual(retrieved['BinaryData'], bytes('\x01\x02\x03\x04'))
+ self.assertEqual(retrieved['BinarySequence'],
+ set([Binary('\x01\x02'), Binary('\x03\x04')]))
diff --git a/tests/ec2/__init__.py b/tests/integration/ec2/__init__.py
similarity index 100%
rename from tests/ec2/__init__.py
rename to tests/integration/ec2/__init__.py
diff --git a/tests/autoscale/__init__.py b/tests/integration/ec2/autoscale/__init__.py
similarity index 100%
rename from tests/autoscale/__init__.py
rename to tests/integration/ec2/autoscale/__init__.py
diff --git a/tests/integration/ec2/autoscale/test_cert_verification.py b/tests/integration/ec2/autoscale/test_cert_verification.py
new file mode 100644
index 0000000..9927153
--- /dev/null
+++ b/tests/integration/ec2/autoscale/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.ec2.autoscale
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ autoscale = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.ec2.autoscale.regions():
+ c = region.connect()
+ c.get_all_groups()
diff --git a/tests/autoscale/test_connection.py b/tests/integration/ec2/autoscale/test_connection.py
similarity index 61%
rename from tests/autoscale/test_connection.py
rename to tests/integration/ec2/autoscale/test_connection.py
index 921fe43..cf8d99a 100644
--- a/tests/autoscale/test_connection.py
+++ b/tests/integration/ec2/autoscale/test_connection.py
@@ -33,8 +33,12 @@
from boto.ec2.autoscale.policy import AdjustmentType, MetricCollectionTypes, ScalingPolicy
from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction
from boto.ec2.autoscale.instance import Instance
+from boto.ec2.autoscale.tag import Tag
+
class AutoscaleConnectionTest(unittest.TestCase):
+ ec2 = True
+ autoscale = True
def test_basic(self):
# NB: as it says on the tin these are really basic tests that only
@@ -91,5 +95,73 @@
types = c.get_all_metric_collection_types()
self.assertTrue(type(types), MetricCollectionTypes)
- print '--- tests completed ---'
+ # create the simplest possible AutoScale group
+ # first create the launch configuration
+ time_string = '%d' % int(time.time())
+ lc_name = 'lc-%s' % time_string
+ lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
+ instance_type='t1.micro')
+ c.create_launch_configuration(lc)
+ found = False
+ lcs = c.get_all_launch_configurations()
+ for lc in lcs:
+ if lc.name == lc_name:
+ found = True
+ break
+ assert found
+ # now create autoscaling group
+ group_name = 'group-%s' % time_string
+ group = AutoScalingGroup(name=group_name, launch_config=lc,
+ availability_zones=['us-east-1a'],
+ min_size=1, max_size=1)
+ c.create_auto_scaling_group(group)
+ found = False
+ groups = c.get_all_groups()
+ for group in groups:
+ if group.name == group_name:
+ found = True
+ break
+ assert found
+
+ # now create a tag
+ tag = Tag(key='foo', value='bar', resource_id=group_name,
+ propagate_at_launch=True)
+ c.create_or_update_tags([tag])
+
+ found = False
+ tags = c.get_all_tags()
+ for tag in tags:
+ if tag.resource_id == group_name and tag.key == 'foo':
+ found = True
+ break
+ assert found
+
+ c.delete_tags([tag])
+
+ # shutdown instances and wait for them to disappear
+ group.shutdown_instances()
+ instances = True
+ while instances:
+ time.sleep(5)
+ groups = c.get_all_groups()
+ for group in groups:
+ if group.name == group_name:
+ if not group.instances:
+ instances = False
+
+ group.delete()
+ lc.delete()
+
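+        # poll until the deleted tag no longer appears in get_all_tags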
+ found = True
+ while found:
+ found = False
+ time.sleep(5)
+ tags = c.get_all_tags()
+ for tag in tags:
+ if tag.resource_id == group_name and tag.key == 'foo':
+ found = True
+
+ assert not found
+
+ print '--- tests completed ---'
diff --git a/tests/ec2/cloudwatch/__init__.py b/tests/integration/ec2/cloudwatch/__init__.py
similarity index 100%
rename from tests/ec2/cloudwatch/__init__.py
rename to tests/integration/ec2/cloudwatch/__init__.py
diff --git a/tests/integration/ec2/cloudwatch/test_cert_verification.py b/tests/integration/ec2/cloudwatch/test_cert_verification.py
new file mode 100644
index 0000000..4dfb56d
--- /dev/null
+++ b/tests/integration/ec2/cloudwatch/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.ec2.cloudwatch
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ cloudwatch = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.ec2.cloudwatch.regions():
+ c = region.connect()
+ c.describe_alarms()
diff --git a/tests/integration/ec2/cloudwatch/test_connection.py b/tests/integration/ec2/cloudwatch/test_connection.py
new file mode 100644
index 0000000..922c17b
--- /dev/null
+++ b/tests/integration/ec2/cloudwatch/test_connection.py
@@ -0,0 +1,275 @@
+# Copyright (c) 2010 Hunter Blanks http://artifex.org/~hblanks/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Initial, and very limited, unit tests for CloudWatchConnection.
+"""
+
+import datetime
+import time
+import unittest
+
+from boto.ec2.cloudwatch import CloudWatchConnection
+from boto.ec2.cloudwatch.metric import Metric
+
+# HTTP response body for CloudWatchConnection.describe_alarms
+DESCRIBE_ALARMS_BODY = """<DescribeAlarmsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
+ <DescribeAlarmsResult>
+ <MetricAlarms>
+ <member>
+ <StateUpdatedTimestamp>2011-11-18T23:43:59.111Z</StateUpdatedTimestamp>
+ <InsufficientDataActions/>
+ <StateReasonData>{"version":"1.0","queryDate":"2011-11-18T23:43:59.089+0000","startDate":"2011-11-18T23:30:00.000+0000","statistic":"Maximum","period":60,"recentDatapoints":[1.0,null,null,null,null,null,null,null,null,null,1.0],"threshold":1.0}</StateReasonData>
+ <AlarmArn>arn:aws:cloudwatch:us-east-1:1234:alarm:FancyAlarm</AlarmArn>
+ <AlarmConfigurationUpdatedTimestamp>2011-11-18T23:43:58.489Z</AlarmConfigurationUpdatedTimestamp>
+ <AlarmName>FancyAlarm</AlarmName>
+ <StateValue>OK</StateValue>
+ <Period>60</Period>
+ <OKActions/>
+ <ActionsEnabled>true</ActionsEnabled>
+ <Namespace>AcmeCo/Cronjobs</Namespace>
+ <EvaluationPeriods>15</EvaluationPeriods>
+ <Threshold>1.0</Threshold>
+ <Statistic>Maximum</Statistic>
+ <AlarmActions>
+ <member>arn:aws:sns:us-east-1:1234:Alerts</member>
+ </AlarmActions>
+ <StateReason>Threshold Crossed: 2 datapoints were not less than the threshold (1.0). The most recent datapoints: [1.0, 1.0].</StateReason>
+ <Dimensions>
+ <member>
+ <Name>Job</Name>
+ <Value>ANiceCronJob</Value>
+ </member>
+ </Dimensions>
+ <ComparisonOperator>LessThanThreshold</ComparisonOperator>
+ <MetricName>Success</MetricName>
+ </member>
+ <member>
+ <StateUpdatedTimestamp>2011-11-19T08:09:20.655Z</StateUpdatedTimestamp>
+ <InsufficientDataActions/>
+ <StateReasonData>{"version":"1.0","queryDate":"2011-11-19T08:09:20.633+0000","startDate":"2011-11-19T08:07:00.000+0000","statistic":"Maximum","period":60,"recentDatapoints":[1.0],"threshold":1.0}</StateReasonData>
+        <AlarmArn>arn:aws:cloudwatch:us-east-1:1234:alarm:SuperFancyAlarm</AlarmArn>
+ <AlarmConfigurationUpdatedTimestamp>2011-11-19T16:20:19.687Z</AlarmConfigurationUpdatedTimestamp>
+ <AlarmName>SuperFancyAlarm</AlarmName>
+ <StateValue>OK</StateValue>
+ <Period>60</Period>
+ <OKActions/>
+ <ActionsEnabled>true</ActionsEnabled>
+ <Namespace>AcmeCo/CronJobs</Namespace>
+ <EvaluationPeriods>60</EvaluationPeriods>
+ <Threshold>1.0</Threshold>
+ <Statistic>Maximum</Statistic>
+ <AlarmActions>
+ <member>arn:aws:sns:us-east-1:1234:alerts</member>
+ </AlarmActions>
+ <StateReason>Threshold Crossed: 1 datapoint (1.0) was not less than the threshold (1.0).</StateReason>
+ <Dimensions>
+ <member>
+ <Name>Job</Name>
+ <Value>ABadCronJob</Value>
+ </member>
+ </Dimensions>
+ <ComparisonOperator>GreaterThanThreshold</ComparisonOperator>
+ <MetricName>Success</MetricName>
+ </member>
+ </MetricAlarms>
+ </DescribeAlarmsResult>
+ <ResponseMetadata>
+ <RequestId>f621311-1463-11e1-95c3-312389123</RequestId>
+ </ResponseMetadata>
+</DescribeAlarmsResponse>"""
+
+
+class CloudWatchConnectionTest(unittest.TestCase):
+ ec2 = True
+
+ def test_build_list_params(self):
+ c = CloudWatchConnection()
+ params = {}
+ c.build_list_params(
+ params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
+ expected_params = {
+ 'ThingName1': 'thing1',
+ 'ThingName2': 'thing2',
+ 'ThingName3': 'thing3'
+ }
+ self.assertEqual(params, expected_params)
+
+ def test_build_put_params_one(self):
+ c = CloudWatchConnection()
+ params = {}
+ c.build_put_params(params, name="N", value=1, dimensions={"D": "V"})
+ expected_params = {
+ 'MetricData.member.1.MetricName': 'N',
+ 'MetricData.member.1.Value': 1,
+ 'MetricData.member.1.Dimensions.member.1.Name': 'D',
+ 'MetricData.member.1.Dimensions.member.1.Value': 'V',
+ }
+ self.assertEqual(params, expected_params)
+
+ def test_build_put_params_multiple_metrics(self):
+ c = CloudWatchConnection()
+ params = {}
+ c.build_put_params(params, name=["N", "M"], value=[1, 2], dimensions={"D": "V"})
+ expected_params = {
+ 'MetricData.member.1.MetricName': 'N',
+ 'MetricData.member.1.Value': 1,
+ 'MetricData.member.1.Dimensions.member.1.Name': 'D',
+ 'MetricData.member.1.Dimensions.member.1.Value': 'V',
+ 'MetricData.member.2.MetricName': 'M',
+ 'MetricData.member.2.Value': 2,
+ 'MetricData.member.2.Dimensions.member.1.Name': 'D',
+ 'MetricData.member.2.Dimensions.member.1.Value': 'V',
+ }
+ self.assertEqual(params, expected_params)
+
+ def test_build_put_params_multiple_dimensions(self):
+ c = CloudWatchConnection()
+ params = {}
+ c.build_put_params(params, name="N", value=[1, 2], dimensions=[{"D": "V"}, {"D": "W"}])
+ expected_params = {
+ 'MetricData.member.1.MetricName': 'N',
+ 'MetricData.member.1.Value': 1,
+ 'MetricData.member.1.Dimensions.member.1.Name': 'D',
+ 'MetricData.member.1.Dimensions.member.1.Value': 'V',
+ 'MetricData.member.2.MetricName': 'N',
+ 'MetricData.member.2.Value': 2,
+ 'MetricData.member.2.Dimensions.member.1.Name': 'D',
+ 'MetricData.member.2.Dimensions.member.1.Value': 'W',
+ }
+ self.assertEqual(params, expected_params)
+
+ def test_build_put_params_multiple_parameter_dimension(self):
+ from collections import OrderedDict
+ self.maxDiff = None
+ c = CloudWatchConnection()
+ params = {}
+ dimensions = [OrderedDict((("D1", "V"), ("D2", "W")))]
+ c.build_put_params(params,
+ name="N",
+ value=[1],
+ dimensions=dimensions)
+ expected_params = {
+ 'MetricData.member.1.MetricName': 'N',
+ 'MetricData.member.1.Value': 1,
+ 'MetricData.member.1.Dimensions.member.1.Name': 'D1',
+ 'MetricData.member.1.Dimensions.member.1.Value': 'V',
+ 'MetricData.member.1.Dimensions.member.2.Name': 'D2',
+ 'MetricData.member.1.Dimensions.member.2.Value': 'W',
+ }
+ self.assertEqual(params, expected_params)
+
+ def test_build_get_params_multiple_parameter_dimension1(self):
+ from collections import OrderedDict
+ self.maxDiff = None
+ c = CloudWatchConnection()
+ params = {}
+ dimensions = OrderedDict((("D1", "V"), ("D2", "W")))
+ c.build_dimension_param(dimensions, params)
+ expected_params = {
+ 'Dimensions.member.1.Name': 'D1',
+ 'Dimensions.member.1.Value': 'V',
+ 'Dimensions.member.2.Name': 'D2',
+ 'Dimensions.member.2.Value': 'W',
+ }
+ self.assertEqual(params, expected_params)
+
+ def test_build_get_params_multiple_parameter_dimension2(self):
+ from collections import OrderedDict
+ self.maxDiff = None
+ c = CloudWatchConnection()
+ params = {}
+ dimensions = OrderedDict((("D1", ["V1", "V2"]), ("D2", "W"), ("D3", None)))
+ c.build_dimension_param(dimensions, params)
+ expected_params = {
+ 'Dimensions.member.1.Name': 'D1',
+ 'Dimensions.member.1.Value': 'V1',
+ 'Dimensions.member.2.Name': 'D1',
+ 'Dimensions.member.2.Value': 'V2',
+ 'Dimensions.member.3.Name': 'D2',
+ 'Dimensions.member.3.Value': 'W',
+ 'Dimensions.member.4.Name': 'D3',
+ }
+ self.assertEqual(params, expected_params)
+
+ def test_build_put_params_invalid(self):
+ c = CloudWatchConnection()
+ params = {}
+ try:
+ c.build_put_params(params, name=["N", "M"], value=[1, 2, 3])
+        except Exception:
+ pass
+ else:
+ self.fail("Should not accept lists of different lengths.")
+
+ def test_get_metric_statistics(self):
+ c = CloudWatchConnection()
+ m = c.list_metrics()[0]
+ end = datetime.datetime.now()
+ start = end - datetime.timedelta(hours=24*14)
+ c.get_metric_statistics(
+ 3600*24, start, end, m.name, m.namespace, ['Average', 'Sum'])
+
+ def test_put_metric_data(self):
+ c = CloudWatchConnection()
+ now = datetime.datetime.now()
+ name, namespace = 'unit-test-metric', 'boto-unit-test'
+ c.put_metric_data(namespace, name, 5, now, 'Bytes')
+
+ # Uncomment the following lines for a slower but more thorough
+ # test. (Hurrah for eventual consistency...)
+ #
+ # metric = Metric(connection=c)
+ # metric.name = name
+ # metric.namespace = namespace
+ # time.sleep(60)
+ # l = metric.query(
+ # now - datetime.timedelta(seconds=60),
+ # datetime.datetime.now(),
+ # 'Average')
+ # assert l
+ # for row in l:
+ # self.assertEqual(row['Unit'], 'Bytes')
+ # self.assertEqual(row['Average'], 5.0)
+
+
+ def test_describe_alarms(self):
+ c = CloudWatchConnection()
+ def make_request(*args, **kwargs):
+ class Body(object):
+ def __init__(self):
+ self.status = 200
+ def read(self):
+ return DESCRIBE_ALARMS_BODY
+ return Body()
+
+ c.make_request = make_request
+ alarms = c.describe_alarms()
+        self.assertEqual(alarms[0].name, 'FancyAlarm')
+        self.assertEqual(alarms[0].comparison, '<')
+        self.assertEqual(alarms[0].dimensions, {u'Job': [u'ANiceCronJob']})
+        self.assertEqual(alarms[1].name, 'SuperFancyAlarm')
+        self.assertEqual(alarms[1].comparison, '>')
+        self.assertEqual(alarms[1].dimensions, {u'Job': [u'ABadCronJob']})
+
+if __name__ == '__main__':
+ unittest.main()
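test_describe_alarms shows the stubbing trick used to keep this test offline: make_request is replaced with a callable returning a canned object exposing just status and read(). A minimal standalone sketch of the same approach (placeholder credentials, so no real account is touched):

    from boto.ec2.cloudwatch import CloudWatchConnection

    class FakeResponse(object):
        # Mimics what make_request returns: a status plus a read() body.
        def __init__(self, body):
            self.status = 200
            self._body = body

        def read(self):
            return self._body

    conn = CloudWatchConnection(aws_access_key_id='fake',
                                aws_secret_access_key='fake')
    conn.make_request = lambda *args, **kwargs: FakeResponse(DESCRIBE_ALARMS_BODY)
    alarms = conn.describe_alarms()  # parsed from the fixture XML above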
diff --git a/tests/sdb/__init__.py b/tests/integration/ec2/elb/__init__.py
similarity index 100%
copy from tests/sdb/__init__.py
copy to tests/integration/ec2/elb/__init__.py
diff --git a/tests/integration/ec2/elb/test_cert_verification.py b/tests/integration/ec2/elb/test_cert_verification.py
new file mode 100644
index 0000000..a574f67
--- /dev/null
+++ b/tests/integration/ec2/elb/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.ec2.elb
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ elb = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.ec2.elb.regions():
+ c = region.connect()
+ c.get_all_load_balancers()
diff --git a/tests/ec2/elb/test_connection.py b/tests/integration/ec2/elb/test_connection.py
similarity index 74%
rename from tests/ec2/elb/test_connection.py
rename to tests/integration/ec2/elb/test_connection.py
index 4b6b7bb..2d574d9 100644
--- a/tests/ec2/elb/test_connection.py
+++ b/tests/integration/ec2/elb/test_connection.py
@@ -28,6 +28,7 @@
from boto.ec2.elb import ELBConnection
class ELBConnectionTest(unittest.TestCase):
+ ec2 = True
def tearDown(self):
""" Deletes all load balancers after every test. """
@@ -89,16 +90,41 @@
balancers = c.get_all_load_balancers()
self.assertEqual([lb.name for lb in balancers], [name])
self.assertEqual(
- [l.get_tuple() for l in balancers[0].listeners], listeners)
+ sorted([l.get_tuple() for l in balancers[0].listeners]),
+ sorted(listeners))
c.delete_load_balancer_listeners(name, [443])
balancers = c.get_all_load_balancers()
self.assertEqual([lb.name for lb in balancers], [name])
- self.assertEqual(
- [l.get_tuple() for l in balancers[0].listeners],
- listeners[:1]
- )
+ self.assertEqual([l.get_tuple() for l in balancers[0].listeners],
+ listeners[:1])
+ def test_create_load_balancer_listeners_with_policies(self):
+ c = ELBConnection()
+ name = 'elb-boto-unit-test-policy'
+ availability_zones = ['us-east-1a']
+ listeners = [(80, 8000, 'HTTP')]
+ balancer = c.create_load_balancer(name, availability_zones, listeners)
+
+ more_listeners = [(443, 8001, 'HTTP')]
+ c.create_load_balancer_listeners(name, more_listeners)
+
+ lb_policy_name = 'lb-policy'
+ c.create_lb_cookie_stickiness_policy(1000, name, lb_policy_name)
+ c.set_lb_policies_of_listener(name, listeners[0][0], lb_policy_name)
+
+ app_policy_name = 'app-policy'
+ c.create_app_cookie_stickiness_policy('appcookie', name, app_policy_name)
+ c.set_lb_policies_of_listener(name, more_listeners[0][0], app_policy_name)
+
+ balancers = c.get_all_load_balancers()
+ self.assertEqual([lb.name for lb in balancers], [name])
+ self.assertEqual(
+ sorted(l.get_tuple() for l in balancers[0].listeners),
+ sorted(listeners + more_listeners)
+ )
+ # Policy names should be checked here once they are supported
+ # in the Listener object.
if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
+ unittest.main()
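The new stickiness test exercises both policy flavours the ELB API offers. In isolation the calls look like this (balancer and policy names are placeholders):

    from boto.ec2.elb import ELBConnection

    c = ELBConnection()
    name = 'example-elb'
    c.create_load_balancer(name, ['us-east-1a'], [(80, 8000, 'HTTP')])

    # LB-generated cookie: ELB issues its own cookie, expiring after 1000s.
    c.create_lb_cookie_stickiness_policy(1000, name, 'lb-policy')
    c.set_lb_policies_of_listener(name, 80, 'lb-policy')

    # Application cookie: stickiness keyed off the app's own 'appcookie'.
    c.create_app_cookie_stickiness_policy('appcookie', name, 'app-policy')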
diff --git a/tests/integration/ec2/test_cert_verification.py b/tests/integration/ec2/test_cert_verification.py
new file mode 100644
index 0000000..6b1c574
--- /dev/null
+++ b/tests/integration/ec2/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.ec2
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ ec2 = True
+ ssl = True
+
+ def test_certs(self):
+        for region in boto.ec2.regions():
+            c = region.connect()
+            c.get_all_instances()
diff --git a/tests/ec2/test_connection.py b/tests/integration/ec2/test_connection.py
similarity index 84%
rename from tests/ec2/test_connection.py
rename to tests/integration/ec2/test_connection.py
index 6b7ece1..ef1080b 100644
--- a/tests/ec2/test_connection.py
+++ b/tests/integration/ec2/test_connection.py
@@ -16,7 +16,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -27,13 +27,18 @@
import unittest
import time
-from boto.ec2.connection import EC2Connection
import telnetlib
import socket
-class EC2ConnectionTest (unittest.TestCase):
+from nose.plugins.attrib import attr
+from boto.ec2.connection import EC2Connection
- def test_1_basic(self):
+
+class EC2ConnectionTest(unittest.TestCase):
+ ec2 = True
+
+ @attr('notdefault')
+ def test_launch_permissions(self):
# this is my user_id, if you want to run these tests you should
# replace this with yours or they won't work
user_id = '963068290131'
@@ -48,16 +53,18 @@
status = image.set_launch_permissions(group_names=['all'])
assert status
d = image.get_launch_permissions()
- assert d.has_key('groups')
+ assert 'groups' in d
assert len(d['groups']) > 0
# now remove that permission
status = image.remove_launch_permissions(group_names=['all'])
assert status
time.sleep(10)
d = image.get_launch_permissions()
- assert not d.has_key('groups')
-
+ assert 'groups' not in d
+
+ def test_1_basic(self):
# create 2 new security groups
+ c = EC2Connection()
group1_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group1 = c.create_security_group(group1_name, group_desc)
@@ -77,18 +84,26 @@
assert len(rs) == 1
# try some group to group authorizations/revocations
# first try the old style
- status = c.authorize_security_group(group1.name, group2.name, group2.owner_id)
+ status = c.authorize_security_group(group1.name,
+ group2.name,
+ group2.owner_id)
assert status
- status = c.revoke_security_group(group1.name, group2.name, group2.owner_id)
+ status = c.revoke_security_group(group1.name,
+ group2.name,
+ group2.owner_id)
assert status
# now try specifying a specific port
- status = c.authorize_security_group(group1.name, group2.name, group2.owner_id,
+ status = c.authorize_security_group(group1.name,
+ group2.name,
+ group2.owner_id,
'tcp', 22, 22)
assert status
- status = c.revoke_security_group(group1.name, group2.name, group2.owner_id,
+ status = c.revoke_security_group(group1.name,
+ group2.name,
+ group2.owner_id,
'tcp', 22, 22)
assert status
-
+
# now delete the second security group
status = c.delete_security_group(group2_name)
# now make sure it's really gone
@@ -100,7 +115,7 @@
assert not found
group = group1
-
+
# now try to launch apache image with our new security group
rs = c.get_all_images()
img_loc = 'ec2-public-images/fedora-core4-apache.manifest.xml'
@@ -131,9 +146,16 @@
pass
# now kill the instance and delete the security group
instance.terminate()
+
+ # check that state and previous_state have updated
+ assert instance.state == 'shutting-down'
+ assert instance.state_code == 32
+ assert instance.previous_state == 'running'
+ assert instance.previous_state_code == 16
+
# unfortunately, I can't delete the sg within this script
#sg.delete()
-
+
# create a new key pair
key_name = 'test-%d' % int(time.time())
status = c.create_key_pair(key_name)
@@ -166,5 +188,5 @@
assert len(l) == 1
assert len(l[0].product_codes) == 1
assert l[0].product_codes[0] == demo_paid_ami_product_code
-
+
print '--- tests completed ---'
diff --git a/tests/ec2/__init__.py b/tests/integration/emr/__init__.py
similarity index 100%
copy from tests/ec2/__init__.py
copy to tests/integration/emr/__init__.py
diff --git a/tests/integration/emr/test_cert_verification.py b/tests/integration/emr/test_cert_verification.py
new file mode 100644
index 0000000..7c09813
--- /dev/null
+++ b/tests/integration/emr/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all EMR endpoints validate.
+"""
+
+import unittest
+import boto.emr
+
+
+class EMRCertVerificationTest(unittest.TestCase):
+
+ emr = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.emr.regions():
+ c = region.connect()
+ c.describe_jobflows()
diff --git a/tests/integration/glacier/__init__.py b/tests/integration/glacier/__init__.py
new file mode 100644
index 0000000..5326afc
--- /dev/null
+++ b/tests/integration/glacier/__init__.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/tests/integration/glacier/test_cert_verification.py b/tests/integration/glacier/test_cert_verification.py
new file mode 100644
index 0000000..abb818e
--- /dev/null
+++ b/tests/integration/glacier/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.glacier
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ glacier = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.glacier.regions():
+ c = region.connect()
+ c.list_vaults()
diff --git a/tests/integration/glacier/test_layer2.py b/tests/integration/glacier/test_layer2.py
new file mode 100644
index 0000000..caa44fa
--- /dev/null
+++ b/tests/integration/glacier/test_layer2.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import time
+from tests.unit import unittest
+
+from boto.glacier.layer2 import Layer1, Layer2
+
+
+class TestGlacierLayer2(unittest.TestCase):
+ glacier = True
+
+ def setUp(self):
+ self.layer2 = Layer2()
+ self.vault_name = 'testvault%s' % int(time.time())
+
+ def test_create_delete_vault(self):
+ vault = self.layer2.create_vault(self.vault_name)
+ retrieved_vault = self.layer2.get_vault(self.vault_name)
+ self.layer2.delete_vault(self.vault_name)
+ self.assertEqual(vault.name, retrieved_vault.name)
+ self.assertEqual(vault.arn, retrieved_vault.arn)
+ self.assertEqual(vault.creation_date, retrieved_vault.creation_date)
+ self.assertEqual(vault.last_inventory_date,
+ retrieved_vault.last_inventory_date)
+ self.assertEqual(vault.number_of_archives,
+ retrieved_vault.number_of_archives)
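Layer2 is the object-level wrapper over the low-level Glacier client; the vault lifecycle this test covers reads naturally outside the suite as well (the vault name is a placeholder):

    import time
    from boto.glacier.layer2 import Layer2

    layer2 = Layer2()  # credentials come from the environment/boto config
    name = 'example-vault-%d' % int(time.time())
    vault = layer2.create_vault(name)
    assert layer2.get_vault(name).arn == vault.arn
    layer2.delete_vault(name)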
diff --git a/tests/integration/iam/__init__.py b/tests/integration/iam/__init__.py
new file mode 100644
index 0000000..fc0f80d
--- /dev/null
+++ b/tests/integration/iam/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/tests/integration/iam/test_cert_verification.py b/tests/integration/iam/test_cert_verification.py
new file mode 100644
index 0000000..5791ac1
--- /dev/null
+++ b/tests/integration/iam/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.iam
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ iam = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.iam.regions():
+ c = region.connect()
+ c.get_all_users()
diff --git a/tests/cloudfront/__init__.py b/tests/integration/mws/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/integration/mws/__init__.py
diff --git a/tests/integration/mws/test.py b/tests/integration/mws/test.py
new file mode 100644
index 0000000..1d75379
--- /dev/null
+++ b/tests/integration/mws/test.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+from tests.unit import unittest
+import sys
+import os
+import os.path
+
+
+simple = os.environ.get('MWS_MERCHANT', None)
+if not simple:
+ print """
+    Please set the MWS_MERCHANT environment variable
+ to your Merchant or SellerId to enable MWS tests.
+ """
+
+
+advanced = False
+isolator = True
+if __name__ == "__main__":
+ devpath = os.path.relpath(os.path.join('..', '..'),
+ start=os.path.dirname(__file__))
+ sys.path = [devpath] + sys.path
+    advanced = bool(simple)
+ if advanced:
+ print '>>> advanced MWS tests; using local boto sources'
+
+from boto.mws.connection import MWSConnection
+
+
+class MWSTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mws = MWSConnection(Merchant=simple, debug=0)
+
+ @unittest.skipUnless(simple and isolator, "skipping simple test")
+ def test_feedlist(self):
+ self.mws.get_feed_submission_list()
+
+ @unittest.skipUnless(simple and isolator, "skipping simple test")
+ def test_inbound_status(self):
+ response = self.mws.get_inbound_service_status()
+ status = response.GetServiceStatusResult.Status
+ self.assertIn(status, ('GREEN', 'GREEN_I', 'YELLOW', 'RED'))
+
+ @property
+ def marketplace(self):
+ response = self.mws.list_marketplace_participations()
+ result = response.ListMarketplaceParticipationsResult
+ return result.ListMarketplaces.Marketplace[0]
+
+ @property
+ def marketplace_id(self):
+ return self.marketplace.MarketplaceId
+
+ @unittest.skipUnless(simple and isolator, "skipping simple test")
+ def test_marketplace_participations(self):
+ response = self.mws.list_marketplace_participations()
+ result = response.ListMarketplaceParticipationsResult
+ self.assertTrue(result.ListMarketplaces.Marketplace[0].MarketplaceId)
+
+ @unittest.skipUnless(simple and isolator, "skipping simple test")
+ def test_get_product_categories_for_asin(self):
+ asin = '144930544X'
+        response = self.mws.get_product_categories_for_asin(
+ MarketplaceId=self.marketplace_id,
+ ASIN=asin)
+ result = response._result
+        self.assertEqual(int(result.Self.ProductCategoryId), 21)
+
+ @unittest.skipUnless(simple and isolator, "skipping simple test")
+ def test_list_matching_products(self):
+        response = self.mws.list_matching_products(
+ MarketplaceId=self.marketplace_id,
+ Query='boto')
+ products = response._result.Products
+ self.assertTrue(len(products))
+
+ @unittest.skipUnless(simple and isolator, "skipping simple test")
+ def test_get_matching_product(self):
+ asin = 'B001UDRNHO'
+        response = self.mws.get_matching_product(
+ MarketplaceId=self.marketplace_id,
+ ASINList=[asin,])
+        product = response._result[0].Product
+        self.assertIsNotNone(product)
+
+ @unittest.skipUnless(simple and isolator, "skipping simple test")
+ def test_get_lowest_offer_listings_for_asin(self):
+ asin = '144930544X'
+        response = self.mws.get_lowest_offer_listings_for_asin(
+ MarketplaceId=self.marketplace_id,
+ ItemCondition='New',
+ ASINList=[asin,])
+ product = response._result[0].Product
+ self.assertTrue(product.LowestOfferListings)
+
+if __name__ == "__main__":
+ unittest.main()
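Everything in this module is gated on MWS_MERCHANT via skipUnless, so the suite is a no-op without a seller account. Outside the suite, the simplest smoke test looks like this (the seller id is a placeholder):

    import os
    from boto.mws.connection import MWSConnection

    os.environ.setdefault('MWS_MERCHANT', 'YOUR_SELLER_ID')  # placeholder id
    mws = MWSConnection(Merchant=os.environ['MWS_MERCHANT'])
    response = mws.get_inbound_service_status()
    print response.GetServiceStatusResult.Status  # GREEN, GREEN_I, YELLOW or RED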
diff --git a/tests/integration/rds/__init__.py b/tests/integration/rds/__init__.py
new file mode 100644
index 0000000..b7fe4c2
--- /dev/null
+++ b/tests/integration/rds/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/tests/integration/rds/test_cert_verification.py b/tests/integration/rds/test_cert_verification.py
new file mode 100644
index 0000000..1efe8f3
--- /dev/null
+++ b/tests/integration/rds/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.rds
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ rds = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.rds.regions():
+ c = region.connect()
+ c.get_all_dbinstances()
diff --git a/tests/integration/route53/__init__.py b/tests/integration/route53/__init__.py
new file mode 100644
index 0000000..fc0f80d
--- /dev/null
+++ b/tests/integration/route53/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/tests/integration/route53/test_cert_verification.py b/tests/integration/route53/test_cert_verification.py
new file mode 100644
index 0000000..18c43a9
--- /dev/null
+++ b/tests/integration/route53/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.route53
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ route53 = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.route53.regions():
+ c = region.connect()
+ c.get_all_hosted_zones()
diff --git a/tests/s3/__init__.py b/tests/integration/s3/__init__.py
similarity index 100%
rename from tests/s3/__init__.py
rename to tests/integration/s3/__init__.py
diff --git a/tests/s3/cb_test_harnass.py b/tests/integration/s3/cb_test_harnass.py
similarity index 100%
rename from tests/s3/cb_test_harnass.py
rename to tests/integration/s3/cb_test_harnass.py
diff --git a/tests/s3/mock_storage_service.py b/tests/integration/s3/mock_storage_service.py
similarity index 68%
rename from tests/s3/mock_storage_service.py
rename to tests/integration/s3/mock_storage_service.py
index aa28b59..f08af79 100644
--- a/tests/s3/mock_storage_service.py
+++ b/tests/integration/s3/mock_storage_service.py
@@ -28,6 +28,15 @@
import copy
import boto
+import base64
+
+from boto.utils import compute_md5
+from boto.s3.prefix import Prefix
+
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
NOT_IMPL = None
@@ -53,10 +62,19 @@
self.bucket = bucket
self.name = name
self.data = None
+ self.etag = None
self.size = None
self.content_encoding = None
+ self.content_language = None
self.content_type = None
self.last_modified = 'Wed, 06 Oct 2010 05:11:54 GMT'
+ self.BufferSize = 8192
+
+ def __repr__(self):
+ if self.bucket:
+ return '<MockKey: %s,%s>' % (self.bucket.name, self.name)
+ else:
+ return '<MockKey: %s>' % self.name
def get_contents_as_string(self, headers=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL,
@@ -83,6 +101,8 @@
self.content_encoding = headers['Content-Encoding']
if 'Content-Type' in headers:
self.content_type = headers['Content-Type']
+ if 'Content-Language' in headers:
+ self.content_language = headers['Content-Language']
def open_read(self, headers=NOT_IMPL, query_args=NOT_IMPL,
override_num_retries=NOT_IMPL):
@@ -93,6 +113,7 @@
policy=NOT_IMPL, md5=NOT_IMPL,
res_upload_handler=NOT_IMPL):
self.data = fp.read()
+ self.set_etag()
self.size = len(self.data)
self._handle_headers(headers)
@@ -100,13 +121,14 @@
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
md5=NOT_IMPL, reduced_redundancy=NOT_IMPL):
self.data = copy.copy(s)
+ self.set_etag()
self.size = len(s)
self._handle_headers(headers)
- def set_contents_from_filename(self, filename, headers=None, replace=NOT_IMPL,
- cb=NOT_IMPL, num_cb=NOT_IMPL,
- policy=NOT_IMPL, md5=NOT_IMPL,
- res_upload_handler=NOT_IMPL):
+ def set_contents_from_filename(self, filename, headers=None,
+ replace=NOT_IMPL, cb=NOT_IMPL,
+ num_cb=NOT_IMPL, policy=NOT_IMPL,
+ md5=NOT_IMPL, res_upload_handler=NOT_IMPL):
fp = open(filename, 'rb')
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler)
@@ -118,6 +140,38 @@
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata)
+ def set_etag(self):
+ """
+ Set etag attribute by generating hex MD5 checksum on current
+ contents of mock key.
+ """
+ m = md5()
+ m.update(self.data)
+ hex_md5 = m.hexdigest()
+ self.etag = hex_md5
+
+ def compute_md5(self, fp):
+ """
+ :type fp: file
+ :param fp: File pointer to the file to MD5 hash. The file pointer
+ will be reset to the beginning of the file before the
+ method returns.
+
+ :rtype: tuple
+ :return: A tuple containing the hex digest version of the MD5 hash
+ as the first element and the base64 encoded version of the
+ plain digest as the second element.
+ """
+ tup = compute_md5(fp)
+ # Returned values are MD5 hash, base64 encoded MD5 hash, and file size.
+ # The internal implementation of compute_md5() needs to return the
+ # file size but we don't want to return that value to the external
+ # caller because it changes the class interface (i.e. it might
+ # break some code) so we consume the third tuple value here and
+ # return the remainder of the tuple to the caller, thereby preserving
+ # the existing interface.
+ self.size = tup[2]
+ return tup[0:2]
class MockBucket(object):
@@ -131,9 +185,13 @@
self.connection = connection
self.logging = False
+ def __repr__(self):
+ return 'MockBucket: %s' % self.name
+
def copy_key(self, new_key_name, src_bucket_name,
src_key_name, metadata=NOT_IMPL, src_version_id=NOT_IMPL,
- storage_class=NOT_IMPL, preserve_acl=NOT_IMPL):
+ storage_class=NOT_IMPL, preserve_acl=NOT_IMPL,
+ encrypt_key=NOT_IMPL, headers=NOT_IMPL, query_args=NOT_IMPL):
new_key = self.new_key(key_name=new_key_name)
src_key = mock_connection.get_bucket(
src_bucket_name).get_key(src_key_name)
@@ -188,17 +246,29 @@
return None
return self.keys[key_name]
- def list(self, prefix='', delimiter=NOT_IMPL, marker=NOT_IMPL,
+ def list(self, prefix='', delimiter='', marker=NOT_IMPL,
headers=NOT_IMPL):
+ prefix = prefix or '' # Turn None into '' for prefix match.
# Return list instead of using a generator so we don't get
# 'dictionary changed size during iteration' error when performing
# deletions while iterating (e.g., during test cleanup).
result = []
+ key_name_set = set()
for k in self.keys.itervalues():
- if not prefix:
- result.append(k)
- elif k.name.startswith(prefix):
- result.append(k)
+ if k.name.startswith(prefix):
+ k_name_past_prefix = k.name[len(prefix):]
+ if delimiter:
+ pos = k_name_past_prefix.find(delimiter)
+ else:
+ pos = -1
+                if pos != -1:
+ key_or_prefix = Prefix(
+ bucket=self, name=k.name[:len(prefix)+pos+1])
+ else:
+ key_or_prefix = MockKey(bucket=self, name=k.name)
+ if key_or_prefix.name not in key_name_set:
+ key_name_set.add(key_or_prefix.name)
+ result.append(key_or_prefix)
return result
def set_acl(self, acl_or_str, key_name='', headers=NOT_IMPL,
@@ -207,10 +277,10 @@
# the get_acl call will just return that string name.
if key_name:
# Set ACL for the key.
- self.acls[key_name] = acl_or_str
+ self.acls[key_name] = MockAcl(acl_or_str)
else:
# Set ACL for the bucket.
- self.acls[self.name] = acl_or_str
+ self.acls[self.name] = MockAcl(acl_or_str)
def set_def_acl(self, acl_or_str, key_name=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
@@ -270,8 +340,10 @@
class MockBucketStorageUri(object):
+ delim = '/'
+
def __init__(self, scheme, bucket_name=None, object_name=None,
- debug=NOT_IMPL):
+ debug=NOT_IMPL, suppress_consec_slashes=NOT_IMPL):
self.scheme = scheme
self.bucket_name = bucket_name
self.object_name = object_name
@@ -314,9 +386,8 @@
version_id=NOT_IMPL):
self.get_bucket().disable_logging()
- def enable_logging(self, target_bucket, target_prefix, canned_acl=NOT_IMPL,
- validate=NOT_IMPL, headers=NOT_IMPL,
- version_id=NOT_IMPL):
+ def enable_logging(self, target_bucket, target_prefix, validate=NOT_IMPL,
+ headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().enable_logging(target_bucket)
def equals(self, uri):
@@ -325,7 +396,7 @@
def get_acl(self, validate=NOT_IMPL, headers=NOT_IMPL, version_id=NOT_IMPL):
return self.get_bucket().get_acl(self.object_name)
- def get_def_acl(self, validate=NOT_IMPL, headers=NOT_IMPL,
+ def get_def_acl(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
return self.get_bucket().get_def_acl(self.object_name)
@@ -339,6 +410,9 @@
def get_all_keys(self, validate=NOT_IMPL, headers=NOT_IMPL):
return self.get_bucket().get_all_keys(self)
+ def list_bucket(self, prefix='', delimiter='', headers=NOT_IMPL):
+ return self.get_bucket().list(prefix=prefix, delimiter=delimiter)
+
def get_bucket(self, validate=NOT_IMPL, headers=NOT_IMPL):
return self.connect().get_bucket(self.bucket_name)
@@ -353,10 +427,28 @@
return True
def names_container(self):
- return not self.object_name
+ return bool(not self.object_name)
def names_singleton(self):
- return self.object_name
+ return bool(self.object_name)
+
+ def names_directory(self):
+ return False
+
+ def names_provider(self):
+ return bool(not self.bucket_name)
+
+ def names_bucket(self):
+ return self.names_container()
+
+ def names_file(self):
+ return False
+
+ def names_object(self):
+ return not self.names_container()
+
+ def is_stream(self):
+ return False
def new_key(self, validate=NOT_IMPL, headers=NOT_IMPL):
bucket = self.get_bucket()
@@ -373,3 +465,43 @@
def set_subresource(self, subresource, value, validate=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().set_subresource(subresource, value, self.object_name)
+
+ def copy_key(self, src_bucket_name, src_key_name, metadata=NOT_IMPL,
+ src_version_id=NOT_IMPL, storage_class=NOT_IMPL,
+ preserve_acl=NOT_IMPL, encrypt_key=NOT_IMPL, headers=NOT_IMPL,
+ query_args=NOT_IMPL):
+ dst_bucket = self.get_bucket()
+ return dst_bucket.copy_key(new_key_name=self.object_name,
+ src_bucket_name=src_bucket_name,
+ src_key_name=src_key_name)
+
+ def set_contents_from_string(self, s, headers=NOT_IMPL, replace=NOT_IMPL,
+ cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
+ md5=NOT_IMPL, reduced_redundancy=NOT_IMPL):
+ key = self.new_key()
+ key.set_contents_from_string(s)
+
+ def set_contents_from_file(self, fp, headers=None, replace=NOT_IMPL,
+ cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
+ md5=NOT_IMPL, size=NOT_IMPL, rewind=NOT_IMPL,
+ res_upload_handler=NOT_IMPL):
+ key = self.new_key()
+ return key.set_contents_from_file(fp, headers=headers)
+
+ def set_contents_from_stream(self, fp, headers=NOT_IMPL, replace=NOT_IMPL,
+ cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
+ reduced_redundancy=NOT_IMPL,
+ query_args=NOT_IMPL, size=NOT_IMPL):
+        # MockKey has no stream variant; reuse the file-based mock instead.
+        key = self.new_key()
+        key.set_contents_from_file(fp)
+
+ def get_contents_to_file(self, fp, headers=NOT_IMPL, cb=NOT_IMPL,
+ num_cb=NOT_IMPL, torrent=NOT_IMPL,
+ version_id=NOT_IMPL, res_download_handler=NOT_IMPL,
+ response_headers=NOT_IMPL):
+ key = self.get_key()
+ key.get_contents_to_file(fp)
+
+ def get_contents_to_stream(self, fp, headers=NOT_IMPL, cb=NOT_IMPL,
+ num_cb=NOT_IMPL, version_id=NOT_IMPL):
+ key = self.get_key()
+ return key.get_contents_to_file(fp)
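The substantive change to the mock is list(): keys sharing a delimiter-bounded prefix now roll up into a single Prefix object, matching real S3 listing semantics. A sketch of the behaviour (assuming the constructor accepts the bucket name as a bucket_name keyword; adjust to the actual signature if it differs):

    from tests.integration.s3.mock_storage_service import MockBucket

    bucket = MockBucket(bucket_name='mock')  # assumption: bucket_name kwarg
    for name in ('a/1', 'a/2', 'b'):
        bucket.new_key(key_name=name).set_contents_from_string(name)

    print sorted(item.name for item in bucket.list(delimiter='/'))
    # expected: ['a/', 'b'] -- both 'a/...' keys collapse into one Prefix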
diff --git a/tests/s3/other_cacerts.txt b/tests/integration/s3/other_cacerts.txt
similarity index 100%
rename from tests/s3/other_cacerts.txt
rename to tests/integration/s3/other_cacerts.txt
diff --git a/tests/integration/s3/test_bucket.py b/tests/integration/s3/test_bucket.py
new file mode 100644
index 0000000..2611be0
--- /dev/null
+++ b/tests/integration/s3/test_bucket.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the S3 Bucket
+"""
+
+import unittest
+import time
+
+from boto.exception import S3ResponseError
+from boto.s3.connection import S3Connection
+from boto.s3.bucketlogging import BucketLogging
+from boto.s3.acl import Grant
+from boto.s3.tagging import Tags, TagSet
+
+
+class S3BucketTest(unittest.TestCase):
+ s3 = True
+
+ def setUp(self):
+ self.conn = S3Connection()
+ self.bucket_name = 'bucket-%d' % int(time.time())
+ self.bucket = self.conn.create_bucket(self.bucket_name)
+
+ def tearDown(self):
+ for key in self.bucket:
+ key.delete()
+ self.bucket.delete()
+
+ def test_next_marker(self):
+ expected = ["a/", "b", "c"]
+ for key_name in expected:
+ key = self.bucket.new_key(key_name)
+ key.set_contents_from_string(key_name)
+
+ # Normal list of first 2 keys will have
+ # no NextMarker set, so we use last key to iterate
+ # last element will be "b" so no issue.
+ rs = self.bucket.get_all_keys(max_keys=2)
+ for element in rs:
+ pass
+ self.assertEqual(element.name, "b")
+ self.assertEqual(rs.next_marker, None)
+
+ # list using delimiter of first 2 keys will have
+ # a NextMarker set (when truncated). As prefixes
+ # are grouped together at the end, we get "a/" as
+ # last element, but luckily we have next_marker.
+ rs = self.bucket.get_all_keys(max_keys=2, delimiter="/")
+ for element in rs:
+ pass
+ self.assertEqual(element.name, "a/")
+ self.assertEqual(rs.next_marker, "b")
+
+ # ensure bucket.list() still works by just
+ # popping elements off the front of expected.
+ rs = self.bucket.list()
+ for element in rs:
+ self.assertEqual(element.name, expected.pop(0))
+ self.assertEqual(expected, [])
+
+ def test_logging(self):
+ # use self.bucket as the target bucket so that teardown
+ # will delete any log files that make it into the bucket
+ # automatically and all we have to do is delete the
+ # source bucket.
+ sb_name = "src-" + self.bucket_name
+ sb = self.conn.create_bucket(sb_name)
+ # grant log write perms to target bucket using canned-acl
+ self.bucket.set_acl("log-delivery-write")
+ target_bucket = self.bucket_name
+        target_prefix = u"jp/ログ/"
+ # Check existing status is disabled
+ bls = sb.get_logging_status()
+ self.assertEqual(bls.target, None)
+ # Create a logging status and grant auth users READ PERM
+ authuri = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
+ authr = Grant(permission="READ", type="Group", uri=authuri)
+ sb.enable_logging(target_bucket, target_prefix=target_prefix, grants=[authr])
+ # Check the status and confirm its set.
+ bls = sb.get_logging_status()
+ self.assertEqual(bls.target, target_bucket)
+ self.assertEqual(bls.prefix, target_prefix)
+ self.assertEqual(len(bls.grants), 1)
+ self.assertEqual(bls.grants[0].type, "Group")
+ self.assertEqual(bls.grants[0].uri, authuri)
+ # finally delete the src bucket
+ sb.delete()
+
+ def test_tagging(self):
+ tagging = """
+ <Tagging>
+ <TagSet>
+ <Tag>
+ <Key>tagkey</Key>
+ <Value>tagvalue</Value>
+ </Tag>
+ </TagSet>
+ </Tagging>
+ """
+ self.bucket.set_xml_tags(tagging)
+ response = self.bucket.get_tags()
+ self.assertEqual(response[0][0].key, 'tagkey')
+ self.assertEqual(response[0][0].value, 'tagvalue')
+ self.bucket.delete_tags()
+ try:
+ self.bucket.get_tags()
+ except S3ResponseError, e:
+ self.assertEqual(e.code, 'NoSuchTagSet')
+ except Exception, e:
+ self.fail("Wrong exception raised (expected S3ResponseError): %s"
+ % e)
+ else:
+ self.fail("Expected S3ResponseError, but no exception raised.")
+
+ def test_tagging_from_objects(self):
+ """Create tags from python objects rather than raw xml."""
+ t = Tags()
+ tag_set = TagSet()
+ tag_set.add_tag('akey', 'avalue')
+ tag_set.add_tag('anotherkey', 'anothervalue')
+ t.add_tag_set(tag_set)
+ self.bucket.set_tags(t)
+ response = self.bucket.get_tags()
+ self.assertEqual(response[0][0].key, 'akey')
+ self.assertEqual(response[0][0].value, 'avalue')
+ self.assertEqual(response[0][1].key, 'anotherkey')
+ self.assertEqual(response[0][1].value, 'anothervalue')
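test_tagging_from_objects builds the same document as the raw XML in test_tagging, just through the object model. The mapping, as a sketch (to_xml is assumed to render the Tagging document that set_tags ultimately sends):

    from boto.s3.tagging import Tags, TagSet

    tag_set = TagSet()
    tag_set.add_tag('tagkey', 'tagvalue')
    tags = Tags()
    tags.add_tag_set(tag_set)
    print tags.to_xml()  # assumption: emits <Tagging><TagSet><Tag>...</Tag></TagSet></Tagging>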
diff --git a/tests/integration/s3/test_cert_verification.py b/tests/integration/s3/test_cert_verification.py
new file mode 100644
index 0000000..27116de
--- /dev/null
+++ b/tests/integration/s3/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all S3 endpoints validate.
+"""
+
+import unittest
+import boto.s3
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ s3 = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.s3.regions():
+ c = region.connect()
+ c.get_all_buckets()
diff --git a/tests/s3/test_connection.py b/tests/integration/s3/test_connection.py
similarity index 73%
rename from tests/s3/test_connection.py
rename to tests/integration/s3/test_connection.py
index 4c209bd..b673303 100644
--- a/tests/s3/test_connection.py
+++ b/tests/integration/s3/test_connection.py
@@ -28,10 +28,15 @@
import time
import os
import urllib
+import urlparse
+import httplib
from boto.s3.connection import S3Connection
-from boto.exception import S3PermissionsError
+from boto.s3.bucket import Bucket
+from boto.exception import S3PermissionsError, S3ResponseError
+
class S3ConnectionTest (unittest.TestCase):
+ s3 = True
def test_1_basic(self):
print '--- running S3Connection tests ---'
@@ -47,8 +52,7 @@
bucket.enable_logging(target_bucket=logging_bucket, target_prefix=bucket.name)
bucket.disable_logging()
c.delete_bucket(logging_bucket)
- k = bucket.new_key()
- k.name = 'foobar'
+ k = bucket.new_key('foobar')
s1 = 'This is a test of file upload and download'
s2 = 'This is a second string to test file upload and download'
k.set_contents_from_string(s1)
@@ -70,6 +74,23 @@
url = k.generate_url(3600, force_http=True, headers={'x-amz-x-token' : 'XYZ'})
file = urllib.urlopen(url)
assert s1 == file.read(), 'invalid URL %s' % url
+ rh = {'response-content-disposition': 'attachment; filename="foo.txt"'}
+ url = k.generate_url(60, response_headers=rh)
+ file = urllib.urlopen(url)
+ assert s1 == file.read(), 'invalid URL %s' % url
+ # test whether ampersands and to-be-escaped characters work in the header filename
+ rh = {'response-content-disposition': 'attachment; filename="foo&z%20ar&ar&zar&bar.txt"'}
+ url = k.generate_url(60, response_headers=rh, force_http=True)
+ file = urllib.urlopen(url)
+ assert s1 == file.read(), 'invalid URL %s' % url
+ # overwrite foobar contents with a PUT
+ url = k.generate_url(3600, 'PUT', force_http=True, policy='private', reduced_redundancy=True)
+ up = urlparse.urlsplit(url)
+ con = httplib.HTTPConnection(up.hostname, up.port)
+ con.request("PUT", up.path + '?' + up.query, body="hello there")
+ resp = con.getresponse()
+ assert 200 == resp.status
+ assert "hello there" == k.get_contents_as_string()
bucket.delete_key(k)
# test a few variations on get_all_keys - first load some data
# for the first one, let's override the content type
@@ -107,8 +128,7 @@
k = bucket.lookup('notthere')
assert k == None
# try some metadata stuff
- k = bucket.new_key()
- k.name = 'has_metadata'
+ k = bucket.new_key('has_metadata')
mdkey1 = 'meta1'
mdval1 = 'This is the first metadata value'
k.set_metadata(mdkey1, mdval1)
@@ -124,8 +144,7 @@
assert k.get_metadata(mdkey1) == mdval1
assert k.get_metadata(mdkey2) == mdval2
assert k.get_metadata(mdkey3) == mdval3
- k = bucket.new_key()
- k.name = 'has_metadata'
+ k = bucket.new_key('has_metadata')
k.get_contents_as_string()
assert k.get_metadata(mdkey1) == mdval1
assert k.get_metadata(mdkey2) == mdval2
@@ -140,8 +159,7 @@
num_keys = len(rs)
assert num_iter == num_keys
# try a key with a funny character
- k = bucket.new_key()
- k.name = 'testnewline\n'
+ k = bucket.new_key('testnewline\n')
k.set_contents_from_string('This is a test')
rs = bucket.get_all_keys()
assert len(rs) == num_keys + 1
@@ -187,3 +205,41 @@
time.sleep(5)
c.delete_bucket(bucket)
print '--- tests completed ---'
+
+ def test_basic_anon(self):
+ auth_con = S3Connection()
+ # create a new, empty bucket
+ bucket_name = 'test-%d' % int(time.time())
+ auth_bucket = auth_con.create_bucket(bucket_name)
+
+ # try to read the bucket anonymously
+ anon_con = S3Connection(anon=True)
+ anon_bucket = Bucket(anon_con, bucket_name)
+ try:
+ iter(anon_bucket.list()).next()
+ self.fail("anon bucket list should fail")
+ except S3ResponseError:
+ pass
+
+ # give bucket anon user access and anon read again
+ auth_bucket.set_acl('public-read')
+ try:
+ iter(anon_bucket.list()).next()
+ self.fail("not expecting contents")
+ except S3ResponseError, e:
+ self.fail("We should have public-read access, but received "
+ "an error: %s" % e)
+ except StopIteration:
+ pass
+
+ # cleanup
+ auth_con.delete_bucket(auth_bucket)
+
+ def test_error_code_populated(self):
+ c = S3Connection()
+ try:
+ c.create_bucket('bad$bucket$name')
+ except S3ResponseError, e:
+ self.assertEqual(e.error_code, 'InvalidBucketName')
+ else:
+ self.fail("S3ResponseError not raised.")
diff --git a/tests/integration/s3/test_cors.py b/tests/integration/s3/test_cors.py
new file mode 100644
index 0000000..84b12f0
--- /dev/null
+++ b/tests/integration/s3/test_cors.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some integration tests for S3 CORS
+"""
+
+import unittest
+import time
+
+from boto.s3.connection import S3Connection
+from boto.exception import S3ResponseError
+from boto.s3.cors import CORSConfiguration
+
+
+class S3CORSTest (unittest.TestCase):
+ s3 = True
+
+ def setUp(self):
+ self.conn = S3Connection()
+ self.bucket_name = 'cors-%d' % int(time.time())
+ self.bucket = self.conn.create_bucket(self.bucket_name)
+
+ def tearDown(self):
+ self.bucket.delete()
+
+ def test_cors(self):
+ self.cfg = CORSConfiguration()
+ self.cfg.add_rule(['PUT', 'POST', 'DELETE'],
+ 'http://www.example.com',
+ allowed_header='*', max_age_seconds=3000,
+ expose_header='x-amz-server-side-encryption',
+ id='foobar_rule')
+ assert self.bucket.set_cors(self.cfg)
+ time.sleep(5)
+ cfg = self.bucket.get_cors()
+ for i, rule in enumerate(cfg):
+ self.assertEqual(rule.id, self.cfg[i].id)
+ self.assertEqual(rule.max_age_seconds, self.cfg[i].max_age_seconds)
+ methods = zip(rule.allowed_method, self.cfg[i].allowed_method)
+ for v1, v2 in methods:
+ self.assertEqual(v1, v2)
+ origins = zip(rule.allowed_origin, self.cfg[i].allowed_origin)
+ for v1, v2 in origins:
+ self.assertEqual(v1, v2)
+ headers = zip(rule.allowed_header, self.cfg[i].allowed_header)
+ for v1, v2 in headers:
+ self.assertEqual(v1, v2)
+ headers = zip(rule.expose_header, self.cfg[i].expose_header)
+ for v1, v2 in headers:
+ self.assertEqual(v1, v2)
+ self.bucket.delete_cors()
+ time.sleep(5)
+ try:
+ self.bucket.get_cors()
+ self.fail('CORS configuration should not be there')
+ except S3ResponseError:
+ pass
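
For reference, the CORSConfiguration object used by this test is built rule by rule and serialized to XML by set_cors(). A minimal sketch, assuming a bucket you own (name is a placeholder):

    from boto.s3.connection import S3Connection
    from boto.s3.cors import CORSConfiguration

    bucket = S3Connection().get_bucket('example-cors-demo')  # placeholder

    cfg = CORSConfiguration()
    # One rule: allow browser PUTs from a single origin and cache the
    # preflight response for 3000 seconds.
    cfg.add_rule(['PUT'], 'http://www.example.com',
                 allowed_header='*', max_age_seconds=3000)
    bucket.set_cors(cfg)

    print bucket.get_cors().to_xml()
    bucket.delete_cors()   # removes the whole CORS document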
diff --git a/tests/s3/test_encryption.py b/tests/integration/s3/test_encryption.py
similarity index 99%
rename from tests/s3/test_encryption.py
rename to tests/integration/s3/test_encryption.py
index 91ef71c..c5b1bc6 100644
--- a/tests/s3/test_encryption.py
+++ b/tests/integration/s3/test_encryption.py
@@ -51,6 +51,7 @@
}"""
class S3EncryptionTest (unittest.TestCase):
+ s3 = True
def test_1_versions(self):
print '--- running S3Encryption tests ---'
diff --git a/tests/s3/test_gsconnection.py b/tests/integration/s3/test_gsconnection.py
similarity index 69%
rename from tests/s3/test_gsconnection.py
rename to tests/integration/s3/test_gsconnection.py
index c8c6d58..250cea9 100644
--- a/tests/s3/test_gsconnection.py
+++ b/tests/integration/s3/test_gsconnection.py
@@ -2,6 +2,7 @@
# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems, Inc.
+# Copyright (c) 2012, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,13 +28,40 @@
Some unit tests for the GSConnection
"""
-import unittest
+import boto
import time
import os
+import re
+import xml
from boto.gs.connection import GSConnection
+from boto.gs.cors import Cors
+from boto import handler
from boto import storage_uri
+from boto.provider import Provider
+from tests.unit import unittest
-class GSConnectionTest (unittest.TestCase):
+
+_HAS_GOOGLE_CREDENTIALS = None
+
+
+def has_google_credentials():
+ global _HAS_GOOGLE_CREDENTIALS
+ if _HAS_GOOGLE_CREDENTIALS is None:
+ provider = Provider('google')
+ if provider.access_key is None or provider.secret_key is None:
+ _HAS_GOOGLE_CREDENTIALS = False
+ else:
+ _HAS_GOOGLE_CREDENTIALS = True
+ return _HAS_GOOGLE_CREDENTIALS
+
+
+@unittest.skipUnless(has_google_credentials(),
+ "Google credentials are required to run the Google "
+ "Cloud Storage tests. Update your boto.cfg to run "
+ "these tests.")
+class GSConnectionTest(unittest.TestCase):
+ gs = True
def test_1_basic(self):
"""basic regression test for Google Cloud Storage"""
@@ -44,16 +72,16 @@
bucket = c.create_bucket(bucket_name)
# now try a get_bucket call and see if it's really there
bucket = c.get_bucket(bucket_name)
- k = bucket.new_key()
- k.name = 'foobar'
+ key_name = 'foobar'
+ k = bucket.new_key(key_name)
s1 = 'This is a test of file upload and download'
s2 = 'This is a second string to test file upload and download'
k.set_contents_from_string(s1)
- fp = open('foobar', 'wb')
+ fp = open(key_name, 'wb')
# now get the contents from s3 to a local file
k.get_contents_to_file(fp)
fp.close()
- fp = open('foobar')
+ fp = open(key_name)
# check to make sure content read from s3 is identical to original
assert s1 == fp.read(), 'corrupted file'
fp.close()
@@ -86,7 +114,7 @@
fp = open('foobar1', 'wb')
k.get_contents_to_file(fp)
fp.close()
- fp2.seek(0,0)
+ fp2.seek(0, 0)
fp = open('foobar1', 'rb')
assert (fp2.read() == fp.read()), 'Chunked Transfer corrupted the Data'
fp.close()
@@ -108,8 +136,8 @@
k = bucket.lookup('notthere')
assert k == None
# try some metadata stuff
- k = bucket.new_key()
- k.name = 'has_metadata'
+ key_name = 'has_metadata'
+ k = bucket.new_key(key_name)
mdkey1 = 'meta1'
mdval1 = 'This is the first metadata value'
k.set_metadata(mdkey1, mdval1)
@@ -123,12 +151,11 @@
k.set_metadata(mdkey3, mdval3)
k.set_contents_from_string(s1)
- k = bucket.lookup('has_metadata')
+ k = bucket.lookup(key_name)
assert k.get_metadata(mdkey1) == mdval1
assert k.get_metadata(mdkey2) == mdval2
assert k.get_metadata(mdkey3) == mdval3
- k = bucket.new_key()
- k.name = 'has_metadata'
+ k = bucket.new_key(key_name)
k.get_contents_as_string()
assert k.get_metadata(mdkey1) == mdval1
assert k.get_metadata(mdkey2) == mdval2
@@ -156,21 +183,31 @@
k.set_acl('private')
acl = k.get_acl()
assert len(acl.entries.entry_list) == 1
+ #
+ # Test case-insensitivity of XML ACL parsing.
+ acl_xml = (
+ '<ACCESSControlList><EntrIes><Entry>' +
+ '<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
+ '</Entry></EntrIes></ACCESSControlList>')
+ acl = boto.gs.acl.ACL()
+ h = handler.XmlHandler(acl, bucket)
+ xml.sax.parseString(acl_xml, h)
+ bucket.set_acl(acl)
+ assert len(acl.entries.entry_list) == 1
+ #
# try set/get raw logging subresource
empty_logging_str="<?xml version='1.0' encoding='UTF-8'?><Logging/>"
logging_str = (
"<?xml version='1.0' encoding='UTF-8'?><Logging>"
"<LogBucket>log-bucket</LogBucket>" +
"<LogObjectPrefix>example</LogObjectPrefix>" +
- "<PredefinedAcl>bucket-owner-full-control</PredefinedAcl>" +
"</Logging>")
bucket.set_subresource('logging', logging_str);
assert bucket.get_subresource('logging') == logging_str;
# try disable/enable logging
bucket.disable_logging()
assert bucket.get_subresource('logging') == empty_logging_str
- bucket.enable_logging('log-bucket', 'example',
- canned_acl='bucket-owner-full-control')
+ bucket.enable_logging('log-bucket', 'example')
assert bucket.get_subresource('logging') == logging_str;
# now delete all keys in bucket
for k in bucket:
@@ -187,18 +224,18 @@
bucket_name_2 = 'test2-%d' % int(time.time())
bucket1 = c.create_bucket(bucket_name_1)
bucket2 = c.create_bucket(bucket_name_2)
- # verify buckets got created
+ # verify buckets got created
bucket1 = c.get_bucket(bucket_name_1)
bucket2 = c.get_bucket(bucket_name_2)
# create a key in bucket1 and give it some content
- k1 = bucket1.new_key()
- assert isinstance(k1, bucket1.key_class)
key_name = 'foobar'
+ k1 = bucket1.new_key(key_name)
+ assert isinstance(k1, bucket1.key_class)
k1.name = key_name
s = 'This is a test.'
k1.set_contents_from_string(s)
# copy the new key from bucket1 to bucket2
- k1.copy(bucket_name_2, key_name)
+ k1.copy(bucket_name_2, key_name)
# now copy the contents from bucket2 to a local file
k2 = bucket2.lookup(key_name)
assert isinstance(k2, bucket2.key_class)
@@ -215,18 +252,29 @@
# delete test buckets
c.delete_bucket(bucket1)
c.delete_bucket(bucket2)
+ # delete temp file
+ os.unlink('foobar')
def test_3_default_object_acls(self):
"""test default object acls"""
+ # regexp for matching project-private default object ACL
+ project_private_re = '\s*<AccessControlList>\s*<Entries>\s*<Entry>' \
+ '\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>' \
+ '\s*<Permission>FULL_CONTROL</Permission>\s*</Entry>\s*<Entry>' \
+ '\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>' \
+ '\s*<Permission>FULL_CONTROL</Permission>\s*</Entry>\s*<Entry>' \
+ '\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>' \
+ '\s*<Permission>READ</Permission></Entry>\s*</Entries>' \
+ '\s*</AccessControlList>\s*'
c = GSConnection()
# create a new bucket
bucket_name = 'test-%d' % int(time.time())
bucket = c.create_bucket(bucket_name)
# now call get_bucket to see if it's really there
bucket = c.get_bucket(bucket_name)
- # get default acl and make sure it's empty
+ # get default acl and make sure it's project-private
acl = bucket.get_def_acl()
- assert acl.to_xml() == '<AccessControlList></AccessControlList>'
+ assert re.search(project_private_re, acl.to_xml())
# set default acl to a canned acl and verify it gets set
bucket.set_def_acl('public-read')
acl = bucket.get_def_acl()
@@ -256,9 +304,9 @@
bucket_name = 'test-%d' % int(time.time())
uri = storage_uri('gs://' + bucket_name)
uri.create_bucket()
- # get default acl and make sure it's empty
+ # get default acl and make sure it's project-private
acl = uri.get_def_acl()
- assert acl.to_xml() == '<AccessControlList></AccessControlList>'
+ assert re.search(project_private_re, acl.to_xml())
# set default acl to a canned acl and verify it gets set
uri.set_def_acl('public-read')
acl = uri.get_def_acl()
@@ -283,5 +331,50 @@
assert acl.to_xml() == '<AccessControlList></AccessControlList>'
# delete bucket
uri.delete_bucket()
-
+
+ def test_4_cors_xml(self):
+ """test setting and getting of CORS XML documents"""
+ # canned empty and populated CORS documents used for comparison
+ cors_empty = '<CorsConfig></CorsConfig>'
+ cors_doc = ('<CorsConfig><Cors><Origins><Origin>origin1.example.com'
+ '</Origin><Origin>origin2.example.com</Origin></Origins>'
+ '<Methods><Method>GET</Method><Method>PUT</Method>'
+ '<Method>POST</Method></Methods><ResponseHeaders>'
+ '<ResponseHeader>foo</ResponseHeader>'
+ '<ResponseHeader>bar</ResponseHeader></ResponseHeaders>'
+ '</Cors></CorsConfig>')
+ c = GSConnection()
+ # create a new bucket
+ bucket_name = 'test-%d' % int(time.time())
+ bucket = c.create_bucket(bucket_name)
+ # now call get_bucket to see if it's really there
+ bucket = c.get_bucket(bucket_name)
+ # get new bucket cors and make sure it's empty
+ cors = re.sub(r'\s', '', bucket.get_cors().to_xml())
+ assert cors == cors_empty
+ # set cors document on new bucket
+ bucket.set_cors(cors_doc)
+ cors = re.sub(r'\s', '', bucket.get_cors().to_xml())
+ assert cors == cors_doc
+ # delete bucket
+ c.delete_bucket(bucket)
+
+ # repeat cors tests using boto's storage_uri interface
+ # create a new bucket
+ bucket_name = 'test-%d' % int(time.time())
+ uri = storage_uri('gs://' + bucket_name)
+ uri.create_bucket()
+ # get new bucket cors and make sure it's empty
+ cors = re.sub(r'\s', '', uri.get_cors().to_xml())
+ assert cors == cors_empty
+ # set cors document on new bucket
+ cors_obj = Cors()
+ h = handler.XmlHandler(cors_obj, None)
+ xml.sax.parseString(cors_doc, h)
+ uri.set_cors(cors_obj)
+ cors = re.sub(r'\s', '', uri.get_cors().to_xml())
+ assert cors == cors_doc
+ # delete bucket
+ uri.delete_bucket()
+
print '--- tests completed ---'
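
The storage_uri CORS round trip in test_4_cors_xml parses raw XML into a Cors object via boto's generic SAX handler before setting it. A short sketch of that step, assuming a gs:// bucket you control (name is a placeholder):

    import xml.sax
    from boto import handler, storage_uri
    from boto.gs.cors import Cors

    cors_doc = ('<CorsConfig><Cors><Origins>'
                '<Origin>origin1.example.com</Origin></Origins>'
                '<Methods><Method>GET</Method></Methods>'
                '</Cors></CorsConfig>')

    # Feed the raw XML through the SAX handler to populate a Cors object.
    cors_obj = Cors()
    xml.sax.parseString(cors_doc, handler.XmlHandler(cors_obj, None))

    uri = storage_uri('gs://example-cors-bucket')   # placeholder bucket
    uri.set_cors(cors_obj)
    print uri.get_cors().to_xml()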
diff --git a/tests/s3/test_https_cert_validation.py b/tests/integration/s3/test_https_cert_validation.py
similarity index 97%
rename from tests/s3/test_https_cert_validation.py
rename to tests/integration/s3/test_https_cert_validation.py
index c8babb5..9222a4a 100644
--- a/tests/s3/test_https_cert_validation.py
+++ b/tests/integration/s3/test_https_cert_validation.py
@@ -39,11 +39,14 @@
import ssl
import unittest
+from nose.plugins.attrib import attr
+
import boto
from boto import exception, https_connection
from boto.gs.connection import GSConnection
from boto.s3.connection import S3Connection
+
# File 'other_cacerts.txt' contains a valid CA certificate of a CA that is used
# by neither S3 nor Google Cloud Storage. Validation against this CA cert should
# result in a certificate error.
@@ -59,8 +62,9 @@
# the server should return a certificate with CN 'www.<somedomain>.com').
INVALID_HOSTNAME_HOST = os.environ.get('INVALID_HOSTNAME_HOST', 'www')
-class CertValidationTest (unittest.TestCase):
+@attr('notdefault', 'ssl')
+class CertValidationTest(unittest.TestCase):
def setUp(self):
# Clear config
for section in boto.config.sections():
diff --git a/tests/integration/s3/test_key.py b/tests/integration/s3/test_key.py
new file mode 100644
index 0000000..6aecb22
--- /dev/null
+++ b/tests/integration/s3/test_key.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for S3 Key
+"""
+
+import unittest
+import time
+import StringIO
+from boto.s3.connection import S3Connection
+from boto.s3.key import Key
+from boto.exception import S3ResponseError
+
+class S3KeyTest (unittest.TestCase):
+ s3 = True
+
+ def setUp(self):
+ self.conn = S3Connection()
+ self.bucket_name = 'keytest-%d' % int(time.time())
+ self.bucket = self.conn.create_bucket(self.bucket_name)
+
+ def tearDown(self):
+ for key in self.bucket:
+ key.delete()
+ self.bucket.delete()
+
+ def test_set_contents_from_file_dataloss(self):
+ # Create an empty stringio and write to it.
+ content = "abcde"
+ sfp = StringIO.StringIO()
+ sfp.write(content)
+ # Try set_contents_from_file() without rewinding sfp
+ k = self.bucket.new_key("k")
+ try:
+ k.set_contents_from_file(sfp)
+ self.fail("forgot to rewind so should fail.")
+ except AttributeError:
+ pass
+ # call with rewind and check if we wrote 5 bytes
+ k.set_contents_from_file(sfp, rewind=True)
+ self.assertEqual(k.size, 5)
+ # check actual contents by getting it.
+ kn = self.bucket.new_key("k")
+ ks = kn.get_contents_as_string()
+ self.assertEqual(ks, content)
+
+ # finally, try with a 0 length string
+ sfp = StringIO.StringIO()
+ k = self.bucket.new_key("k")
+ k.set_contents_from_file(sfp)
+ self.assertEqual(k.size, 0)
+ # check actual contents by getting it.
+ kn = self.bucket.new_key("k")
+ ks = kn.get_contents_as_string()
+ self.assertEqual(ks, "")
+
+ def test_set_contents_as_file(self):
+ content="01234567890123456789"
+ sfp = StringIO.StringIO(content)
+
+ # fp is set at 0 for just opened (for read) files.
+ # set_contents should write full content to key.
+ k = self.bucket.new_key("k")
+ k.set_contents_from_file(sfp)
+ self.assertEqual(k.size, 20)
+ kn = self.bucket.new_key("k")
+ ks = kn.get_contents_as_string()
+ self.assertEqual(ks, content)
+
+ # set fp to 5 and set contents. this should
+ # set "567890123456789" to the key
+ sfp.seek(5)
+ k = self.bucket.new_key("k")
+ k.set_contents_from_file(sfp)
+ self.assertEqual(k.size, 15)
+ kn = self.bucket.new_key("k")
+ ks = kn.get_contents_as_string()
+ self.assertEqual(ks, content[5:])
+
+ # set fp to 5 and only set 5 bytes. this should
+ # write the value "56789" to the key.
+ sfp.seek(5)
+ k = self.bucket.new_key("k")
+ k.set_contents_from_file(sfp, size=5)
+ self.assertEqual(k.size, 5)
+ self.assertEqual(sfp.tell(), 10)
+ kn = self.bucket.new_key("k")
+ ks = kn.get_contents_as_string()
+ self.assertEqual(ks, content[5:10])
+
+ def test_set_contents_with_md5(self):
+ content="01234567890123456789"
+ sfp = StringIO.StringIO(content)
+
+ # fp is set at 0 for just opened (for read) files.
+ # set_contents should write full content to key.
+ k = self.bucket.new_key("k")
+ good_md5 = k.compute_md5(sfp)
+ k.set_contents_from_file(sfp, md5=good_md5)
+ kn = self.bucket.new_key("k")
+ ks = kn.get_contents_as_string()
+ self.assertEqual(ks, content)
+
+ # set fp to 5 and only set 5 bytes. this should
+ # write the value "56789" to the key.
+ sfp.seek(5)
+ k = self.bucket.new_key("k")
+ good_md5 = k.compute_md5(sfp, size=5)
+ k.set_contents_from_file(sfp, size=5, md5=good_md5)
+ self.assertEqual(sfp.tell(), 10)
+ kn = self.bucket.new_key("k")
+ ks = kn.get_contents_as_string()
+ self.assertEqual(ks, content[5:10])
+
+ # let's try a wrong md5 by just altering it.
+ k = self.bucket.new_key("k")
+ sfp.seek(0)
+ hexdig, base64 = k.compute_md5(sfp)
+ bad_md5 = (hexdig, base64[3:])
+ try:
+ k.set_contents_from_file(sfp, md5=bad_md5)
+ self.fail("should fail with bad md5")
+ except S3ResponseError:
+ pass
+
+ def test_get_contents_with_md5(self):
+ content="01234567890123456789"
+ sfp = StringIO.StringIO(content)
+
+ k = self.bucket.new_key("k")
+ k.set_contents_from_file(sfp)
+ kn = self.bucket.new_key("k")
+ s = kn.get_contents_as_string()
+ self.assertEqual(kn.md5, k.md5)
+ self.assertEqual(s, content)
+
+ def test_file_callback(self):
+ def callback(wrote, total):
+ self.my_cb_cnt += 1
+ self.assertNotEqual(wrote, self.my_cb_last, "called twice with same value")
+ self.my_cb_last = wrote
+
+ # Zero bytes written => 1 call
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ k = self.bucket.new_key("k")
+ k.BufferSize = 2
+ sfp = StringIO.StringIO("")
+ k.set_contents_from_file(sfp, cb=callback, num_cb=10)
+ self.assertEqual(self.my_cb_cnt, 1)
+ self.assertEqual(self.my_cb_last, 0)
+ sfp.close()
+
+ # Read back zero bytes => 1 call
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ s = k.get_contents_as_string(cb=callback)
+ self.assertEqual(self.my_cb_cnt, 1)
+ self.assertEqual(self.my_cb_last, 0)
+
+ content="01234567890123456789"
+ sfp = StringIO.StringIO(content)
+
+ # expect 2 calls due to start/finish
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ k = self.bucket.new_key("k")
+ k.set_contents_from_file(sfp, cb=callback, num_cb=10)
+ self.assertEqual(self.my_cb_cnt, 2)
+ self.assertEqual(self.my_cb_last, 20)
+
+ # Read back all bytes => 2 calls
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ s = k.get_contents_as_string(cb=callback)
+ self.assertEqual(self.my_cb_cnt, 2)
+ self.assertEqual(self.my_cb_last, 20)
+ self.assertEqual(s, content)
+
+ # rewind sfp and try the upload again. num_cb=-1 invokes the
+ # callback for every read/write, which makes 11 calls when BufferSize=2
+ sfp.seek(0)
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ k = self.bucket.new_key("k")
+ k.BufferSize = 2
+ k.set_contents_from_file(sfp, cb=callback, num_cb=-1)
+ self.assertEqual(self.my_cb_cnt, 11)
+ self.assertEqual(self.my_cb_last, 20)
+
+ # Read back all bytes => 11 calls
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ s = k.get_contents_as_string(cb=callback, num_cb=-1)
+ self.assertEqual(self.my_cb_cnt, 11)
+ self.assertEqual(self.my_cb_last, 20)
+ self.assertEqual(s, content)
+
+ # no more than 1 call requested => up to 2 calls
+ # last time always 20 bytes
+ sfp.seek(0)
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ k = self.bucket.new_key("k")
+ k.BufferSize = 2
+ k.set_contents_from_file(sfp, cb=callback, num_cb=1)
+ self.assertTrue(self.my_cb_cnt <= 2)
+ self.assertEqual(self.my_cb_last, 20)
+
+ # no more than 1 call requested => up to 2 calls
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ s = k.get_contents_as_string(cb=callback, num_cb=1)
+ self.assertTrue(self.my_cb_cnt <= 2)
+ self.assertEqual(self.my_cb_last, 20)
+ self.assertEqual(s, content)
+
+ # no more than 2 times
+ # last time always 20 bytes
+ sfp.seek(0)
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ k = self.bucket.new_key("k")
+ k.BufferSize = 2
+ k.set_contents_from_file(sfp, cb=callback, num_cb=2)
+ self.assertTrue(self.my_cb_cnt <= 2)
+ self.assertEqual(self.my_cb_last, 20)
+
+ # no more than 2 times
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ s = k.get_contents_as_string(cb=callback, num_cb=2)
+ self.assertTrue(self.my_cb_cnt <= 2)
+ self.assertEqual(self.my_cb_last, 20)
+ self.assertEqual(s, content)
+
+ # no more than 3 times
+ # last time always 20 bytes
+ sfp.seek(0)
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ k = self.bucket.new_key("k")
+ k.BufferSize = 2
+ k.set_contents_from_file(sfp, cb=callback, num_cb=3)
+ self.assertTrue(self.my_cb_cnt <= 3)
+ self.assertEqual(self.my_cb_last, 20)
+
+ # no more than 3 times
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ s = k.get_contents_as_string(cb=callback, num_cb=3)
+ self.assertTrue(self.my_cb_cnt <= 3)
+ self.assertEqual(self.my_cb_last, 20)
+ self.assertEqual(s, content)
+
+ # no more than 4 times
+ # last time always 20 bytes
+ sfp.seek(0)
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ k = self.bucket.new_key("k")
+ k.BufferSize = 2
+ k.set_contents_from_file(sfp, cb=callback, num_cb=4)
+ self.assertTrue(self.my_cb_cnt <= 4)
+ self.assertEqual(self.my_cb_last, 20)
+
+ # no more than 4 times
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ s = k.get_contents_as_string(cb=callback, num_cb=4)
+ self.assertTrue(self.my_cb_cnt <= 4)
+ self.assertEqual(self.my_cb_last, 20)
+ self.assertEqual(s, content)
+
+ # no more than 6 times
+ # last time always 20 bytes
+ sfp.seek(0)
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ k = self.bucket.new_key("k")
+ k.BufferSize = 2
+ k.set_contents_from_file(sfp, cb=callback, num_cb=6)
+ self.assertTrue(self.my_cb_cnt <= 6)
+ self.assertEqual(self.my_cb_last, 20)
+
+ # no more than 6 times
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ s = k.get_contents_as_string(cb=callback, num_cb=6)
+ self.assertTrue(self.my_cb_cnt <= 6)
+ self.assertEqual(self.my_cb_last, 20)
+ self.assertEqual(s, content)
+
+ # no more than 10 times
+ # last time always 20 bytes
+ sfp.seek(0)
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ k = self.bucket.new_key("k")
+ k.BufferSize = 2
+ k.set_contents_from_file(sfp, cb=callback, num_cb=10)
+ self.assertTrue(self.my_cb_cnt <= 10)
+ self.assertEqual(self.my_cb_last, 20)
+
+ # no more than 10 times
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ s = k.get_contents_as_string(cb=callback, num_cb=10)
+ self.assertTrue(self.my_cb_cnt <= 10)
+ self.assertEqual(self.my_cb_last, 20)
+ self.assertEqual(s, content)
+
+ # no more than 1000 times
+ # last time always 20 bytes
+ sfp.seek(0)
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ k = self.bucket.new_key("k")
+ k.BufferSize = 2
+ k.set_contents_from_file(sfp, cb=callback, num_cb=1000)
+ self.assertTrue(self.my_cb_cnt <= 1000)
+ self.assertEqual(self.my_cb_last, 20)
+
+ # no more than 1000 times
+ self.my_cb_cnt = 0
+ self.my_cb_last = None
+ s = k.get_contents_as_string(cb=callback, num_cb=1000)
+ self.assertTrue(self.my_cb_cnt <= 1000)
+ self.assertEqual(self.my_cb_last, 20)
+ self.assertEqual(s, content)
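
The md5 and size tests above rely on two details of Key.set_contents_from_file(): compute_md5() returns a (hexdigest, base64) pair that can be passed back via md5= to skip recomputation, and size= uploads only a slice starting at the file's current position. A hedged sketch (the bucket name is a placeholder):

    import StringIO
    from boto.s3.connection import S3Connection

    bucket = S3Connection().get_bucket('example-key-demo')  # placeholder
    sfp = StringIO.StringIO('01234567890123456789')

    k = bucket.new_key('k')
    md5 = k.compute_md5(sfp)                 # (hexdigest, base64) tuple
    k.set_contents_from_file(sfp, md5=md5)   # S3 verifies the digest

    sfp.seek(5)
    k.set_contents_from_file(sfp, size=5)    # uploads bytes 5..9 only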
diff --git a/tests/integration/s3/test_mfa.py b/tests/integration/s3/test_mfa.py
new file mode 100644
index 0000000..1d6d62f
--- /dev/null
+++ b/tests/integration/s3/test_mfa.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for S3 MfaDelete with versioning
+"""
+
+import unittest
+import time
+from nose.plugins.attrib import attr
+
+from boto.s3.connection import S3Connection
+from boto.exception import S3ResponseError
+from boto.s3.deletemarker import DeleteMarker
+
+
+@attr('notdefault', 's3mfa')
+class S3MFATest (unittest.TestCase):
+
+ def setUp(self):
+ self.conn = S3Connection()
+ self.bucket_name = 'mfa-%d' % int(time.time())
+ self.bucket = self.conn.create_bucket(self.bucket_name)
+
+ def tearDown(self):
+ for k in self.bucket.list_versions():
+ self.bucket.delete_key(k.name, version_id=k.version_id)
+ self.bucket.delete()
+
+ def test_mfadel(self):
+ # Enable Versioning with MfaDelete
+ mfa_sn = raw_input('MFA S/N: ')
+ mfa_code = raw_input('MFA Code: ')
+ self.bucket.configure_versioning(True, mfa_delete=True, mfa_token=(mfa_sn, mfa_code))
+
+ # Check enabling mfa worked.
+ i = 0
+ for i in range(1, 8):
+ time.sleep(2**i)
+ d = self.bucket.get_versioning_status()
+ if d['Versioning'] == 'Enabled' and d['MfaDelete'] == 'Enabled':
+ break
+ self.assertEqual('Enabled', d['Versioning'])
+ self.assertEqual('Enabled', d['MfaDelete'])
+
+ # Add a key to the bucket
+ k = self.bucket.new_key('foobar')
+ s1 = 'This is v1'
+ k.set_contents_from_string(s1)
+ v1 = k.version_id
+
+ # Now try to delete v1 without the MFA token
+ try:
+ self.bucket.delete_key('foobar', version_id=v1)
+ self.fail("Must fail if not using MFA token")
+ except S3ResponseError:
+ pass
+
+ # Now try delete again with the MFA token
+ mfa_code = raw_input('MFA Code: ')
+ self.bucket.delete_key('foobar', version_id=v1, mfa_token=(mfa_sn, mfa_code))
+
+ # Next suspend versioning and disable MfaDelete on the bucket
+ mfa_code = raw_input('MFA Code: ')
+ self.bucket.configure_versioning(False, mfa_delete=False, mfa_token=(mfa_sn, mfa_code))
+
+ # Lastly, check disabling mfa worked.
+ i = 0
+ for i in range(1, 8):
+ time.sleep(2**i)
+ d = self.bucket.get_versioning_status()
+ if d['Versioning'] == 'Suspended' and d['MfaDelete'] != 'Enabled':
+ break
+ self.assertEqual('Suspended', d['Versioning'])
+ self.assertNotEqual('Enabled', d['MfaDelete'])
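
This test also shows the polling idiom used throughout the suite: versioning and MfaDelete status propagate asynchronously, so the test retries get_versioning_status() with exponential backoff. A compressed sketch of an MFA-protected delete (serial number, code, and names are placeholders):

    import time
    from boto.s3.connection import S3Connection

    bucket = S3Connection().get_bucket('example-mfa-demo')  # placeholder
    mfa = ('mfa-serial-number', '123456')                   # placeholder token

    bucket.configure_versioning(True, mfa_delete=True, mfa_token=mfa)

    # Status propagates asynchronously; poll with exponential backoff.
    for i in range(1, 8):
        time.sleep(2 ** i)
        d = bucket.get_versioning_status()
        if d.get('Versioning') == 'Enabled' and d.get('MfaDelete') == 'Enabled':
            break

    # Deleting a specific version now requires a fresh MFA token.
    bucket.delete_key('foobar', version_id='some-version-id', mfa_token=mfa)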
diff --git a/tests/integration/s3/test_multidelete.py b/tests/integration/s3/test_multidelete.py
new file mode 100644
index 0000000..b22581b
--- /dev/null
+++ b/tests/integration/s3/test_multidelete.py
@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the S3 MultiDelete
+"""
+
+import unittest
+import time
+from boto.s3.key import Key
+from boto.s3.deletemarker import DeleteMarker
+from boto.s3.prefix import Prefix
+from boto.s3.connection import S3Connection
+from boto.exception import S3ResponseError
+
+class S3MultiDeleteTest(unittest.TestCase):
+ s3 = True
+
+ def setUp(self):
+ self.conn = S3Connection()
+ self.bucket_name = 'multidelete-%d' % int(time.time())
+ self.bucket = self.conn.create_bucket(self.bucket_name)
+
+ def tearDown(self):
+ for key in self.bucket:
+ key.delete()
+ self.bucket.delete()
+
+ def test_delete_nothing(self):
+ result = self.bucket.delete_keys([])
+ self.assertEqual(len(result.deleted), 0)
+ self.assertEqual(len(result.errors), 0)
+
+ def test_delete_illegal(self):
+ result = self.bucket.delete_keys([{"dict":"notallowed"}])
+ self.assertEqual(len(result.deleted), 0)
+ self.assertEqual(len(result.errors), 1)
+
+ def test_delete_mix(self):
+ result = self.bucket.delete_keys(["king",
+ ("mice", None),
+ Key(name="regular"),
+ Key(),
+ Prefix(name="folder/"),
+ DeleteMarker(name="deleted"),
+ {"bad":"type"}])
+ self.assertEqual(len(result.deleted), 4)
+ self.assertEqual(len(result.errors), 3)
+
+ def test_delete_quietly(self):
+ result = self.bucket.delete_keys(["king"], quiet=True)
+ self.assertEqual(len(result.deleted), 0)
+ self.assertEqual(len(result.errors), 0)
+
+ def test_delete_must_escape(self):
+ result = self.bucket.delete_keys([Key(name=">_<;")])
+ self.assertEqual(len(result.deleted), 1)
+ self.assertEqual(len(result.errors), 0)
+
+ def test_delete_unknown_version(self):
+ no_ver = Key(name="no")
+ no_ver.version_id = "version"
+ result = self.bucket.delete_keys([no_ver])
+ self.assertEqual(len(result.deleted), 0)
+ self.assertEqual(len(result.errors), 1)
+
+ def test_delete_kanji(self):
+ result = self.bucket.delete_keys([u"漢字", Key(name=u"日本語")])
+ self.assertEqual(len(result.deleted), 2)
+ self.assertEqual(len(result.errors), 0)
+
+ def test_delete_empty_by_list(self):
+ result = self.bucket.delete_keys(self.bucket.list())
+ self.assertEqual(len(result.deleted), 0)
+ self.assertEqual(len(result.errors), 0)
+
+ def test_delete_kanji_by_list(self):
+ for key_name in [u"漢字", u"日本語", u"テスト"]:
+ key = self.bucket.new_key(key_name)
+ key.set_contents_from_string('this is a test')
+ result = self.bucket.delete_keys(self.bucket.list())
+ self.assertEqual(len(result.deleted), 3)
+ self.assertEqual(len(result.errors), 0)
+
+ def test_delete_with_prefixes(self):
+ for key_name in ["a", "a/b", "b"]:
+ key = self.bucket.new_key(key_name)
+ key.set_contents_from_string('this is a test')
+
+ # First delete all "files": "a" and "b"
+ result = self.bucket.delete_keys(self.bucket.list(delimiter="/"))
+ self.assertEqual(len(result.deleted), 2)
+ # Using delimiter will cause 1 common prefix to be listed
+ # which will be skipped as an error.
+ self.assertEqual(len(result.errors), 1)
+ self.assertEqual(result.errors[0].key, "a/")
+
+ # Next delete any remaining objects: "a/b"
+ result = self.bucket.delete_keys(self.bucket.list())
+ self.assertEqual(len(result.deleted), 1)
+ self.assertEqual(len(result.errors), 0)
+ self.assertEqual(result.deleted[0].key, "a/b")
+
+ def test_delete_too_many_versions(self):
+ # configure versioning first
+ self.bucket.configure_versioning(True)
+
+ # Add 1000 initial versions as DMs by deleting them :-)
+ # Adding 1000 objects is painful otherwise...
+ key_names = ['key-%03d' % i for i in range(0, 1000)]
+ result = self.bucket.delete_keys(key_names)
+ self.assertEqual(len(result.deleted), 1000)
+ self.assertEqual(len(result.errors), 0)
+
+ # delete them again to create 1000 more delete markers
+ result = self.bucket.delete_keys(key_names)
+ self.assertEqual(len(result.deleted), 1000)
+ self.assertEqual(len(result.errors), 0)
+
+ # Sometimes it takes AWS some time to settle
+ time.sleep(10)
+
+ # delete all versions to delete 2000 objects.
+ # this tests the 1000 limit.
+ result = self.bucket.delete_keys(self.bucket.list_versions())
+ self.assertEqual(len(result.deleted), 2000)
+ self.assertEqual(len(result.errors), 0)
+
+ def test_1(self):
+ nkeys = 100
+
+ # create a bunch of keynames
+ key_names = ['key-%03d' % i for i in range(0, nkeys)]
+
+ # create the corresponding keys
+ for key_name in key_names:
+ key = self.bucket.new_key(key_name)
+ key.set_contents_from_string('this is a test')
+
+ # now count keys in bucket
+ n = 0
+ for key in self.bucket:
+ n += 1
+
+ self.assertEqual(n, nkeys)
+
+ # now delete them all
+ result = self.bucket.delete_keys(key_names)
+
+ self.assertEqual(len(result.deleted), nkeys)
+ self.assertEqual(len(result.errors), 0)
+
+ time.sleep(5)
+
+ # now count keys in bucket
+ n = 0
+ for key in self.bucket:
+ n += 1
+
+ self.assertEqual(n, 0)
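
As the tests above imply, delete_keys() accepts a mix of key names, (name, version_id) tuples, and Key-like objects, and splits large requests at the 1000-key MultiObjectDelete limit. A minimal usage sketch (the bucket name is a placeholder):

    from boto.s3.connection import S3Connection
    from boto.s3.key import Key

    bucket = S3Connection().get_bucket('example-multidelete-demo')

    # Names, (name, version_id) tuples, and Key objects all work;
    # anything else is reported back in result.errors.
    result = bucket.delete_keys(['a', ('b', None), Key(name='c')])
    print '%d deleted, %d errors' % (len(result.deleted), len(result.errors))
    for err in result.errors:
        print 'failed: %s' % err.key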
diff --git a/tests/integration/s3/test_multipart.py b/tests/integration/s3/test_multipart.py
new file mode 100644
index 0000000..51d34a5
--- /dev/null
+++ b/tests/integration/s3/test_multipart.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the S3 MultiPartUpload
+"""
+
+# Note:
+# Multipart uploads require at least one part. If you upload
+# multiple parts then all parts except the last one have to be
+# bigger than 5 MB. Hence we just use 1 part so we can keep
+# things small and still test the logic.
+
+import unittest
+import time
+import StringIO
+from boto.s3.connection import S3Connection
+
+
+class S3MultiPartUploadTest(unittest.TestCase):
+ s3 = True
+
+ def setUp(self):
+ self.conn = S3Connection(is_secure=False)
+ self.bucket_name = 'multipart-%d' % int(time.time())
+ self.bucket = self.conn.create_bucket(self.bucket_name)
+
+ def tearDown(self):
+ for key in self.bucket:
+ key.delete()
+ self.bucket.delete()
+
+ def test_abort(self):
+ key_name = u"テスト"
+ mpu = self.bucket.initiate_multipart_upload(key_name)
+ mpu.cancel_upload()
+
+ def test_complete_ascii(self):
+ key_name = "test"
+ mpu = self.bucket.initiate_multipart_upload(key_name)
+ fp = StringIO.StringIO("small file")
+ mpu.upload_part_from_file(fp, part_num=1)
+ fp.close()
+ cmpu = mpu.complete_upload()
+ self.assertEqual(cmpu.key_name, key_name)
+ self.assertNotEqual(cmpu.etag, None)
+
+ def test_complete_japanese(self):
+ key_name = u"テスト"
+ mpu = self.bucket.initiate_multipart_upload(key_name)
+ fp = StringIO.StringIO("small file")
+ mpu.upload_part_from_file(fp, part_num=1)
+ fp.close()
+ cmpu = mpu.complete_upload()
+ # Amazon has a bug in the CompleteMultipartUpload result:
+ # AWS returns '???' instead of the correctly encoded key name.
+ # Switch to the commented-out assertion below once Amazon
+ # fixes this and the '???' assertion starts failing.
+ self.assertEqual(cmpu.key_name, "???")
+ #self.assertEqual(cmpu.key_name, key_name)
+ self.assertNotEqual(cmpu.etag, None)
+
+ def test_list_japanese(self):
+ key_name = u"テスト"
+ mpu = self.bucket.initiate_multipart_upload(key_name)
+ rs = self.bucket.list_multipart_uploads()
+ # New bucket, so only one upload expected
+ lmpu = iter(rs).next()
+ self.assertEqual(lmpu.id, mpu.id)
+ self.assertEqual(lmpu.key_name, key_name)
+ # Abort using the one returned in the list
+ lmpu.cancel_upload()
+
+ def test_list_multipart_uploads(self):
+ key_name = u"テスト"
+ mpus = []
+ mpus.append(self.bucket.initiate_multipart_upload(key_name))
+ mpus.append(self.bucket.initiate_multipart_upload(key_name))
+ rs = self.bucket.list_multipart_uploads()
+ # uploads (for a key) are returned in ascending order of time initiated
+ for lmpu in rs:
+ ompu = mpus.pop(0)
+ self.assertEqual(lmpu.key_name, ompu.key_name)
+ self.assertEqual(lmpu.id, ompu.id)
+ self.assertEqual(0, len(mpus))
+
+ def test_four_part_file(self):
+ key_name = "k"
+ contents = "01234567890123456789"
+ sfp = StringIO.StringIO(contents)
+
+ # upload 20 bytes in 4 parts of 5 bytes each
+ mpu = self.bucket.initiate_multipart_upload(key_name)
+ mpu.upload_part_from_file(sfp, part_num=1, size=5)
+ mpu.upload_part_from_file(sfp, part_num=2, size=5)
+ mpu.upload_part_from_file(sfp, part_num=3, size=5)
+ mpu.upload_part_from_file(sfp, part_num=4, size=5)
+ sfp.close()
+
+ etags = {}
+ pn = 0
+ for part in mpu:
+ pn += 1
+ self.assertEqual(5, part.size)
+ etags[pn] = part.etag
+ self.assertEqual(pn, 4)
+ # etags for 01234
+ self.assertEqual(etags[1], etags[3])
+ # etags for 56789
+ self.assertEqual(etags[2], etags[4])
+ # etag 01234 != etag 56789
+ self.assertNotEqual(etags[1], etags[2])
+
+ # parts are too small to complete, as each part must
+ # be a minimum of 5 MB, so we'll assume that is enough
+ # testing and abort the upload.
+ mpu.cancel_upload()
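
For reference, the multipart lifecycle exercised here is initiate, upload one or more parts, then complete (or cancel, which frees the stored parts). A minimal sketch with a single part, assuming a bucket you own (name is a placeholder):

    import StringIO
    from boto.s3.connection import S3Connection

    bucket = S3Connection().get_bucket('example-multipart-demo')  # placeholder

    mpu = bucket.initiate_multipart_upload('bigfile')
    try:
        mpu.upload_part_from_file(StringIO.StringIO('only part'), part_num=1)
        completed = mpu.complete_upload()
        print completed.key_name, completed.etag
    except Exception:
        mpu.cancel_upload()   # abort so the stored parts are freed
        raise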
diff --git a/tests/s3/test_pool.py b/tests/integration/s3/test_pool.py
similarity index 100%
rename from tests/s3/test_pool.py
rename to tests/integration/s3/test_pool.py
diff --git a/tests/s3/test_resumable_downloads.py b/tests/integration/s3/test_resumable_downloads.py
old mode 100755
new mode 100644
similarity index 65%
rename from tests/s3/test_resumable_downloads.py
rename to tests/integration/s3/test_resumable_downloads.py
index 4e3e6ba..f204c21
--- a/tests/s3/test_resumable_downloads.py
+++ b/tests/integration/s3/test_resumable_downloads.py
@@ -61,6 +61,7 @@
"""
Resumable download test suite.
"""
+ gs = True
def get_suite_description(self):
return 'Resumable download test suite'
@@ -72,116 +73,88 @@
except StorageResponseError, e:
pass
- @classmethod
- def setUp(cls):
- """
- Creates file-like object for detination of each download test.
-
- This method's namingCase is required by the unittest framework.
- """
- cls.dst_fp = open(cls.dst_file_name, 'w')
-
- @classmethod
- def tearDown(cls):
- """
- Deletes any objects or files created by last test run, and closes
- any keys in case they were read incompletely (which would leave
- partial buffers of data for subsequent tests to trip over).
-
- This method's namingCase is required by the unittest framework.
- """
- # Recursively delete dst dir and then re-create it, so in effect we
- # remove all dirs and files under that directory.
- shutil.rmtree(cls.tmp_dir)
- os.mkdir(cls.tmp_dir)
-
- # Close test objects.
- cls.resilient_close(cls.empty_src_key)
- cls.resilient_close(cls.small_src_key)
- cls.resilient_close(cls.larger_src_key)
-
- @classmethod
- def build_test_input_object(cls, obj_name, size, debug):
+ def build_input_object(self, obj_name, size):
buf = []
for i in range(size):
buf.append(str(random.randint(0, 9)))
string_data = ''.join(buf)
- uri = cls.src_bucket_uri.clone_replace_name(obj_name)
+ uri = self.src_bucket_uri.clone_replace_name(obj_name)
key = uri.new_key(validate=False)
key.set_contents_from_file(StringIO.StringIO(string_data))
- # Set debug on key's connection after creating data, so only the test
- # runs will show HTTP output (if called passed debug>0).
- key.bucket.connection.debug = debug
return (string_data, key)
- @classmethod
- def set_up_class(cls, debug):
+ def setUp(self):
"""
- Initializes test suite.
+ Initializes for each test.
"""
-
# Create the test bucket.
hostname = socket.gethostname().split('.')[0]
uri_base_str = 'gs://res-download-test-%s-%s-%s' % (
hostname, os.getpid(), int(time.time()))
- cls.src_bucket_uri = storage_uri('%s-dst' % uri_base_str)
- cls.src_bucket_uri.create_bucket()
+ self.src_bucket_uri = storage_uri('%s-dst' % uri_base_str)
+ self.src_bucket_uri.create_bucket()
# Create test source objects.
- cls.empty_src_key_size = 0
- (cls.empty_src_key_as_string, cls.empty_src_key) = (
- cls.build_test_input_object('empty', cls.empty_src_key_size,
- debug=debug))
- cls.small_src_key_size = 2 * 1024 # 2 KB.
- (cls.small_src_key_as_string, cls.small_src_key) = (
- cls.build_test_input_object('small', cls.small_src_key_size,
- debug=debug))
- cls.larger_src_key_size = 500 * 1024 # 500 KB.
- (cls.larger_src_key_as_string, cls.larger_src_key) = (
- cls.build_test_input_object('larger', cls.larger_src_key_size,
- debug=debug))
+ self.empty_src_key_size = 0
+ (self.empty_src_key_as_string, self.empty_src_key) = (
+ self.build_input_object('empty', self.empty_src_key_size))
+ self.small_src_key_size = 2 * 1024 # 2 KB.
+ (self.small_src_key_as_string, self.small_src_key) = (
+ self.build_input_object('small', self.small_src_key_size))
+ self.larger_src_key_size = 500 * 1024 # 500 KB.
+ (self.larger_src_key_as_string, self.larger_src_key) = (
+ self.build_input_object('larger', self.larger_src_key_size))
# Use a designated tmpdir prefix to make it easy to find the end of
# the tmp path.
- cls.tmpdir_prefix = 'tmp_resumable_download_test'
+ self.tmpdir_prefix = 'tmp_resumable_download_test'
# Create temp dir and name for download file.
- cls.tmp_dir = tempfile.mkdtemp(prefix=cls.tmpdir_prefix)
- cls.dst_file_name = '%s%sdst_file' % (cls.tmp_dir, os.sep)
+ self.tmp_dir = tempfile.mkdtemp(prefix=self.tmpdir_prefix)
+ self.dst_file_name = '%s%sdst_file' % (self.tmp_dir, os.sep)
- cls.tracker_file_name = '%s%stracker' % (cls.tmp_dir, os.sep)
+ self.tracker_file_name = '%s%stracker' % (self.tmp_dir, os.sep)
- cls.created_test_data = True
+ # Create file-like object for destination of each download test.
+ self.dst_fp = open(self.dst_file_name, 'w')
+ self.created_test_data = True
- @classmethod
- def tear_down_class(cls):
+ def tearDown(self):
"""
- Deletes test objects and bucket and tmp dir created by set_up_class.
+ Deletes test objects and bucket and tmp dir created by set_up_class,
+ and closes any keys in case they were read incompletely (which would
+ leave partial buffers of data for subsequent tests to trip over).
"""
- if not hasattr(cls, 'created_test_data'):
+ if not hasattr(self, 'created_test_data'):
return
- # Call cls.tearDown() in case the tests got interrupted, to ensure
- # dst objects get deleted.
- cls.tearDown()
+ # Recursively delete dst dir and then re-create it, so in effect we
+ # remove all dirs and files under that directory.
+ shutil.rmtree(self.tmp_dir)
+ os.mkdir(self.tmp_dir)
+
+ # Close test objects.
+ self.resilient_close(self.empty_src_key)
+ self.resilient_close(self.small_src_key)
+ self.resilient_close(self.larger_src_key)
# Delete test objects.
- cls.empty_src_key.delete()
- cls.small_src_key.delete()
- cls.larger_src_key.delete()
+ self.empty_src_key.delete()
+ self.small_src_key.delete()
+ self.larger_src_key.delete()
# Retry (for up to 2 minutes) the bucket gets deleted (it may not
# the first time round, due to eventual consistency of bucket delete
# operations).
for i in range(60):
try:
- cls.src_bucket_uri.delete_bucket()
+ self.src_bucket_uri.delete_bucket()
break
except StorageResponseError:
print 'Test bucket (%s) not yet deleted, still trying' % (
- cls.src_bucket_uri.uri)
+ self.src_bucket_uri.uri)
time.sleep(2)
- shutil.rmtree(cls.tmp_dir)
- cls.tmp_dir = tempfile.mkdtemp(prefix=cls.tmpdir_prefix)
+ shutil.rmtree(self.tmp_dir)
+ self.tmp_dir = tempfile.mkdtemp(prefix=self.tmpdir_prefix)
def test_non_resumable_download(self):
"""
@@ -379,88 +352,6 @@
self.dst_fp, res_download_handler=res_download_handler)
self.assertEqual(0, get_cur_file_size(self.dst_fp))
- def test_download_with_object_size_change_between_starts(self):
- """
- Tests resumable download on an object that changes sizes between inital
- download start and restart
- """
- harnass = CallbackTestHarnass(
- fail_after_n_bytes=self.larger_src_key_size/2, num_times_to_fail=2)
- # Set up first process' ResumableDownloadHandler not to do any
- # retries (initial download request will establish expected size to
- # download server).
- res_download_handler = ResumableDownloadHandler(
- tracker_file_name=self.tracker_file_name, num_retries=0)
- try:
- self.larger_src_key.get_contents_to_file(
- self.dst_fp, cb=harnass.call,
- res_download_handler=res_download_handler)
- self.fail('Did not get expected ResumableDownloadException')
- except ResumableDownloadException, e:
- # First abort (from harnass-forced failure) should be
- # ABORT_CUR_PROCESS.
- self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT_CUR_PROCESS)
- # Ensure a tracker file survived.
- self.assertTrue(os.path.exists(self.tracker_file_name))
- # Try it again, this time with different src key (simulating an
- # object that changes sizes between downloads).
- try:
- self.small_src_key.get_contents_to_file(
- self.dst_fp, res_download_handler=res_download_handler)
- self.fail('Did not get expected ResumableDownloadException')
- except ResumableDownloadException, e:
- # This abort should be a hard abort (object size changing during
- # transfer).
- self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
- self.assertNotEqual(
- e.message.find('md5 signature doesn\'t match etag'), -1)
-
- def test_download_with_file_content_change_during_download(self):
- """
- Tests resumable download on an object where the file content changes
- without changing length while download in progress
- """
- harnass = CallbackTestHarnass(
- fail_after_n_bytes=self.larger_src_key_size/2, num_times_to_fail=2)
- # Set up first process' ResumableDownloadHandler not to do any
- # retries (initial download request will establish expected size to
- # download server).
- res_download_handler = ResumableDownloadHandler(
- tracker_file_name=self.tracker_file_name, num_retries=0)
- dst_filename = self.dst_fp.name
- try:
- self.larger_src_key.get_contents_to_file(
- self.dst_fp, cb=harnass.call,
- res_download_handler=res_download_handler)
- self.fail('Did not get expected ResumableDownloadException')
- except ResumableDownloadException, e:
- # First abort (from harnass-forced failure) should be
- # ABORT_CUR_PROCESS.
- self.assertEqual(e.disposition,
- ResumableTransferDisposition.ABORT_CUR_PROCESS)
- # Ensure a tracker file survived.
- self.assertTrue(os.path.exists(self.tracker_file_name))
- # Before trying again change the first byte of the file fragment
- # that was already downloaded.
- orig_size = get_cur_file_size(self.dst_fp)
- self.dst_fp.seek(0, os.SEEK_SET)
- self.dst_fp.write('a')
- # Ensure the file size didn't change.
- self.assertEqual(orig_size, get_cur_file_size(self.dst_fp))
- try:
- self.larger_src_key.get_contents_to_file(
- self.dst_fp, cb=harnass.call,
- res_download_handler=res_download_handler)
- self.fail('Did not get expected ResumableDownloadException')
- except ResumableDownloadException, e:
- # This abort should be a hard abort (file content changing during
- # transfer).
- self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
- self.assertNotEqual(
- e.message.find('md5 signature doesn\'t match etag'), -1)
- # Ensure the bad data wasn't left around.
- self.assertFalse(os.path.exists(dst_filename))
-
def test_download_with_invalid_tracker_etag(self):
"""
Tests resumable download with a tracker file containing an invalid etag
@@ -522,36 +413,3 @@
finally:
# Restore original protection of dir where tracker_file lives.
os.chmod(self.tmp_dir, save_mod)
-
-if __name__ == '__main__':
- if sys.version_info[:3] < (2, 5, 1):
- sys.exit('These tests must be run on at least Python 2.5.1\n')
-
- # Use -d to see more HTTP protocol detail during tests. Note that
- # unlike the upload test case, you won't see much for the downloads
- # because there's no HTTP server state protocol for in the download case
- # (and the actual Range GET HTTP protocol detail is suppressed by the
- # normal boto.s3.Key.get_file() processing).
- debug = 0
- opts, args = getopt.getopt(sys.argv[1:], 'd', ['debug'])
- for o, a in opts:
- if o in ('-d', '--debug'):
- debug = 2
-
- test_loader = unittest.TestLoader()
- test_loader.testMethodPrefix = 'test_'
- suite = test_loader.loadTestsFromTestCase(ResumableDownloadTests)
- # Seems like there should be a cleaner way to find the test_class.
- test_class = suite.__getattribute__('_tests')[0]
- # We call set_up_class() and tear_down_class() ourselves because we
- # don't assume the user has Python 2.7 (which supports classmethods
- # that do it, with camelCase versions of these names).
- try:
- print 'Setting up %s...' % test_class.get_suite_description()
- test_class.set_up_class(debug)
- print 'Running %s...' % test_class.get_suite_description()
- unittest.TextTestRunner(verbosity=2).run(suite)
- finally:
- print 'Cleaning up after %s...' % test_class.get_suite_description()
- test_class.tear_down_class()
- print ''
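
The refactor above keeps the core resumable-download mechanism unchanged: a ResumableDownloadHandler persists transfer state in a tracker file so a later process can resume, while num_retries bounds in-process retries. A hedged usage sketch (URI and paths are placeholders):

    from boto import storage_uri
    from boto.s3.resumable_download_handler import ResumableDownloadHandler

    res_handler = ResumableDownloadHandler(
        tracker_file_name='/tmp/download.tracker', num_retries=5)

    key = storage_uri('gs://example-bucket/large-object').get_key()
    dst_fp = open('/tmp/large-object', 'w')
    # If this process dies mid-transfer, rerunning resumes from the
    # offset recorded in the tracker file.
    key.get_contents_to_file(dst_fp, res_download_handler=res_handler)
    dst_fp.close()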
diff --git a/tests/s3/test_resumable_uploads.py b/tests/integration/s3/test_resumable_uploads.py
old mode 100755
new mode 100644
similarity index 81%
rename from tests/s3/test_resumable_uploads.py
rename to tests/integration/s3/test_resumable_uploads.py
index bb0f7a9..51c293a
--- a/tests/s3/test_resumable_uploads.py
+++ b/tests/integration/s3/test_resumable_uploads.py
@@ -61,36 +61,12 @@
"""
Resumable upload test suite.
"""
+ gs = True
def get_suite_description(self):
return 'Resumable upload test suite'
- def setUp(self):
- """
- Creates dst_key needed by all tests.
-
- This method's namingCase is required by the unittest framework.
- """
- self.dst_key = self.dst_key_uri.new_key(validate=False)
-
- def tearDown(self):
- """
- Deletes any objects or files created by last test run.
-
- This method's namingCase is required by the unittest framework.
- """
- try:
- self.dst_key_uri.delete_key()
- except GSResponseError:
- # Ignore possible not-found error.
- pass
- # Recursively delete dst dir and then re-create it, so in effect we
- # remove all dirs and files under that directory.
- shutil.rmtree(self.tmp_dir)
- os.mkdir(self.tmp_dir)
-
- @staticmethod
- def build_test_input_file(size):
+ def build_input_file(self, size):
buf = []
# I manually construct the random data here instead of calling
# os.urandom() because I want to constrain the range of data (in
@@ -102,107 +78,119 @@
file_as_string = ''.join(buf)
return (file_as_string, StringIO.StringIO(file_as_string))
- @classmethod
- def get_dst_bucket_uri(cls, debug):
+ def get_dst_bucket_uri(self):
"""A unique bucket to test."""
hostname = socket.gethostname().split('.')[0]
uri_base_str = 'gs://res-upload-test-%s-%s-%s' % (
hostname, os.getpid(), int(time.time()))
- return boto.storage_uri('%s-dst' % uri_base_str, debug=debug)
+ return boto.storage_uri('%s-dst' % uri_base_str)
- @classmethod
- def get_dst_key_uri(cls):
+ def get_dst_key_uri(self):
"""A key to test."""
- return cls.dst_bucket_uri.clone_replace_name('obj')
+ return self.dst_bucket_uri.clone_replace_name('obj')
- @classmethod
- def get_staged_host(cls):
+ def get_staged_host(self):
"""URL of an existing bucket."""
return 'pub.commondatastorage.googleapis.com'
- @classmethod
- def get_invalid_upload_id(cls):
+ def get_invalid_upload_id(self):
return (
'http://%s/?upload_id='
'AyzB2Uo74W4EYxyi5dp_-r68jz8rtbvshsv4TX7srJVkJ57CxTY5Dw2' % (
- cls.get_staged_host()))
+ self.get_staged_host()))
- @classmethod
- def set_up_class(cls, debug):
+ def setUp(self):
"""
- Initializes test suite.
+ Creates dst bucket and data needed by each test.
"""
-
# Use a designated tmpdir prefix to make it easy to find the end of
# the tmp path.
- cls.tmpdir_prefix = 'tmp_resumable_upload_test'
+ self.tmpdir_prefix = 'tmp_resumable_upload_test'
# Create test source file data.
- cls.empty_src_file_size = 0
- (cls.empty_src_file_as_string, cls.empty_src_file) = (
- cls.build_test_input_file(cls.empty_src_file_size))
- cls.small_src_file_size = 2 * 1024 # 2 KB.
- (cls.small_src_file_as_string, cls.small_src_file) = (
- cls.build_test_input_file(cls.small_src_file_size))
- cls.larger_src_file_size = 500 * 1024 # 500 KB.
- (cls.larger_src_file_as_string, cls.larger_src_file) = (
- cls.build_test_input_file(cls.larger_src_file_size))
- cls.largest_src_file_size = 1024 * 1024 # 1 MB.
- (cls.largest_src_file_as_string, cls.largest_src_file) = (
- cls.build_test_input_file(cls.largest_src_file_size))
+ self.empty_src_file_size = 0
+ (self.empty_src_file_as_string, self.empty_src_file) = (
+ self.build_input_file(self.empty_src_file_size))
+ self.small_src_file_size = 2 * 1024 # 2 KB.
+ (self.small_src_file_as_string, self.small_src_file) = (
+ self.build_input_file(self.small_src_file_size))
+ self.larger_src_file_size = 500 * 1024 # 500 KB.
+ (self.larger_src_file_as_string, self.larger_src_file) = (
+ self.build_input_file(self.larger_src_file_size))
+ self.largest_src_file_size = 1024 * 1024 # 1 MB.
+ (self.largest_src_file_as_string, self.largest_src_file) = (
+ self.build_input_file(self.largest_src_file_size))
# Create temp dir.
- cls.tmp_dir = tempfile.mkdtemp(prefix=cls.tmpdir_prefix)
+ self.tmp_dir = tempfile.mkdtemp(prefix=self.tmpdir_prefix)
# Create the test bucket.
- cls.dst_bucket_uri = cls.get_dst_bucket_uri(debug)
- cls.dst_bucket_uri.create_bucket()
- cls.dst_key_uri = cls.get_dst_key_uri()
+ self.dst_bucket_uri = self.get_dst_bucket_uri()
+ self.dst_bucket_uri.create_bucket()
+ self.dst_key_uri = self.get_dst_key_uri()
- cls.tracker_file_name = '%s%suri_tracker' % (cls.tmp_dir, os.sep)
+ self.tracker_file_name = '%s%suri_tracker' % (self.tmp_dir, os.sep)
- cls.syntactically_invalid_tracker_file_name = (
- '%s%ssynt_invalid_uri_tracker' % (cls.tmp_dir, os.sep))
- f = open(cls.syntactically_invalid_tracker_file_name, 'w')
+ self.syntactically_invalid_tracker_file_name = (
+ '%s%ssynt_invalid_uri_tracker' % (self.tmp_dir, os.sep))
+ f = open(self.syntactically_invalid_tracker_file_name, 'w')
f.write('ftp://example.com')
f.close()
- cls.invalid_upload_id = cls.get_invalid_upload_id()
- cls.invalid_upload_id_tracker_file_name = (
- '%s%sinvalid_upload_id_tracker' % (cls.tmp_dir, os.sep))
- f = open(cls.invalid_upload_id_tracker_file_name, 'w')
- f.write(cls.invalid_upload_id)
+ self.invalid_upload_id = self.get_invalid_upload_id()
+ self.invalid_upload_id_tracker_file_name = (
+ '%s%sinvalid_upload_id_tracker' % (self.tmp_dir, os.sep))
+ f = open(self.invalid_upload_id_tracker_file_name, 'w')
+ f.write(self.invalid_upload_id)
f.close()
- cls.created_test_data = True
+ self.dst_key = self.dst_key_uri.new_key(validate=False)
+ self.created_test_data = True
- @classmethod
- def tear_down_class(cls):
+ def tearDown(self):
"""
- Deletes bucket and tmp dir created by set_up_class.
+ Deletes any objects, files, and bucket from each test run.
"""
- if not hasattr(cls, 'created_test_data'):
+ if not hasattr(self, 'created_test_data'):
return
+ shutil.rmtree(self.tmp_dir)
+
        # Retry (for up to 2 minutes) until the bucket gets deleted (it may
        # not succeed the first time round, due to eventual consistency of bucket delete
- # operations).
+ # operations). We also retry key deletions because if the key fails
+ # to be deleted on the first attempt, it will stop us from deleting
+ # the bucket.
for i in range(60):
try:
- cls.dst_bucket_uri.delete_bucket()
+ self.dst_key_uri.delete_key()
+ except GSResponseError, e:
+ # Ignore errors attempting to delete the key, because not all
+ # tests will write to the dst key.
+ pass
+ try:
+ self.dst_bucket_uri.delete_bucket()
break
except StorageResponseError:
print 'Test bucket (%s) not yet deleted, still trying' % (
- cls.dst_bucket_uri.uri)
+ self.dst_bucket_uri.uri)
time.sleep(2)
- shutil.rmtree(cls.tmp_dir)
- cls.tmp_dir = tempfile.mkdtemp(prefix=cls.tmpdir_prefix)
+ shutil.rmtree(self.tmp_dir, ignore_errors=True)
+ self.tmp_dir = tempfile.mkdtemp(prefix=self.tmpdir_prefix)
def test_non_resumable_upload(self):
"""
Tests that non-resumable uploads work
"""
- self.dst_key.set_contents_from_file(self.small_src_file)
+        # Seek to the end in case it's the first test.
+ self.small_src_file.seek(0, os.SEEK_END)
+ try:
+ self.dst_key.set_contents_from_file(self.small_src_file)
+            self.fail("should fail because the file pointer needs rewinding")
+ except AttributeError:
+ pass
+ # Now try calling with a proper rewind.
+ self.dst_key.set_contents_from_file(self.small_src_file, rewind=True)
self.assertEqual(self.small_src_file_size, self.dst_key.size)
self.assertEqual(self.small_src_file_as_string,
self.dst_key.get_contents_as_string())
@@ -212,6 +200,7 @@
Tests a single resumable upload, with no tracker URI persistence
"""
res_upload_handler = ResumableUploadHandler()
+ self.small_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.small_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(self.small_src_file_size, self.dst_key.size)
@@ -225,6 +214,7 @@
harnass = CallbackTestHarnass()
res_upload_handler = ResumableUploadHandler(
tracker_file_name=self.tracker_file_name, num_retries=0)
+ self.small_src_file.seek(0)
try:
self.dst_key.set_contents_from_file(
self.small_src_file, cb=harnass.call,
@@ -251,6 +241,7 @@
exception = ResumableUploadHandler.RETRYABLE_EXCEPTIONS[0]
harnass = CallbackTestHarnass(exception=exception)
res_upload_handler = ResumableUploadHandler(num_retries=1)
+ self.small_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.small_src_file, cb=harnass.call,
res_upload_handler=res_upload_handler)
@@ -266,6 +257,7 @@
exception = IOError(errno.EPIPE, "Broken pipe")
harnass = CallbackTestHarnass(exception=exception)
res_upload_handler = ResumableUploadHandler(num_retries=1)
+ self.small_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.small_src_file, cb=harnass.call,
res_upload_handler=res_upload_handler)
@@ -281,6 +273,7 @@
harnass = CallbackTestHarnass(
exception=OSError(errno.EACCES, 'Permission denied'))
res_upload_handler = ResumableUploadHandler(num_retries=1)
+ self.small_src_file.seek(0)
try:
self.dst_key.set_contents_from_file(
self.small_src_file, cb=harnass.call,
@@ -298,6 +291,7 @@
harnass = CallbackTestHarnass()
res_upload_handler = ResumableUploadHandler(
tracker_file_name=self.tracker_file_name, num_retries=1)
+ self.small_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.small_src_file, cb=harnass.call,
res_upload_handler=res_upload_handler)
@@ -313,6 +307,7 @@
Tests resumable upload that fails twice in one process, then completes
"""
res_upload_handler = ResumableUploadHandler(num_retries=3)
+ self.small_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.small_src_file, res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
@@ -332,6 +327,7 @@
fail_after_n_bytes=self.larger_src_file_size/2, num_times_to_fail=2)
res_upload_handler = ResumableUploadHandler(
tracker_file_name=self.tracker_file_name, num_retries=1)
+ self.larger_src_file.seek(0)
try:
self.dst_key.set_contents_from_file(
self.larger_src_file, cb=harnass.call,
@@ -343,6 +339,7 @@
# Ensure a tracker file survived.
self.assertTrue(os.path.exists(self.tracker_file_name))
# Try it one more time; this time should succeed.
+ self.larger_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.larger_src_file, cb=harnass.call,
res_upload_handler=res_upload_handler)
@@ -365,6 +362,7 @@
harnass = CallbackTestHarnass(
fail_after_n_bytes=self.larger_src_file_size/2)
res_upload_handler = ResumableUploadHandler(num_retries=1)
+ self.larger_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.larger_src_file, cb=harnass.call,
res_upload_handler=res_upload_handler)
@@ -382,6 +380,7 @@
Tests uploading an empty file (exercises boundary conditions).
"""
res_upload_handler = ResumableUploadHandler()
+ self.empty_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.empty_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(0, self.dst_key.size)
@@ -393,6 +392,7 @@
res_upload_handler = ResumableUploadHandler()
headers = {'Content-Type' : 'text/plain', 'Content-Encoding' : 'gzip',
'x-goog-meta-abc' : 'my meta', 'x-goog-acl' : 'public-read'}
+ self.small_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.small_src_file, headers=headers,
res_upload_handler=res_upload_handler)
@@ -423,6 +423,7 @@
# upload server).
res_upload_handler = ResumableUploadHandler(
tracker_file_name=self.tracker_file_name, num_retries=0)
+ self.larger_src_file.seek(0)
try:
self.dst_key.set_contents_from_file(
self.larger_src_file, cb=harnass.call,
@@ -440,6 +441,7 @@
# 500 response in the next attempt.
time.sleep(1)
try:
+ self.largest_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.largest_src_file, res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
@@ -447,7 +449,7 @@
# This abort should be a hard abort (file size changing during
# transfer).
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
- self.assertNotEqual(e.message.find('file size changed'), -1, e.message)
+ self.assertNotEqual(e.message.find('file size changed'), -1, e.message)
def test_upload_with_file_size_change_during_upload(self):
"""
@@ -456,7 +458,7 @@
"""
# Create a file we can change during the upload.
test_file_size = 500 * 1024 # 500 KB.
- test_file = self.build_test_input_file(test_file_size)[1]
+ test_file = self.build_input_file(test_file_size)[1]
harnass = CallbackTestHarnass(fp_to_change=test_file,
fp_change_pos=test_file_size)
res_upload_handler = ResumableUploadHandler(num_retries=1)
@@ -476,13 +478,13 @@
(so, size stays the same) while upload in progress
"""
test_file_size = 500 * 1024 # 500 KB.
- test_file = self.build_test_input_file(test_file_size)[1]
+ test_file = self.build_input_file(test_file_size)[1]
harnass = CallbackTestHarnass(fail_after_n_bytes=test_file_size/2,
fp_to_change=test_file,
- # Writing at file_size-5 won't change file
- # size because CallbackTestHarnass only
- # writes 3 bytes.
- fp_change_pos=test_file_size-5)
+ # Write to byte 1, as the CallbackTestHarnass writes
+ # 3 bytes. This will result in the data on the server
+ # being different than the local file.
+ fp_change_pos=1)
res_upload_handler = ResumableUploadHandler(num_retries=1)
try:
self.dst_key.set_contents_from_file(
@@ -510,6 +512,7 @@
to set the content length when gzipping a file.
"""
res_upload_handler = ResumableUploadHandler()
+ self.small_src_file.seek(0)
try:
self.dst_key.set_contents_from_file(
self.small_src_file, res_upload_handler=res_upload_handler,
@@ -528,6 +531,7 @@
tracker_file_name=self.syntactically_invalid_tracker_file_name)
# An error should be printed about the invalid URI, but then it
# should run the update successfully.
+ self.small_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.small_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(self.small_src_file_size, self.dst_key.size)
@@ -542,6 +546,7 @@
tracker_file_name=self.invalid_upload_id_tracker_file_name)
# An error should occur, but then the tracker URI should be
# regenerated and the the update should succeed.
+ self.small_src_file.seek(0)
self.dst_key.set_contents_from_file(
self.small_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(self.small_src_file_size, self.dst_key.size)
@@ -567,32 +572,3 @@
finally:
# Restore original protection of dir where tracker_file lives.
os.chmod(self.tmp_dir, save_mod)
-
-if __name__ == '__main__':
- if sys.version_info[:3] < (2, 5, 1):
- sys.exit('These tests must be run on at least Python 2.5.1\n')
-
- # Use -d to see more HTTP protocol detail during tests.
- debug = 0
- opts, args = getopt.getopt(sys.argv[1:], 'd', ['debug'])
- for o, a in opts:
- if o in ('-d', '--debug'):
- debug = 2
-
- test_loader = unittest.TestLoader()
- test_loader.testMethodPrefix = 'test_'
- suite = test_loader.loadTestsFromTestCase(ResumableUploadTests)
- # Seems like there should be a cleaner way to find the test_class.
- test_class = suite.__getattribute__('_tests')[0]
- # We call set_up_class() and tear_down_class() ourselves because we
- # don't assume the user has Python 2.7 (which supports classmethods
- # that do it, with camelCase versions of these names).
- try:
- print 'Setting up %s...' % test_class.get_suite_description()
- test_class.set_up_class(debug)
- print 'Running %s...' % test_class.get_suite_description()
- unittest.TextTestRunner(verbosity=2).run(suite)
- finally:
- print 'Cleaning up after %s...' % test_class.get_suite_description()
- test_class.tear_down_class()
- print ''
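
A note on the seek(0)/rewind pattern threaded through the rewritten upload tests above: source files are now built once per test in setUp() and shared between calls, so a StringIO left at EOF by one upload would make the next upload send zero bytes. A minimal sketch of the two remedies the tests use (only rewind=True comes from the diff itself; the rest is illustrative):

    import StringIO

    fp = StringIO.StringIO('payload')
    fp.read()   # simulate a prior consumer; the pointer now sits at EOF

    # Remedy 1: rewind explicitly before handing the file to boto.
    fp.seek(0)

    # Remedy 2: ask boto to rewind, as test_non_resumable_upload does:
    # key.set_contents_from_file(fp, rewind=True)
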
diff --git a/tests/integration/s3/test_versioning.py b/tests/integration/s3/test_versioning.py
new file mode 100644
index 0000000..4207e1b
--- /dev/null
+++ b/tests/integration/s3/test_versioning.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the S3 Versioning.
+"""
+
+import unittest
+import time
+from boto.s3.connection import S3Connection
+from boto.exception import S3ResponseError
+from boto.s3.deletemarker import DeleteMarker
+
+class S3VersionTest (unittest.TestCase):
+
+ def setUp(self):
+ self.conn = S3Connection()
+ self.bucket_name = 'version-%d' % int(time.time())
+ self.bucket = self.conn.create_bucket(self.bucket_name)
+
+ def tearDown(self):
+ for k in self.bucket.list_versions():
+ self.bucket.delete_key(k.name, version_id=k.version_id)
+ self.bucket.delete()
+
+ def test_1_versions(self):
+ # check versioning off
+ d = self.bucket.get_versioning_status()
+ self.assertFalse('Versioning' in d)
+
+ # enable versioning
+ self.bucket.configure_versioning(versioning=True)
+ d = self.bucket.get_versioning_status()
+ self.assertEqual('Enabled', d['Versioning'])
+
+ # create a new key in the versioned bucket
+ k = self.bucket.new_key("foobar")
+ s1 = 'This is v1'
+ k.set_contents_from_string(s1)
+
+ # remember the version id of this object
+ v1 = k.version_id
+
+ # now get the contents from s3
+ o1 = k.get_contents_as_string()
+
+ # check to make sure content read from k is identical to original
+ self.assertEqual(s1, o1)
+
+ # now overwrite that same key with new data
+ s2 = 'This is v2'
+ k.set_contents_from_string(s2)
+ v2 = k.version_id
+
+ # now retrieve latest contents as a string and compare
+ k2 = self.bucket.new_key("foobar")
+ o2 = k2.get_contents_as_string()
+ self.assertEqual(s2, o2)
+
+ # next retrieve explicit versions and compare
+ o1 = k.get_contents_as_string(version_id=v1)
+ o2 = k.get_contents_as_string(version_id=v2)
+ self.assertEqual(s1, o1)
+ self.assertEqual(s2, o2)
+
+ # Now list all versions and compare to what we have
+ rs = self.bucket.get_all_versions()
+ self.assertEqual(v2, rs[0].version_id)
+ self.assertEqual(v1, rs[1].version_id)
+
+ # Now do a regular list command and make sure only the new key shows up
+ rs = self.bucket.get_all_keys()
+ self.assertEqual(1, len(rs))
+
+ # Now do regular delete
+ self.bucket.delete_key('foobar')
+
+ # Now list versions and make sure old versions are there
+ # plus the DeleteMarker which is latest.
+ rs = self.bucket.get_all_versions()
+ self.assertEqual(3, len(rs))
+ self.assertTrue(isinstance(rs[0], DeleteMarker))
+
+ # Now delete v1 of the key
+ self.bucket.delete_key('foobar', version_id=v1)
+
+ # Now list versions again and make sure v1 is not there
+ rs = self.bucket.get_all_versions()
+ versions = [k.version_id for k in rs]
+ self.assertTrue(v1 not in versions)
+ self.assertTrue(v2 in versions)
+
+ # Now suspend Versioning on the bucket
+ self.bucket.configure_versioning(False)
+ # Allow time for the change to fully propagate.
+ time.sleep(3)
+ d = self.bucket.get_versioning_status()
+ self.assertEqual('Suspended', d['Versioning'])
+
+ def test_latest_version(self):
+ self.bucket.configure_versioning(versioning=True)
+
+ # add v1 of an object
+ key_name = "key"
+ kv1 = self.bucket.new_key(key_name)
+ kv1.set_contents_from_string("v1")
+
+ # read list which should contain latest v1
+ listed_kv1 = iter(self.bucket.get_all_versions()).next()
+ self.assertEqual(listed_kv1.name, key_name)
+ self.assertEqual(listed_kv1.version_id, kv1.version_id)
+ self.assertEqual(listed_kv1.is_latest, True)
+
+ # add v2 of the object
+ kv2 = self.bucket.new_key(key_name)
+ kv2.set_contents_from_string("v2")
+
+ # read 2 versions, confirm v2 is latest
+ i = iter(self.bucket.get_all_versions())
+ listed_kv2 = i.next()
+ listed_kv1 = i.next()
+ self.assertEqual(listed_kv2.version_id, kv2.version_id)
+ self.assertEqual(listed_kv1.version_id, kv1.version_id)
+ self.assertEqual(listed_kv2.is_latest, True)
+ self.assertEqual(listed_kv1.is_latest, False)
+
+ # delete key, which creates a delete marker as latest
+ self.bucket.delete_key(key_name)
+ i = iter(self.bucket.get_all_versions())
+ listed_kv3 = i.next()
+ listed_kv2 = i.next()
+ listed_kv1 = i.next()
+ self.assertNotEqual(listed_kv3.version_id, None)
+ self.assertEqual(listed_kv2.version_id, kv2.version_id)
+ self.assertEqual(listed_kv1.version_id, kv1.version_id)
+ self.assertEqual(listed_kv3.is_latest, True)
+ self.assertEqual(listed_kv2.is_latest, False)
+ self.assertEqual(listed_kv1.is_latest, False)
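
The two tests above pin down the ordering contract of get_all_versions(): newest entry first, with a DeleteMarker at the head after a plain delete. A condensed sketch of that lifecycle, assuming a bucket that already has versioning enabled (the bucket name is hypothetical):

    from boto.s3.connection import S3Connection
    from boto.s3.deletemarker import DeleteMarker

    conn = S3Connection()
    bucket = conn.get_bucket('my-versioned-bucket')  # hypothetical name

    bucket.delete_key('foobar')   # plain delete: writes a DeleteMarker
    versions = bucket.get_all_versions()
    assert isinstance(versions[0], DeleteMarker)  # the marker is latest
    # Older versions survive and stay readable by explicit version_id,
    # e.g. key.get_contents_as_string(version_id=versions[1].version_id)
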
diff --git a/tests/sdb/__init__.py b/tests/integration/sdb/__init__.py
similarity index 100%
rename from tests/sdb/__init__.py
rename to tests/integration/sdb/__init__.py
diff --git a/tests/integration/sdb/test_cert_verification.py b/tests/integration/sdb/test_cert_verification.py
new file mode 100644
index 0000000..1e0cf4f
--- /dev/null
+++ b/tests/integration/sdb/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.sdb
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ sdb = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.sdb.regions():
+ c = region.connect()
+ c.get_all_domains()
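
Every new test_cert_verification.py in this change follows the same pattern: enumerate all regions for the service, connect, and issue one cheap authenticated call so the TLS handshake (and with it, certificate validation) actually runs. Done by hand it looks like the sketch below; turning on boto's https_validate_certificates option is an assumption about how the harness is configured, not something this diff itself sets:

    import boto
    import boto.sdb

    # Without validation enabled the loop below proves nothing.
    if not boto.config.has_section('Boto'):
        boto.config.add_section('Boto')
    boto.config.set('Boto', 'https_validate_certificates', 'true')

    for region in boto.sdb.regions():
        conn = region.connect()
        conn.get_all_domains()  # any lightweight call forces the handshake
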
diff --git a/tests/sdb/test_connection.py b/tests/integration/sdb/test_connection.py
similarity index 99%
rename from tests/sdb/test_connection.py
rename to tests/integration/sdb/test_connection.py
index a834a9d..72a26cf 100644
--- a/tests/sdb/test_connection.py
+++ b/tests/integration/sdb/test_connection.py
@@ -31,6 +31,7 @@
from boto.exception import SDBResponseError
class SDBConnectionTest (unittest.TestCase):
+ sdb = True
def test_1_basic(self):
print '--- running SDBConnection tests ---'
diff --git a/tests/cloudfront/__init__.py b/tests/integration/ses/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/integration/ses/__init__.py
diff --git a/tests/integration/ses/test_cert_verification.py b/tests/integration/ses/test_cert_verification.py
new file mode 100644
index 0000000..8954ec8
--- /dev/null
+++ b/tests/integration/ses/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.ses
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ ses = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.ses.regions():
+ c = region.connect()
+ c.list_verified_email_addresses()
diff --git a/tests/integration/ses/test_connection.py b/tests/integration/ses/test_connection.py
new file mode 100644
index 0000000..83b9994
--- /dev/null
+++ b/tests/integration/ses/test_connection.py
@@ -0,0 +1,38 @@
+from tests.unit import unittest
+
+from boto.ses.connection import SESConnection
+from boto.ses import exceptions
+
+
+class SESConnectionTest(unittest.TestCase):
+ ses = True
+
+ def setUp(self):
+ self.ses = SESConnection()
+
+ def test_get_dkim_attributes(self):
+ response = self.ses.get_identity_dkim_attributes(['example.com'])
+ # Verify we get the structure we expect, we don't care about the
+ # values.
+ self.assertTrue('GetIdentityDkimAttributesResponse' in response)
+ self.assertTrue('GetIdentityDkimAttributesResult' in
+ response['GetIdentityDkimAttributesResponse'])
+ self.assertTrue(
+ 'DkimAttributes' in response['GetIdentityDkimAttributesResponse']\
+ ['GetIdentityDkimAttributesResult'])
+
+ def test_set_identity_dkim_enabled(self):
+        # This API call should fail because we have not verified the domain,
+        # so we can test that it at least fails as we expect.
+ with self.assertRaises(exceptions.SESIdentityNotVerifiedError):
+ self.ses.set_identity_dkim_enabled('example.com', True)
+
+ def test_verify_domain_dkim(self):
+        # This API call should fail because we have not confirmed the domain,
+        # so we can test that it at least fails as we expect.
+ with self.assertRaises(exceptions.SESDomainNotConfirmedError):
+ self.ses.verify_domain_dkim('example.com')
+
+
+if __name__ == '__main__':
+ unittest.main()
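
Reading the assertions in test_get_dkim_attributes back out, the parsed response nests like the sketch below; only the keys the test asserts on are shown, and the leaf value is elided:

    response = {
        'GetIdentityDkimAttributesResponse': {
            'GetIdentityDkimAttributesResult': {
                'DkimAttributes': '...',  # per-identity DKIM details
            },
        },
    }
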
diff --git a/tests/sqs/__init__.py b/tests/integration/sns/__init__.py
similarity index 100%
copy from tests/sqs/__init__.py
copy to tests/integration/sns/__init__.py
diff --git a/tests/integration/sns/test_cert_verification.py b/tests/integration/sns/test_cert_verification.py
new file mode 100644
index 0000000..a67e1aa
--- /dev/null
+++ b/tests/integration/sns/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on SNS endpoints validate.
+"""
+
+import unittest
+import boto.sns
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ sns = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.sns.regions():
+ c = region.connect()
+ c.get_all_topics()
diff --git a/tests/sqs/__init__.py b/tests/integration/sqs/__init__.py
similarity index 100%
rename from tests/sqs/__init__.py
rename to tests/integration/sqs/__init__.py
diff --git a/tests/integration/sqs/test_cert_verification.py b/tests/integration/sqs/test_cert_verification.py
new file mode 100644
index 0000000..1b18fe8
--- /dev/null
+++ b/tests/integration/sqs/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on SQS endpoints validate.
+"""
+
+import unittest
+import boto.sqs
+
+
+class SQSCertVerificationTest(unittest.TestCase):
+
+ sqs = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.sqs.regions():
+ c = region.connect()
+ c.get_all_queues()
diff --git a/tests/sqs/test_connection.py b/tests/integration/sqs/test_connection.py
similarity index 81%
rename from tests/sqs/test_connection.py
rename to tests/integration/sqs/test_connection.py
index 6996a54..4851be9 100644
--- a/tests/sqs/test_connection.py
+++ b/tests/integration/sqs/test_connection.py
@@ -16,7 +16,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -31,7 +31,10 @@
from boto.sqs.message import MHMessage
from boto.exception import SQSError
-class SQSConnectionTest (unittest.TestCase):
+
+class SQSConnectionTest(unittest.TestCase):
+
+ sqs = True
def test_1_basic(self):
print '--- running SQSConnection tests ---'
@@ -40,24 +43,24 @@
num_queues = 0
for q in rs:
num_queues += 1
-
+
# try illegal name
try:
queue = c.create_queue('bad*queue*name')
self.fail('queue name should have been bad')
except SQSError:
pass
-
+
# now create one that should work and should be unique (i.e. a new one)
queue_name = 'test%d' % int(time.time())
timeout = 60
queue = c.create_queue(queue_name, timeout)
time.sleep(60)
- rs = c.get_all_queues()
+ rs = c.get_all_queues()
i = 0
for q in rs:
i += 1
- assert i == num_queues+1
+ assert i == num_queues + 1
assert queue.count_slow() == 0
# check the visibility timeout
@@ -66,14 +69,14 @@
# now try to get queue attributes
a = q.get_attributes()
- assert a.has_key('ApproximateNumberOfMessages')
- assert a.has_key('VisibilityTimeout')
+ assert 'ApproximateNumberOfMessages' in a
+ assert 'VisibilityTimeout' in a
a = q.get_attributes('ApproximateNumberOfMessages')
- assert a.has_key('ApproximateNumberOfMessages')
- assert not a.has_key('VisibilityTimeout')
+ assert 'ApproximateNumberOfMessages' in a
+ assert 'VisibilityTimeout' not in a
a = q.get_attributes('VisibilityTimeout')
- assert not a.has_key('ApproximateNumberOfMessages')
- assert a.has_key('VisibilityTimeout')
+ assert 'ApproximateNumberOfMessages' not in a
+ assert 'VisibilityTimeout' in a
# now change the visibility timeout
timeout = 45
@@ -81,7 +84,7 @@
time.sleep(60)
t = queue.get_timeout()
assert t == timeout, '%d != %d' % (t, timeout)
-
+
# now add a message
message_body = 'This is a test\n'
message = queue.new_message(message_body)
@@ -109,6 +112,20 @@
time.sleep(30)
assert queue.count_slow() == 0
+ # try a batch write
+ num_msgs = 10
+ msgs = [(i, 'This is message %d' % i, 0) for i in range(num_msgs)]
+ queue.write_batch(msgs)
+
+ # try to delete all of the messages using batch delete
+ deleted = 0
+ while deleted < num_msgs:
+ time.sleep(5)
+ msgs = queue.get_messages(num_msgs)
+ if msgs:
+ br = queue.delete_message_batch(msgs)
+ deleted += len(br.results)
+
# create another queue so we can test force deletion
# we will also test MHMessage with this queue
queue_name = 'test%d' % int(time.time())
@@ -116,12 +133,12 @@
queue = c.create_queue(queue_name, timeout)
queue.set_message_class(MHMessage)
time.sleep(30)
-
+
# now add a couple of messages
message = queue.new_message()
message['foo'] = 'bar'
queue.write(message)
- message_body = {'fie' : 'baz', 'foo' : 'bar'}
+ message_body = {'fie': 'baz', 'foo': 'bar'}
message = queue.new_message(body=message_body)
queue.write(message)
time.sleep(30)
@@ -133,4 +150,3 @@
c.delete_queue(queue, True)
print '--- tests completed ---'
-
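
The batch block added above also documents the message format write_batch() expects: a list of (id, body, delay_seconds) triples, with delete_message_batch() returning an object whose .results holds the per-message outcomes. The same round trip, condensed (queue stands in for an existing boto.sqs queue object):

    # Write ten messages in one request; each entry is (id, body, delay).
    msgs = [(i, 'This is message %d' % i, 0) for i in range(10)]
    queue.write_batch(msgs)

    # Drain them again with batch deletes, counting acknowledgements.
    deleted = 0
    while deleted < len(msgs):
        fetched = queue.get_messages(len(msgs))
        if fetched:
            br = queue.delete_message_batch(fetched)
            deleted += len(br.results)
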
diff --git a/tests/integration/sts/__init__.py b/tests/integration/sts/__init__.py
new file mode 100644
index 0000000..354aa06
--- /dev/null
+++ b/tests/integration/sts/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/tests/integration/sts/test_cert_verification.py b/tests/integration/sts/test_cert_verification.py
new file mode 100644
index 0000000..0696ed9
--- /dev/null
+++ b/tests/integration/sts/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.sts
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ sts = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.sts.regions():
+ c = region.connect()
+ c.get_session_token()
diff --git a/tests/integration/sts/test_session_token.py b/tests/integration/sts/test_session_token.py
new file mode 100644
index 0000000..fa33d5f
--- /dev/null
+++ b/tests/integration/sts/test_session_token.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Tests for Session Tokens
+"""
+
+import unittest
+import time
+import os
+from boto.sts.connection import STSConnection
+from boto.sts.credentials import Credentials
+from boto.s3.connection import S3Connection
+
+class SessionTokenTest (unittest.TestCase):
+ sts = True
+
+ def test_session_token(self):
+ print '--- running Session Token tests ---'
+ c = STSConnection()
+
+ # Create a session token
+ token = c.get_session_token()
+
+ # Save session token to a file
+ token.save('token.json')
+
+ # Now load up a copy of that token
+ token_copy = Credentials.load('token.json')
+ assert token_copy.access_key == token.access_key
+ assert token_copy.secret_key == token.secret_key
+ assert token_copy.session_token == token.session_token
+ assert token_copy.expiration == token.expiration
+ assert token_copy.request_id == token.request_id
+
+ os.unlink('token.json')
+
+ assert not token.is_expired()
+
+ # Try using the session token with S3
+ s3 = S3Connection(aws_access_key_id=token.access_key,
+ aws_secret_access_key=token.secret_key,
+ security_token=token.session_token)
+ buckets = s3.get_all_buckets()
+
+ print '--- tests completed ---'
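
The save()/load()/is_expired() trio exercised above composes into a small file-backed token cache. A sketch built only from calls this test uses (the cache path is arbitrary):

    import os
    from boto.sts.connection import STSConnection
    from boto.sts.credentials import Credentials

    def cached_session_token(path='token.json'):
        # Reuse a saved token while it is still valid...
        if os.path.exists(path):
            token = Credentials.load(path)
            if not token.is_expired():
                return token
        # ...otherwise fetch a fresh one and persist it for next time.
        token = STSConnection().get_session_token()
        token.save(path)
        return token
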
diff --git a/tests/cloudfront/__init__.py b/tests/integration/swf/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/integration/swf/__init__.py
diff --git a/tests/integration/swf/test_cert_verification.py b/tests/integration/swf/test_cert_verification.py
new file mode 100644
index 0000000..1328b82
--- /dev/null
+++ b/tests/integration/swf/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.swf
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ swf = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.swf.regions():
+ c = region.connect()
+ c.list_domains('REGISTERED')
diff --git a/tests/integration/swf/test_layer1.py b/tests/integration/swf/test_layer1.py
new file mode 100644
index 0000000..02ad051
--- /dev/null
+++ b/tests/integration/swf/test_layer1.py
@@ -0,0 +1,246 @@
+"""
+Tests for Layer1 of Simple Workflow
+
+"""
+import os
+import unittest
+import time
+
+from boto.swf.layer1 import Layer1
+from boto.swf import exceptions as swf_exceptions
+
+
+
+# A standard AWS account is permitted a maximum of 100 SWF domains,
+# registered or deprecated. Deleting deprecated domains on demand does
+# not appear to be possible. Therefore, these tests reuse a default or
+# user-named testing domain. This is named by the user via the environment
+# variable BOTO_SWF_UNITTEST_DOMAIN, if available. Otherwise the default
+# testing domain is literally "boto-swf-unittest-domain". Do not use
+# the testing domain for other purposes.
+BOTO_SWF_UNITTEST_DOMAIN = os.environ.get("BOTO_SWF_UNITTEST_DOMAIN",
+ "boto-swf-unittest-domain")
+
+# A standard domain can have a maximum of 10,000 workflow types and
+# activity types, registered or deprecated. Therefore, eventually any
+# tests which register new workflow types or activity types would begin
+# to fail with LimitExceeded. Instead of generating new workflow types
+# and activity types, these tests reuse the existing types.
+
+# The consequence of the limits and inability to delete deprecated
+# domains, workflow types, and activity types is that the tests in
+# this module will not test for the three register actions:
+# * register_domain
+# * register_workflow_type
+# * register_activity_type
+# Instead, the setUp of the TestCase creates a domain, workflow type,
+# and activity type, expecting that they may already exist, and the
+# tests themselves test other things.
+
+# If you really want to re-test the register_* functions in their
+# ability to create things (rather than just reporting that they
+# already exist), you'll need to use a new BOTO_SWF_UNITTEST_DOMAIN.
+# But beware that once you hit 100 domains, you cannot create any
+# more, delete existing ones, or rename existing ones.
+
+# Some API calls establish resources, but these resources are not instantly
+# available to the next API call. For testing purposes, it is necessary to
+# have a short pause to avoid having tests fail for invalid reasons.
+PAUSE_SECONDS = 4
+
+
+
+class SimpleWorkflowLayer1TestBase(unittest.TestCase):
+ """
+ There are at least two test cases which share this setUp/tearDown
+ and the class-based parameter definitions:
+ * SimpleWorkflowLayer1Test
+ * tests.swf.test_layer1_workflow_execution.SwfL1WorkflowExecutionTest
+ """
+ swf = True
+ # Some params used throughout the tests...
+ # Domain registration params...
+ _domain = BOTO_SWF_UNITTEST_DOMAIN
+ _workflow_execution_retention_period_in_days = 'NONE'
+ _domain_description = 'test workflow domain'
+ # Type registration params used for workflow type and activity type...
+ _task_list = 'tasklist1'
+ # Workflow type registration params...
+ _workflow_type_name = 'wft1'
+ _workflow_type_version = '1'
+ _workflow_type_description = 'wft1 description'
+ _default_child_policy = 'REQUEST_CANCEL'
+ _default_execution_start_to_close_timeout = '600'
+ _default_task_start_to_close_timeout = '60'
+ # Activity type registration params...
+ _activity_type_name = 'at1'
+ _activity_type_version = '1'
+ _activity_type_description = 'at1 description'
+ _default_task_heartbeat_timeout = '30'
+ _default_task_schedule_to_close_timeout = '90'
+ _default_task_schedule_to_start_timeout = '10'
+ _default_task_start_to_close_timeout = '30'
+
+
+ def setUp(self):
+ # Create a Layer1 connection for testing.
+ # Tester needs boto config or keys in environment variables.
+ self.conn = Layer1()
+
+ # Register a domain. Expect None (success) or
+ # SWFDomainAlreadyExistsError.
+ try:
+ r = self.conn.register_domain(self._domain,
+ self._workflow_execution_retention_period_in_days,
+ description=self._domain_description)
+ assert r is None
+ time.sleep(PAUSE_SECONDS)
+ except swf_exceptions.SWFDomainAlreadyExistsError:
+ pass
+
+ # Register a workflow type. Expect None (success) or
+ # SWFTypeAlreadyExistsError.
+ try:
+ r = self.conn.register_workflow_type(self._domain,
+ self._workflow_type_name, self._workflow_type_version,
+ task_list=self._task_list,
+ default_child_policy=self._default_child_policy,
+ default_execution_start_to_close_timeout=
+ self._default_execution_start_to_close_timeout,
+ default_task_start_to_close_timeout=
+ self._default_task_start_to_close_timeout,
+ description=self._workflow_type_description)
+ assert r is None
+ time.sleep(PAUSE_SECONDS)
+ except swf_exceptions.SWFTypeAlreadyExistsError:
+ pass
+
+ # Register an activity type. Expect None (success) or
+ # SWFTypeAlreadyExistsError.
+ try:
+ r = self.conn.register_activity_type(self._domain,
+ self._activity_type_name, self._activity_type_version,
+ task_list=self._task_list,
+ default_task_heartbeat_timeout=
+ self._default_task_heartbeat_timeout,
+ default_task_schedule_to_close_timeout=
+ self._default_task_schedule_to_close_timeout,
+ default_task_schedule_to_start_timeout=
+ self._default_task_schedule_to_start_timeout,
+ default_task_start_to_close_timeout=
+ self._default_task_start_to_close_timeout,
+ description=self._activity_type_description)
+ assert r is None
+ time.sleep(PAUSE_SECONDS)
+ except swf_exceptions.SWFTypeAlreadyExistsError:
+ pass
+
+ def tearDown(self):
+ # Delete what we can...
+ pass
+
+
+
+
+class SimpleWorkflowLayer1Test(SimpleWorkflowLayer1TestBase):
+
+ def test_list_domains(self):
+ # Find the domain.
+ r = self.conn.list_domains('REGISTERED')
+ found = None
+ for info in r['domainInfos']:
+ if info['name'] == self._domain:
+ found = info
+ break
+ self.assertNotEqual(found, None, 'list_domains; test domain not found')
+ # Validate some properties.
+ self.assertEqual(found['description'], self._domain_description,
+ 'list_domains; description does not match')
+ self.assertEqual(found['status'], 'REGISTERED',
+ 'list_domains; status does not match')
+
+ def test_list_workflow_types(self):
+ # Find the workflow type.
+ r = self.conn.list_workflow_types(self._domain, 'REGISTERED')
+ found = None
+ for info in r['typeInfos']:
+ if ( info['workflowType']['name'] == self._workflow_type_name and
+ info['workflowType']['version'] == self._workflow_type_version ):
+ found = info
+ break
+ self.assertNotEqual(found, None, 'list_workflow_types; test type not found')
+ # Validate some properties.
+ self.assertEqual(found['description'], self._workflow_type_description,
+ 'list_workflow_types; description does not match')
+ self.assertEqual(found['status'], 'REGISTERED',
+ 'list_workflow_types; status does not match')
+
+ def test_list_activity_types(self):
+ # Find the activity type.
+ r = self.conn.list_activity_types(self._domain, 'REGISTERED')
+ found = None
+ for info in r['typeInfos']:
+ if info['activityType']['name'] == self._activity_type_name:
+ found = info
+ break
+ self.assertNotEqual(found, None, 'list_activity_types; test type not found')
+ # Validate some properties.
+ self.assertEqual(found['description'], self._activity_type_description,
+ 'list_activity_types; description does not match')
+ self.assertEqual(found['status'], 'REGISTERED',
+ 'list_activity_types; status does not match')
+
+
+ def test_list_closed_workflow_executions(self):
+ # Test various legal ways to call function.
+ latest_date = time.time()
+ oldest_date = time.time() - 3600
+ # With startTimeFilter...
+ self.conn.list_closed_workflow_executions(self._domain,
+ start_latest_date=latest_date, start_oldest_date=oldest_date)
+ # With closeTimeFilter...
+ self.conn.list_closed_workflow_executions(self._domain,
+ close_latest_date=latest_date, close_oldest_date=oldest_date)
+ # With closeStatusFilter...
+ self.conn.list_closed_workflow_executions(self._domain,
+ close_latest_date=latest_date, close_oldest_date=oldest_date,
+ close_status='COMPLETED')
+ # With tagFilter...
+ self.conn.list_closed_workflow_executions(self._domain,
+ close_latest_date=latest_date, close_oldest_date=oldest_date,
+ tag='ig')
+ # With executionFilter...
+ self.conn.list_closed_workflow_executions(self._domain,
+ close_latest_date=latest_date, close_oldest_date=oldest_date,
+ workflow_id='ig')
+ # With typeFilter...
+ self.conn.list_closed_workflow_executions(self._domain,
+ close_latest_date=latest_date, close_oldest_date=oldest_date,
+ workflow_name='ig', workflow_version='ig')
+ # With reverseOrder...
+ self.conn.list_closed_workflow_executions(self._domain,
+ close_latest_date=latest_date, close_oldest_date=oldest_date,
+ reverse_order=True)
+
+
+ def test_list_open_workflow_executions(self):
+ # Test various legal ways to call function.
+ latest_date = time.time()
+ oldest_date = time.time() - 3600
+ # With required params only...
+        self.conn.list_open_workflow_executions(self._domain,
+            latest_date, oldest_date)
+        # With tagFilter...
+        self.conn.list_open_workflow_executions(self._domain,
+            latest_date, oldest_date, tag='ig')
+        # With executionFilter...
+        self.conn.list_open_workflow_executions(self._domain,
+            latest_date, oldest_date, workflow_id='ig')
+        # With typeFilter...
+        self.conn.list_open_workflow_executions(self._domain,
+            latest_date, oldest_date,
+            workflow_name='ig', workflow_version='ig')
+        # With reverseOrder...
+        self.conn.list_open_workflow_executions(self._domain,
+            latest_date, oldest_date, reverse_order=True)
+
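
Because SWF domains and types can be deprecated but never deleted, the setUp above treats "already exists" as success. Stripped to its core, the idiom is:

    from boto.swf.layer1 import Layer1
    from boto.swf import exceptions as swf_exceptions

    conn = Layer1()
    try:
        conn.register_domain('boto-swf-unittest-domain', 'NONE',
                             description='test workflow domain')
    except swf_exceptions.SWFDomainAlreadyExistsError:
        pass  # left over from an earlier run; reuse it
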
diff --git a/tests/integration/swf/test_layer1_workflow_execution.py b/tests/integration/swf/test_layer1_workflow_execution.py
new file mode 100644
index 0000000..6f59a7a
--- /dev/null
+++ b/tests/integration/swf/test_layer1_workflow_execution.py
@@ -0,0 +1,173 @@
+"""
+Tests for Layer1 of Simple Workflow
+
+"""
+import time
+import uuid
+import json
+import traceback
+
+from boto.swf.layer1_decisions import Layer1Decisions
+
+from test_layer1 import SimpleWorkflowLayer1TestBase
+
+
+
+class SwfL1WorkflowExecutionTest(SimpleWorkflowLayer1TestBase):
+ """
+ test a simple workflow execution
+ """
+ swf = True
+
+ def run_decider(self):
+ """
+ run one iteration of a simple decision engine
+ """
+ # Poll for a decision task.
+ tries = 0
+ while True:
+ dtask = self.conn.poll_for_decision_task(self._domain,
+ self._task_list, reverse_order=True)
+ if dtask.get('taskToken') is not None:
+ # This means a real decision task has arrived.
+ break
+ time.sleep(2)
+ tries += 1
+ if tries > 10:
+ # Give up if it's taking too long. Probably
+ # means something is broken somewhere else.
+ assert False, 'no decision task occurred'
+
+ # Get the most recent interesting event.
+ ignorable = (
+ 'DecisionTaskScheduled',
+ 'DecisionTaskStarted',
+ 'DecisionTaskTimedOut',
+ )
+ event = None
+ for tevent in dtask['events']:
+ if tevent['eventType'] not in ignorable:
+ event = tevent
+ break
+
+ # Construct the decision response.
+ decisions = Layer1Decisions()
+ if event['eventType'] == 'WorkflowExecutionStarted':
+ activity_id = str(uuid.uuid1())
+ decisions.schedule_activity_task(activity_id,
+ self._activity_type_name, self._activity_type_version,
+ task_list=self._task_list,
+ input=event['workflowExecutionStartedEventAttributes']['input'])
+ elif event['eventType'] == 'ActivityTaskCompleted':
+ decisions.complete_workflow_execution(
+ result=event['activityTaskCompletedEventAttributes']['result'])
+ elif event['eventType'] == 'ActivityTaskFailed':
+ decisions.fail_workflow_execution(
+ reason=event['activityTaskFailedEventAttributes']['reason'],
+ details=event['activityTaskFailedEventAttributes']['details'])
+ else:
+ decisions.fail_workflow_execution(
+ reason='unhandled decision task type; %r' % (event['eventType'],))
+
+ # Send the decision response.
+ r = self.conn.respond_decision_task_completed(dtask['taskToken'],
+ decisions=decisions._data,
+ execution_context=None)
+ assert r is None
+
+
+ def run_worker(self):
+ """
+ run one iteration of a simple worker engine
+ """
+ # Poll for an activity task.
+ tries = 0
+ while True:
+ atask = self.conn.poll_for_activity_task(self._domain,
+ self._task_list, identity='test worker')
+ if atask.get('activityId') is not None:
+ # This means a real activity task has arrived.
+ break
+ time.sleep(2)
+ tries += 1
+ if tries > 10:
+ # Give up if it's taking too long. Probably
+ # means something is broken somewhere else.
+ assert False, 'no activity task occurred'
+ # Do the work or catch a "work exception."
+ reason = None
+ try:
+ result = json.dumps(sum(json.loads(atask['input'])))
+ except:
+ reason = 'an exception was raised'
+ details = traceback.format_exc()
+ if reason is None:
+ r = self.conn.respond_activity_task_completed(
+ atask['taskToken'], result)
+ else:
+ r = self.conn.respond_activity_task_failed(
+ atask['taskToken'], reason=reason, details=details)
+ assert r is None
+
+
+ def test_workflow_execution(self):
+ # Start a workflow execution whose activity task will succeed.
+ workflow_id = 'wfid-%.2f' % (time.time(),)
+ r = self.conn.start_workflow_execution(self._domain,
+ workflow_id,
+ self._workflow_type_name,
+ self._workflow_type_version,
+ execution_start_to_close_timeout='20',
+ input='[600, 15]')
+ # Need the run_id to lookup the execution history later.
+ run_id = r['runId']
+
+ # Move the workflow execution forward by having the
+ # decider schedule an activity task.
+ self.run_decider()
+
+ # Run the worker to handle the scheduled activity task.
+ self.run_worker()
+
+ # Complete the workflow execution by having the
+ # decider close it down.
+ self.run_decider()
+
+ # Check that the result was stored in the execution history.
+ r = self.conn.get_workflow_execution_history(self._domain,
+ run_id, workflow_id,
+ reverse_order=True)['events'][0]
+ result = r['workflowExecutionCompletedEventAttributes']['result']
+ assert json.loads(result) == 615
+
+
+ def test_failed_workflow_execution(self):
+ # Start a workflow execution whose activity task will fail.
+ workflow_id = 'wfid-%.2f' % (time.time(),)
+ r = self.conn.start_workflow_execution(self._domain,
+ workflow_id,
+ self._workflow_type_name,
+ self._workflow_type_version,
+ execution_start_to_close_timeout='20',
+ input='[600, "s"]')
+ # Need the run_id to lookup the execution history later.
+ run_id = r['runId']
+
+ # Move the workflow execution forward by having the
+ # decider schedule an activity task.
+ self.run_decider()
+
+ # Run the worker to handle the scheduled activity task.
+ self.run_worker()
+
+ # Complete the workflow execution by having the
+ # decider close it down.
+ self.run_decider()
+
+ # Check that the failure was stored in the execution history.
+ r = self.conn.get_workflow_execution_history(self._domain,
+ run_id, workflow_id,
+ reverse_order=True)['events'][0]
+ reason = r['workflowExecutionFailedEventAttributes']['reason']
+ assert reason == 'an exception was raised'
+
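
run_decider() above is a single turn of a decision engine: poll, find the newest interesting event, and answer with a batch of decisions. The skeleton, using only calls the test itself makes (conn, domain, and task_list stand in for the test fixtures):

    from boto.swf.layer1_decisions import Layer1Decisions

    dtask = conn.poll_for_decision_task(domain, task_list,
                                        reverse_order=True)
    if dtask.get('taskToken') is not None:
        decisions = Layer1Decisions()
        # ...schedule_activity_task, complete_workflow_execution, or
        # fail_workflow_execution, depending on the newest event...
        conn.respond_decision_task_completed(dtask['taskToken'],
                                             decisions=decisions._data)
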
diff --git a/boto/mturk/test/.gitignore b/tests/mturk/.gitignore
similarity index 100%
rename from boto/mturk/test/.gitignore
rename to tests/mturk/.gitignore
diff --git a/tests/cloudfront/__init__.py b/tests/mturk/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/mturk/__init__.py
diff --git a/tests/mturk/_init_environment.py b/tests/mturk/_init_environment.py
new file mode 100644
index 0000000..3ca5cf6
--- /dev/null
+++ b/tests/mturk/_init_environment.py
@@ -0,0 +1,28 @@
+import os
+import functools
+
+live_connection = False
+mturk_host = 'mechanicalturk.sandbox.amazonaws.com'
+external_url = 'http://www.example.com/'
+
+
+SetHostMTurkConnection = None
+
+def config_environment():
+ global SetHostMTurkConnection
+ try:
+ local = os.path.join(os.path.dirname(__file__), 'local.py')
+ execfile(local)
+ except:
+ pass
+
+ if live_connection:
+ #TODO: you must set the auth credentials to something valid
+ from boto.mturk.connection import MTurkConnection
+ else:
+ # Here the credentials must be set, but it doesn't matter what
+ # they're set to.
+ os.environ.setdefault('AWS_ACCESS_KEY_ID', 'foo')
+ os.environ.setdefault('AWS_SECRET_ACCESS_KEY', 'bar')
+ from mocks import MTurkConnection
+ SetHostMTurkConnection = functools.partial(MTurkConnection, host=mturk_host)
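
config_environment() execfile()s an optional tests/mturk/local.py so a developer can point the suite at the live sandbox without touching checked-in defaults. That file is not part of this diff; a hypothetical one would simply rebind the module-level names defined above:

    # tests/mturk/local.py (hypothetical developer override, not committed)
    live_connection = True
    mturk_host = 'mechanicalturk.sandbox.amazonaws.com'
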
diff --git a/boto/mturk/test/all_tests.py b/tests/mturk/all_tests.py
similarity index 90%
rename from boto/mturk/test/all_tests.py
rename to tests/mturk/all_tests.py
index f17cf85..ba2e122 100644
--- a/boto/mturk/test/all_tests.py
+++ b/tests/mturk/all_tests.py
@@ -11,7 +11,7 @@
doctest_suite = doctest.DocFileSuite(
*glob('*.doctest'),
- optionflags=doctest.REPORT_ONLY_FIRST_FAILURE
+ **{'optionflags': doctest.REPORT_ONLY_FIRST_FAILURE}
)
class Program(unittest.TestProgram):
diff --git a/boto/mturk/test/cleanup_tests.py b/tests/mturk/cleanup_tests.py
similarity index 94%
rename from boto/mturk/test/cleanup_tests.py
rename to tests/mturk/cleanup_tests.py
index 2381dd9..bda5167 100644
--- a/boto/mturk/test/cleanup_tests.py
+++ b/tests/mturk/cleanup_tests.py
@@ -1,6 +1,7 @@
import itertools
from _init_environment import SetHostMTurkConnection
+from _init_environment import config_environment
def description_filter(substring):
return lambda hit: substring in hit.Title
@@ -17,6 +18,7 @@
def cleanup():
"""Remove any boto test related HIT's"""
+ config_environment()
global conn
diff --git a/tests/mturk/common.py b/tests/mturk/common.py
new file mode 100644
index 0000000..151714a
--- /dev/null
+++ b/tests/mturk/common.py
@@ -0,0 +1,45 @@
+import unittest
+import uuid
+import datetime
+
+from boto.mturk.question import (
+ Question, QuestionContent, AnswerSpecification, FreeTextAnswer,
+)
+from _init_environment import SetHostMTurkConnection, config_environment
+
+class MTurkCommon(unittest.TestCase):
+ def setUp(self):
+ config_environment()
+ self.conn = SetHostMTurkConnection()
+
+ @staticmethod
+ def get_question():
+ # create content for a question
+ qn_content = QuestionContent()
+ qn_content.append_field('Title', 'Boto no hit type question content')
+ qn_content.append_field('Text', 'What is a boto no hit type?')
+
+ # create the question specification
+ qn = Question(identifier=str(uuid.uuid4()),
+ content=qn_content,
+ answer_spec=AnswerSpecification(FreeTextAnswer()))
+ return qn
+
+ @staticmethod
+ def get_hit_params():
+ return dict(
+ lifetime=datetime.timedelta(minutes=65),
+ max_assignments=2,
+ title='Boto create_hit title',
+ description='Boto create_hit description',
+ keywords=['boto', 'test'],
+ reward=0.23,
+ duration=datetime.timedelta(minutes=6),
+ approval_delay=60*60,
+ annotation='An annotation from boto create_hit test',
+ response_groups=['Minimal',
+ 'HITDetail',
+ 'HITQuestion',
+ 'HITAssignmentSummary',],
+ )
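+
+    # A sketch of typical use in a subclass: combine the helpers, e.g.
+    #     hit = self.conn.create_hit(question=self.get_question(),
+    #                                **self.get_hit_params())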
+
diff --git a/boto/mturk/test/create_free_text_question_regex.doctest b/tests/mturk/create_free_text_question_regex.doctest
similarity index 100%
rename from boto/mturk/test/create_free_text_question_regex.doctest
rename to tests/mturk/create_free_text_question_regex.doctest
diff --git a/boto/mturk/test/create_hit.doctest b/tests/mturk/create_hit.doctest
similarity index 100%
rename from boto/mturk/test/create_hit.doctest
rename to tests/mturk/create_hit.doctest
diff --git a/boto/mturk/test/create_hit_binary.doctest b/tests/mturk/create_hit_binary.doctest
similarity index 100%
rename from boto/mturk/test/create_hit_binary.doctest
rename to tests/mturk/create_hit_binary.doctest
diff --git a/tests/mturk/create_hit_external.py b/tests/mturk/create_hit_external.py
new file mode 100644
index 0000000..f2264c8
--- /dev/null
+++ b/tests/mturk/create_hit_external.py
@@ -0,0 +1,21 @@
+import unittest
+import uuid
+import datetime
+from boto.mturk.question import ExternalQuestion
+
+from _init_environment import SetHostMTurkConnection, external_url, \
+ config_environment
+
+class Test(unittest.TestCase):
+ def setUp(self):
+ config_environment()
+
+ def test_create_hit_external(self):
+ q = ExternalQuestion(external_url=external_url, frame_height=800)
+ conn = SetHostMTurkConnection()
+        keywords = ['boto', 'test', 'doctest']
+        create_hit_rs = conn.create_hit(
+            question=q, lifetime=60*65, max_assignments=2,
+            title="Boto External Question Test", keywords=keywords,
+            reward=0.05, duration=60*6, approval_delay=60*60,
+            annotation='An annotation from boto external question test',
+            response_groups=['Minimal', 'HITDetail', 'HITQuestion',
+                             'HITAssignmentSummary'])
+ assert(create_hit_rs.status == True)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/boto/mturk/test/create_hit_from_hit_type.doctest b/tests/mturk/create_hit_from_hit_type.doctest
similarity index 100%
rename from boto/mturk/test/create_hit_from_hit_type.doctest
rename to tests/mturk/create_hit_from_hit_type.doctest
diff --git a/boto/mturk/test/create_hit_test.py b/tests/mturk/create_hit_test.py
similarity index 100%
rename from boto/mturk/test/create_hit_test.py
rename to tests/mturk/create_hit_test.py
diff --git a/boto/mturk/test/create_hit_with_qualifications.py b/tests/mturk/create_hit_with_qualifications.py
similarity index 69%
rename from boto/mturk/test/create_hit_with_qualifications.py
rename to tests/mturk/create_hit_with_qualifications.py
index 9ef2bc5..04559c1 100644
--- a/boto/mturk/test/create_hit_with_qualifications.py
+++ b/tests/mturk/create_hit_with_qualifications.py
@@ -8,7 +8,7 @@
keywords=['boto', 'test', 'doctest']
qualifications = Qualifications()
qualifications.add(PercentAssignmentsApprovedRequirement(comparator="GreaterThan", integer_value="95"))
- create_hit_rs = conn.create_hit(question=q, lifetime=60*65,max_assignments=2,title="Boto External Question Test", keywords=keywords,reward = 0.05, duration=60*6,approval_delay=60*60, annotation='An annotation from boto external question test', qualifications=qualifications)
+ create_hit_rs = conn.create_hit(question=q, lifetime=60*65, max_assignments=2, title="Boto External Question Test", keywords=keywords, reward = 0.05, duration=60*6, approval_delay=60*60, annotation='An annotation from boto external question test', qualifications=qualifications)
assert(create_hit_rs.status == True)
print create_hit_rs.HITTypeId
diff --git a/boto/mturk/test/hit_persistence.py b/tests/mturk/hit_persistence.py
similarity index 100%
rename from boto/mturk/test/hit_persistence.py
rename to tests/mturk/hit_persistence.py
diff --git a/boto/mturk/test/mocks.py b/tests/mturk/mocks.py
similarity index 100%
rename from boto/mturk/test/mocks.py
rename to tests/mturk/mocks.py
diff --git a/boto/mturk/test/reviewable_hits.doctest b/tests/mturk/reviewable_hits.doctest
similarity index 100%
rename from boto/mturk/test/reviewable_hits.doctest
rename to tests/mturk/reviewable_hits.doctest
diff --git a/boto/mturk/test/run-doctest.py b/tests/mturk/run-doctest.py
similarity index 86%
rename from boto/mturk/test/run-doctest.py
rename to tests/mturk/run-doctest.py
index 251b7e0..802b773 100644
--- a/boto/mturk/test/run-doctest.py
+++ b/tests/mturk/run-doctest.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
import argparse
import doctest
@@ -12,4 +10,4 @@
doctest.testfile(
args.test_name,
optionflags=doctest.REPORT_ONLY_FIRST_FAILURE
- )
\ No newline at end of file
+ )
diff --git a/boto/mturk/test/search_hits.doctest b/tests/mturk/search_hits.doctest
similarity index 100%
rename from boto/mturk/test/search_hits.doctest
rename to tests/mturk/search_hits.doctest
diff --git a/boto/mturk/test/selenium_support.py b/tests/mturk/selenium_support.py
similarity index 100%
rename from boto/mturk/test/selenium_support.py
rename to tests/mturk/selenium_support.py
diff --git a/tests/mturk/support.py b/tests/mturk/support.py
new file mode 100644
index 0000000..2630825
--- /dev/null
+++ b/tests/mturk/support.py
@@ -0,0 +1,7 @@
+import sys
+
+# use unittest2 under Python 2.6 and earlier.
+if sys.version_info >= (2, 7):
+ import unittest
+else:
+ import unittest2 as unittest
diff --git a/boto/mturk/test/test_disable_hit.py b/tests/mturk/test_disable_hit.py
similarity index 85%
rename from boto/mturk/test/test_disable_hit.py
rename to tests/mturk/test_disable_hit.py
index 0913443..2d9bd9b 100644
--- a/boto/mturk/test/test_disable_hit.py
+++ b/tests/mturk/test_disable_hit.py
@@ -1,4 +1,4 @@
-from boto.mturk.test.support import unittest
+from tests.mturk.support import unittest
from common import MTurkCommon
from boto.mturk.connection import MTurkRequestError
diff --git a/tests/s3/test_versioning.py b/tests/s3/test_versioning.py
deleted file mode 100644
index 7a84b99..0000000
--- a/tests/s3/test_versioning.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
-# Copyright (c) 2010, Eucalyptus Systems, Inc.
-# All rights reserved.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish, dis-
-# tribute, sublicense, and/or sell copies of the Software, and to permit
-# persons to whom the Software is furnished to do so, subject to the fol-
-# lowing conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
-# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-"""
-Some unit tests for the S3 Versioning and MfaDelete
-"""
-
-import unittest
-import time
-from boto.s3.connection import S3Connection
-from boto.exception import S3ResponseError
-from boto.s3.deletemarker import DeleteMarker
-
-class S3VersionTest (unittest.TestCase):
-
- def test_1_versions(self):
- print '--- running S3Version tests ---'
- c = S3Connection()
- # create a new, empty bucket
- bucket_name = 'version-%d' % int(time.time())
- bucket = c.create_bucket(bucket_name)
-
- # now try a get_bucket call and see if it's really there
- bucket = c.get_bucket(bucket_name)
-
- # enable versions
- d = bucket.get_versioning_status()
- assert not d.has_key('Versioning')
- bucket.configure_versioning(versioning=True)
- time.sleep(15)
- d = bucket.get_versioning_status()
- assert d['Versioning'] == 'Enabled'
-
- # create a new key in the versioned bucket
- k = bucket.new_key()
- k.name = 'foobar'
- s1 = 'This is a test of s3 versioning'
- s2 = 'This is the second test of s3 versioning'
- k.set_contents_from_string(s1)
- time.sleep(5)
-
- # remember the version id of this object
- v1 = k.version_id
-
- # now get the contents from s3
- o1 = k.get_contents_as_string()
-
- # check to make sure content read from s3 is identical to original
- assert o1 == s1
-
- # now overwrite that same key with new data
- k.set_contents_from_string(s2)
- v2 = k.version_id
- time.sleep(5)
-
- # now retrieve the contents as a string and compare
- s3 = k.get_contents_as_string(version_id=v2)
- assert s3 == s2
-
- # Now list all versions and compare to what we have
- rs = bucket.get_all_versions()
- assert rs[0].version_id == v2
- assert rs[1].version_id == v1
-
- # Now do a regular list command and make sure only the new key shows up
- rs = bucket.get_all_keys()
- assert len(rs) == 1
-
- # Now do regular delete
- bucket.delete_key('foobar')
- time.sleep(5)
-
- # Now list versions and make sure old versions are there
- # plus the DeleteMarker
- rs = bucket.get_all_versions()
- assert len(rs) == 3
- assert isinstance(rs[0], DeleteMarker)
-
- # Now delete v1 of the key
- bucket.delete_key('foobar', version_id=v1)
- time.sleep(5)
-
- # Now list versions again and make sure v1 is not there
- rs = bucket.get_all_versions()
- versions = [k.version_id for k in rs]
- assert v1 not in versions
- assert v2 in versions
-
- # Now try to enable MfaDelete
- mfa_sn = raw_input('MFA S/N: ')
- mfa_code = raw_input('MFA Code: ')
- bucket.configure_versioning(True, mfa_delete=True, mfa_token=(mfa_sn, mfa_code))
- i = 0
- for i in range(1,8):
- time.sleep(2**i)
- d = bucket.get_versioning_status()
- if d['Versioning'] == 'Enabled' and d['MfaDelete'] == 'Enabled':
- break
- assert d['Versioning'] == 'Enabled'
- assert d['MfaDelete'] == 'Enabled'
-
- # Now try to delete v2 without the MFA token
- try:
- bucket.delete_key('foobar', version_id=v2)
- except S3ResponseError:
- pass
-
- # Now try to delete v2 with the MFA token
- mfa_code = raw_input('MFA Code: ')
- bucket.delete_key('foobar', version_id=v2, mfa_token=(mfa_sn, mfa_code))
-
- # Now disable MfaDelete on the bucket
- mfa_code = raw_input('MFA Code: ')
- bucket.configure_versioning(True, mfa_delete=False, mfa_token=(mfa_sn, mfa_code))
-
- # Now suspend Versioning on the bucket
- bucket.configure_versioning(False)
-
- # now delete all keys and deletemarkers in bucket
- for k in bucket.list_versions():
- bucket.delete_key(k.name, version_id=k.version_id)
-
- # now delete bucket
- c.delete_bucket(bucket)
- print '--- tests completed ---'
diff --git a/tests/test.py b/tests/test.py
index 9e14cda..68e7af2 100755
--- a/tests/test.py
+++ b/tests/test.py
@@ -20,95 +20,37 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-"""
-do the unit tests!
-"""
-
import logging
import sys
import unittest
-import getopt
-from sqs.test_connection import SQSConnectionTest
-from s3.test_connection import S3ConnectionTest
-from s3.test_versioning import S3VersionTest
-from s3.test_encryption import S3EncryptionTest
-from s3.test_gsconnection import GSConnectionTest
-from s3.test_https_cert_validation import CertValidationTest
-from ec2.test_connection import EC2ConnectionTest
-from autoscale.test_connection import AutoscaleConnectionTest
-from sdb.test_connection import SDBConnectionTest
-from cloudfront.test_signed_urls import CloudfrontSignedUrlsTest
+from nose.core import run
+import argparse
-def usage():
- print "test.py [-t testsuite] [-v verbosity]"
- print " -t run specific testsuite (s3|ssl|s3ver|s3nover|gs|sqs|ec2|sdb|all)"
- print " -v verbosity (0|1|2)"
def main():
- try:
- opts, args = getopt.getopt(sys.argv[1:], "ht:v:",
- ["help", "testsuite", "verbosity"])
- except:
- usage()
- sys.exit(2)
- testsuite = "all"
- verbosity = 1
- for o, a in opts:
- if o in ("-h", "--help"):
- usage()
- sys.exit()
- if o in ("-t", "--testsuite"):
- testsuite = a
- if o in ("-v", "--verbosity"):
- verbosity = int(a)
- if len(args) != 0:
- usage()
- sys.exit()
- try:
- tests = suite(testsuite)
- except ValueError:
- usage()
- sys.exit()
- if verbosity > 1:
- logging.basicConfig(level=logging.DEBUG)
- unittest.TextTestRunner(verbosity=verbosity).run(tests)
-
-def suite(testsuite="all"):
- tests = unittest.TestSuite()
- if testsuite == "all":
- tests.addTest(unittest.makeSuite(SQSConnectionTest))
- tests.addTest(unittest.makeSuite(S3ConnectionTest))
- tests.addTest(unittest.makeSuite(EC2ConnectionTest))
- tests.addTest(unittest.makeSuite(SDBConnectionTest))
- tests.addTest(unittest.makeSuite(AutoscaleConnectionTest))
- tests.addTest(unittest.makeSuite(CloudfrontSignedUrlsTest))
- elif testsuite == "s3":
- tests.addTest(unittest.makeSuite(S3ConnectionTest))
- tests.addTest(unittest.makeSuite(S3VersionTest))
- tests.addTest(unittest.makeSuite(S3EncryptionTest))
- elif testsuite == "ssl":
- tests.addTest(unittest.makeSuite(CertValidationTest))
- elif testsuite == "s3ver":
- tests.addTest(unittest.makeSuite(S3VersionTest))
- elif testsuite == "s3nover":
- tests.addTest(unittest.makeSuite(S3ConnectionTest))
- tests.addTest(unittest.makeSuite(S3EncryptionTest))
- elif testsuite == "gs":
- tests.addTest(unittest.makeSuite(GSConnectionTest))
- elif testsuite == "sqs":
- tests.addTest(unittest.makeSuite(SQSConnectionTest))
- elif testsuite == "ec2":
- tests.addTest(unittest.makeSuite(EC2ConnectionTest))
- elif testsuite == "autoscale":
- tests.addTest(unittest.makeSuite(AutoscaleConnectionTest))
- elif testsuite == "sdb":
- tests.addTest(unittest.makeSuite(SDBConnectionTest))
- elif testsuite == "cloudfront":
- tests.addTest(unittest.makeSuite(CloudfrontSignedUrlsTest))
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-t', '--service-tests', action="append", default=[],
+ help="Run tests for a given service. This will "
+ "run any test tagged with the specified value, "
+                             "e.g. -t s3 -t ec2")
+ known_args, remaining_args = parser.parse_known_args()
+ attribute_args = []
+ for service_attribute in known_args.service_tests:
+        attribute_args.extend(['-a', '!notdefault,' + service_attribute])
+ if not attribute_args:
+ # If the user did not specify any filtering criteria, we at least
+ # will filter out any test tagged 'notdefault'.
+ attribute_args = ['-a', '!notdefault']
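+    # nose's attrib plugin ANDs comma-separated attributes, so
+    # '-a !notdefault,s3' selects tests tagged s3 and not tagged
+    # notdefault; repeating -a ORs the filters together.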
+ all_args = [__file__] + attribute_args + remaining_args
+ print "nose command:", ' '.join(all_args)
+ if run(argv=all_args):
+        # run() returns True if all the tests pass; map that to a
+        # zero return code.
+ return 0
else:
- raise ValueError("Invalid choice.")
- return tests
+ return 1
+
if __name__ == "__main__":
- main()
+ sys.exit(main())
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 0000000..4e52b76
--- /dev/null
+++ b/tests/unit/__init__.py
@@ -0,0 +1,75 @@
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+import httplib
+
+from mock import Mock
+
+
+class AWSMockServiceTestCase(unittest.TestCase):
+ """Base class for mocking aws services."""
+ # This param is used by the unittest module to display a full
+ # diff when assert*Equal methods produce an error message.
+ maxDiff = None
+ connection_class = None
+
+ def setUp(self):
+ self.https_connection = Mock(spec=httplib.HTTPSConnection)
+ self.https_connection_factory = (
+ Mock(return_value=self.https_connection), ())
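+        # boto expects (factory, extra_exception_classes) here; the
+        # empty tuple registers no extra retryable exceptions.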
+ self.service_connection = self.create_service_connection(
+ https_connection_factory=self.https_connection_factory,
+ aws_access_key_id='aws_access_key_id',
+ aws_secret_access_key='aws_secret_access_key')
+ self.actual_request = None
+ self.original_mexe = self.service_connection._mexe
+ self.service_connection._mexe = self._mexe_spy
+
+ def create_service_connection(self, **kwargs):
+ if self.connection_class is None:
+ raise ValueError("The connection_class class attribute must be "
+ "set to a non-None value.")
+ return self.connection_class(**kwargs)
+
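+    # _mexe is boto's low-level "make and execute request" method; the
+    # spy records the outgoing request, then delegates to the real one.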
+ def _mexe_spy(self, request, *args, **kwargs):
+ self.actual_request = request
+ return self.original_mexe(request, *args, **kwargs)
+
+    def create_response(self, status_code, reason='', header=None, body=None):
+        if body is None:
+            body = self.default_body()
+        if header is None:
+            header = []
+ response = Mock(spec=httplib.HTTPResponse)
+ response.status = status_code
+ response.read.return_value = body
+ response.reason = reason
+
+ response.getheaders.return_value = header
+ def overwrite_header(arg, default=None):
+ header_dict = dict(header)
+            if arg in header_dict:
+ return header_dict[arg]
+ else:
+ return default
+ response.getheader.side_effect = overwrite_header
+
+ return response
+
+ def assert_request_parameters(self, params, ignore_params_values=None):
+ """Verify the actual parameters sent to the service API."""
+ request_params = self.actual_request.params.copy()
+ if ignore_params_values is not None:
+ for param in ignore_params_values:
+                # We still want to check that the ignore_params_values
+                # params are present in the request parameters; we just
+                # don't need to check their values.
+ self.assertIn(param, request_params)
+ del request_params[param]
+ self.assertDictEqual(request_params, params)
+
+    def set_http_response(self, status_code, reason='', header=None, body=None):
+ http_response = self.create_response(status_code, reason, header, body)
+ self.https_connection.getresponse.return_value = http_response
+
+ def default_body(self):
+ return ''
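+
+
+# A minimal sketch of how the service test cases use this base class
+# (MyServiceConnection and my_call are hypothetical names): set
+# connection_class, override default_body(), then drive a mocked call
+# and inspect the request parameters that would have been sent:
+#
+#     class TestMyService(AWSMockServiceTestCase):
+#         connection_class = MyServiceConnection
+#
+#         def default_body(self):
+#             return '<MyResponse/>'
+#
+#         def test_my_call(self):
+#             self.set_http_response(status_code=200)
+#             self.service_connection.my_call()
+#             self.assert_request_parameters({'Action': 'MyCall'},
+#                 ignore_params_values=['Timestamp'])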
diff --git a/tests/cloudfront/__init__.py b/tests/unit/beanstalk/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/unit/beanstalk/__init__.py
diff --git a/tests/unit/beanstalk/test_layer1.py b/tests/unit/beanstalk/test_layer1.py
new file mode 100644
index 0000000..6df7537
--- /dev/null
+++ b/tests/unit/beanstalk/test_layer1.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+
+import json
+
+from tests.unit import AWSMockServiceTestCase
+
+from boto.beanstalk.layer1 import Layer1
+
+# These tests only check the basic structure of the Elastic Beanstalk
+# code by picking a few calls and verifying that we get the expected
+# results with mocked responses.  The integration tests verify that
+# the API calls actually interact with the service correctly.
+class TestListAvailableSolutionStacks(AWSMockServiceTestCase):
+ connection_class = Layer1
+
+ def default_body(self):
+ return json.dumps(
+ {u'ListAvailableSolutionStacksResponse':
+ {u'ListAvailableSolutionStacksResult':
+ {u'SolutionStackDetails': [
+ {u'PermittedFileTypes': [u'war', u'zip'],
+ u'SolutionStackName': u'32bit Amazon Linux running Tomcat 7'},
+ {u'PermittedFileTypes': [u'zip'],
+ u'SolutionStackName': u'32bit Amazon Linux running PHP 5.3'}],
+ u'SolutionStacks': [u'32bit Amazon Linux running Tomcat 7',
+ u'32bit Amazon Linux running PHP 5.3']},
+ u'ResponseMetadata': {u'RequestId': u'request_id'}}})
+
+ def test_list_available_solution_stacks(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.list_available_solution_stacks()
+ stack_details = api_response['ListAvailableSolutionStacksResponse']\
+ ['ListAvailableSolutionStacksResult']\
+ ['SolutionStackDetails']
+ solution_stacks = api_response['ListAvailableSolutionStacksResponse']\
+ ['ListAvailableSolutionStacksResult']\
+ ['SolutionStacks']
+ self.assertEqual(solution_stacks,
+ [u'32bit Amazon Linux running Tomcat 7',
+ u'32bit Amazon Linux running PHP 5.3'])
+        # These are the parameters that are actually sent to the
+        # Elastic Beanstalk service.
+ self.assert_request_parameters({
+ 'Action': 'ListAvailableSolutionStacks',
+ 'ContentType': 'JSON',
+ 'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': 2,
+ 'Version': '2010-12-01',
+ 'AWSAccessKeyId': 'aws_access_key_id',
+ }, ignore_params_values=['Timestamp'])
+
+
+class TestCreateApplicationVersion(AWSMockServiceTestCase):
+ connection_class = Layer1
+
+ def default_body(self):
+ return json.dumps({
+ 'CreateApplicationVersionResponse':
+ {u'CreateApplicationVersionResult':
+ {u'ApplicationVersion':
+ {u'ApplicationName': u'application1',
+ u'DateCreated': 1343067094.342,
+ u'DateUpdated': 1343067094.342,
+ u'Description': None,
+ u'SourceBundle': {u'S3Bucket': u'elasticbeanstalk-us-east-1',
+ u'S3Key': u'resources/elasticbeanstalk-sampleapp.war'},
+ u'VersionLabel': u'version1'}}}})
+
+ def test_create_application_version(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_application_version(
+ 'application1', 'version1', s3_bucket='mybucket', s3_key='mykey',
+ auto_create_application=True)
+ app_version = api_response['CreateApplicationVersionResponse']\
+ ['CreateApplicationVersionResult']\
+ ['ApplicationVersion']
+ self.assert_request_parameters({
+ 'Action': 'CreateApplicationVersion',
+ 'ContentType': 'JSON',
+ 'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': 2,
+ 'Version': '2010-12-01',
+ 'ApplicationName': 'application1',
+ 'AutoCreateApplication': 'true',
+ 'SourceBundle.S3Bucket': 'mybucket',
+ 'SourceBundle.S3Key': 'mykey',
+ 'VersionLabel': 'version1',
+ 'AWSAccessKeyId': 'aws_access_key_id',
+ }, ignore_params_values=['Timestamp'])
+ self.assertEqual(app_version['ApplicationName'], 'application1')
+ self.assertEqual(app_version['VersionLabel'], 'version1')
+
+
+class TestCreateEnvironment(AWSMockServiceTestCase):
+ connection_class = Layer1
+
+ def default_body(self):
+ return json.dumps({})
+
+ def test_create_environment(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_environment(
+ 'application1', 'environment1', 'version1',
+ '32bit Amazon Linux running Tomcat 7',
+ option_settings=[
+ ('aws:autoscaling:launchconfiguration', 'Ec2KeyName',
+ 'mykeypair'),
+ ('aws:elasticbeanstalk:application:environment', 'ENVVAR',
+ 'VALUE1')])
+ self.assert_request_parameters({
+ 'Action': 'CreateEnvironment',
+ 'ApplicationName': 'application1',
+ 'EnvironmentName': 'environment1',
+ 'TemplateName': '32bit Amazon Linux running Tomcat 7',
+ 'ContentType': 'JSON',
+ 'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': 2,
+ 'Version': '2010-12-01',
+ 'VersionLabel': 'version1',
+ 'AWSAccessKeyId': 'aws_access_key_id',
+ 'OptionSettings.member.1.Namespace': 'aws:autoscaling:launchconfiguration',
+ 'OptionSettings.member.1.OptionName': 'Ec2KeyName',
+ 'OptionSettings.member.1.Value': 'mykeypair',
+ 'OptionSettings.member.2.Namespace': 'aws:elasticbeanstalk:application:environment',
+ 'OptionSettings.member.2.OptionName': 'ENVVAR',
+ 'OptionSettings.member.2.Value': 'VALUE1',
+ }, ignore_params_values=['Timestamp'])
diff --git a/tests/cloudfront/__init__.py b/tests/unit/cloudformation/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/unit/cloudformation/__init__.py
diff --git a/tests/unit/cloudformation/test_connection.py b/tests/unit/cloudformation/test_connection.py
new file mode 100644
index 0000000..d7f86c7
--- /dev/null
+++ b/tests/unit/cloudformation/test_connection.py
@@ -0,0 +1,605 @@
+#!/usr/bin/env python
+import unittest
+import httplib
+from datetime import datetime
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+from mock import Mock
+
+from tests.unit import AWSMockServiceTestCase
+from boto.cloudformation.connection import CloudFormationConnection
+
+
+SAMPLE_TEMPLATE = r"""
+{
+ "AWSTemplateFormatVersion" : "2010-09-09",
+ "Description" : "Sample template",
+ "Parameters" : {
+ "KeyName" : {
+ "Description" : "key pair",
+ "Type" : "String"
+ }
+ },
+ "Resources" : {
+ "Ec2Instance" : {
+ "Type" : "AWS::EC2::Instance",
+ "Properties" : {
+ "KeyName" : { "Ref" : "KeyName" },
+ "ImageId" : "ami-7f418316",
+ "UserData" : { "Fn::Base64" : "80" }
+ }
+ }
+ },
+ "Outputs" : {
+ "InstanceId" : {
+ "Description" : "InstanceId of the newly created EC2 instance",
+ "Value" : { "Ref" : "Ec2Instance" }
+ }
+  }
+}
+"""
+
+class CloudFormationConnectionBase(AWSMockServiceTestCase):
+ connection_class = CloudFormationConnection
+
+ def setUp(self):
+ super(CloudFormationConnectionBase, self).setUp()
+ self.stack_id = u'arn:aws:cloudformation:us-east-1:18:stack/Name/id'
+
+
+class TestCloudFormationCreateStack(CloudFormationConnectionBase):
+ def default_body(self):
+ return json.dumps(
+ {u'CreateStackResponse':
+ {u'CreateStackResult': {u'StackId': self.stack_id},
+ u'ResponseMetadata': {u'RequestId': u'1'}}})
+
+ def test_create_stack_has_correct_request_params(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_stack(
+ 'stack_name', template_url='http://url',
+ template_body=SAMPLE_TEMPLATE,
+ parameters=[('KeyName', 'myKeyName')],
+ tags={'TagKey': 'TagValue'},
+ notification_arns=['arn:notify1', 'arn:notify2'],
+ disable_rollback=True,
+ timeout_in_minutes=20, capabilities=['CAPABILITY_IAM']
+ )
+ self.assertEqual(api_response, self.stack_id)
+ # These are the parameters that are actually sent to the CloudFormation
+ # service.
+ self.assert_request_parameters({
+ 'Action': 'CreateStack',
+ 'Capabilities.member.1': 'CAPABILITY_IAM',
+ 'ContentType': 'JSON',
+ 'DisableRollback': 'true',
+ 'NotificationARNs.member.1': 'arn:notify1',
+ 'NotificationARNs.member.2': 'arn:notify2',
+ 'Parameters.member.1.ParameterKey': 'KeyName',
+ 'Parameters.member.1.ParameterValue': 'myKeyName',
+ 'Tags.member.1.Key': 'TagKey',
+ 'Tags.member.1.Value': 'TagValue',
+ 'StackName': 'stack_name',
+ 'Version': '2010-05-15',
+ 'TimeoutInMinutes': 20,
+ 'TemplateBody': SAMPLE_TEMPLATE,
+ 'TemplateURL': 'http://url',
+ })
+
+    # test_create_stack_has_correct_request_params above verifies all
+    # of the params needed when making a create_stack service call.
+    # The rest of the create_stack tests only verify specific parts of
+    # the params sent to CloudFormation.
+
+ def test_create_stack_with_minimum_args(self):
+ # This will fail in practice, but the API docs only require stack_name.
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_stack('stack_name')
+ self.assertEqual(api_response, self.stack_id)
+ self.assert_request_parameters({
+ 'Action': 'CreateStack',
+ 'ContentType': 'JSON',
+ 'DisableRollback': 'false',
+ 'StackName': 'stack_name',
+ 'Version': '2010-05-15',
+ })
+
+ def test_create_stack_fails(self):
+ self.set_http_response(status_code=400, reason='Bad Request',
+ body='Invalid arg.')
+ with self.assertRaises(self.service_connection.ResponseError):
+ api_response = self.service_connection.create_stack(
+ 'stack_name', template_body=SAMPLE_TEMPLATE,
+ parameters=[('KeyName', 'myKeyName')])
+
+
+class TestCloudFormationUpdateStack(CloudFormationConnectionBase):
+ def default_body(self):
+ return json.dumps(
+ {u'UpdateStackResponse':
+ {u'UpdateStackResult': {u'StackId': self.stack_id},
+ u'ResponseMetadata': {u'RequestId': u'1'}}})
+
+ def test_update_stack_all_args(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.update_stack(
+ 'stack_name', template_url='http://url',
+ template_body=SAMPLE_TEMPLATE,
+ parameters=[('KeyName', 'myKeyName')],
+ tags={'TagKey': 'TagValue'},
+ notification_arns=['arn:notify1', 'arn:notify2'],
+ disable_rollback=True,
+ timeout_in_minutes=20
+ )
+ self.assert_request_parameters({
+ 'Action': 'UpdateStack',
+ 'ContentType': 'JSON',
+ 'DisableRollback': 'true',
+ 'NotificationARNs.member.1': 'arn:notify1',
+ 'NotificationARNs.member.2': 'arn:notify2',
+ 'Parameters.member.1.ParameterKey': 'KeyName',
+ 'Parameters.member.1.ParameterValue': 'myKeyName',
+ 'Tags.member.1.Key': 'TagKey',
+ 'Tags.member.1.Value': 'TagValue',
+ 'StackName': 'stack_name',
+ 'Version': '2010-05-15',
+ 'TimeoutInMinutes': 20,
+ 'TemplateBody': SAMPLE_TEMPLATE,
+ 'TemplateURL': 'http://url',
+ })
+
+ def test_update_stack_with_minimum_args(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.update_stack('stack_name')
+ self.assertEqual(api_response, self.stack_id)
+ self.assert_request_parameters({
+ 'Action': 'UpdateStack',
+ 'ContentType': 'JSON',
+ 'DisableRollback': 'false',
+ 'StackName': 'stack_name',
+ 'Version': '2010-05-15',
+ })
+
+ def test_update_stack_fails(self):
+ self.set_http_response(status_code=400, reason='Bad Request',
+ body='Invalid arg.')
+ with self.assertRaises(self.service_connection.ResponseError):
+ api_response = self.service_connection.update_stack(
+ 'stack_name', template_body=SAMPLE_TEMPLATE,
+ parameters=[('KeyName', 'myKeyName')])
+
+
+class TestCloudFormationDeleteStack(CloudFormationConnectionBase):
+ def default_body(self):
+ return json.dumps(
+ {u'DeleteStackResponse':
+ {u'ResponseMetadata': {u'RequestId': u'1'}}})
+
+ def test_delete_stack(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.delete_stack('stack_name')
+ self.assertEqual(api_response, json.loads(self.default_body()))
+ self.assert_request_parameters({
+ 'Action': 'DeleteStack',
+ 'ContentType': 'JSON',
+ 'StackName': 'stack_name',
+ 'Version': '2010-05-15',
+ })
+
+ def test_delete_stack_fails(self):
+ self.set_http_response(status_code=400)
+ with self.assertRaises(self.service_connection.ResponseError):
+ api_response = self.service_connection.delete_stack('stack_name')
+
+
+class TestCloudFormationDescribeStackResource(CloudFormationConnectionBase):
+ def default_body(self):
+ return json.dumps('fake server response')
+
+ def test_describe_stack_resource(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.describe_stack_resource(
+ 'stack_name', 'resource_id')
+ self.assertEqual(api_response, 'fake server response')
+ self.assert_request_parameters({
+ 'Action': 'DescribeStackResource',
+ 'ContentType': 'JSON',
+ 'LogicalResourceId': 'resource_id',
+ 'StackName': 'stack_name',
+ 'Version': '2010-05-15',
+ })
+
+ def test_describe_stack_resource_fails(self):
+ self.set_http_response(status_code=400)
+ with self.assertRaises(self.service_connection.ResponseError):
+ api_response = self.service_connection.describe_stack_resource(
+ 'stack_name', 'resource_id')
+
+
+class TestCloudFormationGetTemplate(CloudFormationConnectionBase):
+ def default_body(self):
+ return json.dumps('fake server response')
+
+ def test_get_template(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.get_template('stack_name')
+ self.assertEqual(api_response, 'fake server response')
+ self.assert_request_parameters({
+ 'Action': 'GetTemplate',
+ 'ContentType': 'JSON',
+ 'StackName': 'stack_name',
+ 'Version': '2010-05-15',
+ })
+
+ def test_get_template_fails(self):
+ self.set_http_response(status_code=400)
+ with self.assertRaises(self.service_connection.ResponseError):
+ api_response = self.service_connection.get_template('stack_name')
+
+
+class TestCloudFormationGetStackevents(CloudFormationConnectionBase):
+ def default_body(self):
+ return """
+ <DescribeStackEventsResult>
+ <StackEvents>
+ <member>
+ <EventId>Event-1-Id</EventId>
+ <StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
+ <StackName>MyStack</StackName>
+ <LogicalResourceId>MyStack</LogicalResourceId>
+ <PhysicalResourceId>MyStack_One</PhysicalResourceId>
+ <ResourceType>AWS::CloudFormation::Stack</ResourceType>
+ <Timestamp>2010-07-27T22:26:28Z</Timestamp>
+ <ResourceStatus>CREATE_IN_PROGRESS</ResourceStatus>
+ <ResourceStatusReason>User initiated</ResourceStatusReason>
+ </member>
+ <member>
+ <EventId>Event-2-Id</EventId>
+ <StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
+ <StackName>MyStack</StackName>
+ <LogicalResourceId>MySG1</LogicalResourceId>
+ <PhysicalResourceId>MyStack_SG1</PhysicalResourceId>
+ <ResourceType>AWS::SecurityGroup</ResourceType>
+ <Timestamp>2010-07-27T22:28:28Z</Timestamp>
+ <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
+ </member>
+ </StackEvents>
+ </DescribeStackEventsResult>
+ """
+
+ def test_describe_stack_events(self):
+ self.set_http_response(status_code=200)
+ first, second = self.service_connection.describe_stack_events('stack_name', next_token='next_token')
+ self.assertEqual(first.event_id, 'Event-1-Id')
+ self.assertEqual(first.logical_resource_id, 'MyStack')
+ self.assertEqual(first.physical_resource_id, 'MyStack_One')
+ self.assertEqual(first.resource_properties, None)
+ self.assertEqual(first.resource_status, 'CREATE_IN_PROGRESS')
+ self.assertEqual(first.resource_status_reason, 'User initiated')
+ self.assertEqual(first.resource_type, 'AWS::CloudFormation::Stack')
+ self.assertEqual(first.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
+ self.assertEqual(first.stack_name, 'MyStack')
+ self.assertIsNotNone(first.timestamp)
+
+ self.assertEqual(second.event_id, 'Event-2-Id')
+ self.assertEqual(second.logical_resource_id, 'MySG1')
+ self.assertEqual(second.physical_resource_id, 'MyStack_SG1')
+ self.assertEqual(second.resource_properties, None)
+ self.assertEqual(second.resource_status, 'CREATE_COMPLETE')
+ self.assertEqual(second.resource_status_reason, None)
+ self.assertEqual(second.resource_type, 'AWS::SecurityGroup')
+ self.assertEqual(second.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
+ self.assertEqual(second.stack_name, 'MyStack')
+ self.assertIsNotNone(second.timestamp)
+
+ self.assert_request_parameters({
+ 'Action': 'DescribeStackEvents',
+ 'NextToken': 'next_token',
+ 'StackName': 'stack_name',
+ 'Version': '2010-05-15',
+ })
+
+
+class TestCloudFormationDescribeStackResources(CloudFormationConnectionBase):
+ def default_body(self):
+ return """
+ <DescribeStackResourcesResult>
+ <StackResources>
+ <member>
+ <StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
+ <StackName>MyStack</StackName>
+ <LogicalResourceId>MyDBInstance</LogicalResourceId>
+ <PhysicalResourceId>MyStack_DB1</PhysicalResourceId>
+ <ResourceType>AWS::DBInstance</ResourceType>
+ <Timestamp>2010-07-27T22:27:28Z</Timestamp>
+ <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
+ </member>
+ <member>
+ <StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
+ <StackName>MyStack</StackName>
+ <LogicalResourceId>MyAutoScalingGroup</LogicalResourceId>
+ <PhysicalResourceId>MyStack_ASG1</PhysicalResourceId>
+ <ResourceType>AWS::AutoScalingGroup</ResourceType>
+ <Timestamp>2010-07-27T22:28:28Z</Timestamp>
+ <ResourceStatus>CREATE_IN_PROGRESS</ResourceStatus>
+ </member>
+ </StackResources>
+ </DescribeStackResourcesResult>
+ """
+
+ def test_describe_stack_resources(self):
+ self.set_http_response(status_code=200)
+ first, second = self.service_connection.describe_stack_resources(
+ 'stack_name', 'logical_resource_id', 'physical_resource_id')
+ self.assertEqual(first.description, None)
+ self.assertEqual(first.logical_resource_id, 'MyDBInstance')
+ self.assertEqual(first.physical_resource_id, 'MyStack_DB1')
+ self.assertEqual(first.resource_status, 'CREATE_COMPLETE')
+ self.assertEqual(first.resource_status_reason, None)
+ self.assertEqual(first.resource_type, 'AWS::DBInstance')
+ self.assertEqual(first.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
+ self.assertEqual(first.stack_name, 'MyStack')
+ self.assertIsNotNone(first.timestamp)
+
+ self.assertEqual(second.description, None)
+ self.assertEqual(second.logical_resource_id, 'MyAutoScalingGroup')
+ self.assertEqual(second.physical_resource_id, 'MyStack_ASG1')
+ self.assertEqual(second.resource_status, 'CREATE_IN_PROGRESS')
+ self.assertEqual(second.resource_status_reason, None)
+ self.assertEqual(second.resource_type, 'AWS::AutoScalingGroup')
+ self.assertEqual(second.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
+ self.assertEqual(second.stack_name, 'MyStack')
+ self.assertIsNotNone(second.timestamp)
+
+ self.assert_request_parameters({
+ 'Action': 'DescribeStackResources',
+ 'LogicalResourceId': 'logical_resource_id',
+ 'PhysicalResourceId': 'physical_resource_id',
+ 'StackName': 'stack_name',
+ 'Version': '2010-05-15',
+ })
+
+
+class TestCloudFormationDescribeStacks(CloudFormationConnectionBase):
+ def default_body(self):
+ return """
+ <DescribeStacksResponse>
+ <DescribeStacksResult>
+ <Stacks>
+ <member>
+ <StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
+ <StackStatus>CREATE_COMPLETE</StackStatus>
+ <StackName>MyStack</StackName>
+ <StackStatusReason/>
+ <Description>My Description</Description>
+ <CreationTime>2012-05-16T22:55:31Z</CreationTime>
+ <Capabilities>
+ <member>CAPABILITY_IAM</member>
+ </Capabilities>
+ <NotificationARNs>
+ <member>arn:aws:sns:region-name:account-name:topic-name</member>
+ </NotificationARNs>
+ <DisableRollback>false</DisableRollback>
+ <Parameters>
+ <member>
+ <ParameterValue>MyValue</ParameterValue>
+ <ParameterKey>MyKey</ParameterKey>
+ </member>
+ </Parameters>
+ <Outputs>
+ <member>
+ <OutputValue>http://url/</OutputValue>
+ <Description>Server URL</Description>
+ <OutputKey>ServerURL</OutputKey>
+ </member>
+ </Outputs>
+ <Tags>
+ <member>
+ <Key>MyTagKey</Key>
+ <Value>MyTagValue</Value>
+ </member>
+ </Tags>
+ </member>
+ </Stacks>
+ </DescribeStacksResult>
+ <ResponseMetadata>
+ <RequestId>12345</RequestId>
+ </ResponseMetadata>
+ </DescribeStacksResponse>
+ """
+
+ def test_describe_stacks(self):
+ self.set_http_response(status_code=200)
+
+ stacks = self.service_connection.describe_stacks('MyStack')
+ self.assertEqual(len(stacks), 1)
+
+ stack = stacks[0]
+ self.assertEqual(stack.creation_time,
+ datetime(2012, 5, 16, 22, 55, 31))
+ self.assertEqual(stack.description, 'My Description')
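+        # The sample XML carries <DisableRollback>false</DisableRollback>,
+        # but boto coerces the value with bool(), and bool('false') is
+        # True, so the parsed attribute reads True here.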
+ self.assertEqual(stack.disable_rollback, True)
+ self.assertEqual(stack.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
+ self.assertEqual(stack.stack_status, 'CREATE_COMPLETE')
+ self.assertEqual(stack.stack_name, 'MyStack')
+ self.assertEqual(stack.stack_name_reason, None)
+ self.assertEqual(stack.timeout_in_minutes, None)
+
+ self.assertEqual(len(stack.outputs), 1)
+ self.assertEqual(stack.outputs[0].description, 'Server URL')
+ self.assertEqual(stack.outputs[0].key, 'ServerURL')
+ self.assertEqual(stack.outputs[0].value, 'http://url/')
+
+ self.assertEqual(len(stack.parameters), 1)
+ self.assertEqual(stack.parameters[0].key, 'MyKey')
+ self.assertEqual(stack.parameters[0].value, 'MyValue')
+
+ self.assertEqual(len(stack.capabilities), 1)
+ self.assertEqual(stack.capabilities[0].value, 'CAPABILITY_IAM')
+
+ self.assertEqual(len(stack.notification_arns), 1)
+ self.assertEqual(stack.notification_arns[0].value, 'arn:aws:sns:region-name:account-name:topic-name')
+
+ self.assertEqual(len(stack.tags), 1)
+ self.assertEqual(stack.tags['MyTagKey'], 'MyTagValue')
+
+ self.assert_request_parameters({
+ 'Action': 'DescribeStacks',
+ 'StackName': 'MyStack',
+ 'Version': '2010-05-15',
+ })
+
+
+class TestCloudFormationListStackResources(CloudFormationConnectionBase):
+ def default_body(self):
+ return """
+ <ListStackResourcesResponse>
+ <ListStackResourcesResult>
+ <StackResourceSummaries>
+ <member>
+ <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
+ <LogicalResourceId>SampleDB</LogicalResourceId>
+ <LastUpdatedTimestamp>2011-06-21T20:25:57Z</LastUpdatedTimestamp>
+ <PhysicalResourceId>My-db-ycx</PhysicalResourceId>
+ <ResourceType>AWS::RDS::DBInstance</ResourceType>
+ </member>
+ <member>
+ <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
+ <LogicalResourceId>CPUAlarmHigh</LogicalResourceId>
+ <LastUpdatedTimestamp>2011-06-21T20:29:23Z</LastUpdatedTimestamp>
+ <PhysicalResourceId>MyStack-CPUH-PF</PhysicalResourceId>
+ <ResourceType>AWS::CloudWatch::Alarm</ResourceType>
+ </member>
+ </StackResourceSummaries>
+ </ListStackResourcesResult>
+ <ResponseMetadata>
+ <RequestId>2d06e36c-ac1d-11e0-a958-f9382b6eb86b</RequestId>
+ </ResponseMetadata>
+ </ListStackResourcesResponse>
+ """
+
+ def test_list_stack_resources(self):
+ self.set_http_response(status_code=200)
+ resources = self.service_connection.list_stack_resources('MyStack',
+ next_token='next_token')
+ self.assertEqual(len(resources), 2)
+ self.assertEqual(resources[0].last_updated_timestamp,
+ datetime(2011, 6, 21, 20, 25, 57))
+ self.assertEqual(resources[0].logical_resource_id, 'SampleDB')
+ self.assertEqual(resources[0].physical_resource_id, 'My-db-ycx')
+ self.assertEqual(resources[0].resource_status, 'CREATE_COMPLETE')
+ self.assertEqual(resources[0].resource_status_reason, None)
+ self.assertEqual(resources[0].resource_type, 'AWS::RDS::DBInstance')
+
+ self.assertEqual(resources[1].last_updated_timestamp,
+ datetime(2011, 6, 21, 20, 29, 23))
+ self.assertEqual(resources[1].logical_resource_id, 'CPUAlarmHigh')
+ self.assertEqual(resources[1].physical_resource_id, 'MyStack-CPUH-PF')
+ self.assertEqual(resources[1].resource_status, 'CREATE_COMPLETE')
+ self.assertEqual(resources[1].resource_status_reason, None)
+ self.assertEqual(resources[1].resource_type, 'AWS::CloudWatch::Alarm')
+
+ self.assert_request_parameters({
+ 'Action': 'ListStackResources',
+ 'NextToken': 'next_token',
+ 'StackName': 'MyStack',
+ 'Version': '2010-05-15',
+ })
+
+
+class TestCloudFormationListStacks(CloudFormationConnectionBase):
+ def default_body(self):
+ return """
+ <ListStacksResponse>
+ <ListStacksResult>
+ <StackSummaries>
+ <member>
+ <StackId>arn:aws:cfn:us-east-1:1:stack/Test1/aa</StackId>
+ <StackStatus>CREATE_IN_PROGRESS</StackStatus>
+ <StackName>vpc1</StackName>
+ <CreationTime>2011-05-23T15:47:44Z</CreationTime>
+ <TemplateDescription>My Description.</TemplateDescription>
+ </member>
+ </StackSummaries>
+ </ListStacksResult>
+ </ListStacksResponse>
+ """
+
+ def test_list_stacks(self):
+ self.set_http_response(status_code=200)
+ stacks = self.service_connection.list_stacks(['CREATE_IN_PROGRESS'],
+ next_token='next_token')
+ self.assertEqual(len(stacks), 1)
+ self.assertEqual(stacks[0].stack_id,
+ 'arn:aws:cfn:us-east-1:1:stack/Test1/aa')
+ self.assertEqual(stacks[0].stack_status, 'CREATE_IN_PROGRESS')
+ self.assertEqual(stacks[0].stack_name, 'vpc1')
+ self.assertEqual(stacks[0].creation_time,
+ datetime(2011, 5, 23, 15, 47, 44))
+ self.assertEqual(stacks[0].deletion_time, None)
+ self.assertEqual(stacks[0].template_description, 'My Description.')
+
+ self.assert_request_parameters({
+ 'Action': 'ListStacks',
+ 'NextToken': 'next_token',
+ 'StackStatusFilter.member.1': 'CREATE_IN_PROGRESS',
+ 'Version': '2010-05-15',
+ })
+
+
+class TestCloudFormationValidateTemplate(CloudFormationConnectionBase):
+ def default_body(self):
+ return """
+ <ValidateTemplateResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/">
+ <ValidateTemplateResult>
+ <Description>My Description.</Description>
+ <Parameters>
+ <member>
+ <NoEcho>false</NoEcho>
+ <ParameterKey>InstanceType</ParameterKey>
+ <Description>Type of instance to launch</Description>
+ <DefaultValue>m1.small</DefaultValue>
+ </member>
+ <member>
+ <NoEcho>false</NoEcho>
+ <ParameterKey>KeyName</ParameterKey>
+ <Description>EC2 KeyPair</Description>
+ </member>
+ </Parameters>
+ </ValidateTemplateResult>
+ <ResponseMetadata>
+ <RequestId>0be7b6e8-e4a0-11e0-a5bd-9f8d5a7dbc91</RequestId>
+ </ResponseMetadata>
+ </ValidateTemplateResponse>
+ """
+
+ def test_validate_template(self):
+ self.set_http_response(status_code=200)
+ template = self.service_connection.validate_template(template_body=SAMPLE_TEMPLATE,
+ template_url='http://url')
+ self.assertEqual(template.description, 'My Description.')
+ self.assertEqual(len(template.template_parameters), 2)
+ param1, param2 = template.template_parameters
+ self.assertEqual(param1.default_value, 'm1.small')
+ self.assertEqual(param1.description, 'Type of instance to launch')
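+        # NoEcho is 'false' in the XML but comes back as True, since
+        # boto coerces the non-empty string with bool().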
+ self.assertEqual(param1.no_echo, True)
+ self.assertEqual(param1.parameter_key, 'InstanceType')
+
+ self.assertEqual(param2.default_value, None)
+ self.assertEqual(param2.description, 'EC2 KeyPair')
+ self.assertEqual(param2.no_echo, True)
+ self.assertEqual(param2.parameter_key, 'KeyName')
+
+ self.assert_request_parameters({
+ 'Action': 'ValidateTemplate',
+ 'TemplateBody': SAMPLE_TEMPLATE,
+ 'TemplateURL': 'http://url',
+ 'Version': '2010-05-15',
+ })
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/cloudfront/__init__.py b/tests/unit/cloudfront/__init__.py
similarity index 100%
rename from tests/cloudfront/__init__.py
rename to tests/unit/cloudfront/__init__.py
diff --git a/tests/unit/cloudfront/test_invalidation_list.py b/tests/unit/cloudfront/test_invalidation_list.py
new file mode 100644
index 0000000..2801178
--- /dev/null
+++ b/tests/unit/cloudfront/test_invalidation_list.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+import random
+import string
+from tests.unit import unittest
+
+import mock
+import boto
+
+
+RESPONSE_TEMPLATE = r"""
+<InvalidationList>
+ <Marker/>
+ <NextMarker>%(next_marker)s</NextMarker>
+ <MaxItems>%(max_items)s</MaxItems>
+ <IsTruncated>%(is_truncated)s</IsTruncated>
+ %(inval_summaries)s
+</InvalidationList>
+"""
+
+INVAL_SUMMARY_TEMPLATE = r"""
+ <InvalidationSummary>
+ <Id>%(cfid)s</Id>
+ <Status>%(status)s</Status>
+ </InvalidationSummary>
+"""
+
+
+class CFInvalidationListTest(unittest.TestCase):
+
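+    # Attribute tag consumed by nose's attrib plugin, so
+    # 'test.py -t cloudfront' selects this case.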
+ cloudfront = True
+
+ def setUp(self):
+ self.cf = boto.connect_cloudfront('aws.aws_access_key_id',
+ 'aws.aws_secret_access_key')
+
+ def _get_random_id(self, length=14):
+ return ''.join([random.choice(string.ascii_letters) for i in
+ range(length)])
+
+ def _group_iter(self, iterator, n):
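+        # Yield items from `iterator` in lists of at most n items,
+        # e.g. _group_iter(range(5), 2) -> [0, 1], [2, 3], [4].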
+ accumulator = []
+ for item in iterator:
+ accumulator.append(item)
+ if len(accumulator) == n:
+ yield accumulator
+ accumulator = []
+ if len(accumulator) != 0:
+ yield accumulator
+
+ def _get_mock_responses(self, num, max_items):
+ max_items = min(max_items, 100)
+ cfid_groups = list(self._group_iter([self._get_random_id() for i in
+ range(num)], max_items))
+ cfg = dict(status='Completed', max_items=max_items, next_marker='')
+ responses = []
+ is_truncated = 'true'
+ for i, group in enumerate(cfid_groups):
+ next_marker = group[-1]
+ if (i + 1) == len(cfid_groups):
+ is_truncated = 'false'
+ next_marker = ''
+ invals = ''
+ cfg.update(dict(next_marker=next_marker,
+ is_truncated=is_truncated))
+ for cfid in group:
+ cfg.update(dict(cfid=cfid))
+ invals += INVAL_SUMMARY_TEMPLATE % cfg
+ cfg.update(dict(inval_summaries=invals))
+ mock_response = mock.Mock()
+ mock_response.read.return_value = RESPONSE_TEMPLATE % cfg
+ mock_response.status = 200
+ responses.append(mock_response)
+ return responses
+
+ def test_manual_pagination(self, num_invals=30, max_items=4):
+ """
+ Test that paginating manually works properly
+ """
+ self.assertGreater(num_invals, max_items)
+ responses = self._get_mock_responses(num=num_invals,
+ max_items=max_items)
+ self.cf.make_request = mock.Mock(side_effect=responses)
+ ir = self.cf.get_invalidation_requests('dist-id-here',
+ max_items=max_items)
+ all_invals = list(ir)
+ self.assertEqual(len(all_invals), max_items)
+ while ir.is_truncated:
+ ir = self.cf.get_invalidation_requests('dist-id-here',
+ marker=ir.next_marker,
+ max_items=max_items)
+ invals = list(ir)
+ self.assertLessEqual(len(invals), max_items)
+ all_invals.extend(invals)
+ remainder = num_invals % max_items
+ if remainder != 0:
+ self.assertEqual(len(invals), remainder)
+ self.assertEqual(len(all_invals), num_invals)
+
+ def test_auto_pagination(self, num_invals=1024):
+ """
+ Test that auto-pagination works properly
+ """
+ max_items = 100
+ self.assertGreaterEqual(num_invals, max_items)
+ responses = self._get_mock_responses(num=num_invals,
+ max_items=max_items)
+ self.cf.make_request = mock.Mock(side_effect=responses)
+ ir = self.cf.get_invalidation_requests('dist-id-here')
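+        # Only the first page is cached up front; iterating should
+        # transparently fetch the remaining pages.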
+ self.assertEqual(len(ir._inval_cache), max_items)
+ self.assertEqual(len(list(ir)), num_invals)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/cloudfront/test_signed_urls.py b/tests/unit/cloudfront/test_signed_urls.py
similarity index 96%
rename from tests/cloudfront/test_signed_urls.py
rename to tests/unit/cloudfront/test_signed_urls.py
index 118117c..2957538 100644
--- a/tests/cloudfront/test_signed_urls.py
+++ b/tests/unit/cloudfront/test_signed_urls.py
@@ -1,10 +1,17 @@
import unittest
-import json
+try:
+ import simplejson as json
+except ImportError:
+ import json
from textwrap import dedent
from boto.cloudfront.distribution import Distribution
class CloudfrontSignedUrlsTest(unittest.TestCase):
+
+ cloudfront = True
+ notdefault = True
+
def setUp(self):
self.pk_str = dedent("""
-----BEGIN RSA PRIVATE KEY-----
@@ -191,7 +198,8 @@
resource = statement["Resource"]
self.assertEqual(url, resource)
condition = statement["Condition"]
- self.assertEqual(1, len(condition.keys()))
+ self.assertEqual(2, len(condition.keys()))
+ date_less_than = condition["DateLessThan"]
date_greater_than = condition["DateGreaterThan"]
self.assertEqual(1, len(date_greater_than.keys()))
aws_epoch_time = date_greater_than["AWS:EpochTime"]
@@ -214,8 +222,9 @@
resource = statement["Resource"]
self.assertEqual(url, resource)
condition = statement["Condition"]
- self.assertEqual(1, len(condition.keys()))
+ self.assertEqual(2, len(condition.keys()))
ip_address = condition["IpAddress"]
+ self.assertTrue("DateLessThan" in condition)
self.assertEqual(1, len(ip_address.keys()))
source_ip = ip_address["AWS:SourceIp"]
self.assertEqual("%s/32" % ip_range, source_ip)
@@ -237,7 +246,8 @@
resource = statement["Resource"]
self.assertEqual(url, resource)
condition = statement["Condition"]
- self.assertEqual(1, len(condition.keys()))
+ self.assertEqual(2, len(condition.keys()))
+ self.assertTrue("DateLessThan" in condition)
ip_address = condition["IpAddress"]
self.assertEqual(1, len(ip_address.keys()))
source_ip = ip_address["AWS:SourceIp"]
diff --git a/tests/cloudfront/__init__.py b/tests/unit/ec2/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/unit/ec2/__init__.py
diff --git a/tests/unit/ec2/test_address.py b/tests/unit/ec2/test_address.py
new file mode 100644
index 0000000..f266197
--- /dev/null
+++ b/tests/unit/ec2/test_address.py
@@ -0,0 +1,39 @@
+import mock
+import unittest
+
+from boto.ec2.address import Address
+
+class AddressTest(unittest.TestCase):
+ def setUp(self):
+ self.address = Address()
+ self.address.connection = mock.Mock()
+ self.address.public_ip = "192.168.1.1"
+
+ def check_that_attribute_has_been_set(self, name, value, attribute):
+ self.address.endElement(name, value, None)
+ self.assertEqual(getattr(self.address, attribute), value)
+
+ def test_endElement_sets_correct_attributes_with_values(self):
+ for arguments in [("publicIp", "192.168.1.1", "public_ip"),
+ ("instanceId", 1, "instance_id"),
+ ("domain", "some domain", "domain"),
+ ("allocationId", 1, "allocation_id"),
+ ("associationId", 1, "association_id"),
+ ("somethingRandom", "somethingRandom", "somethingRandom")]:
+ self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2])
+
+ def test_release_calls_connection_release_address_with_correct_args(self):
+ self.address.release()
+ self.address.connection.release_address.assert_called_with("192.168.1.1")
+
+ def test_associate_calls_connection_associate_address_with_correct_args(self):
+ self.address.associate(1)
+ self.address.connection.associate_address.assert_called_with(1, "192.168.1.1")
+
+ def test_disassociate_calls_connection_disassociate_address_with_correct_args(self):
+ self.address.disassociate()
+ self.address.connection.disassociate_address.assert_called_with("192.168.1.1")
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit/ec2/test_blockdevicemapping.py b/tests/unit/ec2/test_blockdevicemapping.py
new file mode 100644
index 0000000..02ecf58
--- /dev/null
+++ b/tests/unit/ec2/test_blockdevicemapping.py
@@ -0,0 +1,79 @@
+import mock
+import unittest
+
+from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
+
+class BlockDeviceTypeTests(unittest.TestCase):
+ def setUp(self):
+ self.block_device_type = BlockDeviceType()
+
+ def check_that_attribute_has_been_set(self, name, value, attribute):
+ self.block_device_type.endElement(name, value, None)
+ self.assertEqual(getattr(self.block_device_type, attribute), value)
+
+ def test_endElement_sets_correct_attributes_with_values(self):
+ for arguments in [("volumeId", 1, "volume_id"),
+ ("virtualName", "some name", "ephemeral_name"),
+ ("snapshotId", 1, "snapshot_id"),
+ ("volumeSize", 1, "size"),
+ ("status", "some status", "status"),
+ ("attachTime", 1, "attach_time"),
+ ("somethingRandom", "somethingRandom", "somethingRandom")]:
+ self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2])
+
+ def test_endElement_with_name_NoDevice_value_true(self):
+ self.block_device_type.endElement("NoDevice", 'true', None)
+ self.assertEqual(self.block_device_type.no_device, True)
+
+ def test_endElement_with_name_NoDevice_value_other(self):
+ self.block_device_type.endElement("NoDevice", 'something else', None)
+ self.assertEqual(self.block_device_type.no_device, False)
+
+ def test_endElement_with_name_deleteOnTermination_value_true(self):
+ self.block_device_type.endElement("deleteOnTermination", "true", None)
+ self.assertEqual(self.block_device_type.delete_on_termination, True)
+
+ def test_endElement_with_name_deleteOnTermination_value_other(self):
+ self.block_device_type.endElement("deleteOnTermination", 'something else', None)
+ self.assertEqual(self.block_device_type.delete_on_termination, False)
+
+class BlockDeviceMappingTests(unittest.TestCase):
+ def setUp(self):
+ self.block_device_mapping = BlockDeviceMapping()
+
+ def block_device_type_eq(self, b1, b2):
+ if isinstance(b1, BlockDeviceType) and isinstance(b2, BlockDeviceType):
+ return all([b1.connection == b2.connection,
+ b1.ephemeral_name == b2.ephemeral_name,
+ b1.no_device == b2.no_device,
+ b1.volume_id == b2.volume_id,
+ b1.snapshot_id == b2.snapshot_id,
+ b1.status == b2.status,
+ b1.attach_time == b2.attach_time,
+ b1.delete_on_termination == b2.delete_on_termination,
+ b1.size == b2.size])
+
+ def test_startElement_with_name_ebs_sets_and_returns_current_value(self):
+ retval = self.block_device_mapping.startElement("ebs", None, None)
+ assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping))
+
+ def test_startElement_with_name_virtualName_sets_and_returns_current_value(self):
+ retval = self.block_device_mapping.startElement("virtualName", None, None)
+ assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping))
+
+ def test_endElement_with_name_device_sets_current_name(self):
+ self.block_device_mapping.endElement("device", "/dev/null", None)
+ self.assertEqual(self.block_device_mapping.current_name, "/dev/null")
+
+    def test_endElement_with_name_deviceName_sets_current_name(self):
+ self.block_device_mapping.endElement("deviceName", "some device name", None)
+ self.assertEqual(self.block_device_mapping.current_name, "some device name")
+
+ def test_endElement_with_name_item_sets_current_name_key_to_current_value(self):
+ self.block_device_mapping.current_name = "some name"
+ self.block_device_mapping.current_value = "some value"
+ self.block_device_mapping.endElement("item", "some item", None)
+ self.assertEqual(self.block_device_mapping["some name"], "some value")
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit/ec2/test_connection.py b/tests/unit/ec2/test_connection.py
new file mode 100644
index 0000000..a93830d
--- /dev/null
+++ b/tests/unit/ec2/test_connection.py
@@ -0,0 +1,379 @@
+#!/usr/bin/env python
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+from boto.ec2.connection import EC2Connection
+
+
+class TestEC2ConnectionBase(AWSMockServiceTestCase):
+ connection_class = EC2Connection
+
+ def setUp(self):
+ super(TestEC2ConnectionBase, self).setUp()
+ self.ec2 = self.service_connection
+
+
+class TestReservedInstanceOfferings(TestEC2ConnectionBase):
+
+ def default_body(self):
+ return """
+ <DescribeReservedInstancesOfferingsResponse>
+ <requestId>d3253568-edcf-4897-9a3d-fb28e0b3fa38</requestId>
+ <reservedInstancesOfferingsSet>
+ <item>
+ <reservedInstancesOfferingId>2964d1bf71d8</reservedInstancesOfferingId>
+ <instanceType>c1.medium</instanceType>
+ <availabilityZone>us-east-1c</availabilityZone>
+ <duration>94608000</duration>
+ <fixedPrice>775.0</fixedPrice>
+ <usagePrice>0.0</usagePrice>
+ <productDescription>product description</productDescription>
+ <instanceTenancy>default</instanceTenancy>
+ <currencyCode>USD</currencyCode>
+ <offeringType>Heavy Utilization</offeringType>
+ <recurringCharges>
+ <item>
+ <frequency>Hourly</frequency>
+ <amount>0.095</amount>
+ </item>
+ </recurringCharges>
+ <marketplace>false</marketplace>
+ <pricingDetailsSet>
+ <item>
+ <price>0.045</price>
+ <count>1</count>
+ </item>
+ </pricingDetailsSet>
+ </item>
+ <item>
+ <reservedInstancesOfferingId>2dce26e46889</reservedInstancesOfferingId>
+ <instanceType>c1.medium</instanceType>
+ <availabilityZone>us-east-1c</availabilityZone>
+ <duration>94608000</duration>
+ <fixedPrice>775.0</fixedPrice>
+ <usagePrice>0.0</usagePrice>
+ <productDescription>Linux/UNIX</productDescription>
+ <instanceTenancy>default</instanceTenancy>
+ <currencyCode>USD</currencyCode>
+ <offeringType>Heavy Utilization</offeringType>
+ <recurringCharges>
+ <item>
+ <frequency>Hourly</frequency>
+ <amount>0.035</amount>
+ </item>
+ </recurringCharges>
+ <marketplace>false</marketplace>
+ <pricingDetailsSet/>
+ </item>
+ </reservedInstancesOfferingsSet>
+ <nextToken>next_token</nextToken>
+ </DescribeReservedInstancesOfferingsResponse>
+ """
+
+ def test_get_reserved_instance_offerings(self):
+ self.set_http_response(status_code=200)
+ response = self.ec2.get_all_reserved_instances_offerings()
+ self.assertEqual(len(response), 2)
+ instance = response[0]
+ self.assertEqual(instance.id, '2964d1bf71d8')
+ self.assertEqual(instance.instance_type, 'c1.medium')
+ self.assertEqual(instance.availability_zone, 'us-east-1c')
+ self.assertEqual(instance.duration, 94608000)
+ self.assertEqual(instance.fixed_price, '775.0')
+ self.assertEqual(instance.usage_price, '0.0')
+ self.assertEqual(instance.description, 'product description')
+ self.assertEqual(instance.instance_tenancy, 'default')
+ self.assertEqual(instance.currency_code, 'USD')
+ self.assertEqual(instance.offering_type, 'Heavy Utilization')
+ self.assertEqual(len(instance.recurring_charges), 1)
+ self.assertEqual(instance.recurring_charges[0].frequency, 'Hourly')
+ self.assertEqual(instance.recurring_charges[0].amount, '0.095')
+ self.assertEqual(len(instance.pricing_details), 1)
+ self.assertEqual(instance.pricing_details[0].price, '0.045')
+ self.assertEqual(instance.pricing_details[0].count, '1')
+
+ def test_get_reserved_instance_offerings_params(self):
+ self.set_http_response(status_code=200)
+ self.ec2.get_all_reserved_instances_offerings(
+ reserved_instances_offering_ids=['id1', 'id2'],
+ instance_type='t1.micro',
+ availability_zone='us-east-1',
+ product_description='description',
+ instance_tenancy='dedicated',
+ offering_type='offering_type',
+ include_marketplace=False,
+ min_duration=100,
+ max_duration=1000,
+ max_instance_count=1,
+ next_token='next_token',
+ max_results=10
+ )
+ self.assert_request_parameters({
+ 'Action': 'DescribeReservedInstancesOfferings',
+ 'ReservedInstancesOfferingId.1': 'id1',
+ 'ReservedInstancesOfferingId.2': 'id2',
+ 'InstanceType': 't1.micro',
+ 'AvailabilityZone': 'us-east-1',
+ 'ProductDescription': 'description',
+ 'InstanceTenancy': 'dedicated',
+ 'OfferingType': 'offering_type',
+ 'IncludeMarketplace': 'false',
+ 'MinDuration': '100',
+ 'MaxDuration': '1000',
+ 'MaxInstanceCount': '1',
+ 'NextToken': 'next_token',
+ 'MaxResults': '10',
+ 'Version': '2012-08-15'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp'])
+
+
+class TestPurchaseReservedInstanceOffering(TestEC2ConnectionBase):
+ def default_body(self):
+ return """<PurchaseReservedInstancesOffering />"""
+
+ def test_serialized_api_args(self):
+ self.set_http_response(status_code=200)
+ response = self.ec2.purchase_reserved_instance_offering(
+ 'offering_id', 1, (100.0, 'USD'))
+ self.assert_request_parameters({
+ 'Action': 'PurchaseReservedInstancesOffering',
+ 'InstanceCount': 1,
+ 'ReservedInstancesOfferingId': 'offering_id',
+ 'LimitPrice.Amount': '100.0',
+ 'LimitPrice.CurrencyCode': 'USD',
+ 'Version': '2012-08-15'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp'])
+
+
+class TestCancelReservedInstancesListing(TestEC2ConnectionBase):
+ def default_body(self):
+ return """
+ <CancelReservedInstancesListingResponse>
+ <requestId>request_id</requestId>
+ <reservedInstancesListingsSet>
+ <item>
+ <reservedInstancesListingId>listing_id</reservedInstancesListingId>
+ <reservedInstancesId>instance_id</reservedInstancesId>
+ <createDate>2012-07-12T16:55:28.000Z</createDate>
+ <updateDate>2012-07-12T16:55:28.000Z</updateDate>
+ <status>cancelled</status>
+ <statusMessage>CANCELLED</statusMessage>
+ <instanceCounts>
+ <item>
+ <state>Available</state>
+ <instanceCount>0</instanceCount>
+ </item>
+ <item>
+ <state>Sold</state>
+ <instanceCount>0</instanceCount>
+ </item>
+ <item>
+ <state>Cancelled</state>
+ <instanceCount>1</instanceCount>
+ </item>
+ <item>
+ <state>Pending</state>
+ <instanceCount>0</instanceCount>
+ </item>
+ </instanceCounts>
+ <priceSchedules>
+ <item>
+ <term>5</term>
+ <price>166.64</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>4</term>
+ <price>133.32</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>3</term>
+ <price>99.99</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>2</term>
+ <price>66.66</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>1</term>
+ <price>33.33</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ </priceSchedules>
+ <tagSet/>
+ <clientToken>XqJIt1342112125076</clientToken>
+ </item>
+ </reservedInstancesListingsSet>
+ </CancelReservedInstancesListingResponse>
+ """
+
+ def test_reserved_instances_listing(self):
+ self.set_http_response(status_code=200)
+ response = self.ec2.cancel_reserved_instances_listing()
+ self.assertEqual(len(response), 1)
+ cancellation = response[0]
+ self.assertEqual(cancellation.status, 'cancelled')
+ self.assertEqual(cancellation.status_message, 'CANCELLED')
+ self.assertEqual(len(cancellation.instance_counts), 4)
+ first = cancellation.instance_counts[0]
+ self.assertEqual(first.state, 'Available')
+ self.assertEqual(first.instance_count, 0)
+ self.assertEqual(len(cancellation.price_schedules), 5)
+ schedule = cancellation.price_schedules[0]
+ self.assertEqual(schedule.term, 5)
+ self.assertEqual(schedule.price, '166.64')
+ self.assertEqual(schedule.currency_code, 'USD')
+ self.assertEqual(schedule.active, False)
+
+
+class TestCreateReservedInstancesListing(TestEC2ConnectionBase):
+ def default_body(self):
+ return """
+ <CreateReservedInstancesListingResponse>
+ <requestId>request_id</requestId>
+ <reservedInstancesListingsSet>
+ <item>
+ <reservedInstancesListingId>listing_id</reservedInstancesListingId>
+ <reservedInstancesId>instance_id</reservedInstancesId>
+ <createDate>2012-07-17T17:11:09.449Z</createDate>
+ <updateDate>2012-07-17T17:11:09.468Z</updateDate>
+ <status>active</status>
+ <statusMessage>ACTIVE</statusMessage>
+ <instanceCounts>
+ <item>
+ <state>Available</state>
+ <instanceCount>1</instanceCount>
+ </item>
+ <item>
+ <state>Sold</state>
+ <instanceCount>0</instanceCount>
+ </item>
+ <item>
+ <state>Cancelled</state>
+ <instanceCount>0</instanceCount>
+ </item>
+ <item>
+ <state>Pending</state>
+ <instanceCount>0</instanceCount>
+ </item>
+ </instanceCounts>
+ <priceSchedules>
+ <item>
+ <term>11</term>
+ <price>2.5</price>
+ <currencyCode>USD</currencyCode>
+ <active>true</active>
+ </item>
+ <item>
+ <term>10</term>
+ <price>2.5</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>9</term>
+ <price>2.5</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>8</term>
+ <price>2.0</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>7</term>
+ <price>2.0</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>6</term>
+ <price>2.0</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>5</term>
+ <price>1.5</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>4</term>
+ <price>1.5</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>3</term>
+ <price>0.7</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>2</term>
+ <price>0.7</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ <item>
+ <term>1</term>
+ <price>0.1</price>
+ <currencyCode>USD</currencyCode>
+ <active>false</active>
+ </item>
+ </priceSchedules>
+ <tagSet/>
+ <clientToken>myIdempToken1</clientToken>
+ </item>
+ </reservedInstancesListingsSet>
+ </CreateReservedInstancesListingResponse>
+ """
+
+ def test_create_reserved_instances_listing(self):
+ self.set_http_response(status_code=200)
+ response = self.ec2.create_reserved_instances_listing(
+ 'instance_id', 1, [('2.5', 11), ('2.0', 8)], 'client_token')
+ self.assertEqual(len(response), 1)
+ cancellation = response[0]
+ self.assertEqual(cancellation.status, 'active')
+ self.assertEqual(cancellation.status_message, 'ACTIVE')
+ self.assertEqual(len(cancellation.instance_counts), 4)
+ first = cancellation.instance_counts[0]
+ self.assertEqual(first.state, 'Available')
+ self.assertEqual(first.instance_count, 1)
+ self.assertEqual(len(cancellation.price_schedules), 11)
+ schedule = cancellation.price_schedules[0]
+ self.assertEqual(schedule.term, 11)
+ self.assertEqual(schedule.price, '2.5')
+ self.assertEqual(schedule.currency_code, 'USD')
+ self.assertEqual(schedule.active, True)
+
+ self.assert_request_parameters({
+ 'Action': 'CreateReservedInstancesListing',
+ 'ReservedInstancesId': 'instance_id',
+ 'InstanceCount': '1',
+ 'ClientToken': 'client_token',
+ 'PriceSchedules.0.Price': '2.5',
+ 'PriceSchedules.0.Term': '11',
+ 'PriceSchedules.1.Price': '2.0',
+ 'PriceSchedules.1.Term': '8',
+ 'Version': '2012-08-15'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp'])
+
+
+if __name__ == '__main__':
+ unittest.main()
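
A minimal usage sketch of the call exercised above, assuming credentials come
from the environment or the boto config file:

    from boto.ec2.connection import EC2Connection

    conn = EC2Connection()
    offerings = conn.get_all_reserved_instances_offerings(
        instance_type='c1.medium', availability_zone='us-east-1c')
    for offering in offerings:
        print offering.id, offering.fixed_price, offering.offering_type
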
diff --git a/tests/unit/ec2/test_instance.py b/tests/unit/ec2/test_instance.py
new file mode 100644
index 0000000..7d304d7
--- /dev/null
+++ b/tests/unit/ec2/test_instance.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+
+from tests.unit import unittest
+
+import mock
+
+from boto.ec2.connection import EC2Connection
+
+
+RESPONSE = r"""
+<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2012-06-01/">
+ <requestId>ad4b83c2-f606-4c39-90c6-5dcc5be823e1</requestId>
+ <reservationId>r-c5cef7a7</reservationId>
+ <ownerId>184906166255</ownerId>
+ <groupSet>
+ <item>
+ <groupId>sg-99a710f1</groupId>
+ <groupName>SSH</groupName>
+ </item>
+ </groupSet>
+ <instancesSet>
+ <item>
+ <instanceId>i-ff0f1299</instanceId>
+ <imageId>ami-ed65ba84</imageId>
+ <instanceState>
+ <code>0</code>
+ <name>pending</name>
+ </instanceState>
+ <privateDnsName/>
+ <dnsName/>
+ <reason/>
+ <keyName>awskeypair</keyName>
+ <amiLaunchIndex>0</amiLaunchIndex>
+ <productCodes/>
+ <instanceType>t1.micro</instanceType>
+ <launchTime>2012-05-30T19:21:18.000Z</launchTime>
+ <placement>
+ <availabilityZone>us-east-1a</availabilityZone>
+ <groupName/>
+ <tenancy>default</tenancy>
+ </placement>
+ <kernelId>aki-b6aa75df</kernelId>
+ <monitoring>
+ <state>disabled</state>
+ </monitoring>
+ <groupSet>
+ <item>
+ <groupId>sg-99a710f1</groupId>
+ <groupName>SSH</groupName>
+ </item>
+ </groupSet>
+ <stateReason>
+ <code>pending</code>
+ <message>pending</message>
+ </stateReason>
+ <architecture>i386</architecture>
+ <rootDeviceType>ebs</rootDeviceType>
+ <rootDeviceName>/dev/sda1</rootDeviceName>
+ <blockDeviceMapping/>
+ <virtualizationType>paravirtual</virtualizationType>
+ <clientToken/>
+ <hypervisor>xen</hypervisor>
+ <networkInterfaceSet/>
+ <iamInstanceProfile>
+ <arn>arn:aws:iam::184906166255:instance-profile/myinstanceprofile</arn>
+ <id>AIPAIQ2LVHYBCH7LYQFDK</id>
+ </iamInstanceProfile>
+ </item>
+ </instancesSet>
+</RunInstancesResponse>
+"""
+
+
+class TestRunInstanceResponseParsing(unittest.TestCase):
+ def testIAMInstanceProfileParsedCorrectly(self):
+ ec2 = EC2Connection(aws_access_key_id='aws_access_key_id',
+ aws_secret_access_key='aws_secret_access_key')
+ mock_response = mock.Mock()
+ mock_response.read.return_value = RESPONSE
+ mock_response.status = 200
+ ec2.make_request = mock.Mock(return_value=mock_response)
+ reservation = ec2.run_instances(image_id='ami-12345')
+ self.assertEqual(len(reservation.instances), 1)
+ instance = reservation.instances[0]
+ self.assertEqual(instance.image_id, 'ami-ed65ba84')
+ # iamInstanceProfile has an ID element, so we want to make sure
+ # that this does not map to instance.id (which should be the
+ # id of the ec2 instance).
+ self.assertEqual(instance.id, 'i-ff0f1299')
+ self.assertDictEqual(
+ instance.instance_profile,
+ {'arn': ('arn:aws:iam::184906166255:'
+ 'instance-profile/myinstanceprofile'),
+ 'id': 'AIPAIQ2LVHYBCH7LYQFDK'})
+
+
+if __name__ == '__main__':
+ unittest.main()
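
The response above is what comes back when an instance is launched with an
IAM instance profile attached. A minimal launch sketch, assuming the
instance_profile_name keyword and ambient credentials:

    from boto.ec2.connection import EC2Connection

    conn = EC2Connection()
    reservation = conn.run_instances(
        'ami-ed65ba84', instance_profile_name='myinstanceprofile')
    # instance_profile is parsed into a dict with 'arn' and 'id' keys
    print reservation.instances[0].instance_profile
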
diff --git a/tests/unit/ec2/test_volume.py b/tests/unit/ec2/test_volume.py
new file mode 100644
index 0000000..fd2a455
--- /dev/null
+++ b/tests/unit/ec2/test_volume.py
@@ -0,0 +1,248 @@
+import mock
+from tests.unit import unittest
+
+from boto.ec2.snapshot import Snapshot
+from boto.ec2.tag import Tag, TagSet
+from boto.ec2.volume import Volume, AttachmentSet, VolumeAttribute
+
+
+class VolumeTests(unittest.TestCase):
+ def setUp(self):
+ self.attach_data = AttachmentSet()
+ self.attach_data.id = 1
+ self.attach_data.instance_id = 2
+ self.attach_data.status = "some status"
+ self.attach_data.attach_time = 5
+ self.attach_data.device = "/dev/null"
+
+ self.volume_one = Volume()
+ self.volume_one.id = 1
+ self.volume_one.create_time = 5
+ self.volume_one.status = "one_status"
+ self.volume_one.size = "one_size"
+ self.volume_one.snapshot_id = 1
+ self.volume_one.attach_data = self.attach_data
+ self.volume_one.zone = "one_zone"
+
+ self.volume_two = Volume()
+ self.volume_two.connection = mock.Mock()
+ self.volume_two.id = 1
+ self.volume_two.create_time = 6
+ self.volume_two.status = "two_status"
+ self.volume_two.size = "two_size"
+ self.volume_two.snapshot_id = 2
+ self.volume_two.attach_data = None
+ self.volume_two.zone = "two_zone"
+
+ @mock.patch("boto.ec2.volume.TaggedEC2Object.startElement")
+ def test_startElement_calls_TaggedEC2Object_startElement_with_correct_args(self, startElement):
+ volume = Volume()
+ volume.startElement("some name", "some attrs", None)
+ startElement.assert_called_with(volume, "some name", "some attrs", None)
+
+ @mock.patch("boto.ec2.volume.TaggedEC2Object.startElement")
+ def test_startElement_retval_not_None_returns_correct_thing(self, startElement):
+ tag_set = mock.Mock(TagSet)
+ startElement.return_value = tag_set
+ volume = Volume()
+ retval = volume.startElement(None, None, None)
+ self.assertEqual(retval, tag_set)
+
+ @mock.patch("boto.ec2.volume.TaggedEC2Object.startElement")
+ @mock.patch("boto.resultset.ResultSet")
+ def test_startElement_with_name_tagSet_calls_ResultSet(self, ResultSet, startElement):
+ startElement.return_value = None
+ result_set = mock.Mock(ResultSet([("item", Tag)]))
+ volume = Volume()
+ volume.tags = result_set
+ retval = volume.startElement("tagSet", None, None)
+ self.assertEqual(retval, volume.tags)
+
+ @mock.patch("boto.ec2.volume.TaggedEC2Object.startElement")
+ def test_startElement_with_name_attachmentSet_returns_AttachmentSet(self, startElement):
+ startElement.return_value = None
+ attach_data = AttachmentSet()
+ volume = Volume()
+ volume.attach_data = attach_data
+ retval = volume.startElement("attachmentSet", None, None)
+ self.assertEqual(retval, volume.attach_data)
+
+ @mock.patch("boto.ec2.volume.TaggedEC2Object.startElement")
+ def test_startElement_else_returns_None(self, startElement):
+ startElement.return_value = None
+ volume = Volume()
+ retval = volume.startElement("not tagSet or attachmentSet", None, None)
+ self.assertEqual(retval, None)
+
+ def check_that_attribute_has_been_set(self, name, value, attribute):
+ volume = Volume()
+ volume.endElement(name, value, None)
+ self.assertEqual(getattr(volume, attribute), value)
+
+ def test_endElement_sets_correct_attributes_with_values(self):
+ for arguments in [("volumeId", "some value", "id"),
+ ("createTime", "some time", "create_time"),
+ ("status", "some status", "status"),
+ ("size", 5, "size"),
+ ("snapshotId", 1, "snapshot_id"),
+ ("availabilityZone", "some zone", "zone"),
+ ("someName", "some value", "someName")]:
+ self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2])
+
+ def test_endElement_with_name_status_and_empty_string_value_doesnt_set_status(self):
+ volume = Volume()
+ volume.endElement("status", "", None)
+ self.assertNotEqual(volume.status, "")
+
+ def test_update_with_result_set_greater_than_0_updates_dict(self):
+ self.volume_two.connection.get_all_volumes.return_value = [self.volume_one]
+ self.volume_two.update()
+
+ assert all([self.volume_two.create_time == 5,
+ self.volume_two.status == "one_status",
+ self.volume_two.size == "one_size",
+ self.volume_two.snapshot_id == 1,
+ self.volume_two.attach_data == self.attach_data,
+ self.volume_two.zone == "one_zone"])
+
+ def test_update_with_validate_true_raises_value_error(self):
+ self.volume_one.connection = mock.Mock()
+ self.volume_one.connection.get_all_volumes.return_value = []
+ with self.assertRaisesRegexp(ValueError, "^1 is not a valid Volume ID$"):
+ self.volume_one.update(True)
+
+ def test_update_returns_status(self):
+ self.volume_one.connection = mock.Mock()
+ self.volume_one.connection.get_all_volumes.return_value = [self.volume_two]
+ retval = self.volume_one.update()
+ self.assertEqual(retval, "two_status")
+
+ def test_delete_calls_delete_volume(self):
+ self.volume_one.connection = mock.Mock()
+ self.volume_one.delete()
+ self.volume_one.connection.delete_volume.assert_called_with(1)
+
+ def test_attach_calls_attach_volume(self):
+ self.volume_one.connection = mock.Mock()
+ self.volume_one.attach("instance_id", "/dev/null")
+ self.volume_one.connection.attach_volume.assert_called_with(1, "instance_id", "/dev/null")
+
+ def test_detach_calls_detach_volume(self):
+ self.volume_one.connection = mock.Mock()
+ self.volume_one.detach()
+ self.volume_one.connection.detach_volume.assert_called_with(
+ 1, 2, "/dev/null", False)
+
+ def test_detach_with_no_attach_data(self):
+ self.volume_two.connection = mock.Mock()
+ self.volume_two.detach()
+ self.volume_two.connection.detach_volume.assert_called_with(
+ 1, None, None, False)
+
+ def test_detach_with_force_calls_detach_volume_with_force(self):
+ self.volume_one.connection = mock.Mock()
+ self.volume_one.detach(True)
+ self.volume_one.connection.detach_volume.assert_called_with(
+ 1, 2, "/dev/null", True)
+
+
+ def test_create_snapshot_calls_connection_create_snapshot(self):
+ self.volume_one.connection = mock.Mock()
+ self.volume_one.create_snapshot()
+ self.volume_one.connection.create_snapshot.assert_called_with(
+ 1, None)
+
+ def test_create_snapshot_with_description(self):
+ self.volume_one.connection = mock.Mock()
+ self.volume_one.create_snapshot("some description")
+ self.volume_one.connection.create_snapshot.assert_called_with(
+ 1, "some description")
+
+ def test_volume_state_returns_status(self):
+ retval = self.volume_one.volume_state()
+ self.assertEqual(retval, "one_status")
+
+ def test_attachment_state_returns_state(self):
+ retval = self.volume_one.attachment_state()
+ self.assertEqual(retval, "some status")
+
+ def test_attachment_state_no_attach_data_returns_None(self):
+ retval = self.volume_two.attachment_state()
+ self.assertEqual(retval, None)
+
+ def test_snapshots_returns_snapshots(self):
+ snapshot_one = Snapshot()
+ snapshot_one.volume_id = 1
+ snapshot_two = Snapshot()
+ snapshot_two.volume_id = 2
+
+ self.volume_one.connection = mock.Mock()
+ self.volume_one.connection.get_all_snapshots.return_value = [snapshot_one, snapshot_two]
+ retval = self.volume_one.snapshots()
+ self.assertEqual(retval, [snapshot_one])
+
+ def test_snapshots_with_owner_and_restorable_by(self):
+ self.volume_one.connection = mock.Mock()
+ self.volume_one.connection.get_all_snapshots.return_value = []
+ self.volume_one.snapshots("owner", "restorable_by")
+ self.volume_one.connection.get_all_snapshots.assert_called_with(
+ owner="owner", restorable_by="restorable_by")
+
+class AttachmentSetTests(unittest.TestCase):
+ def check_that_attribute_has_been_set(self, name, value, attribute):
+ attachment_set = AttachmentSet()
+ attachment_set.endElement(name, value, None)
+ self.assertEqual(getattr(attachment_set, attribute), value)
+
+ def test_endElement_with_name_volumeId_sets_id(self):
+ return self.check_that_attribute_has_been_set("volumeId", "some value", "id")
+
+ def test_endElement_with_name_instanceId_sets_instance_id(self):
+ return self.check_that_attribute_has_been_set("instanceId", 1, "instance_id")
+
+ def test_endElement_with_name_status_sets_status(self):
+ return self.check_that_attribute_has_been_set("status", "some value", "status")
+
+ def test_endElement_with_name_attachTime_sets_attach_time(self):
+ return self.check_that_attribute_has_been_set("attachTime", 5, "attach_time")
+
+ def test_endElement_with_name_device_sets_device(self):
+ return self.check_that_attribute_has_been_set("device", "/dev/null", "device")
+
+ def test_endElement_with_other_name_sets_other_name_attribute(self):
+ return self.check_that_attribute_has_been_set("someName", "some value", "someName")
+
+class VolumeAttributeTests(unittest.TestCase):
+ def setUp(self):
+ self.volume_attribute = VolumeAttribute()
+ self.volume_attribute._key_name = "key_name"
+ self.volume_attribute.attrs = {"key_name": False}
+
+ def test_startElement_with_name_autoEnableIO_sets_key_name(self):
+ self.volume_attribute.startElement("autoEnableIO", None, None)
+ self.assertEqual(self.volume_attribute._key_name, "autoEnableIO")
+
+ def test_startElement_without_name_autoEnableIO_returns_None(self):
+ retval = self.volume_attribute.startElement("some name", None, None)
+ self.assertEqual(retval, None)
+
+ def test_endElement_with_name_value_and_value_true_sets_attrs_key_name_True(self):
+ self.volume_attribute.endElement("value", "true", None)
+ self.assertEqual(self.volume_attribute.attrs['key_name'], True)
+
+ def test_endElement_with_name_value_and_value_false_sets_attrs_key_name_False(self):
+ self.volume_attribute._key_name = "other_key_name"
+ self.volume_attribute.endElement("value", "false", None)
+ self.assertEqual(self.volume_attribute.attrs['other_key_name'], False)
+
+ def test_endElement_with_name_volumeId_sets_id(self):
+ self.volume_attribute.endElement("volumeId", "some_value", None)
+ self.assertEqual(self.volume_attribute.id, "some_value")
+
+ def test_endElement_with_other_name_sets_other_name_attribute(self):
+ self.volume_attribute.endElement("someName", "some value", None)
+ self.assertEqual(self.volume_attribute.someName, "some value")
+
+
+if __name__ == "__main__":
+ unittest.main()
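
The attach/detach/state helpers tested above all delegate to the connection;
a minimal usage sketch, assuming ambient credentials and a placeholder
instance id:

    from boto.ec2.connection import EC2Connection

    conn = EC2Connection()
    volume = conn.create_volume(8, 'us-east-1a')   # 8 GiB volume in one AZ
    volume.attach('i-ff0f1299', '/dev/sdh')
    volume.update()
    print volume.volume_state(), volume.attachment_state()
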
diff --git a/boto/emr/tests/test_emr_responses.py b/tests/unit/emr/test_emr_responses.py
similarity index 100%
rename from boto/emr/tests/test_emr_responses.py
rename to tests/unit/emr/test_emr_responses.py
diff --git a/tests/cloudfront/__init__.py b/tests/unit/glacier/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/unit/glacier/__init__.py
diff --git a/tests/unit/glacier/test_layer1.py b/tests/unit/glacier/test_layer1.py
new file mode 100644
index 0000000..7d7b6fc
--- /dev/null
+++ b/tests/unit/glacier/test_layer1.py
@@ -0,0 +1,78 @@
+from tests.unit import AWSMockServiceTestCase
+from boto.glacier.layer1 import Layer1
+import json
+import copy
+
+
+class GlacierLayer1ConnectionBase(AWSMockServiceTestCase):
+ connection_class = Layer1
+
+ def setUp(self):
+ super(GlacierLayer1ConnectionBase, self).setUp()
+ self.json_header = [('Content-Type', 'application/json')]
+ self.vault_name = u'examplevault'
+ self.vault_arn = 'arn:aws:glacier:us-east-1:012345678901:vaults/' + \
+ self.vault_name
+ self.vault_info = {u'CreationDate': u'2012-03-16T22:22:47.214Z',
+ u'LastInventoryDate': u'2012-03-21T22:06:51.218Z',
+ u'NumberOfArchives': 2,
+ u'SizeInBytes': 12334,
+ u'VaultARN': self.vault_arn,
+ u'VaultName': self.vault_name}
+
+
+class GlacierVaultsOperations(GlacierLayer1ConnectionBase):
+
+ def test_create_vault_parameters(self):
+ self.set_http_response(status_code=201)
+ self.service_connection.create_vault(self.vault_name)
+
+ def test_list_vaults(self):
+ content = {u'Marker': None,
+ u'RequestId': None,
+ u'VaultList': [self.vault_info]}
+ self.set_http_response(status_code=200, header=self.json_header,
+ body=json.dumps(content))
+ api_response = self.service_connection.list_vaults()
+ self.assertDictEqual(content, api_response)
+
+ def test_describe_vaults(self):
+ content = copy.copy(self.vault_info)
+ content[u'RequestId'] = None
+ self.set_http_response(status_code=200, header=self.json_header,
+ body=json.dumps(content))
+ api_response = self.service_connection.describe_vault(self.vault_name)
+ self.assertDictEqual(content, api_response)
+
+ def test_delete_vault(self):
+ self.set_http_response(status_code=204)
+ self.service_connection.delete_vault(self.vault_name)
+
+
+class GlacierJobOperations(GlacierLayer1ConnectionBase):
+
+ def setUp(self):
+ super(GlacierJobOperations, self).setUp()
+ self.job_content = 'abc' * 1024
+
+ def test_initiate_archive_job(self):
+ content = {u'Type': u'archive-retrieval',
+ u'ArchiveId': u'AAABZpJrTyioDC_HsOmHae8EZp_uBSJr6cnGOLKp_XJCl-Q',
+ u'Description': u'Test Archive',
+ u'SNSTopic': u'Topic',
+ u'JobId': None,
+ u'Location': None,
+ u'RequestId': None}
+ self.set_http_response(status_code=202, header=self.json_header,
+ body=json.dumps(content))
+ api_response = self.service_connection.initiate_job(self.vault_name,
+ self.job_content)
+ self.assertDictEqual(content, api_response)
+
+ def test_get_archive_output(self):
+ header = [('Content-Type', 'application/octet-stream')]
+ self.set_http_response(status_code=200, header=header,
+ body=self.job_content)
+ response = self.service_connection.get_job_output(self.vault_name,
+ 'example-job-id')
+ self.assertEqual(self.job_content, response.read())
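
Layer1 maps one method to one Glacier REST operation and returns parsed JSON
dicts shaped like the fixtures above. A minimal sketch, assuming ambient
credentials:

    from boto.glacier.layer1 import Layer1

    glacier = Layer1()
    glacier.create_vault('examplevault')
    print glacier.list_vaults()['VaultList']
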
diff --git a/tests/unit/glacier/test_layer2.py b/tests/unit/glacier/test_layer2.py
new file mode 100644
index 0000000..a82a3a2
--- /dev/null
+++ b/tests/unit/glacier/test_layer2.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from tests.unit import unittest
+
+from mock import Mock
+
+from boto.glacier.layer1 import Layer1
+from boto.glacier.layer2 import Layer2
+from boto.glacier.vault import Vault
+from boto.glacier.vault import Job
+
+# Some fixture data from the Glacier docs
+FIXTURE_VAULT = {
+ "CreationDate" : "2012-02-20T17:01:45.198Z",
+ "LastInventoryDate" : "2012-03-20T17:03:43.221Z",
+ "NumberOfArchives" : 192,
+ "SizeInBytes" : 78088912,
+ "VaultARN" : "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault",
+ "VaultName" : "examplevault"
+}
+
+FIXTURE_ARCHIVE_JOB = {
+ "Action": "ArchiveRetrieval",
+ "ArchiveId": ("NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z8i1_AUyUs"
+ "uhPAdTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs01MNGntHEQL8MBfGlqr"
+ "EXAMPLEArchiveId"),
+ "ArchiveSizeInBytes": 16777216,
+ "Completed": False,
+ "CreationDate": "2012-05-15T17:21:39.339Z",
+ "CompletionDate": "2012-05-15T17:21:43.561Z",
+ "InventorySizeInBytes": None,
+ "JobDescription": "My ArchiveRetrieval Job",
+ "JobId": ("HkF9p6o7yjhFx-K3CGl6fuSm6VzW9T7esGQfco8nUXVYwS0jlb5gq1JZ55yHgt5v"
+ "P54ZShjoQzQVVh7vEXAMPLEjobID"),
+ "SHA256TreeHash": ("beb0fe31a1c7ca8c6c04d574ea906e3f97b31fdca7571defb5b44dc"
+ "a89b5af60"),
+ "SNSTopic": "arn:aws:sns:us-east-1:012345678901:mytopic",
+ "StatusCode": "InProgress",
+ "StatusMessage": "Operation in progress.",
+ "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault"
+}
+
+
+class GlacierLayer2Base(unittest.TestCase):
+ def setUp(self):
+ self.mock_layer1 = Mock(spec=Layer1)
+
+
+class TestGlacierLayer2Connection(GlacierLayer2Base):
+ def setUp(self):
+ GlacierLayer2Base.setUp(self)
+ self.layer2 = Layer2(layer1=self.mock_layer1)
+
+ def test_create_vault(self):
+ self.mock_layer1.describe_vault.return_value = FIXTURE_VAULT
+ self.layer2.create_vault("My Vault")
+ self.mock_layer1.create_vault.assert_called_with("My Vault")
+
+ def test_get_vault(self):
+ self.mock_layer1.describe_vault.return_value = FIXTURE_VAULT
+ vault = self.layer2.get_vault("examplevault")
+ self.assertEqual(vault.layer1, self.mock_layer1)
+ self.assertEqual(vault.name, "examplevault")
+ self.assertEqual(vault.size, 78088912)
+ self.assertEqual(vault.number_of_archives, 192)
+
+ def test_list_vaults(self):
+ self.mock_layer1.list_vaults.return_value = [FIXTURE_VAULT]
+ vaults = self.layer2.list_vaults()
+ self.assertEqual(vaults[0].name, "examplevault")
+
+
+class TestVault(GlacierLayer2Base):
+ def setUp(self):
+ GlacierLayer2Base.setUp(self)
+ self.vault = Vault(self.mock_layer1, FIXTURE_VAULT)
+
+ # TODO: Tests for the other methods of uploading
+
+ def test_create_archive_writer(self):
+ self.mock_layer1.initiate_multipart_upload.return_value = {
+ "UploadId": "UPLOADID"}
+ writer = self.vault.create_archive_writer(description="stuff")
+ self.mock_layer1.initiate_multipart_upload.assert_called_with(
+ "examplevault", self.vault.DefaultPartSize, "stuff")
+ self.assertEqual(writer.vault, self.vault)
+ self.assertEqual(writer.upload_id, "UPLOADID")
+
+ def test_delete_archive(self):
+ self.vault.delete_archive("archive")
+ self.mock_layer1.delete_archive.assert_called_with("examplevault",
+ "archive")
+
+ def test_get_job(self):
+ self.mock_layer1.describe_job.return_value = FIXTURE_ARCHIVE_JOB
+ job = self.vault.get_job(
+ "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z8i1_AUyUsuhPA"
+ "dTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs01MNGntHEQL8MBfGlqrEXAMPLEA"
+ "rchiveId")
+ self.assertEqual(job.action, "ArchiveRetrieval")
+
+ def test_list_jobs(self):
+ self.mock_layer1.list_jobs.return_value = {
+ "JobList": [FIXTURE_ARCHIVE_JOB]}
+ jobs = self.vault.list_jobs(False, "InProgress")
+ self.mock_layer1.list_jobs.assert_called_with("examplevault",
+ False, "InProgress")
+ self.assertEqual(jobs[0].archive_id,
+ "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z"
+ "8i1_AUyUsuhPAdTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs0"
+ "1MNGntHEQL8MBfGlqrEXAMPLEArchiveId")
+
+
+class TestJob(GlacierLayer2Base):
+ def setUp(self):
+ GlacierLayer2Base.setUp(self)
+ self.vault = Vault(self.mock_layer1, FIXTURE_VAULT)
+ self.job = Job(self.vault, FIXTURE_ARCHIVE_JOB)
+
+ def test_get_job_output(self):
+ self.mock_layer1.get_job_output.return_value = "TEST_OUTPUT"
+ self.job.get_output((0, 100))
+ self.mock_layer1.get_job_output.assert_called_with(
+ "examplevault",
+ "HkF9p6o7yjhFx-K3CGl6fuSm6VzW9T7esGQfco8nUXVYwS0jlb5gq1JZ55yHgt5vP"
+ "54ZShjoQzQVVh7vEXAMPLEjobID", (0,100))
diff --git a/tests/unit/glacier/test_writer.py b/tests/unit/glacier/test_writer.py
new file mode 100644
index 0000000..216429f
--- /dev/null
+++ b/tests/unit/glacier/test_writer.py
@@ -0,0 +1,26 @@
+from hashlib import sha256
+
+from tests.unit import unittest
+import mock
+
+from boto.glacier.writer import Writer, chunk_hashes
+
+
+class TestChunking(unittest.TestCase):
+ def test_chunk_hashes_exact(self):
+ chunks = chunk_hashes('a' * (2 * 1024 * 1024))
+ self.assertEqual(len(chunks), 2)
+ self.assertEqual(chunks[0], sha256('a' * 1024 * 1024).digest())
+
+ def test_chunks_with_leftovers(self):
+ bytestring = 'a' * (2 * 1024 * 1024 + 20)
+ chunks = chunk_hashes(bytestring)
+ self.assertEqual(len(chunks), 3)
+ self.assertEqual(chunks[0], sha256('a' * 1024 * 1024).digest())
+ self.assertEqual(chunks[1], sha256('a' * 1024 * 1024).digest())
+ self.assertEqual(chunks[2], sha256('a' * 20).digest())
+
+ def test_less_than_one_chunk(self):
+ chunks = chunk_hashes('aaaa')
+ self.assertEqual(len(chunks), 1)
+ self.assertEqual(chunks[0], sha256('aaaa').digest())
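
The invariant these tests pin down: the payload is split into 1 MiB chunks,
each hashed with SHA-256, and a trailing partial chunk gets its own digest
(these are the leaves of Glacier's tree hash). The same invariant, stated
standalone:

    from hashlib import sha256
    from boto.glacier.writer import chunk_hashes

    MB = 1024 * 1024
    chunks = chunk_hashes('a' * (2 * MB + 20))
    assert len(chunks) == 3                        # two full chunks + remainder
    assert chunks[-1] == sha256('a' * 20).digest()
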
diff --git a/tests/cloudfront/__init__.py b/tests/unit/provider/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/unit/provider/__init__.py
diff --git a/tests/unit/provider/test_provider.py b/tests/unit/provider/test_provider.py
new file mode 100644
index 0000000..6e494d1
--- /dev/null
+++ b/tests/unit/provider/test_provider.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+from datetime import datetime, timedelta
+
+from tests.unit import unittest
+import mock
+
+from boto import provider
+
+
+class TestProvider(unittest.TestCase):
+ def setUp(self):
+ self.environ = {}
+ self.config = {}
+
+ self.metadata_patch = mock.patch('boto.utils.get_instance_metadata')
+ self.config_patch = mock.patch('boto.provider.config.get',
+ self.get_config)
+ self.has_config_patch = mock.patch('boto.provider.config.has_option',
+ self.has_config)
+ self.environ_patch = mock.patch('os.environ', self.environ)
+
+ self.get_instance_metadata = self.metadata_patch.start()
+ self.config_patch.start()
+ self.has_config_patch.start()
+ self.environ_patch.start()
+
+
+ def tearDown(self):
+ self.metadata_patch.stop()
+ self.config_patch.stop()
+ self.has_config_patch.stop()
+ self.environ_patch.stop()
+
+ def has_config(self, section_name, key):
+ try:
+ self.config[section_name][key]
+ return True
+ except KeyError:
+ return False
+
+ def get_config(self, section_name, key):
+ try:
+ return self.config[section_name][key]
+ except KeyError:
+ return None
+
+ def test_passed_in_values_are_used(self):
+ p = provider.Provider('aws', 'access_key', 'secret_key', 'security_token')
+ self.assertEqual(p.access_key, 'access_key')
+ self.assertEqual(p.secret_key, 'secret_key')
+ self.assertEqual(p.security_token, 'security_token')
+
+ def test_environment_variables_are_used(self):
+ self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
+ self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
+ p = provider.Provider('aws')
+ self.assertEqual(p.access_key, 'env_access_key')
+ self.assertEqual(p.secret_key, 'env_secret_key')
+ self.assertIsNone(p.security_token)
+
+ def test_config_values_are_used(self):
+ self.config = {
+ 'Credentials': {
+ 'aws_access_key_id': 'cfg_access_key',
+ 'aws_secret_access_key': 'cfg_secret_key',
+ }
+ }
+ p = provider.Provider('aws')
+ self.assertEqual(p.access_key, 'cfg_access_key')
+ self.assertEqual(p.secret_key, 'cfg_secret_key')
+ self.assertIsNone(p.security_token)
+
+ def test_env_vars_beat_config_values(self):
+ self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
+ self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
+ self.config = {
+ 'Credentials': {
+ 'aws_access_key_id': 'cfg_access_key',
+ 'aws_secret_access_key': 'cfg_secret_key',
+ }
+ }
+ p = provider.Provider('aws')
+ self.assertEqual(p.access_key, 'env_access_key')
+ self.assertEqual(p.secret_key, 'env_secret_key')
+ self.assertIsNone(p.security_token)
+
+ def test_metadata_server_credentials(self):
+ instance_config = {
+ 'iam': {
+ 'security-credentials': {
+ 'allowall': {u'AccessKeyId': u'iam_access_key',
+ u'Code': u'Success',
+ u'Expiration': u'2012-09-01T03:57:34Z',
+ u'LastUpdated': u'2012-08-31T21:43:40Z',
+ u'SecretAccessKey': u'iam_secret_key',
+ u'Token': u'iam_token',
+ u'Type': u'AWS-HMAC'}
+ }
+ }
+ }
+ self.get_instance_metadata.return_value = instance_config
+ p = provider.Provider('aws')
+ self.assertEqual(p.access_key, 'iam_access_key')
+ self.assertEqual(p.secret_key, 'iam_secret_key')
+ self.assertEqual(p.security_token, 'iam_token')
+
+ def test_refresh_credentials(self):
+ now = datetime.now()
+ first_expiration = (now + timedelta(seconds=10)).strftime(
+ "%Y-%m-%dT%H:%M:%SZ")
+ credentials = {
+ u'AccessKeyId': u'first_access_key',
+ u'Code': u'Success',
+ u'Expiration': first_expiration,
+ u'LastUpdated': u'2012-08-31T21:43:40Z',
+ u'SecretAccessKey': u'first_secret_key',
+ u'Token': u'first_token',
+ u'Type': u'AWS-HMAC'
+ }
+ instance_config = {
+ 'iam': {
+ 'security-credentials': {
+ 'allowall': credentials
+ }
+ }
+ }
+ self.get_instance_metadata.return_value = instance_config
+ p = provider.Provider('aws')
+ self.assertEqual(p.access_key, 'first_access_key')
+ self.assertEqual(p.secret_key, 'first_secret_key')
+ self.assertEqual(p.security_token, 'first_token')
+ self.assertIsNotNone(p._credential_expiry_time)
+
+ # Now set the expiration to something in the past.
+ expired = now - timedelta(seconds=20)
+ p._credential_expiry_time = expired
+ credentials['AccessKeyId'] = 'second_access_key'
+ credentials['SecretAccessKey'] = 'second_secret_key'
+ credentials['Token'] = 'second_token'
+ self.get_instance_metadata.return_value = instance_config
+
+ # Now upon attribute access, the credentials should be updated.
+ self.assertEqual(p.access_key, 'second_access_key')
+ self.assertEqual(p.secret_key, 'second_secret_key')
+ self.assertEqual(p.security_token, 'second_token')
+
+
+if __name__ == '__main__':
+ unittest.main()
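
The resolution order these tests establish: explicitly passed keys win, then
the AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY environment variables, then the
boto config file, and finally the EC2 instance metadata service, which also
supplies a security token and an expiry used for the refresh test above:

    from boto import provider

    # With no explicit keys, Provider walks the chain described above.
    p = provider.Provider('aws')
    print p.access_key, p.secret_key, p.security_token
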
diff --git a/tests/cloudfront/__init__.py b/tests/unit/s3/__init__.py
similarity index 100%
copy from tests/cloudfront/__init__.py
copy to tests/unit/s3/__init__.py
diff --git a/tests/unit/s3/test_cors_configuration.py b/tests/unit/s3/test_cors_configuration.py
new file mode 100644
index 0000000..8f69803
--- /dev/null
+++ b/tests/unit/s3/test_cors_configuration.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+import unittest
+from boto.s3.cors import CORSConfiguration
+
+CORS_BODY_1 = (
+ '<CORSConfiguration>'
+ '<CORSRule>'
+ '<AllowedMethod>PUT</AllowedMethod>'
+ '<AllowedMethod>POST</AllowedMethod>'
+ '<AllowedMethod>DELETE</AllowedMethod>'
+ '<AllowedOrigin>http://www.example.com</AllowedOrigin>'
+ '<AllowedHeader>*</AllowedHeader>'
+ '<ExposeHeader>x-amz-server-side-encryption</ExposeHeader>'
+ '<MaxAgeSeconds>3000</MaxAgeSeconds>'
+ '<ID>foobar_rule</ID>'
+ '</CORSRule>'
+ '</CORSConfiguration>')
+
+CORS_BODY_2 = (
+ '<CORSConfiguration>'
+ '<CORSRule>'
+ '<AllowedMethod>PUT</AllowedMethod>'
+ '<AllowedMethod>POST</AllowedMethod>'
+ '<AllowedMethod>DELETE</AllowedMethod>'
+ '<AllowedOrigin>http://www.example.com</AllowedOrigin>'
+ '<AllowedHeader>*</AllowedHeader>'
+ '<ExposeHeader>x-amz-server-side-encryption</ExposeHeader>'
+ '<MaxAgeSeconds>3000</MaxAgeSeconds>'
+ '</CORSRule>'
+ '<CORSRule>'
+ '<AllowedMethod>GET</AllowedMethod>'
+ '<AllowedOrigin>*</AllowedOrigin>'
+ '<AllowedHeader>*</AllowedHeader>'
+ '<MaxAgeSeconds>3000</MaxAgeSeconds>'
+ '</CORSRule>'
+ '</CORSConfiguration>')
+
+CORS_BODY_3 = (
+ '<CORSConfiguration>'
+ '<CORSRule>'
+ '<AllowedMethod>GET</AllowedMethod>'
+ '<AllowedOrigin>*</AllowedOrigin>'
+ '</CORSRule>'
+ '</CORSConfiguration>')
+
+
+class TestCORSConfiguration(unittest.TestCase):
+
+ def test_one_rule_with_id(self):
+ cfg = CORSConfiguration()
+ cfg.add_rule(['PUT', 'POST', 'DELETE'],
+ 'http://www.example.com',
+ allowed_header='*',
+ max_age_seconds=3000,
+ expose_header='x-amz-server-side-encryption',
+ id='foobar_rule')
+ self.assertEqual(cfg.to_xml(), CORS_BODY_1)
+
+ def test_two_rules(self):
+ cfg = CORSConfiguration()
+ cfg.add_rule(['PUT', 'POST', 'DELETE'],
+ 'http://www.example.com',
+ allowed_header='*',
+ max_age_seconds=3000,
+ expose_header='x-amz-server-side-encryption')
+ cfg.add_rule('GET', '*', allowed_header='*', max_age_seconds=3000)
+ self.assertEqual(cfg.to_xml(), CORS_BODY_2)
+
+ def test_minimal(self):
+ cfg = CORSConfiguration()
+ cfg.add_rule('GET', '*')
+ self.assertEqual(cfg.to_xml(), CORS_BODY_3)
+
+
+if __name__ == "__main__":
+ unittest.main()
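
A minimal sketch of applying a built configuration to a bucket, assuming
ambient credentials, a placeholder bucket name, and the set_cors setter
paired with this configuration class:

    from boto.s3.connection import S3Connection
    from boto.s3.cors import CORSConfiguration

    conn = S3Connection()
    bucket = conn.get_bucket('mybucket')
    cfg = CORSConfiguration()
    cfg.add_rule('GET', '*')   # allow cross-origin GETs from any origin
    bucket.set_cors(cfg)       # assumed setter; see add_rule args tested above
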
diff --git a/tests/unit/s3/test_tagging.py b/tests/unit/s3/test_tagging.py
new file mode 100644
index 0000000..4a0be38
--- /dev/null
+++ b/tests/unit/s3/test_tagging.py
@@ -0,0 +1,37 @@
+from tests.unit import AWSMockServiceTestCase
+
+from boto.s3.connection import S3Connection
+from boto.s3.bucket import Bucket
+
+
+class TestS3Tagging(AWSMockServiceTestCase):
+ connection_class = S3Connection
+
+ def default_body(self):
+ return """
+ <Tagging>
+ <TagSet>
+ <Tag>
+ <Key>Project</Key>
+ <Value>Project One</Value>
+ </Tag>
+ <Tag>
+ <Key>User</Key>
+ <Value>jsmith</Value>
+ </Tag>
+ </TagSet>
+ </Tagging>
+ """
+
+ def test_parse_tagging_response(self):
+ self.set_http_response(status_code=200)
+ b = Bucket(self.service_connection, 'mybucket')
+ api_response = b.get_tags()
+ # The outer list is a list of tag sets.
+ self.assertEqual(len(api_response), 1)
+ # The inner list is a list of tags.
+ self.assertEqual(len(api_response[0]), 2)
+ self.assertEqual(api_response[0][0].key, 'Project')
+ self.assertEqual(api_response[0][0].value, 'Project One')
+ self.assertEqual(api_response[0][1].key, 'User')
+ self.assertEqual(api_response[0][1].value, 'jsmith')
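
A minimal read sketch using the nesting the test documents (a list of tag
sets, each a list of tags), assuming ambient credentials and a placeholder
bucket name:

    from boto.s3.connection import S3Connection

    conn = S3Connection()
    bucket = conn.get_bucket('mybucket')
    for tag_set in bucket.get_tags():   # outer list: tag sets
        for tag in tag_set:             # inner list: individual tags
            print tag.key, tag.value
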
diff --git a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py
new file mode 100644
index 0000000..205d3d8
--- /dev/null
+++ b/tests/unit/utils/test_utils.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2010 Robert Mela
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import unittest
+import hashlib
+import hmac
+
+from boto.utils import Password
+from boto.utils import pythonize_name
+
+
+class TestPassword(unittest.TestCase):
+ """Test basic password functionality"""
+
+ def clstest(self, cls):
+ """Insure that password.__eq__ hashes test value before compare."""
+ password = cls('foo')
+ self.assertNotEquals(password, 'foo')
+
+ password.set('foo')
+ hashed = str(password)
+ self.assertEquals(password, 'foo')
+ self.assertEquals(password.str, hashed)
+
+ password = cls(hashed)
+ self.assertNotEquals(password.str, 'foo')
+ self.assertEquals(password, 'foo')
+ self.assertEquals(password.str, hashed)
+
+ def test_aaa_version_1_9_default_behavior(self):
+ self.clstest(Password)
+
+ def test_custom_hashclass(self):
+ class SHA224Password(Password):
+ hashfunc = hashlib.sha224
+
+ password = SHA224Password()
+ password.set('foo')
+ self.assertEquals(hashlib.sha224('foo').hexdigest(), str(password))
+
+ def test_hmac(self):
+ def hmac_hashfunc(cls, msg):
+ return hmac.new('mysecretkey', msg)
+
+ class HMACPassword(Password):
+ hashfunc = hmac_hashfunc
+
+ self.clstest(HMACPassword)
+ password = HMACPassword()
+ password.set('foo')
+
+ self.assertEquals(str(password),
+ hmac.new('mysecretkey', 'foo').hexdigest())
+
+ def test_constructor(self):
+ hmac_hashfunc = lambda msg: hmac.new('mysecretkey', msg)
+
+ password = Password(hashfunc=hmac_hashfunc)
+ password.set('foo')
+ self.assertEquals(password.str,
+ hmac.new('mysecretkey', 'foo').hexdigest())
+
+
+class TestPythonizeName(unittest.TestCase):
+ def test_empty_string(self):
+ self.assertEqual(pythonize_name(''), '')
+
+ def test_all_lower_case(self):
+ self.assertEqual(pythonize_name('lowercase'), 'lowercase')
+
+ def test_all_upper_case(self):
+ self.assertEqual(pythonize_name('UPPERCASE'), 'uppercase')
+
+ def test_camel_case(self):
+ self.assertEqual(pythonize_name('OriginallyCamelCased'),
+ 'originally_camel_cased')
+
+ def test_already_pythonized(self):
+ self.assertEqual(pythonize_name('already_pythonized'),
+ 'already_pythonized')
+
+ def test_multiple_upper_cased_letters(self):
+ self.assertEqual(pythonize_name('HTTPRequest'), 'http_request')
+ self.assertEqual(pythonize_name('RequestForHTTP'), 'request_for_http')
+
+ def test_string_with_numbers(self):
+ self.assertEqual(pythonize_name('HTTPStatus200Ok'), 'http_status_200_ok')
+
+
+if __name__ == '__main__':
+ unittest.main()
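
A minimal sketch of the Password round trip that clstest verifies: only a
one-way hash is stored, and __eq__ hashes its argument before comparing:

    from boto.utils import Password

    password = Password()
    password.set('correct horse')    # keeps only the hash
    stored = str(password)           # persist this, not the plaintext
    assert Password(stored) == 'correct horse'
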
diff --git a/tests/utils/test_password.py b/tests/utils/test_password.py
deleted file mode 100644
index 9bfb638..0000000
--- a/tests/utils/test_password.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright (c) 2010 Robert Mela
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish, dis-
-# tribute, sublicense, and/or sell copies of the Software, and to permit
-# persons to whom the Software is furnished to do so, subject to the fol-
-# lowing conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
-# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-#
-import unittest
-
-
-import logging
-log = logging.getLogger(__file__)
-
-class TestPassword(unittest.TestCase):
- """Test basic password functionality"""
-
- def clstest(self,cls):
-
- """Insure that password.__eq__ hashes test value before compare"""
-
- password=cls('foo')
- log.debug( "Password %s" % password )
- self.assertNotEquals(password , 'foo')
-
- password.set('foo')
- hashed = str(password)
- self.assertEquals(password , 'foo')
- self.assertEquals(password.str, hashed)
-
- password = cls(hashed)
- self.assertNotEquals(password.str , 'foo')
- self.assertEquals(password , 'foo')
- self.assertEquals(password.str , hashed)
-
-
- def test_aaa_version_1_9_default_behavior(self):
- from boto.utils import Password
- self.clstest(Password)
-
- def test_custom_hashclass(self):
-
- from boto.utils import Password
- import hashlib
-
- class SHA224Password(Password):
- hashfunc=hashlib.sha224
-
- password=SHA224Password()
- password.set('foo')
- self.assertEquals( hashlib.sha224('foo').hexdigest(), str(password))
-
- def test_hmac(self):
- from boto.utils import Password
- import hmac
-
- def hmac_hashfunc(cls,msg):
- log.debug("\n%s %s" % (cls.__class__, cls) )
- return hmac.new('mysecretkey', msg)
-
- class HMACPassword(Password):
- hashfunc=hmac_hashfunc
-
- self.clstest(HMACPassword)
- password=HMACPassword()
- password.set('foo')
-
- self.assertEquals(str(password), hmac.new('mysecretkey','foo').hexdigest())
-
- def test_constructor(self):
- from boto.utils import Password
- import hmac
-
- hmac_hashfunc = lambda msg: hmac.new('mysecretkey', msg )
-
- password = Password(hashfunc=hmac_hashfunc)
- password.set('foo')
- self.assertEquals(password.str, hmac.new('mysecretkey','foo').hexdigest())
-
-
-
-if __name__ == '__main__':
- import sys
- sys.path = [ '../../' ] + sys.path
- #logging.basicConfig()
- #log.setLevel(logging.DEBUG)
- suite = unittest.TestLoader().loadTestsFromTestCase(TestPassword)
- unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..321b925
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,8 @@
+[tox]
+envlist = py26,py27
+
+
+[testenv]
+commands =
+ pip install -qr requirements.txt
+ python tests/test.py tests/unit
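
With this tox.ini in place, running tox from the repository root executes the
unit suite under both Python 2.6 and 2.7 (tox -e py27 restricts it to a
single interpreter); the testenv commands install requirements.txt first, so
test dependencies are pulled in automatically.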